| before (stringlengths 0-955k) | after (stringlengths 0-877k) | repo (stringlengths 1-74) | type (stringclasses: 1 value) |
|---|---|---|---|
def __getitem__(self, idx):
if self.two_views:
<DeepExtract>
path = os.path.join(self.img_path, self.img_A_names[idx])
(image_A, im_size_A) = cv2.imread(path, 1)[:, :, ::-1]
</DeepExtract>
<DeepExtract>
path = os.path.join(self.img_path, self.img_B_names[idx])
(image_B, im_size_B) = cv2.imread(path, 1)[:, :, ::-1]
</DeepExtract>
if self.transform_source:
image_A = self.transform_source(image_A)
if self.transform_target:
image_B = self.transform_target(image_B)
sample = {'source_image': image_A, 'target_image': image_B, 'source_im_size': im_size_A, 'target_im_size': im_size_B, 'sparse': False}
else:
<DeepExtract>
path = os.path.join(self.img_path, self.images[idx])
(image, im_size) = cv2.imread(path, 1)[:, :, ::-1]
</DeepExtract>
sample = {'image': image, 'image_size': im_size}
return sample
|
def __getitem__(self, idx):
if self.two_views:
path = os.path.join(self.img_path, self.img_A_names[idx])
(image_A, im_size_A) = cv2.imread(path, 1)[:, :, ::-1]
path = os.path.join(self.img_path, self.img_B_names[idx])
(image_B, im_size_B) = cv2.imread(path, 1)[:, :, ::-1]
if self.transform_source:
image_A = self.transform_source(image_A)
if self.transform_target:
image_B = self.transform_target(image_B)
sample = {'source_image': image_A, 'target_image': image_B, 'source_im_size': im_size_A, 'target_im_size': im_size_B, 'sparse': False}
else:
path = os.path.join(self.img_path, self.images[idx])
(image, im_size) = cv2.imread(path, 1)[:, :, ::-1]
sample = {'image': image, 'image_size': im_size}
return sample
|
DenseMatching
|
positive
|
@register_vcs_handler('git', 'pieces_from_vcs')
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, '.git')):
if verbose:
print('no .git in %s' % root)
raise NotThisMethod('no .git directory')
GITS = ['git']
if sys.platform == 'win32':
GITS = ['git.cmd', 'git.exe']
<DeepExtract>
assert isinstance(GITS, list)
p = None
for c in GITS:
try:
dispcmd = str([c] + ['describe', '--tags', '--dirty', '--always', '--long', '--match', '%s*' % tag_prefix])
p = subprocess.Popen([c] + ['describe', '--tags', '--dirty', '--always', '--long', '--match', '%s*' % tag_prefix], cwd=root, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print('unable to run %s' % dispcmd)
print(e)
describe_out = None
else:
if verbose:
print('unable to find command, tried %s' % (GITS,))
describe_out = None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print('unable to run %s (error)' % dispcmd)
describe_out = None
describe_out = stdout
</DeepExtract>
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
<DeepExtract>
assert isinstance(GITS, list)
p = None
for c in GITS:
try:
dispcmd = str([c] + ['rev-parse', 'HEAD'])
p = subprocess.Popen([c] + ['rev-parse', 'HEAD'], cwd=root, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print('unable to run %s' % dispcmd)
print(e)
full_out = None
else:
if verbose:
print('unable to find command, tried %s' % (GITS,))
full_out = None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print('unable to run %s (error)' % dispcmd)
full_out = None
full_out = stdout
</DeepExtract>
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces['long'] = full_out
pieces['short'] = full_out[:7]
pieces['error'] = None
git_describe = describe_out
dirty = git_describe.endswith('-dirty')
pieces['dirty'] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex('-dirty')]
if '-' in git_describe:
mo = re.search('^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
pieces['error'] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces['error'] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)
return pieces
pieces['closest-tag'] = full_tag[len(tag_prefix):]
pieces['distance'] = int(mo.group(2))
pieces['short'] = mo.group(3)
else:
pieces['closest-tag'] = None
<DeepExtract>
assert isinstance(GITS, list)
p = None
for c in GITS:
try:
dispcmd = str([c] + ['rev-list', 'HEAD', '--count'])
p = subprocess.Popen([c] + ['rev-list', 'HEAD', '--count'], cwd=root, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print('unable to run %s' % dispcmd)
print(e)
count_out = None
else:
if verbose:
print('unable to find command, tried %s' % (GITS,))
count_out = None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print('unable to run %s (error)' % dispcmd)
count_out = None
count_out = stdout
</DeepExtract>
pieces['distance'] = int(count_out)
return pieces
|
@register_vcs_handler('git', 'pieces_from_vcs')
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, '.git')):
if verbose:
print('no .git in %s' % root)
raise NotThisMethod('no .git directory')
GITS = ['git']
if sys.platform == 'win32':
GITS = ['git.cmd', 'git.exe']
assert isinstance(GITS, list)
p = None
for c in GITS:
try:
dispcmd = str([c] + ['describe', '--tags', '--dirty', '--always', '--long', '--match', '%s*' % tag_prefix])
p = subprocess.Popen([c] + ['describe', '--tags', '--dirty', '--always', '--long', '--match', '%s*' % tag_prefix], cwd=root, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print('unable to run %s' % dispcmd)
print(e)
describe_out = None
else:
if verbose:
print('unable to find command, tried %s' % (GITS,))
describe_out = None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print('unable to run %s (error)' % dispcmd)
describe_out = None
describe_out = stdout
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
assert isinstance(GITS, list)
p = None
for c in GITS:
try:
dispcmd = str([c] + ['rev-parse', 'HEAD'])
p = subprocess.Popen([c] + ['rev-parse', 'HEAD'], cwd=root, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print('unable to run %s' % dispcmd)
print(e)
full_out = None
else:
if verbose:
print('unable to find command, tried %s' % (GITS,))
full_out = None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print('unable to run %s (error)' % dispcmd)
full_out = None
full_out = stdout
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces['long'] = full_out
pieces['short'] = full_out[:7]
pieces['error'] = None
git_describe = describe_out
dirty = git_describe.endswith('-dirty')
pieces['dirty'] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex('-dirty')]
if '-' in git_describe:
mo = re.search('^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
pieces['error'] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces['error'] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)
return pieces
pieces['closest-tag'] = full_tag[len(tag_prefix):]
pieces['distance'] = int(mo.group(2))
pieces['short'] = mo.group(3)
else:
pieces['closest-tag'] = None
assert isinstance(GITS, list)
p = None
for c in GITS:
try:
dispcmd = str([c] + ['rev-list', 'HEAD', '--count'])
p = subprocess.Popen([c] + ['rev-list', 'HEAD', '--count'], cwd=root, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print('unable to run %s' % dispcmd)
print(e)
count_out = None
else:
if verbose:
print('unable to find command, tried %s' % (GITS,))
count_out = None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print('unable to run %s (error)' % dispcmd)
count_out = None
count_out = stdout
pieces['distance'] = int(count_out)
return pieces
|
coalition
|
positive
|
def __init__(self, quaternion, translation, rotation=None, normalize=True, unstack_inputs=False):
"""Initialize from quaternion and translation.
Args:
quaternion: Rotation represented by a quaternion, to be applied
before translation. Must be a unit quaternion unless normalize==True.
translation: Translation represented as a vector.
rotation: Same rotation as the quaternion, represented as a (..., 3, 3)
tensor. If None, rotation will be calculated from the quaternion.
normalize: If True, l2 normalize the quaternion on input.
unstack_inputs: If True, translation is a vector with last component 3
"""
if quaternion is not None:
assert quaternion.shape[-1] == 4
if unstack_inputs:
if rotation is not None:
rotation = [jnp.moveaxis(x, -1, 0) for x in jnp.moveaxis(rotation, -2, 0)]
translation = jnp.moveaxis(translation, -1, 0)
if normalize and quaternion is not None:
quaternion = quaternion / jnp.linalg.norm(quaternion, axis=-1, keepdims=True)
if rotation is None:
<DeepExtract>
rot_tensor = jnp.sum(np.reshape(QUAT_TO_ROT, (4, 4, 9)) * quaternion[..., :, None, None] * quaternion[..., None, :, None], axis=(-3, -2))
rot = jnp.moveaxis(rot_tensor, -1, 0)
rotation = [[rot[0], rot[1], rot[2]], [rot[3], rot[4], rot[5]], [rot[6], rot[7], rot[8]]]
</DeepExtract>
self.quaternion = quaternion
self.rotation = [list(row) for row in rotation]
self.translation = list(translation)
assert all((len(row) == 3 for row in self.rotation))
assert len(self.translation) == 3
|
def __init__(self, quaternion, translation, rotation=None, normalize=True, unstack_inputs=False):
"""Initialize from quaternion and translation.
Args:
quaternion: Rotation represented by a quaternion, to be applied
before translation. Must be a unit quaternion unless normalize==True.
translation: Translation represented as a vector.
rotation: Same rotation as the quaternion, represented as a (..., 3, 3)
tensor. If None, rotation will be calculated from the quaternion.
normalize: If True, l2 normalize the quaternion on input.
unstack_inputs: If True, translation is a vector with last component 3
"""
if quaternion is not None:
assert quaternion.shape[-1] == 4
if unstack_inputs:
if rotation is not None:
rotation = [jnp.moveaxis(x, -1, 0) for x in jnp.moveaxis(rotation, -2, 0)]
translation = jnp.moveaxis(translation, -1, 0)
if normalize and quaternion is not None:
quaternion = quaternion / jnp.linalg.norm(quaternion, axis=-1, keepdims=True)
if rotation is None:
rot_tensor = jnp.sum(np.reshape(QUAT_TO_ROT, (4, 4, 9)) * quaternion[..., :, None, None] * quaternion[..., None, :, None], axis=(-3, -2))
rot = jnp.moveaxis(rot_tensor, -1, 0)
rotation = [[rot[0], rot[1], rot[2]], [rot[3], rot[4], rot[5]], [rot[6], rot[7], rot[8]]]
self.quaternion = quaternion
self.rotation = [list(row) for row in rotation]
self.translation = list(translation)
assert all((len(row) == 3 for row in self.rotation))
assert len(self.translation) == 3
|
alphafold
|
positive
|
def get_hits(self, rule, starttime, endtime, index, scroll=False):
""" Query Elasticsearch for the given rule and return the results.
:param rule: The rule configuration.
:param starttime: The earliest time to query.
:param endtime: The latest time to query.
:return: A list of hits, bounded by rule['max_query_size'] (or self.max_query_size).
"""
<DeepExtract>
starttime = rule['dt_to_ts'](starttime)
endtime = rule['dt_to_ts'](endtime)
rule['filter'] = copy.copy(rule['filter'])
es_filters = {'filter': {'bool': {'must': rule['filter']}}}
if starttime and endtime:
es_filters['filter']['bool']['must'].insert(0, {'range': {rule['timestamp_field']: {'gt': starttime, 'lte': endtime}}})
if rule['five']:
query = {'query': {'bool': es_filters}}
else:
query = {'query': {'filtered': es_filters}}
if sort:
query['sort'] = [{rule['timestamp_field']: {'order': 'desc' if desc else 'asc'}}]
query = query
</DeepExtract>
if self.thread_data.current_es.is_atleastsixsix():
extra_args = {'_source_includes': rule['include']}
else:
extra_args = {'_source_include': rule['include']}
scroll_keepalive = rule.get('scroll_keepalive', self.scroll_keepalive)
if not rule.get('_source_enabled'):
if rule['five']:
query['stored_fields'] = rule['include']
else:
query['fields'] = rule['include']
extra_args = {}
try:
if scroll:
res = self.thread_data.current_es.scroll(scroll_id=rule['scroll_id'], scroll=scroll_keepalive)
else:
res = self.thread_data.current_es.search(scroll=scroll_keepalive, index=index, size=rule.get('max_query_size', self.max_query_size), body=query, ignore_unavailable=True, **extra_args)
if '_scroll_id' in res:
rule['scroll_id'] = res['_scroll_id']
if self.thread_data.current_es.is_atleastseven():
self.thread_data.total_hits = int(res['hits']['total']['value'])
else:
self.thread_data.total_hits = int(res['hits']['total'])
if len(res.get('_shards', {}).get('failures', [])) > 0:
try:
errs = [e['reason']['reason'] for e in res['_shards']['failures'] if 'Failed to parse' in e['reason']['reason']]
if len(errs):
raise ElasticsearchException(errs)
except (TypeError, KeyError):
raise ElasticsearchException(str(res['_shards']['failures']))
logging.debug(str(res))
except ElasticsearchException as e:
if len(str(e)) > 1024:
e = str(e)[:1024] + '... (%d characters removed)' % (len(str(e)) - 1024)
<DeepExtract>
logging.error('Error running query: %s' % e)
body = {'message': 'Error running query: %s' % e}
tb = traceback.format_exc()
body['traceback'] = tb.strip().split('\n')
if {'rule': rule['name'], 'query': query}:
body['data'] = {'rule': rule['name'], 'query': query}
self.writeback('elastalert_error', body)
</DeepExtract>
return None
hits = res['hits']['hits']
self.thread_data.num_hits += len(hits)
lt = rule.get('use_local_time')
status_log = 'Queried rule %s from %s to %s: %s / %s hits' % (rule['name'], pretty_ts(starttime, lt), pretty_ts(endtime, lt), self.thread_data.num_hits, len(hits))
if self.thread_data.total_hits > rule.get('max_query_size', self.max_query_size):
elastalert_logger.info('%s (scrolling..)' % status_log)
else:
elastalert_logger.info(status_log)
<DeepExtract>
processed_hits = []
for hit in hits:
hit.setdefault('_source', {})
for (key, value) in list(hit.get('fields', {}).items()):
hit['_source'].setdefault(key, value[0] if type(value) is list and len(value) == 1 else value)
ts = lookup_es_key(hit['_source'], rule['timestamp_field'])
if not ts and (not rule['_source_enabled']):
raise EAException("Error: No timestamp was found for hit. '_source_enabled' is set to false, check your mappings for stored fields")
set_es_key(hit['_source'], rule['timestamp_field'], rule['ts_to_dt'](ts))
set_es_key(hit, rule['timestamp_field'], lookup_es_key(hit['_source'], rule['timestamp_field']))
for field in ['_id', '_index', '_type']:
if field in hit:
hit['_source'][field] = hit[field]
if rule.get('compound_query_key'):
values = [lookup_es_key(hit['_source'], key) for key in rule['compound_query_key']]
hit['_source'][rule['query_key']] = ', '.join([str(value) for value in values])
if rule.get('compound_aggregation_key'):
values = [lookup_es_key(hit['_source'], key) for key in rule['compound_aggregation_key']]
hit['_source'][rule['aggregation_key']] = ', '.join([str(value) for value in values])
processed_hits.append(hit['_source'])
hits = processed_hits
</DeepExtract>
if 'doc_type' not in rule and len(hits):
rule['doc_type'] = hits[0]['_type']
return hits
|
def get_hits(self, rule, starttime, endtime, index, scroll=False):
""" Query Elasticsearch for the given rule and return the results.
:param rule: The rule configuration.
:param starttime: The earliest time to query.
:param endtime: The latest time to query.
:return: A list of hits, bounded by rule['max_query_size'] (or self.max_query_size).
"""
starttime = rule['dt_to_ts'](starttime)
endtime = rule['dt_to_ts'](endtime)
rule['filter'] = copy.copy(rule['filter'])
es_filters = {'filter': {'bool': {'must': rule['filter']}}}
if starttime and endtime:
es_filters['filter']['bool']['must'].insert(0, {'range': {rule['timestamp_field']: {'gt': starttime, 'lte': endtime}}})
if rule['five']:
query = {'query': {'bool': es_filters}}
else:
query = {'query': {'filtered': es_filters}}
if sort:
query['sort'] = [{rule['timestamp_field']: {'order': 'desc' if desc else 'asc'}}]
query = query
if self.thread_data.current_es.is_atleastsixsix():
extra_args = {'_source_includes': rule['include']}
else:
extra_args = {'_source_include': rule['include']}
scroll_keepalive = rule.get('scroll_keepalive', self.scroll_keepalive)
if not rule.get('_source_enabled'):
if rule['five']:
query['stored_fields'] = rule['include']
else:
query['fields'] = rule['include']
extra_args = {}
try:
if scroll:
res = self.thread_data.current_es.scroll(scroll_id=rule['scroll_id'], scroll=scroll_keepalive)
else:
res = self.thread_data.current_es.search(scroll=scroll_keepalive, index=index, size=rule.get('max_query_size', self.max_query_size), body=query, ignore_unavailable=True, **extra_args)
if '_scroll_id' in res:
rule['scroll_id'] = res['_scroll_id']
if self.thread_data.current_es.is_atleastseven():
self.thread_data.total_hits = int(res['hits']['total']['value'])
else:
self.thread_data.total_hits = int(res['hits']['total'])
if len(res.get('_shards', {}).get('failures', [])) > 0:
try:
errs = [e['reason']['reason'] for e in res['_shards']['failures'] if 'Failed to parse' in e['reason']['reason']]
if len(errs):
raise ElasticsearchException(errs)
except (TypeError, KeyError):
raise ElasticsearchException(str(res['_shards']['failures']))
logging.debug(str(res))
except ElasticsearchException as e:
if len(str(e)) > 1024:
e = str(e)[:1024] + '... (%d characters removed)' % (len(str(e)) - 1024)
logging.error('Error running query: %s' % e)
body = {'message': 'Error running query: %s' % e}
tb = traceback.format_exc()
body['traceback'] = tb.strip().split('\n')
if {'rule': rule['name'], 'query': query}:
body['data'] = {'rule': rule['name'], 'query': query}
self.writeback('elastalert_error', body)
return None
hits = res['hits']['hits']
self.thread_data.num_hits += len(hits)
lt = rule.get('use_local_time')
status_log = 'Queried rule %s from %s to %s: %s / %s hits' % (rule['name'], pretty_ts(starttime, lt), pretty_ts(endtime, lt), self.thread_data.num_hits, len(hits))
if self.thread_data.total_hits > rule.get('max_query_size', self.max_query_size):
elastalert_logger.info('%s (scrolling..)' % status_log)
else:
elastalert_logger.info(status_log)
processed_hits = []
for hit in hits:
hit.setdefault('_source', {})
for (key, value) in list(hit.get('fields', {}).items()):
hit['_source'].setdefault(key, value[0] if type(value) is list and len(value) == 1 else value)
ts = lookup_es_key(hit['_source'], rule['timestamp_field'])
if not ts and (not rule['_source_enabled']):
raise EAException("Error: No timestamp was found for hit. '_source_enabled' is set to false, check your mappings for stored fields")
set_es_key(hit['_source'], rule['timestamp_field'], rule['ts_to_dt'](ts))
set_es_key(hit, rule['timestamp_field'], lookup_es_key(hit['_source'], rule['timestamp_field']))
for field in ['_id', '_index', '_type']:
if field in hit:
hit['_source'][field] = hit[field]
if rule.get('compound_query_key'):
values = [lookup_es_key(hit['_source'], key) for key in rule['compound_query_key']]
hit['_source'][rule['query_key']] = ', '.join([str(value) for value in values])
if rule.get('compound_aggregation_key'):
values = [lookup_es_key(hit['_source'], key) for key in rule['compound_aggregation_key']]
hit['_source'][rule['aggregation_key']] = ', '.join([str(value) for value in values])
processed_hits.append(hit['_source'])
hits = processed_hits
if 'doc_type' not in rule and len(hits):
rule['doc_type'] = hits[0]['_type']
return hits
|
elastalert
|
positive
|
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin, verbose=False):
if verbose:
print(' - scoring {:d} candidates'.format(x.shape[0]))
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = candidate_inds[i, j]
<DeepExtract>
scores[i, j] = margin(x[i].dot(y[k]), (fwd_mean[i] + bwd_mean[k]) / 2)
</DeepExtract>
return scores
|
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin, verbose=False):
if verbose:
print(' - scoring {:d} candidates'.format(x.shape[0]))
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = candidate_inds[i, j]
scores[i, j] = margin(x[i].dot(y[k]), (fwd_mean[i] + bwd_mean[k]) / 2)
return scores
|
banglanmt
|
positive
|
def call(self, samples, training: bool=None):
x0 = samples['input']
y0 = insert_sos_in_labels(samples['output'], self.sos)
x = self.x_net(x0, training=training)
<DeepExtract>
samples['input_length'] = tf.cast(samples['input_length'], tf.float32)
logit_length = tf.math.ceil(samples['input_length'] / 2)
logit_length = tf.math.ceil(logit_length / 2)
logit_length = tf.cast(logit_length, tf.int32)
samples['input_length'] = logit_length
</DeepExtract>
(input_mask, output_mask) = create_multihead_mask(x, input_length, y0)
y = self.conformer_ctc(x, input_mask, training=training, return_encoder_output=True)
y = self.layer_norm(y)
y = self.layer_dense(y)
return y
|
def call(self, samples, training: bool=None):
x0 = samples['input']
y0 = insert_sos_in_labels(samples['output'], self.sos)
x = self.x_net(x0, training=training)
samples['input_length'] = tf.cast(samples['input_length'], tf.float32)
logit_length = tf.math.ceil(samples['input_length'] / 2)
logit_length = tf.math.ceil(logit_length / 2)
logit_length = tf.cast(logit_length, tf.int32)
samples['input_length'] = logit_length
(input_mask, output_mask) = create_multihead_mask(x, input_length, y0)
y = self.conformer_ctc(x, input_mask, training=training, return_encoder_output=True)
y = self.layer_norm(y)
y = self.layer_dense(y)
return y
|
athena
|
positive
|
def copy_between(self, src, dest, dest_node):
"""Copy src to dest on dest_node
:param src: Path to the file or directory we want to copy
:param dest: The destination path
:param dest_node: The node to which we want to copy the file/directory
Note that if src is a directory, this will automatically copy recursively.
"""
temp_dir = tempfile.mkdtemp()
try:
<DeepExtract>
path_basename = src
if path_basename.endswith(os.path.sep):
path_basename = path_basename[:-len(os.path.sep)]
path_basename = os.path.basename(path_basename)
local_dest = os.path.join(temp_dir, path_basename)
</DeepExtract>
<DeepExtract>
if os.path.isdir(local_dest):
local_dest = self._re_anchor_basename(src, local_dest)
if self.isfile(src):
self.sftp_client.get(src, local_dest)
elif self.isdir(src):
os.mkdir(local_dest)
for obj in self.sftp_client.listdir(src):
obj_path = os.path.join(src, obj)
if self.isfile(obj_path) or self.isdir(obj_path):
self.copy_from(obj_path, local_dest)
else:
pass
</DeepExtract>
dest_node.account.copy_to(local_dest, dest)
finally:
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
|
def copy_between(self, src, dest, dest_node):
"""Copy src to dest on dest_node
:param src: Path to the file or directory we want to copy
:param dest: The destination path
:param dest_node: The node to which we want to copy the file/directory
Note that if src is a directory, this will automatically copy recursively.
"""
temp_dir = tempfile.mkdtemp()
try:
path_basename = src
if path_basename.endswith(os.path.sep):
path_basename = path_basename[:-len(os.path.sep)]
path_basename = os.path.basename(path_basename)
local_dest = os.path.join(temp_dir, path_basename)
if os.path.isdir(local_dest):
local_dest = self._re_anchor_basename(src, local_dest)
if self.isfile(src):
self.sftp_client.get(src, local_dest)
elif self.isdir(src):
os.mkdir(local_dest)
for obj in self.sftp_client.listdir(src):
obj_path = os.path.join(src, obj)
if self.isfile(obj_path) or self.isdir(obj_path):
self.copy_from(obj_path, local_dest)
else:
pass
dest_node.account.copy_to(local_dest, dest)
finally:
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
|
ducktape
|
positive
|
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res)
def check_for_value() -> bool:
<DeepExtract>
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res)
def check_for_value() -> Optional[int]:
try:
sched_metrics = get_scheduler_metrics(service_name)
if 'counters' not in sched_metrics:
log.info('No counters present for service {}. Types were: {}'.format(service_name, sched_metrics.keys()))
value = None
sched_counters = sched_metrics['counters']
if counter_name not in sched_counters:
log.info("No counter named '{}' was found for service {}. Counters were: {}".format(counter_name, service_name, sched_counters.keys()))
value = None
value = sched_counters[counter_name]['count']
assert isinstance(value, int)
log.info('{} metric counter: {}={}'.format(service_name, counter_name, value))
value = value
except Exception as e:
log.error('Caught exception trying to get metrics: {}'.format(e))
value = None
value = int(check_for_value())
</DeepExtract>
return value >= min_value
|
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res)
def check_for_value() -> bool:
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res)
def check_for_value() -> Optional[int]:
try:
sched_metrics = get_scheduler_metrics(service_name)
if 'counters' not in sched_metrics:
log.info('No counters present for service {}. Types were: {}'.format(service_name, sched_metrics.keys()))
value = None
sched_counters = sched_metrics['counters']
if counter_name not in sched_counters:
log.info("No counter named '{}' was found for service {}. Counters were: {}".format(counter_name, service_name, sched_counters.keys()))
value = None
value = sched_counters[counter_name]['count']
assert isinstance(value, int)
log.info('{} metric counter: {}={}'.format(service_name, counter_name, value))
value = value
except Exception as e:
log.error('Caught exception trying to get metrics: {}'.format(e))
value = None
value = int(check_for_value())
return value >= min_value
|
dcos-kafka-service
|
positive
|
def write_start_info(self):
try:
ncbi_code_int = int(self.config_params['ncbi_code'])
except:
ncbi_code_int = 0
<DeepExtract>
if self.__session is None:
self.__session = cm2db.make_session_from_config(self.config_params)
session = self.__session
</DeepExtract>
session.add(cm2db.RunInfo(start_time=datetime.now(), num_iterations=self.config_params['num_iterations'], organism=self.organism().code, species=self.organism().species(), ncbi_code=ncbi_code_int, num_rows=self.ratios.num_rows, num_columns=self.ratios.num_columns, num_clusters=self.config_params['num_clusters'], git_sha='$Id: 7556c82379b23b338e1b21756f90d288c373e25c $'))
session.commit()
|
def write_start_info(self):
try:
ncbi_code_int = int(self.config_params['ncbi_code'])
except:
ncbi_code_int = 0
if self.__session is None:
self.__session = cm2db.make_session_from_config(self.config_params)
session = self.__session
session.add(cm2db.RunInfo(start_time=datetime.now(), num_iterations=self.config_params['num_iterations'], organism=self.organism().code, species=self.organism().species(), ncbi_code=ncbi_code_int, num_rows=self.ratios.num_rows, num_columns=self.ratios.num_columns, num_clusters=self.config_params['num_clusters'], git_sha='$Id: 7556c82379b23b338e1b21756f90d288c373e25c $'))
session.commit()
|
cmonkey2
|
positive
|
def forward(self, pred, target, label_weight, *args, **kwargs):
"""Calculate the GHM-C loss.
Args:
pred (float tensor of size [batch_num, class_num]):
The direct prediction of classification fc layer.
target (float tensor of size [batch_num, class_num]):
Binary class target for each sample.
label_weight (float tensor of size [batch_num, class_num]):
the value is 1 if the sample is valid and 0 if ignored.
Returns:
The gradient harmonized loss.
"""
if pred.dim() != target.dim():
<DeepExtract>
bin_labels = target.new_full((target.size(0), pred.size(-1)), 0)
inds = torch.nonzero(target >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, target[inds] - 1] = 1
bin_label_weights = label_weight.view(-1, 1).expand(label_weight.size(0), pred.size(-1))
(target, label_weight) = (bin_labels, bin_label_weights)
</DeepExtract>
(target, label_weight) = (target.float(), label_weight.float())
edges = self.edges
mmt = self.momentum
weights = torch.zeros_like(pred)
g = torch.abs(pred.sigmoid().detach() - target)
valid = label_weight > 0
tot = max(valid.float().sum().item(), 1.0)
n = 0
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] + (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
n += 1
if n > 0:
weights = weights / n
loss = F.binary_cross_entropy_with_logits(pred, target, weights, reduction='sum') / tot
return loss * self.loss_weight
|
def forward(self, pred, target, label_weight, *args, **kwargs):
"""Calculate the GHM-C loss.
Args:
pred (float tensor of size [batch_num, class_num]):
The direct prediction of classification fc layer.
target (float tensor of size [batch_num, class_num]):
Binary class target for each sample.
label_weight (float tensor of size [batch_num, class_num]):
the value is 1 if the sample is valid and 0 if ignored.
Returns:
The gradient harmonized loss.
"""
if pred.dim() != target.dim():
bin_labels = target.new_full((target.size(0), pred.size(-1)), 0)
inds = torch.nonzero(target >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, target[inds] - 1] = 1
bin_label_weights = label_weight.view(-1, 1).expand(label_weight.size(0), pred.size(-1))
(target, label_weight) = (bin_labels, bin_label_weights)
(target, label_weight) = (target.float(), label_weight.float())
edges = self.edges
mmt = self.momentum
weights = torch.zeros_like(pred)
g = torch.abs(pred.sigmoid().detach() - target)
valid = label_weight > 0
tot = max(valid.float().sum().item(), 1.0)
n = 0
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] + (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
n += 1
if n > 0:
weights = weights / n
loss = F.binary_cross_entropy_with_logits(pred, target, weights, reduction='sum') / tot
return loss * self.loss_weight
|
Cascade-RPN
|
positive
|
def _block_cfgs_to_list(block_cfgs):
assert isinstance(block_cfgs, list)
ret = []
for (stage_idx, stage) in enumerate(block_cfgs):
<DeepExtract>
assert isinstance(stage, list)
ret = []
for x in stage:
ret += _expand_block_cfg(x)
stage = ret
</DeepExtract>
for (block_idx, block) in enumerate(stage):
cur = {'stage_idx': stage_idx, 'block_idx': block_idx, 'block': block}
ret.append(cur)
return ret
|
def _block_cfgs_to_list(block_cfgs):
assert isinstance(block_cfgs, list)
ret = []
for (stage_idx, stage) in enumerate(block_cfgs):
assert isinstance(stage, list)
ret = []
for x in stage:
ret += _expand_block_cfg(x)
stage = ret
for (block_idx, block) in enumerate(stage):
cur = {'stage_idx': stage_idx, 'block_idx': block_idx, 'block': block}
ret.append(cur)
return ret
|
EmbedMask
|
positive
|
def get_labels(self):
bug_fix_times = []
for bug in bugzilla.get_bugs():
fix_time = bug_features.get_time_to_fix(bug)
if fix_time is None:
continue
bug_fix_times.append((bug['id'], fix_time))
def _quantiles(n):
return statistics.quantiles((fix_time for (bug_id, fix_time) in bug_fix_times), n=n)
<DeepExtract>
quantiles = statistics.quantiles((fix_time for (bug_id, fix_time) in bug_fix_times), n=2)
</DeepExtract>
logger.info('Max fix time: %s', max((fix_time for (bug_id, fix_time) in bug_fix_times)))
logger.info('Fix time quantiles: %s', quantiles)
logger.info('Fix time quartiles: %s', _quantiles(4))
logger.info('Fix time deciles: %s', _quantiles(10))
classes = {}
for (bug_id, fix_time) in bug_fix_times:
for (i, quantile) in enumerate(quantiles):
if fix_time <= quantile:
classes[bug_id] = i
break
if bug_id not in classes:
classes[bug_id] = i + 1
for i in range(len(quantiles) + 1):
logger.info('%d bugs are in the %dth quantile', sum((1 for label in classes.values() if label == i)), i)
return (classes, list(range(len(quantiles) + 1)))
|
def get_labels(self):
bug_fix_times = []
for bug in bugzilla.get_bugs():
fix_time = bug_features.get_time_to_fix(bug)
if fix_time is None:
continue
bug_fix_times.append((bug['id'], fix_time))
def _quantiles(n):
return statistics.quantiles((fix_time for (bug_id, fix_time) in bug_fix_times), n=n)
quantiles = statistics.quantiles((fix_time for (bug_id, fix_time) in bug_fix_times), n=2)
logger.info('Max fix time: %s', max((fix_time for (bug_id, fix_time) in bug_fix_times)))
logger.info('Fix time quantiles: %s', quantiles)
logger.info('Fix time quartiles: %s', _quantiles(4))
logger.info('Fix time deciles: %s', _quantiles(10))
classes = {}
for (bug_id, fix_time) in bug_fix_times:
for (i, quantile) in enumerate(quantiles):
if fix_time <= quantile:
classes[bug_id] = i
break
if bug_id not in classes:
classes[bug_id] = i + 1
for i in range(len(quantiles) + 1):
logger.info('%d bugs are in the %dth quantile', sum((1 for label in classes.values() if label == i)), i)
return (classes, list(range(len(quantiles) + 1)))
|
bugbug
|
positive
|
def length_of_workspace_object_depiction(self, o, description_structures):
result = len(str(o))
if o.descriptions:
result += 2
result += 2 * (len(o.descriptions) - 1)
for d in o.descriptions:
<DeepExtract>
if d.descriptor.activation == 100:
(s, _) = (d.descriptor.name.upper(), curses.A_STANDOUT)
if d.descriptor.activation > 50:
(s, _) = (d.descriptor.name.upper(), curses.A_BOLD)
else:
(s, _) = (d.descriptor.name.lower(), curses.A_NORMAL)
</DeepExtract>
result += len(s)
if d not in description_structures:
result += 2
result += 1
return result
|
def length_of_workspace_object_depiction(self, o, description_structures):
result = len(str(o))
if o.descriptions:
result += 2
result += 2 * (len(o.descriptions) - 1)
for d in o.descriptions:
if d.descriptor.activation == 100:
(s, _) = (d.descriptor.name.upper(), curses.A_STANDOUT)
if d.descriptor.activation > 50:
(s, _) = (d.descriptor.name.upper(), curses.A_BOLD)
else:
(s, _) = (d.descriptor.name.lower(), curses.A_NORMAL)
result += len(s)
if d not in description_structures:
result += 2
result += 1
return result
|
copycat
|
positive
|
def trading_loop(self, caller):
"""
Main loop that runs based on caller.
:param caller: Caller object that determines which bot is running.
"""
lower_trend = None
running_loop = self.gui.running_live if caller == LIVE else self.gui.simulation_running_live
trader: SimulationTrader = self.gui.get_trader(caller=caller)
while running_loop:
trader.completed_loop = False
<DeepExtract>
trader = self.gui.get_trader(caller)
if not trader.data_view.data_is_updated():
trader.data_view.update_data()
</DeepExtract>
<DeepExtract>
if self.gui.advanced_logging:
self.gui.get_trader(caller).output_basic_information()
</DeepExtract>
<DeepExtract>
trader: SimulationTrader = self.gui.get_trader(caller)
trader.data_view.get_current_data()
trader.current_price = trader.data_view.current_values['close']
trader.handle_trailing_prices()
</DeepExtract>
<DeepExtract>
trader = self.gui.get_trader(caller)
trader.main_logic(log_data=self.gui.advanced_logging)
</DeepExtract>
<DeepExtract>
if self.next_scheduled_event and datetime.now() >= self.next_scheduled_event:
self.gui.telegram_bot.send_statistics_telegram(self.telegram_chat_id, self.schedule_period, self.trader)
self.next_scheduled_event = datetime.now() + timedelta(seconds=self.schedule_seconds)
</DeepExtract>
<DeepExtract>
if self.lower_interval_notification:
trader: SimulationTrader = self.gui.get_trader(caller)
lower_data = self.gui.get_lower_interval_data(caller)
lower_data.get_current_data()
lower_trend = trader.get_trend(dataObject=lower_data, log_data=self.gui.advanced_logging, in_lower_interval=True)
self.lower_trend = str(lower_trend)
if lower_trend != lower_trend:
message = f'{self.lower_trend.capitalize()} trend detected on lower interval data.'
self.signals.activity.emit(caller, message)
if self.gui.configuration.enableTelegramNotification.isChecked() and caller == LIVE:
self.gui.telegram_bot.send_message(message=message, chat_id=self.telegram_chat_id)
lower_trend = lower_trend
</DeepExtract>
<DeepExtract>
trader: SimulationTrader = self.trader
net = trader.get_net()
profit = trader.get_profit()
self.percentage = trader.get_profit_percentage(trader.starting_balance, net)
self.elapsed = get_elapsed_time(self.starting_time)
self.set_daily_percentages(trader=trader, net=net)
grouped_dict = trader.get_grouped_statistics()
grouped_dict['general']['net'] = f'${round(net, 2)}'
grouped_dict['general']['profit'] = f'${round(profit, 2)}'
grouped_dict['general']['elapsed'] = self.elapsed
grouped_dict['general']['totalPercentage'] = f'{round(self.percentage, 2)}%'
grouped_dict['general']['dailyPercentage'] = f'{round(self.daily_percentage, 2)}%'
grouped_dict['general']['lowerTrend'] = self.lower_trend
value_dict = {'profitLossLabel': trader.get_profit_or_loss_string(profit=profit), 'profitLossValue': f'${abs(round(profit, 2))}', 'percentageValue': f'{round(self.percentage, 2)}%', 'netValue': f'${round(net, 2)}', 'tickerValue': f'${trader.current_price}', 'tickerLabel': trader.symbol, 'currentPositionValue': trader.get_position_string(), 'net': net, 'price': trader.current_price}
(value_dict, grouped_dict) = (value_dict, grouped_dict)
</DeepExtract>
self.signals.updated.emit(caller, value_dict, grouped_dict)
running_loop = self.gui.running_live if caller == LIVE else self.gui.simulation_running_live
self.fail_count = 0
trader.completed_loop = True
|
def trading_loop(self, caller):
"""
Main loop that runs based on caller.
:param caller: Caller object that determines which bot is running.
"""
lower_trend = None
running_loop = self.gui.running_live if caller == LIVE else self.gui.simulation_running_live
trader: SimulationTrader = self.gui.get_trader(caller=caller)
while running_loop:
trader.completed_loop = False
trader = self.gui.get_trader(caller)
if not trader.data_view.data_is_updated():
trader.data_view.update_data()
if self.gui.advanced_logging:
self.gui.get_trader(caller).output_basic_information()
trader: SimulationTrader = self.gui.get_trader(caller)
trader.data_view.get_current_data()
trader.current_price = trader.data_view.current_values['close']
trader.handle_trailing_prices()
trader = self.gui.get_trader(caller)
trader.main_logic(log_data=self.gui.advanced_logging)
if self.next_scheduled_event and datetime.now() >= self.next_scheduled_event:
self.gui.telegram_bot.send_statistics_telegram(self.telegram_chat_id, self.schedule_period, self.trader)
self.next_scheduled_event = datetime.now() + timedelta(seconds=self.schedule_seconds)
if self.lower_interval_notification:
trader: SimulationTrader = self.gui.get_trader(caller)
lower_data = self.gui.get_lower_interval_data(caller)
lower_data.get_current_data()
lower_trend = trader.get_trend(dataObject=lower_data, log_data=self.gui.advanced_logging, in_lower_interval=True)
self.lower_trend = str(lower_trend)
if lower_trend != lower_trend:
message = f'{self.lower_trend.capitalize()} trend detected on lower interval data.'
self.signals.activity.emit(caller, message)
if self.gui.configuration.enableTelegramNotification.isChecked() and caller == LIVE:
self.gui.telegram_bot.send_message(message=message, chat_id=self.telegram_chat_id)
lower_trend = lower_trend
trader: SimulationTrader = self.trader
net = trader.get_net()
profit = trader.get_profit()
self.percentage = trader.get_profit_percentage(trader.starting_balance, net)
self.elapsed = get_elapsed_time(self.starting_time)
self.set_daily_percentages(trader=trader, net=net)
grouped_dict = trader.get_grouped_statistics()
grouped_dict['general']['net'] = f'${round(net, 2)}'
grouped_dict['general']['profit'] = f'${round(profit, 2)}'
grouped_dict['general']['elapsed'] = self.elapsed
grouped_dict['general']['totalPercentage'] = f'{round(self.percentage, 2)}%'
grouped_dict['general']['dailyPercentage'] = f'{round(self.daily_percentage, 2)}%'
grouped_dict['general']['lowerTrend'] = self.lower_trend
value_dict = {'profitLossLabel': trader.get_profit_or_loss_string(profit=profit), 'profitLossValue': f'${abs(round(profit, 2))}', 'percentageValue': f'{round(self.percentage, 2)}%', 'netValue': f'${round(net, 2)}', 'tickerValue': f'${trader.current_price}', 'tickerLabel': trader.symbol, 'currentPositionValue': trader.get_position_string(), 'net': net, 'price': trader.current_price}
(value_dict, grouped_dict) = (value_dict, grouped_dict)
self.signals.updated.emit(caller, value_dict, grouped_dict)
running_loop = self.gui.running_live if caller == LIVE else self.gui.simulation_running_live
self.fail_count = 0
trader.completed_loop = True
|
algobot
|
positive
|
def get_marginal_prob(X, mask=None):
input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
if self.use_boundary:
<DeepExtract>
self.left_boundary = K.expand_dims(K.expand_dims(self.left_boundary, 0), 0)
self.right_boundary = K.expand_dims(K.expand_dims(self.right_boundary, 0), 0)
if mask is None:
input_energy = K.concatenate([input_energy[:, :1, :] + self.left_boundary, input_energy[:, 1:, :]], axis=1)
input_energy = K.concatenate([input_energy[:, :-1, :], input_energy[:, -1:, :] + self.right_boundary], axis=1)
else:
mask = K.expand_dims(K.cast(mask, K.floatx()))
start_mask = K.cast(K.greater(mask, self.shift_right(mask)), K.floatx())
end_mask = K.cast(K.greater(self.shift_left(mask), mask), K.floatx())
input_energy = input_energy + start_mask * self.left_boundary
input_energy = input_energy + end_mask * self.right_boundary
input_energy = input_energy
</DeepExtract>
input_length = K.int_shape(X)[1]
<DeepExtract>
alpha = self.recursion(input_energy, **kwargs)
</DeepExtract>
<DeepExtract>
beta = self.recursion(input_energy, go_backwards=True, **kwargs)
</DeepExtract>
if mask is not None:
input_energy = input_energy * K.expand_dims(K.cast(mask, K.floatx()))
margin = -(self.shift_right(alpha) + input_energy + self.shift_left(beta))
return self.softmaxNd(margin)
|
def get_marginal_prob(X, mask=None):
input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
if self.use_boundary:
self.left_boundary = K.expand_dims(K.expand_dims(self.left_boundary, 0), 0)
self.right_boundary = K.expand_dims(K.expand_dims(self.right_boundary, 0), 0)
if mask is None:
input_energy = K.concatenate([input_energy[:, :1, :] + self.left_boundary, input_energy[:, 1:, :]], axis=1)
input_energy = K.concatenate([input_energy[:, :-1, :], input_energy[:, -1:, :] + self.right_boundary], axis=1)
else:
mask = K.expand_dims(K.cast(mask, K.floatx()))
start_mask = K.cast(K.greater(mask, self.shift_right(mask)), K.floatx())
end_mask = K.cast(K.greater(self.shift_left(mask), mask), K.floatx())
input_energy = input_energy + start_mask * self.left_boundary
input_energy = input_energy + end_mask * self.right_boundary
input_energy = input_energy
input_length = K.int_shape(X)[1]
alpha = self.recursion(input_energy, **kwargs)
beta = self.recursion(input_energy, go_backwards=True, **kwargs)
if mask is not None:
input_energy = input_energy * K.expand_dims(K.cast(mask, K.floatx()))
margin = -(self.shift_right(alpha) + input_energy + self.shift_left(beta))
return self.softmaxNd(margin)
|
Chinese-clinical-NER
|
positive
|
def load_fb13(check_md5hash=False, clean_unseen=True, add_reciprocal_rels=False):
"""Load the Freebase13 (FB13) dataset.
FB13 is a subset of Freebase :cite:`bollacker2008freebase`
and was initially presented in
`Reasoning With Neural Tensor Networks for Knowledge Base Completion` :cite:`socher2013reasoning`.
.. note::
FB13 also provide true and negative labels for the triples in the validation and tests sets.
The positive base rate is close to 50%.
FB13 dataset is loaded from file if it exists at the ``AMPLIGRAPH_DATA_HOME`` location.
If ``AMPLIGRAPH_DATA_HOME`` is not set, the default ``~/ampligraph_datasets`` is checked.
If the dataset is not found at either location, it is downloaded and placed in ``AMPLIGRAPH_DATA_HOME``
or ``~/ampligraph_datasets``.
This dataset is divided in three splits:
- `train`: 316232 triples
- `valid`: 11816 triples
- `test`: 47464 triples
Both the validation and test splits are associated with labels (binary ndarrays),
with `True` for positive statements and `False` for negatives:
- `valid_labels`
- `test_labels`
========= ========= ========== ========== ======== ======== ============ ===========
Dataset Train Valid Pos Valid Neg Test Pos Test Neg Entities Relations
========= ========= ========== ========== ======== ======== ============ ===========
FB13 316232 5908 5908 23733 23731 75043 13
========= ========= ========== ========== ======== ======== ============ ===========
Parameters
----------
check_md5hash: bool
If `True` check the md5hash of the files (default: `False`).
clean_unseen: bool
If `True`, filters triples in validation and test sets that include entities not present in the training set.
add_reciprocal_rels: bool
Flag which specifies whether to add reciprocal relations. For every <s, p, o> in the dataset
this creates a corresponding triple with reciprocal relation <o, p_reciprocal, s> (default: False).
Returns
-------
splits: dict
The dataset splits: {'train': train, 'valid': valid, 'valid_labels': valid_labels,
'test': test, 'test_labels': test_labels}.
Each split containing a dataset is a ndarray of shape (n, 3).
The labels are ndarray of shape (n).
Example
-------
>>> from ampligraph.datasets import load_fb13
>>> X = load_fb13()
>>> X["valid"][0]
array(['cornelie_van_zanten', 'gender', 'female'], dtype=object)
>>> X["valid_labels"][0:3]
array([True, False, True], dtype=object)
"""
fb13 = DatasetMetadata(dataset_name='freebase13', filename='freebase13.zip', url='https://s3-eu-west-1.amazonaws.com/ampligraph/datasets/freebase13.zip', train_name='train.txt', valid_name='dev.txt', test_name='test.txt', train_checksum='9099ebcd85ab3ce723cfaaf34f74dceb', valid_checksum='c4ef7b244baa436a97c2a5e57d4ba7ed', test_checksum='f9af2eac7c5a86996c909bdffd295528')
<DeepExtract>
dataset = {}
if fb13.dataset_name is None:
if fb13.url is None:
raise ValueError('The dataset name or url must be provided to load a dataset.')
fb13.dataset_name = fb13.url[fb13.url.rfind('/') + 1:fb13.url.rfind('.')]
dataset_path = _fetch_dataset(fb13, None, check_md5hash)
train = load_from_csv(dataset_path, fb13.train_name, add_reciprocal_rels=add_reciprocal_rels)
dataset['train'] = train
valid = load_from_csv(dataset_path, fb13.valid_name, add_reciprocal_rels=add_reciprocal_rels)
dataset['valid'] = valid
test = load_from_csv(dataset_path, fb13.test_name, add_reciprocal_rels=add_reciprocal_rels)
dataset['test'] = test
if fb13.valid_negatives_name is not None:
valid_negatives = load_from_csv(dataset_path, fb13.valid_negatives_name, add_reciprocal_rels=add_reciprocal_rels)
dataset['valid_negatives'] = valid_negatives
if fb13.test_negatives_name is not None:
test_negatives = load_from_csv(dataset_path, fb13.test_negatives_name, add_reciprocal_rels=add_reciprocal_rels)
dataset['test_negatives'] = test_negatives
if fb13.test_human_checksum is not None and fb13.test_human_ids_checksum is not None:
test_human = load_from_csv(dataset_path, fb13.test_human_name)
dataset['test-human'] = test_human
test_human_ids = load_from_csv(dataset_path, fb13.test_human_ids_name)
dataset['test-human-ids'] = test_human_ids
if fb13.mapper_checksum is not None:
mapper = load_mapper_from_json(dataset_path, fb13.mapper_name)
dataset['mapper'] = mapper
dataset = dataset
</DeepExtract>
valid_labels = dataset['valid'][:, 3]
test_labels = dataset['test'][:, 3]
dataset['valid'] = dataset['valid'][:, 0:3]
dataset['test'] = dataset['test'][:, 0:3]
dataset['valid_labels'] = valid_labels == '1'
dataset['test_labels'] = test_labels == '1'
if clean_unseen:
<DeepExtract>
filtered_X = {}
train = pd.DataFrame(dataset['train'][:, :3], columns=['s', 'p', 'o'])
filtered_X['train'] = dataset['train']
valid = pd.DataFrame(dataset['valid'][:, :3], columns=['s', 'p', 'o'])
test = pd.DataFrame(dataset['test'][:, :3], columns=['s', 'p', 'o'])
train_ent = np.unique(np.concatenate((train.s, train.o)))
train_rel = train.p.unique()
if 'valid_negatives' in dataset:
valid_negatives = pd.DataFrame(dataset['valid_negatives'][:, :3], columns=['s', 'p', 'o'])
valid_negatives_idx = valid_negatives.s.isin(train_ent) & valid_negatives.o.isin(train_ent) & valid_negatives.p.isin(train_rel)
filtered_valid_negatives = valid_negatives[valid_negatives_idx].values
filtered_X['valid_negatives'] = filtered_valid_negatives
if 'test_negatives' in dataset:
test_negatives = pd.DataFrame(dataset['test_negatives'][:, :3], columns=['s', 'p', 'o'])
test_negatives_idx = test_negatives.s.isin(train_ent) & test_negatives.o.isin(train_ent) & test_negatives.p.isin(train_rel)
filtered_test_negatives = test[test_negatives_idx].values
filtered_X['test_negatives'] = filtered_test_negatives
valid_idx = valid.s.isin(train_ent) & valid.o.isin(train_ent) & valid.p.isin(train_rel)
test_idx = test.s.isin(train_ent) & test.o.isin(train_ent) & test.p.isin(train_rel)
filtered_valid = dataset['valid'][valid_idx]
filtered_test = dataset['test'][test_idx]
filtered_X['valid'] = filtered_valid
filtered_X['test'] = filtered_test
if 'mapper' in dataset:
filtered_X['mapper'] = dataset['mapper']
if 'test-human' in dataset and 'test-human-ids' in dataset:
filtered_X['test-human'] = dataset['test-human']
filtered_X['test-human-ids'] = dataset['test-human-ids']
if True:
(clean_dataset, valid_idx, test_idx) = (filtered_X, valid_idx, test_idx)
else:
(clean_dataset, valid_idx, test_idx) = filtered_X
</DeepExtract>
clean_dataset['valid_labels'] = dataset['valid_labels'][valid_idx]
clean_dataset['test_labels'] = dataset['test_labels'][test_idx]
return clean_dataset
else:
return dataset
|
def load_fb13(check_md5hash=False, clean_unseen=True, add_reciprocal_rels=False):
"""Load the Freebase13 (FB13) dataset.
FB13 is a subset of Freebase :cite:`bollacker2008freebase`
and was initially presented in
`Reasoning With Neural Tensor Networks for Knowledge Base Completion` :cite:`socher2013reasoning`.
.. note::
FB13 also provide true and negative labels for the triples in the validation and tests sets.
The positive base rate is close to 50%.
FB13 dataset is loaded from file if it exists at the ``AMPLIGRAPH_DATA_HOME`` location.
If ``AMPLIGRAPH_DATA_HOME`` is not set, the default ``~/ampligraph_datasets`` is checked.
If the dataset is not found at either location, it is downloaded and placed in ``AMPLIGRAPH_DATA_HOME``
or ``~/ampligraph_datasets``.
This dataset is divided in three splits:
- `train`: 316232 triples
- `valid`: 11816 triples
- `test`: 47464 triples
Both the validation and test splits are associated with labels (binary ndarrays),
with `True` for positive statements and `False` for negatives:
- `valid_labels`
- `test_labels`
========= ========= ========== ========== ======== ======== ============ ===========
Dataset Train Valid Pos Valid Neg Test Pos Test Neg Entities Relations
========= ========= ========== ========== ======== ======== ============ ===========
FB13 316232 5908 5908 23733 23731 75043 13
========= ========= ========== ========== ======== ======== ============ ===========
Parameters
----------
check_md5hash: bool
If `True` check the md5hash of the files (default: `False`).
clean_unseen: bool
If `True`, filters triples in validation and test sets that include entities not present in the training set.
add_reciprocal_rels: bool
Flag which specifies whether to add reciprocal relations. For every <s, p, o> in the dataset
this creates a corresponding triple with reciprocal relation <o, p_reciprocal, s> (default: False).
Returns
-------
splits: dict
The dataset splits: {'train': train, 'valid': valid, 'valid_labels': valid_labels,
'test': test, 'test_labels': test_labels}.
Each split containing a dataset is a ndarray of shape (n, 3).
The labels are ndarray of shape (n).
Example
-------
>>> from ampligraph.datasets import load_fb13
>>> X = load_fb13()
>>> X["valid"][0]
array(['cornelie_van_zanten', 'gender', 'female'], dtype=object)
>>> X["valid_labels"][0:3]
array([True, False, True], dtype=object)
"""
fb13 = DatasetMetadata(dataset_name='freebase13', filename='freebase13.zip', url='https://s3-eu-west-1.amazonaws.com/ampligraph/datasets/freebase13.zip', train_name='train.txt', valid_name='dev.txt', test_name='test.txt', train_checksum='9099ebcd85ab3ce723cfaaf34f74dceb', valid_checksum='c4ef7b244baa436a97c2a5e57d4ba7ed', test_checksum='f9af2eac7c5a86996c909bdffd295528')
dataset = {}
if fb13.dataset_name is None:
if fb13.url is None:
raise ValueError('The dataset name or url must be provided to load a dataset.')
fb13.dataset_name = fb13.url[fb13.url.rfind('/') + 1:fb13.url.rfind('.')]
dataset_path = _fetch_dataset(fb13, None, check_md5hash)
train = load_from_csv(dataset_path, fb13.train_name, add_reciprocal_rels=add_reciprocal_rels)
dataset['train'] = train
valid = load_from_csv(dataset_path, fb13.valid_name, add_reciprocal_rels=add_reciprocal_rels)
dataset['valid'] = valid
test = load_from_csv(dataset_path, fb13.test_name, add_reciprocal_rels=add_reciprocal_rels)
dataset['test'] = test
if fb13.valid_negatives_name is not None:
valid_negatives = load_from_csv(dataset_path, fb13.valid_negatives_name, add_reciprocal_rels=add_reciprocal_rels)
dataset['valid_negatives'] = valid_negatives
if fb13.test_negatives_name is not None:
test_negatives = load_from_csv(dataset_path, fb13.test_negatives_name, add_reciprocal_rels=add_reciprocal_rels)
dataset['test_negatives'] = test_negatives
if fb13.test_human_checksum is not None and fb13.test_human_ids_checksum is not None:
test_human = load_from_csv(dataset_path, fb13.test_human_name)
dataset['test-human'] = test_human
test_human_ids = load_from_csv(dataset_path, fb13.test_human_ids_name)
dataset['test-human-ids'] = test_human_ids
if fb13.mapper_checksum is not None:
mapper = load_mapper_from_json(dataset_path, fb13.mapper_name)
dataset['mapper'] = mapper
dataset = dataset
valid_labels = dataset['valid'][:, 3]
test_labels = dataset['test'][:, 3]
dataset['valid'] = dataset['valid'][:, 0:3]
dataset['test'] = dataset['test'][:, 0:3]
dataset['valid_labels'] = valid_labels == '1'
dataset['test_labels'] = test_labels == '1'
if clean_unseen:
filtered_X = {}
train = pd.DataFrame(dataset['train'][:, :3], columns=['s', 'p', 'o'])
filtered_X['train'] = dataset['train']
valid = pd.DataFrame(dataset['valid'][:, :3], columns=['s', 'p', 'o'])
test = pd.DataFrame(dataset['test'][:, :3], columns=['s', 'p', 'o'])
train_ent = np.unique(np.concatenate((train.s, train.o)))
train_rel = train.p.unique()
if 'valid_negatives' in dataset:
valid_negatives = pd.DataFrame(dataset['valid_negatives'][:, :3], columns=['s', 'p', 'o'])
valid_negatives_idx = valid_negatives.s.isin(train_ent) & valid_negatives.o.isin(train_ent) & valid_negatives.p.isin(train_rel)
filtered_valid_negatives = valid_negatives[valid_negatives_idx].values
filtered_X['valid_negatives'] = filtered_valid_negatives
if 'test_negatives' in dataset:
test_negatives = pd.DataFrame(dataset['test_negatives'][:, :3], columns=['s', 'p', 'o'])
test_negatives_idx = test_negatives.s.isin(train_ent) & test_negatives.o.isin(train_ent) & test_negatives.p.isin(train_rel)
filtered_test_negatives = test_negatives[test_negatives_idx].values
filtered_X['test_negatives'] = filtered_test_negatives
valid_idx = valid.s.isin(train_ent) & valid.o.isin(train_ent) & valid.p.isin(train_rel)
test_idx = test.s.isin(train_ent) & test.o.isin(train_ent) & test.p.isin(train_rel)
filtered_valid = dataset['valid'][valid_idx]
filtered_test = dataset['test'][test_idx]
filtered_X['valid'] = filtered_valid
filtered_X['test'] = filtered_test
if 'mapper' in dataset:
filtered_X['mapper'] = dataset['mapper']
if 'test-human' in dataset and 'test-human-ids' in dataset:
filtered_X['test-human'] = dataset['test-human']
filtered_X['test-human-ids'] = dataset['test-human-ids']
if True:
(clean_dataset, valid_idx, test_idx) = (filtered_X, valid_idx, test_idx)
else:
(clean_dataset, valid_idx, test_idx) = filtered_X
clean_dataset['valid_labels'] = dataset['valid_labels'][valid_idx]
clean_dataset['test_labels'] = dataset['test_labels'][test_idx]
return clean_dataset
else:
return dataset
|
AmpliGraph
|
positive
|
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({'table_items': self.get_table_items()})
<DeepExtract>
if 'js_data' in context:
context['page_data'] = json.dumps(context['js_data'], cls=LazyEncoder)
context['page_data'] = None
</DeepExtract>
overview_data = []
if self.course_api_enabled:
if switch_is_active('display_course_name_in_nav'):
overview_data.append((_('Course ID'), self.course_id))
else:
overview_data.append((_('Course Name'), self.course_info.get('name')))
def parse_course_date(date_str):
return datetime.strptime(date_str, CourseStructureApiClient.DATETIME_FORMAT) if date_str else None
def format_date(date):
return dateformat.format(date, settings.DATE_FORMAT) if date else '--'
<DeepExtract>
start_date = datetime.strptime(self.course_info.get('start'), CourseStructureApiClient.DATETIME_FORMAT) if self.course_info.get('start') else None
</DeepExtract>
<DeepExtract>
end_date = datetime.strptime(self.course_info.get('end'), CourseStructureApiClient.DATETIME_FORMAT) if self.course_info.get('end') else None
</DeepExtract>
todays_date = datetime.now()
status_str = '--'
if start_date:
if todays_date >= start_date:
in_progress = end_date is None or end_date > todays_date
status_str = _('In Progress') if in_progress else _('Ended')
else:
status_str = _('Not Started Yet')
overview_data += [(_('Start Date'), format_date(start_date)), (_('End Date'), format_date(end_date)), (_('Status'), status_str)]
context['course_overview'] = overview_data
external_tools = []
if settings.LMS_COURSE_SHORTCUT_BASE_URL:
external_tools.append({'title': ugettext_noop('Instructor Dashboard'), 'url': f'{settings.LMS_COURSE_SHORTCUT_BASE_URL}/{self.course_id}/instructor', 'icon': 'fa-dashboard'})
external_tools.append({'title': ugettext_noop('Courseware'), 'url': f'{settings.LMS_COURSE_SHORTCUT_BASE_URL}/{self.course_id}/courseware', 'icon': 'fa-pencil-square-o'})
if settings.CMS_COURSE_SHORTCUT_BASE_URL:
external_tools.append({'title': 'Studio', 'translated_title': 'Studio', 'url': f'{settings.CMS_COURSE_SHORTCUT_BASE_URL}/{self.course_id}', 'icon': 'fa-sliders'})
translate_dict_values(external_tools, ('title',))
context['external_course_tools'] = external_tools
return context
|
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({'table_items': self.get_table_items()})
if 'js_data' in context:
context['page_data'] = json.dumps(context['js_data'], cls=LazyEncoder)
context['page_data'] = None
overview_data = []
if self.course_api_enabled:
if switch_is_active('display_course_name_in_nav'):
overview_data.append((_('Course ID'), self.course_id))
else:
overview_data.append((_('Course Name'), self.course_info.get('name')))
def parse_course_date(date_str):
return datetime.strptime(date_str, CourseStructureApiClient.DATETIME_FORMAT) if date_str else None
def format_date(date):
return dateformat.format(date, settings.DATE_FORMAT) if date else '--'
start_date = datetime.strptime(self.course_info.get('start'), CourseStructureApiClient.DATETIME_FORMAT) if self.course_info.get('start') else None
end_date = datetime.strptime(self.course_info.get('end'), CourseStructureApiClient.DATETIME_FORMAT) if self.course_info.get('end') else None
todays_date = datetime.now()
status_str = '--'
if start_date:
if todays_date >= start_date:
in_progress = end_date is None or end_date > todays_date
status_str = _('In Progress') if in_progress else _('Ended')
else:
status_str = _('Not Started Yet')
overview_data += [(_('Start Date'), format_date(start_date)), (_('End Date'), format_date(end_date)), (_('Status'), status_str)]
context['course_overview'] = overview_data
external_tools = []
if settings.LMS_COURSE_SHORTCUT_BASE_URL:
external_tools.append({'title': ugettext_noop('Instructor Dashboard'), 'url': f'{settings.LMS_COURSE_SHORTCUT_BASE_URL}/{self.course_id}/instructor', 'icon': 'fa-dashboard'})
external_tools.append({'title': ugettext_noop('Courseware'), 'url': f'{settings.LMS_COURSE_SHORTCUT_BASE_URL}/{self.course_id}/courseware', 'icon': 'fa-pencil-square-o'})
if settings.CMS_COURSE_SHORTCUT_BASE_URL:
external_tools.append({'title': 'Studio', 'translated_title': 'Studio', 'url': f'{settings.CMS_COURSE_SHORTCUT_BASE_URL}/{self.course_id}', 'icon': 'fa-sliders'})
translate_dict_values(external_tools, ('title',))
context['external_course_tools'] = external_tools
return context
|
edx-analytics-dashboard
|
positive
|
def _InstallProvisioningProfile(profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionaries: variable substitutions and values
to override when generating the entitlements file.
"""
<DeepExtract>
profiles_dir = os.path.join(os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
(print >> sys.stderr, 'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get('Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
(print >> sys.stderr, 'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
(source_path, provisioning_data, team_id) = valid_provisioning_profiles[selected_key]
</DeepExtract>
target_path = os.path.join(os.environ['BUILT_PRODUCTS_DIR'], os.environ['CONTENTS_FOLDER_PATH'], 'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
<DeepExtract>
substitutions = {'CFBundleIdentifier': bundle_identifier, 'AppIdentifierPrefix': team_id + '.'}
</DeepExtract>
return (substitutions, provisioning_data['Entitlements'])
|
def _InstallProvisioningProfile(profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionaries: variable substitutions and values
to override when generating the entitlements file.
"""
profiles_dir = os.path.join(os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
(print >> sys.stderr, 'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get('Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
(print >> sys.stderr, 'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
(source_path, provisioning_data, team_id) = valid_provisioning_profiles[selected_key]
target_path = os.path.join(os.environ['BUILT_PRODUCTS_DIR'], os.environ['CONTENTS_FOLDER_PATH'], 'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = {'CFBundleIdentifier': bundle_identifier, 'AppIdentifierPrefix': team_id + '.'}
return (substitutions, provisioning_data['Entitlements'])
|
archived-pangyp
|
positive
|
def test_shift_range(self):
for endian in ('little', 'big'):
for direction in ('left', 'right'):
for n in range(0, 200):
<DeepExtract>
a = urandom(n, endian)
self.assertEqual(len(a), n)
b = a.copy()
if direction == 'left':
b <<= 1
else:
b >>= 1
self.assertEQUAL(b, self.shift(a, 1, direction))
</DeepExtract>
<DeepExtract>
a = urandom(n, endian)
self.assertEqual(len(a), n)
b = a.copy()
if direction == 'left':
b <<= randint(0, n)
else:
b >>= randint(0, n)
self.assertEQUAL(b, self.shift(a, randint(0, n), direction))
</DeepExtract>
for n_shift in range(0, 100):
<DeepExtract>
a = urandom(100, endian)
self.assertEqual(len(a), 100)
b = a.copy()
if direction == 'left':
b <<= n_shift
else:
b >>= n_shift
self.assertEQUAL(b, self.shift(a, n_shift, direction))
</DeepExtract>
|
def test_shift_range(self):
for endian in ('little', 'big'):
for direction in ('left', 'right'):
for n in range(0, 200):
a = urandom(n, endian)
self.assertEqual(len(a), n)
b = a.copy()
if direction == 'left':
b <<= 1
else:
b >>= 1
self.assertEQUAL(b, self.shift(a, 1, direction))
a = urandom(n, endian)
self.assertEqual(len(a), n)
b = a.copy()
if direction == 'left':
b <<= randint(0, n)
else:
b >>= randint(0, n)
self.assertEQUAL(b, self.shift(a, randint(0, n), direction))
for n_shift in range(0, 100):
a = urandom(100, endian)
self.assertEqual(len(a), 100)
b = a.copy()
if direction == 'left':
b <<= n_shift
else:
b >>= n_shift
self.assertEQUAL(b, self.shift(a, n_shift, direction))
|
bitarray
|
positive
|
def on_varset(self, varset):
<DeepExtract>
if varset.arrow:
H_CONF_HAS_ARROW(varset.where)
</DeepExtract>
return super().on_varset(varset)
|
def on_varset(self, varset):
if varset.arrow:
H_CONF_HAS_ARROW(varset.where)
return super().on_varset(varset)
|
asterisklint
|
positive
|
def slctAccGrp(self, sender=0):
<DeepExtract>
self.builder = get_builder('warehousing')
self.viewGrpWin = self.builder.get_object('viewGroupsWindow')
self.groupsTreeView = self.builder.get_object('GroupsTreeView')
self.grpListStore.clear()
self.groupsTreeView.set_model(self.grpListStore)
column = Gtk.TreeViewColumn(_('Code'), Gtk.CellRendererText(), text=0)
column.set_spacing(5)
column.set_resizable(True)
self.groupsTreeView.append_column(column)
column = Gtk.TreeViewColumn(_('Name'), Gtk.CellRendererText(), text=1)
column.set_spacing(5)
column.set_resizable(True)
self.groupsTreeView.append_column(column)
column = Gtk.TreeViewColumn(_('Buy ID'), Gtk.CellRendererText(), text=2)
column.set_spacing(5)
column.set_resizable(True)
self.groupsTreeView.append_column(column)
column = Gtk.TreeViewColumn(_('Sell ID'), Gtk.CellRendererText(), text=3)
column.set_spacing(5)
column.set_resizable(True)
self.groupsTreeView.append_column(column)
self.groupsTreeView.get_selection().set_mode(Gtk.SelectionMode.SINGLE)
self.populateGrpList()
self.viewGrpWin.show_all()
self.builder.connect_signals(self)
</DeepExtract>
self.handid = self.connect('group-selected', self.setSelectedID)
|
def slctAccGrp(self, sender=0):
self.builder = get_builder('warehousing')
self.viewGrpWin = self.builder.get_object('viewGroupsWindow')
self.groupsTreeView = self.builder.get_object('GroupsTreeView')
self.grpListStore.clear()
self.groupsTreeView.set_model(self.grpListStore)
column = Gtk.TreeViewColumn(_('Code'), Gtk.CellRendererText(), text=0)
column.set_spacing(5)
column.set_resizable(True)
self.groupsTreeView.append_column(column)
column = Gtk.TreeViewColumn(_('Name'), Gtk.CellRendererText(), text=1)
column.set_spacing(5)
column.set_resizable(True)
self.groupsTreeView.append_column(column)
column = Gtk.TreeViewColumn(_('Buy ID'), Gtk.CellRendererText(), text=2)
column.set_spacing(5)
column.set_resizable(True)
self.groupsTreeView.append_column(column)
column = Gtk.TreeViewColumn(_('Sell ID'), Gtk.CellRendererText(), text=3)
column.set_spacing(5)
column.set_resizable(True)
self.groupsTreeView.append_column(column)
self.groupsTreeView.get_selection().set_mode(Gtk.SelectionMode.SINGLE)
self.populateGrpList()
self.viewGrpWin.show_all()
self.builder.connect_signals(self)
self.handid = self.connect('group-selected', self.setSelectedID)
|
amir
|
positive
|
def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
x_trans = conv_t(x_lateral)
x = x_trans
x.features = torch.cat((x_bottom.features, x_trans.features), dim=1)
x_m = conv_m(x)
<DeepExtract>
features = x.features
(n, in_channels) = features.shape
assert in_channels % x_m.features.shape[1] == 0 and in_channels >= x_m.features.shape[1]
x.features = features.view(n, x_m.features.shape[1], -1).sum(dim=2)
x = x
</DeepExtract>
x.features = x_m.features + x.features
x = conv_inv(x)
return x
|
def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
x_trans = conv_t(x_lateral)
x = x_trans
x.features = torch.cat((x_bottom.features, x_trans.features), dim=1)
x_m = conv_m(x)
features = x.features
(n, in_channels) = features.shape
assert in_channels % x_m.features.shape[1] == 0 and in_channels >= x_m.features.shape[1]
x.features = features.view(n, x_m.features.shape[1], -1).sum(dim=2)
x = x
x.features = x_m.features + x.features
x = conv_inv(x)
return x
|
CenterPoint-KITTI
|
positive
|
def update(self):
"""Main method which should be called."""
self.logger.debug('Update')
with self.get_inactivity_monitor():
try:
self.restart_event.clear()
self.buffer = Buffer()
<DeepExtract>
self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.config_get('ssl'):
self.soc = ssl.wrap_socket(self.soc)
else:
self.logger.warning('SSL disabled')
self.soc.connect((self.config_get('server'), self.config_get('port')))
self.soc.settimeout(5)
</DeepExtract>
<DeepExtract>
self.send('NICK ' + self.config_get('nick'))
self.send('USER botnet botnet botnet :Python bot')
</DeepExtract>
<DeepExtract>
while not self.stop_event.is_set() and (not self.restart_event.is_set()):
(reads, writes, errors) = select.select([self.soc], [], [], self.deltatime)
for sock in reads:
if sock == self.soc:
data = self.soc.recv(4096)
if not data:
return
self.process_data(data)
</DeepExtract>
finally:
if self.soc:
self.soc.close()
|
def update(self):
"""Main method which should be called."""
self.logger.debug('Update')
with self.get_inactivity_monitor():
try:
self.restart_event.clear()
self.buffer = Buffer()
self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.config_get('ssl'):
self.soc = ssl.wrap_socket(self.soc)
else:
self.logger.warning('SSL disabled')
self.soc.connect((self.config_get('server'), self.config_get('port')))
self.soc.settimeout(5)
self.send('NICK ' + self.config_get('nick'))
self.send('USER botnet botnet botnet :Python bot')
while not self.stop_event.is_set() and (not self.restart_event.is_set()):
(reads, writes, errors) = select.select([self.soc], [], [], self.deltatime)
for sock in reads:
if sock == self.soc:
data = self.soc.recv(4096)
if not data:
return
self.process_data(data)
finally:
if self.soc:
self.soc.close()
|
botnet
|
positive
|
def test_serializer_no_geom(self):
response = HttpResponse()
MushroomSpot.objects.create()
self.serializer.serialize(MushroomSpot.objects.all(), stream=response, fields=['id', 'name', 'number', 'size', 'boolean', 'tags'], delete=False)
<DeepExtract>
shapefiles = self.serializer.path_directory
shapefiles = [shapefile for shapefile in os.listdir(shapefiles) if shapefile[-3:] == 'shp']
layers = {s: gdal.DataSource(os.path.join(self.serializer.path_directory, s))[0] for s in shapefiles}
layers = layers
</DeepExtract>
self.assertEqual(len(layers['Point.shp']), 1)
self.assertEqual(len(layers['LineString.shp']), 1)
self.assertEqual(len(layers['Polygon.shp']), 1)
self.assertEqual(len(layers['MultiPoint.shp']), 2)
self.assertEqual(len(layers['MultiLineString.shp']), 1)
self.assertEqual(len(layers['MultiPolygon.shp']), 2)
|
def test_serializer_no_geom(self):
response = HttpResponse()
MushroomSpot.objects.create()
self.serializer.serialize(MushroomSpot.objects.all(), stream=response, fields=['id', 'name', 'number', 'size', 'boolean', 'tags'], delete=False)
shapefiles = self.serializer.path_directory
shapefiles = [shapefile for shapefile in os.listdir(shapefiles) if shapefile[-3:] == 'shp']
layers = {s: gdal.DataSource(os.path.join(self.serializer.path_directory, s))[0] for s in shapefiles}
layers = layers
self.assertEqual(len(layers['Point.shp']), 1)
self.assertEqual(len(layers['LineString.shp']), 1)
self.assertEqual(len(layers['Polygon.shp']), 1)
self.assertEqual(len(layers['MultiPoint.shp']), 2)
self.assertEqual(len(layers['MultiLineString.shp']), 1)
self.assertEqual(len(layers['MultiPolygon.shp']), 2)
|
django-mapentity
|
positive
|
def connect(self):
<DeepExtract>
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection((self.host, self.port), self.timeout, **extra_kw)
except SocketTimeout as e:
raise ConnectTimeoutError(self, 'Connection to %s timed out. (connect timeout=%s)' % (self.host, self.timeout))
except SocketError as e:
raise NewConnectionError(self, 'Failed to establish a new connection: %s' % e)
conn = conn
</DeepExtract>
<DeepExtract>
self.sock = conn
if getattr(self, '_tunnel_host', None):
self._tunnel()
self.auto_open = 0
</DeepExtract>
self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
|
def connect(self):
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection((self.host, self.port), self.timeout, **extra_kw)
except SocketTimeout as e:
raise ConnectTimeoutError(self, 'Connection to %s timed out. (connect timeout=%s)' % (self.host, self.timeout))
except SocketError as e:
raise NewConnectionError(self, 'Failed to establish a new connection: %s' % e)
conn = conn
self.sock = conn
if getattr(self, '_tunnel_host', None):
self._tunnel()
self.auto_open = 0
self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
|
aws-waf-security-automation
|
positive
|
def construct(self):
<DeepExtract>
particles = self.particles = VGroup()
for n in range(self.num_particles):
if n % 2 == 0:
particle = Proton(radius=0.2)
particle.charge = +1
else:
particle = Electron(radius=0.2)
particle.charge = -1
particle.velocity = np.random.normal(0, 0.1, 3)
particles.add(particle)
particle.shift(np.random.normal(0, 0.2, 3))
particles.arrange_in_grid(buff=LARGE_BUFF)
particles = particles
</DeepExtract>
<DeepExtract>
func = get_force_field_func(*list(zip(list(map(lambda x: x.get_center(), self.particles)), [p.charge for p in self.particles])))
self.vector_field = VectorField(func, **self.vector_field_config)
vector_field = self.vector_field
</DeepExtract>
def update_vector_field(vector_field):
<DeepExtract>
func = get_force_field_func(*list(zip(list(map(lambda x: x.get_center(), self.particles)), [p.charge for p in self.particles])))
self.vector_field = VectorField(func, **self.vector_field_config)
new_field = self.vector_field
</DeepExtract>
vector_field.become(new_field)
vector_field.func = new_field.func
def update_particles(particles, dt):
func = vector_field.func
for particle in particles:
<DeepExtract>
result = np.array(ORIGIN)
for (center, strength) in point_strength_pairs:
to_center = center - particle.get_center()
norm = get_norm(to_center)
if norm == 0:
continue
elif norm < radius:
to_center /= radius ** 3
elif norm >= radius:
to_center /= norm ** 3
to_center *= -strength
result += to_center
force = result
</DeepExtract>
particle.velocity += force * dt
particle.shift(particle.velocity * dt)
(vector_field.add_updater(update_vector_field),)
(particles.add_updater(update_particles),)
self.add(vector_field, particles)
self.wait(self.anim_time)
for mob in (vector_field, particles):
mob.suspend_updating()
self.wait()
for mob in (vector_field, particles):
mob.resume_updating()
self.wait(3)
|
def construct(self):
particles = self.particles = VGroup()
for n in range(self.num_particles):
if n % 2 == 0:
particle = Proton(radius=0.2)
particle.charge = +1
else:
particle = Electron(radius=0.2)
particle.charge = -1
particle.velocity = np.random.normal(0, 0.1, 3)
particles.add(particle)
particle.shift(np.random.normal(0, 0.2, 3))
particles.arrange_in_grid(buff=LARGE_BUFF)
particles = particles
func = get_force_field_func(*list(zip(list(map(lambda x: x.get_center(), self.particles)), [p.charge for p in self.particles])))
self.vector_field = VectorField(func, **self.vector_field_config)
vector_field = self.vector_field
def update_vector_field(vector_field):
func = get_force_field_func(*list(zip(list(map(lambda x: x.get_center(), self.particles)), [p.charge for p in self.particles])))
self.vector_field = VectorField(func, **self.vector_field_config)
new_field = self.vector_field
vector_field.become(new_field)
vector_field.func = new_field.func
def update_particles(particles, dt):
func = vector_field.func
for particle in particles:
result = np.array(ORIGIN)
for (center, strength) in point_strength_pairs:
to_center = center - particle.get_center()
norm = get_norm(to_center)
if norm == 0:
continue
elif norm < radius:
to_center /= radius ** 3
elif norm >= radius:
to_center /= norm ** 3
to_center *= -strength
result += to_center
force = result
particle.velocity += force * dt
particle.shift(particle.velocity * dt)
(vector_field.add_updater(update_vector_field),)
(particles.add_updater(update_particles),)
self.add(vector_field, particles)
self.wait(self.anim_time)
for mob in (vector_field, particles):
mob.suspend_updating()
self.wait()
for mob in (vector_field, particles):
mob.resume_updating()
self.wait(3)
|
AnimationsWithManim
|
positive
|
def __init__(self):
<DeepExtract>
self.dict_CString = util.load_dict_from_file(config.EXPLICIT_DICT_CSTRING)
</DeepExtract>
<DeepExtract>
self.dict_CPOS = util.load_dict_from_file(config.EXPLICIT_DICT_CPOS)
</DeepExtract>
<DeepExtract>
self.dict_C_Prev = util.load_dict_from_file(config.EXPLICIT_DICT_C_PREV)
</DeepExtract>
<DeepExtract>
self.dict_CLString = util.load_dict_from_file(config.EXPLICIT_DICT_CLSTRING)
</DeepExtract>
<DeepExtract>
self.self_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_SELF_CATEGORY_PATH)
</DeepExtract>
<DeepExtract>
self.parent_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_PARENT_CATEGORY_PATH)
</DeepExtract>
<DeepExtract>
self.left_sibling_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_LEFT_SIBLING_CATEGORY_PATH)
</DeepExtract>
<DeepExtract>
self.right_sibling_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_RIGHT_SIBLING_CATEGORY_PATH)
</DeepExtract>
<DeepExtract>
self.conn_self_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_SELF_CATEGORY_PATH)
</DeepExtract>
<DeepExtract>
self.conn_parent_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_PARENT_CATEGORY_PATH)
</DeepExtract>
<DeepExtract>
self.conn_left_sibling_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_LEFT_SIBLING_CATEGORY_PATH)
</DeepExtract>
<DeepExtract>
self.conn_right_sibling_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_RIGHT_SIBLING_CATEGORY_PATH)
</DeepExtract>
self.self_parent_dict = util.load_dict_from_file(config.EXPLICIT_DICT_SELF_PARENT_CATEGORY_PATH)
self.self_right_dict = util.load_dict_from_file(config.EXPLICIT_DICT_SELF_RIGHT_CATEGORY_PATH)
self.self_left_dict = util.load_dict_from_file(config.EXPLICIT_DICT_SELF_LEFT_CATEGORY_PATH)
self.parent_left_dict = util.load_dict_from_file(config.EXPLICIT_DICT_PARENT_LEFT_CATEGORY_PATH)
self.parent_right_dict = util.load_dict_from_file(config.EXPLICIT_DICT_PARENT_RIGHT_CATEGORY_PATH)
self.left_right_dict = util.load_dict_from_file(config.EXPLICIT_DICT_LEFT_RIGHT_CATEGORY_PATH)
' mine '
self.dict_conn_to_root_path = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_TO_ROOT_PATH)
self.dict_conn_next = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_NEXT)
self.dict_conn_connCtx = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_CONNCTX)
self.dict_conn_rightSiblingCtx = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_RIGHTSIBLINGCTX)
self.dict_conn_parent_category_ctx = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_PARENT_CATEGORY_CTX)
self.dict_conn_leftSibling_ctx = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_LEFTSIBLING_CTX)
self.dict_CParent_to_root_path_node_names = util.load_dict_from_file(config.EXPLICIT_DICT_CPARENT_TO_ROOT_PATH_NODE_NAMES)
self.dict_conn_parent_category_not_linked_ctx = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_PARENT_CATEGORY_NOT_LINKED_CTX)
self.dict_conn_prev_conn = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_PREV_CONN)
self.dict_prev_conn = util.load_dict_from_file(config.EXPLICIT_DICT_PREV_CONN)
self.dict_as_prev_conn = util.load_dict_from_file(config.EXPLICIT_DICT_AS_PREV_CONN)
self.dict_as_prev_connPOS = util.load_dict_from_file(config.EXPLICIT_DICT_AS_PREV_CONNPOS)
self.dict_when_prev_conn = util.load_dict_from_file(config.EXPLICIT_DICT_WHEN_PREV_CONN)
self.dict_when_prev_connPOS = util.load_dict_from_file(config.EXPLICIT_DICT_WHEN_PREV_CONNPOS)
self.dict_as_before_after_tense = util.load_dict_from_file(config.EXPLICIT_DICT_AS_BEFORE_AFTER_TENSE)
self.dict_when_before_after_tense = util.load_dict_from_file(config.EXPLICIT_DICT_WHEN_BEFORE_AFTER_TENSE)
self.dict_when_after_lemma_verbs = util.load_dict_from_file(config.EXPLICIT_DICT_WHEN_AFTER_LEMMA_VERBS)
|
def __init__(self):
self.dict_CString = util.load_dict_from_file(config.EXPLICIT_DICT_CSTRING)
self.dict_CPOS = util.load_dict_from_file(config.EXPLICIT_DICT_CPOS)
self.dict_C_Prev = util.load_dict_from_file(config.EXPLICIT_DICT_C_PREV)
self.dict_CLString = util.load_dict_from_file(config.EXPLICIT_DICT_CLSTRING)
self.self_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_SELF_CATEGORY_PATH)
self.parent_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_PARENT_CATEGORY_PATH)
self.left_sibling_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_LEFT_SIBLING_CATEGORY_PATH)
self.right_sibling_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_RIGHT_SIBLING_CATEGORY_PATH)
self.conn_self_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_SELF_CATEGORY_PATH)
self.conn_parent_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_PARENT_CATEGORY_PATH)
self.conn_left_sibling_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_LEFT_SIBLING_CATEGORY_PATH)
self.conn_right_sibling_category_dict = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_RIGHT_SIBLING_CATEGORY_PATH)
self.self_parent_dict = util.load_dict_from_file(config.EXPLICIT_DICT_SELF_PARENT_CATEGORY_PATH)
self.self_right_dict = util.load_dict_from_file(config.EXPLICIT_DICT_SELF_RIGHT_CATEGORY_PATH)
self.self_left_dict = util.load_dict_from_file(config.EXPLICIT_DICT_SELF_LEFT_CATEGORY_PATH)
self.parent_left_dict = util.load_dict_from_file(config.EXPLICIT_DICT_PARENT_LEFT_CATEGORY_PATH)
self.parent_right_dict = util.load_dict_from_file(config.EXPLICIT_DICT_PARENT_RIGHT_CATEGORY_PATH)
self.left_right_dict = util.load_dict_from_file(config.EXPLICIT_DICT_LEFT_RIGHT_CATEGORY_PATH)
' mine '
self.dict_conn_to_root_path = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_TO_ROOT_PATH)
self.dict_conn_next = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_NEXT)
self.dict_conn_connCtx = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_CONNCTX)
self.dict_conn_rightSiblingCtx = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_RIGHTSIBLINGCTX)
self.dict_conn_parent_category_ctx = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_PARENT_CATEGORY_CTX)
self.dict_conn_leftSibling_ctx = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_LEFTSIBLING_CTX)
self.dict_CParent_to_root_path_node_names = util.load_dict_from_file(config.EXPLICIT_DICT_CPARENT_TO_ROOT_PATH_NODE_NAMES)
self.dict_conn_parent_category_not_linked_ctx = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_PARENT_CATEGORY_NOT_LINKED_CTX)
self.dict_conn_prev_conn = util.load_dict_from_file(config.EXPLICIT_DICT_CONN_PREV_CONN)
self.dict_prev_conn = util.load_dict_from_file(config.EXPLICIT_DICT_PREV_CONN)
self.dict_as_prev_conn = util.load_dict_from_file(config.EXPLICIT_DICT_AS_PREV_CONN)
self.dict_as_prev_connPOS = util.load_dict_from_file(config.EXPLICIT_DICT_AS_PREV_CONNPOS)
self.dict_when_prev_conn = util.load_dict_from_file(config.EXPLICIT_DICT_WHEN_PREV_CONN)
self.dict_when_prev_connPOS = util.load_dict_from_file(config.EXPLICIT_DICT_WHEN_PREV_CONNPOS)
self.dict_as_before_after_tense = util.load_dict_from_file(config.EXPLICIT_DICT_AS_BEFORE_AFTER_TENSE)
self.dict_when_before_after_tense = util.load_dict_from_file(config.EXPLICIT_DICT_WHEN_BEFORE_AFTER_TENSE)
self.dict_when_after_lemma_verbs = util.load_dict_from_file(config.EXPLICIT_DICT_WHEN_AFTER_LEMMA_VERBS)
|
conll2015_discourse
|
positive
|
def dict_to_nonedict(opt):
if isinstance(opt, dict):
new_opt = dict()
for (key, sub_opt) in opt.items():
<DeepExtract>
if isinstance(sub_opt, dict):
new_opt = dict()
for (key, sub_opt) in sub_opt.items():
new_opt[key] = dict_to_nonedict(sub_opt)
new_opt[key] = NoneDict(**new_opt)
elif isinstance(sub_opt, list):
new_opt[key] = [dict_to_nonedict(sub_opt) for sub_opt in sub_opt]
else:
new_opt[key] = sub_opt
</DeepExtract>
return NoneDict(**new_opt)
elif isinstance(opt, list):
return [dict_to_nonedict(sub_opt) for sub_opt in opt]
else:
return opt
|
def dict_to_nonedict(opt):
if isinstance(opt, dict):
new_opt = dict()
for (key, sub_opt) in opt.items():
if isinstance(sub_opt, dict):
new_opt = dict()
for (key, sub_opt) in sub_opt.items():
new_opt[key] = dict_to_nonedict(sub_opt)
new_opt[key] = NoneDict(**new_opt)
elif isinstance(sub_opt, list):
new_opt[key] = [dict_to_nonedict(sub_opt) for sub_opt in sub_opt]
else:
new_opt[key] = sub_opt
return NoneDict(**new_opt)
elif isinstance(opt, list):
return [dict_to_nonedict(sub_opt) for sub_opt in opt]
else:
return opt
|
DAN
|
positive
|
def _group(self, columns):
dframe = self.dframe[self.groups]
<DeepExtract>
for (idx, column) in enumerate(columns):
column.name = self.__name_for_idx(idx)
dframe = concat([dframe] + [DataFrame(col) for col in columns], axis=1)
</DeepExtract>
groupby = dframe.groupby(self.groups, as_index=False)
return self._add_calculated_column(groupby.sum())
|
def _group(self, columns):
dframe = self.dframe[self.groups]
for (idx, column) in enumerate(columns):
column.name = self.__name_for_idx(idx)
dframe = concat([dframe] + [DataFrame(col) for col in columns], axis=1)
groupby = dframe.groupby(self.groups, as_index=False)
return self._add_calculated_column(groupby.sum())
|
bamboo
|
positive
|
def get_context(self, context):
if 'delete_url' in context:
<DeepExtract>
context['delete_url'] = context['delete_url'] + ('&' if context['delete_url'].find('?') > 0 else '?') + '%s=%s' % self._get_relate_params()
</DeepExtract>
return context
|
def get_context(self, context):
if 'delete_url' in context:
context['delete_url'] = context['delete_url'] + ('&' if context['delete_url'].find('?') > 0 else '?') + '%s=%s' % self._get_relate_params()
return context
|
book
|
positive
|
def __init__(self, body: bytes, *, headers: Optional[HeadersType]=None, content_type: Optional[str]=None, content_encoding: Optional[str]=None, delivery_mode: Union[DeliveryMode, int, None]=None, priority: Optional[int]=None, correlation_id: Optional[str]=None, reply_to: Optional[str]=None, expiration: Optional[DateType]=None, message_id: Optional[str]=None, timestamp: Optional[DateType]=None, type: Optional[str]=None, user_id: Optional[str]=None, app_id: Optional[str]=None):
""" Creates a new instance of Message
:param body: message body
:param headers: message headers
:param content_type: content type
:param content_encoding: content encoding
:param delivery_mode: delivery mode
:param priority: priority
:param correlation_id: correlation id
:param reply_to: reply to
:param expiration: expiration in seconds (or datetime or timedelta)
:param message_id: message id
:param timestamp: timestamp
:param type: type
:param user_id: user id
:param app_id: app id
"""
self.__lock = False
self.body = body if isinstance(body, bytes) else bytes(body)
self.body_size = len(self.body) if self.body else 0
self.headers: HeadersType = headers or {}
self.content_type = content_type
self.content_encoding = content_encoding
self.delivery_mode: DeliveryMode = DeliveryMode(optional(delivery_mode, int, DeliveryMode.NOT_PERSISTENT))
<DeepExtract>
self.priority = int(priority) if priority else 0
</DeepExtract>
<DeepExtract>
self.correlation_id = str(correlation_id) if correlation_id else default
</DeepExtract>
<DeepExtract>
self.reply_to = str(reply_to) if reply_to else default
</DeepExtract>
self.expiration = expiration
<DeepExtract>
self.message_id = str(message_id) if message_id else default
</DeepExtract>
<DeepExtract>
raise ValueError('Invalid timestamp type: %r' % type(timestamp), timestamp)
</DeepExtract>
<DeepExtract>
self.type = str(type) if type else default
</DeepExtract>
<DeepExtract>
self.user_id = str(user_id) if user_id else default
</DeepExtract>
<DeepExtract>
self.app_id = str(app_id) if app_id else default
</DeepExtract>
|
def __init__(self, body: bytes, *, headers: Optional[HeadersType]=None, content_type: Optional[str]=None, content_encoding: Optional[str]=None, delivery_mode: Union[DeliveryMode, int, None]=None, priority: Optional[int]=None, correlation_id: Optional[str]=None, reply_to: Optional[str]=None, expiration: Optional[DateType]=None, message_id: Optional[str]=None, timestamp: Optional[DateType]=None, type: Optional[str]=None, user_id: Optional[str]=None, app_id: Optional[str]=None):
""" Creates a new instance of Message
:param body: message body
:param headers: message headers
:param content_type: content type
:param content_encoding: content encoding
:param delivery_mode: delivery mode
:param priority: priority
:param correlation_id: correlation id
:param reply_to: reply to
:param expiration: expiration in seconds (or datetime or timedelta)
:param message_id: message id
:param timestamp: timestamp
:param type: type
:param user_id: user id
:param app_id: app id
"""
self.__lock = False
self.body = body if isinstance(body, bytes) else bytes(body)
self.body_size = len(self.body) if self.body else 0
self.headers: HeadersType = headers or {}
self.content_type = content_type
self.content_encoding = content_encoding
self.delivery_mode: DeliveryMode = DeliveryMode(optional(delivery_mode, int, DeliveryMode.NOT_PERSISTENT))
self.priority = int(priority) if priority else 0
self.correlation_id = str(correlation_id) if correlation_id else default
self.reply_to = str(reply_to) if reply_to else default
self.expiration = expiration
self.message_id = str(message_id) if message_id else default
raise ValueError('Invalid timestamp type: %r' % type(timestamp), timestamp)
self.type = str(type) if type else default
self.user_id = str(user_id) if user_id else default
self.app_id = str(app_id) if app_id else default
|
aio-pika
|
positive
|
def to_dict(self):
result = {}
fields_accepted_as_none = ('result_path', 'input_path', 'output_path')
for (k, v) in self.fields.items():
if v is not None or k in fields_accepted_as_none:
<DeepExtract>
k = ''.join([t.title() for t in k.split('_')])
</DeepExtract>
if k == to_pascalcase(Field.Parameters.value):
<DeepExtract>
if not isinstance(v, dict):
result[k] = v
modified_parameters = {}
for (k, v) in v.items():
if isinstance(v, Placeholder):
modified_key = '{key}.$'.format(key=k)
modified_parameters[modified_key] = v.to_jsonpath()
elif isinstance(v, dict):
modified_parameters[k] = self._replace_placeholders(v)
elif isinstance(v, list):
modified_parameters[k] = [self._replace_placeholders(i) for i in v]
else:
modified_parameters[k] = v
result[k] = modified_parameters
</DeepExtract>
else:
result[k] = v
return result
|
def to_dict(self):
result = {}
fields_accepted_as_none = ('result_path', 'input_path', 'output_path')
for (k, v) in self.fields.items():
if v is not None or k in fields_accepted_as_none:
k = ''.join([t.title() for t in k.split('_')])
if k == to_pascalcase(Field.Parameters.value):
if not isinstance(v, dict):
result[k] = v
modified_parameters = {}
for (k, v) in v.items():
if isinstance(v, Placeholder):
modified_key = '{key}.$'.format(key=k)
modified_parameters[modified_key] = v.to_jsonpath()
elif isinstance(v, dict):
modified_parameters[k] = self._replace_placeholders(v)
elif isinstance(v, list):
modified_parameters[k] = [self._replace_placeholders(i) for i in v]
else:
modified_parameters[k] = v
result[k] = modified_parameters
else:
result[k] = v
return result
|
aws-step-functions-data-science-sdk-python
|
positive
|
def _get_shape(center, radius, bounds):
<DeepExtract>
out = False
if bounds is not None and bounds.has_any:
if bounds.lower is not None:
lower_binding = np.min(center - bounds.lower) <= radius
if bounds.upper is not None:
upper_binding = np.min(bounds.upper - center) <= radius
out = np.any(lower_binding) or np.any(upper_binding)
any_bounds_binding = out
</DeepExtract>
return 'cube' if any_bounds_binding else 'sphere'
|
def _get_shape(center, radius, bounds):
out = False
if bounds is not None and bounds.has_any:
if bounds.lower is not None:
lower_binding = np.min(center - bounds.lower) <= radius
if bounds.upper is not None:
upper_binding = np.min(bounds.upper - center) <= radius
out = np.any(lower_binding) or np.any(upper_binding)
any_bounds_binding = out
return 'cube' if any_bounds_binding else 'sphere'
|
estimagic
|
positive
|
def __init__(self, factory, key, *args, **kwargs):
<DeepExtract>
param = kwargs.get('mode')
if len(args) > 0:
if param:
raise ValueError("Parameter '%s' is specified twice" % 'mode')
param = args[0]
self.mode = param or MODE_ECB
</DeepExtract>
self.block_size = factory.block_size
if self.mode != MODE_OPENPGP:
self._cipher = factory.new(key, *args, **kwargs)
self.IV = self._cipher.IV
else:
self._done_first_block = False
self._done_last_block = False
<DeepExtract>
param = kwargs.get('iv')
if len(args) > 1:
if param:
raise ValueError("Parameter '%s' is specified twice" % 'iv')
param = args[1]
self.IV = param or default
</DeepExtract>
if not self.IV:
raise ValueError('MODE_OPENPGP requires an IV')
IV_cipher = factory.new(key, MODE_CFB, b('\x00') * self.block_size, segment_size=self.block_size * 8)
if len(self.IV) == self.block_size:
self._encrypted_IV = IV_cipher.encrypt(self.IV + self.IV[-2:] + b('\x00') * (self.block_size - 2))[:self.block_size + 2]
elif len(self.IV) == self.block_size + 2:
self._encrypted_IV = self.IV
self.IV = IV_cipher.decrypt(self.IV + b('\x00') * (self.block_size - 2))[:self.block_size + 2]
if self.IV[-2:] != self.IV[-4:-2]:
raise ValueError('Failed integrity check for OPENPGP IV')
self.IV = self.IV[:-2]
else:
raise ValueError('Length of IV must be %d or %d bytes for MODE_OPENPGP' % (self.block_size, self.block_size + 2))
self._cipher = factory.new(key, MODE_CFB, self._encrypted_IV[-self.block_size:], segment_size=self.block_size * 8)
|
def __init__(self, factory, key, *args, **kwargs):
param = kwargs.get('mode')
if len(args) > 0:
if param:
raise ValueError("Parameter '%s' is specified twice" % 'mode')
param = args[0]
self.mode = param or MODE_ECB
self.block_size = factory.block_size
if self.mode != MODE_OPENPGP:
self._cipher = factory.new(key, *args, **kwargs)
self.IV = self._cipher.IV
else:
self._done_first_block = False
self._done_last_block = False
param = kwargs.get('iv')
if len(args) > 1:
if param:
raise ValueError("Parameter '%s' is specified twice" % 'iv')
param = args[1]
self.IV = param or default
if not self.IV:
raise ValueError('MODE_OPENPGP requires an IV')
IV_cipher = factory.new(key, MODE_CFB, b('\x00') * self.block_size, segment_size=self.block_size * 8)
if len(self.IV) == self.block_size:
self._encrypted_IV = IV_cipher.encrypt(self.IV + self.IV[-2:] + b('\x00') * (self.block_size - 2))[:self.block_size + 2]
elif len(self.IV) == self.block_size + 2:
self._encrypted_IV = self.IV
self.IV = IV_cipher.decrypt(self.IV + b('\x00') * (self.block_size - 2))[:self.block_size + 2]
if self.IV[-2:] != self.IV[-4:-2]:
raise ValueError('Failed integrity check for OPENPGP IV')
self.IV = self.IV[:-2]
else:
raise ValueError('Length of IV must be %d or %d bytes for MODE_OPENPGP' % (self.block_size, self.block_size + 2))
self._cipher = factory.new(key, MODE_CFB, self._encrypted_IV[-self.block_size:], segment_size=self.block_size * 8)
|
awslambda-pycrypto
|
positive
|
def get_model_input_shape(model, mf_path):
input_shape = model.input_shape[1:3]
if input_shape[0] == None or input_shape[1] == None:
<DeepExtract>
config = json.loads(open(get_file_path(mf_path, constants.CONFIG_FILE)).read())
</DeepExtract>
architecture = config['architecture']
if not is_keras_application(architecture):
raise IOError("You must provide an input shape for your architecture '{}'".format(architecture))
return keras_applications[architecture].get('input_shape', (224, 224))
else:
return input_shape
|
def get_model_input_shape(model, mf_path):
input_shape = model.input_shape[1:3]
if input_shape[0] == None or input_shape[1] == None:
config = json.loads(open(get_file_path(mf_path, constants.CONFIG_FILE)).read())
architecture = config['architecture']
if not is_keras_application(architecture):
raise IOError("You must provide an input shape for your architecture '{}'".format(architecture))
return keras_applications[architecture].get('input_shape', (224, 224))
else:
return input_shape
|
dataiku-contrib
|
positive
|
def test_distribution_mediatype_iana_uri_without_format(self):
<DeepExtract>
g = Graph()
dataset = URIRef('http://example.org/datasets/1')
g.add((dataset, RDF.type, DCAT.Dataset))
distribution = URIRef('http://example.org/datasets/1/ds/1')
g.add((dataset, DCAT.distribution, distribution))
g.add((distribution, RDF.type, DCAT.Distribution))
if format_item:
g.add((distribution, DCT['format'], format_item))
if URIRef('https://www.iana.org/assignments/media-types/application/json'):
g.add((distribution, DCAT.mediaType, URIRef('https://www.iana.org/assignments/media-types/application/json')))
if format_item is None and URIRef('https://www.iana.org/assignments/media-types/application/json') is None:
raise AssertionError('At least one of format or mediaType is required!')
p = RDFParser(profiles=['euro_dcat_ap'])
p.g = g
dataset = [d for d in p.datasets()][0]
resources = dataset.get('resources')
</DeepExtract>
assert u'https://www.iana.org/assignments/media-types/application/json' == resources[0].get('mimetype')
assert u'https://www.iana.org/assignments/media-types/application/json' == resources[0].get('format')
|
def test_distribution_mediatype_iana_uri_without_format(self):
g = Graph()
dataset = URIRef('http://example.org/datasets/1')
g.add((dataset, RDF.type, DCAT.Dataset))
distribution = URIRef('http://example.org/datasets/1/ds/1')
g.add((dataset, DCAT.distribution, distribution))
g.add((distribution, RDF.type, DCAT.Distribution))
if format_item:
g.add((distribution, DCT['format'], format_item))
if URIRef('https://www.iana.org/assignments/media-types/application/json'):
g.add((distribution, DCAT.mediaType, URIRef('https://www.iana.org/assignments/media-types/application/json')))
if format_item is None and URIRef('https://www.iana.org/assignments/media-types/application/json') is None:
raise AssertionError('At least one of format or mediaType is required!')
p = RDFParser(profiles=['euro_dcat_ap'])
p.g = g
dataset = [d for d in p.datasets()][0]
resources = dataset.get('resources')
assert u'https://www.iana.org/assignments/media-types/application/json' == resources[0].get('mimetype')
assert u'https://www.iana.org/assignments/media-types/application/json' == resources[0].get('format')
|
ckanext-dcat
|
positive
|
def scan(self, background_length=200, fpr=0.02, n_cpus=-1, verbose=True, motifs=None, TF_evidence_level='direct_and_indirect', TF_formatting='auto', divide=100000):
"""
Scan DNA sequences searching for TF binding motifs.
Args:
background_length (int): background length. This is used for the calculation of the binding score.
fpr (float): False positive rate for motif identification.
n_cpus (int): number of CPUs for parallel calculation.
verbose (bool): Whether to show a progress bar.
motifs (list): a list of gimmemotifs motifs, will revert to default_motifs() if None
TF_evidence_level (str): Please select one from ["direct", "direct_and_indirect"]. If "direct" is selected, TFs that have binding evidence are used.
If "direct_and_indirect" is selected, TFs with binding evidence and inferred TFs are used.
For more information, please read explanation of Motif class in gimmemotifs documentation (https://gimmemotifs.readthedocs.io/en/master/index.html)
"""
self.fpr = fpr
self.background_length = background_length
if motifs is None:
if verbose:
print('No motif data entered. Loading default motifs for your species ...')
if self.species in ['Mouse', 'Human', 'Rat']:
motifs = default_motifs()
self.motif_db_name = 'gimme.vertebrate.v5.0'
self.TF_formatting = True
if verbose:
print(' Default motif for vertebrate: gimme.vertebrate.v5.0. \n For more information, please see https://gimmemotifs.readthedocs.io/en/master/overview.html \n')
elif self.species in ['Zebrafish']:
self.motif_db_name = 'CisBP_ver2_Danio_rerio.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['S.cerevisiae']:
self.motif_db_name = 'CisBP_ver2_Saccharomyces_cerevisiae.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Xenopus tropicalis']:
self.motif_db_name = 'CisBP_ver2_Xenopus_tropicalis.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}.')
print(f' Default motif for {self.species} was changed at celloracle 0.10.14. \n For more information, please see celloracle documentation Changelog page. \n')
elif self.species in ['Xenopus laevis']:
self.motif_db_name = 'CisBP_ver2_Xenopus_laevis.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Drosophila']:
self.motif_db_name = 'CisBP_ver2_Drosophila_melanogaster.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['C.elegans']:
self.motif_db_name = 'CisBP_ver2_Caenorhabditis_elegans.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Arabidopsis']:
self.motif_db_name = 'CisBP_ver2_Arabidopsis_thaliana_GENE_ID.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Chicken']:
self.motif_db_name = 'CisBP_ver2_Gallus_gallus.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Guinea_Pig']:
self.motif_db_name = 'CisBP_ver2_Cavia_porcellus.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Pig']:
self.motif_db_name = 'CisBP_ver2_Sus_scrofa.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
else:
raise ValueError(f"We don't have default motifs for your species, Please specify motif data by yourself.")
else:
if isinstance(motifs, list):
if isinstance(motifs[0], Motif):
if verbose:
print('Checking your motifs... Motifs format looks good. \n')
else:
raise ValueError(f'Motif data type was invalid.')
else:
raise ValueError(f'motifs should be a list of Motif object in gimmemotifs.')
self.motif_db_name = 'custom_motifs'
if TF_formatting == 'auto':
self.TF_formatting = False
else:
self.TF_formatting = TF_formatting
self.motifs = motifs
<DeepExtract>
if TF_evidence_level == 'direct_and_indirect':
factor_kind = [DIRECT_NAME, INDIRECT_NAME]
elif TF_evidence_level == 'direct':
factor_kind = [DIRECT_NAME]
dic_motif2TFs = {}
for i in motifs:
fcs = []
for j in factor_kind:
fcs += i.factors[j]
dic_motif2TFs[i.id] = fcs
if self.TF_formatting:
if self.species in ['Mouse', 'Rat']:
for key in dic_motif2TFs.keys():
dic_motif2TFs[key] = [tf.capitalize() for tf in dic_motif2TFs[key]]
elif self.species in ['Human', 'S.cerevisiae', 'Arabidopsis']:
for key in dic_motif2TFs.keys():
dic_motif2TFs[key] = [tf.upper() for tf in dic_motif2TFs[key]]
elif self.species in ['Zebrafish', 'Xenopus']:
for key in dic_motif2TFs.keys():
dic_motif2TFs[key] = [tf.lower() for tf in dic_motif2TFs[key]]
elif self.species in ['Drosophila', 'C.elegans']:
pass
self.dic_motif2TFs = dic_motif2TFs
</DeepExtract>
self.TF_evidence_level = TF_evidence_level
if verbose:
print('Initiating scanner... \n')
s = Scanner(ncpus=n_cpus)
s.set_motifs(motifs)
try:
s.set_background(genome=self.ref_genome, size=background_length)
except:
s.set_background(genome=self.ref_genome, length=background_length)
if verbose:
print('Calculating FPR-based threshold. This step may take substantial time when you load a new ref-genome. It will be done quicker on the second time. \n')
s.set_threshold(fpr=fpr)
target_sequences = peak2fasta(self.all_peaks, self.ref_genome)
target_sequences = remove_zero_seq(fasta_object=target_sequences)
if verbose:
print('Motif scan started .. It may take long time.\n')
self.scanned_df = scan_dna_for_motifs(scanner_object=s, motifs_object=motifs, sequence_object=target_sequences, divide=divide, verbose=verbose)
<DeepExtract>
new_df = pd.DataFrame({'time': [datetime.now().ctime()], 'info': ['scanMotifs']})
self.easy_log = pd.concat([self.easy_log, new_df], axis=0).reset_index(drop=True)
</DeepExtract>
|
def scan(self, background_length=200, fpr=0.02, n_cpus=-1, verbose=True, motifs=None, TF_evidence_level='direct_and_indirect', TF_formatting='auto', divide=100000):
"""
Scan DNA sequences searching for TF binding motifs.
Args:
background_length (int): background length. This is used for the calculation of the binding score.
fpr (float): False positive rate for motif identification.
n_cpus (int): number of CPUs for parallel calculation.
verbose (bool): Whether to show a progress bar.
motifs (list): a list of gimmemotifs motifs, will revert to default_motifs() if None
TF_evidence_level (str): Please select one from ["direct", "direct_and_indirect"]. If "direct" is selected, TFs that have binding evidence are used.
If "direct_and_indirect" is selected, TFs with binding evidence and inferred TFs are used.
For more information, please read explanation of Motif class in gimmemotifs documentation (https://gimmemotifs.readthedocs.io/en/master/index.html)
"""
self.fpr = fpr
self.background_length = background_length
if motifs is None:
if verbose:
print('No motif data entered. Loading default motifs for your species ...')
if self.species in ['Mouse', 'Human', 'Rat']:
motifs = default_motifs()
self.motif_db_name = 'gimme.vertebrate.v5.0'
self.TF_formatting = True
if verbose:
print(' Default motif for vertebrate: gimme.vertebrate.v5.0. \n For more information, please see https://gimmemotifs.readthedocs.io/en/master/overview.html \n')
elif self.species in ['Zebrafish']:
self.motif_db_name = 'CisBP_ver2_Danio_rerio.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['S.cerevisiae']:
self.motif_db_name = 'CisBP_ver2_Saccharomyces_cerevisiae.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Xenopus tropicalis']:
self.motif_db_name = 'CisBP_ver2_Xenopus_tropicalis.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}.')
print(f' Default motif for {self.species} was changed at celloracle 0.10.14. \n For more information, please see celloracle documentation Changelog page. \n')
elif self.species in ['Xenopus laevis']:
self.motif_db_name = 'CisBP_ver2_Xenopus_laevis.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Drosophila']:
self.motif_db_name = 'CisBP_ver2_Drosophila_melanogaster.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['C.elegans']:
self.motif_db_name = 'CisBP_ver2_Caenorhabditis_elegans.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Arabidopsis']:
self.motif_db_name = 'CisBP_ver2_Arabidopsis_thaliana_GENE_ID.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Chicken']:
self.motif_db_name = 'CisBP_ver2_Gallus_gallus.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Guinea_Pig']:
self.motif_db_name = 'CisBP_ver2_Cavia_porcellus.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
elif self.species in ['Pig']:
self.motif_db_name = 'CisBP_ver2_Sus_scrofa.pfm'
motifs = load_motifs(self.motif_db_name)
self.TF_formatting = False
if verbose:
print(f' Default motif for {self.species}: {self.motif_db_name}. \n For more information about the motif data, please see http://cisbp.ccbr.utoronto.ca. \n')
else:
raise ValueError(f"We don't have default motifs for your species, Please specify motif data by yourself.")
else:
if isinstance(motifs, list):
if isinstance(motifs[0], Motif):
if verbose:
print('Checking your motifs... Motifs format looks good. \n')
else:
raise ValueError(f'Motif data type was invalid.')
else:
raise ValueError(f'motifs should be a list of Motif object in gimmemotifs.')
self.motif_db_name = 'custom_motifs'
if TF_formatting == 'auto':
self.TF_formatting = False
else:
self.TF_formatting = TF_formatting
self.motifs = motifs
if TF_evidence_level == 'direct_and_indirect':
factor_kind = [DIRECT_NAME, INDIRECT_NAME]
elif TF_evidence_level == 'direct':
factor_kind = [DIRECT_NAME]
dic_motif2TFs = {}
for i in motifs:
fcs = []
for j in factor_kind:
fcs += i.factors[j]
dic_motif2TFs[i.id] = fcs
if self.TF_formatting:
if self.species in ['Mouse', 'Rat']:
for key in dic_motif2TFs.keys():
dic_motif2TFs[key] = [tf.capitalize() for tf in dic_motif2TFs[key]]
elif self.species in ['Human', 'S.cerevisiae', 'Arabidopsis']:
for key in dic_motif2TFs.keys():
dic_motif2TFs[key] = [tf.upper() for tf in dic_motif2TFs[key]]
elif self.species in ['Zebrafish', 'Xenopus']:
for key in dic_motif2TFs.keys():
dic_motif2TFs[key] = [tf.lower() for tf in dic_motif2TFs[key]]
elif self.species in ['Drosophila', 'C.elegans']:
pass
self.dic_motif2TFs = dic_motif2TFs
self.TF_evidence_level = TF_evidence_level
if verbose:
print('Initiating scanner... \n')
s = Scanner(ncpus=n_cpus)
s.set_motifs(motifs)
try:
s.set_background(genome=self.ref_genome, size=background_length)
except:
s.set_background(genome=self.ref_genome, length=background_length)
if verbose:
print('Calculating FPR-based threshold. This step may take substantial time when you load a new ref-genome. It will be done quicker on the second time. \n')
s.set_threshold(fpr=fpr)
target_sequences = peak2fasta(self.all_peaks, self.ref_genome)
target_sequences = remove_zero_seq(fasta_object=target_sequences)
if verbose:
print('Motif scan started .. It may take long time.\n')
self.scanned_df = scan_dna_for_motifs(scanner_object=s, motifs_object=motifs, sequence_object=target_sequences, divide=divide, verbose=verbose)
new_df = pd.DataFrame({'time': [datetime.now().ctime()], 'info': ['scanMotifs']})
self.easy_log = pd.concat([self.easy_log, new_df], axis=0).reset_index(drop=True)
|
CellOracle
|
positive
|
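The CellOracle pair above ends by normalizing TF gene symbols per species (capitalize for mouse/rat, upper-case for human/yeast/Arabidopsis, lower-case for zebrafish/Xenopus). A minimal, self-contained sketch of that normalization step, using plain dictionaries instead of gimmemotifs Motif objects; the helper name below is hypothetical:

# Hypothetical helper mirroring the per-species case normalization above.
def normalize_tf_case(dic_motif2tfs, species):
    if species in ("Mouse", "Rat"):
        fix = str.capitalize            # e.g. "SOX2" -> "Sox2"
    elif species in ("Human", "S.cerevisiae", "Arabidopsis"):
        fix = str.upper                 # e.g. "sox2" -> "SOX2"
    elif species in ("Zebrafish", "Xenopus"):
        fix = str.lower                 # e.g. "SOX2" -> "sox2"
    else:                               # e.g. Drosophila, C.elegans: leave as-is
        fix = lambda s: s
    return {motif: [fix(tf) for tf in tfs] for motif, tfs in dic_motif2tfs.items()}

# Usage
print(normalize_tf_case({"GM.5.0.Sox.0001": ["SOX2", "sox21"]}, "Mouse"))
# {'GM.5.0.Sox.0001': ['Sox2', 'Sox21']}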
def _get_interpolated_xy(target_x, target_y, current_cube_x, current_cube_y, next_cube_x, next_cube_y):
"""
:param target_x: target x of the grip center.
:param target_y: target y of the grip center.
:param current_cube_x: x of current cube to be gripped.
:param current_cube_y: y of current cube to be gripped.
:param next_cube_x: x of next cube to be gripped.
:param next_cube_y: y of next cube to be gripped.
:return:
"""
if self._phase < 4:
current_x = current_cube_x
current_y = current_cube_y
else:
current_x = next_cube_x
current_y = next_cube_y
<DeepExtract>
if self._phase < 3:
alpha = 0
elif self._phase == 3:
alpha = self._mix_sin(self._t)
elif self._phase == 4:
alpha = 1.0
elif self._phase == 5:
alpha = 1.0
elif self._phase == 6:
alpha = 1 - self._mix_sin(self._t)
else:
raise ValueError()
</DeepExtract>
xy_target = (1 - alpha) * np.array([current_x, current_y]) + alpha * np.array([target_x, target_y])
return xy_target
|
def _get_interpolated_xy(target_x, target_y, current_cube_x, current_cube_y, next_cube_x, next_cube_y):
"""
:param target_x: target x of the grip center.
:param target_y: target y of the grip center.
:param current_cube_x: x of current cube to be gripped.
:param current_cube_y: y of current cube to be gripped.
:param next_cube_x: x of next cube to be gripped.
:param next_cube_y: y of next cube to be gripped.
:return:
"""
if self._phase < 4:
current_x = current_cube_x
current_y = current_cube_y
else:
current_x = next_cube_x
current_y = next_cube_y
if self._phase < 3:
alpha = 0
elif self._phase == 3:
alpha = self._mix_sin(self._t)
elif self._phase == 4:
alpha = 1.0
elif self._phase == 5:
alpha = 1.0
elif self._phase == 6:
alpha = 1 - self._mix_sin(self._t)
else:
raise ValueError()
xy_target = (1 - alpha) * np.array([current_x, current_y]) + alpha * np.array([target_x, target_y])
return xy_target
|
CausalWorld
|
positive
|
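The CausalWorld pair above blends a current cube position and a target grip position with a phase-dependent mixing weight. A standalone sketch of the same convex-combination idea, assuming numpy and using a simple cosine ramp in place of the class's _mix_sin (names below are illustrative, not the repo's API):

import numpy as np

def mix_sin(t):
    # Smooth 0 -> 1 ramp for t in [0, 1], similar in spirit to _mix_sin.
    return 0.5 * (1.0 - np.cos(np.pi * np.clip(t, 0.0, 1.0)))

def interpolated_xy(current_xy, target_xy, alpha):
    # Convex combination: alpha=0 stays at current, alpha=1 reaches target.
    current_xy = np.asarray(current_xy, dtype=float)
    target_xy = np.asarray(target_xy, dtype=float)
    return (1.0 - alpha) * current_xy + alpha * target_xy

print(interpolated_xy([0.0, 0.0], [1.0, 2.0], mix_sin(0.5)))  # [0.5 1.0]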
def _get_or_create_bundle_ids(bundle_id_identifier: str, platform: BundleIdPlatform, create_resource: bool, strict_match: bool) -> List[BundleId]:
<DeepExtract>
def predicate(bundle_id):
bundle_ids = bundle_id.attributes.identifier == bundle_id_identifier
bundle_id_filter = self.api_client.bundle_ids.Filter(identifier=bundle_id_identifier, name=bundle_id_name, platform=platform)
bundle_ids = self._list_resources(bundle_id_filter, self.api_client.bundle_ids, False, filter_predicate=predicate if strict_match else None)
bundle_ids = bundle_ids
</DeepExtract>
if not bundle_ids:
if not create_resource:
raise AppStoreConnectError(f'Did not find {BundleId.s} with identifier {bundle_id_identifier}')
bundle_ids.append(self.create_bundle_id(bundle_id_identifier, platform=platform, should_print=False))
else:
for bundle_id in bundle_ids:
self.logger.info(f'- {bundle_id.attributes.name} {bundle_id.attributes.identifier} ({bundle_id.id})')
return bundle_ids
|
def _get_or_create_bundle_ids(bundle_id_identifier: str, platform: BundleIdPlatform, create_resource: bool, strict_match: bool) -> List[BundleId]:
def predicate(bundle_id):
bundle_ids = bundle_id.attributes.identifier == bundle_id_identifier
bundle_id_filter = self.api_client.bundle_ids.Filter(identifier=bundle_id_identifier, name=bundle_id_name, platform=platform)
bundle_ids = self._list_resources(bundle_id_filter, self.api_client.bundle_ids, False, filter_predicate=predicate if strict_match else None)
bundle_ids = bundle_ids
if not bundle_ids:
if not create_resource:
raise AppStoreConnectError(f'Did not find {BundleId.s} with identifier {bundle_id_identifier}')
bundle_ids.append(self.create_bundle_id(bundle_id_identifier, platform=platform, should_print=False))
else:
for bundle_id in bundle_ids:
self.logger.info(f'- {bundle_id.attributes.name} {bundle_id.attributes.identifier} ({bundle_id.id})')
return bundle_ids
|
cli-tools
|
positive
|
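The cli-tools pair above follows a find-or-create pattern: look up resources by identifier and create one only when nothing matches and creation is allowed. A toy sketch of that pattern against a plain dict instead of the App Store Connect client (everything below is illustrative):

# Generic find-or-create helper over an in-memory "store" of resources.
def get_or_create(store, identifier, create_resource=True):
    matches = [item for item in store.values() if item["identifier"] == identifier]
    if matches:
        return matches
    if not create_resource:
        raise LookupError(f"no resource with identifier {identifier!r}")
    created = {"identifier": identifier}
    store[identifier] = created
    return [created]

registry = {}
print(get_or_create(registry, "com.example.app"))   # creates and returns it
print(get_or_create(registry, "com.example.app"))   # now found without creating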
def test_clinical_event_with_code():
condition_code = 'ASTHMA'
<DeepExtract>
if [None, ['2001-01-01', '2002-01-01', '2002-02-01', '2002-06-01'], ['2001-06-01']] is None:
[None, ['2001-01-01', '2002-01-01', '2002-02-01', '2002-06-01'], ['2001-06-01']] = ['2001-06-01', '2002-06-01', None]
session = make_session()
for dates in [None, ['2001-01-01', '2002-01-01', '2002-02-01', '2002-06-01'], ['2001-06-01']]:
patient = Patient()
if dates is None:
dates = []
elif isinstance(dates, str):
dates = [dates]
for date in dates:
if isinstance(date, tuple):
(date, value) = date
else:
value = 0.0
patient.CodedEvents.append(CodedEvent(CTV3Code=condition_code, ConsultationDate=date, NumericValue=value))
session.add(patient)
session.commit()
</DeepExtract>
study = StudyDefinition(population=patients.all(), latest_asthma_code=patients.with_these_clinical_events(codelist([condition_code], 'ctv3'), between=['2001-12-01', '2002-06-01'], returning='code', find_last_match_in_period=True), latest_asthma_code_date=patients.date_of('latest_asthma_code', date_format='YYYY-MM'))
results = study.to_dicts()
assert [x['latest_asthma_code'] for x in results] == ['', condition_code, '']
assert [x['latest_asthma_code_date'] for x in results] == ['', '2002-06', '']
|
def test_clinical_event_with_code():
condition_code = 'ASTHMA'
if [None, ['2001-01-01', '2002-01-01', '2002-02-01', '2002-06-01'], ['2001-06-01']] is None:
[None, ['2001-01-01', '2002-01-01', '2002-02-01', '2002-06-01'], ['2001-06-01']] = ['2001-06-01', '2002-06-01', None]
session = make_session()
for dates in [None, ['2001-01-01', '2002-01-01', '2002-02-01', '2002-06-01'], ['2001-06-01']]:
patient = Patient()
if dates is None:
dates = []
elif isinstance(dates, str):
dates = [dates]
for date in dates:
if isinstance(date, tuple):
(date, value) = date
else:
value = 0.0
patient.CodedEvents.append(CodedEvent(CTV3Code=condition_code, ConsultationDate=date, NumericValue=value))
session.add(patient)
session.commit()
study = StudyDefinition(population=patients.all(), latest_asthma_code=patients.with_these_clinical_events(codelist([condition_code], 'ctv3'), between=['2001-12-01', '2002-06-01'], returning='code', find_last_match_in_period=True), latest_asthma_code_date=patients.date_of('latest_asthma_code', date_format='YYYY-MM'))
results = study.to_dicts()
assert [x['latest_asthma_code'] for x in results] == ['', condition_code, '']
assert [x['latest_asthma_code_date'] for x in results] == ['', '2002-06', '']
|
cohort-extractor
|
positive
|
def load_from_file(self, path):
if not self.config_exists(path):
raise ValueError('Config file does not exist!')
(f_path, filename) = os.path.split(path)
if filename.startswith('configspec_'):
<DeepExtract>
filename = os.path.split(path)[1]
config_path = os.path.join(parent_path(path, levels=2), filename.replace('configspec_', ''))
</DeepExtract>
if self.config_exists(config_path):
return self.load_config_and_spec(config_path)
generate_file = parent_dir(f_path) == 'spec'
if generate_file:
<DeepExtract>
if config_path.config_exists(cfg_path):
return False
vdt = Validator()
config = ConfigObj(configspec=config_path._get_spec_path(cfg_path))
config.filename = cfg_path
config.validate(vdt, copy=True)
config.indent_type = ''
config.initial_comment = ('This is generated config with default values', 'Modify to configure')
config.write()
return True
</DeepExtract>
return self.load_only_spec(path, generate_file)
else:
<DeepExtract>
spec_path = modify_filename(path, 'spec/configspec_{}')
</DeepExtract>
if self.config_exists(spec_path):
return self.load_config_and_spec(path)
return self.load_only_config(path)
|
def load_from_file(self, path):
if not self.config_exists(path):
raise ValueError('Config file does not exist!')
(f_path, filename) = os.path.split(path)
if filename.startswith('configspec_'):
filename = os.path.split(path)[1]
config_path = os.path.join(parent_path(path, levels=2), filename.replace('configspec_', ''))
if self.config_exists(config_path):
return self.load_config_and_spec(config_path)
generate_file = parent_dir(f_path) == 'spec'
if generate_file:
if config_path.config_exists(cfg_path):
return False
vdt = Validator()
config = ConfigObj(configspec=config_path._get_spec_path(cfg_path))
config.filename = cfg_path
config.validate(vdt, copy=True)
config.indent_type = ''
config.initial_comment = ('This is generated config with default values', 'Modify to configure')
config.write()
return True
return self.load_only_spec(path, generate_file)
else:
spec_path = modify_filename(path, 'spec/configspec_{}')
if self.config_exists(spec_path):
return self.load_config_and_spec(path)
return self.load_only_config(path)
|
clever-show
|
positive
|
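The clever-show pair above derives a configspec path from a config path and back by renaming the file and moving it into or out of a spec/ subfolder. A small sketch of those two path transformations, assuming the same naming convention (the helper names are hypothetical):

import os

# "client/config.ini" pairs with "client/spec/configspec_config.ini" and back.
def spec_path_for(config_path):
    folder, name = os.path.split(config_path)
    return os.path.join(folder, "spec", f"configspec_{name}")

def config_path_for(spec_path):
    folder, name = os.path.split(spec_path)
    parent = os.path.dirname(folder)              # strip the trailing "spec" dir
    return os.path.join(parent, name.replace("configspec_", "", 1))

print(spec_path_for("client/config.ini"))                      # client/spec/configspec_config.ini (POSIX paths)
print(config_path_for("client/spec/configspec_config.ini"))    # client/config.ini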
def test_learning_dynamic_signal_static_observation(self):
print('\n ---------------------------------------------------------\n test learning\n using dynamic signal and static observation points\n ---------------------------------------------------------\n ')
MAX_ITER = 1000
loc = amath.random.uniform(self.low, self.high, (self.n_obs, self.dim))
model = FunctionalDyBM(self.dim, loc, self.delay, self.decay_rates, noise_var=0.01, ker_paras={'ker_type': 'rbf', 'gamma': 1.0}, learning_rate=0.0001)
for i in xrange(MAX_ITER):
<DeepExtract>
pattern = amath.sin(loc.sum(axis=1) + i / 10.0) + 1e-05 * amath.random.randn(loc.shape[0])
</DeepExtract>
if i % 100 == 0:
print('step {}:\t log-pdf = {} \t RSME = {}'.format(i, model.get_LL(pattern, loc), model.compute_RMSE(pattern, loc)))
model.learn_one_step(pattern, loc)
<DeepExtract>
pattern = amath.sin(loc.sum(axis=1) + MAX_ITER / 10.0) + 1e-05 * amath.random.randn(loc.shape[0])
</DeepExtract>
print('\npattern = {}'.format(pattern))
print('pred = {}'.format(model.predict_next(loc)))
return 0
|
def test_learning_dynamic_signal_static_observation(self):
print('\n ---------------------------------------------------------\n test learning\n using dynamic signal and static observation points\n ---------------------------------------------------------\n ')
MAX_ITER = 1000
loc = amath.random.uniform(self.low, self.high, (self.n_obs, self.dim))
model = FunctionalDyBM(self.dim, loc, self.delay, self.decay_rates, noise_var=0.01, ker_paras={'ker_type': 'rbf', 'gamma': 1.0}, learning_rate=0.0001)
for i in xrange(MAX_ITER):
pattern = amath.sin(loc.sum(axis=1) + i / 10.0) + 1e-05 * amath.random.randn(loc.shape[0])
if i % 100 == 0:
print('step {}:\t log-pdf = {} \t RSME = {}'.format(i, model.get_LL(pattern, loc), model.compute_RMSE(pattern, loc)))
model.learn_one_step(pattern, loc)
pattern = amath.sin(loc.sum(axis=1) + MAX_ITER / 10.0) + 1e-05 * amath.random.randn(loc.shape[0])
print('\npattern = {}'.format(pattern))
print('pred = {}'.format(model.predict_next(loc)))
return 0
|
dybm
|
positive
|
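The dybm test above drives the model with a synthetic signal: a sine of the summed coordinates that drifts with the step index, plus a small noise term. A standalone sketch of that signal generator, assuming numpy:

import numpy as np

# Synthetic observation pattern: sin(sum of coordinates + step/10) + tiny noise.
def make_pattern(loc, step, noise=1e-5, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    return np.sin(loc.sum(axis=1) + step / 10.0) + noise * rng.standard_normal(loc.shape[0])

loc = np.random.default_rng(0).uniform(0.0, 1.0, size=(5, 2))  # 5 observation points in 2-D
print(make_pattern(loc, step=0).shape)  # (5,)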
@property
def charset_name(self):
if not self._best_guess_prober:
<DeepExtract>
state = self.state
if state == ProbingState.FOUND_IT:
return 0.99
elif state == ProbingState.NOT_ME:
return 0.01
best_conf = 0.0
self._best_guess_prober = None
for prober in self.probers:
if not prober:
continue
if not prober.active:
self.logger.debug('%s not active', prober.charset_name)
continue
conf = prober.get_confidence()
self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
if best_conf < conf:
best_conf = conf
self._best_guess_prober = prober
if not self._best_guess_prober:
return 0.0
return best_conf
</DeepExtract>
if not self._best_guess_prober:
return None
return self._best_guess_prober.charset_name
|
@property
def charset_name(self):
if not self._best_guess_prober:
state = self.state
if state == ProbingState.FOUND_IT:
return 0.99
elif state == ProbingState.NOT_ME:
return 0.01
best_conf = 0.0
self._best_guess_prober = None
for prober in self.probers:
if not prober:
continue
if not prober.active:
self.logger.debug('%s not active', prober.charset_name)
continue
conf = prober.get_confidence()
self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
if best_conf < conf:
best_conf = conf
self._best_guess_prober = prober
if not self._best_guess_prober:
return 0.0
return best_conf
if not self._best_guess_prober:
return None
return self._best_guess_prober.charset_name
|
alexa-sky-hd
|
positive
|
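The prober snippet above (vendored chardet-style code in alexa-sky-hd) picks the best guess by scanning active probers and keeping the one with the highest confidence. A reduced sketch of that selection, with a stand-in Prober class rather than the chardet API:

# Stand-in prober: just holds a charset name and a fixed confidence.
class Prober:
    def __init__(self, charset_name, confidence, active=True):
        self.charset_name = charset_name
        self._confidence = confidence
        self.active = active

    def get_confidence(self):
        return self._confidence

def best_prober(probers):
    # Keep the active prober with the highest reported confidence.
    best, best_conf = None, 0.0
    for prober in probers:
        if prober is None or not prober.active:
            continue
        conf = prober.get_confidence()
        if conf > best_conf:
            best, best_conf = prober, conf
    return best, best_conf

probers = [Prober("UTF-8", 0.87), Prober("windows-1252", 0.4), Prober("ascii", 0.6, active=False)]
print(best_prober(probers)[0].charset_name)  # UTF-8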
def test_run_with_ref_seq_remove_reference(self, test_file, ref_file, ref_seq, run):
expected_length = len(ref_seq.seq) - ref_seq.seq.count('-')
<DeepExtract>
def run(args):
args = '-s %s --reference-sequence %s --remove-reference' % (test_file, ref_file)(args + ' -o %s' % out_file)
align.run(args)
output = SeqIO.to_dict(SeqIO.parse(out_file, 'fasta'))
output = run
</DeepExtract>
assert ref_seq.id not in output
|
def test_run_with_ref_seq_remove_reference(self, test_file, ref_file, ref_seq, run):
expected_length = len(ref_seq.seq) - ref_seq.seq.count('-')
def run(args):
args = '-s %s --reference-sequence %s --remove-reference' % (test_file, ref_file)(args + ' -o %s' % out_file)
align.run(args)
output = SeqIO.to_dict(SeqIO.parse(out_file, 'fasta'))
output = run
assert ref_seq.id not in output
|
augur
|
positive
|
def valid_epoch(datagenerator, steps):
step = 0
loss_sum = 0
loss_rpn_class_sum = 0
loss_rpn_bbox_sum = 0
loss_mrcnn_class_sum = 0
loss_mrcnn_bbox_sum = 0
loss_mrcnn_mask_sum = 0
for inputs in datagenerator:
images = inputs[0]
image_metas = inputs[1]
rpn_match = inputs[2]
rpn_bbox = inputs[3]
gt_class_ids = inputs[4]
gt_boxes = inputs[5]
gt_masks = inputs[6]
image_metas = image_metas.numpy()
images = Variable(images, volatile=True)
rpn_match = Variable(rpn_match, volatile=True)
rpn_bbox = Variable(rpn_bbox, volatile=True)
gt_class_ids = Variable(gt_class_ids, volatile=True)
gt_boxes = Variable(gt_boxes, volatile=True)
gt_masks = Variable(gt_masks, volatile=True)
if self.config.GPU_COUNT:
images = images.cuda()
rpn_match = rpn_match.cuda()
rpn_bbox = rpn_bbox.cuda()
gt_class_ids = gt_class_ids.cuda()
gt_boxes = gt_boxes.cuda()
gt_masks = gt_masks.cuda()
<DeepExtract>
molded_images = [images, image_metas, gt_class_ids, gt_boxes, gt_masks][0]
image_metas = [images, image_metas, gt_class_ids, gt_boxes, gt_masks][1]
if 'training' == 'inference':
self.eval()
elif 'training' == 'training':
self.train()
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
self.apply(set_bn_eval)
[p2_out, p3_out, p4_out, p5_out, p6_out] = self.fpn(molded_images)
rpn_feature_maps = [p2_out, p3_out, p4_out, p5_out, p6_out]
mrcnn_feature_maps = [p2_out, p3_out, p4_out, p5_out]
layer_outputs = []
for p in rpn_feature_maps:
layer_outputs.append(self.rpn(p))
outputs = list(zip(*layer_outputs))
outputs = [torch.cat(list(o), dim=1) for o in outputs]
(rpn_class_logits, rpn_class, rpn_bbox) = outputs
proposal_count = self.config.POST_NMS_ROIS_TRAINING if 'training' == 'training' else self.config.POST_NMS_ROIS_INFERENCE
rpn_rois = proposal_layer([rpn_class, rpn_bbox], proposal_count=proposal_count, nms_threshold=self.config.RPN_NMS_THRESHOLD, anchors=self.anchors, config=self.config)
if 'training' == 'inference':
(mrcnn_class_logits, mrcnn_class, mrcnn_bbox) = self.classifier(mrcnn_feature_maps, rpn_rois)
detections = detection_layer(self.config, rpn_rois, mrcnn_class, mrcnn_bbox, image_metas)
if detections is None:
(rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask) = [None, None]
(h, w) = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
detection_boxes = detections[:, :4] / scale
detection_boxes = detection_boxes.unsqueeze(0)
mrcnn_mask = self.mask(mrcnn_feature_maps, detection_boxes)
detections = detections.unsqueeze(0)
mrcnn_mask = mrcnn_mask.unsqueeze(0)
(rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask) = [detections, mrcnn_mask]
elif 'training' == 'training':
gt_class_ids = [images, image_metas, gt_class_ids, gt_boxes, gt_masks][2]
gt_boxes = [images, image_metas, gt_class_ids, gt_boxes, gt_masks][3]
gt_masks = [images, image_metas, gt_class_ids, gt_boxes, gt_masks][4]
(h, w) = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
gt_boxes = gt_boxes / scale
(rois, target_class_ids, target_deltas, target_mask) = detection_target_layer(rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)
if not rois.size():
mrcnn_class_logits = Variable(torch.FloatTensor())
mrcnn_class = Variable(torch.IntTensor())
mrcnn_bbox = Variable(torch.FloatTensor())
mrcnn_mask = Variable(torch.FloatTensor())
if self.config.GPU_COUNT:
mrcnn_class_logits = mrcnn_class_logits.cuda()
mrcnn_class = mrcnn_class.cuda()
mrcnn_bbox = mrcnn_bbox.cuda()
mrcnn_mask = mrcnn_mask.cuda()
else:
(mrcnn_class_logits, mrcnn_class, mrcnn_bbox) = self.classifier(mrcnn_feature_maps, rois)
mrcnn_mask = self.mask(mrcnn_feature_maps, rois)
(rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask) = [rpn_class_logits, rpn_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask]
</DeepExtract>
if not target_class_ids.size():
continue
<DeepExtract>
rpn_class_loss = compute_rpn_class_loss(rpn_match, rpn_class_logits)
rpn_bbox_loss = compute_rpn_bbox_loss(rpn_bbox, rpn_match, rpn_pred_bbox)
mrcnn_class_loss = compute_mrcnn_class_loss(target_class_ids, mrcnn_class_logits)
mrcnn_bbox_loss = compute_mrcnn_bbox_loss(target_deltas, target_class_ids, mrcnn_bbox)
mrcnn_mask_loss = compute_mrcnn_mask_loss(target_mask, target_class_ids, mrcnn_mask)
(rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss) = [rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss]
</DeepExtract>
loss = rpn_class_loss + rpn_bbox_loss + mrcnn_class_loss + mrcnn_bbox_loss + mrcnn_mask_loss
loss_sum += loss.data.cpu()[0] / steps
loss_rpn_class_sum += rpn_class_loss.data.cpu()[0] / steps
loss_rpn_bbox_sum += rpn_bbox_loss.data.cpu()[0] / steps
loss_mrcnn_class_sum += mrcnn_class_loss.data.cpu()[0] / steps
loss_mrcnn_bbox_sum += mrcnn_bbox_loss.data.cpu()[0] / steps
loss_mrcnn_mask_sum += mrcnn_mask_loss.data.cpu()[0] / steps
if step == steps - 1:
<DeepExtract>
percent = ('{0:.' + str(decimals) + 'f}').format(100 * (step + 1 / float(steps)))
filledLength = int(10 * step + 1 // steps)
bar = fill * filledLength + '-' * (10 - filledLength)
sys.stdout.write('{}|{}|{}|{}\n'.format('\t{}/{}'.format(step + 1, steps), bar, percent, 'Complete - loss: {:.5f} - rpn_class_loss: {:.5f} - rpn_bbox_loss: {:.5f} - mrcnn_class_loss: {:.5f} - mrcnn_bbox_loss: {:.5f} - mrcnn_mask_loss: {:.5f}'.format(loss_sum, loss_rpn_class_sum, loss_rpn_bbox_sum, loss_mrcnn_class_sum, loss_mrcnn_bbox_sum, loss_mrcnn_mask_sum)))
sys.stdout.flush()
if step + 1 == steps:
print()
</DeepExtract>
break
step += 1
return (loss_sum, loss_rpn_class_sum, loss_rpn_bbox_sum, loss_mrcnn_class_sum, loss_mrcnn_bbox_sum, loss_mrcnn_mask_sum)
|
def valid_epoch(datagenerator, steps):
step = 0
loss_sum = 0
loss_rpn_class_sum = 0
loss_rpn_bbox_sum = 0
loss_mrcnn_class_sum = 0
loss_mrcnn_bbox_sum = 0
loss_mrcnn_mask_sum = 0
for inputs in datagenerator:
images = inputs[0]
image_metas = inputs[1]
rpn_match = inputs[2]
rpn_bbox = inputs[3]
gt_class_ids = inputs[4]
gt_boxes = inputs[5]
gt_masks = inputs[6]
image_metas = image_metas.numpy()
images = Variable(images, volatile=True)
rpn_match = Variable(rpn_match, volatile=True)
rpn_bbox = Variable(rpn_bbox, volatile=True)
gt_class_ids = Variable(gt_class_ids, volatile=True)
gt_boxes = Variable(gt_boxes, volatile=True)
gt_masks = Variable(gt_masks, volatile=True)
if self.config.GPU_COUNT:
images = images.cuda()
rpn_match = rpn_match.cuda()
rpn_bbox = rpn_bbox.cuda()
gt_class_ids = gt_class_ids.cuda()
gt_boxes = gt_boxes.cuda()
gt_masks = gt_masks.cuda()
molded_images = [images, image_metas, gt_class_ids, gt_boxes, gt_masks][0]
image_metas = [images, image_metas, gt_class_ids, gt_boxes, gt_masks][1]
if 'training' == 'inference':
self.eval()
elif 'training' == 'training':
self.train()
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
self.apply(set_bn_eval)
[p2_out, p3_out, p4_out, p5_out, p6_out] = self.fpn(molded_images)
rpn_feature_maps = [p2_out, p3_out, p4_out, p5_out, p6_out]
mrcnn_feature_maps = [p2_out, p3_out, p4_out, p5_out]
layer_outputs = []
for p in rpn_feature_maps:
layer_outputs.append(self.rpn(p))
outputs = list(zip(*layer_outputs))
outputs = [torch.cat(list(o), dim=1) for o in outputs]
(rpn_class_logits, rpn_class, rpn_bbox) = outputs
proposal_count = self.config.POST_NMS_ROIS_TRAINING if 'training' == 'training' else self.config.POST_NMS_ROIS_INFERENCE
rpn_rois = proposal_layer([rpn_class, rpn_bbox], proposal_count=proposal_count, nms_threshold=self.config.RPN_NMS_THRESHOLD, anchors=self.anchors, config=self.config)
if 'training' == 'inference':
(mrcnn_class_logits, mrcnn_class, mrcnn_bbox) = self.classifier(mrcnn_feature_maps, rpn_rois)
detections = detection_layer(self.config, rpn_rois, mrcnn_class, mrcnn_bbox, image_metas)
if detections is None:
(rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask) = [None, None]
(h, w) = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
detection_boxes = detections[:, :4] / scale
detection_boxes = detection_boxes.unsqueeze(0)
mrcnn_mask = self.mask(mrcnn_feature_maps, detection_boxes)
detections = detections.unsqueeze(0)
mrcnn_mask = mrcnn_mask.unsqueeze(0)
(rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask) = [detections, mrcnn_mask]
elif 'training' == 'training':
gt_class_ids = [images, image_metas, gt_class_ids, gt_boxes, gt_masks][2]
gt_boxes = [images, image_metas, gt_class_ids, gt_boxes, gt_masks][3]
gt_masks = [images, image_metas, gt_class_ids, gt_boxes, gt_masks][4]
(h, w) = self.config.IMAGE_SHAPE[:2]
scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)
if self.config.GPU_COUNT:
scale = scale.cuda()
gt_boxes = gt_boxes / scale
(rois, target_class_ids, target_deltas, target_mask) = detection_target_layer(rpn_rois, gt_class_ids, gt_boxes, gt_masks, self.config)
if not rois.size():
mrcnn_class_logits = Variable(torch.FloatTensor())
mrcnn_class = Variable(torch.IntTensor())
mrcnn_bbox = Variable(torch.FloatTensor())
mrcnn_mask = Variable(torch.FloatTensor())
if self.config.GPU_COUNT:
mrcnn_class_logits = mrcnn_class_logits.cuda()
mrcnn_class = mrcnn_class.cuda()
mrcnn_bbox = mrcnn_bbox.cuda()
mrcnn_mask = mrcnn_mask.cuda()
else:
(mrcnn_class_logits, mrcnn_class, mrcnn_bbox) = self.classifier(mrcnn_feature_maps, rois)
mrcnn_mask = self.mask(mrcnn_feature_maps, rois)
(rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask) = [rpn_class_logits, rpn_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask]
if not target_class_ids.size():
continue
rpn_class_loss = compute_rpn_class_loss(rpn_match, rpn_class_logits)
rpn_bbox_loss = compute_rpn_bbox_loss(rpn_bbox, rpn_match, rpn_pred_bbox)
mrcnn_class_loss = compute_mrcnn_class_loss(target_class_ids, mrcnn_class_logits)
mrcnn_bbox_loss = compute_mrcnn_bbox_loss(target_deltas, target_class_ids, mrcnn_bbox)
mrcnn_mask_loss = compute_mrcnn_mask_loss(target_mask, target_class_ids, mrcnn_mask)
(rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss) = [rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss]
loss = rpn_class_loss + rpn_bbox_loss + mrcnn_class_loss + mrcnn_bbox_loss + mrcnn_mask_loss
loss_sum += loss.data.cpu()[0] / steps
loss_rpn_class_sum += rpn_class_loss.data.cpu()[0] / steps
loss_rpn_bbox_sum += rpn_bbox_loss.data.cpu()[0] / steps
loss_mrcnn_class_sum += mrcnn_class_loss.data.cpu()[0] / steps
loss_mrcnn_bbox_sum += mrcnn_bbox_loss.data.cpu()[0] / steps
loss_mrcnn_mask_sum += mrcnn_mask_loss.data.cpu()[0] / steps
if step == steps - 1:
percent = ('{0:.' + str(decimals) + 'f}').format(100 * (step + 1 / float(steps)))
filledLength = int(10 * step + 1 // steps)
bar = fill * filledLength + '-' * (10 - filledLength)
sys.stdout.write('{}|{}|{}|{}\n'.format('\t{}/{}'.format(step + 1, steps), bar, percent, 'Complete - loss: {:.5f} - rpn_class_loss: {:.5f} - rpn_bbox_loss: {:.5f} - mrcnn_class_loss: {:.5f} - mrcnn_bbox_loss: {:.5f} - mrcnn_mask_loss: {:.5f}'.format(loss_sum, loss_rpn_class_sum, loss_rpn_bbox_sum, loss_mrcnn_class_sum, loss_mrcnn_bbox_sum, loss_mrcnn_mask_sum)))
sys.stdout.flush()
if step + 1 == steps:
print()
break
step += 1
return (loss_sum, loss_rpn_class_sum, loss_rpn_bbox_sum, loss_mrcnn_class_sum, loss_mrcnn_bbox_sum, loss_mrcnn_mask_sum)
|
cvpr-2018-autonomous-driving-autopilot-solution
|
positive
|
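The validation loop above prints a text progress bar; note that 100 * (step + 1 / float(steps)) binds as step + (1 / steps) because of operator precedence, and int(10 * step + 1 // steps) has the same issue. A small corrected sketch of the intended bar, with hypothetical names:

import sys

# Plain-text progress bar in the spirit of the loop above, with the
# fraction computed as (step + 1) / steps rather than step + (1 / steps).
def print_progress(step, steps, width=10, fill="#", suffix=""):
    fraction = (step + 1) / float(steps)
    filled = int(width * fraction)
    bar = fill * filled + "-" * (width - filled)
    sys.stdout.write(f"\t{step + 1}/{steps} |{bar}| {100 * fraction:.1f}% {suffix}\n")
    sys.stdout.flush()

for step in range(4):
    print_progress(step, 4, suffix="loss: 0.123")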
def ping(self):
<DeepExtract>
self._putline('echo')
line = self._getline(timeout=1.0)
if line.startswith('OK '):
return line[3:]
if 'unknown command' in line:
self.close()
raise IndexClientError(line)
</DeepExtract>
return True
|
def ping(self):
self._putline('echo')
line = self._getline(timeout=1.0)
if line.startswith('OK '):
return line[3:]
if 'unknown command' in line:
self.close()
raise IndexClientError(line)
return True
|
acoustid-server
|
positive
|
def testClass1Header(self):
<DeepExtract>
m = pyext.Module(f'fq.py.{private}path')
ns = 'clif::name::'
w = ns + 'wrapper'
m.types = [types.ClassType('c::name::cpp_name', 'py.path', w, w + '_Type', ns, can_copy=False, can_move=False, can_destruct=True, virtual='', ns='c::name', suppress_shared_ptr_const_conversion=False)]
if u_ns is not None:
m.types.append(types.ClassType('other::cpp_name', 'py.path.u', w, w + '_Type', ns, can_copy=False, can_move=False, can_destruct=True, virtual='', ns=u_ns, suppress_shared_ptr_const_conversion=False))
header = '\n'.join(m.GenerateHeader('fq/py/my.clif', 'fq/my.h', {}, is_extended_from_python)) + '\n'
</DeepExtract>
self.assertMultiLineEqual(header, textwrap.dedent(' //////////////////////////////////////////////////////////////////////\n // This file was automatically generated by PyCLIF.\n // Version 0.3\n //////////////////////////////////////////////////////////////////////\n // source: fq/py/my.clif\n\n #include <memory>\n #include "absl/types/optional.h"\n #include "fq/my.h"\n #include "clif/python/postconv.h"\n\n namespace c { namespace name {\n using namespace ::clif;\n\n // CLIF use `c::name::cpp_name` as py.path\n bool Clif_PyObjAs(PyObject* input, c::name::cpp_name** output);\n bool Clif_PyObjAs(PyObject* input, std::shared_ptr<c::name::cpp_name>* output);\n bool Clif_PyObjAs(PyObject* input, std::shared_ptr<const c::name::cpp_name>* output);\n bool Clif_PyObjAs(PyObject* input, std::unique_ptr<c::name::cpp_name>* output);\n PyObject* Clif_PyObjFrom(c::name::cpp_name*, py::PostConv);\n PyObject* Clif_PyObjFrom(std::shared_ptr<c::name::cpp_name>, py::PostConv);\n PyObject* Clif_PyObjFrom(std::shared_ptr<const c::name::cpp_name>, py::PostConv);\n PyObject* Clif_PyObjFrom(std::unique_ptr<c::name::cpp_name>, py::PostConv);\n template<typename T>\n typename std::enable_if<std::is_same<T, c::name::cpp_name>::value>::type Clif_PyObjFrom(const c::name::cpp_name*, py::PostConv) = delete;\n template<typename T>\n typename std::enable_if<std::is_same<T, c::name::cpp_name>::value>::type Clif_PyObjFrom(const c::name::cpp_name&, py::PostConv) = delete;\n\n } } // namespace c::name\n\n // CLIF init_module if (PyObject* m = PyImport_ImportModule("fq.py.path")) Py_DECREF(m);\n // CLIF init_module else goto err;\n '))
|
def testClass1Header(self):
m = pyext.Module(f'fq.py.{private}path')
ns = 'clif::name::'
w = ns + 'wrapper'
m.types = [types.ClassType('c::name::cpp_name', 'py.path', w, w + '_Type', ns, can_copy=False, can_move=False, can_destruct=True, virtual='', ns='c::name', suppress_shared_ptr_const_conversion=False)]
if u_ns is not None:
m.types.append(types.ClassType('other::cpp_name', 'py.path.u', w, w + '_Type', ns, can_copy=False, can_move=False, can_destruct=True, virtual='', ns=u_ns, suppress_shared_ptr_const_conversion=False))
header = '\n'.join(m.GenerateHeader('fq/py/my.clif', 'fq/my.h', {}, is_extended_from_python)) + '\n'
self.assertMultiLineEqual(header, textwrap.dedent(' //////////////////////////////////////////////////////////////////////\n // This file was automatically generated by PyCLIF.\n // Version 0.3\n //////////////////////////////////////////////////////////////////////\n // source: fq/py/my.clif\n\n #include <memory>\n #include "absl/types/optional.h"\n #include "fq/my.h"\n #include "clif/python/postconv.h"\n\n namespace c { namespace name {\n using namespace ::clif;\n\n // CLIF use `c::name::cpp_name` as py.path\n bool Clif_PyObjAs(PyObject* input, c::name::cpp_name** output);\n bool Clif_PyObjAs(PyObject* input, std::shared_ptr<c::name::cpp_name>* output);\n bool Clif_PyObjAs(PyObject* input, std::shared_ptr<const c::name::cpp_name>* output);\n bool Clif_PyObjAs(PyObject* input, std::unique_ptr<c::name::cpp_name>* output);\n PyObject* Clif_PyObjFrom(c::name::cpp_name*, py::PostConv);\n PyObject* Clif_PyObjFrom(std::shared_ptr<c::name::cpp_name>, py::PostConv);\n PyObject* Clif_PyObjFrom(std::shared_ptr<const c::name::cpp_name>, py::PostConv);\n PyObject* Clif_PyObjFrom(std::unique_ptr<c::name::cpp_name>, py::PostConv);\n template<typename T>\n typename std::enable_if<std::is_same<T, c::name::cpp_name>::value>::type Clif_PyObjFrom(const c::name::cpp_name*, py::PostConv) = delete;\n template<typename T>\n typename std::enable_if<std::is_same<T, c::name::cpp_name>::value>::type Clif_PyObjFrom(const c::name::cpp_name&, py::PostConv) = delete;\n\n } } // namespace c::name\n\n // CLIF init_module if (PyObject* m = PyImport_ImportModule("fq.py.path")) Py_DECREF(m);\n // CLIF init_module else goto err;\n '))
|
clif
|
positive
|
def get_frame(fname):
if type(fname) == str:
path_out = fname
outname = os.path.basename(path_out)
inname = outname.replace('out', 'in')
path_in = os.path.join(os.path.dirname(path_out), inname)
elif type(fname) == list and len(fname) == 2:
path_in = fname[0]
path_out = fname[1]
else:
raise RuntimeError('invalid input')
with open(path_out, 'r') as fp:
outlines = fp.read().split('\n')
with open(path_in, 'r') as fp:
inlines = fp.read().split('\n')
<DeepExtract>
ret = []
for (idx, ii) in enumerate(inlines):
if 'ibrav' in ii:
break
blk = inlines[idx:idx + 2]
ibrav = int(blk[0].replace(',', '').split('=')[-1])
if ibrav == 0:
for iline in inlines:
if 'CELL_PARAMETERS' in iline and 'angstrom' not in iline.lower():
raise RuntimeError('CELL_PARAMETERS must be written in Angstrom. Other units are not supported yet.')
blk = get_block(inlines, 'CELL_PARAMETERS')
for ii in blk:
ret.append([float(jj) for jj in ii.split()[0:3]])
ret = np.array(ret)
elif ibrav == 1:
a = None
for iline in inlines:
line = iline.replace('=', ' ').replace(',', '').split()
if len(line) >= 2 and 'a' == line[0]:
a = float(line[1])
if len(line) >= 2 and 'celldm(1)' == line[0]:
a = float(line[1]) * bohr2ang
if not a:
raise RuntimeError("parameter 'a' or 'celldm(1)' cannot be found.")
ret = np.array([[a, 0.0, 0.0], [0.0, a, 0.0], [0.0, 0.0, a]])
else:
sys.exit('ibrav > 1 not supported yet.')
cell = ret
</DeepExtract>
<DeepExtract>
coord = []
atom_symbol_list = []
for iline in inlines:
if 'ATOMIC_POSITIONS' in iline and ('angstrom' not in iline.lower() and 'crystal' not in iline.lower()):
raise RuntimeError('ATOMIC_POSITIONS must be written in Angstrom or crystal. Other units are not supported yet.')
if 'ATOMIC_POSITIONS' in iline and 'angstrom' in iline.lower():
blk = get_block(inlines, 'ATOMIC_POSITIONS')
for ii in blk:
coord.append([float(jj) for jj in ii.split()[1:4]])
atom_symbol_list.append(ii.split()[0])
coord = np.array(coord)
elif 'ATOMIC_POSITIONS' in iline and 'crystal' in iline.lower():
blk = get_block(inlines, 'ATOMIC_POSITIONS')
for ii in blk:
coord.append([float(jj) for jj in ii.split()[1:4]])
atom_symbol_list.append(ii.split()[0])
coord = np.array(coord)
coord = np.matmul(coord, cell)
atom_symbol_list = np.array(atom_symbol_list)
(tmp_names, symbol_idx) = np.unique(atom_symbol_list, return_index=True)
atom_types = []
atom_numbs = []
atom_names = atom_symbol_list[np.sort(symbol_idx)]
for jj in atom_symbol_list:
for (idx, ii) in enumerate(atom_names):
if jj == ii:
atom_types.append(idx)
for idx in range(len(atom_names)):
atom_numbs.append(atom_types.count(idx))
atom_types = np.array(atom_types)
(atom_names, natoms, types, coords) = (list(atom_names), atom_numbs, atom_types, coord)
</DeepExtract>
<DeepExtract>
energy = None
for ii in outlines:
if '! total energy' in ii:
energy = ry2ev * float(ii.split('=')[1].split()[0])
energy = energy
</DeepExtract>
<DeepExtract>
blk = get_block(outlines, 'Forces acting on atoms', skip=1)
ret = []
for ii in blk:
ret.append([float(jj) for jj in ii.split('=')[1].split()])
ret = np.array(ret)
ret *= ry2ev / bohr2ang
force = ret
</DeepExtract>
stress = get_stress(outlines) * np.linalg.det(cell)
return (atom_names, natoms, types, cell[np.newaxis, :, :], coords[np.newaxis, :, :], np.array(energy)[np.newaxis], force[np.newaxis, :, :], stress[np.newaxis, :, :])
|
def get_frame(fname):
if type(fname) == str:
path_out = fname
outname = os.path.basename(path_out)
inname = outname.replace('out', 'in')
path_in = os.path.join(os.path.dirname(path_out), inname)
elif type(fname) == list and len(fname) == 2:
path_in = fname[0]
path_out = fname[1]
else:
raise RuntimeError('invalid input')
with open(path_out, 'r') as fp:
outlines = fp.read().split('\n')
with open(path_in, 'r') as fp:
inlines = fp.read().split('\n')
ret = []
for (idx, ii) in enumerate(inlines):
if 'ibrav' in ii:
break
blk = inlines[idx:idx + 2]
ibrav = int(blk[0].replace(',', '').split('=')[-1])
if ibrav == 0:
for iline in inlines:
if 'CELL_PARAMETERS' in iline and 'angstrom' not in iline.lower():
raise RuntimeError('CELL_PARAMETERS must be written in Angstrom. Other units are not supported yet.')
blk = get_block(inlines, 'CELL_PARAMETERS')
for ii in blk:
ret.append([float(jj) for jj in ii.split()[0:3]])
ret = np.array(ret)
elif ibrav == 1:
a = None
for iline in inlines:
line = iline.replace('=', ' ').replace(',', '').split()
if len(line) >= 2 and 'a' == line[0]:
a = float(line[1])
if len(line) >= 2 and 'celldm(1)' == line[0]:
a = float(line[1]) * bohr2ang
if not a:
raise RuntimeError("parameter 'a' or 'celldm(1)' cannot be found.")
ret = np.array([[a, 0.0, 0.0], [0.0, a, 0.0], [0.0, 0.0, a]])
else:
sys.exit('ibrav > 1 not supported yet.')
cell = ret
coord = []
atom_symbol_list = []
for iline in inlines:
if 'ATOMIC_POSITIONS' in iline and ('angstrom' not in iline.lower() and 'crystal' not in iline.lower()):
raise RuntimeError('ATOMIC_POSITIONS must be written in Angstrom or crystal. Other units are not supported yet.')
if 'ATOMIC_POSITIONS' in iline and 'angstrom' in iline.lower():
blk = get_block(inlines, 'ATOMIC_POSITIONS')
for ii in blk:
coord.append([float(jj) for jj in ii.split()[1:4]])
atom_symbol_list.append(ii.split()[0])
coord = np.array(coord)
elif 'ATOMIC_POSITIONS' in iline and 'crystal' in iline.lower():
blk = get_block(inlines, 'ATOMIC_POSITIONS')
for ii in blk:
coord.append([float(jj) for jj in ii.split()[1:4]])
atom_symbol_list.append(ii.split()[0])
coord = np.array(coord)
coord = np.matmul(coord, cell)
atom_symbol_list = np.array(atom_symbol_list)
(tmp_names, symbol_idx) = np.unique(atom_symbol_list, return_index=True)
atom_types = []
atom_numbs = []
atom_names = atom_symbol_list[np.sort(symbol_idx)]
for jj in atom_symbol_list:
for (idx, ii) in enumerate(atom_names):
if jj == ii:
atom_types.append(idx)
for idx in range(len(atom_names)):
atom_numbs.append(atom_types.count(idx))
atom_types = np.array(atom_types)
(atom_names, natoms, types, coords) = (list(atom_names), atom_numbs, atom_types, coord)
energy = None
for ii in outlines:
if '! total energy' in ii:
energy = ry2ev * float(ii.split('=')[1].split()[0])
energy = energy
blk = get_block(outlines, 'Forces acting on atoms', skip=1)
ret = []
for ii in blk:
ret.append([float(jj) for jj in ii.split('=')[1].split()])
ret = np.array(ret)
ret *= ry2ev / bohr2ang
force = ret
stress = get_stress(outlines) * np.linalg.det(cell)
return (atom_names, natoms, types, cell[np.newaxis, :, :], coords[np.newaxis, :, :], np.array(energy)[np.newaxis], force[np.newaxis, :, :], stress[np.newaxis, :, :])
|
dpdata
|
positive
|
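The dpdata parser above reads the lattice constant from a Quantum ESPRESSO input, accepting either a (in Angstrom) or celldm(1) (in Bohr). A reduced sketch of that lookup; the Bohr-to-Angstrom constant below is a CODATA value and may differ slightly from the parser's own bohr2ang:

# Find "a = ..." (Angstrom) or "celldm(1) = ..." (Bohr) in QE input lines.
BOHR2ANG = 0.52917721067  # CODATA Bohr radius in Angstrom (illustrative constant)

def lattice_constant_angstrom(inlines):
    for raw in inlines:
        tokens = raw.replace("=", " ").replace(",", "").split()
        if len(tokens) >= 2 and tokens[0] == "a":
            return float(tokens[1])
        if len(tokens) >= 2 and tokens[0] == "celldm(1)":
            return float(tokens[1]) * BOHR2ANG
    raise RuntimeError("parameter 'a' or 'celldm(1)' cannot be found.")

print(lattice_constant_angstrom(["  ibrav = 1,", "  celldm(1) = 10.0,"]))  # ~5.29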
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter('./runs/{}'.format(args.output_dir.split('/')[-1]))
(train_dataloader, args) = get_dataloader(train_dataset, tokenizer, args)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
total_batch_size = args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)
(optimizer, scheduler) = get_optimizer_scheduler(args, model, t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
(model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
logger.info('***** Running training *****')
logger.info(' Num examples = {}'.format(len(train_dataset)))
logger.info(' Num Epochs = {}'.format(args.num_train_epochs))
logger.info(' Instantaneous batch size per GPU = {}'.format(args.per_gpu_train_batch_size))
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = {}'.format(total_batch_size))
logger.info(' Gradient Accumulation steps = {}'.format(args.gradient_accumulation_steps))
logger.info(' Total optimization steps = {}'.format(t_total))
<DeepExtract>
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
if args.model_name_or_path and os.path.exists(args.model_name_or_path):
try:
checkpoint_suffix = args.model_name_or_path.split('-')[-1].split('/')[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(' Continuing training from checkpoint, will skip to saved global_step')
logger.info(' Continuing training from epoch %d', epochs_trained)
logger.info(' Continuing training from global step %d', global_step)
logger.info(' Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
except ValueError:
logger.info(' Starting fine-tuning.')
(global_step, epochs_trained, steps_trained_in_current_epoch) = (global_step, epochs_trained, steps_trained_in_current_epoch)
</DeepExtract>
(tr_loss, logging_loss) = (0.0, 0.0)
model_to_resize = model.module if hasattr(model, 'module') else model
model_to_resize.resize_token_embeddings(len(tokenizer))
model.zero_grad()
train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch', disable=args.local_rank not in [-1, 0])
best_dev_perp = 10000.0
for _ in train_iterator:
<DeepExtract>
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])
cur_dev_perp = None
for (step, batch) in enumerate(epoch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
(inputs, labels) = (batch, batch)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
model.train()
outputs = model(inputs, labels=labels)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and (global_step % args.logging_steps == 0):
if args.local_rank == -1 and args.evaluate_during_training:
results = evaluate(args, model, tokenizer)
for (key, value) in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
cur_dev_perp = results['perplexity'].item()
print('Result-ckpt{}:\n'.format(global_step))
print('Perplexity: {}'.format(cur_dev_perp))
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and (global_step % args.save_steps == 0):
if args.evaluate_during_training:
save_checkpoint(model, optimizer, scheduler, tokenizer, args, global_step)
if cur_dev_perp < best_dev_perp:
output_dir = os.path.join(args.output_dir, 'best_dev_checkpoint')
os.makedirs(output_dir, exist_ok=True)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
print('Updated BEST model\nOLD perplexity: {}\nNEW perplexity: {}'.format(best_dev_perp, cur_dev_perp))
best_dev_perp = cur_dev_perp
print('----------------------------------------------------------')
print('')
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
(model, optimizer, scheduler, global_step, tr_loss, logging_loss, best_dev_perp) = (model, optimizer, scheduler, global_step, tr_loss, logging_loss, best_dev_perp)
</DeepExtract>
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return (global_step, tr_loss / global_step)
|
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter('./runs/{}'.format(args.output_dir.split('/')[-1]))
(train_dataloader, args) = get_dataloader(train_dataset, tokenizer, args)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
total_batch_size = args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)
(optimizer, scheduler) = get_optimizer_scheduler(args, model, t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
(model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
logger.info('***** Running training *****')
logger.info(' Num examples = {}'.format(len(train_dataset)))
logger.info(' Num Epochs = {}'.format(args.num_train_epochs))
logger.info(' Instantaneous batch size per GPU = {}'.format(args.per_gpu_train_batch_size))
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = {}'.format(total_batch_size))
logger.info(' Gradient Accumulation steps = {}'.format(args.gradient_accumulation_steps))
logger.info(' Total optimization steps = {}'.format(t_total))
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
if args.model_name_or_path and os.path.exists(args.model_name_or_path):
try:
checkpoint_suffix = args.model_name_or_path.split('-')[-1].split('/')[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(' Continuing training from checkpoint, will skip to saved global_step')
logger.info(' Continuing training from epoch %d', epochs_trained)
logger.info(' Continuing training from global step %d', global_step)
logger.info(' Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
except ValueError:
logger.info(' Starting fine-tuning.')
(global_step, epochs_trained, steps_trained_in_current_epoch) = (global_step, epochs_trained, steps_trained_in_current_epoch)
(tr_loss, logging_loss) = (0.0, 0.0)
model_to_resize = model.module if hasattr(model, 'module') else model
model_to_resize.resize_token_embeddings(len(tokenizer))
model.zero_grad()
train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch', disable=args.local_rank not in [-1, 0])
best_dev_perp = 10000.0
for _ in train_iterator:
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])
cur_dev_perp = None
for (step, batch) in enumerate(epoch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
(inputs, labels) = (batch, batch)
inputs = inputs.to(args.device)
labels = labels.to(args.device)
model.train()
outputs = model(inputs, labels=labels)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and (global_step % args.logging_steps == 0):
if args.local_rank == -1 and args.evaluate_during_training:
results = evaluate(args, model, tokenizer)
for (key, value) in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
cur_dev_perp = results['perplexity'].item()
print('Result-ckpt{}:\n'.format(global_step))
print('Perplexity: {}'.format(cur_dev_perp))
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and (global_step % args.save_steps == 0):
if args.evaluate_during_training:
save_checkpoint(model, optimizer, scheduler, tokenizer, args, global_step)
if cur_dev_perp < best_dev_perp:
output_dir = os.path.join(args.output_dir, 'best_dev_checkpoint')
os.makedirs(output_dir, exist_ok=True)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
print('Updated BEST model\nOLD perplexity: {}\nNEW perplexity: {}'.format(best_dev_perp, cur_dev_perp))
best_dev_perp = cur_dev_perp
print('----------------------------------------------------------')
print('')
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
(model, optimizer, scheduler, global_step, tr_loss, logging_loss, best_dev_perp) = (model, optimizer, scheduler, global_step, tr_loss, logging_loss, best_dev_perp)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return (global_step, tr_loss / global_step)
|
coco-dst
|
positive
|
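The coco-dst training loop above scales the loss by gradient_accumulation_steps and only steps the optimizer every N batches. A minimal runnable sketch of that gradient-accumulation pattern, assuming PyTorch is installed and using a dummy linear model in place of the language model:

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accumulation_steps = 4

optimizer.zero_grad()
for step in range(16):
    x = torch.randn(8, 4)
    loss = model(x).pow(2).mean() / accumulation_steps  # scale so accumulated grads average
    loss.backward()                                     # gradients accumulate across micro-batches
    if (step + 1) % accumulation_steps == 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        optimizer.zero_grad()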
def _close_channel(frame_in):
"""Close Channel.
:param specification.Channel.Close frame_in: Channel Close frame.
:return:
"""
self.set_state(self.CLOSING)
if not self._connection.is_closed:
try:
<DeepExtract>
self.check_for_errors()
self._connection.write_frame(self.channel_id, specification.Channel.CloseOk())
</DeepExtract>
except AMQPError:
pass
self.remove_consumer_tag()
if self._inbound:
del self._inbound[:]
self.exceptions.append(AMQPChannelError('Channel %d was closed by remote server: %s' % (self._channel_id, try_utf8_decode(frame_in.reply_text)), reply_code=frame_in.reply_code))
self.set_state(self.CLOSED)
|
def _close_channel(frame_in):
"""Close Channel.
:param specification.Channel.Close frame_in: Channel Close frame.
:return:
"""
self.set_state(self.CLOSING)
if not self._connection.is_closed:
try:
self.check_for_errors()
self._connection.write_frame(self.channel_id, specification.Channel.CloseOk())
except AMQPError:
pass
self.remove_consumer_tag()
if self._inbound:
del self._inbound[:]
self.exceptions.append(AMQPChannelError('Channel %d was closed by remote server: %s' % (self._channel_id, try_utf8_decode(frame_in.reply_text)), reply_code=frame_in.reply_code))
self.set_state(self.CLOSED)
|
amqpstorm
|
positive
|
def start_target_system(target, init=False):
<DeepExtract>
igno = self.igno_centos + self.igno_opensuse + self.igno_ubuntu + self.igno_always
if self._show_all:
igno = self.igno_always
if self._force:
igno = []
logg.debug('ignored services filter for default.target:\n\t%s', igno)
default_target = target or self.get_default_target()
services = self.enabled_target_services(default_target, 'S', igno)
</DeepExtract>
<DeepExtract>
conf = self.sysinit_target()
self.write_status_from(conf, **status)
</DeepExtract>
<DeepExtract>
self.wait_system()
done = True
started_units = []
for unit in self.sortedAfter(services):
started_units.append(unit)
if not self.start_unit(unit):
done = False
if init:
logg.info('init-loop start')
sig = self.init_loop_until_stop(started_units)
logg.info('init-loop %s', sig)
for unit in reversed(started_units):
self.stop_unit(unit)
return done
</DeepExtract>
return services
|
def start_target_system(target, init=False):
igno = self.igno_centos + self.igno_opensuse + self.igno_ubuntu + self.igno_always
if self._show_all:
igno = self.igno_always
if self._force:
igno = []
logg.debug('ignored services filter for default.target:\n\t%s', igno)
default_target = target or self.get_default_target()
services = self.enabled_target_services(default_target, 'S', igno)
conf = self.sysinit_target()
self.write_status_from(conf, **status)
self.wait_system()
done = True
started_units = []
for unit in self.sortedAfter(services):
started_units.append(unit)
if not self.start_unit(unit):
done = False
if init:
logg.info('init-loop start')
sig = self.init_loop_until_stop(started_units)
logg.info('init-loop %s', sig)
for unit in reversed(started_units):
self.stop_unit(unit)
return done
return services
|
docker-systemctl-images
|
positive
|
@cli.command(name='test-clean')
@edm_option
@runtime_option
def test_clean(edm, runtime):
""" Run tests in a clean environment, cleaning up afterwards
"""
args = [f'--edm={edm}', f'--runtime={runtime}']
try:
<DeepExtract>
parameters = get_parameters(edm, runtime, environment)
edm_packages = ' '.join(edm_dependencies(runtime))
commands = ['{edm} environments create {environment} --force --version={runtime}', '{edm} install -y -e {environment} ' + edm_packages, '{edm} run -e {environment} -- python -m pip install -r ci-src-requirements.txt --no-deps', '{edm} run -e {environment} -- python -m pip install . --no-deps']
click.echo("Creating environment '{environment}'".format(**parameters))
execute(commands, parameters)
if source:
cmd_fmt = '{edm} plumbing remove-package --environment {environment} --force '
commands = [cmd_fmt + source_pkg for source_pkg in source_dependencies]
execute(commands, parameters)
source_pkgs = [github_url_fmt.format(pkg) for pkg in source_dependencies]
commands = ['python -m pip install {pkg} --no-deps'.format(pkg=pkg) for pkg in source_pkgs]
commands = ['{edm} run -e {environment} -- ' + command for command in commands]
execute(commands, parameters)
click.echo('Done install')
</DeepExtract>
<DeepExtract>
parameters = get_parameters(edm, runtime, environment)
environ = {}
environ['PYTHONUNBUFFERED'] = '1'
commands = ['{edm} run -e {environment} -- python -W default -m coverage run -p -m unittest discover -v apptools']
click.echo("Running tests in '{environment}'".format(**parameters))
with do_in_tempdir(files=['.coveragerc'], capture_files=['./.coverage*']):
os.environ.update(environ)
execute(commands, parameters)
click.echo('Done test')
</DeepExtract>
finally:
<DeepExtract>
parameters = get_parameters(edm, runtime, environment)
commands = ['{edm} environments remove {environment} --purge -y']
click.echo("Cleaning up environment '{environment}'".format(**parameters))
execute(commands, parameters)
click.echo('Done cleanup')
</DeepExtract>
|
@cli.command(name='test-clean')
@edm_option
@runtime_option
def test_clean(edm, runtime):
""" Run tests in a clean environment, cleaning up afterwards
"""
args = [f'--edm={edm}', f'--runtime={runtime}']
try:
parameters = get_parameters(edm, runtime, environment)
edm_packages = ' '.join(edm_dependencies(runtime))
commands = ['{edm} environments create {environment} --force --version={runtime}', '{edm} install -y -e {environment} ' + edm_packages, '{edm} run -e {environment} -- python -m pip install -r ci-src-requirements.txt --no-deps', '{edm} run -e {environment} -- python -m pip install . --no-deps']
click.echo("Creating environment '{environment}'".format(**parameters))
execute(commands, parameters)
if source:
cmd_fmt = '{edm} plumbing remove-package --environment {environment} --force '
commands = [cmd_fmt + source_pkg for source_pkg in source_dependencies]
execute(commands, parameters)
source_pkgs = [github_url_fmt.format(pkg) for pkg in source_dependencies]
commands = ['python -m pip install {pkg} --no-deps'.format(pkg=pkg) for pkg in source_pkgs]
commands = ['{edm} run -e {environment} -- ' + command for command in commands]
execute(commands, parameters)
click.echo('Done install')
parameters = get_parameters(edm, runtime, environment)
environ = {}
environ['PYTHONUNBUFFERED'] = '1'
commands = ['{edm} run -e {environment} -- python -W default -m coverage run -p -m unittest discover -v apptools']
click.echo("Running tests in '{environment}'".format(**parameters))
with do_in_tempdir(files=['.coveragerc'], capture_files=['./.coverage*']):
os.environ.update(environ)
execute(commands, parameters)
click.echo('Done test')
finally:
parameters = get_parameters(edm, runtime, environment)
commands = ['{edm} environments remove {environment} --purge -y']
click.echo("Cleaning up environment '{environment}'".format(**parameters))
execute(commands, parameters)
click.echo('Done cleanup')
|
apptools
|
positive
|
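The execute() and get_parameters() helpers used above are not part of this record. The sketch below shows one plausible shape for execute(), formatting each '{edm} ...' template with the parameter dict and running it; it is an assumption for illustration, not the apptools CI tooling.

import shlex
import subprocess

def execute(commands, parameters):
    """Format each command template with `parameters` and run it,
    aborting on the first non-zero exit code."""
    for template in commands:
        cmd = template.format(**parameters)
        print('[running]', cmd)
        subprocess.run(shlex.split(cmd), check=True)

params = {'edm': 'edm', 'environment': 'apptools-test-3.8', 'runtime': '3.8'}
# execute(['{edm} environments create {environment} --force --version={runtime}'], params)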
def extract_valid_response(candidate_response: str, num_choices: int, input_prefix: str, output_prefix: str, choice_prefix: str='choice:') -> List[Dict[str, Any]]:
"""Extract generative samples from candidate_response."""
(candidate_response, output_prefix) = (candidate_response.lower(), output_prefix.lower())
(input_prefix, choice_prefix) = (input_prefix.lower(), choice_prefix.lower())
words_to_match = [input_prefix] + [choice_prefix] * num_choices + [output_prefix, input_prefix]
candidate_response += input_prefix
valid_matches = re.findall('.*?'.join(words_to_match), candidate_response, overlapped=True)
valid_matches = [match[:-len(input_prefix)] for match in valid_matches]
def parse_match(match):
if len(match.split(input_prefix)) != 2 or len(match.split(output_prefix)) != 2:
return None
split_output = match.split(output_prefix)
output_prompt = split_output[-1].split(input_prefix)[0].strip()
split_choice = split_output[0].split(choice_prefix)
input_prompt = split_choice[0].split(input_prefix)[-1].strip()
choices = list(set([choice.strip() for choice in split_choice[1:]]))
if output_prompt not in choices or len(choices) != num_choices:
return None
return {'input': input_prompt, 'target': output_prompt, 'choice': choices}
new_samples = []
for valid_match in valid_matches:
<DeepExtract>
if len(valid_match.split(input_prefix)) != 2 or len(valid_match.split(output_prefix)) != 2:
parsed_match = None
split_output = valid_match.split(output_prefix)
output_prompt = split_output[-1].split(input_prefix)[0].strip()
split_choice = split_output[0].split(choice_prefix)
input_prompt = split_choice[0].split(input_prefix)[-1].strip()
choices = list(set([choice.strip() for choice in split_choice[1:]]))
if output_prompt not in choices or len(choices) != num_choices:
parsed_match = None
parsed_match = {'input': input_prompt, 'target': output_prompt, 'choice': choices}
</DeepExtract>
if parsed_match:
target_scores = {choice: 0 for choice in parsed_match['choice']}
clean_target = parsed_match['target'].split(output_prefix)[-1].strip()
assert clean_target in parsed_match['choice']
target_scores[clean_target] = 1
parsed_match['target_scores'] = target_scores
parsed_match['raw_response'] = candidate_response
new_samples.append(parsed_match)
return new_samples
|
def extract_valid_response(candidate_response: str, num_choices: int, input_prefix: str, output_prefix: str, choice_prefix: str='choice:') -> List[Dict[str, Any]]:
"""Extract generative samples from candidate_response."""
(candidate_response, output_prefix) = (candidate_response.lower(), output_prefix.lower())
(input_prefix, choice_prefix) = (input_prefix.lower(), choice_prefix.lower())
words_to_match = [input_prefix] + [choice_prefix] * num_choices + [output_prefix, input_prefix]
candidate_response += input_prefix
valid_matches = re.findall('.*?'.join(words_to_match), candidate_response, overlapped=True)
valid_matches = [match[:-len(input_prefix)] for match in valid_matches]
def parse_match(match):
if len(match.split(input_prefix)) != 2 or len(match.split(output_prefix)) != 2:
return None
split_output = match.split(output_prefix)
output_prompt = split_output[-1].split(input_prefix)[0].strip()
split_choice = split_output[0].split(choice_prefix)
input_prompt = split_choice[0].split(input_prefix)[-1].strip()
choices = list(set([choice.strip() for choice in split_choice[1:]]))
if output_prompt not in choices or len(choices) != num_choices:
return None
return {'input': input_prompt, 'target': output_prompt, 'choice': choices}
new_samples = []
for valid_match in valid_matches:
if len(valid_match.split(input_prefix)) != 2 or len(valid_match.split(output_prefix)) != 2:
parsed_match = None
split_output = valid_match.split(output_prefix)
output_prompt = split_output[-1].split(input_prefix)[0].strip()
split_choice = split_output[0].split(choice_prefix)
input_prompt = split_choice[0].split(input_prefix)[-1].strip()
choices = list(set([choice.strip() for choice in split_choice[1:]]))
if output_prompt not in choices or len(choices) != num_choices:
parsed_match = None
parsed_match = {'input': input_prompt, 'target': output_prompt, 'choice': choices}
if parsed_match:
target_scores = {choice: 0 for choice in parsed_match['choice']}
clean_target = parsed_match['target'].split(output_prefix)[-1].strip()
assert clean_target in parsed_match['choice']
target_scores[clean_target] = 1
parsed_match['target_scores'] = target_scores
parsed_match['raw_response'] = candidate_response
new_samples.append(parsed_match)
return new_samples
|
BIG-bench
|
positive
|
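The overlapped matching above relies on the third-party regex package (imported as re in that codebase); the standard-library re.findall has no overlapped= keyword. A small standalone sketch of the same prefix pattern, with hypothetical prefixes:

import regex  # pip install regex; stdlib `re` does not accept overlapped=

input_prefix, choice_prefix, output_prefix = 'q:', 'choice:', 'a:'
num_choices = 2

# Same '.*?'-joined pattern as above: input, N choices, output, next input.
words = [input_prefix] + [choice_prefix] * num_choices + [output_prefix, input_prefix]
pattern = '.*?'.join(regex.escape(w) for w in words)

text = 'q: 2+2? choice: 3 choice: 4 a: 4 q: done'
text += input_prefix  # sentinel so the last sample is terminated, as above
matches = regex.findall(pattern, text, overlapped=True)
print([m[:-len(input_prefix)] for m in matches])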
def create_dashboards(settings: ComposeXSettings, x_stack: ComposeXStack, module: XResourceModule) -> None:
"""
Loop to iterate over dashboards definitions
:param ecs_composex.common.settings.ComposeXSettings settings:
:param ecs_composex.common.stacks.ComposeXStack x_stack:
:param ModManager module:
"""
if not keyisset(module.res_key, settings.compose_content):
LOG.error(f'No {module.res_key} defined')
dashboards = settings.compose_content[module.res_key]
for (name, dashboard) in dashboards.items():
widgets = []
if keyisset('Services', dashboard):
<DeepExtract>
services_params = []
families_original_names = [f.name for f in settings.families.values()]
for (name, service_def) in dashboard['Services'].items():
if name not in families_original_names:
LOG.warn(f'Service family {name} is not defined. Skipping')
continue
family = get_family_from_name(settings, name)
if family is None:
LOG.warn(f'Could not identify the {name} family in {families_original_names}')
continue
s_param = Parameter(f'{family.stack.title}{SERVICE_T}Name', Type='String')
if SERVICE_T not in family.template.outputs:
add_outputs(family.template, [Output(s_param.title, Value=GetAtt(SERVICE_T, 'Name'))])
x_stack.Parameters.update({s_param.title: GetAtt(family.stack.title, f'Outputs.{s_param.title}')})
services_params.append((family.stack.title, s_param))
add_parameters(x_stack.stack_template, [value[1] for value in services_params])
service_params = services_params
</DeepExtract>
y_index = 0
for param in service_params:
service_ecs_widgets = ServiceEcsWidget(param[0], param[1], CLUSTER_NAME, y_index=y_index)
widgets += service_ecs_widgets.widgets
y_index += service_ecs_widgets.height + 1
dashboard_body_header = {'start': '-PT12H', 'widgets': widgets}
dashboard_body = Sub(json.dumps(dashboard_body_header))
cfn_dashboard = CWDashboard(NONALPHANUM.sub('', name), DashboardBody=dashboard_body, DashboardName=Sub(f'${{StackName}}--{name}', StackName=define_stack_name(x_stack.template)))
x_stack.stack_template.add_resource(cfn_dashboard)
|
def create_dashboards(settings: ComposeXSettings, x_stack: ComposeXStack, module: XResourceModule) -> None:
"""
Loop to iterate over dashboards definitions
:param ecs_composex.common.settings.ComposeXSettings settings:
:param ecs_composex.common.stacks.ComposeXStack x_stack:
:param ModManager module:
"""
if not keyisset(module.res_key, settings.compose_content):
LOG.error(f'No {module.res_key} defined')
dashboards = settings.compose_content[module.res_key]
for (name, dashboard) in dashboards.items():
widgets = []
if keyisset('Services', dashboard):
services_params = []
families_original_names = [f.name for f in settings.families.values()]
for (name, service_def) in dashboard['Services'].items():
if name not in families_original_names:
LOG.warn(f'Service family {name} is not defined. Skipping')
continue
family = get_family_from_name(settings, name)
if family is None:
LOG.warn(f'Could not identify the {name} family in {families_original_names}')
continue
s_param = Parameter(f'{family.stack.title}{SERVICE_T}Name', Type='String')
if SERVICE_T not in family.template.outputs:
add_outputs(family.template, [Output(s_param.title, Value=GetAtt(SERVICE_T, 'Name'))])
x_stack.Parameters.update({s_param.title: GetAtt(family.stack.title, f'Outputs.{s_param.title}')})
services_params.append((family.stack.title, s_param))
add_parameters(x_stack.stack_template, [value[1] for value in services_params])
service_params = services_params
y_index = 0
for param in service_params:
service_ecs_widgets = ServiceEcsWidget(param[0], param[1], CLUSTER_NAME, y_index=y_index)
widgets += service_ecs_widgets.widgets
y_index += service_ecs_widgets.height + 1
dashboard_body_header = {'start': '-PT12H', 'widgets': widgets}
dashboard_body = Sub(json.dumps(dashboard_body_header))
cfn_dashboard = CWDashboard(NONALPHANUM.sub('', name), DashboardBody=dashboard_body, DashboardName=Sub(f'${{StackName}}--{name}', StackName=define_stack_name(x_stack.template)))
x_stack.stack_template.add_resource(cfn_dashboard)
|
ecs_composex
|
positive
|
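A minimal sketch of the vertical layout used above: each service block is placed below the previous one by advancing a y cursor by height + 1, and the result is serialised as a CloudWatch dashboard body. Widget contents and metric names are placeholders, not the project's real widget definitions.

import json

def stack_widgets(service_names, widget_height=6, width=12):
    """Lay service widgets out in one column, advancing y by height + 1."""
    widgets, y = [], 0
    for name in service_names:
        widgets.append({
            'type': 'metric',
            'x': 0, 'y': y, 'width': width, 'height': widget_height,
            'properties': {'title': f'{name} ECS', 'metrics': []},
        })
        y += widget_height + 1
    return {'start': '-PT12H', 'widgets': widgets}

print(json.dumps(stack_widgets(['web', 'worker']), indent=2))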
def get_mapping(self, types: Sequence[AnyType]) -> Mapping[str, AnyType]:
<DeepExtract>
mapping = {}
for tp in types:
discriminated = get_discriminated(self.alias, tp)
if not discriminated:
raise TypeError(f"{tp} can't be discriminated")
for key in discriminated:
mapping[key] = tp
default_mapping = mapping
</DeepExtract>
if self.mapping is default_discriminator_mapping:
return default_mapping
mapping = self.mapping(self.alias, types) if callable(self.mapping) else self.mapping
if self.override_implicit:
mapping_types = set(mapping.values())
mapping = dict(mapping)
for (key, tp) in default_mapping.items():
if tp not in mapping_types:
mapping[key] = tp
return mapping
else:
return {**default_mapping, **mapping}
|
def get_mapping(self, types: Sequence[AnyType]) -> Mapping[str, AnyType]:
mapping = {}
for tp in types:
discriminated = get_discriminated(self.alias, tp)
if not discriminated:
raise TypeError(f"{tp} can't be discriminated")
for key in discriminated:
mapping[key] = tp
default_mapping = mapping
if self.mapping is default_discriminator_mapping:
return default_mapping
mapping = self.mapping(self.alias, types) if callable(self.mapping) else self.mapping
if self.override_implicit:
mapping_types = set(mapping.values())
mapping = dict(mapping)
for (key, tp) in default_mapping.items():
if tp not in mapping_types:
mapping[key] = tp
return mapping
else:
return {**default_mapping, **mapping}
|
apischema
|
positive
|
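A standalone sketch of the merge performed above: explicit discriminator keys are layered over the implicit per-type defaults, and with override_implicit the implicit keys are kept only for types that the explicit mapping does not already cover. Class and variable names are illustrative.

from typing import Dict, Mapping

def merge_discriminator_mapping(default: Mapping[str, type],
                                explicit: Mapping[str, type],
                                override_implicit: bool) -> Dict[str, type]:
    if override_implicit:
        merged = dict(explicit)
        covered = set(explicit.values())
        for key, tp in default.items():
            if tp not in covered:   # keep implicit keys for uncovered types
                merged[key] = tp
        return merged
    return {**default, **explicit}  # explicit entries win on conflicting keys

class Cat: ...
class Dog: ...

default = {'Cat': Cat, 'Dog': Dog}
print(merge_discriminator_mapping(default, {'cat': Cat}, override_implicit=True))
print(merge_discriminator_mapping(default, {'cat': Cat}, override_implicit=False))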
def release(self):
"""
Clears any callbacks and calls :meth:`GPIO.release`
"""
self.logger.debug('releasing')
<DeepExtract>
for cb in self.callbacks:
try:
cb.cancel()
except:
self.logger.warning('could not cancel callback: {}'.format(cb))
self.callbacks = []
</DeepExtract>
super(Digital_In, self).release()
|
def release(self):
"""
Clears any callbacks and calls :meth:`GPIO.release`
"""
self.logger.debug('releasing')
for cb in self.callbacks:
try:
cb.cancel()
except:
self.logger.warning('could not cancel callback: {}'.format(cb))
self.callbacks = []
super(Digital_In, self).release()
|
autopilot
|
positive
|
def train_data(self, max_seq_len=512, dataset_size=None, epochs=1, mask_gen=None, debug=False, **kwargs):
if debug:
max_examples = 1000
else:
max_examples = None
<DeepExtract>
with open(os.path.join(self.data_dir, 'train.jsonl')) as fs:
data = [json.loads(l) for l in fs]
examples = []
for d in data:
passage = self.tokenizer.tokenize(d['passage'].strip())
question = self.tokenizer.tokenize(d['question'].strip())
label = None if 'label' not in d else self.label2id(str(d['label']).lower())
examples.append(ExampleInstance(segments=[passage, question], label=label))
def get_stats(l):
train = f'Max={max(l)}, min={min(l)}, avg={np.mean(l)}'
ctx_token_size = [len(e.segments[0]) for e in examples]
q_token_size = [len(e.segments[1]) for e in examples]
total_size = [len(e.segments[0]) + len(e.segments[1]) for e in examples]
logger.info(f'Context statistics: {get_stats(ctx_token_size)}, long={len([t for t in ctx_token_size if t > 500])}/{len(ctx_token_size)}')
logger.info(f'question statistics: {get_stats(q_token_size)}')
logger.info(f'Total statistics: {get_stats(total_size)}, long={len([t for t in total_size if t > 500])}')
train = examples
</DeepExtract>
examples = ExampleSet(train)
if dataset_size is None:
dataset_size = len(examples) * epochs
return DynamicDataset(examples, feature_fn=self.get_feature_fn(max_seq_len=max_seq_len, mask_gen=mask_gen), dataset_size=dataset_size, shuffle=True, **kwargs)
|
def train_data(self, max_seq_len=512, dataset_size=None, epochs=1, mask_gen=None, debug=False, **kwargs):
if debug:
max_examples = 1000
else:
max_examples = None
with open(os.path.join(self.data_dir, 'train.jsonl')) as fs:
data = [json.loads(l) for l in fs]
examples = []
for d in data:
passage = self.tokenizer.tokenize(d['passage'].strip())
question = self.tokenizer.tokenize(d['question'].strip())
label = None if 'label' not in d else self.label2id(str(d['label']).lower())
examples.append(ExampleInstance(segments=[passage, question], label=label))
def get_stats(l):
train = f'Max={max(l)}, min={min(l)}, avg={np.mean(l)}'
ctx_token_size = [len(e.segments[0]) for e in examples]
q_token_size = [len(e.segments[1]) for e in examples]
total_size = [len(e.segments[0]) + len(e.segments[1]) for e in examples]
logger.info(f'Context statistics: {get_stats(ctx_token_size)}, long={len([t for t in ctx_token_size if t > 500])}/{len(ctx_token_size)}')
logger.info(f'question statistics: {get_stats(q_token_size)}')
logger.info(f'Total statistics: {get_stats(total_size)}, long={len([t for t in total_size if t > 500])}')
train = examples
examples = ExampleSet(train)
if dataset_size is None:
dataset_size = len(examples) * epochs
return DynamicDataset(examples, feature_fn=self.get_feature_fn(max_seq_len=max_seq_len, mask_gen=mask_gen), dataset_size=dataset_size, shuffle=True, **kwargs)
|
DeBERTa
|
positive
|
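A rough standalone sketch of the token-length statistics gathered above, assuming a BoolQ-style .jsonl file and plain whitespace tokenization in place of the model tokenizer:

import json
import numpy as np

def length_stats(jsonl_path, tokenize=str.split):
    """Passage/question token-length statistics for a file of
    {"passage": ..., "question": ...} objects, one per line."""
    passages, questions = [], []
    with open(jsonl_path) as fs:
        for line in fs:
            d = json.loads(line)
            passages.append(len(tokenize(d['passage'].strip())))
            questions.append(len(tokenize(d['question'].strip())))

    def stats(xs):
        return f'max={max(xs)}, min={min(xs)}, avg={np.mean(xs):.1f}'

    return {'passage': stats(passages),
            'question': stats(questions),
            'long_passages': sum(1 for n in passages if n > 500)}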
def compute_metrics(true_named_entities, pred_named_entities, tags):
eval_metrics = {'correct': 0, 'incorrect': 0, 'partial': 0, 'missed': 0, 'spurious': 0, 'precision': 0, 'recall': 0}
evaluation = {'strict': deepcopy(eval_metrics), 'ent_type': deepcopy(eval_metrics), 'partial': deepcopy(eval_metrics), 'exact': deepcopy(eval_metrics)}
evaluation_agg_entities_type = {e: deepcopy(evaluation) for e in tags}
true_which_overlapped_with_pred = []
true_named_entities = [ent for ent in true_named_entities if ent.e_type in tags]
pred_named_entities = [ent for ent in pred_named_entities if ent.e_type in tags]
for pred in pred_named_entities:
found_overlap = False
if pred in true_named_entities:
true_which_overlapped_with_pred.append(pred)
evaluation['strict']['correct'] += 1
evaluation['ent_type']['correct'] += 1
evaluation['exact']['correct'] += 1
evaluation['partial']['correct'] += 1
evaluation_agg_entities_type[pred.e_type]['strict']['correct'] += 1
evaluation_agg_entities_type[pred.e_type]['ent_type']['correct'] += 1
evaluation_agg_entities_type[pred.e_type]['exact']['correct'] += 1
evaluation_agg_entities_type[pred.e_type]['partial']['correct'] += 1
else:
for true in true_named_entities:
pred_range = range(pred.start_offset, pred.end_offset)
true_range = range(true.start_offset, true.end_offset)
if true.start_offset == pred.start_offset and pred.end_offset == true.end_offset and (true.e_type != pred.e_type):
evaluation['strict']['incorrect'] += 1
evaluation['ent_type']['incorrect'] += 1
evaluation['partial']['correct'] += 1
evaluation['exact']['correct'] += 1
evaluation_agg_entities_type[true.e_type]['strict']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['ent_type']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['partial']['correct'] += 1
evaluation_agg_entities_type[true.e_type]['exact']['correct'] += 1
true_which_overlapped_with_pred.append(true)
found_overlap = True
break
elif find_overlap(true_range, pred_range):
true_which_overlapped_with_pred.append(true)
if pred.e_type == true.e_type:
evaluation['strict']['incorrect'] += 1
evaluation['ent_type']['correct'] += 1
evaluation['partial']['partial'] += 1
evaluation['exact']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['strict']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['ent_type']['correct'] += 1
evaluation_agg_entities_type[true.e_type]['partial']['partial'] += 1
evaluation_agg_entities_type[true.e_type]['exact']['incorrect'] += 1
found_overlap = True
break
else:
evaluation['strict']['incorrect'] += 1
evaluation['ent_type']['incorrect'] += 1
evaluation['partial']['partial'] += 1
evaluation['exact']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['strict']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['partial']['partial'] += 1
evaluation_agg_entities_type[true.e_type]['ent_type']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['exact']['incorrect'] += 1
found_overlap = True
break
if not found_overlap:
evaluation['strict']['spurious'] += 1
evaluation['ent_type']['spurious'] += 1
evaluation['partial']['spurious'] += 1
evaluation['exact']['spurious'] += 1
for true in tags:
evaluation_agg_entities_type[true]['strict']['spurious'] += 1
evaluation_agg_entities_type[true]['ent_type']['spurious'] += 1
evaluation_agg_entities_type[true]['partial']['spurious'] += 1
evaluation_agg_entities_type[true]['exact']['spurious'] += 1
for true in true_named_entities:
if true in true_which_overlapped_with_pred:
continue
else:
evaluation['strict']['missed'] += 1
evaluation['ent_type']['missed'] += 1
evaluation['partial']['missed'] += 1
evaluation['exact']['missed'] += 1
evaluation_agg_entities_type[true.e_type]['strict']['missed'] += 1
evaluation_agg_entities_type[true.e_type]['ent_type']['missed'] += 1
evaluation_agg_entities_type[true.e_type]['partial']['missed'] += 1
evaluation_agg_entities_type[true.e_type]['exact']['missed'] += 1
for eval_type in evaluation:
<DeepExtract>
correct = evaluation[eval_type]['correct']
incorrect = evaluation[eval_type]['incorrect']
partial = evaluation[eval_type]['partial']
missed = evaluation[eval_type]['missed']
spurious = evaluation[eval_type]['spurious']
possible = correct + incorrect + partial + missed
actual = correct + incorrect + partial + spurious
evaluation[eval_type]['actual'] = actual
evaluation[eval_type]['possible'] = possible
evaluation[eval_type] = evaluation[eval_type]
</DeepExtract>
for (entity_type, entity_level) in evaluation_agg_entities_type.items():
for eval_type in entity_level:
<DeepExtract>
correct = entity_level[eval_type]['correct']
incorrect = entity_level[eval_type]['incorrect']
partial = entity_level[eval_type]['partial']
missed = entity_level[eval_type]['missed']
spurious = entity_level[eval_type]['spurious']
possible = correct + incorrect + partial + missed
actual = correct + incorrect + partial + spurious
entity_level[eval_type]['actual'] = actual
entity_level[eval_type]['possible'] = possible
evaluation_agg_entities_type[entity_type][eval_type] = entity_level[eval_type]
</DeepExtract>
return (evaluation, evaluation_agg_entities_type)
|
def compute_metrics(true_named_entities, pred_named_entities, tags):
eval_metrics = {'correct': 0, 'incorrect': 0, 'partial': 0, 'missed': 0, 'spurious': 0, 'precision': 0, 'recall': 0}
evaluation = {'strict': deepcopy(eval_metrics), 'ent_type': deepcopy(eval_metrics), 'partial': deepcopy(eval_metrics), 'exact': deepcopy(eval_metrics)}
evaluation_agg_entities_type = {e: deepcopy(evaluation) for e in tags}
true_which_overlapped_with_pred = []
true_named_entities = [ent for ent in true_named_entities if ent.e_type in tags]
pred_named_entities = [ent for ent in pred_named_entities if ent.e_type in tags]
for pred in pred_named_entities:
found_overlap = False
if pred in true_named_entities:
true_which_overlapped_with_pred.append(pred)
evaluation['strict']['correct'] += 1
evaluation['ent_type']['correct'] += 1
evaluation['exact']['correct'] += 1
evaluation['partial']['correct'] += 1
evaluation_agg_entities_type[pred.e_type]['strict']['correct'] += 1
evaluation_agg_entities_type[pred.e_type]['ent_type']['correct'] += 1
evaluation_agg_entities_type[pred.e_type]['exact']['correct'] += 1
evaluation_agg_entities_type[pred.e_type]['partial']['correct'] += 1
else:
for true in true_named_entities:
pred_range = range(pred.start_offset, pred.end_offset)
true_range = range(true.start_offset, true.end_offset)
if true.start_offset == pred.start_offset and pred.end_offset == true.end_offset and (true.e_type != pred.e_type):
evaluation['strict']['incorrect'] += 1
evaluation['ent_type']['incorrect'] += 1
evaluation['partial']['correct'] += 1
evaluation['exact']['correct'] += 1
evaluation_agg_entities_type[true.e_type]['strict']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['ent_type']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['partial']['correct'] += 1
evaluation_agg_entities_type[true.e_type]['exact']['correct'] += 1
true_which_overlapped_with_pred.append(true)
found_overlap = True
break
elif find_overlap(true_range, pred_range):
true_which_overlapped_with_pred.append(true)
if pred.e_type == true.e_type:
evaluation['strict']['incorrect'] += 1
evaluation['ent_type']['correct'] += 1
evaluation['partial']['partial'] += 1
evaluation['exact']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['strict']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['ent_type']['correct'] += 1
evaluation_agg_entities_type[true.e_type]['partial']['partial'] += 1
evaluation_agg_entities_type[true.e_type]['exact']['incorrect'] += 1
found_overlap = True
break
else:
evaluation['strict']['incorrect'] += 1
evaluation['ent_type']['incorrect'] += 1
evaluation['partial']['partial'] += 1
evaluation['exact']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['strict']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['partial']['partial'] += 1
evaluation_agg_entities_type[true.e_type]['ent_type']['incorrect'] += 1
evaluation_agg_entities_type[true.e_type]['exact']['incorrect'] += 1
found_overlap = True
break
if not found_overlap:
evaluation['strict']['spurious'] += 1
evaluation['ent_type']['spurious'] += 1
evaluation['partial']['spurious'] += 1
evaluation['exact']['spurious'] += 1
for true in tags:
evaluation_agg_entities_type[true]['strict']['spurious'] += 1
evaluation_agg_entities_type[true]['ent_type']['spurious'] += 1
evaluation_agg_entities_type[true]['partial']['spurious'] += 1
evaluation_agg_entities_type[true]['exact']['spurious'] += 1
for true in true_named_entities:
if true in true_which_overlapped_with_pred:
continue
else:
evaluation['strict']['missed'] += 1
evaluation['ent_type']['missed'] += 1
evaluation['partial']['missed'] += 1
evaluation['exact']['missed'] += 1
evaluation_agg_entities_type[true.e_type]['strict']['missed'] += 1
evaluation_agg_entities_type[true.e_type]['ent_type']['missed'] += 1
evaluation_agg_entities_type[true.e_type]['partial']['missed'] += 1
evaluation_agg_entities_type[true.e_type]['exact']['missed'] += 1
for eval_type in evaluation:
correct = evaluation[eval_type]['correct']
incorrect = evaluation[eval_type]['incorrect']
partial = evaluation[eval_type]['partial']
missed = evaluation[eval_type]['missed']
spurious = evaluation[eval_type]['spurious']
possible = correct + incorrect + partial + missed
actual = correct + incorrect + partial + spurious
evaluation[eval_type]['actual'] = actual
evaluation[eval_type]['possible'] = possible
evaluation[eval_type] = evaluation[eval_type]
for (entity_type, entity_level) in evaluation_agg_entities_type.items():
for eval_type in entity_level:
correct = entity_level[eval_type]['correct']
incorrect = entity_level[eval_type]['incorrect']
partial = entity_level[eval_type]['partial']
missed = entity_level[eval_type]['missed']
spurious = entity_level[eval_type]['spurious']
possible = correct + incorrect + partial + missed
actual = correct + incorrect + partial + spurious
entity_level[eval_type]['actual'] = actual
entity_level[eval_type]['possible'] = possible
evaluation_agg_entities_type[entity_type][eval_type] = entity_level[eval_type]
return (evaluation, evaluation_agg_entities_type)
|
AiSpace
|
positive
|
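The loops above stop at the 'actual' and 'possible' totals. One common way to turn those counts into precision and recall, with optional half credit for partial matches in the SemEval-2013 style, is sketched below; the exact scoring convention used downstream is not shown in this record.

def precision_recall(counts, partial_credit=False):
    """Derive precision/recall from correct/incorrect/partial/missed/spurious."""
    actual = counts['correct'] + counts['incorrect'] + counts['partial'] + counts['spurious']
    possible = counts['correct'] + counts['incorrect'] + counts['partial'] + counts['missed']
    hit = counts['correct'] + (0.5 * counts['partial'] if partial_credit else 0)
    precision = hit / actual if actual else 0.0
    recall = hit / possible if possible else 0.0
    return precision, recall

counts = {'correct': 3, 'incorrect': 1, 'partial': 1, 'missed': 1, 'spurious': 2}
print(precision_recall(counts))                       # strict / exact style
print(precision_recall(counts, partial_credit=True))  # partial-match style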
def orb_distance(self, pic_package_from: Dict, pic_package_to: Dict) -> Dict[str, sd.AlgoMatch]:
"""
Distance between two provided pictures (dicts) with ORB methods
:param pic_package_from: first picture dict
:param pic_package_to: second picture dict
:return: A dictionary of algo name to the match detail (distance, decision ..)
"""
answer = {}
self.logger.info('Orb distance computation ... ')
if pic_package_from.get('ORB_DESCRIPTORS', None) is None or pic_package_to.get('ORB_DESCRIPTORS', None) is None:
self.logger.warning(f'ORB descriptors are NOT presents in the results.')
raise AlgoFeatureNotPresentError('None ORB descriptors in orb distance.')
try:
if self.fe_conf.ORB.get('is_enabled', False):
<DeepExtract>
algo_name = self.fe_conf.ORB.get('algo_name')
tmp_dist = self.compute_orb_distance(pic_package_from['ORB_DESCRIPTORS'], pic_package_to['ORB_DESCRIPTORS'])
answer[algo_name] = sd.AlgoMatch(name=algo_name, distance=tmp_dist, decision=self.compute_decision_from_distance(self.fe_conf.ORB, tmp_dist))
answer = answer
</DeepExtract>
except Exception as e:
self.logger.error(traceback.print_tb(e.__traceback__))
self.logger.error('Error during orb distance calculation : ' + str(e))
return answer
|
def orb_distance(self, pic_package_from: Dict, pic_package_to: Dict) -> Dict[str, sd.AlgoMatch]:
"""
Distance between two provided pictures (dicts) with ORB methods
:param pic_package_from: first picture dict
:param pic_package_to: second picture dict
:return: A dictionary of algo name to the match detail (distance, decision ..)
"""
answer = {}
self.logger.info('Orb distance computation ... ')
if pic_package_from.get('ORB_DESCRIPTORS', None) is None or pic_package_to.get('ORB_DESCRIPTORS', None) is None:
self.logger.warning(f'ORB descriptors are NOT presents in the results.')
raise AlgoFeatureNotPresentError('None ORB descriptors in orb distance.')
try:
if self.fe_conf.ORB.get('is_enabled', False):
algo_name = self.fe_conf.ORB.get('algo_name')
tmp_dist = self.compute_orb_distance(pic_package_from['ORB_DESCRIPTORS'], pic_package_to['ORB_DESCRIPTORS'])
answer[algo_name] = sd.AlgoMatch(name=algo_name, distance=tmp_dist, decision=self.compute_decision_from_distance(self.fe_conf.ORB, tmp_dist))
answer = answer
except Exception as e:
self.logger.error(traceback.print_tb(e.__traceback__))
self.logger.error('Error during orb distance calculation : ' + str(e))
return answer
|
douglas-quaid
|
positive
|
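compute_orb_distance() is not included in this record; the sketch below is one plausible implementation, using OpenCV brute-force Hamming matching and normalising the mean match distance by the 256-bit ORB descriptor length. Treat it as an assumption, not the project's actual metric.

import cv2
import numpy as np

def orb_descriptor_distance(desc_a: np.ndarray, desc_b: np.ndarray) -> float:
    """0.0 means identical descriptor sets, 1.0 means no usable match."""
    if desc_a is None or desc_b is None or len(desc_a) == 0 or len(desc_b) == 0:
        return 1.0
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = matcher.match(desc_a, desc_b)
    if not matches:
        return 1.0
    return float(np.mean([m.distance for m in matches]) / 256.0)

# Descriptors come from cv2.ORB_create().detectAndCompute(image, None).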
def _crop(image, boxes, labels):
(height, width, _) = image.shape
if len(boxes) == 0:
return (image, boxes, labels)
while True:
mode = random.choice((None, (0.1, None), (0.3, None), (0.5, None), (0.7, None), (0.9, None), (None, None)))
if mode is None:
return (image, boxes, labels)
(min_iou, max_iou) = mode
if min_iou is None:
min_iou = float('-inf')
if max_iou is None:
max_iou = float('inf')
for _ in range(50):
scale = random.uniform(0.3, 1.0)
min_ratio = max(0.5, scale * scale)
max_ratio = min(2, 1.0 / scale / scale)
ratio = math.sqrt(random.uniform(min_ratio, max_ratio))
w = int(scale * ratio * width)
h = int(scale / ratio * height)
l = random.randrange(width - w)
t = random.randrange(height - h)
roi = np.array((l, t, l + w, t + h))
<DeepExtract>
lt = np.maximum(boxes[:, np.newaxis, :2], roi[np.newaxis][:, :2])
rb = np.minimum(boxes[:, np.newaxis, 2:], roi[np.newaxis][:, 2:])
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
area_a = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
area_b = np.prod(roi[np.newaxis][:, 2:] - roi[np.newaxis][:, :2], axis=1)
iou = area_i / (area_a[:, np.newaxis] + area_b - area_i)
</DeepExtract>
if not (min_iou <= iou.min() and iou.max() <= max_iou):
continue
image_t = image[roi[1]:roi[3], roi[0]:roi[2]]
centers = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
boxes_t = boxes[mask].copy()
labels_t = labels[mask].copy()
if len(boxes_t) == 0:
continue
boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])
boxes_t[:, :2] -= roi[:2]
boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])
boxes_t[:, 2:] -= roi[:2]
return (image_t, boxes_t, labels_t)
|
def _crop(image, boxes, labels):
(height, width, _) = image.shape
if len(boxes) == 0:
return (image, boxes, labels)
while True:
mode = random.choice((None, (0.1, None), (0.3, None), (0.5, None), (0.7, None), (0.9, None), (None, None)))
if mode is None:
return (image, boxes, labels)
(min_iou, max_iou) = mode
if min_iou is None:
min_iou = float('-inf')
if max_iou is None:
max_iou = float('inf')
for _ in range(50):
scale = random.uniform(0.3, 1.0)
min_ratio = max(0.5, scale * scale)
max_ratio = min(2, 1.0 / scale / scale)
ratio = math.sqrt(random.uniform(min_ratio, max_ratio))
w = int(scale * ratio * width)
h = int(scale / ratio * height)
l = random.randrange(width - w)
t = random.randrange(height - h)
roi = np.array((l, t, l + w, t + h))
lt = np.maximum(boxes[:, np.newaxis, :2], roi[np.newaxis][:, :2])
rb = np.minimum(boxes[:, np.newaxis, 2:], roi[np.newaxis][:, 2:])
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
area_a = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
area_b = np.prod(roi[np.newaxis][:, 2:] - roi[np.newaxis][:, :2], axis=1)
iou = area_i / (area_a[:, np.newaxis] + area_b - area_i)
if not (min_iou <= iou.min() and iou.max() <= max_iou):
continue
image_t = image[roi[1]:roi[3], roi[0]:roi[2]]
centers = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
boxes_t = boxes[mask].copy()
labels_t = labels[mask].copy()
if len(boxes_t) == 0:
continue
boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])
boxes_t[:, :2] -= roi[:2]
boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])
boxes_t[:, 2:] -= roi[:2]
return (image_t, boxes_t, labels_t)
|
computer-vision
|
positive
|
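The inlined block above is a pairwise IoU between the ground-truth boxes and the sampled crop. A cleaned-up standalone version of the same computation:

import numpy as np

def pairwise_iou(boxes_a: np.ndarray, boxes_b: np.ndarray) -> np.ndarray:
    """IoU between every box in boxes_a (N, 4) and boxes_b (M, 4),
    boxes given as (x1, y1, x2, y2)."""
    lt = np.maximum(boxes_a[:, None, :2], boxes_b[None, :, :2])  # (N, M, 2)
    rb = np.minimum(boxes_a[:, None, 2:], boxes_b[None, :, 2:])  # (N, M, 2)
    inter = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)     # zero if disjoint
    area_a = np.prod(boxes_a[:, 2:] - boxes_a[:, :2], axis=1)
    area_b = np.prod(boxes_b[:, 2:] - boxes_b[:, :2], axis=1)
    return inter / (area_a[:, None] + area_b[None, :] - inter)

boxes = np.array([[0, 0, 10, 10], [5, 5, 15, 15]], dtype=float)
roi = np.array([[0, 0, 10, 10]], dtype=float)
print(pairwise_iou(boxes, roi))  # [[1.0], [0.1428...]]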
def __div__(self, other):
<DeepExtract>
a = Amount(amount=self['amount'], asset=self['asset'].copy(), graphene_instance=self.graphene)
</DeepExtract>
if isinstance(other, Amount):
from .price import Price
return Price(self, other)
else:
a['amount'] /= other
return a
|
def __div__(self, other):
a = Amount(amount=self['amount'], asset=self['asset'].copy(), graphene_instance=self.graphene)
if isinstance(other, Amount):
from .price import Price
return Price(self, other)
else:
a['amount'] /= other
return a
|
CocosFactory
|
positive
|
@contextmanager
def temporary_docker_directory(files, name, metadata, extra_options, branch, template_dir, plugins_dir, static, install, spatialite, version_note, secret, extra_metadata=None, environment_variables=None, port=8001, apt_get_extras=None):
extra_metadata = extra_metadata or {}
tmp = tempfile.TemporaryDirectory()
datasette_dir = os.path.join(tmp.name, name)
os.mkdir(datasette_dir)
saved_cwd = os.getcwd()
file_paths = [os.path.join(saved_cwd, file_path) for file_path in files]
file_names = [os.path.split(f)[-1] for f in files]
if metadata:
<DeepExtract>
try:
metadata_content = json.loads(metadata.read())
except json.JSONDecodeError:
try:
metadata_content = yaml.safe_load(metadata.read())
except yaml.YAMLError:
raise BadMetadataError('Metadata is not valid JSON or YAML')
</DeepExtract>
else:
metadata_content = {}
mergedeep.merge(metadata_content, {key: value for (key, value) in extra_metadata.items() if value is not None})
try:
<DeepExtract>
cmd = ['datasette', 'serve', '--host', '0.0.0.0']
environment_variables = environment_variables or {}
environment_variables['DATASETTE_SECRET'] = secret
apt_get_extras = apt_get_extras or []
for filename in file_names:
cmd.extend(['-i', filename])
cmd.extend(['--cors', '--inspect-file', 'inspect-data.json'])
if metadata_content and 'metadata.json':
cmd.extend(['--metadata', f"{metadata_content and 'metadata.json'}"])
if template_dir:
cmd.extend(['--template-dir', 'templates/'])
if plugins_dir:
cmd.extend(['--plugins-dir', 'plugins/'])
if version_note:
cmd.extend(['--version-note', f'{version_note}'])
if static:
for (mount_point, _) in static:
cmd.extend(['--static', f'{mount_point}:{mount_point}'])
if extra_options:
for opt in extra_options.split():
cmd.append(f'{opt}')
cmd = [shlex.quote(part) for part in cmd]
cmd.extend(['--port', '$PORT'])
cmd = ' '.join(cmd)
if branch:
install = [f'https://github.com/simonw/datasette/archive/{branch}.zip'] + list(install)
else:
install = ['datasette'] + list(install)
apt_get_extras_ = []
apt_get_extras_.extend(apt_get_extras)
apt_get_extras = apt_get_extras_
if spatialite:
apt_get_extras.extend(['python3-dev', 'gcc', 'libsqlite3-mod-spatialite'])
environment_variables['SQLITE_EXTENSIONS'] = '/usr/lib/x86_64-linux-gnu/mod_spatialite.so'
dockerfile = '\nFROM python:3.11.0-slim-bullseye\nCOPY . /app\nWORKDIR /app\n{apt_get_extras}\n{environment_variables}\nRUN pip install -U {install_from}\nRUN datasette inspect {files} --inspect-file inspect-data.json\nENV PORT {port}\nEXPOSE {port}\nCMD {cmd}'.format(apt_get_extras=APT_GET_DOCKERFILE_EXTRAS.format(' '.join(apt_get_extras)) if apt_get_extras else '', environment_variables='\n'.join(["ENV {} '{}'".format(key, value) for (key, value) in environment_variables.items()]), install_from=' '.join(install), files=' '.join(file_names), port=port, cmd=cmd).strip()
</DeepExtract>
os.chdir(datasette_dir)
if metadata_content:
with open('metadata.json', 'w') as fp:
fp.write(json.dumps(metadata_content, indent=2))
with open('Dockerfile', 'w') as fp:
fp.write(dockerfile)
for (path, filename) in zip(file_paths, file_names):
<DeepExtract>
try:
os.link(path, os.path.join(datasette_dir, filename))
except OSError:
shutil.copyfile(path, os.path.join(datasette_dir, filename))
</DeepExtract>
if template_dir:
<DeepExtract>
try:
copytree(os.path.join(saved_cwd, template_dir), os.path.join(datasette_dir, 'templates'), copy_function=os.link, dirs_exist_ok=True)
except OSError:
copytree(os.path.join(saved_cwd, template_dir), os.path.join(datasette_dir, 'templates'), dirs_exist_ok=True)
</DeepExtract>
if plugins_dir:
<DeepExtract>
try:
copytree(os.path.join(saved_cwd, plugins_dir), os.path.join(datasette_dir, 'plugins'), copy_function=os.link, dirs_exist_ok=True)
except OSError:
copytree(os.path.join(saved_cwd, plugins_dir), os.path.join(datasette_dir, 'plugins'), dirs_exist_ok=True)
</DeepExtract>
for (mount_point, path) in static:
<DeepExtract>
try:
copytree(os.path.join(saved_cwd, path), os.path.join(datasette_dir, mount_point), copy_function=os.link, dirs_exist_ok=True)
except OSError:
copytree(os.path.join(saved_cwd, path), os.path.join(datasette_dir, mount_point), dirs_exist_ok=True)
</DeepExtract>
yield datasette_dir
finally:
tmp.cleanup()
os.chdir(saved_cwd)
|
@contextmanager
def temporary_docker_directory(files, name, metadata, extra_options, branch, template_dir, plugins_dir, static, install, spatialite, version_note, secret, extra_metadata=None, environment_variables=None, port=8001, apt_get_extras=None):
extra_metadata = extra_metadata or {}
tmp = tempfile.TemporaryDirectory()
datasette_dir = os.path.join(tmp.name, name)
os.mkdir(datasette_dir)
saved_cwd = os.getcwd()
file_paths = [os.path.join(saved_cwd, file_path) for file_path in files]
file_names = [os.path.split(f)[-1] for f in files]
if metadata:
try:
metadata_content = json.loads(metadata.read())
except json.JSONDecodeError:
try:
metadata_content = yaml.safe_load(metadata.read())
except yaml.YAMLError:
raise BadMetadataError('Metadata is not valid JSON or YAML')
else:
metadata_content = {}
mergedeep.merge(metadata_content, {key: value for (key, value) in extra_metadata.items() if value is not None})
try:
cmd = ['datasette', 'serve', '--host', '0.0.0.0']
environment_variables = environment_variables or {}
environment_variables['DATASETTE_SECRET'] = secret
apt_get_extras = apt_get_extras or []
for filename in file_names:
cmd.extend(['-i', filename])
cmd.extend(['--cors', '--inspect-file', 'inspect-data.json'])
if metadata_content and 'metadata.json':
cmd.extend(['--metadata', f"{metadata_content and 'metadata.json'}"])
if template_dir:
cmd.extend(['--template-dir', 'templates/'])
if plugins_dir:
cmd.extend(['--plugins-dir', 'plugins/'])
if version_note:
cmd.extend(['--version-note', f'{version_note}'])
if static:
for (mount_point, _) in static:
cmd.extend(['--static', f'{mount_point}:{mount_point}'])
if extra_options:
for opt in extra_options.split():
cmd.append(f'{opt}')
cmd = [shlex.quote(part) for part in cmd]
cmd.extend(['--port', '$PORT'])
cmd = ' '.join(cmd)
if branch:
install = [f'https://github.com/simonw/datasette/archive/{branch}.zip'] + list(install)
else:
install = ['datasette'] + list(install)
apt_get_extras_ = []
apt_get_extras_.extend(apt_get_extras)
apt_get_extras = apt_get_extras_
if spatialite:
apt_get_extras.extend(['python3-dev', 'gcc', 'libsqlite3-mod-spatialite'])
environment_variables['SQLITE_EXTENSIONS'] = '/usr/lib/x86_64-linux-gnu/mod_spatialite.so'
dockerfile = '\nFROM python:3.11.0-slim-bullseye\nCOPY . /app\nWORKDIR /app\n{apt_get_extras}\n{environment_variables}\nRUN pip install -U {install_from}\nRUN datasette inspect {files} --inspect-file inspect-data.json\nENV PORT {port}\nEXPOSE {port}\nCMD {cmd}'.format(apt_get_extras=APT_GET_DOCKERFILE_EXTRAS.format(' '.join(apt_get_extras)) if apt_get_extras else '', environment_variables='\n'.join(["ENV {} '{}'".format(key, value) for (key, value) in environment_variables.items()]), install_from=' '.join(install), files=' '.join(file_names), port=port, cmd=cmd).strip()
os.chdir(datasette_dir)
if metadata_content:
with open('metadata.json', 'w') as fp:
fp.write(json.dumps(metadata_content, indent=2))
with open('Dockerfile', 'w') as fp:
fp.write(dockerfile)
for (path, filename) in zip(file_paths, file_names):
try:
os.link(path, os.path.join(datasette_dir, filename))
except OSError:
shutil.copyfile(path, os.path.join(datasette_dir, filename))
if template_dir:
try:
copytree(os.path.join(saved_cwd, template_dir), os.path.join(datasette_dir, 'templates'), copy_function=os.link, dirs_exist_ok=True)
except OSError:
copytree(os.path.join(saved_cwd, template_dir), os.path.join(datasette_dir, 'templates'), dirs_exist_ok=True)
if plugins_dir:
try:
copytree(os.path.join(saved_cwd, plugins_dir), os.path.join(datasette_dir, 'plugins'), copy_function=os.link, dirs_exist_ok=True)
except OSError:
copytree(os.path.join(saved_cwd, plugins_dir), os.path.join(datasette_dir, 'plugins'), dirs_exist_ok=True)
for (mount_point, path) in static:
try:
copytree(os.path.join(saved_cwd, path), os.path.join(datasette_dir, mount_point), copy_function=os.link, dirs_exist_ok=True)
except OSError:
copytree(os.path.join(saved_cwd, path), os.path.join(datasette_dir, mount_point), dirs_exist_ok=True)
yield datasette_dir
finally:
tmp.cleanup()
os.chdir(saved_cwd)
|
datasette
|
positive
|
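The repeated try/except blocks above amount to 'hard link, fall back to copy' helpers along these lines (a sketch; the helper names are assumptions):

import os
import shutil

def link_or_copy(src: str, dst: str) -> None:
    """Prefer a hard link (fast, no extra disk); copy bytes when linking
    fails, e.g. across filesystems."""
    try:
        os.link(src, dst)
    except OSError:
        shutil.copyfile(src, dst)

def link_or_copy_directory(src: str, dst: str) -> None:
    """Same idea for whole trees, via copytree's copy_function hook."""
    try:
        shutil.copytree(src, dst, copy_function=os.link, dirs_exist_ok=True)
    except OSError:
        shutil.copytree(src, dst, dirs_exist_ok=True)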
def training_step(self, model_in: Dict[str, Any], batch_idx: int) -> Dict[str, Any]:
<DeepExtract>
loss = self.language_model.forward(**model_in)[0]
(loss, metrics) = (loss, dict())
</DeepExtract>
return dict(loss=loss, progress_bar=metrics, log=metrics)
|
def training_step(self, model_in: Dict[str, Any], batch_idx: int) -> Dict[str, Any]:
loss = self.language_model.forward(**model_in)[0]
(loss, metrics) = (loss, dict())
return dict(loss=loss, progress_bar=metrics, log=metrics)
|
agatha
|
positive
|
def _generateSummary(desc):
summary = {}
summary['name'] = self.desc_dict['name']
<DeepExtract>
doi_prefix = '10.5281/'
if desc.startswith(doi_prefix):
summary['descriptor-doi'] = desc
elif desc.split('.')[0].lower() == 'zenodo':
summary['descriptor-doi'] = doi_prefix + desc
if os.path.isfile(desc):
if self.desc_dict.get('doi') is not None:
doi = self.desc_dict.pop('doi')
if loadJson(doi) == self.desc_dict:
self.desc_dict['doi'] = doi
summary['descriptor-doi'] = doi
elif os.path.basename(desc).split('-')[0].lower() == 'zenodo':
doi = os.path.basename(desc).split('.')[0].replace('-', '.')
if loadJson(doi) == self.desc_dict:
summary['descriptor-doi'] = doi_prefix + doi
summary['descriptor-doi'] = self._saveDescriptorToCache()
</DeepExtract>
return summary
|
def _generateSummary(desc):
summary = {}
summary['name'] = self.desc_dict['name']
doi_prefix = '10.5281/'
if desc.startswith(doi_prefix):
summary['descriptor-doi'] = desc
elif desc.split('.')[0].lower() == 'zenodo':
summary['descriptor-doi'] = doi_prefix + desc
if os.path.isfile(desc):
if self.desc_dict.get('doi') is not None:
doi = self.desc_dict.pop('doi')
if loadJson(doi) == self.desc_dict:
self.desc_dict['doi'] = doi
summary['descriptor-doi'] = doi
elif os.path.basename(desc).split('-')[0].lower() == 'zenodo':
doi = os.path.basename(desc).split('.')[0].replace('-', '.')
if loadJson(doi) == self.desc_dict:
summary['descriptor-doi'] = doi_prefix + doi
summary['descriptor-doi'] = self._saveDescriptorToCache()
return summary
|
boutiques
|
positive
|
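A standalone sketch of the Zenodo DOI normalisation embedded above, covering the three spellings it recognises; the function name is illustrative.

import os

DOI_PREFIX = '10.5281/'

def normalize_zenodo_id(desc):
    """Map '10.5281/zenodo.123', 'zenodo.123' or a cached file name such as
    'zenodo-123.json' onto a full DOI; return None for anything else."""
    if desc.startswith(DOI_PREFIX):
        return desc
    if desc.split('.')[0].lower() == 'zenodo':
        return DOI_PREFIX + desc
    base = os.path.basename(desc)
    if base.split('-')[0].lower() == 'zenodo':
        return DOI_PREFIX + base.split('.')[0].replace('-', '.')
    return None

for d in ('10.5281/zenodo.123', 'zenodo.123', '/cache/zenodo-123.json', 'tool.json'):
    print(d, '->', normalize_zenodo_id(d))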
def getchar(echo: bool) -> str:
with raw_terminal() as fd:
ch = os.read(fd, 32).decode(get_best_encoding(sys.stdin), 'replace')
if echo and isatty(sys.stdout):
sys.stdout.write(ch)
<DeepExtract>
if ch == '\x03':
raise KeyboardInterrupt()
if ch == '\x04' and (not WIN):
raise EOFError()
if ch == '\x1a' and WIN:
raise EOFError()
return None
</DeepExtract>
return ch
|
def getchar(echo: bool) -> str:
with raw_terminal() as fd:
ch = os.read(fd, 32).decode(get_best_encoding(sys.stdin), 'replace')
if echo and isatty(sys.stdout):
sys.stdout.write(ch)
if ch == '\x03':
raise KeyboardInterrupt()
if ch == '\x04' and (not WIN):
raise EOFError()
if ch == '\x1a' and WIN:
raise EOFError()
return None
return ch
|
click
|
positive
|
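raw_terminal() is not shown in this record; on POSIX it amounts to switching stdin into raw mode and restoring the previous settings afterwards. A minimal sketch of that idea, omitting the Windows branch and non-tty handling:

import contextlib
import os
import sys
import termios
import tty

@contextlib.contextmanager
def raw_terminal():
    """Put stdin into raw mode for the block, always restore settings."""
    fd = sys.stdin.fileno()
    saved = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        yield fd
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, saved)

def read_one_key() -> str:
    with raw_terminal() as fd:
        return os.read(fd, 32).decode(errors='replace')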
def __init__(self, backend='matplotlib'):
<DeepExtract>
init_residual = [1]
self.UxResiduals = init_residual
self.UyResiduals = init_residual
self.UzResiduals = init_residual
self.pResiduals = init_residual
self.residuals = {'xVelocity': self.UxResiduals, 'yVelocity': self.UyResiduals, 'zVelocity': self.UzResiduals, 'pressure': self.pResiduals}
self.niter = 0
</DeepExtract>
if backend == 'gnuplot':
import Gnuplot
self.g = Gnuplot.Gnuplot()
self.g('set style data lines')
self.g.title('Simulation residuals')
self.g.xlabel('Iteration')
self.g.ylabel('Residual')
self.g('set grid')
self.g('set logscale y')
self.g('set yrange [0.95:1.05]')
self.g('set xrange [0:1]')
elif backend == 'matplotlib':
if withinFreeCAD:
self.fig = Plot.figure(FreeCAD.ActiveDocument.Name + 'Residuals')
self.Timer = QtCore.QTimer()
self.Timer.timeout.connect(self.refresh)
self.Timer.start(1000)
self.updated = False
else:
self.fig = plt.figure()
self.axis = self.fig.add_subplot(1, 1, 1)
self.axis.set_title('Simulation residuals')
self.axis.set_xlabel('Iteration')
self.axis.set_ylabel('Residual')
self.axis.grid(True)
self.axis.set_yscale('log')
self.axis.set_ylim(0.0001, 100.0)
for var in self.residuals:
self.axis.plot(self.residuals[var], label=var)
plt.legend()
else:
print('plot backend {} is not supported'.format(backend))
self.backend = backend
|
def __init__(self, backend='matplotlib'):
init_residual = [1]
self.UxResiduals = init_residual
self.UyResiduals = init_residual
self.UzResiduals = init_residual
self.pResiduals = init_residual
self.residuals = {'xVelocity': self.UxResiduals, 'yVelocity': self.UyResiduals, 'zVelocity': self.UzResiduals, 'pressure': self.pResiduals}
self.niter = 0
if backend == 'gnuplot':
import Gnuplot
self.g = Gnuplot.Gnuplot()
self.g('set style data lines')
self.g.title('Simulation residuals')
self.g.xlabel('Iteration')
self.g.ylabel('Residual')
self.g('set grid')
self.g('set logscale y')
self.g('set yrange [0.95:1.05]')
self.g('set xrange [0:1]')
elif backend == 'matplotlib':
if withinFreeCAD:
self.fig = Plot.figure(FreeCAD.ActiveDocument.Name + 'Residuals')
self.Timer = QtCore.QTimer()
self.Timer.timeout.connect(self.refresh)
self.Timer.start(1000)
self.updated = False
else:
self.fig = plt.figure()
self.axis = self.fig.add_subplot(1, 1, 1)
self.axis.set_title('Simulation residuals')
self.axis.set_xlabel('Iteration')
self.axis.set_ylabel('Residual')
self.axis.grid(True)
self.axis.set_yscale('log')
self.axis.set_ylim(0.0001, 100.0)
for var in self.residuals:
self.axis.plot(self.residuals[var], label=var)
plt.legend()
else:
print('plot backend {} is not supported'.format(backend))
self.backend = backend
|
Cfd
|
positive
|
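A matplotlib-only sketch of the residual monitor above, with one log-scale axis, one line per field, and an update step that appends a value and redraws; the FreeCAD/Plot and gnuplot branches are left out.

import matplotlib.pyplot as plt

class ResidualPlot:
    def __init__(self, fields=('xVelocity', 'yVelocity', 'zVelocity', 'pressure')):
        self.residuals = {f: [1.0] for f in fields}
        self.fig, self.axis = plt.subplots()
        self.axis.set(title='Simulation residuals', xlabel='Iteration',
                      ylabel='Residual', yscale='log', ylim=(1e-4, 1e2))
        self.axis.grid(True)
        self.lines = {f: self.axis.plot(v, label=f)[0]
                      for f, v in self.residuals.items()}
        self.axis.legend()

    def update(self, **values):
        """Append one residual per field and redraw the curves."""
        for field, value in values.items():
            self.residuals[field].append(value)
            ydata = self.residuals[field]
            self.lines[field].set_data(range(len(ydata)), ydata)
        self.axis.relim()
        self.axis.autoscale_view(scaley=False)
        self.fig.canvas.draw_idle()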
def validation_step(self, batch):
<DeepExtract>
raise NotImplementedError
</DeepExtract>
<DeepExtract>
def has_one_axis(X):
return hasattr('ppl', 'ndim') and 'ppl'.ndim == 1 or (isinstance('ppl', list) and (not hasattr('ppl'[0], '__len__')))
if has_one_axis('ppl'):
'ppl' = ['ppl']
if d2l.exp(l) is None:
('ppl', d2l.exp(l)) = ([[]] * len('ppl'), 'ppl')
elif has_one_axis(d2l.exp(l)):
d2l.exp(l) = [d2l.exp(l)]
if len('ppl') != len(d2l.exp(l)):
'ppl' = 'ppl' * len(d2l.exp(l))
set_figsize(figsize)
if axes is None:
axes = d2l.plt.gca()
axes.cla()
for (x, y, fmt) in zip('ppl', d2l.exp(l), fmts):
axes.plot(x, y, fmt) if len(x) else axes.plot(y, fmt)
set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
</DeepExtract>
|
def validation_step(self, batch):
raise NotImplementedError
def has_one_axis(X):
return hasattr('ppl', 'ndim') and 'ppl'.ndim == 1 or (isinstance('ppl', list) and (not hasattr('ppl'[0], '__len__')))
if has_one_axis('ppl'):
'ppl' = ['ppl']
if d2l.exp(l) is None:
('ppl', d2l.exp(l)) = ([[]] * len('ppl'), 'ppl')
elif has_one_axis(d2l.exp(l)):
d2l.exp(l) = [d2l.exp(l)]
if len('ppl') != len(d2l.exp(l)):
'ppl' = 'ppl' * len(d2l.exp(l))
set_figsize(figsize)
if axes is None:
axes = d2l.plt.gca()
axes.cla()
for (x, y, fmt) in zip('ppl', d2l.exp(l), fmts):
axes.plot(x, y, fmt) if len(x) else axes.plot(y, fmt)
set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
|
d2l-en
|
positive
|
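The inlined body above carries literal-substitution artifacts ('ppl' and d2l.exp(l) appear on the left-hand side of assignments), so it is not runnable as written. Below is a cleaned-up sketch of the generic line-plot helper that was inlined, with ordinary parameters; the signature and defaults are assumptions.

import matplotlib.pyplot as plt

def plot(X, Y=None, xlabel=None, ylabel=None, legend=None,
         xscale='linear', yscale='linear',
         fmts=('-', 'm--', 'g-.', 'r:'), axes=None):
    """Plot one or several curves; if Y is omitted, X holds the y-values."""
    def has_one_axis(data):
        return (hasattr(data, 'ndim') and data.ndim == 1) or (
            isinstance(data, list) and not hasattr(data[0], '__len__'))

    if has_one_axis(X):
        X = [X]
    if Y is None:
        X, Y = [[]] * len(X), X
    elif has_one_axis(Y):
        Y = [Y]
    if len(X) != len(Y):
        X = X * len(Y)

    axes = axes or plt.gca()
    axes.cla()
    for x, y, fmt in zip(X, Y, fmts):
        axes.plot(x, y, fmt) if len(x) else axes.plot(y, fmt)
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    if legend:
        axes.legend(legend)
    axes.grid()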
def readline(self):
"""read line from input data"""
if self.finish:
return None
buffers = []
if not self.data:
<DeepExtract>
item = self.data_queue.get()
if item is None:
self.finish = True
self.data = item
</DeepExtract>
while self.data is not None:
if len(self.data) == 0:
<DeepExtract>
item = self.data_queue.get()
if item is None:
self.finish = True
self.data = item
</DeepExtract>
continue
idx = self.data.find(b'\n')
if idx >= 0:
buffers.append(self.data[0:idx + 1])
self.data = self.data[idx + 1:]
break
if self.data:
buffers.append(self.data)
<DeepExtract>
item = self.data_queue.get()
if item is None:
self.finish = True
self.data = item
</DeepExtract>
if not buffers and self.finish:
return None
return b''.join(buffers)
|
def readline(self):
"""read line from input data"""
if self.finish:
return None
buffers = []
if not self.data:
item = self.data_queue.get()
if item is None:
self.finish = True
self.data = item
while self.data is not None:
if len(self.data) == 0:
item = self.data_queue.get()
if item is None:
self.finish = True
self.data = item
continue
idx = self.data.find(b'\n')
if idx >= 0:
buffers.append(self.data[0:idx + 1])
self.data = self.data[idx + 1:]
break
if self.data:
buffers.append(self.data)
item = self.data_queue.get()
if item is None:
self.finish = True
self.data = item
if not buffers and self.finish:
return None
return b''.join(buffers)
|
CapTipper
|
positive
|
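A compact standalone version of the queue-backed reader above: byte chunks arrive on a queue, None marks end of stream, and readline() returns one newline-terminated line at a time.

import queue

class QueueLineReader:
    def __init__(self, data_queue: queue.Queue):
        self.data_queue = data_queue
        self.buffer = b''
        self.finished = False

    def readline(self):
        while not self.finished:
            idx = self.buffer.find(b'\n')
            if idx >= 0:
                line, self.buffer = self.buffer[:idx + 1], self.buffer[idx + 1:]
                return line
            chunk = self.data_queue.get()
            if chunk is None:        # producer signalled end of data
                self.finished = True
            else:
                self.buffer += chunk
        if self.buffer:              # trailing data without a newline
            line, self.buffer = self.buffer, b''
            return line
        return None

q = queue.Queue()
for item in (b'GET / HT', b'TP/1.1\r\nHo', b'st: x\r\n', None):
    q.put(item)
r = QueueLineReader(q)
print(r.readline(), r.readline(), r.readline())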
def _build_forward(self):
config = self.config
(N, M, JX, JQ, VW, VC, d, W) = (config.batch_size, config.max_num_sents, config.max_sent_size, config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, config.max_word_size)
JX = tf.shape(self.x)[2]
JQ = tf.shape(self.q)[1]
M = tf.shape(self.x)[1]
(dc, dw, dco) = (config.char_emb_size, config.word_emb_size, config.char_out_size)
with tf.variable_scope('emb'):
if config.use_char_emb:
with tf.variable_scope('emb_var'), tf.device('/cpu:0'):
char_emb_mat = tf.get_variable('char_emb_mat', shape=[VC, dc], dtype='float')
with tf.variable_scope('char'):
Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx)
Acq = tf.nn.embedding_lookup(char_emb_mat, self.cq)
Acx = tf.reshape(Acx, [-1, JX, W, dc])
Acq = tf.reshape(Acq, [-1, JQ, W, dc])
filter_sizes = list(map(int, config.out_channel_dims.split(',')))
heights = list(map(int, config.filter_heights.split(',')))
assert sum(filter_sizes) == dco, (filter_sizes, dco)
with tf.variable_scope('conv'):
xx = multi_conv1d(Acx, filter_sizes, heights, 'VALID', self.is_train, config.keep_prob, scope='xx')
if config.share_cnn_weights:
tf.get_variable_scope().reuse_variables()
qq = multi_conv1d(Acq, filter_sizes, heights, 'VALID', self.is_train, config.keep_prob, scope='xx')
else:
qq = multi_conv1d(Acq, filter_sizes, heights, 'VALID', self.is_train, config.keep_prob, scope='qq')
xx = tf.reshape(xx, [-1, M, JX, dco])
qq = tf.reshape(qq, [-1, JQ, dco])
if config.use_word_emb:
with tf.variable_scope('emb_var'), tf.device('/cpu:0'):
if config.mode == 'train':
word_emb_mat = tf.get_variable('word_emb_mat', dtype='float', shape=[VW, dw], initializer=get_initializer(config.emb_mat))
else:
word_emb_mat = tf.get_variable('word_emb_mat', shape=[VW, dw], dtype='float')
if config.use_glove_for_unk:
word_emb_mat = tf.concat(axis=0, values=[word_emb_mat, self.new_emb_mat])
with tf.name_scope('word'):
Ax = tf.nn.embedding_lookup(word_emb_mat, self.x)
Aq = tf.nn.embedding_lookup(word_emb_mat, self.q)
self.tensor_dict['x'] = Ax
self.tensor_dict['q'] = Aq
if config.use_char_emb:
xx = tf.concat(axis=3, values=[xx, Ax])
qq = tf.concat(axis=2, values=[qq, Aq])
else:
xx = Ax
qq = Aq
if config.highway:
with tf.variable_scope('highway'):
xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
tf.get_variable_scope().reuse_variables()
qq = highway_network(qq, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
self.tensor_dict['xx'] = xx
self.tensor_dict['qq'] = qq
cell_fw = BasicLSTMCell(d, state_is_tuple=True)
cell_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell_fw = SwitchableDropoutWrapper(cell_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell_bw = SwitchableDropoutWrapper(cell_bw, self.is_train, input_keep_prob=config.input_keep_prob)
cell2_fw = BasicLSTMCell(d, state_is_tuple=True)
cell2_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell2_fw = SwitchableDropoutWrapper(cell2_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell2_bw = SwitchableDropoutWrapper(cell2_bw, self.is_train, input_keep_prob=config.input_keep_prob)
cell3_fw = BasicLSTMCell(d, state_is_tuple=True)
cell3_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell3_fw = SwitchableDropoutWrapper(cell3_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell3_bw = SwitchableDropoutWrapper(cell3_bw, self.is_train, input_keep_prob=config.input_keep_prob)
cell4_fw = BasicLSTMCell(d, state_is_tuple=True)
cell4_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell4_fw = SwitchableDropoutWrapper(cell4_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell4_bw = SwitchableDropoutWrapper(cell4_bw, self.is_train, input_keep_prob=config.input_keep_prob)
x_len = tf.reduce_sum(tf.cast(self.x_mask, 'int32'), 2)
q_len = tf.reduce_sum(tf.cast(self.q_mask, 'int32'), 1)
with tf.variable_scope('prepro'):
((fw_u, bw_u), ((_, fw_u_f), (_, bw_u_f))) = bidirectional_dynamic_rnn(d_cell_fw, d_cell_bw, qq, q_len, dtype='float', scope='u1')
u = tf.concat(axis=2, values=[fw_u, bw_u])
if config.share_lstm_weights:
tf.get_variable_scope().reuse_variables()
((fw_h, bw_h), _) = bidirectional_dynamic_rnn(cell_fw, cell_bw, xx, x_len, dtype='float', scope='u1')
h = tf.concat(axis=3, values=[fw_h, bw_h])
else:
((fw_h, bw_h), _) = bidirectional_dynamic_rnn(cell_fw, cell_bw, xx, x_len, dtype='float', scope='h1')
h = tf.concat(axis=3, values=[fw_h, bw_h])
self.tensor_dict['u'] = u
self.tensor_dict['h'] = h
with tf.variable_scope('main'):
if config.dynamic_att:
p0 = h
u = tf.reshape(tf.tile(tf.expand_dims(u, 1), [1, M, 1, 1]), [N * M, JQ, 2 * d])
q_mask = tf.reshape(tf.tile(tf.expand_dims(self.q_mask, 1), [1, M, 1]), [N * M, JQ])
first_cell_fw = AttentionCell(cell2_fw, u, mask=q_mask, mapper='sim', input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
first_cell_bw = AttentionCell(cell2_bw, u, mask=q_mask, mapper='sim', input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
second_cell_fw = AttentionCell(cell3_fw, u, mask=q_mask, mapper='sim', input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
second_cell_bw = AttentionCell(cell3_bw, u, mask=q_mask, mapper='sim', input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
else:
<DeepExtract>
with tf.variable_scope('p0' or 'attention_layer'):
JX = tf.shape(h)[2]
M = tf.shape(h)[1]
JQ = tf.shape(u)[1]
if config.q2c_att or config.c2q_att:
(u_a, h_a) = bi_attention(config, self.is_train, h, u, h_mask=self.x_mask, u_mask=self.q_mask, tensor_dict=self.tensor_dict)
if not config.c2q_att:
u_a = tf.tile(tf.expand_dims(tf.expand_dims(tf.reduce_mean(u, 1), 1), 1), [1, M, JX, 1])
if config.q2c_att:
p0 = tf.concat(axis=3, values=[h, u_a, h * u_a, h * h_a])
else:
p0 = tf.concat(axis=3, values=[h, u_a, h * u_a])
p0 = p0
</DeepExtract>
first_cell_fw = d_cell2_fw
second_cell_fw = d_cell3_fw
first_cell_bw = d_cell2_bw
second_cell_bw = d_cell3_bw
((fw_g0, bw_g0), _) = bidirectional_dynamic_rnn(first_cell_fw, first_cell_bw, p0, x_len, dtype='float', scope='g0')
g0 = tf.concat(axis=3, values=[fw_g0, bw_g0])
((fw_g1, bw_g1), _) = bidirectional_dynamic_rnn(second_cell_fw, second_cell_bw, g0, x_len, dtype='float', scope='g1')
g1 = tf.concat(axis=3, values=[fw_g1, bw_g1])
logits = get_logits([g1, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=self.x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
a1i = softsel(tf.reshape(g1, [N, M * JX, 2 * d]), tf.reshape(logits, [N, M * JX]))
a1i = tf.tile(tf.expand_dims(tf.expand_dims(a1i, 1), 1), [1, M, JX, 1])
((fw_g2, bw_g2), _) = bidirectional_dynamic_rnn(d_cell4_fw, d_cell4_bw, tf.concat(axis=3, values=[p0, g1, a1i, g1 * a1i]), x_len, dtype='float', scope='g2')
g2 = tf.concat(axis=3, values=[fw_g2, bw_g2])
logits2 = get_logits([g2, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=self.x_mask, is_train=self.is_train, func=config.answer_func, scope='logits2')
flat_logits = tf.reshape(logits, [-1, M * JX])
flat_yp = tf.nn.softmax(flat_logits)
flat_logits2 = tf.reshape(logits2, [-1, M * JX])
flat_yp2 = tf.nn.softmax(flat_logits2)
if config.na:
na_bias = tf.get_variable('na_bias', shape=[], dtype='float')
na_bias_tiled = tf.tile(tf.reshape(na_bias, [1, 1]), [N, 1])
concat_flat_logits = tf.concat(axis=1, values=[na_bias_tiled, flat_logits])
concat_flat_yp = tf.nn.softmax(concat_flat_logits)
na_prob = tf.squeeze(tf.slice(concat_flat_yp, [0, 0], [-1, 1]), [1])
flat_yp = tf.slice(concat_flat_yp, [0, 1], [-1, -1])
concat_flat_logits2 = tf.concat(axis=1, values=[na_bias_tiled, flat_logits2])
concat_flat_yp2 = tf.nn.softmax(concat_flat_logits2)
na_prob2 = tf.squeeze(tf.slice(concat_flat_yp2, [0, 0], [-1, 1]), [1])
flat_yp2 = tf.slice(concat_flat_yp2, [0, 1], [-1, -1])
self.concat_logits = concat_flat_logits
self.concat_logits2 = concat_flat_logits2
self.na_prob = na_prob * na_prob2
yp = tf.reshape(flat_yp, [-1, M, JX])
yp2 = tf.reshape(flat_yp2, [-1, M, JX])
wyp = tf.nn.sigmoid(logits2)
self.tensor_dict['g1'] = g1
self.tensor_dict['g2'] = g2
self.logits = flat_logits
self.logits2 = flat_logits2
self.yp = yp
self.yp2 = yp2
self.wyp = wyp
|
def _build_forward(self):
config = self.config
(N, M, JX, JQ, VW, VC, d, W) = (config.batch_size, config.max_num_sents, config.max_sent_size, config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.hidden_size, config.max_word_size)
JX = tf.shape(self.x)[2]
JQ = tf.shape(self.q)[1]
M = tf.shape(self.x)[1]
(dc, dw, dco) = (config.char_emb_size, config.word_emb_size, config.char_out_size)
with tf.variable_scope('emb'):
if config.use_char_emb:
with tf.variable_scope('emb_var'), tf.device('/cpu:0'):
char_emb_mat = tf.get_variable('char_emb_mat', shape=[VC, dc], dtype='float')
with tf.variable_scope('char'):
Acx = tf.nn.embedding_lookup(char_emb_mat, self.cx)
Acq = tf.nn.embedding_lookup(char_emb_mat, self.cq)
Acx = tf.reshape(Acx, [-1, JX, W, dc])
Acq = tf.reshape(Acq, [-1, JQ, W, dc])
filter_sizes = list(map(int, config.out_channel_dims.split(',')))
heights = list(map(int, config.filter_heights.split(',')))
assert sum(filter_sizes) == dco, (filter_sizes, dco)
with tf.variable_scope('conv'):
xx = multi_conv1d(Acx, filter_sizes, heights, 'VALID', self.is_train, config.keep_prob, scope='xx')
if config.share_cnn_weights:
tf.get_variable_scope().reuse_variables()
qq = multi_conv1d(Acq, filter_sizes, heights, 'VALID', self.is_train, config.keep_prob, scope='xx')
else:
qq = multi_conv1d(Acq, filter_sizes, heights, 'VALID', self.is_train, config.keep_prob, scope='qq')
xx = tf.reshape(xx, [-1, M, JX, dco])
qq = tf.reshape(qq, [-1, JQ, dco])
if config.use_word_emb:
with tf.variable_scope('emb_var'), tf.device('/cpu:0'):
if config.mode == 'train':
word_emb_mat = tf.get_variable('word_emb_mat', dtype='float', shape=[VW, dw], initializer=get_initializer(config.emb_mat))
else:
word_emb_mat = tf.get_variable('word_emb_mat', shape=[VW, dw], dtype='float')
if config.use_glove_for_unk:
word_emb_mat = tf.concat(axis=0, values=[word_emb_mat, self.new_emb_mat])
with tf.name_scope('word'):
Ax = tf.nn.embedding_lookup(word_emb_mat, self.x)
Aq = tf.nn.embedding_lookup(word_emb_mat, self.q)
self.tensor_dict['x'] = Ax
self.tensor_dict['q'] = Aq
if config.use_char_emb:
xx = tf.concat(axis=3, values=[xx, Ax])
qq = tf.concat(axis=2, values=[qq, Aq])
else:
xx = Ax
qq = Aq
if config.highway:
with tf.variable_scope('highway'):
xx = highway_network(xx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
tf.get_variable_scope().reuse_variables()
qq = highway_network(qq, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
self.tensor_dict['xx'] = xx
self.tensor_dict['qq'] = qq
cell_fw = BasicLSTMCell(d, state_is_tuple=True)
cell_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell_fw = SwitchableDropoutWrapper(cell_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell_bw = SwitchableDropoutWrapper(cell_bw, self.is_train, input_keep_prob=config.input_keep_prob)
cell2_fw = BasicLSTMCell(d, state_is_tuple=True)
cell2_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell2_fw = SwitchableDropoutWrapper(cell2_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell2_bw = SwitchableDropoutWrapper(cell2_bw, self.is_train, input_keep_prob=config.input_keep_prob)
cell3_fw = BasicLSTMCell(d, state_is_tuple=True)
cell3_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell3_fw = SwitchableDropoutWrapper(cell3_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell3_bw = SwitchableDropoutWrapper(cell3_bw, self.is_train, input_keep_prob=config.input_keep_prob)
cell4_fw = BasicLSTMCell(d, state_is_tuple=True)
cell4_bw = BasicLSTMCell(d, state_is_tuple=True)
d_cell4_fw = SwitchableDropoutWrapper(cell4_fw, self.is_train, input_keep_prob=config.input_keep_prob)
d_cell4_bw = SwitchableDropoutWrapper(cell4_bw, self.is_train, input_keep_prob=config.input_keep_prob)
x_len = tf.reduce_sum(tf.cast(self.x_mask, 'int32'), 2)
q_len = tf.reduce_sum(tf.cast(self.q_mask, 'int32'), 1)
with tf.variable_scope('prepro'):
((fw_u, bw_u), ((_, fw_u_f), (_, bw_u_f))) = bidirectional_dynamic_rnn(d_cell_fw, d_cell_bw, qq, q_len, dtype='float', scope='u1')
u = tf.concat(axis=2, values=[fw_u, bw_u])
if config.share_lstm_weights:
tf.get_variable_scope().reuse_variables()
((fw_h, bw_h), _) = bidirectional_dynamic_rnn(cell_fw, cell_bw, xx, x_len, dtype='float', scope='u1')
h = tf.concat(axis=3, values=[fw_h, bw_h])
else:
((fw_h, bw_h), _) = bidirectional_dynamic_rnn(cell_fw, cell_bw, xx, x_len, dtype='float', scope='h1')
h = tf.concat(axis=3, values=[fw_h, bw_h])
self.tensor_dict['u'] = u
self.tensor_dict['h'] = h
with tf.variable_scope('main'):
if config.dynamic_att:
p0 = h
u = tf.reshape(tf.tile(tf.expand_dims(u, 1), [1, M, 1, 1]), [N * M, JQ, 2 * d])
q_mask = tf.reshape(tf.tile(tf.expand_dims(self.q_mask, 1), [1, M, 1]), [N * M, JQ])
first_cell_fw = AttentionCell(cell2_fw, u, mask=q_mask, mapper='sim', input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
first_cell_bw = AttentionCell(cell2_bw, u, mask=q_mask, mapper='sim', input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
second_cell_fw = AttentionCell(cell3_fw, u, mask=q_mask, mapper='sim', input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
second_cell_bw = AttentionCell(cell3_bw, u, mask=q_mask, mapper='sim', input_keep_prob=self.config.input_keep_prob, is_train=self.is_train)
else:
with tf.variable_scope('p0' or 'attention_layer'):
JX = tf.shape(h)[2]
M = tf.shape(h)[1]
JQ = tf.shape(u)[1]
if config.q2c_att or config.c2q_att:
(u_a, h_a) = bi_attention(config, self.is_train, h, u, h_mask=self.x_mask, u_mask=self.q_mask, tensor_dict=self.tensor_dict)
if not config.c2q_att:
u_a = tf.tile(tf.expand_dims(tf.expand_dims(tf.reduce_mean(u, 1), 1), 1), [1, M, JX, 1])
if config.q2c_att:
p0 = tf.concat(axis=3, values=[h, u_a, h * u_a, h * h_a])
else:
p0 = tf.concat(axis=3, values=[h, u_a, h * u_a])
p0 = p0
first_cell_fw = d_cell2_fw
second_cell_fw = d_cell3_fw
first_cell_bw = d_cell2_bw
second_cell_bw = d_cell3_bw
((fw_g0, bw_g0), _) = bidirectional_dynamic_rnn(first_cell_fw, first_cell_bw, p0, x_len, dtype='float', scope='g0')
g0 = tf.concat(axis=3, values=[fw_g0, bw_g0])
((fw_g1, bw_g1), _) = bidirectional_dynamic_rnn(second_cell_fw, second_cell_bw, g0, x_len, dtype='float', scope='g1')
g1 = tf.concat(axis=3, values=[fw_g1, bw_g1])
logits = get_logits([g1, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=self.x_mask, is_train=self.is_train, func=config.answer_func, scope='logits1')
a1i = softsel(tf.reshape(g1, [N, M * JX, 2 * d]), tf.reshape(logits, [N, M * JX]))
a1i = tf.tile(tf.expand_dims(tf.expand_dims(a1i, 1), 1), [1, M, JX, 1])
((fw_g2, bw_g2), _) = bidirectional_dynamic_rnn(d_cell4_fw, d_cell4_bw, tf.concat(axis=3, values=[p0, g1, a1i, g1 * a1i]), x_len, dtype='float', scope='g2')
g2 = tf.concat(axis=3, values=[fw_g2, bw_g2])
logits2 = get_logits([g2, p0], d, True, wd=config.wd, input_keep_prob=config.input_keep_prob, mask=self.x_mask, is_train=self.is_train, func=config.answer_func, scope='logits2')
flat_logits = tf.reshape(logits, [-1, M * JX])
flat_yp = tf.nn.softmax(flat_logits)
flat_logits2 = tf.reshape(logits2, [-1, M * JX])
flat_yp2 = tf.nn.softmax(flat_logits2)
if config.na:
na_bias = tf.get_variable('na_bias', shape=[], dtype='float')
na_bias_tiled = tf.tile(tf.reshape(na_bias, [1, 1]), [N, 1])
concat_flat_logits = tf.concat(axis=1, values=[na_bias_tiled, flat_logits])
concat_flat_yp = tf.nn.softmax(concat_flat_logits)
na_prob = tf.squeeze(tf.slice(concat_flat_yp, [0, 0], [-1, 1]), [1])
flat_yp = tf.slice(concat_flat_yp, [0, 1], [-1, -1])
concat_flat_logits2 = tf.concat(axis=1, values=[na_bias_tiled, flat_logits2])
concat_flat_yp2 = tf.nn.softmax(concat_flat_logits2)
na_prob2 = tf.squeeze(tf.slice(concat_flat_yp2, [0, 0], [-1, 1]), [1])
flat_yp2 = tf.slice(concat_flat_yp2, [0, 1], [-1, -1])
self.concat_logits = concat_flat_logits
self.concat_logits2 = concat_flat_logits2
self.na_prob = na_prob * na_prob2
yp = tf.reshape(flat_yp, [-1, M, JX])
yp2 = tf.reshape(flat_yp2, [-1, M, JX])
wyp = tf.nn.sigmoid(logits2)
self.tensor_dict['g1'] = g1
self.tensor_dict['g2'] = g2
self.logits = flat_logits
self.logits2 = flat_logits2
self.yp = yp
self.yp2 = yp2
self.wyp = wyp
|
dawn-bench-models
|
positive
|
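
The BiDAF rows above fold a learned no-answer bias into the span softmax (concat na_bias with the flattened logits, softmax, then split). A minimal NumPy sketch of that step, illustrative only — the model itself does this in TensorFlow over batched, flattened span logits and multiplies the start/end no-answer probabilities (na_prob * na_prob2):

import numpy as np

def na_softmax(flat_logits, na_bias):
    # Prepend the no-answer bias logit, take a softmax, and split the mass:
    # the first entry is the no-answer probability, the rest covers spans.
    logits = np.concatenate([[na_bias], flat_logits])
    exp = np.exp(logits - logits.max())
    probs = exp / exp.sum()
    return probs[0], probs[1:]

na_prob, span_probs = na_softmax(np.array([1.0, 2.0, 0.5]), na_bias=0.0)
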
def _relocate(src: str, dst: str) -> None:
src = fs_access.realpath(src)
dst = fs_access.realpath(dst)
if src == dst:
return
src_can_deleted = any((os.path.commonprefix([p, src]) == p for p in source_directories))
_action = 'move' if action == 'move' and src_can_deleted else 'copy'
if _action == 'move':
_logger.debug('Moving %s to %s', src, dst)
if fs_access.isdir(src) and fs_access.isdir(dst):
for dir_entry in scandir(src):
<DeepExtract>
dir_entry.path = fs_access.realpath(dir_entry.path)
fs_access.join(dst, dir_entry.name) = fs_access.realpath(fs_access.join(dst, dir_entry.name))
if dir_entry.path == fs_access.join(dst, dir_entry.name):
return
src_can_deleted = any((os.path.commonprefix([p, dir_entry.path]) == p for p in source_directories))
_action = 'move' if action == 'move' and src_can_deleted else 'copy'
if _action == 'move':
_logger.debug('Moving %s to %s', dir_entry.path, fs_access.join(dst, dir_entry.name))
if fs_access.isdir(dir_entry.path) and fs_access.isdir(fs_access.join(dst, dir_entry.name)):
for dir_entry in scandir(dir_entry.path):
_relocate(dir_entry.path, fs_access.join(fs_access.join(dst, dir_entry.name), dir_entry.name))
else:
shutil.move(dir_entry.path, fs_access.join(dst, dir_entry.name))
elif _action == 'copy':
_logger.debug('Copying %s to %s', dir_entry.path, fs_access.join(dst, dir_entry.name))
if fs_access.isdir(dir_entry.path):
if os.path.isdir(fs_access.join(dst, dir_entry.name)):
shutil.rmtree(fs_access.join(dst, dir_entry.name))
elif os.path.isfile(fs_access.join(dst, dir_entry.name)):
os.unlink(fs_access.join(dst, dir_entry.name))
shutil.copytree(dir_entry.path, fs_access.join(dst, dir_entry.name))
else:
shutil.copy2(dir_entry.path, fs_access.join(dst, dir_entry.name))
</DeepExtract>
else:
shutil.move(src, dst)
elif _action == 'copy':
_logger.debug('Copying %s to %s', src, dst)
if fs_access.isdir(src):
if os.path.isdir(dst):
shutil.rmtree(dst)
elif os.path.isfile(dst):
os.unlink(dst)
shutil.copytree(src, dst)
else:
shutil.copy2(src, dst)
|
def _relocate(src: str, dst: str) -> None:
src = fs_access.realpath(src)
dst = fs_access.realpath(dst)
if src == dst:
return
src_can_deleted = any((os.path.commonprefix([p, src]) == p for p in source_directories))
_action = 'move' if action == 'move' and src_can_deleted else 'copy'
if _action == 'move':
_logger.debug('Moving %s to %s', src, dst)
if fs_access.isdir(src) and fs_access.isdir(dst):
for dir_entry in scandir(src):
dir_entry.path = fs_access.realpath(dir_entry.path)
fs_access.join(dst, dir_entry.name) = fs_access.realpath(fs_access.join(dst, dir_entry.name))
if dir_entry.path == fs_access.join(dst, dir_entry.name):
return
src_can_deleted = any((os.path.commonprefix([p, dir_entry.path]) == p for p in source_directories))
_action = 'move' if action == 'move' and src_can_deleted else 'copy'
if _action == 'move':
_logger.debug('Moving %s to %s', dir_entry.path, fs_access.join(dst, dir_entry.name))
if fs_access.isdir(dir_entry.path) and fs_access.isdir(fs_access.join(dst, dir_entry.name)):
for dir_entry in scandir(dir_entry.path):
_relocate(dir_entry.path, fs_access.join(fs_access.join(dst, dir_entry.name), dir_entry.name))
else:
shutil.move(dir_entry.path, fs_access.join(dst, dir_entry.name))
elif _action == 'copy':
_logger.debug('Copying %s to %s', dir_entry.path, fs_access.join(dst, dir_entry.name))
if fs_access.isdir(dir_entry.path):
if os.path.isdir(fs_access.join(dst, dir_entry.name)):
shutil.rmtree(fs_access.join(dst, dir_entry.name))
elif os.path.isfile(fs_access.join(dst, dir_entry.name)):
os.unlink(fs_access.join(dst, dir_entry.name))
shutil.copytree(dir_entry.path, fs_access.join(dst, dir_entry.name))
else:
shutil.copy2(dir_entry.path, fs_access.join(dst, dir_entry.name))
else:
shutil.move(src, dst)
elif _action == 'copy':
_logger.debug('Copying %s to %s', src, dst)
if fs_access.isdir(src):
if os.path.isdir(dst):
shutil.rmtree(dst)
elif os.path.isfile(dst):
os.unlink(dst)
shutil.copytree(src, dst)
else:
shutil.copy2(src, dst)
|
cwltool
|
positive
|
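
The inlined child-directory branch in the _relocate rows above assigns to fs_access.join(dst, dir_entry.name), which is not valid Python; binding the joined destination to a local first keeps the same behaviour. A simplified, self-contained sketch that substitutes plain os/shutil for cwltool's fs_access object (an assumption made only to keep the example runnable):

import os
import shutil

def relocate_child(src_dir: str, dst_dir: str, name: str, move: bool = True) -> None:
    # Resolve both endpoints first, then act on the local variables.
    src_path = os.path.realpath(os.path.join(src_dir, name))
    dst_path = os.path.realpath(os.path.join(dst_dir, name))
    if src_path == dst_path:
        return
    if move:
        # The original recurses when both endpoints are existing directories;
        # for a plain child entry a move suffices.
        shutil.move(src_path, dst_path)
    elif os.path.isdir(src_path):
        if os.path.isdir(dst_path):
            shutil.rmtree(dst_path)   # replace an existing directory
        elif os.path.isfile(dst_path):
            os.unlink(dst_path)       # replace an existing file
        shutil.copytree(src_path, dst_path)
    else:
        shutil.copy2(src_path, dst_path)
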
def do_voc_evaluation(dataset, predictions, output_folder, logger):
pred_boxlists = []
gt_boxlists = []
for (image_id, prediction) in enumerate(predictions):
img_info = dataset.get_img_info(image_id)
image_width = img_info['width']
image_height = img_info['height']
prediction = prediction.resize((image_width, image_height))
pred_boxlists.append(prediction)
gt_boxlist = dataset.get_groundtruth(image_id)
gt_boxlists.append(gt_boxlist)
<DeepExtract>
assert len(gt_boxlists) == len(pred_boxlists), 'Length of gt and pred lists need to be same.'
(prec, rec) = calc_detection_voc_prec_rec(pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=0.5)
ap = calc_detection_voc_ap(prec, rec, use_07_metric=True)
result = {'ap': ap, 'map': np.nanmean(ap)}
</DeepExtract>
result_str = 'mAP: {:.4f}\n'.format(result['map'])
for (i, ap) in enumerate(result['ap']):
if i == 0:
continue
result_str += '{:<16}: {:.4f}\n'.format(dataset.map_class_id_to_class_name(i), ap)
logger.info(result_str)
if output_folder:
with open(os.path.join(output_folder, 'result.txt'), 'w') as fid:
fid.write(result_str)
return result
|
def do_voc_evaluation(dataset, predictions, output_folder, logger):
pred_boxlists = []
gt_boxlists = []
for (image_id, prediction) in enumerate(predictions):
img_info = dataset.get_img_info(image_id)
image_width = img_info['width']
image_height = img_info['height']
prediction = prediction.resize((image_width, image_height))
pred_boxlists.append(prediction)
gt_boxlist = dataset.get_groundtruth(image_id)
gt_boxlists.append(gt_boxlist)
assert len(gt_boxlists) == len(pred_boxlists), 'Length of gt and pred lists need to be same.'
(prec, rec) = calc_detection_voc_prec_rec(pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=0.5)
ap = calc_detection_voc_ap(prec, rec, use_07_metric=True)
result = {'ap': ap, 'map': np.nanmean(ap)}
result_str = 'mAP: {:.4f}\n'.format(result['map'])
for (i, ap) in enumerate(result['ap']):
if i == 0:
continue
result_str += '{:<16}: {:.4f}\n'.format(dataset.map_class_id_to_class_name(i), ap)
logger.info(result_str)
if output_folder:
with open(os.path.join(output_folder, 'result.txt'), 'w') as fid:
fid.write(result_str)
return result
|
Drone_FasterRCNN
|
positive
|
@parameterized.expand([param('Sep 03 2014 | 4:32 pm EDT', 'Sep 03 2014 | 4:32 pm '), param('17th October, 2034 @ 01:08 am PDT', '17th October, 2034 @ 01:08 am '), param('October 17, 2014 at 7:30 am PST', 'October 17, 2014 at 7:30 am '), param('20 Oct 2014 13:08 CET', '20 Oct 2014 13:08 '), param('20 Oct 2014 13:08cet', '20 Oct 2014 13:08'), param('Nov 25 2014 | 10:17 pm EST', 'Nov 25 2014 | 10:17 pm '), param('17th October, 2034 @ 01:08 am +0700', '17th October, 2034 @ 01:08 am '), param('Sep 03 2014 4:32 pm +0630', 'Sep 03 2014 4:32 pm ')])
def test_timezone_deleted_from_string(self, initial_string, result_string):
<DeepExtract>
self.initial_string = initial_string
</DeepExtract>
<DeepExtract>
(self.datetime_string, timezone_offset) = pop_tz_offset_from_string(self.initial_string)
if timezone_offset:
self.timezone_offset = timezone_offset.utcoffset('')
else:
self.timezone_offset = timezone_offset
</DeepExtract>
<DeepExtract>
self.assertEqual(result_string, self.datetime_string)
</DeepExtract>
|
@parameterized.expand([param('Sep 03 2014 | 4:32 pm EDT', 'Sep 03 2014 | 4:32 pm '), param('17th October, 2034 @ 01:08 am PDT', '17th October, 2034 @ 01:08 am '), param('October 17, 2014 at 7:30 am PST', 'October 17, 2014 at 7:30 am '), param('20 Oct 2014 13:08 CET', '20 Oct 2014 13:08 '), param('20 Oct 2014 13:08cet', '20 Oct 2014 13:08'), param('Nov 25 2014 | 10:17 pm EST', 'Nov 25 2014 | 10:17 pm '), param('17th October, 2034 @ 01:08 am +0700', '17th October, 2034 @ 01:08 am '), param('Sep 03 2014 4:32 pm +0630', 'Sep 03 2014 4:32 pm ')])
def test_timezone_deleted_from_string(self, initial_string, result_string):
self.initial_string = initial_string
(self.datetime_string, timezone_offset) = pop_tz_offset_from_string(self.initial_string)
if timezone_offset:
self.timezone_offset = timezone_offset.utcoffset('')
else:
self.timezone_offset = timezone_offset
self.assertEqual(result_string, self.datetime_string)
|
dateparser
|
positive
|
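
For reference, a small usage sketch matching the expectations in the test above. The import path is an assumption based on the test module and should be checked against the installed dateparser version:

# Illustrative only; verify the import path for your dateparser version.
from dateparser.timezone_parser import pop_tz_offset_from_string

text = 'Sep 03 2014 | 4:32 pm EDT'
stripped, tz = pop_tz_offset_from_string(text)
print(repr(stripped))                     # 'Sep 03 2014 | 4:32 pm '
print(tz.utcoffset('') if tz else None)   # UTC-4 for EDT
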
@record
def test_get_entity_with_property_resolver_not_supported(self):
<DeepExtract>
entity = self._create_random_entity_class(pk, rk)
etag = self.ts.insert_entity(self.table_name, entity)
entity.etag = etag
entity = entity
</DeepExtract>
with self.assertRaisesRegexp(AzureException, 'Type not supported when sending data to the service:'):
self.ts.get_entity(self.table_name, entity.PartitionKey, entity.RowKey, property_resolver=lambda pk, rk, name, val, type: 'badType')
|
@record
def test_get_entity_with_property_resolver_not_supported(self):
entity = self._create_random_entity_class(pk, rk)
etag = self.ts.insert_entity(self.table_name, entity)
entity.etag = etag
entity = entity
with self.assertRaisesRegexp(AzureException, 'Type not supported when sending data to the service:'):
self.ts.get_entity(self.table_name, entity.PartitionKey, entity.RowKey, property_resolver=lambda pk, rk, name, val, type: 'badType')
|
azure-cosmos-table-python
|
positive
|
def set_device_on_off(self, device, track, xclip, ident, value=None):
""" Toggles or turns device on/off """
<DeepExtract>
result = None
for parameter in device.parameters:
if str(parameter.name).startswith('Device On'):
result = parameter
break
on_off = result
</DeepExtract>
if on_off and on_off.is_enabled:
if value in KEYWORDS:
on_off.value = KEYWORDS[value]
else:
on_off.value = not on_off.value
|
def set_device_on_off(self, device, track, xclip, ident, value=None):
""" Toggles or turns device on/off """
result = None
for parameter in device.parameters:
if str(parameter.name).startswith('Device On'):
result = parameter
break
on_off = result
if on_off and on_off.is_enabled:
if value in KEYWORDS:
on_off.value = KEYWORDS[value]
else:
on_off.value = not on_off.value
|
clyphx-live10
|
positive
|
def proposal2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
<DeepExtract>
_bbox = bboxes[i].tolist()
data['bbox'] = [_bbox[0], _bbox[1], _bbox[2] - _bbox[0] + 1, _bbox[3] - _bbox[1] + 1]
</DeepExtract>
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
|
def proposal2json(dataset, results):
json_results = []
for idx in range(len(dataset)):
img_id = dataset.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
_bbox = bboxes[i].tolist()
data['bbox'] = [_bbox[0], _bbox[1], _bbox[2] - _bbox[0] + 1, _bbox[3] - _bbox[1] + 1]
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
|
C-HOI
|
positive
|
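
The bbox handling in proposal2json above converts corner-format boxes to COCO's [x, y, w, h]; a self-contained version of that conversion (the "+ 1" keeps the inclusive-pixel convention used in the row above):

def xyxy_to_xywh(bbox):
    # [x1, y1, x2, y2] -> [x, y, w, h] with inclusive pixel widths/heights.
    x1, y1, x2, y2 = bbox[:4]
    return [x1, y1, x2 - x1 + 1, y2 - y1 + 1]

assert xyxy_to_xywh([10, 20, 30, 60]) == [10, 20, 21, 41]
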
def evaluate(self, data, batch_size=128, verbose=0, sample_weight={}):
sample_weight = [standardize_weights(data[name], sample_weight=sample_weight.get(name)) for name in self.output_order]
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
<DeepExtract>
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for (batch_index, (batch_start, batch_end)) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
ins_batch = slice_X(ins, batch_ids)
batch_outs = self._test(*ins_batch)
if type(batch_outs) == list:
if batch_index == 0:
for batch_out in enumerate(batch_outs):
outs.append(0.0)
for (i, batch_out) in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.0)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for (i, out) in enumerate(outs):
outs[i] /= nb_sample
outs = outs
</DeepExtract>
return outs[0]
|
def evaluate(self, data, batch_size=128, verbose=0, sample_weight={}):
sample_weight = [standardize_weights(data[name], sample_weight=sample_weight.get(name)) for name in self.output_order]
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for (batch_index, (batch_start, batch_end)) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
ins_batch = slice_X(ins, batch_ids)
batch_outs = self._test(*ins_batch)
if type(batch_outs) == list:
if batch_index == 0:
for batch_out in enumerate(batch_outs):
outs.append(0.0)
for (i, batch_out) in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.0)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for (i, out) in enumerate(outs):
outs[i] /= nb_sample
outs = outs
return outs[0]
|
deep-coref
|
positive
|
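
The evaluate rows above accumulate each batch's metrics weighted by batch size and then divide by the total sample count. A compact NumPy sketch of that aggregation:

import numpy as np

def average_batch_metrics(batch_metrics, batch_sizes):
    # batch_metrics: (n_batches, n_metrics); batch_sizes: (n_batches,)
    batch_metrics = np.asarray(batch_metrics, dtype=float)
    batch_sizes = np.asarray(batch_sizes, dtype=float)
    totals = (batch_metrics * batch_sizes[:, None]).sum(axis=0)
    return totals / batch_sizes.sum()

# e.g. two batches of sizes 128 and 72 with a single loss metric each
print(average_batch_metrics([[0.50], [0.30]], [128, 72]))  # [0.428]
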
def _load_extension(self, extension=None):
if extension == None:
for x in self.preloads:
if x in self.bot.extensions:
self.bot.dispatch('unloaded_extension', self.bot.extensions.get(x))
try:
self.bot.unload_extension(x)
except:
print('{} failed to unload!'.format(x))
try:
self.bot.load_extension(x)
self.bot.dispatch('loaded_extension', self.bot.extensions.get(x))
except:
print('{} failed to load!'.format(x))
cog_count = len(self.preloads)
cog_loaded = len(self.preloads)
for ext in os.listdir('Cogs'):
if ext.lower().endswith('.py') and (not ext.lower() in ['settings.py', 'mute.py']):
cog_count += 1
try:
if 'Cogs.' + ext[:-3] in self.bot.extensions:
self.bot.dispatch('unloaded_extension', self.bot.extensions.get('Cogs.' + ext[:-3]))
self.bot.unload_extension('Cogs.' + ext[:-3])
except Exception as e:
print('{} failed to unload!'.format(ext[:-3]))
print(' {}'.format(e))
pass
try:
self.bot.load_extension('Cogs.' + ext[:-3])
self.bot.dispatch('loaded_extension', self.bot.extensions.get('Cogs.' + ext[:-3]))
cog_loaded += 1
except Exception as e:
print('{} failed to load!'.format(ext[:-3]))
print(' {}'.format(e))
pass
return (cog_loaded, cog_count)
else:
for ext in os.listdir('Cogs'):
if ext[:-3].lower() == extension.lower():
<DeepExtract>
ext_list = []
for ext in os.listdir('Cogs'):
if not ext.lower().endswith('.py') or ext == ext:
continue
if ext[:-3] in self._get_imports(ext):
ext_list.append(ext)
to_reload = ext_list
</DeepExtract>
to_reload.insert(0, ext)
total = len(to_reload)
success = 0
for e in to_reload:
try:
if 'Cogs.' + e[:-3] in self.bot.extensions:
self.bot.dispatch('unloaded_extension', self.bot.extensions.get('Cogs.' + e[:-3]))
self.bot.unload_extension('Cogs.' + e[:-3])
except Exception as er:
print('{} failed to unload!'.format(e[:-3]))
print(' {}'.format(er))
pass
try:
self.bot.load_extension('Cogs.' + e[:-3])
self.bot.dispatch('loaded_extension', self.bot.extensions.get('Cogs.' + e[:-3]))
success += 1
except Exception as er:
print('{} failed to load!'.format(e[:-3]))
print(' {}'.format(er))
return (success, total)
return (0, 0)
|
def _load_extension(self, extension=None):
if extension == None:
for x in self.preloads:
if x in self.bot.extensions:
self.bot.dispatch('unloaded_extension', self.bot.extensions.get(x))
try:
self.bot.unload_extension(x)
except:
print('{} failed to unload!'.format(x))
try:
self.bot.load_extension(x)
self.bot.dispatch('loaded_extension', self.bot.extensions.get(x))
except:
print('{} failed to load!'.format(x))
cog_count = len(self.preloads)
cog_loaded = len(self.preloads)
for ext in os.listdir('Cogs'):
if ext.lower().endswith('.py') and (not ext.lower() in ['settings.py', 'mute.py']):
cog_count += 1
try:
if 'Cogs.' + ext[:-3] in self.bot.extensions:
self.bot.dispatch('unloaded_extension', self.bot.extensions.get('Cogs.' + ext[:-3]))
self.bot.unload_extension('Cogs.' + ext[:-3])
except Exception as e:
print('{} failed to unload!'.format(ext[:-3]))
print(' {}'.format(e))
pass
try:
self.bot.load_extension('Cogs.' + ext[:-3])
self.bot.dispatch('loaded_extension', self.bot.extensions.get('Cogs.' + ext[:-3]))
cog_loaded += 1
except Exception as e:
print('{} failed to load!'.format(ext[:-3]))
print(' {}'.format(e))
pass
return (cog_loaded, cog_count)
else:
for ext in os.listdir('Cogs'):
if ext[:-3].lower() == extension.lower():
ext_list = []
for ext in os.listdir('Cogs'):
if not ext.lower().endswith('.py') or ext == ext:
continue
if ext[:-3] in self._get_imports(ext):
ext_list.append(ext)
to_reload = ext_list
to_reload.insert(0, ext)
total = len(to_reload)
success = 0
for e in to_reload:
try:
if 'Cogs.' + e[:-3] in self.bot.extensions:
self.bot.dispatch('unloaded_extension', self.bot.extensions.get('Cogs.' + e[:-3]))
self.bot.unload_extension('Cogs.' + e[:-3])
except Exception as er:
print('{} failed to unload!'.format(e[:-3]))
print(' {}'.format(er))
pass
try:
self.bot.load_extension('Cogs.' + e[:-3])
self.bot.dispatch('loaded_extension', self.bot.extensions.get('Cogs.' + e[:-3]))
success += 1
except Exception as er:
print('{} failed to load!'.format(e[:-3]))
print(' {}'.format(er))
return (success, total)
return (0, 0)
|
CorpBot.py
|
positive
|
def list_file():
<DeepExtract>
if not os.path.exists(MAPPING_FILE):
fileinfo = None
asset_group_id_str = binascii.b2a_hex(asset_group_id).decode()
with open(MAPPING_FILE, 'r') as f:
mapping = json.load(f)
if mapping is None:
fileinfo = None
if asset_group_id_str in mapping:
result = []
for name in mapping[asset_group_id_str]:
result.append(name)
fileinfo = result
fileinfo = None
</DeepExtract>
if fileinfo is None:
print('No files present in local mapping cache. So, asset_id is not known...')
sys.exit(1)
print('%s' % '\n'.join(fileinfo))
|
def list_file():
if not os.path.exists(MAPPING_FILE):
fileinfo = None
asset_group_id_str = binascii.b2a_hex(asset_group_id).decode()
with open(MAPPING_FILE, 'r') as f:
mapping = json.load(f)
if mapping is None:
fileinfo = None
if asset_group_id_str in mapping:
result = []
for name in mapping[asset_group_id_str]:
result.append(name)
fileinfo = result
fileinfo = None
if fileinfo is None:
print('No files present in local mapping cache. So, asset_id is not known...')
sys.exit(1)
print('%s' % '\n'.join(fileinfo))
|
bbc1
|
positive
|
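
In the list_file rows above, the inlined helper's early returns became plain assignments, so the trailing fileinfo = None always overwrites any result. A sketch of the same lookup as a standalone helper with explicit early returns (names taken from the row above):

import binascii
import json
import os

def get_file_names(mapping_file: str, asset_group_id: bytes):
    # Return the list of file names registered under the asset group, or None.
    if not os.path.exists(mapping_file):
        return None
    key = binascii.b2a_hex(asset_group_id).decode()
    with open(mapping_file, 'r') as f:
        mapping = json.load(f)
    if not mapping or key not in mapping:
        return None
    return list(mapping[key])
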
def anchor_beam(self, delta: float=0.05, epsilon: float=0.1, desired_confidence: float=1.0, beam_size: int=1, epsilon_stop: float=0.05, min_samples_start: int=100, max_anchor_size: Optional[int]=None, stop_on_first: bool=False, batch_size: int=100, coverage_samples: int=10000, verbose: bool=False, verbose_every: int=1, **kwargs) -> dict:
"""
Uses the KL-LUCB algorithm (Kaufmann and Kalyanakrishnan, 2013) together with additional sampling to search
feature sets (anchors) that guarantee the prediction made by a classifier model. The search is greedy if
``beam_size=1``. Otherwise, at each of the `max_anchor_size` steps, `beam_size` solutions are explored.
    By construction, solutions found have high precision (defined as the expected number of times the classifier
    makes the same prediction when queried with the feature subset combined with arbitrary samples drawn from a
    noise distribution). The algorithm maximises the coverage of the solution found - the frequency of occurrence
    of records containing the feature subset in the set of samples.
Parameters
----------
delta
Used to compute `beta`.
epsilon
Precision bound tolerance for convergence.
desired_confidence
Desired level of precision (`tau` in `paper <https://homes.cs.washington.edu/~marcotcr/aaai18.pdf>`_).
beam_size
Beam width.
epsilon_stop
Confidence bound margin around desired precision.
min_samples_start
Min number of initial samples.
max_anchor_size
Max number of features in result.
stop_on_first
Stop on first valid result found.
coverage_samples
Number of samples from which to build a coverage set.
batch_size
Number of samples used for an arm evaluation.
verbose
Whether to print intermediate LUCB & anchor selection output.
verbose_every
Print intermediate output every verbose_every steps.
Returns
-------
Explanation dictionary containing anchors with metadata like coverage and precision and examples.
"""
<DeepExtract>
[coverage_data] = self.sample_fcn((0, ()), coverage_samples, compute_labels=False)
coverage_data = coverage_data
</DeepExtract>
<DeepExtract>
prealloc_size = batch_size * self.sample_cache_size
self.state: dict = {'t_coverage': defaultdict(lambda : 0.0), 't_coverage_idx': defaultdict(set), 't_covered_true': defaultdict(None), 't_covered_false': defaultdict(None), 't_idx': defaultdict(set), 't_nsamples': defaultdict(lambda : 0.0), 't_order': defaultdict(list), 't_positives': defaultdict(lambda : 0.0), 'prealloc_size': prealloc_size, 'data': np.zeros((prealloc_size, coverage_data.shape[1]), coverage_data.dtype), 'labels': np.zeros(prealloc_size), 'current_idx': 0, 'n_features': coverage_data.shape[1], 'coverage_data': coverage_data}
self.state['t_order'][()] = ()
</DeepExtract>
<DeepExtract>
for anchor in [()]:
if anchor not in self.state['t_order']:
self.state['t_order'][anchor] = list(anchor)
sample_stats: List = []
pos: Tuple = tuple()
total: Tuple = tuple()
samples_iter = [self.sample_fcn((i, tuple(self.state['t_order'][anchor])), num_samples=min_samples_start) for (i, anchor) in enumerate([()])]
for (samples, anchor) in zip(samples_iter, [()]):
(covered_true, covered_false, labels, *additionals, _) = samples
sample_stats.append(self.update_state(covered_true, covered_false, labels, additionals, anchor))
(pos, total) = list(zip(*sample_stats))
((pos,), (total,)) = (pos, total)
</DeepExtract>
mean = np.array([pos / total])
beta = np.log(1.0 / delta)
<DeepExtract>
um = mean.copy()
lm = np.clip(mean - np.sqrt(np.array([beta / total]) / 2.0), 0.0, 1.0)
for _ in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(mean, qm) > np.array([beta / total])
kl_lt_idx = np.logical_not(kl_gt_idx)
lm[kl_gt_idx] = qm[kl_gt_idx]
um[kl_lt_idx] = qm[kl_lt_idx]
lb = lm
</DeepExtract>
while mean > desired_confidence and lb < desired_confidence - epsilon:
<DeepExtract>
for anchor in [()]:
if anchor not in self.state['t_order']:
self.state['t_order'][anchor] = list(anchor)
sample_stats: List = []
pos: Tuple = tuple()
total: Tuple = tuple()
samples_iter = [self.sample_fcn((i, tuple(self.state['t_order'][anchor])), num_samples=batch_size) for (i, anchor) in enumerate([()])]
for (samples, anchor) in zip(samples_iter, [()]):
(covered_true, covered_false, labels, *additionals, _) = samples
sample_stats.append(self.update_state(covered_true, covered_false, labels, additionals, anchor))
(pos, total) = list(zip(*sample_stats))
((n_pos,), (n_total,)) = (pos, total)
</DeepExtract>
pos += n_pos
total += n_total
mean = np.array([pos / total])
<DeepExtract>
um = mean.copy()
lm = np.clip(mean - np.sqrt(np.array([beta / total]) / 2.0), 0.0, 1.0)
for _ in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(mean, qm) > np.array([beta / total])
kl_lt_idx = np.logical_not(kl_gt_idx)
lm[kl_gt_idx] = qm[kl_gt_idx]
um[kl_lt_idx] = qm[kl_lt_idx]
lb = lm
</DeepExtract>
if lb > desired_confidence:
return {'feature': [], 'mean': [], 'num_preds': total, 'precision': [], 'coverage': [], 'examples': [], 'all_precision': mean, 'success': True}
(current_size, best_coverage) = (1, -1)
best_of_size: Dict[int, list] = {0: []}
best_anchor = ()
if max_anchor_size is None:
max_anchor_size = self.state['n_features']
while current_size <= max_anchor_size:
<DeepExtract>
state = self.state
all_features = range(state['n_features'])
coverage_data = state['coverage_data']
current_idx = state['current_idx']
data = state['data'][:current_idx]
labels = state['labels'][:current_idx]
if len(best_of_size[current_size - 1]) == 0:
tuples = [(x,) for x in all_features]
for x in tuples:
pres = data[:, x[0]].nonzero()[0]
state['t_idx'][x] = set(pres)
state['t_nsamples'][x] = float(len(pres))
state['t_positives'][x] = float(labels[pres].sum())
state['t_order'][x].append(x[0])
state['t_coverage_idx'][x] = set(coverage_data[:, x[0]].nonzero()[0])
state['t_coverage'][x] = float(len(state['t_coverage_idx'][x])) / coverage_data.shape[0]
anchors = tuples
new_tuples: Set[tuple] = set()
for f in all_features:
for t in best_of_size[current_size - 1]:
new_t = self._sort(t + (f,), allow_duplicates=False)
if len(new_t) != len(t) + 1:
continue
if new_t not in new_tuples:
new_tuples.add(new_t)
state['t_order'][new_t] = copy.deepcopy(state['t_order'][t])
state['t_order'][new_t].append(f)
state['t_coverage_idx'][new_t] = state['t_coverage_idx'][t].intersection(state['t_coverage_idx'][f,])
state['t_coverage'][new_t] = float(len(state['t_coverage_idx'][new_t])) / coverage_data.shape[0]
t_idx = np.array(list(state['t_idx'][t]))
t_data = state['data'][t_idx]
present = np.where(t_data[:, f] == 1)[0]
state['t_idx'][new_t] = set(t_idx[present])
idx_list = list(state['t_idx'][new_t])
state['t_nsamples'][new_t] = float(len(idx_list))
state['t_positives'][new_t] = np.sum(state['labels'][idx_list])
anchors = list(new_tuples)
</DeepExtract>
anchors = [anchor for anchor in anchors if self.state['t_coverage'][anchor] > best_coverage]
if len(anchors) == 0:
break
<DeepExtract>
def array_factory(size: tuple):
stats = lambda : np.zeros(size)
state = self.state
stats: Dict[str, np.ndarray] = defaultdict(array_factory((len(anchors),)))
for (i, anchor) in enumerate(anchors):
stats['n_samples'][i] = state['t_nsamples'][anchor]
stats['positives'][i] = state['t_positives'][anchor]
if coverages:
stats['coverages'][i] = state['t_coverage'][anchor]
stats = stats
</DeepExtract>
<DeepExtract>
n_features = len(anchors)
(n_samples, positives) = (stats['n_samples'], stats['positives'])
(anchors_to_sample, anchors_idx) = ([], [])
for f in np.where(n_samples == 0)[0]:
anchors_to_sample.append(anchors[f])
anchors_idx.append(f)
if anchors_idx:
(pos, total) = self.draw_samples(anchors_to_sample, 1)
positives[anchors_idx] += pos
n_samples[anchors_idx] += total
if n_features == min(beam_size, len(anchors)):
candidate_anchors = np.arange(n_features)
means = positives / n_samples
(ub, lb) = (np.zeros(n_samples.shape), np.zeros(n_samples.shape))
t = 1
crit_a_idx = self.select_critical_arms(means, ub, lb, n_samples, delta, min(beam_size, len(anchors)), t)
B = ub[crit_a_idx.ut] - lb[crit_a_idx.lt]
verbose_count = 0
while B > epsilon:
verbose_count += 1
if verbose and verbose_count % verbose_every == 0:
(ut, lt) = crit_a_idx
print('Best: %d (mean:%.10f, n: %d, lb:%.4f)' % (lt, means[lt], n_samples[lt], lb[lt]), end=' ')
print('Worst: %d (mean:%.4f, n: %d, ub:%.4f)' % (ut, means[ut], n_samples[ut], ub[ut]), end=' ')
print('B = %.2f' % B)
selected_anchors = [anchors[idx] for idx in crit_a_idx]
(pos, total) = self.draw_samples(selected_anchors, batch_size)
idx = list(crit_a_idx)
positives[idx] += pos
n_samples[idx] += total
means = positives / n_samples
t += 1
crit_a_idx = self.select_critical_arms(means, ub, lb, n_samples, delta, min(beam_size, len(anchors)), t)
B = ub[crit_a_idx.ut] - lb[crit_a_idx.lt]
sorted_means = np.argsort(means)
candidate_anchors = sorted_means[-min(beam_size, len(anchors)):]
</DeepExtract>
best_of_size[current_size] = [anchors[index] for index in candidate_anchors]
<DeepExtract>
def array_factory(size: tuple):
stats = lambda : np.zeros(size)
state = self.state
stats: Dict[str, np.ndarray] = defaultdict(array_factory((len(best_of_size[current_size]),)))
for (i, anchor) in enumerate(best_of_size[current_size]):
stats['n_samples'][i] = state['t_nsamples'][anchor]
stats['positives'][i] = state['t_positives'][anchor]
if True:
stats['coverages'][i] = state['t_coverage'][anchor]
stats = stats
</DeepExtract>
(positives, n_samples) = (stats['positives'], stats['n_samples'])
beta = np.log(1.0 / (delta / (1 + (beam_size - 1) * self.state['n_features'])))
kl_constraints = beta / n_samples
means = stats['positives'] / stats['n_samples']
<DeepExtract>
um = means.copy()
lm = np.clip(means - np.sqrt(kl_constraints / 2.0), 0.0, 1.0)
for _ in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(means, qm) > kl_constraints
kl_lt_idx = np.logical_not(kl_gt_idx)
lm[kl_gt_idx] = qm[kl_gt_idx]
um[kl_lt_idx] = qm[kl_lt_idx]
lbs = lm
</DeepExtract>
<DeepExtract>
lm = means.copy()
um = np.minimum(np.minimum(means + np.sqrt(kl_constraints / 2.0), 1.0), 1.0)
for j in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(means, qm) > kl_constraints
kl_lt_idx = np.logical_not(kl_gt_idx)
um[kl_gt_idx] = qm[kl_gt_idx]
lm[kl_lt_idx] = qm[kl_lt_idx]
ubs = um
</DeepExtract>
if verbose:
print('Best of size ', current_size, ':')
for (i, mean, lb, ub) in zip(candidate_anchors, means, lbs, ubs):
print(i, mean, lb, ub)
<DeepExtract>
continue_sampling = (means >= desired_confidence) & (lbs < desired_confidence - epsilon_stop) | (means < desired_confidence) & (ubs >= desired_confidence + epsilon_stop)
</DeepExtract>
while continue_sampling.any():
selected_anchors = [anchors[idx] for idx in candidate_anchors[continue_sampling]]
<DeepExtract>
for anchor in selected_anchors:
if anchor not in self.state['t_order']:
self.state['t_order'][anchor] = list(anchor)
sample_stats: List = []
pos: Tuple = tuple()
total: Tuple = tuple()
samples_iter = [self.sample_fcn((i, tuple(self.state['t_order'][anchor])), num_samples=batch_size) for (i, anchor) in enumerate(selected_anchors)]
for (samples, anchor) in zip(samples_iter, selected_anchors):
(covered_true, covered_false, labels, *additionals, _) = samples
sample_stats.append(self.update_state(covered_true, covered_false, labels, additionals, anchor))
(pos, total) = list(zip(*sample_stats))
(pos, total) = (pos, total)
</DeepExtract>
positives[continue_sampling] += pos
n_samples[continue_sampling] += total
means[continue_sampling] = positives[continue_sampling] / n_samples[continue_sampling]
kl_constraints[continue_sampling] = beta / n_samples[continue_sampling]
<DeepExtract>
um = means[continue_sampling].copy()
lm = np.clip(means[continue_sampling] - np.sqrt(kl_constraints[continue_sampling] / 2.0), 0.0, 1.0)
for _ in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(means[continue_sampling], qm) > kl_constraints[continue_sampling]
kl_lt_idx = np.logical_not(kl_gt_idx)
lm[kl_gt_idx] = qm[kl_gt_idx]
um[kl_lt_idx] = qm[kl_lt_idx]
lbs[continue_sampling] = lm
</DeepExtract>
<DeepExtract>
lm = means[continue_sampling].copy()
um = np.minimum(np.minimum(means[continue_sampling] + np.sqrt(kl_constraints[continue_sampling] / 2.0), 1.0), 1.0)
for j in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(means[continue_sampling], qm) > kl_constraints[continue_sampling]
kl_lt_idx = np.logical_not(kl_gt_idx)
um[kl_gt_idx] = qm[kl_gt_idx]
lm[kl_lt_idx] = qm[kl_lt_idx]
ubs[continue_sampling] = um
</DeepExtract>
<DeepExtract>
continue_sampling = (means >= desired_confidence) & (lbs < desired_confidence - epsilon_stop) | (means < desired_confidence) & (ubs >= desired_confidence + epsilon_stop)
</DeepExtract>
coverages = stats['coverages']
valid_anchors = (means >= desired_confidence) & (lbs > desired_confidence - epsilon_stop)
better_anchors = (valid_anchors & (coverages > best_coverage)).nonzero()[0]
if verbose:
for (i, valid, mean, lb, ub, coverage) in zip(candidate_anchors, valid_anchors, means, lbs, ubs, coverages):
t = anchors[i]
print('%s mean = %.2f lb = %.2f ub = %.2f coverage: %.2f n: %d' % (t, mean, lb, ub, coverage, self.state['t_nsamples'][t]))
if valid:
print('Found eligible result ', t, 'Coverage:', coverage, 'Is best?', coverage > best_coverage)
if better_anchors.size > 0:
best_anchor_idx = better_anchors[np.argmax(coverages[better_anchors])]
best_coverage = coverages[best_anchor_idx]
best_anchor = anchors[candidate_anchors[best_anchor_idx]]
if best_coverage == 1.0 or stop_on_first:
break
current_size += 1
if not best_anchor:
success = False
logger.warning(f'Could not find an anchor satisfying the {desired_confidence} precision constraint. Now returning the best non-eligible result. The desired precision threshold might not be achieved due to the quantile-based discretisation of the numerical features. The resolution of the bins may be too large to find an anchor of required precision. Consider increasing the number of bins in `disc_perc`, but note that for some numerical distribution (e.g. skewed distribution) it may not help.')
anchors = []
for i in range(0, current_size):
anchors.extend(best_of_size[i])
<DeepExtract>
def array_factory(size: tuple):
stats = lambda : np.zeros(size)
state = self.state
stats: Dict[str, np.ndarray] = defaultdict(array_factory((len(anchors),)))
for (i, anchor) in enumerate(anchors):
stats['n_samples'][i] = state['t_nsamples'][anchor]
stats['positives'][i] = state['t_positives'][anchor]
if coverages:
stats['coverages'][i] = state['t_coverage'][anchor]
stats = stats
</DeepExtract>
<DeepExtract>
n_features = len(anchors)
(n_samples, positives) = (stats['n_samples'], stats['positives'])
(anchors_to_sample, anchors_idx) = ([], [])
for f in np.where(n_samples == 0)[0]:
anchors_to_sample.append(anchors[f])
anchors_idx.append(f)
if anchors_idx:
(pos, total) = self.draw_samples(anchors_to_sample, 1)
positives[anchors_idx] += pos
n_samples[anchors_idx] += total
if n_features == 1:
candidate_anchors = np.arange(n_features)
means = positives / n_samples
(ub, lb) = (np.zeros(n_samples.shape), np.zeros(n_samples.shape))
t = 1
crit_a_idx = self.select_critical_arms(means, ub, lb, n_samples, delta, 1, t)
B = ub[crit_a_idx.ut] - lb[crit_a_idx.lt]
verbose_count = 0
while B > epsilon:
verbose_count += 1
if verbose and verbose_count % verbose_every == 0:
(ut, lt) = crit_a_idx
print('Best: %d (mean:%.10f, n: %d, lb:%.4f)' % (lt, means[lt], n_samples[lt], lb[lt]), end=' ')
print('Worst: %d (mean:%.4f, n: %d, ub:%.4f)' % (ut, means[ut], n_samples[ut], ub[ut]), end=' ')
print('B = %.2f' % B)
selected_anchors = [anchors[idx] for idx in crit_a_idx]
(pos, total) = self.draw_samples(selected_anchors, batch_size)
idx = list(crit_a_idx)
positives[idx] += pos
n_samples[idx] += total
means = positives / n_samples
t += 1
crit_a_idx = self.select_critical_arms(means, ub, lb, n_samples, delta, 1, t)
B = ub[crit_a_idx.ut] - lb[crit_a_idx.lt]
sorted_means = np.argsort(means)
candidate_anchors = sorted_means[-1:]
</DeepExtract>
best_anchor = anchors[candidate_anchors[0]]
else:
success = True
return self.get_anchor_metadata(best_anchor, success, batch_size=batch_size)
|
def anchor_beam(self, delta: float=0.05, epsilon: float=0.1, desired_confidence: float=1.0, beam_size: int=1, epsilon_stop: float=0.05, min_samples_start: int=100, max_anchor_size: Optional[int]=None, stop_on_first: bool=False, batch_size: int=100, coverage_samples: int=10000, verbose: bool=False, verbose_every: int=1, **kwargs) -> dict:
"""
Uses the KL-LUCB algorithm (Kaufmann and Kalyanakrishnan, 2013) together with additional sampling to search
feature sets (anchors) that guarantee the prediction made by a classifier model. The search is greedy if
``beam_size=1``. Otherwise, at each of the `max_anchor_size` steps, `beam_size` solutions are explored.
    By construction, solutions found have high precision (defined as the expected number of times the classifier
    makes the same prediction when queried with the feature subset combined with arbitrary samples drawn from a
    noise distribution). The algorithm maximises the coverage of the solution found - the frequency of occurrence
    of records containing the feature subset in the set of samples.
Parameters
----------
delta
Used to compute `beta`.
epsilon
Precision bound tolerance for convergence.
desired_confidence
Desired level of precision (`tau` in `paper <https://homes.cs.washington.edu/~marcotcr/aaai18.pdf>`_).
beam_size
Beam width.
epsilon_stop
Confidence bound margin around desired precision.
min_samples_start
Min number of initial samples.
max_anchor_size
Max number of features in result.
stop_on_first
Stop on first valid result found.
coverage_samples
Number of samples from which to build a coverage set.
batch_size
Number of samples used for an arm evaluation.
verbose
Whether to print intermediate LUCB & anchor selection output.
verbose_every
Print intermediate output every verbose_every steps.
Returns
-------
Explanation dictionary containing anchors with metadata like coverage and precision and examples.
"""
[coverage_data] = self.sample_fcn((0, ()), coverage_samples, compute_labels=False)
coverage_data = coverage_data
prealloc_size = batch_size * self.sample_cache_size
self.state: dict = {'t_coverage': defaultdict(lambda : 0.0), 't_coverage_idx': defaultdict(set), 't_covered_true': defaultdict(None), 't_covered_false': defaultdict(None), 't_idx': defaultdict(set), 't_nsamples': defaultdict(lambda : 0.0), 't_order': defaultdict(list), 't_positives': defaultdict(lambda : 0.0), 'prealloc_size': prealloc_size, 'data': np.zeros((prealloc_size, coverage_data.shape[1]), coverage_data.dtype), 'labels': np.zeros(prealloc_size), 'current_idx': 0, 'n_features': coverage_data.shape[1], 'coverage_data': coverage_data}
self.state['t_order'][()] = ()
for anchor in [()]:
if anchor not in self.state['t_order']:
self.state['t_order'][anchor] = list(anchor)
sample_stats: List = []
pos: Tuple = tuple()
total: Tuple = tuple()
samples_iter = [self.sample_fcn((i, tuple(self.state['t_order'][anchor])), num_samples=min_samples_start) for (i, anchor) in enumerate([()])]
for (samples, anchor) in zip(samples_iter, [()]):
(covered_true, covered_false, labels, *additionals, _) = samples
sample_stats.append(self.update_state(covered_true, covered_false, labels, additionals, anchor))
(pos, total) = list(zip(*sample_stats))
((pos,), (total,)) = (pos, total)
mean = np.array([pos / total])
beta = np.log(1.0 / delta)
um = mean.copy()
lm = np.clip(mean - np.sqrt(np.array([beta / total]) / 2.0), 0.0, 1.0)
for _ in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(mean, qm) > np.array([beta / total])
kl_lt_idx = np.logical_not(kl_gt_idx)
lm[kl_gt_idx] = qm[kl_gt_idx]
um[kl_lt_idx] = qm[kl_lt_idx]
lb = lm
while mean > desired_confidence and lb < desired_confidence - epsilon:
for anchor in [()]:
if anchor not in self.state['t_order']:
self.state['t_order'][anchor] = list(anchor)
sample_stats: List = []
pos: Tuple = tuple()
total: Tuple = tuple()
samples_iter = [self.sample_fcn((i, tuple(self.state['t_order'][anchor])), num_samples=batch_size) for (i, anchor) in enumerate([()])]
for (samples, anchor) in zip(samples_iter, [()]):
(covered_true, covered_false, labels, *additionals, _) = samples
sample_stats.append(self.update_state(covered_true, covered_false, labels, additionals, anchor))
(pos, total) = list(zip(*sample_stats))
((n_pos,), (n_total,)) = (pos, total)
pos += n_pos
total += n_total
mean = np.array([pos / total])
um = mean.copy()
lm = np.clip(mean - np.sqrt(np.array([beta / total]) / 2.0), 0.0, 1.0)
for _ in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(mean, qm) > np.array([beta / total])
kl_lt_idx = np.logical_not(kl_gt_idx)
lm[kl_gt_idx] = qm[kl_gt_idx]
um[kl_lt_idx] = qm[kl_lt_idx]
lb = lm
if lb > desired_confidence:
return {'feature': [], 'mean': [], 'num_preds': total, 'precision': [], 'coverage': [], 'examples': [], 'all_precision': mean, 'success': True}
(current_size, best_coverage) = (1, -1)
best_of_size: Dict[int, list] = {0: []}
best_anchor = ()
if max_anchor_size is None:
max_anchor_size = self.state['n_features']
while current_size <= max_anchor_size:
state = self.state
all_features = range(state['n_features'])
coverage_data = state['coverage_data']
current_idx = state['current_idx']
data = state['data'][:current_idx]
labels = state['labels'][:current_idx]
if len(best_of_size[current_size - 1]) == 0:
tuples = [(x,) for x in all_features]
for x in tuples:
pres = data[:, x[0]].nonzero()[0]
state['t_idx'][x] = set(pres)
state['t_nsamples'][x] = float(len(pres))
state['t_positives'][x] = float(labels[pres].sum())
state['t_order'][x].append(x[0])
state['t_coverage_idx'][x] = set(coverage_data[:, x[0]].nonzero()[0])
state['t_coverage'][x] = float(len(state['t_coverage_idx'][x])) / coverage_data.shape[0]
anchors = tuples
new_tuples: Set[tuple] = set()
for f in all_features:
for t in best_of_size[current_size - 1]:
new_t = self._sort(t + (f,), allow_duplicates=False)
if len(new_t) != len(t) + 1:
continue
if new_t not in new_tuples:
new_tuples.add(new_t)
state['t_order'][new_t] = copy.deepcopy(state['t_order'][t])
state['t_order'][new_t].append(f)
state['t_coverage_idx'][new_t] = state['t_coverage_idx'][t].intersection(state['t_coverage_idx'][f,])
state['t_coverage'][new_t] = float(len(state['t_coverage_idx'][new_t])) / coverage_data.shape[0]
t_idx = np.array(list(state['t_idx'][t]))
t_data = state['data'][t_idx]
present = np.where(t_data[:, f] == 1)[0]
state['t_idx'][new_t] = set(t_idx[present])
idx_list = list(state['t_idx'][new_t])
state['t_nsamples'][new_t] = float(len(idx_list))
state['t_positives'][new_t] = np.sum(state['labels'][idx_list])
anchors = list(new_tuples)
anchors = [anchor for anchor in anchors if self.state['t_coverage'][anchor] > best_coverage]
if len(anchors) == 0:
break
def array_factory(size: tuple):
stats = lambda : np.zeros(size)
state = self.state
stats: Dict[str, np.ndarray] = defaultdict(array_factory((len(anchors),)))
for (i, anchor) in enumerate(anchors):
stats['n_samples'][i] = state['t_nsamples'][anchor]
stats['positives'][i] = state['t_positives'][anchor]
if coverages:
stats['coverages'][i] = state['t_coverage'][anchor]
stats = stats
n_features = len(anchors)
(n_samples, positives) = (stats['n_samples'], stats['positives'])
(anchors_to_sample, anchors_idx) = ([], [])
for f in np.where(n_samples == 0)[0]:
anchors_to_sample.append(anchors[f])
anchors_idx.append(f)
if anchors_idx:
(pos, total) = self.draw_samples(anchors_to_sample, 1)
positives[anchors_idx] += pos
n_samples[anchors_idx] += total
if n_features == min(beam_size, len(anchors)):
candidate_anchors = np.arange(n_features)
means = positives / n_samples
(ub, lb) = (np.zeros(n_samples.shape), np.zeros(n_samples.shape))
t = 1
crit_a_idx = self.select_critical_arms(means, ub, lb, n_samples, delta, min(beam_size, len(anchors)), t)
B = ub[crit_a_idx.ut] - lb[crit_a_idx.lt]
verbose_count = 0
while B > epsilon:
verbose_count += 1
if verbose and verbose_count % verbose_every == 0:
(ut, lt) = crit_a_idx
print('Best: %d (mean:%.10f, n: %d, lb:%.4f)' % (lt, means[lt], n_samples[lt], lb[lt]), end=' ')
print('Worst: %d (mean:%.4f, n: %d, ub:%.4f)' % (ut, means[ut], n_samples[ut], ub[ut]), end=' ')
print('B = %.2f' % B)
selected_anchors = [anchors[idx] for idx in crit_a_idx]
(pos, total) = self.draw_samples(selected_anchors, batch_size)
idx = list(crit_a_idx)
positives[idx] += pos
n_samples[idx] += total
means = positives / n_samples
t += 1
crit_a_idx = self.select_critical_arms(means, ub, lb, n_samples, delta, min(beam_size, len(anchors)), t)
B = ub[crit_a_idx.ut] - lb[crit_a_idx.lt]
sorted_means = np.argsort(means)
candidate_anchors = sorted_means[-min(beam_size, len(anchors)):]
best_of_size[current_size] = [anchors[index] for index in candidate_anchors]
def array_factory(size: tuple):
stats = lambda : np.zeros(size)
state = self.state
stats: Dict[str, np.ndarray] = defaultdict(array_factory((len(best_of_size[current_size]),)))
for (i, anchor) in enumerate(best_of_size[current_size]):
stats['n_samples'][i] = state['t_nsamples'][anchor]
stats['positives'][i] = state['t_positives'][anchor]
if True:
stats['coverages'][i] = state['t_coverage'][anchor]
stats = stats
(positives, n_samples) = (stats['positives'], stats['n_samples'])
beta = np.log(1.0 / (delta / (1 + (beam_size - 1) * self.state['n_features'])))
kl_constraints = beta / n_samples
means = stats['positives'] / stats['n_samples']
um = means.copy()
lm = np.clip(means - np.sqrt(kl_constraints / 2.0), 0.0, 1.0)
for _ in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(means, qm) > kl_constraints
kl_lt_idx = np.logical_not(kl_gt_idx)
lm[kl_gt_idx] = qm[kl_gt_idx]
um[kl_lt_idx] = qm[kl_lt_idx]
lbs = lm
lm = means.copy()
um = np.minimum(np.minimum(means + np.sqrt(kl_constraints / 2.0), 1.0), 1.0)
for j in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(means, qm) > kl_constraints
kl_lt_idx = np.logical_not(kl_gt_idx)
um[kl_gt_idx] = qm[kl_gt_idx]
lm[kl_lt_idx] = qm[kl_lt_idx]
ubs = um
if verbose:
print('Best of size ', current_size, ':')
for (i, mean, lb, ub) in zip(candidate_anchors, means, lbs, ubs):
print(i, mean, lb, ub)
continue_sampling = (means >= desired_confidence) & (lbs < desired_confidence - epsilon_stop) | (means < desired_confidence) & (ubs >= desired_confidence + epsilon_stop)
while continue_sampling.any():
selected_anchors = [anchors[idx] for idx in candidate_anchors[continue_sampling]]
for anchor in selected_anchors:
if anchor not in self.state['t_order']:
self.state['t_order'][anchor] = list(anchor)
sample_stats: List = []
pos: Tuple = tuple()
total: Tuple = tuple()
samples_iter = [self.sample_fcn((i, tuple(self.state['t_order'][anchor])), num_samples=batch_size) for (i, anchor) in enumerate(selected_anchors)]
for (samples, anchor) in zip(samples_iter, selected_anchors):
(covered_true, covered_false, labels, *additionals, _) = samples
sample_stats.append(self.update_state(covered_true, covered_false, labels, additionals, anchor))
(pos, total) = list(zip(*sample_stats))
(pos, total) = (pos, total)
positives[continue_sampling] += pos
n_samples[continue_sampling] += total
means[continue_sampling] = positives[continue_sampling] / n_samples[continue_sampling]
kl_constraints[continue_sampling] = beta / n_samples[continue_sampling]
um = means[continue_sampling].copy()
lm = np.clip(means[continue_sampling] - np.sqrt(kl_constraints[continue_sampling] / 2.0), 0.0, 1.0)
for _ in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(means[continue_sampling], qm) > kl_constraints[continue_sampling]
kl_lt_idx = np.logical_not(kl_gt_idx)
lm[kl_gt_idx] = qm[kl_gt_idx]
um[kl_lt_idx] = qm[kl_lt_idx]
lbs[continue_sampling] = lm
lm = means[continue_sampling].copy()
um = np.minimum(np.minimum(means[continue_sampling] + np.sqrt(kl_constraints[continue_sampling] / 2.0), 1.0), 1.0)
for j in range(1, n_iter):
qm = (um + lm) / 2.0
kl_gt_idx = kl_bernoulli(means[continue_sampling], qm) > kl_constraints[continue_sampling]
kl_lt_idx = np.logical_not(kl_gt_idx)
um[kl_gt_idx] = qm[kl_gt_idx]
lm[kl_lt_idx] = qm[kl_lt_idx]
ubs[continue_sampling] = um
continue_sampling = (means >= desired_confidence) & (lbs < desired_confidence - epsilon_stop) | (means < desired_confidence) & (ubs >= desired_confidence + epsilon_stop)
coverages = stats['coverages']
valid_anchors = (means >= desired_confidence) & (lbs > desired_confidence - epsilon_stop)
better_anchors = (valid_anchors & (coverages > best_coverage)).nonzero()[0]
if verbose:
for (i, valid, mean, lb, ub, coverage) in zip(candidate_anchors, valid_anchors, means, lbs, ubs, coverages):
t = anchors[i]
print('%s mean = %.2f lb = %.2f ub = %.2f coverage: %.2f n: %d' % (t, mean, lb, ub, coverage, self.state['t_nsamples'][t]))
if valid:
print('Found eligible result ', t, 'Coverage:', coverage, 'Is best?', coverage > best_coverage)
if better_anchors.size > 0:
best_anchor_idx = better_anchors[np.argmax(coverages[better_anchors])]
best_coverage = coverages[best_anchor_idx]
best_anchor = anchors[candidate_anchors[best_anchor_idx]]
if best_coverage == 1.0 or stop_on_first:
break
current_size += 1
if not best_anchor:
success = False
logger.warning(f'Could not find an anchor satisfying the {desired_confidence} precision constraint. Now returning the best non-eligible result. The desired precision threshold might not be achieved due to the quantile-based discretisation of the numerical features. The resolution of the bins may be too large to find an anchor of required precision. Consider increasing the number of bins in `disc_perc`, but note that for some numerical distribution (e.g. skewed distribution) it may not help.')
anchors = []
for i in range(0, current_size):
anchors.extend(best_of_size[i])
def array_factory(size: tuple):
stats = lambda : np.zeros(size)
state = self.state
stats: Dict[str, np.ndarray] = defaultdict(array_factory((len(anchors),)))
for (i, anchor) in enumerate(anchors):
stats['n_samples'][i] = state['t_nsamples'][anchor]
stats['positives'][i] = state['t_positives'][anchor]
if coverages:
stats['coverages'][i] = state['t_coverage'][anchor]
stats = stats
n_features = len(anchors)
(n_samples, positives) = (stats['n_samples'], stats['positives'])
(anchors_to_sample, anchors_idx) = ([], [])
for f in np.where(n_samples == 0)[0]:
anchors_to_sample.append(anchors[f])
anchors_idx.append(f)
if anchors_idx:
(pos, total) = self.draw_samples(anchors_to_sample, 1)
positives[anchors_idx] += pos
n_samples[anchors_idx] += total
if n_features == 1:
candidate_anchors = np.arange(n_features)
means = positives / n_samples
(ub, lb) = (np.zeros(n_samples.shape), np.zeros(n_samples.shape))
t = 1
crit_a_idx = self.select_critical_arms(means, ub, lb, n_samples, delta, 1, t)
B = ub[crit_a_idx.ut] - lb[crit_a_idx.lt]
verbose_count = 0
while B > epsilon:
verbose_count += 1
if verbose and verbose_count % verbose_every == 0:
(ut, lt) = crit_a_idx
print('Best: %d (mean:%.10f, n: %d, lb:%.4f)' % (lt, means[lt], n_samples[lt], lb[lt]), end=' ')
print('Worst: %d (mean:%.4f, n: %d, ub:%.4f)' % (ut, means[ut], n_samples[ut], ub[ut]), end=' ')
print('B = %.2f' % B)
selected_anchors = [anchors[idx] for idx in crit_a_idx]
(pos, total) = self.draw_samples(selected_anchors, batch_size)
idx = list(crit_a_idx)
positives[idx] += pos
n_samples[idx] += total
means = positives / n_samples
t += 1
crit_a_idx = self.select_critical_arms(means, ub, lb, n_samples, delta, 1, t)
B = ub[crit_a_idx.ut] - lb[crit_a_idx.lt]
sorted_means = np.argsort(means)
candidate_anchors = sorted_means[-1:]
best_anchor = anchors[candidate_anchors[0]]
else:
success = True
return self.get_anchor_metadata(best_anchor, success, batch_size=batch_size)
|
alibi
|
positive
|
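The alibi snippet above tightens anchor precision estimates with a KL-LUCB-style bisection over the Bernoulli KL divergence, starting from a Pinsker-inequality bracket. The following is only an illustrative sketch of that bound computation, not code from the dataset row; the function names, the fixed 17-step bisection and the example values are assumptions.

import numpy as np

def kl_bernoulli(p, q, eps=1e-12):
    # KL(Bern(p) || Bern(q)), clipped away from 0 and 1 for numerical safety
    p = np.clip(p, eps, 1 - eps)
    q = np.clip(q, eps, 1 - eps)
    return p * np.log(p / q) + (1 - p) * np.log((1 - p) / (1 - q))

def bernoulli_ucb(mean, constraint, n_iter=17):
    # largest q with KL(mean, q) <= constraint, found by bisection;
    # the initial bracket follows Pinsker's inequality: |mean - q| <= sqrt(constraint / 2)
    lo, hi = mean, min(mean + np.sqrt(constraint / 2.0), 1.0)
    for _ in range(n_iter):
        mid = (lo + hi) / 2.0
        if kl_bernoulli(mean, mid) > constraint:
            hi = mid
        else:
            lo = mid
    return hi

print(bernoulli_ucb(0.9, 0.05))  # upper bound above the 0.9 empirical mean, roughly 0.97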
def simple_test(self, img, img_meta, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
<DeepExtract>
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
x = x
</DeepExtract>
proposal_list = self.simple_test_rpn(x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
(det_bboxes, det_labels) = self.simple_test_bboxes(x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale)
bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
if not self.with_mask:
return bbox_results
else:
segm_results = self.simple_test_mask(x, img_meta, det_bboxes, det_labels, rescale=rescale)
return (bbox_results, segm_results)
|
def simple_test(self, img, img_meta, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
x = x
proposal_list = self.simple_test_rpn(x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
(det_bboxes, det_labels) = self.simple_test_bboxes(x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale)
bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
if not self.with_mask:
return bbox_results
else:
segm_results = self.simple_test_mask(x, img_meta, det_bboxes, det_labels, rescale=rescale)
return (bbox_results, segm_results)
|
Cross-iterationBatchNorm
|
positive
|
def synthesis(params, job):
data = {'foo'}
if options.inner:
if options.file.sliced:
try:
options.file.load()
raise Exception('Allowed unsliced load of sliced file')
except AssertionError:
pass
else:
value = options.file.load()
assert value == data
value = blob.load(options.file.filename())
assert value == data
job.save({'inner': None}, 'inner.pickle', temp=False)
job.json_save({'inner': None}, 'inner.json', temp=False)
else:
job.save(data, 'data', temp=False)
<DeepExtract>
jid = build('test_jobwithfile', options=dict(inner=True, file=JobWithFile(params.jobid, 'data')))
jj = jid.withfile('inner.json', True)
for sliceno in range(params.slices):
assert jid.load('inner.pickle', sliceno) == {'inner': sliceno}
assert jid.json_load('inner.json', sliceno) == {'inner': sliceno}
assert json.load(jj.open(sliceno=sliceno)) == {'inner': sliceno}
assert jid.load('inner.pickle') == {'inner': None}
assert jid.json_load('inner.json') == {'inner': None}
jj = JobWithFile(jid, 'inner.json')
assert json.load(jj.open()) == {'inner': None}
</DeepExtract>
<DeepExtract>
jid = build('test_jobwithfile', options=dict(inner=True, file=job.withfile('data', True)))
jj = jid.withfile('inner.json', True)
for sliceno in range(params.slices):
assert jid.load('inner.pickle', sliceno) == {'inner': sliceno}
assert jid.json_load('inner.json', sliceno) == {'inner': sliceno}
assert json.load(jj.open(sliceno=sliceno)) == {'inner': sliceno}
assert jid.load('inner.pickle') == {'inner': None}
assert jid.json_load('inner.json') == {'inner': None}
jj = JobWithFile(jid, 'inner.json')
assert json.load(jj.open()) == {'inner': None}
</DeepExtract>
|
def synthesis(params, job):
data = {'foo'}
if options.inner:
if options.file.sliced:
try:
options.file.load()
raise Exception('Allowed unsliced load of sliced file')
except AssertionError:
pass
else:
value = options.file.load()
assert value == data
value = blob.load(options.file.filename())
assert value == data
job.save({'inner': None}, 'inner.pickle', temp=False)
job.json_save({'inner': None}, 'inner.json', temp=False)
else:
job.save(data, 'data', temp=False)
jid = build('test_jobwithfile', options=dict(inner=True, file=JobWithFile(params.jobid, 'data')))
jj = jid.withfile('inner.json', True)
for sliceno in range(params.slices):
assert jid.load('inner.pickle', sliceno) == {'inner': sliceno}
assert jid.json_load('inner.json', sliceno) == {'inner': sliceno}
assert json.load(jj.open(sliceno=sliceno)) == {'inner': sliceno}
assert jid.load('inner.pickle') == {'inner': None}
assert jid.json_load('inner.json') == {'inner': None}
jj = JobWithFile(jid, 'inner.json')
assert json.load(jj.open()) == {'inner': None}
jid = build('test_jobwithfile', options=dict(inner=True, file=job.withfile('data', True)))
jj = jid.withfile('inner.json', True)
for sliceno in range(params.slices):
assert jid.load('inner.pickle', sliceno) == {'inner': sliceno}
assert jid.json_load('inner.json', sliceno) == {'inner': sliceno}
assert json.load(jj.open(sliceno=sliceno)) == {'inner': sliceno}
assert jid.load('inner.pickle') == {'inner': None}
assert jid.json_load('inner.json') == {'inner': None}
jj = JobWithFile(jid, 'inner.json')
assert json.load(jj.open()) == {'inner': None}
|
accelerator
|
positive
|
def exportShape(self, nodeList=None, path=None, publish=False, dpSnapshotGrp='dpSnapshot_Grp', keepSnapshot=False, overrideExisting=True, *args):
""" Export control shapes from a given list or all found dpControl transforms in the scene.
It will save a Maya ASCII file with the control shapes snapshots.
If there is no given path, it will ask user where to save the file.
If publish is True, it will use the current location and create the dpShapeIO directory by default.
If keepSnapshot is True, it will parent a backup dpSnapshotGrp group to Wip_Grp and hide it.
If overrideExisting is True, it will delete the old node before create the new snapshot.
"""
currentPath = cmds.file(query=True, sceneName=True)
if not currentPath:
print(self.dpUIinst.langDic[self.dpUIinst.langName]['i201_saveScene'])
return
if not nodeList:
<DeepExtract>
nodeList = []
allList = cmds.ls(selection=False, type='transform')
if allList:
for item in allList:
if cmds.objExists(item + '.' + DPCONTROL) and cmds.getAttr(item + '.' + DPCONTROL):
nodeList.append(item)
nodeList = nodeList
</DeepExtract>
if nodeList:
if not path:
if publish:
dpFolder = currentPath[:currentPath.rfind('/') + 1] + self.dpUIinst.dpData + '/' + self.dpUIinst.dpShape
if not os.path.exists(dpFolder):
os.makedirs(dpFolder)
path = dpFolder + '/' + self.dpUIinst.dpShape + '_' + currentPath[currentPath.rfind('/') + 1:]
else:
pathList = cmds.fileDialog2(fileMode=0, caption='Export Shapes')
if pathList:
path = pathList[0]
if path:
if not path.endswith('.ma'):
path = path.replace('.*', '.ma')
cmds.undoInfo(openChunk=True)
if not cmds.objExists(dpSnapshotGrp):
cmds.group(name=dpSnapshotGrp, empty=True)
for item in nodeList:
snapshotName = item + SNAPSHOT_SUFFIX
if cmds.objExists(snapshotName):
if overrideExisting:
cmds.delete(snapshotName)
dup = cmds.duplicate(item, name=snapshotName)[0]
cmds.setAttr(dup + '.dpControl', 0)
dupChildList = cmds.listRelatives(dup, allDescendents=True, children=True, fullPath=True)
if dupChildList:
toDeleteList = []
for childNode in dupChildList:
if not cmds.objectType(childNode) == 'nurbsCurve':
toDeleteList.append(childNode)
if toDeleteList:
cmds.delete(toDeleteList)
cmds.parent(dup, dpSnapshotGrp)
if cmds.listRelatives(dpSnapshotGrp, allDescendents=True, children=True, type='nurbsCurve'):
cmds.select(dpSnapshotGrp)
cmds.file(rename=path)
cmds.file(exportSelected=True, type='mayaAscii', prompt=False, force=True)
cmds.file(rename=currentPath)
if not cmds.objExists('WIP_Grp'):
keepSnapshot = False
if keepSnapshot:
try:
cmds.parent(dpSnapshotGrp, 'WIP_Grp')
cmds.setAttr(dpSnapshotGrp + '.visibility', 0)
if cmds.objExists('Backup_' + dpSnapshotGrp):
cmds.delete('Backup_' + dpSnapshotGrp)
cmds.rename(dpSnapshotGrp, 'Backup_' + dpSnapshotGrp)
except:
pass
else:
cmds.delete(dpSnapshotGrp)
print('Exported shapes to: {0}'.format(path))
cmds.undoInfo(closeChunk=True)
else:
print(self.dpUIinst.langDic[self.dpUIinst.langName]['i202_noControls'])
|
def exportShape(self, nodeList=None, path=None, publish=False, dpSnapshotGrp='dpSnapshot_Grp', keepSnapshot=False, overrideExisting=True, *args):
""" Export control shapes from a given list or all found dpControl transforms in the scene.
It will save a Maya ASCII file with the control shapes snapshots.
If there is no given path, it will ask user where to save the file.
If publish is True, it will use the current location and create the dpShapeIO directory by default.
If keepSnapshot is True, it will parent a backup dpSnapshotGrp group to Wip_Grp and hide it.
If overrideExisting is True, it will delete the old node before create the new snapshot.
"""
currentPath = cmds.file(query=True, sceneName=True)
if not currentPath:
print(self.dpUIinst.langDic[self.dpUIinst.langName]['i201_saveScene'])
return
if not nodeList:
nodeList = []
allList = cmds.ls(selection=False, type='transform')
if allList:
for item in allList:
if cmds.objExists(item + '.' + DPCONTROL) and cmds.getAttr(item + '.' + DPCONTROL):
nodeList.append(item)
nodeList = nodeList
if nodeList:
if not path:
if publish:
dpFolder = currentPath[:currentPath.rfind('/') + 1] + self.dpUIinst.dpData + '/' + self.dpUIinst.dpShape
if not os.path.exists(dpFolder):
os.makedirs(dpFolder)
path = dpFolder + '/' + self.dpUIinst.dpShape + '_' + currentPath[currentPath.rfind('/') + 1:]
else:
pathList = cmds.fileDialog2(fileMode=0, caption='Export Shapes')
if pathList:
path = pathList[0]
if path:
if not path.endswith('.ma'):
path = path.replace('.*', '.ma')
cmds.undoInfo(openChunk=True)
if not cmds.objExists(dpSnapshotGrp):
cmds.group(name=dpSnapshotGrp, empty=True)
for item in nodeList:
snapshotName = item + SNAPSHOT_SUFFIX
if cmds.objExists(snapshotName):
if overrideExisting:
cmds.delete(snapshotName)
dup = cmds.duplicate(item, name=snapshotName)[0]
cmds.setAttr(dup + '.dpControl', 0)
dupChildList = cmds.listRelatives(dup, allDescendents=True, children=True, fullPath=True)
if dupChildList:
toDeleteList = []
for childNode in dupChildList:
if not cmds.objectType(childNode) == 'nurbsCurve':
toDeleteList.append(childNode)
if toDeleteList:
cmds.delete(toDeleteList)
cmds.parent(dup, dpSnapshotGrp)
if cmds.listRelatives(dpSnapshotGrp, allDescendents=True, children=True, type='nurbsCurve'):
cmds.select(dpSnapshotGrp)
cmds.file(rename=path)
cmds.file(exportSelected=True, type='mayaAscii', prompt=False, force=True)
cmds.file(rename=currentPath)
if not cmds.objExists('WIP_Grp'):
keepSnapshot = False
if keepSnapshot:
try:
cmds.parent(dpSnapshotGrp, 'WIP_Grp')
cmds.setAttr(dpSnapshotGrp + '.visibility', 0)
if cmds.objExists('Backup_' + dpSnapshotGrp):
cmds.delete('Backup_' + dpSnapshotGrp)
cmds.rename(dpSnapshotGrp, 'Backup_' + dpSnapshotGrp)
except:
pass
else:
cmds.delete(dpSnapshotGrp)
print('Exported shapes to: {0}'.format(path))
cmds.undoInfo(closeChunk=True)
else:
print(self.dpUIinst.langDic[self.dpUIinst.langName]['i202_noControls'])
|
dpAutoRigSystem
|
positive
|
def add_hook(self, trigger_param, kwargs):
"""
:type trigger_param: list[str] | str
:type kwargs: dict[str, unknown]
"""
<DeepExtract>
if not hasattr(kwargs, '_cloudbot_hook'):
kwargs._cloudbot_hook = {}
else:
assert hook.type not in kwargs._cloudbot_hook
kwargs._cloudbot_hook[hook.type] = hook
</DeepExtract>
if isinstance(trigger_param, str):
self.triggers.add(trigger_param)
else:
self.triggers.update(trigger_param)
|
def add_hook(self, trigger_param, kwargs):
"""
:type trigger_param: list[str] | str
:type kwargs: dict[str, unknown]
"""
if not hasattr(kwargs, '_cloudbot_hook'):
kwargs._cloudbot_hook = {}
else:
assert hook.type not in kwargs._cloudbot_hook
kwargs._cloudbot_hook[hook.type] = hook
if isinstance(trigger_param, str):
self.triggers.add(trigger_param)
else:
self.triggers.update(trigger_param)
|
CloudBot
|
positive
|
def delete_volume(group_name, volume_name):
<DeepExtract>
Ret = subprocess.run('lvdisplay %s/%s' % (group_name, volume_name), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, check=check)
Ret = Ret
</DeepExtract>
if Ret.returncode == 0:
<DeepExtract>
Ret = subprocess.run('lvremove -f %s/%s' % (group_name, volume_name), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, check=check)
Ret = Ret
</DeepExtract>
if Ret.returncode == 0:
logger.info('delete lv %s in vg %s success' % (volume_name, group_name))
return True
else:
logger.error('delete lv %s in vg %s failed:%s' % (volume_name, group_name, Ret.stdout.decode('utf-8')))
return False
else:
logger.info('lv %s in vg %s does not exists' % (volume_name, group_name))
|
def delete_volume(group_name, volume_name):
Ret = subprocess.run('lvdisplay %s/%s' % (group_name, volume_name), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, check=check)
Ret = Ret
if Ret.returncode == 0:
Ret = subprocess.run('lvremove -f %s/%s' % (group_name, volume_name), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, check=check)
Ret = Ret
if Ret.returncode == 0:
logger.info('delete lv %s in vg %s success' % (volume_name, group_name))
return True
else:
logger.error('delete lv %s in vg %s failed:%s' % (volume_name, group_name, Ret.stdout.decode('utf-8')))
return False
else:
logger.info('lv %s in vg %s does not exists' % (volume_name, group_name))
|
docklet
|
positive
|
def test_message_call_method_no_validation(client):
<DeepExtract>
if data is None:
data = {}
response = post_and_get_response(client, url=f"/message/tests.views.fake_components.{'FakeValidationComponent'}", data=data, action_queue=[{'payload': {'name': 'set_text_no_validation'}, 'type': 'callMethod'}])
body = response
</DeepExtract>
assert not body['errors']
|
def test_message_call_method_no_validation(client):
if data is None:
data = {}
response = post_and_get_response(client, url=f"/message/tests.views.fake_components.{'FakeValidationComponent'}", data=data, action_queue=[{'payload': {'name': 'set_text_no_validation'}, 'type': 'callMethod'}])
body = response
assert not body['errors']
|
django-unicorn
|
positive
|
def test_xontrib(self):
<DeepExtract>
print('\n>', 'xonsh')
p = pexpect.spawn('xonsh')
</DeepExtract>
p.expect('$')
p.sendline('xontrib load coconut')
p.expect('$')
p.sendline('!(ls -la) |> bool')
p.expect('True')
p.sendeof()
if p.isalive():
p.terminate()
|
def test_xontrib(self):
print('\n>', 'xonsh')
p = pexpect.spawn('xonsh')
p.expect('$')
p.sendline('xontrib load coconut')
p.expect('$')
p.sendline('!(ls -la) |> bool')
p.expect('True')
p.sendeof()
if p.isalive():
p.terminate()
|
coconut
|
positive
|
def __get_cache_core(query):
if __cache_results.get(query, '') != '':
if CACHE_LOG:
tools.log('get_cache_local: %s' % query, 'notice')
return __cache_results[query]
else:
__cache_results[query] = {}
if CACHE_LOG:
tools.log('get_cache_request: %s' % query, 'notice')
<DeepExtract>
response = __dynamodb('DynamoDB_20120810.GetItem', __map_in_cache(query))
</DeepExtract>
if response.status_code != 200:
if CACHE_LOG:
tools.log('get_cache_err_response: %s' % query, 'notice')
return __cache_results[query]
<DeepExtract>
response.text = json.loads(response.text)
if len(response.text.keys()) == 0:
response.text = None
item = response.text['Item']
response.text = {'t': item['t']['N'], 'd': item['d']['S']}
</DeepExtract>
if result is None:
if CACHE_LOG:
tools.log('get_cache_nocache: %s' % query, 'notice')
return __cache_results[query]
try:
result['d'] = json.loads(result['d'].replace("'", '"'))
except:
result['d'] = base64.b64decode(result['d'].encode('ascii'))
result['d'] = zlib.decompress(result['d']).decode('utf-8')
result['d'] = json.loads(result['d'].replace("'", '"'))
parsed_result = {}
cached_results = []
for scraper_key in result['d'].keys():
key = scraper_keys[scraper_key]
for result_key in result['d'][scraper_key].keys():
scraper_result = result['d'][scraper_key][result_key]
if len(scraper_result) < 2:
continue
cached_results.append({'provider_name_override': key, 'hash': result_key, 'package': package_keys[scraper_result[0]], 'release_title': decode(scraper_result[1]), 'size': scraper_result[2], 'seeds': 0})
parsed_result['cached_results'] = cached_results
__cache_results[query]['result'] = result
__cache_results[query]['parsed_result'] = parsed_result
if CACHE_LOG:
tools.log('get_cache_result: %s' % query, 'notice')
return __cache_results[query]
|
def __get_cache_core(query):
if __cache_results.get(query, '') != '':
if CACHE_LOG:
tools.log('get_cache_local: %s' % query, 'notice')
return __cache_results[query]
else:
__cache_results[query] = {}
if CACHE_LOG:
tools.log('get_cache_request: %s' % query, 'notice')
response = __dynamodb('DynamoDB_20120810.GetItem', __map_in_cache(query))
if response.status_code != 200:
if CACHE_LOG:
tools.log('get_cache_err_response: %s' % query, 'notice')
return __cache_results[query]
response.text = json.loads(response.text)
if len(response.text.keys()) == 0:
response.text = None
item = response.text['Item']
response.text = {'t': item['t']['N'], 'd': item['d']['S']}
if result is None:
if CACHE_LOG:
tools.log('get_cache_nocache: %s' % query, 'notice')
return __cache_results[query]
try:
result['d'] = json.loads(result['d'].replace("'", '"'))
except:
result['d'] = base64.b64decode(result['d'].encode('ascii'))
result['d'] = zlib.decompress(result['d']).decode('utf-8')
result['d'] = json.loads(result['d'].replace("'", '"'))
parsed_result = {}
cached_results = []
for scraper_key in result['d'].keys():
key = scraper_keys[scraper_key]
for result_key in result['d'][scraper_key].keys():
scraper_result = result['d'][scraper_key][result_key]
if len(scraper_result) < 2:
continue
cached_results.append({'provider_name_override': key, 'hash': result_key, 'package': package_keys[scraper_result[0]], 'release_title': decode(scraper_result[1]), 'size': scraper_result[2], 'seeds': 0})
parsed_result['cached_results'] = cached_results
__cache_results[query]['result'] = result
__cache_results[query]['parsed_result'] = parsed_result
if CACHE_LOG:
tools.log('get_cache_result: %s' % query, 'notice')
return __cache_results[query]
|
a4kScrapers
|
positive
|
def register_fn(fn):
<DeepExtract>
assert module_name not in self
self[module_name] = fn
</DeepExtract>
return fn
|
def register_fn(fn):
assert module_name not in self
self[module_name] = fn
return fn
|
DF-Traffic-Sign-Identification
|
positive
|
def __init__(self, *args, **kwargs):
if hasattr(self, '_used'):
<DeepExtract>
self.length = 0
</DeepExtract>
del self._used
return
orig_init(self, *args, **kwargs)
|
def __init__(self, *args, **kwargs):
if hasattr(self, '_used'):
self.length = 0
del self._used
return
orig_init(self, *args, **kwargs)
|
BitTornado
|
positive
|
def load_all_file_name(path, list_name, suffix='', not_include='.py'):
"""
    Load all file names, including those in sub folders
:param path:
:param list_name:
:param suffix:
:param not_include:
:return:
"""
for file in os.listdir(path):
file_path = os.path.join(path, file)
if os.path.isdir(file_path) and not_include not in file_path:
<DeepExtract>
for file in os.listdir(file_path):
file_path = os.path.join(file_path, file)
if os.path.isdir(file_path) and not_include not in file_path:
load_all_file_name(file_path, list_name, suffix, not_include)
elif os.path.splitext(file_path)[1] == suffix:
list_name.append(file_path)
</DeepExtract>
elif os.path.splitext(file_path)[1] == suffix:
list_name.append(file_path)
|
def load_all_file_name(path, list_name, suffix='', not_include='.py'):
"""
    Load all file names, including those in sub folders
:param path:
:param list_name:
:param suffix:
:param not_include:
:return:
"""
for file in os.listdir(path):
file_path = os.path.join(path, file)
if os.path.isdir(file_path) and not_include not in file_path:
for file in os.listdir(file_path):
file_path = os.path.join(file_path, file)
if os.path.isdir(file_path) and not_include not in file_path:
load_all_file_name(file_path, list_name, suffix, not_include)
elif os.path.splitext(file_path)[1] == suffix:
list_name.append(file_path)
elif os.path.splitext(file_path)[1] == suffix:
list_name.append(file_path)
|
EssayKiller_V2
|
positive
|
def __init__(self, out_channels, kernel_size, stride=1, pad=0, nobias=False, dtype=np.float32, in_channels=None):
"""Two-dimensional deconvolutional (transposed convolution)layer.
Args:
out_channels (int): Number of channels of output arrays.
kernel_size (int or (int, int)): Size of filters.
stride (int or (int, int)): Stride of filter applications.
pad (int or (int, int)): Spatial padding width for input arrays.
nobias (bool): If `True`, then this function does not use the bias.
in_channels (int or None): Number of channels of input arrays. If
`None`, parameter initialization will be deferred until the first
forward data pass at which time the size will be determined.
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.pad = pad
self.dtype = dtype
self.W = Parameter(None, name='W')
if in_channels is not None:
<DeepExtract>
(I, O) = (self.in_size, self.out_size)
W_data = xp.random.randn(I, O).astype(self.dtype) * np.sqrt(1 / I)
self.W.data = W_data
</DeepExtract>
if nobias:
self.b = None
else:
self.b = Parameter(np.zeros(out_channels, dtype=dtype), name='b')
|
def __init__(self, out_channels, kernel_size, stride=1, pad=0, nobias=False, dtype=np.float32, in_channels=None):
"""Two-dimensional deconvolutional (transposed convolution)layer.
Args:
out_channels (int): Number of channels of output arrays.
kernel_size (int or (int, int)): Size of filters.
stride (int or (int, int)): Stride of filter applications.
pad (int or (int, int)): Spatial padding width for input arrays.
nobias (bool): If `True`, then this function does not use the bias.
in_channels (int or None): Number of channels of input arrays. If
`None`, parameter initialization will be deferred until the first
forward data pass at which time the size will be determined.
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.pad = pad
self.dtype = dtype
self.W = Parameter(None, name='W')
if in_channels is not None:
(I, O) = (self.in_size, self.out_size)
W_data = xp.random.randn(I, O).astype(self.dtype) * np.sqrt(1 / I)
self.W.data = W_data
if nobias:
self.b = None
else:
self.b = Parameter(np.zeros(out_channels, dtype=dtype), name='b')
|
deep-learning-from-scratch-3
|
positive
|
def _mask_transform(self, mask):
<DeepExtract>
values = np.unique(np.array(mask).astype('int32'))
for value in values:
assert value in self._mapping
index = np.digitize(np.array(mask).astype('int32').ravel(), self._mapping, right=True)
target = self._key[index].reshape(np.array(mask).astype('int32').shape)
</DeepExtract>
return torch.LongTensor(np.array(target).astype('int32'))
|
def _mask_transform(self, mask):
values = np.unique(np.array(mask).astype('int32'))
for value in values:
assert value in self._mapping
index = np.digitize(np.array(mask).astype('int32').ravel(), self._mapping, right=True)
target = self._key[index].reshape(np.array(mask).astype('int32').shape)
return torch.LongTensor(np.array(target).astype('int32'))
|
awesome-semantic-segmentation-pytorch
|
positive
|
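The _mask_transform pair above remaps raw segmentation label ids onto contiguous train ids via np.digitize. A small self-contained illustration of that remapping follows; the mapping, key and mask values here are invented for the example and are not taken from the repository.

import numpy as np

mapping = np.array([7, 8, 11, 13], dtype='int32')  # raw class ids present in the mask (assumed)
key = np.array([0, 1, 2, 3], dtype='int32')        # contiguous train ids they map to (assumed)
mask = np.array([[7, 13], [11, 8]], dtype='int32')

# for values that occur in `mapping`, digitize(..., right=True) returns their index,
# so indexing `key` with the result performs the id remapping pixel by pixel
index = np.digitize(mask.ravel(), mapping, right=True)
remapped = key[index].reshape(mask.shape)
print(remapped)  # [[0 3]
                 #  [2 1]]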
def _submit_to_app_store(self, build_id: ResourceId, platform: Platform, max_processing_minutes: int, app_store_version_info: AppStoreVersionInfo, app_store_version_localization_infos: List[AppStoreVersionLocalizationInfo], cancel_previous_submissions: bool) -> Tuple[ReviewSubmission, ReviewSubmissionItem]:
self.logger.info(Colors.BLUE(f'\nSubmit build {build_id!r} to App Store review'))
try:
(build, app) = self.api_client.builds.read_with_include(build_id, App)
except AppStoreConnectApiError as api_error:
raise AppStoreConnectError(str(api_error)) from api_error
if cancel_previous_submissions:
<DeepExtract>
self.logger.info(Colors.BLUE('\nCancel previous submissions before creating new submission'))
states_to_cancel = (ReviewSubmissionState.WAITING_FOR_REVIEW, ReviewSubmissionState.IN_REVIEW, ReviewSubmissionState.UNRESOLVED_ISSUES)
cancelled_submissions = self.cancel_review_submissions(application_id=app.id, review_submission_state=states_to_cancel, platform=platform, should_print=False)
if cancelled_submissions:
self._wait_for_cancelled_review_submissions_to_complete(app.id, platform)
</DeepExtract>
if max_processing_minutes:
<DeepExtract>
is_first_attempt = True
start_waiting = time.time()
while time.time() - start_waiting < max_processing_minutes * 60:
if build.attributes.processingState is BuildProcessingState.PROCESSING:
if is_first_attempt:
self._log_build_processing_message(build.id, max_processing_minutes)
msg_template = 'Build %s is still being processed on App Store Connect side, waiting %d seconds and checking again'
self.logger.info(msg_template, build.id, retry_wait_seconds)
time.sleep(retry_wait_seconds)
try:
build = self.api_client.builds.read(build)
except AppStoreConnectApiError as api_error:
raise AppStoreConnectError(str(api_error))
elif build.attributes.processingState in (BuildProcessingState.FAILED, BuildProcessingState.INVALID):
raise IOError(f'Uploaded build {build.id} is {build.attributes.processingState.value.lower()}')
else:
if not is_first_attempt:
self.logger.info(Colors.BLUE('Processing build %s is completed\n'), build.id)
build = build
is_first_attempt = False
raise IOError(f'Waiting for build {build.id} processing timed out in {max_processing_minutes} minutes. You can configure maximum timeout using {PublishArgument.MAX_BUILD_PROCESSING_WAIT.flag} command line option, or {Types.MaxBuildProcessingWait.environment_variable_key} environment variable.')
</DeepExtract>
if app_store_version_info.version_string is None:
self.logger.info("\nVersion string is not specified. Obtain it from build's pre-release version...")
<DeepExtract>
pre_release_version = self._get_related_resource(build_id, Build, PreReleaseVersion, self.api_client.builds.read_pre_release_version, False)
</DeepExtract>
app_store_version_info.version_string = pre_release_version.attributes.version
self.logger.info(Colors.BLUE(f'\nUsing version {app_store_version_info.version_string} for App Store submission'))
<DeepExtract>
app_store_version = self._get_editable_app_store_version(app, app_store_version_info.platform)
if app_store_version is None:
self.logger.info(f'\n{AppStoreVersion} does not exist for build {build.id}')
app_store_version = self.create_app_store_version(build.id, platform=app_store_version_info.platform, copyright=app_store_version_info.copyright, earliest_release_date=app_store_version_info.earliest_release_date, release_type=app_store_version_info.release_type, version_string=app_store_version_info.version_string)
else:
self._update_existing_app_store_version(app_store_version, build, app_store_version_info)
app_store_version = app_store_version
</DeepExtract>
self.echo('')
<DeepExtract>
is_first_app_store_version = self._is_first_app_store_version(app, app_store_version.attributes.platform)
existing_localizations = self._get_existing_app_store_version_localizations(app_store_version)
for localization in app_store_version_localization_infos:
if is_first_app_store_version:
localization.whats_new = None
localization_id = existing_localizations.get(localization.locale or app.attributes.primaryLocale)
try:
self._create_or_update_app_store_version_localization(localization_id, app, app_store_version, localization)
except AppStoreConnectApiError as error:
verb = 'update' if localization_id else 'create new'
message = f'Failed to {verb} {AppStoreVersionLocalization} for locale {localization.locale}:'
self.echo(f'{Colors.YELLOW(message)}\n{error}\n')
</DeepExtract>
<DeepExtract>
self.printer.log_creating(ReviewSubmission, platform=platform, app=app.id)
try:
review_submission = self.api_client.review_submissions.create(platform, app)
except AppStoreConnectApiError as api_error:
existing_submission_error_patt = re.compile('There is another reviewSubmissions with id ([\\w-]+) still in progress')
existing_submission_matches: Iterator[Optional[re.Match]] = (existing_submission_error_patt.search(error.detail) for error in api_error.error_response.errors if error.detail is not None)
try:
existing_submission_match: re.Match = next(filter(bool, existing_submission_matches))
except StopIteration:
raise AppStoreConnectError(str(api_error)) from api_error
self.logger.warning('Review submission already exists, reuse it')
existing_review_submission_id = ResourceId(existing_submission_match.group(1))
review_submission = self.api_client.review_submissions.read(existing_review_submission_id)
self.printer.print_resource(review_submission, True)
if review_submission.created:
self.printer.log_created(review_submission)
self.echo('')
review_submission = review_submission
</DeepExtract>
review_submission_item = self.create_review_submission_item(review_submission_id=review_submission.id, app_store_version_id=app_store_version.id)
self.echo(Colors.BLUE('\nSubmit to App Review\n'))
self.confirm_review_submission(review_submission.id)
submission_url = f'https://appstoreconnect.apple.com/apps/{app.id}/appstore/reviewsubmissions/details/{review_submission.id}'
self.logger.info(f'\nCheck App Store review submission details from\n{submission_url}\n')
return (review_submission, review_submission_item)
|
def _submit_to_app_store(self, build_id: ResourceId, platform: Platform, max_processing_minutes: int, app_store_version_info: AppStoreVersionInfo, app_store_version_localization_infos: List[AppStoreVersionLocalizationInfo], cancel_previous_submissions: bool) -> Tuple[ReviewSubmission, ReviewSubmissionItem]:
self.logger.info(Colors.BLUE(f'\nSubmit build {build_id!r} to App Store review'))
try:
(build, app) = self.api_client.builds.read_with_include(build_id, App)
except AppStoreConnectApiError as api_error:
raise AppStoreConnectError(str(api_error)) from api_error
if cancel_previous_submissions:
self.logger.info(Colors.BLUE('\nCancel previous submissions before creating new submission'))
states_to_cancel = (ReviewSubmissionState.WAITING_FOR_REVIEW, ReviewSubmissionState.IN_REVIEW, ReviewSubmissionState.UNRESOLVED_ISSUES)
cancelled_submissions = self.cancel_review_submissions(application_id=app.id, review_submission_state=states_to_cancel, platform=platform, should_print=False)
if cancelled_submissions:
self._wait_for_cancelled_review_submissions_to_complete(app.id, platform)
if max_processing_minutes:
is_first_attempt = True
start_waiting = time.time()
while time.time() - start_waiting < max_processing_minutes * 60:
if build.attributes.processingState is BuildProcessingState.PROCESSING:
if is_first_attempt:
self._log_build_processing_message(build.id, max_processing_minutes)
msg_template = 'Build %s is still being processed on App Store Connect side, waiting %d seconds and checking again'
self.logger.info(msg_template, build.id, retry_wait_seconds)
time.sleep(retry_wait_seconds)
try:
build = self.api_client.builds.read(build)
except AppStoreConnectApiError as api_error:
raise AppStoreConnectError(str(api_error))
elif build.attributes.processingState in (BuildProcessingState.FAILED, BuildProcessingState.INVALID):
raise IOError(f'Uploaded build {build.id} is {build.attributes.processingState.value.lower()}')
else:
if not is_first_attempt:
self.logger.info(Colors.BLUE('Processing build %s is completed\n'), build.id)
build = build
is_first_attempt = False
raise IOError(f'Waiting for build {build.id} processing timed out in {max_processing_minutes} minutes. You can configure maximum timeout using {PublishArgument.MAX_BUILD_PROCESSING_WAIT.flag} command line option, or {Types.MaxBuildProcessingWait.environment_variable_key} environment variable.')
if app_store_version_info.version_string is None:
self.logger.info("\nVersion string is not specified. Obtain it from build's pre-release version...")
pre_release_version = self._get_related_resource(build_id, Build, PreReleaseVersion, self.api_client.builds.read_pre_release_version, False)
app_store_version_info.version_string = pre_release_version.attributes.version
self.logger.info(Colors.BLUE(f'\nUsing version {app_store_version_info.version_string} for App Store submission'))
app_store_version = self._get_editable_app_store_version(app, app_store_version_info.platform)
if app_store_version is None:
self.logger.info(f'\n{AppStoreVersion} does not exist for build {build.id}')
app_store_version = self.create_app_store_version(build.id, platform=app_store_version_info.platform, copyright=app_store_version_info.copyright, earliest_release_date=app_store_version_info.earliest_release_date, release_type=app_store_version_info.release_type, version_string=app_store_version_info.version_string)
else:
self._update_existing_app_store_version(app_store_version, build, app_store_version_info)
app_store_version = app_store_version
self.echo('')
is_first_app_store_version = self._is_first_app_store_version(app, app_store_version.attributes.platform)
existing_localizations = self._get_existing_app_store_version_localizations(app_store_version)
for localization in app_store_version_localization_infos:
if is_first_app_store_version:
localization.whats_new = None
localization_id = existing_localizations.get(localization.locale or app.attributes.primaryLocale)
try:
self._create_or_update_app_store_version_localization(localization_id, app, app_store_version, localization)
except AppStoreConnectApiError as error:
verb = 'update' if localization_id else 'create new'
message = f'Failed to {verb} {AppStoreVersionLocalization} for locale {localization.locale}:'
self.echo(f'{Colors.YELLOW(message)}\n{error}\n')
self.printer.log_creating(ReviewSubmission, platform=platform, app=app.id)
try:
review_submission = self.api_client.review_submissions.create(platform, app)
except AppStoreConnectApiError as api_error:
existing_submission_error_patt = re.compile('There is another reviewSubmissions with id ([\\w-]+) still in progress')
existing_submission_matches: Iterator[Optional[re.Match]] = (existing_submission_error_patt.search(error.detail) for error in api_error.error_response.errors if error.detail is not None)
try:
existing_submission_match: re.Match = next(filter(bool, existing_submission_matches))
except StopIteration:
raise AppStoreConnectError(str(api_error)) from api_error
self.logger.warning('Review submission already exists, reuse it')
existing_review_submission_id = ResourceId(existing_submission_match.group(1))
review_submission = self.api_client.review_submissions.read(existing_review_submission_id)
self.printer.print_resource(review_submission, True)
if review_submission.created:
self.printer.log_created(review_submission)
self.echo('')
review_submission = review_submission
review_submission_item = self.create_review_submission_item(review_submission_id=review_submission.id, app_store_version_id=app_store_version.id)
self.echo(Colors.BLUE('\nSubmit to App Review\n'))
self.confirm_review_submission(review_submission.id)
submission_url = f'https://appstoreconnect.apple.com/apps/{app.id}/appstore/reviewsubmissions/details/{review_submission.id}'
self.logger.info(f'\nCheck App Store review submission details from\n{submission_url}\n')
return (review_submission, review_submission_item)
|
cli-tools
|
positive
|
def prepare_kallisto_index(transcriptome_fasta, org_build):
<DeepExtract>
def is_exe(fpath):
kallisto = os.path.isfile(fpath) and os.access(fpath, os.X_OK)
(fpath, fname) = os.path.split('kallisto')
if fpath:
if is_exe('kallisto'):
kallisto = 'kallisto'
else:
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, 'kallisto')
if is_exe(exe_file):
kallisto = exe_file
kallisto = None
</DeepExtract>
if not kallisto:
return None
base_dir = os.path.abspath(os.path.dirname(transcriptome_fasta))
kallisto_dir = os.path.join(base_dir, 'kallisto')
safe_makedir(kallisto_dir)
kallisto_index = os.path.join(kallisto_dir, org_build)
if not os.path.exists(kallisto_index):
cmd = 'kallisto index -i {kallisto_index} {transcriptome_fasta}'
subprocess.check_call(cmd.format(**locals()), shell=True)
return kallisto_index
|
def prepare_kallisto_index(transcriptome_fasta, org_build):
def is_exe(fpath):
kallisto = os.path.isfile(fpath) and os.access(fpath, os.X_OK)
(fpath, fname) = os.path.split('kallisto')
if fpath:
if is_exe('kallisto'):
kallisto = 'kallisto'
else:
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, 'kallisto')
if is_exe(exe_file):
kallisto = exe_file
kallisto = None
if not kallisto:
return None
base_dir = os.path.abspath(os.path.dirname(transcriptome_fasta))
kallisto_dir = os.path.join(base_dir, 'kallisto')
safe_makedir(kallisto_dir)
kallisto_index = os.path.join(kallisto_dir, org_build)
if not os.path.exists(kallisto_index):
cmd = 'kallisto index -i {kallisto_index} {transcriptome_fasta}'
subprocess.check_call(cmd.format(**locals()), shell=True)
return kallisto_index
|
cloudbiolinux
|
positive
|
def print_all(self):
"""
print both extract and sort channels
"""
<DeepExtract>
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[DO_EXTRACT_STR]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(DO_EXTRACT_STR)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
</DeepExtract>
<DeepExtract>
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[DO_SORT_STR_POS]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(DO_SORT_STR_POS)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
</DeepExtract>
<DeepExtract>
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[DO_SORT_STR_NEG]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(DO_SORT_STR_NEG)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
</DeepExtract>
<DeepExtract>
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[SORT_MANUAL_STR_POS]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(SORT_MANUAL_STR_POS)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
</DeepExtract>
<DeepExtract>
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[SORT_MANUAL_STR_NEG]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(SORT_MANUAL_STR_NEG)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
</DeepExtract>
<DeepExtract>
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[DROP_STR_POS]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(DROP_STR_POS)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
</DeepExtract>
<DeepExtract>
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[DROP_STR_NEG]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(DROP_STR_NEG)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
</DeepExtract>
|
def print_all(self):
"""
print both extract and sort channels
"""
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[DO_EXTRACT_STR]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(DO_EXTRACT_STR)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[DO_SORT_STR_POS]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(DO_SORT_STR_POS)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[DO_SORT_STR_NEG]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(DO_SORT_STR_NEG)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[SORT_MANUAL_STR_POS]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(SORT_MANUAL_STR_POS)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[SORT_MANUAL_STR_NEG]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(SORT_MANUAL_STR_NEG)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[DROP_STR_POS]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(DROP_STR_POS)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
infixes = {DO_EXTRACT_STR: 'extract', DO_SORT_STR_POS: 'sort_pos', DO_SORT_STR_NEG: 'sort_neg', SORT_MANUAL_STR_POS: 'manual_pos', SORT_MANUAL_STR_NEG: 'manual_neg', DROP_STR_POS: 'drop_pos', DROP_STR_NEG: 'drop_neg'}
infix = infixes[DROP_STR_NEG]
if self.label is not None:
infix += '_' + self.label
chans = self.channelmodel.get_channels(DROP_STR_NEG)
if not chans:
return
out_fname = 'do_' + infix + '.txt'
write = True
if os.path.exists(out_fname):
msgbox = QMessageBox()
msgbox.setText('Overwrite {} ?'.format(out_fname))
msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
ret = msgbox.exec_()
if ret == QMessageBox.Yes:
print('Overwriting ' + out_fname)
os.rename(out_fname, out_fname + '.bak')
else:
write = False
if write:
with open(out_fname, 'w') as fid:
fid.write('\n'.join(chans))
fid.close()
|
combinato
|
positive
|
def os_flavor(self, pool_image=None):
<DeepExtract>
node_sku_id = self.ui.get_node_sku_id()
</DeepExtract>
if 'windows' in node_sku_id:
self._log.debug('Detected windows for skuId: {}'.format(node_sku_id))
return utils.OperatingSystem.windows
else:
self._log.debug('Detected Linux for skuId: {}'.format(node_sku_id))
return utils.OperatingSystem.linux
|
def os_flavor(self, pool_image=None):
node_sku_id = self.ui.get_node_sku_id()
if 'windows' in node_sku_id:
self._log.debug('Detected windows for skuId: {}'.format(node_sku_id))
return utils.OperatingSystem.windows
else:
self._log.debug('Detected Linux for skuId: {}'.format(node_sku_id))
return utils.OperatingSystem.linux
|
azure-batch-maya
|
positive
|
def create_supplementary_gff(hints_db, in_gtf, genome, annotation_gp=None):
"""
Creates the supplementary GFF which contains exon hints derived from the database as well as non-coding introns
and all exons if annotation_gp is passed.
:param hints_db: path to the hints database
:param in_gtf: GTF file for this genome. If we are not doing this on CGP results, and this is the reference genome,
this will be the annotation GTF.
:param genome: current genome
:param annotation_gp: annotation genePred, if we have one
:return: file path
"""
<DeepExtract>
(speciesnames, seqnames, hints, featuretypes, session) = tools.hintsDatabaseInterface.reflect_hints_db(hints_db)
hints_file = tools.fileOps.get_tmp_file()
if tools.hintsDatabaseInterface.genome_has_no_wiggle_hints(hints_db, genome):
hints = []
with open(hints_file, 'w') as outf_h:
wiggle_iter = tools.hintsDatabaseInterface.get_wiggle_hints(genome, speciesnames, seqnames, hints, session)
for (seqname, start, end, score) in wiggle_iter:
outf_h.write('\t'.join(map(str, [seqname, start, end, score])) + '\n')
merged_hints_file = tools.fileOps.get_tmp_file()
cmd = ['bedtools', 'merge', '-i', hints_file, '-c', '4', '-o', 'mean']
tools.procOps.run_proc(cmd, stdout=merged_hints_file, stderr='/dev/null')
tmp_bed = tools.fileOps.get_tmp_file()
cmd = [['grep', '-P', '(\texon\t|\tCDS\t)', in_gtf], ['cut', '-d', '\t', '-f', '1,4,5']]
tools.procOps.run_proc(cmd, stdout=tmp_bed)
tools.procOps.run_proc(['bedSort', tmp_bed, tmp_bed])
cmd = [['bedtools', 'intersect', '-a', tmp_bed, '-b', merged_hints_file, '-f', '0.8', '-wa', '-wb'], ['cut', '-d', '\t', '-f', '1,2,3,7']]
bed_plus_1 = tools.procOps.call_proc_lines(cmd)
hints = []
for line in bed_plus_1:
(chrom, start, end, score) = line.split()
tags = 'pri=3;source=E;mult={}'.format(int(round(float(score))))
hints.append([chrom, 'tmp', 'exon', start, end, '.', '.', '.', tags])
os.remove(hints_file)
os.remove(merged_hints_file)
hints = hints
</DeepExtract>
if annotation_gp is not None:
hints.extend(extract_exons_non_coding_introns(annotation_gp))
tmp_path = tools.fileOps.get_tmp_file()
with open(tmp_path, 'w') as outf:
tools.fileOps.print_rows(outf, hints)
cmd = [['sort', '-n', '-k4,4', tmp_path], ['sort', '-s', '-n', '-k5,5'], ['sort', '-s', '-k3,3'], ['sort', '-s', '-k1,1'], ['join_mult_hints.pl']]
supplementary_gff_path = tools.fileOps.get_tmp_file(suffix='gff')
tools.procOps.run_proc(cmd, stdout=supplementary_gff_path)
os.remove(tmp_path)
return supplementary_gff_path
|
def create_supplementary_gff(hints_db, in_gtf, genome, annotation_gp=None):
"""
Creates the supplementary GFF which contains exon hints derived from the database as well as non-coding introns
and all exons if annotation_gp is passed.
:param hints_db: path to the hints database
:param in_gtf: GTF file for this genome. If we are not doing this on CGP results, and this is the reference genome,
this will be the annotation GTF.
:param genome: current genome
:param annotation_gp: annotation genePred, if we have one
:return: file path
"""
(speciesnames, seqnames, hints, featuretypes, session) = tools.hintsDatabaseInterface.reflect_hints_db(hints_db)
hints_file = tools.fileOps.get_tmp_file()
if tools.hintsDatabaseInterface.genome_has_no_wiggle_hints(hints_db, genome):
hints = []
with open(hints_file, 'w') as outf_h:
wiggle_iter = tools.hintsDatabaseInterface.get_wiggle_hints(genome, speciesnames, seqnames, hints, session)
for (seqname, start, end, score) in wiggle_iter:
outf_h.write('\t'.join(map(str, [seqname, start, end, score])) + '\n')
merged_hints_file = tools.fileOps.get_tmp_file()
cmd = ['bedtools', 'merge', '-i', hints_file, '-c', '4', '-o', 'mean']
tools.procOps.run_proc(cmd, stdout=merged_hints_file, stderr='/dev/null')
tmp_bed = tools.fileOps.get_tmp_file()
cmd = [['grep', '-P', '(\texon\t|\tCDS\t)', in_gtf], ['cut', '-d', '\t', '-f', '1,4,5']]
tools.procOps.run_proc(cmd, stdout=tmp_bed)
tools.procOps.run_proc(['bedSort', tmp_bed, tmp_bed])
cmd = [['bedtools', 'intersect', '-a', tmp_bed, '-b', merged_hints_file, '-f', '0.8', '-wa', '-wb'], ['cut', '-d', '\t', '-f', '1,2,3,7']]
bed_plus_1 = tools.procOps.call_proc_lines(cmd)
hints = []
for line in bed_plus_1:
(chrom, start, end, score) = line.split()
tags = 'pri=3;source=E;mult={}'.format(int(round(float(score))))
hints.append([chrom, 'tmp', 'exon', start, end, '.', '.', '.', tags])
os.remove(hints_file)
os.remove(merged_hints_file)
hints = hints
if annotation_gp is not None:
hints.extend(extract_exons_non_coding_introns(annotation_gp))
tmp_path = tools.fileOps.get_tmp_file()
with open(tmp_path, 'w') as outf:
tools.fileOps.print_rows(outf, hints)
cmd = [['sort', '-n', '-k4,4', tmp_path], ['sort', '-s', '-n', '-k5,5'], ['sort', '-s', '-k3,3'], ['sort', '-s', '-k1,1'], ['join_mult_hints.pl']]
supplementary_gff_path = tools.fileOps.get_tmp_file(suffix='gff')
tools.procOps.run_proc(cmd, stdout=supplementary_gff_path)
os.remove(tmp_path)
return supplementary_gff_path
|
Comparative-Annotation-Toolkit
|
positive
|
def __init__(self, bottom_up, in_features, out_channels, num_top_levels, num_repeats, norm=''):
"""
Args:
bottom_up (Backbone): module representing the bottom up subnetwork.
Must be a subclass of :class:`Backbone`. The multi-scale feature
maps generated by the bottom up network, and listed in `in_features`,
are used to generate FPN levels.
in_features (list[str]): names of the input feature maps coming
from the backbone to which FPN is attached. For example, if the
backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
of these may be used; order must be from high to low resolution.
out_channels (int): number of channels in the output feature maps.
num_top_levels (int): the number of the top levels (p6 or p7).
num_repeats (int): the number of repeats of BiFPN.
norm (str): the normalization to use.
"""
super(BiFPN, self).__init__()
assert isinstance(bottom_up, Backbone)
self.bottom_up = BackboneWithTopLevels(bottom_up, out_channels, num_top_levels, norm)
bottom_up_output_shapes = self.bottom_up.output_shape()
in_features = sorted(in_features, key=lambda x: split_name(x)[1])
self._size_divisibility = bottom_up_output_shapes[in_features[-1]].stride
self.out_channels = out_channels
self.min_level = split_name(in_features[0])[1]
<DeepExtract>
for (i, c) in enumerate(in_features[-1]):
if not c.isalpha():
(prefix, last_suffix) = (in_features[-1][:i], int(in_features[-1][i:]))
raise ValueError()
</DeepExtract>
for i in range(num_top_levels):
in_features.append(prefix + str(last_suffix + i + 1))
self.in_features = in_features
self._out_features = ['p{}'.format(split_name(name)[1]) for name in in_features]
self._out_feature_strides = {out_name: bottom_up_output_shapes[in_name].stride for (out_name, in_name) in zip(self._out_features, in_features)}
self._out_feature_channels = {k: out_channels for k in self._out_features}
self.repeated_bifpn = nn.ModuleList()
for i in range(num_repeats):
if i == 0:
in_channels_list = [bottom_up_output_shapes[name].channels for name in in_features]
else:
in_channels_list = [self._out_feature_channels[name] for name in self._out_features]
self.repeated_bifpn.append(SingleBiFPN(in_channels_list, out_channels, norm))
|
def __init__(self, bottom_up, in_features, out_channels, num_top_levels, num_repeats, norm=''):
"""
Args:
bottom_up (Backbone): module representing the bottom up subnetwork.
Must be a subclass of :class:`Backbone`. The multi-scale feature
maps generated by the bottom up network, and listed in `in_features`,
are used to generate FPN levels.
in_features (list[str]): names of the input feature maps coming
from the backbone to which FPN is attached. For example, if the
backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
of these may be used; order must be from high to low resolution.
out_channels (int): number of channels in the output feature maps.
num_top_levels (int): the number of the top levels (p6 or p7).
num_repeats (int): the number of repeats of BiFPN.
norm (str): the normalization to use.
"""
super(BiFPN, self).__init__()
assert isinstance(bottom_up, Backbone)
self.bottom_up = BackboneWithTopLevels(bottom_up, out_channels, num_top_levels, norm)
bottom_up_output_shapes = self.bottom_up.output_shape()
in_features = sorted(in_features, key=lambda x: split_name(x)[1])
self._size_divisibility = bottom_up_output_shapes[in_features[-1]].stride
self.out_channels = out_channels
self.min_level = split_name(in_features[0])[1]
for (i, c) in enumerate(in_features[-1]):
if not c.isalpha():
(prefix, last_suffix) = (in_features[-1][:i], int(in_features[-1][i:]))
raise ValueError()
for i in range(num_top_levels):
in_features.append(prefix + str(last_suffix + i + 1))
self.in_features = in_features
self._out_features = ['p{}'.format(split_name(name)[1]) for name in in_features]
self._out_feature_strides = {out_name: bottom_up_output_shapes[in_name].stride for (out_name, in_name) in zip(self._out_features, in_features)}
self._out_feature_channels = {k: out_channels for k in self._out_features}
self.repeated_bifpn = nn.ModuleList()
for i in range(num_repeats):
if i == 0:
in_channels_list = [bottom_up_output_shapes[name].channels for name in in_features]
else:
in_channels_list = [self._out_feature_channels[name] for name in self._out_features]
self.repeated_bifpn.append(SingleBiFPN(in_channels_list, out_channels, norm))
|
AdelaiDet
|
positive
|
def get_diffraction_image(coordinates, species, probe, x, wavelength, precession, GPU=True, pointwise=False, **kwargs):
"""
Return kinematically simulated diffraction pattern
Parameters
----------
coordinates : `numpy.ndarray` [`float`], (n_atoms, 3)
List of atomic coordinates
species : `numpy.ndarray` [`int`], (n_atoms,)
List of atomic numbers
probe : `diffsims.ProbeFunction`
Function representing 3D shape of beam
x : `list` [`numpy.ndarray` [`float`] ], of shapes [(nx,), (ny,), (nz,)]
Mesh on which to compute the volume density
wavelength : `float`
Wavelength of electron beam
precession : a pair (`float`, `int`)
The float dictates the angle of precession and the int how many points are
used to discretise the integration.
dtype : (`str`, `str`)
tuple of floating/complex datatypes to cast outputs to
ZERO : `float` > 0, optional
Rounding error permitted in computation of atomic density. This value is
the smallest value rounded to 0.
GPU : `bool`, optional
Flag whether to use GPU or CPU discretisation. Default (if available) is True
pointwise : `bool`, optional
Optional parameter whether atomic intensities are computed point-wise at
the centre of a voxel or an integral over the voxel. default=False
Returns
-------
DP : `numpy.ndarray` [`dtype[0]`], (nx, ny, nz)
The two-dimensional diffraction pattern evaluated on the reciprocal grid
corresponding to the first two vectors of `x`.
"""
FTYPE = kwargs['dtype'][0]
kwargs['GPU'] = GPU
kwargs['pointwise'] = pointwise
x = [X.astype(FTYPE, copy=False) for X in x]
y = to_recip(x)
if wavelength == 0:
p = probe(x).mean(-1)
vol = get_discretisation(coordinates, species, x[:2], **kwargs)[..., 0]
ft = get_DFT(x[:-1], y[:-1])[0]
else:
p = probe(x)
vol = get_discretisation(coordinates, species, x, **kwargs)
ft = get_DFT(x, y)[0]
if precession[0] == 0:
arr = ft(vol * p)
arr = fast_abs(arr, arr).real ** 2
if wavelength == 0:
return normalise(arr)
else:
return normalise(grid2sphere(arr, y, None, 2 * pi / wavelength))
R = [precess_mat(precession[0], i * 360 / precession[1]) for i in range(precession[1])]
if wavelength == 0:
return normalise(sum((get_diffraction_image(coordinates.dot(r), species, probe, x, wavelength, (0, 1), **kwargs) for r in R)))
fftshift_phase(vol)
buf = empty(vol.shape, dtype=FTYPE)
(ft, buf) = plan_fft(buf, overwrite=True, planner=1)
DP = None
for r in R:
probe(to_mesh(x, r.T, dtype=FTYPE), out=buf, scale=vol)
newFT = ft()
newFT = fast_abs(newFT, buf).real
newFT *= newFT
<DeepExtract>
if 2 * pi / wavelength in (None, 0) or y[2].size == 1:
if newFT.real.ndim == 2:
newFT = newFT.real
elif newFT.real.shape[2] == 1:
newFT = newFT.real[:, :, 0]
y = to_mesh((y[0], y[1], array([0])), list(r)).reshape(-1, 3)
if 2 * pi / wavelength is not None:
w = 1 / (1 + (y ** 2).sum(-1) / 2 * pi / wavelength ** 2)
y *= w[:, None]
if list(r) is None:
y[:, 2] = 2 * pi / wavelength * (1 - w)
else:
y += 2 * pi / wavelength * (1 - w)[:, None] * list(r)[2]
out = interpn(y, newFT.real, y, method='linear', bounds_error=False, fill_value=0)
newFT = out.reshape(y[0].size, y[1].size)
</DeepExtract>
if DP is None:
DP = newFT
else:
DP += newFT
return normalise(DP.astype(FTYPE, copy=False))
|
def get_diffraction_image(coordinates, species, probe, x, wavelength, precession, GPU=True, pointwise=False, **kwargs):
"""
Return kinematically simulated diffraction pattern
Parameters
----------
coordinates : `numpy.ndarray` [`float`], (n_atoms, 3)
List of atomic coordinates
species : `numpy.ndarray` [`int`], (n_atoms,)
List of atomic numbers
probe : `diffsims.ProbeFunction`
Function representing 3D shape of beam
x : `list` [`numpy.ndarray` [`float`] ], of shapes [(nx,), (ny,), (nz,)]
Mesh on which to compute the volume density
wavelength : `float`
Wavelength of electron beam
precession : a pair (`float`, `int`)
The float dictates the angle of precession and the int how many points are
used to discretise the integration.
dtype : (`str`, `str`)
tuple of floating/complex datatypes to cast outputs to
ZERO : `float` > 0, optional
Rounding error permitted in computation of atomic density. This value is
the smallest value rounded to 0.
GPU : `bool`, optional
Flag whether to use GPU or CPU discretisation. Default (if available) is True
pointwise : `bool`, optional
Optional parameter whether atomic intensities are computed point-wise at
the centre of a voxel or an integral over the voxel. default=False
Returns
-------
DP : `numpy.ndarray` [`dtype[0]`], (nx, ny, nz)
The two-dimensional diffraction pattern evaluated on the reciprocal grid
corresponding to the first two vectors of `x`.
"""
FTYPE = kwargs['dtype'][0]
kwargs['GPU'] = GPU
kwargs['pointwise'] = pointwise
x = [X.astype(FTYPE, copy=False) for X in x]
y = to_recip(x)
if wavelength == 0:
p = probe(x).mean(-1)
vol = get_discretisation(coordinates, species, x[:2], **kwargs)[..., 0]
ft = get_DFT(x[:-1], y[:-1])[0]
else:
p = probe(x)
vol = get_discretisation(coordinates, species, x, **kwargs)
ft = get_DFT(x, y)[0]
if precession[0] == 0:
arr = ft(vol * p)
arr = fast_abs(arr, arr).real ** 2
if wavelength == 0:
return normalise(arr)
else:
return normalise(grid2sphere(arr, y, None, 2 * pi / wavelength))
R = [precess_mat(precession[0], i * 360 / precession[1]) for i in range(precession[1])]
if wavelength == 0:
return normalise(sum((get_diffraction_image(coordinates.dot(r), species, probe, x, wavelength, (0, 1), **kwargs) for r in R)))
fftshift_phase(vol)
buf = empty(vol.shape, dtype=FTYPE)
(ft, buf) = plan_fft(buf, overwrite=True, planner=1)
DP = None
for r in R:
probe(to_mesh(x, r.T, dtype=FTYPE), out=buf, scale=vol)
newFT = ft()
newFT = fast_abs(newFT, buf).real
newFT *= newFT
if 2 * pi / wavelength in (None, 0) or y[2].size == 1:
if newFT.real.ndim == 2:
newFT = newFT.real
elif newFT.real.shape[2] == 1:
newFT = newFT.real[:, :, 0]
y = to_mesh((y[0], y[1], array([0])), list(r)).reshape(-1, 3)
if 2 * pi / wavelength is not None:
w = 1 / (1 + (y ** 2).sum(-1) / 2 * pi / wavelength ** 2)
y *= w[:, None]
if list(r) is None:
y[:, 2] = 2 * pi / wavelength * (1 - w)
else:
y += 2 * pi / wavelength * (1 - w)[:, None] * list(r)[2]
out = interpn(y, newFT.real, y, method='linear', bounds_error=False, fill_value=0)
newFT = out.reshape(y[0].size, y[1].size)
if DP is None:
DP = newFT
else:
DP += newFT
return normalise(DP.astype(FTYPE, copy=False))
|
diffsims
|
positive
|
def __init__(self, block, layers, num_classes=1000, intermediate=True):
self.inplanes = 64
self.intermediate = intermediate
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
<DeepExtract>
downsample = None
if stride != 1 or self.inplanes != 64 * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(64 * block.expansion))
layers = []
layers.append(block(self.inplanes, 64, stride, downsample))
self.inplanes = 64 * block.expansion
for i in range(1, layers[0]):
layers.append(block(self.inplanes, 64))
self.layer1 = nn.Sequential(*layers)
</DeepExtract>
<DeepExtract>
downsample = None
if 2 != 1 or self.inplanes != 128 * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(128 * block.expansion))
layers = []
layers.append(block(self.inplanes, 128, 2, downsample))
self.inplanes = 128 * block.expansion
for i in range(1, layers[1]):
layers.append(block(self.inplanes, 128))
self.layer2 = nn.Sequential(*layers)
</DeepExtract>
<DeepExtract>
downsample = None
if 2 != 1 or self.inplanes != 256 * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(256 * block.expansion))
layers = []
layers.append(block(self.inplanes, 256, 2, downsample))
self.inplanes = 256 * block.expansion
for i in range(1, layers[2]):
layers.append(block(self.inplanes, 256))
self.layer3 = nn.Sequential(*layers)
</DeepExtract>
<DeepExtract>
downsample = None
if 2 != 1 or self.inplanes != 512 * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(512 * block.expansion))
layers = []
layers.append(block(self.inplanes, 512, 2, downsample))
self.inplanes = 512 * block.expansion
for i in range(1, layers[3]):
layers.append(block(self.inplanes, 512))
self.layer4 = nn.Sequential(*layers)
</DeepExtract>
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.features = 512 * block.expansion
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
|
def __init__(self, block, layers, num_classes=1000, intermediate=True):
self.inplanes = 64
self.intermediate = intermediate
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
downsample = None
if stride != 1 or self.inplanes != 64 * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(64 * block.expansion))
layers = []
layers.append(block(self.inplanes, 64, stride, downsample))
self.inplanes = 64 * block.expansion
for i in range(1, layers[0]):
layers.append(block(self.inplanes, 64))
self.layer1 = nn.Sequential(*layers)
downsample = None
if 2 != 1 or self.inplanes != 128 * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(128 * block.expansion))
layers = []
layers.append(block(self.inplanes, 128, 2, downsample))
self.inplanes = 128 * block.expansion
for i in range(1, layers[1]):
layers.append(block(self.inplanes, 128))
self.layer2 = nn.Sequential(*layers)
downsample = None
if 2 != 1 or self.inplanes != 256 * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(256 * block.expansion))
layers = []
layers.append(block(self.inplanes, 256, 2, downsample))
self.inplanes = 256 * block.expansion
for i in range(1, layers[2]):
layers.append(block(self.inplanes, 256))
self.layer3 = nn.Sequential(*layers)
downsample = None
if 2 != 1 or self.inplanes != 512 * block.expansion:
downsample = nn.Sequential(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(512 * block.expansion))
layers = []
layers.append(block(self.inplanes, 512, 2, downsample))
self.inplanes = 512 * block.expansion
for i in range(1, layers[3]):
layers.append(block(self.inplanes, 512))
self.layer4 = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.features = 512 * block.expansion
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
|
CPF
|
positive
|