| before (string, lengths 0–955k) | after (string, lengths 0–877k) | repo (string, lengths 1–74) | type (string, 1 class) |
|---|---|---|---|
def annotateSenses(self, currentName, newLexiconName):
<DeepExtract>
sql = 'SHOW COLUMNS from %s' % currentName
self.dbCursor.execute(sql)
data = self.dbCursor.fetchall()
if len(data) > 0:
numColumns = len(data)
if numColumns == 3:
weighted = False
elif numColumns == 4:
weighted = True
else:
raise Exception('Incorrect lexicon specified; number of rows in table [%s] is not 3 or 4')
else:
raise Exception('Lexicon table [%s] has no columns' % currentName)
</DeepExtract>
senseLexicon = WeightedLexicon()
try:
senseLexicon.loadLexicon(newLexiconName)
except MySQLdb.Error as e:
print('in except')
createLike = 'CREATE TABLE %s LIKE %s' % (newLexiconName, currentName)
self.dbCursor.execute(createLike)
senseLexicon.loadLexicon(newLexiconName)
oldLexicon = self.currentLexicon
newLexicon = senseLexicon.currentLexicon
seenWords = set()
for (cat, words) in newLexicon.items():
cat = cat.lower()
for ws in words:
if ws:
(lemma, pos, sense, word) = ws.split('.')
seenWords.add(cat + '#' + word)
for (cat, words) in oldLexicon.items():
cat = cat.lower()
for word in words:
if cat + '#' + word in seenWords:
print('already annotated %s: %s (skipping)' % (cat, word))
else:
<DeepExtract>
os.system('clear')
print('\n[%s] \x1b[92m%s\x1b[0m\n%s' % (PERMA_CODES[cat].title(), word, '=' * (len(word) + 20)))
print('\x1b[90m%s\x1b[0m\n' % PERMA_LONG_DEF[cat])
currentSenses = set()
POSs = {wn.NOUN: 'noun', wn.VERB: 'verb', wn.ADJ: 'adjective', wn.ADV: 'adverb'}
for (pos, posName) in POSs.items():
synsets = wn.synsets(word, pos)
if synsets:
print('\t%s:' % posName)
i = 1
wss = [None]
for syns in synsets:
wss.append(syns.name + '.' + word)
print('\t\t\x1b[92m %d: \x1b[0m(%s)\x1b[92m %s\x1b[0m' % (i, ', '.join([lemma.name for lemma in syns.lemmas]), syns.definition))
i += 1
answered = False
senses = None
while not senses:
print('\n\tWhich of the above senses expresses \x1b[1m%s (i.e. %s)\x1b[0m?' % (PERMA_CODES[cat].title(), PERMA_SHORT_DEF[cat]))
senses = input('\t(separate with spaces; 0 => none; cntrl-c to quit)? ')
senses = senses.strip()
if not re.match('^[0-9, ]+$', senses):
print('entered non-numeric character')
senses = None
continue
senses = re.findall('\\d+', senses)
ins = set(range(len(wss)))
for s in senses:
s = int(s)
if s == 0 and len(senses) > 1:
print('entered 0 along with other senses')
senses = None
if s not in ins:
print('%d not a choice' % s)
senses = None
for s in senses:
if s > 0:
ws = wss[int(s)]
print('\t\t\tadding %s' % ws)
currentSenses.add(ws)
print('The following will be added: %s' % currentSenses)
senses = currentSenses
</DeepExtract>
if senses:
if weighted:
smallNewLex = {cat: dict(list(zip(senses, [words[word]] * len(senses))))}
sys.stderr.write('newLexiconName %s \n' % newLexiconName)
<DeepExtract>
if not smallNewLex:
smallNewLex = self.weightedLexicon
sqlQuery = 'INSERT INTO ' + newLexiconName + ' (term, category, weight) values (%s, %s, %s)'
values = []
for cat in smallNewLex:
for term in smallNewLex[cat]:
if self.weightedLexicon[cat][term] != 0:
values.extend([[term, cat.upper(), self.weightedLexicon[cat][term]]])
try:
nbInserted = 0
length = len(values)
chunks = zip(*[iter(values)] * 100)
pprint.pprint(chunks)
for v in chunks:
nbInserted += self.dbCursor.executemany(sqlQuery, v)
remainingValues = values[nbInserted:]
if remainingValues:
nbInserted += self.dbCursor.executemany(sqlQuery, remainingValues)
print('Inserted %d terms into the lexicon' % nbInserted)
if nbInserted != length:
print("Warning the number of rows inserted doesn't match the total number of rows")
except MySQLdb.Error as e:
dlac.warn('MYSQL ERROR:' + str(e) + sqlQuery)
sys.exit(1)
</DeepExtract>
else:
smallNewLex = {cat: frozenset(senses)}
<DeepExtract>
if not smallNewLex:
smallNewLex = self.currentLexicon
sqlQuery = 'INSERT INTO ' + newLexiconName + ' (term, category) values (%s, %s)'
values = []
for (cat, terms) in smallNewLex.items():
values.extend([[term, cat.upper()] for term in terms])
try:
self.dbCursor.executemany(sqlQuery, values)
except MySQLdb.Error as e:
dlac.warn('MYSQL ERROR in insertLexiconRows:' + str(e) + sqlQuery)
sys.exit(1)
</DeepExtract>
|
def annotateSenses(self, currentName, newLexiconName):
sql = 'SHOW COLUMNS from %s' % currentName
self.dbCursor.execute(sql)
data = self.dbCursor.fetchall()
if len(data) > 0:
numColumns = len(data)
if numColumns == 3:
weighted = False
elif numColumns == 4:
weighted = True
else:
raise Exception('Incorrect lexicon specified; number of rows in table [%s] is not 3 or 4')
else:
raise Exception('Lexicon table [%s] has no columns' % currentName)
senseLexicon = WeightedLexicon()
try:
senseLexicon.loadLexicon(newLexiconName)
except MySQLdb.Error as e:
print('in except')
createLike = 'CREATE TABLE %s LIKE %s' % (newLexiconName, currentName)
self.dbCursor.execute(createLike)
senseLexicon.loadLexicon(newLexiconName)
oldLexicon = self.currentLexicon
newLexicon = senseLexicon.currentLexicon
seenWords = set()
for (cat, words) in newLexicon.items():
cat = cat.lower()
for ws in words:
if ws:
(lemma, pos, sense, word) = ws.split('.')
seenWords.add(cat + '#' + word)
for (cat, words) in oldLexicon.items():
cat = cat.lower()
for word in words:
if cat + '#' + word in seenWords:
print('already annotated %s: %s (skipping)' % (cat, word))
else:
os.system('clear')
print('\n[%s] \x1b[92m%s\x1b[0m\n%s' % (PERMA_CODES[cat].title(), word, '=' * (len(word) + 20)))
print('\x1b[90m%s\x1b[0m\n' % PERMA_LONG_DEF[cat])
currentSenses = set()
POSs = {wn.NOUN: 'noun', wn.VERB: 'verb', wn.ADJ: 'adjective', wn.ADV: 'adverb'}
for (pos, posName) in POSs.items():
synsets = wn.synsets(word, pos)
if synsets:
print('\t%s:' % posName)
i = 1
wss = [None]
for syns in synsets:
wss.append(syns.name + '.' + word)
print('\t\t\x1b[92m %d: \x1b[0m(%s)\x1b[92m %s\x1b[0m' % (i, ', '.join([lemma.name for lemma in syns.lemmas]), syns.definition))
i += 1
answered = False
senses = None
while not senses:
print('\n\tWhich of the above senses expresses \x1b[1m%s (i.e. %s)\x1b[0m?' % (PERMA_CODES[cat].title(), PERMA_SHORT_DEF[cat]))
senses = input('\t(separate with spaces; 0 => none; cntrl-c to quit)? ')
senses = senses.strip()
if not re.match('^[0-9, ]+$', senses):
print('entered non-numeric character')
senses = None
continue
senses = re.findall('\\d+', senses)
ins = set(range(len(wss)))
for s in senses:
s = int(s)
if s == 0 and len(senses) > 1:
print('entered 0 along with other senses')
senses = None
if s not in ins:
print('%d not a choice' % s)
senses = None
for s in senses:
if s > 0:
ws = wss[int(s)]
print('\t\t\tadding %s' % ws)
currentSenses.add(ws)
print('The following will be added: %s' % currentSenses)
senses = currentSenses
if senses:
if weighted:
smallNewLex = {cat: dict(list(zip(senses, [words[word]] * len(senses))))}
sys.stderr.write('newLexiconName %s \n' % newLexiconName)
if not smallNewLex:
smallNewLex = self.weightedLexicon
sqlQuery = 'INSERT INTO ' + newLexiconName + ' (term, category, weight) values (%s, %s, %s)'
values = []
for cat in smallNewLex:
for term in smallNewLex[cat]:
if self.weightedLexicon[cat][term] != 0:
values.extend([[term, cat.upper(), self.weightedLexicon[cat][term]]])
try:
nbInserted = 0
length = len(values)
chunks = zip(*[iter(values)] * 100)
pprint.pprint(chunks)
for v in chunks:
nbInserted += self.dbCursor.executemany(sqlQuery, v)
remainingValues = values[nbInserted:]
if remainingValues:
nbInserted += self.dbCursor.executemany(sqlQuery, remainingValues)
print('Inserted %d terms into the lexicon' % nbInserted)
if nbInserted != length:
print("Warning the number of rows inserted doesn't match the total number of rows")
except MySQLdb.Error as e:
dlac.warn('MYSQL ERROR:' + str(e) + sqlQuery)
sys.exit(1)
else:
smallNewLex = {cat: frozenset(senses)}
if not smallNewLex:
smallNewLex = self.currentLexicon
sqlQuery = 'INSERT INTO ' + newLexiconName + ' (term, category) values (%s, %s)'
values = []
for (cat, terms) in smallNewLex.items():
values.extend([[term, cat.upper()] for term in terms])
try:
self.dbCursor.executemany(sqlQuery, values)
except MySQLdb.Error as e:
dlac.warn('MYSQL ERROR in insertLexiconRows:' + str(e) + sqlQuery)
sys.exit(1)
|
dlatk
|
positive
|
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource('s3')
<DeepExtract>
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError('bad s3 path {}'.format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
if s3_path.startswith('/'):
s3_path = s3_path[1:]
(bucket_name, s3_path) = (bucket_name, s3_path)
</DeepExtract>
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
|
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource('s3')
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError('bad s3 path {}'.format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
if s3_path.startswith('/'):
s3_path = s3_path[1:]
(bucket_name, s3_path) = (bucket_name, s3_path)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
|
DeepEventMine
|
positive
|
def test_added_2_7(self):
"""Test for names added in 2.7."""
<DeepExtract>
(before, after) = (up_to_version((2, 7)), from_version((2, 7)))
</DeepExtract>
for (name, suggs) in {'memoryview': [MEMVIEW_ADDED_MSG]}.items():
<DeepExtract>
suggs = sorted(listify(suggs, [], str))
(error_type, error_msg) = NAMEERROR
details = 'Running following code :\n---\n{0}\n---'.format(name)
if PythonEnvRange(before, interpreters).contains_current_env():
exc = get_exception(name)
self.assertFalse(exc is None, 'No exc thrown.' + details)
(type_caught, value, traceback) = exc
suggestions = sorted(get_suggestions_for_exception(value, traceback))
self.log_exception(name, exc, suggestions)
self.assertTrue(isinstance(value, type_caught))
self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details)
msg = next((a for a in value.args if isinstance(a, str)), '')
if error_msg:
self.assertRegexp(msg, error_msg, details)
self.assertEqual(suggestions, suggs, details)
</DeepExtract>
<DeepExtract>
details = 'Running following code :\n---\n{0}\n---'.format(name)
if PythonEnvRange(after, interpreters).contains_current_env():
exc = get_exception(name)
self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details)
</DeepExtract>
|
def test_added_2_7(self):
"""Test for names added in 2.7."""
(before, after) = (up_to_version((2, 7)), from_version((2, 7)))
for (name, suggs) in {'memoryview': [MEMVIEW_ADDED_MSG]}.items():
suggs = sorted(listify(suggs, [], str))
(error_type, error_msg) = NAMEERROR
details = 'Running following code :\n---\n{0}\n---'.format(name)
if PythonEnvRange(before, interpreters).contains_current_env():
exc = get_exception(name)
self.assertFalse(exc is None, 'No exc thrown.' + details)
(type_caught, value, traceback) = exc
suggestions = sorted(get_suggestions_for_exception(value, traceback))
self.log_exception(name, exc, suggestions)
self.assertTrue(isinstance(value, type_caught))
self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details)
msg = next((a for a in value.args if isinstance(a, str)), '')
if error_msg:
self.assertRegexp(msg, error_msg, details)
self.assertEqual(suggestions, suggs, details)
details = 'Running following code :\n---\n{0}\n---'.format(name)
if PythonEnvRange(after, interpreters).contains_current_env():
exc = get_exception(name)
self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details)
|
DidYouMean-Python
|
positive
|
def resnet_fpn(features, min_level=3, max_level=7, resnet_depth=50, is_training_bn=False, use_nearest_upsampling=True):
"""ResNet feature pyramid networks."""
with tf.variable_scope('resnet%s' % resnet_depth):
<DeepExtract>
model_params = {18: {'block': residual_block, 'layers': [2, 2, 2, 2]}, 34: {'block': residual_block, 'layers': [3, 4, 6, 3]}, 50: {'block': bottleneck_block, 'layers': [3, 4, 6, 3]}, 101: {'block': bottleneck_block, 'layers': [3, 4, 23, 3]}, 152: {'block': bottleneck_block, 'layers': [3, 8, 36, 3]}, 200: {'block': bottleneck_block, 'layers': [3, 24, 36, 3]}}
if resnet_depth not in model_params:
raise ValueError('Not a valid resnet_depth:', resnet_depth)
params = model_params[resnet_depth]
resnet_fn = resnet_v1_generator(params['block'], params['layers'], data_format)
</DeepExtract>
(u2, u3, u4, u5) = resnet_fn(features, is_training_bn)
feats_bottom_up = {2: u2, 3: u3, 4: u4, 5: u5}
with tf.variable_scope('resnet_fpn'):
feats_lateral = {}
for level in range(min_level, _RESNET_MAX_LEVEL + 1):
feats_lateral[level] = tf.layers.conv2d(feats_bottom_up[level], filters=256, kernel_size=(1, 1), padding='same', name='l%d' % level)
feats = {_RESNET_MAX_LEVEL: feats_lateral[_RESNET_MAX_LEVEL]}
for level in range(_RESNET_MAX_LEVEL - 1, min_level - 1, -1):
if use_nearest_upsampling:
feats[level] = nearest_upsampling(feats[level + 1], 2) + feats_lateral[level]
else:
feats[level] = resize_bilinear(feats[level + 1], tf.shape(feats_lateral[level])[1:3], feats[level + 1].dtype) + feats_lateral[level]
for level in range(min_level, _RESNET_MAX_LEVEL + 1):
feats[level] = tf.layers.conv2d(feats[level], filters=256, strides=(1, 1), kernel_size=(3, 3), padding='same', name='post_hoc_d%d' % level)
for level in range(_RESNET_MAX_LEVEL + 1, max_level + 1):
feats_in = feats[level - 1]
if level > _RESNET_MAX_LEVEL + 1:
feats_in = tf.nn.relu(feats_in)
feats[level] = tf.layers.conv2d(feats_in, filters=256, strides=(2, 2), kernel_size=(3, 3), padding='same', name='p%d' % level)
for level in range(min_level, max_level + 1):
feats[level] = tf.layers.batch_normalization(inputs=feats[level], momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True, scale=True, training=is_training_bn, fused=True, name='p%d-bn' % level)
return feats
|
def resnet_fpn(features, min_level=3, max_level=7, resnet_depth=50, is_training_bn=False, use_nearest_upsampling=True):
"""ResNet feature pyramid networks."""
with tf.variable_scope('resnet%s' % resnet_depth):
model_params = {18: {'block': residual_block, 'layers': [2, 2, 2, 2]}, 34: {'block': residual_block, 'layers': [3, 4, 6, 3]}, 50: {'block': bottleneck_block, 'layers': [3, 4, 6, 3]}, 101: {'block': bottleneck_block, 'layers': [3, 4, 23, 3]}, 152: {'block': bottleneck_block, 'layers': [3, 8, 36, 3]}, 200: {'block': bottleneck_block, 'layers': [3, 24, 36, 3]}}
if resnet_depth not in model_params:
raise ValueError('Not a valid resnet_depth:', resnet_depth)
params = model_params[resnet_depth]
resnet_fn = resnet_v1_generator(params['block'], params['layers'], data_format)
(u2, u3, u4, u5) = resnet_fn(features, is_training_bn)
feats_bottom_up = {2: u2, 3: u3, 4: u4, 5: u5}
with tf.variable_scope('resnet_fpn'):
feats_lateral = {}
for level in range(min_level, _RESNET_MAX_LEVEL + 1):
feats_lateral[level] = tf.layers.conv2d(feats_bottom_up[level], filters=256, kernel_size=(1, 1), padding='same', name='l%d' % level)
feats = {_RESNET_MAX_LEVEL: feats_lateral[_RESNET_MAX_LEVEL]}
for level in range(_RESNET_MAX_LEVEL - 1, min_level - 1, -1):
if use_nearest_upsampling:
feats[level] = nearest_upsampling(feats[level + 1], 2) + feats_lateral[level]
else:
feats[level] = resize_bilinear(feats[level + 1], tf.shape(feats_lateral[level])[1:3], feats[level + 1].dtype) + feats_lateral[level]
for level in range(min_level, _RESNET_MAX_LEVEL + 1):
feats[level] = tf.layers.conv2d(feats[level], filters=256, strides=(1, 1), kernel_size=(3, 3), padding='same', name='post_hoc_d%d' % level)
for level in range(_RESNET_MAX_LEVEL + 1, max_level + 1):
feats_in = feats[level - 1]
if level > _RESNET_MAX_LEVEL + 1:
feats_in = tf.nn.relu(feats_in)
feats[level] = tf.layers.conv2d(feats_in, filters=256, strides=(2, 2), kernel_size=(3, 3), padding='same', name='p%d' % level)
for level in range(min_level, max_level + 1):
feats[level] = tf.layers.batch_normalization(inputs=feats[level], momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True, scale=True, training=is_training_bn, fused=True, name='p%d-bn' % level)
return feats
|
class-balanced-loss
|
positive
|
def update_observed_submodel(self):
"""
Method. Extract the sub-model of observed variables.
Args: None.
Returns:
None.
"""
<DeepExtract>
pass
</DeepExtract>
observed_variables = [var for var in flattened_model if var.is_observed]
self.observed_submodel = ProbabilisticModel(observed_variables, is_fully_observed=True)
|
def update_observed_submodel(self):
"""
Method. Extract the sub-model of observed variables.
Args: None.
Returns:
None.
"""
pass
observed_variables = [var for var in flattened_model if var.is_observed]
self.observed_submodel = ProbabilisticModel(observed_variables, is_fully_observed=True)
|
Brancher
|
positive
|
def absorbance_mode_params(self, wells: Union[List[Well], WellGroup], wavelength: WAVELENGTH, num_flashes: Optional[int]=None, settle_time: Optional[TIME]=None, read_position: Optional[SpectrophotometryBuildersReadPositions]=None, position_z: Optional[dict]=None):
"""
Parameters
----------
wells : iterable(Well) or WellGroup
Wells to be read.
wavelength : Unit or str
The wavelengths at which to make absorbance measurements.
num_flashes : int, optional
The number of discrete reads to be taken and then averaged.
settle_time : Unit or str, optional
The time to wait between moving to a well and reading it.
read_position: Enum("top", "bottom"), optional
The position of the probe relative to the plate for the read
position_z: dict, optional
This should be specified with either `position_z_manual` or
`position_z_calculated`
Returns
-------
dict
Formatted mode_params for an absorbance mode.
Raises
------
TypeError
Invalid type specified for input parameters, e.g. `num_flashes`
not of type int
ValueError
Invalid wells specified
"""
if not is_valid_well(wells):
raise ValueError(f'Invalid wells {wells}, must be an iterable of wells or a WellGroup.')
if isinstance(wells, Well):
wells = WellGroup([wells])
if not isinstance(wavelength, list):
wavelength = [wavelength]
wavelength = [parse_unit(_, 'nanometer') for _ in wavelength]
if num_flashes is not None and (not isinstance(num_flashes, int)):
raise TypeError(f'Invalid num_flashes {num_flashes}, must be an int')
if settle_time is not None:
settle_time = parse_unit(settle_time, 'second')
if read_position is not None and read_position not in self.READ_POSITIONS:
raise ValueError(f'Invalid read_position {read_position}, must be in {self.READ_POSITIONS}.')
if position_z is not None:
<DeepExtract>
suggested_msg = 'Please use either `position_z_manual` or `position_z_calculated` functions to construct the appropriate z-position.'
if not isinstance(position_z, dict):
raise TypeError(f'Invalid position_z {position_z}, must be a dict. {suggested_msg}')
if 'calculated_from_wells' in position_z:
position_z = self.position_z_calculated(**position_z['calculated_from_wells'])
elif 'manual' in position_z:
position_z = self.position_z_manual(**position_z['manual'])
else:
raise ValueError(f'Invalid position_z {position_z} specified. {suggested_msg}')
</DeepExtract>
mode_params = {'wells': wells, 'wavelength': wavelength, 'num_flashes': num_flashes, 'settle_time': settle_time, 'read_position': read_position, 'position_z': position_z}
mode_params = {k: v for (k, v) in mode_params.items() if v is not None}
return mode_params
|
def absorbance_mode_params(self, wells: Union[List[Well], WellGroup], wavelength: WAVELENGTH, num_flashes: Optional[int]=None, settle_time: Optional[TIME]=None, read_position: Optional[SpectrophotometryBuildersReadPositions]=None, position_z: Optional[dict]=None):
"""
Parameters
----------
wells : iterable(Well) or WellGroup
Wells to be read.
wavelength : Unit or str
The wavelengths at which to make absorbance measurements.
num_flashes : int, optional
The number of discrete reads to be taken and then averaged.
settle_time : Unit or str, optional
The time to wait between moving to a well and reading it.
read_position: Enum("top", "bottom"), optional
The position of the probe relative to the plate for the read
position_z: dict, optional
This should be specified with either `position_z_manual` or
`position_z_calculated`
Returns
-------
dict
Formatted mode_params for an absorbance mode.
Raises
------
TypeError
Invalid type specified for input parameters, e.g. `num_flashes`
not of type int
ValueError
Invalid wells specified
"""
if not is_valid_well(wells):
raise ValueError(f'Invalid wells {wells}, must be an iterable of wells or a WellGroup.')
if isinstance(wells, Well):
wells = WellGroup([wells])
if not isinstance(wavelength, list):
wavelength = [wavelength]
wavelength = [parse_unit(_, 'nanometer') for _ in wavelength]
if num_flashes is not None and (not isinstance(num_flashes, int)):
raise TypeError(f'Invalid num_flashes {num_flashes}, must be an int')
if settle_time is not None:
settle_time = parse_unit(settle_time, 'second')
if read_position is not None and read_position not in self.READ_POSITIONS:
raise ValueError(f'Invalid read_position {read_position}, must be in {self.READ_POSITIONS}.')
if position_z is not None:
suggested_msg = 'Please use either `position_z_manual` or `position_z_calculated` functions to construct the appropriate z-position.'
if not isinstance(position_z, dict):
raise TypeError(f'Invalid position_z {position_z}, must be a dict. {suggested_msg}')
if 'calculated_from_wells' in position_z:
position_z = self.position_z_calculated(**position_z['calculated_from_wells'])
elif 'manual' in position_z:
position_z = self.position_z_manual(**position_z['manual'])
else:
raise ValueError(f'Invalid position_z {position_z} specified. {suggested_msg}')
mode_params = {'wells': wells, 'wavelength': wavelength, 'num_flashes': num_flashes, 'settle_time': settle_time, 'read_position': read_position, 'position_z': position_z}
mode_params = {k: v for (k, v) in mode_params.items() if v is not None}
return mode_params
|
autoprotocol-python
|
positive
|
def gtoc_archive_add_many(self, archives: List[GtocArchiveEntry]):
a: GtocArchiveEntry
entries = [(a.src_uid, a.path_hash32, a.archive_magic) for a in archives]
self.db_execute_many('INSERT OR IGNORE INTO core_gtoc_archive_def VALUES (?,?,?)', entries, dbg='gtoc_archive_add_many:0:insert')
self.db_conn.commit()
def_row_ids = []
for entry in entries:
result = self.db_query_all('SELECT rowid FROM core_gtoc_archive_def WHERE (node_id_src=(?)) AND (path_hash32=(?)) AND (archive_magic=(?))', entry, dbg='gtoc_archive_add_many:1:select')
assert len(result) == 1
def_row_ids.append(result[0][0])
file_entry_strings = set()
a: GtocArchiveEntry
for a in archives:
fe: GtocFileEntry
for fe in a.file_entries:
file_entry_strings.add(fe.path)
hash_list = [make_hash_string_tuple(s) for s in file_entry_strings]
<DeepExtract>
hash_list_str = [(to_str(h[0]), h[1], h[2], h[3], h[4]) for h in hash_list]
hash_list_str_unique = list(set(hash_list_str))
self.db_execute_many('INSERT OR IGNORE INTO core_strings VALUES (?,?,?,?,?)', hash_list_str_unique, dbg='hash_string_add_many_basic:0:insert')
self.db_conn.commit()
self.db_changed_signal.call()
hash_list_map = {}
str_to_row_map = {}
for rec in hash_list_str_unique:
result = self.db_query_all('SELECT rowid FROM core_strings WHERE string=(?) and hash32=(?) and hash48=(?) and hash64=(?) and ext_hash32=(?)', rec, dbg='hash_string_add_many_basic:1:select')
assert len(result) == 1
row_id = result[0][0]
hash_list_map[rec] = row_id
str_to_row_map[to_bytes(rec[0])] = row_id
(_, _, str_to_row_map) = (hash_list_str, hash_list_map, str_to_row_map)
</DeepExtract>
all_file_entries = []
archive: GtocArchiveEntry
for (def_row_id, archive) in zip(def_row_ids, archives):
file_entry: GtocFileEntry
for (i, file_entry) in enumerate(archive.file_entries):
offset_in_archive = file_entry.offset_in_archive
file_size = file_entry.file_size
path = file_entry.path
path_row_id = str_to_row_map[path]
if offset_in_archive == 0:
offset_in_archive = None
all_file_entries.append((def_row_id, i, offset_in_archive, file_size, path_row_id))
self.db_execute_many('INSERT OR IGNORE INTO core_gtoc_file_entry VALUES (?,?,?,?,?)', all_file_entries, dbg='gtoc_archive_add_many:2:insert')
self.db_conn.commit()
self.db_changed_signal.call()
|
def gtoc_archive_add_many(self, archives: List[GtocArchiveEntry]):
a: GtocArchiveEntry
entries = [(a.src_uid, a.path_hash32, a.archive_magic) for a in archives]
self.db_execute_many('INSERT OR IGNORE INTO core_gtoc_archive_def VALUES (?,?,?)', entries, dbg='gtoc_archive_add_many:0:insert')
self.db_conn.commit()
def_row_ids = []
for entry in entries:
result = self.db_query_all('SELECT rowid FROM core_gtoc_archive_def WHERE (node_id_src=(?)) AND (path_hash32=(?)) AND (archive_magic=(?))', entry, dbg='gtoc_archive_add_many:1:select')
assert len(result) == 1
def_row_ids.append(result[0][0])
file_entry_strings = set()
a: GtocArchiveEntry
for a in archives:
fe: GtocFileEntry
for fe in a.file_entries:
file_entry_strings.add(fe.path)
hash_list = [make_hash_string_tuple(s) for s in file_entry_strings]
hash_list_str = [(to_str(h[0]), h[1], h[2], h[3], h[4]) for h in hash_list]
hash_list_str_unique = list(set(hash_list_str))
self.db_execute_many('INSERT OR IGNORE INTO core_strings VALUES (?,?,?,?,?)', hash_list_str_unique, dbg='hash_string_add_many_basic:0:insert')
self.db_conn.commit()
self.db_changed_signal.call()
hash_list_map = {}
str_to_row_map = {}
for rec in hash_list_str_unique:
result = self.db_query_all('SELECT rowid FROM core_strings WHERE string=(?) and hash32=(?) and hash48=(?) and hash64=(?) and ext_hash32=(?)', rec, dbg='hash_string_add_many_basic:1:select')
assert len(result) == 1
row_id = result[0][0]
hash_list_map[rec] = row_id
str_to_row_map[to_bytes(rec[0])] = row_id
(_, _, str_to_row_map) = (hash_list_str, hash_list_map, str_to_row_map)
all_file_entries = []
archive: GtocArchiveEntry
for (def_row_id, archive) in zip(def_row_ids, archives):
file_entry: GtocFileEntry
for (i, file_entry) in enumerate(archive.file_entries):
offset_in_archive = file_entry.offset_in_archive
file_size = file_entry.file_size
path = file_entry.path
path_row_id = str_to_row_map[path]
if offset_in_archive == 0:
offset_in_archive = None
all_file_entries.append((def_row_id, i, offset_in_archive, file_size, path_row_id))
self.db_execute_many('INSERT OR IGNORE INTO core_gtoc_file_entry VALUES (?,?,?,?,?)', all_file_entries, dbg='gtoc_archive_add_many:2:insert')
self.db_conn.commit()
self.db_changed_signal.call()
|
deca
|
positive
|
def run(args):
args = candle.ArgumentStruct(**args)
args.cuda = torch.cuda.is_available()
args.device = torch.device('cuda' if args.cuda else 'cpu')
<DeepExtract>
datapath = fetch_data(args)
train_data = P3B3(datapath, 'train')
valid_data = P3B3(datapath, 'test')
(train_data, valid_data) = (train_data, valid_data)
</DeepExtract>
hparams = Hparams(kernel1=args.kernel1, kernel2=args.kernel2, kernel3=args.kernel3, embed_dim=args.embed_dim, n_filters=args.n_filters)
train_loader = DataLoader(train_data, batch_size=args.batch_size)
valid_loader = DataLoader(valid_data, batch_size=args.batch_size)
model = MTCNN(TASKS, hparams).to(args.device)
model = create_prune_masks(model)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, eps=args.eps)
train_epoch_loss = []
valid_epoch_loss = []
for epoch in range(args.epochs):
<DeepExtract>
accmeter = AccuracyMeter(TASKS, train_loader)
total_loss = 0
for (idx, (data, target)) in enumerate(train_loader):
optimizer.zero_grad()
(data, target) = (data.to(args.device), to_device(target, args.device))
logits = model(data)
_ = TRAIN_F1_MICRO.f1(to_device(logits, 'cpu'), to_device(target, 'cpu'))
_ = TRAIN_F1_MACRO.f1(to_device(logits, 'cpu'), to_device(target, 'cpu'))
loss = model.loss_value(logits, target, reduce='mean')
loss.backward()
optimizer.step()
total_loss += loss.item()
accmeter.update(logits, target)
avg_loss = total_loss / len(train_loader.dataset)
accmeter.update_accuracy()
print(f'\nEpoch {epoch} Training Accuracy:')
accmeter.print_task_accuracies()
accmeter.reset()
train_loss = avg_loss
</DeepExtract>
<DeepExtract>
accmeter = AccuracyMeter(TASKS, valid_loader)
loss = 0
model.eval()
with torch.no_grad():
for (idx, (data, target)) in enumerate(valid_loader):
(data, target) = (data.to(args.device), to_device(target, args.device))
logits = model(data)
_ = VALID_F1_MICRO.f1(to_device(logits, 'cpu'), to_device(target, 'cpu'))
_ = VALID_F1_MACRO.f1(to_device(logits, 'cpu'), to_device(target, 'cpu'))
loss += model.loss_value(logits, target, reduce='mean').item()
accmeter.update(logits, target)
accmeter.update_accuracy()
print('Validation accuracy:')
accmeter.print_task_accuracies()
loss /= len(valid_loader.dataset)
valid_loss = loss
</DeepExtract>
train_epoch_loss.append(train_loss)
valid_epoch_loss.append(valid_loss)
model = remove_prune_masks(model)
|
def run(args):
args = candle.ArgumentStruct(**args)
args.cuda = torch.cuda.is_available()
args.device = torch.device('cuda' if args.cuda else 'cpu')
datapath = fetch_data(args)
train_data = P3B3(datapath, 'train')
valid_data = P3B3(datapath, 'test')
(train_data, valid_data) = (train_data, valid_data)
hparams = Hparams(kernel1=args.kernel1, kernel2=args.kernel2, kernel3=args.kernel3, embed_dim=args.embed_dim, n_filters=args.n_filters)
train_loader = DataLoader(train_data, batch_size=args.batch_size)
valid_loader = DataLoader(valid_data, batch_size=args.batch_size)
model = MTCNN(TASKS, hparams).to(args.device)
model = create_prune_masks(model)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, eps=args.eps)
train_epoch_loss = []
valid_epoch_loss = []
for epoch in range(args.epochs):
accmeter = AccuracyMeter(TASKS, train_loader)
total_loss = 0
for (idx, (data, target)) in enumerate(train_loader):
optimizer.zero_grad()
(data, target) = (data.to(args.device), to_device(target, args.device))
logits = model(data)
_ = TRAIN_F1_MICRO.f1(to_device(logits, 'cpu'), to_device(target, 'cpu'))
_ = TRAIN_F1_MACRO.f1(to_device(logits, 'cpu'), to_device(target, 'cpu'))
loss = model.loss_value(logits, target, reduce='mean')
loss.backward()
optimizer.step()
total_loss += loss.item()
accmeter.update(logits, target)
avg_loss = total_loss / len(train_loader.dataset)
accmeter.update_accuracy()
print(f'\nEpoch {epoch} Training Accuracy:')
accmeter.print_task_accuracies()
accmeter.reset()
train_loss = avg_loss
accmeter = AccuracyMeter(TASKS, valid_loader)
loss = 0
model.eval()
with torch.no_grad():
for (idx, (data, target)) in enumerate(valid_loader):
(data, target) = (data.to(args.device), to_device(target, args.device))
logits = model(data)
_ = VALID_F1_MICRO.f1(to_device(logits, 'cpu'), to_device(target, 'cpu'))
_ = VALID_F1_MACRO.f1(to_device(logits, 'cpu'), to_device(target, 'cpu'))
loss += model.loss_value(logits, target, reduce='mean').item()
accmeter.update(logits, target)
accmeter.update_accuracy()
print('Validation accuracy:')
accmeter.print_task_accuracies()
loss /= len(valid_loader.dataset)
valid_loss = loss
train_epoch_loss.append(train_loss)
valid_epoch_loss.append(valid_loss)
model = remove_prune_masks(model)
|
Benchmarks
|
positive
|
def main_cli():
"""Entry point for command-line script"""
multiprocessing.freeze_support()
<DeepExtract>
start_time = time.time()
parser = get_argument_parser()
(args, leftover_args) = parser.parse_known_args(args=sys.argv[1:])
if not logging.root.handlers:
setup_logging(logger, log_to_stderr=is_any_output_stdout(args), quiet=args.quiet, minimal=args.report == 'minimal', debug=args.debug)
log_header(sys.argv[1:])
profiler = setup_profiler_if_requested(args.profile)
log_system_info()
if args.quiet and args.report:
parser.error('Options --quiet and --report cannot be used at the same time')
if leftover_args:
warn_if_en_dashes(sys.argv[1:])
parser.error('unrecognized arguments: ' + ' '.join(leftover_args))
if args.cores < 0:
parser.error('Value for --cores cannot be negative')
cores = available_cpu_count() if args.cores == 0 else args.cores
file_opener = FileOpener(compression_level=args.compression_level, threads=estimate_compression_threads(cores))
if sys.stderr.isatty() and (not args.quiet) and (not args.debug):
progress = Progress()
else:
progress = DummyProgress()
paired = determine_paired(args)
try:
is_interleaved_input = args.interleaved and len(args.inputs) == 1
input_paths = setup_input_files(args.inputs, paired, is_interleaved_input)
check_arguments(args, paired)
(adapters, adapters2) = adapters_from_args(args)
log_adapters(adapters, adapters2 if paired else None)
make_pipeline = PipelineMaker(args, paired, adapters, adapters2)
make_pipeline()
adapter_names: List[Optional[str]] = [a.name for a in adapters]
adapter_names2: List[Optional[str]] = [a.name for a in adapters2]
outfiles = open_output_files(args, default_outfile, file_opener, adapter_names, adapter_names2)
inpaths = InputPaths(*input_paths, interleaved=is_interleaved_input)
logger.info('Processing %s reads on %d core%s ...', {False: 'single-end', True: 'paired-end'}[paired], cores, 's' if cores > 1 else '')
stats = run_pipeline(make_pipeline, inpaths, outfiles, cores, progress, args.buffer_size)
except KeyboardInterrupt:
if args.debug:
raise
else:
print('Interrupted', file=sys.stderr)
sys.exit(130)
except BrokenPipeError:
sys.exit(1)
except (OSError, EOFError, HasNoQualities, dnaio.UnknownFileFormat, dnaio.FileFormatError, CommandLineError) as e:
logger.debug('Command line error. Traceback:', exc_info=True)
logger.error('%s', e)
exit_code = 2 if isinstance(e, CommandLineError) else 1
sys.exit(exit_code)
elapsed = time.time() - start_time
if args.report == 'minimal':
report = minimal_report
else:
report = full_report
logger.log(REPORT, '%s', report(stats, elapsed, args.gc_content / 100.0))
if args.json is not None:
with open(args.json, 'w') as f:
json_dict = json_report(stats=stats, cmdlineargs=sys.argv[1:], path1=inpaths.paths[0], path2=inpaths.paths[1] if len(inpaths.paths) > 1 else None, cores=cores, paired=paired, gc_content=args.gc_content / 100.0)
f.write(json_dumps(json_dict))
f.write('\n')
if profiler is not None:
import pstats
profiler.disable()
pstats.Stats(profiler).sort_stats('time').print_stats(20)
return stats
</DeepExtract>
return 0
|
def main_cli():
"""Entry point for command-line script"""
multiprocessing.freeze_support()
start_time = time.time()
parser = get_argument_parser()
(args, leftover_args) = parser.parse_known_args(args=sys.argv[1:])
if not logging.root.handlers:
setup_logging(logger, log_to_stderr=is_any_output_stdout(args), quiet=args.quiet, minimal=args.report == 'minimal', debug=args.debug)
log_header(sys.argv[1:])
profiler = setup_profiler_if_requested(args.profile)
log_system_info()
if args.quiet and args.report:
parser.error('Options --quiet and --report cannot be used at the same time')
if leftover_args:
warn_if_en_dashes(sys.argv[1:])
parser.error('unrecognized arguments: ' + ' '.join(leftover_args))
if args.cores < 0:
parser.error('Value for --cores cannot be negative')
cores = available_cpu_count() if args.cores == 0 else args.cores
file_opener = FileOpener(compression_level=args.compression_level, threads=estimate_compression_threads(cores))
if sys.stderr.isatty() and (not args.quiet) and (not args.debug):
progress = Progress()
else:
progress = DummyProgress()
paired = determine_paired(args)
try:
is_interleaved_input = args.interleaved and len(args.inputs) == 1
input_paths = setup_input_files(args.inputs, paired, is_interleaved_input)
check_arguments(args, paired)
(adapters, adapters2) = adapters_from_args(args)
log_adapters(adapters, adapters2 if paired else None)
make_pipeline = PipelineMaker(args, paired, adapters, adapters2)
make_pipeline()
adapter_names: List[Optional[str]] = [a.name for a in adapters]
adapter_names2: List[Optional[str]] = [a.name for a in adapters2]
outfiles = open_output_files(args, default_outfile, file_opener, adapter_names, adapter_names2)
inpaths = InputPaths(*input_paths, interleaved=is_interleaved_input)
logger.info('Processing %s reads on %d core%s ...', {False: 'single-end', True: 'paired-end'}[paired], cores, 's' if cores > 1 else '')
stats = run_pipeline(make_pipeline, inpaths, outfiles, cores, progress, args.buffer_size)
except KeyboardInterrupt:
if args.debug:
raise
else:
print('Interrupted', file=sys.stderr)
sys.exit(130)
except BrokenPipeError:
sys.exit(1)
except (OSError, EOFError, HasNoQualities, dnaio.UnknownFileFormat, dnaio.FileFormatError, CommandLineError) as e:
logger.debug('Command line error. Traceback:', exc_info=True)
logger.error('%s', e)
exit_code = 2 if isinstance(e, CommandLineError) else 1
sys.exit(exit_code)
elapsed = time.time() - start_time
if args.report == 'minimal':
report = minimal_report
else:
report = full_report
logger.log(REPORT, '%s', report(stats, elapsed, args.gc_content / 100.0))
if args.json is not None:
with open(args.json, 'w') as f:
json_dict = json_report(stats=stats, cmdlineargs=sys.argv[1:], path1=inpaths.paths[0], path2=inpaths.paths[1] if len(inpaths.paths) > 1 else None, cores=cores, paired=paired, gc_content=args.gc_content / 100.0)
f.write(json_dumps(json_dict))
f.write('\n')
if profiler is not None:
import pstats
profiler.disable()
pstats.Stats(profiler).sort_stats('time').print_stats(20)
return stats
return 0
|
cutadapt
|
positive
|
def main():
"""Main"""
import matplotlib.pyplot as plt
for deterministic in [True, False]:
<DeepExtract>
morph = ephys.morphologies.NrnFileMorphology(os.path.join(morph_dir, 'simple.swc'))
somatic_loc = ephys.locations.NrnSeclistLocation('somatic', seclist_name='somatic')
stochkv3_mech = ephys.mechanisms.NrnMODMechanism(name='StochKv3', suffix='StochKv3', locations=[somatic_loc], deterministic=deterministic)
pas_mech = ephys.mechanisms.NrnMODMechanism(name='pas', suffix='pas', locations=[somatic_loc])
gkbar_param = ephys.parameters.NrnSectionParameter(name='gkbar_StochKv3', param_name='gkbar_StochKv3', locations=[somatic_loc], bounds=[0.0, 10.0], frozen=False)
epas_param = ephys.parameters.NrnSectionParameter(name='e_pas', param_name='e_pas', locations=[somatic_loc], value=-90, frozen=True)
celsius_param = ephys.parameters.NrnGlobalParameter(name='celsius', param_name='celsius', value=34.0, frozen=True)
params = [epas_param, celsius_param, gkbar_param]
stochkv3_cell = ephys.models.CellModel(name='stochkv3_cell', morph=morph, mechs=[pas_mech, stochkv3_mech], params=params)
soma_loc = ephys.locations.NrnSeclistCompLocation(name='soma', seclist_name='somatic', sec_index=0, comp_x=0.5)
stim = ephys.stimuli.NrnSquarePulse(step_amplitude=0.1, step_delay=50, step_duration=50, location=soma_loc, total_duration=150)
hold_stim = ephys.stimuli.NrnSquarePulse(step_amplitude=-0.025, step_delay=0, step_duration=10000, location=soma_loc, total_duration=150)
rec = ephys.recordings.CompRecording(name='Step.soma.v', location=soma_loc, variable='v')
protocol = ephys.protocols.SweepProtocol('Step', [stim, hold_stim], [rec])
nrn = ephys.simulators.NrnSimulator(cvode_active=False)
evaluator = ephys.evaluators.CellEvaluator(cell_model=stochkv3_cell, param_names=[param.name for param in params], fitness_calculator=ephys.objectivescalculators.ObjectivesCalculator(), sim=nrn)
best_param_values = {'gkbar_StochKv3': 0.5}
responses = evaluator.run_protocol(protocol, cell_model=stochkv3_cell, param_values=best_param_values, sim=nrn)
hoc_string = stochkv3_cell.create_hoc(param_values=best_param_values, disable_banner=True)
stochkv3_hoc_cell = ephys.models.HocCellModel('stochkv3_hoc_cell', morphology_path=morph_dir, hoc_string=hoc_string)
nrn.neuron.h.celsius = 34
hoc_responses = protocol.run(stochkv3_hoc_cell, best_param_values, sim=nrn)
evaluator.use_params_for_seed = True
different_seed_responses = evaluator.run_protocol(protocol, cell_model=stochkv3_cell, param_values=best_param_values, sim=nrn)
(stochkv3_responses, stochkv3_hoc_responses, different_seed_responses, stochkv3_hoc_string) = (responses, hoc_responses, different_seed_responses, hoc_string)
</DeepExtract>
with open(stochkv3_hoc_filename(deterministic=deterministic), 'w') as stochkv3_hoc_file:
stochkv3_hoc_file.write(stochkv3_hoc_string)
time = stochkv3_responses['Step.soma.v']['time']
py_voltage = stochkv3_responses['Step.soma.v']['voltage']
hoc_voltage = stochkv3_hoc_responses['Step.soma.v']['voltage']
different_seed_voltage = different_seed_responses['Step.soma.v']['voltage']
plt.figure()
plt.plot(time, py_voltage - hoc_voltage, label='py - hoc diff')
plt.xlabel('time (ms)')
plt.ylabel('voltage diff(mV)')
plt.title('Deterministic' if deterministic else 'Stochastic')
plt.legend()
plt.figure()
plt.plot(time, py_voltage, label='py')
plt.plot(time, hoc_voltage, label='hoc')
plt.xlabel('time (ms)')
plt.ylabel('voltage (mV)')
plt.title('Deterministic' if deterministic else 'Stochastic')
plt.legend()
plt.figure()
plt.plot(time, py_voltage, label='py')
plt.plot(time, different_seed_voltage, label='different seed')
plt.xlabel('time (ms)')
plt.ylabel('voltage (mV)')
plt.title('Deterministic' if deterministic else 'Stochastic')
plt.legend()
plt.show()
|
def main():
"""Main"""
import matplotlib.pyplot as plt
for deterministic in [True, False]:
morph = ephys.morphologies.NrnFileMorphology(os.path.join(morph_dir, 'simple.swc'))
somatic_loc = ephys.locations.NrnSeclistLocation('somatic', seclist_name='somatic')
stochkv3_mech = ephys.mechanisms.NrnMODMechanism(name='StochKv3', suffix='StochKv3', locations=[somatic_loc], deterministic=deterministic)
pas_mech = ephys.mechanisms.NrnMODMechanism(name='pas', suffix='pas', locations=[somatic_loc])
gkbar_param = ephys.parameters.NrnSectionParameter(name='gkbar_StochKv3', param_name='gkbar_StochKv3', locations=[somatic_loc], bounds=[0.0, 10.0], frozen=False)
epas_param = ephys.parameters.NrnSectionParameter(name='e_pas', param_name='e_pas', locations=[somatic_loc], value=-90, frozen=True)
celsius_param = ephys.parameters.NrnGlobalParameter(name='celsius', param_name='celsius', value=34.0, frozen=True)
params = [epas_param, celsius_param, gkbar_param]
stochkv3_cell = ephys.models.CellModel(name='stochkv3_cell', morph=morph, mechs=[pas_mech, stochkv3_mech], params=params)
soma_loc = ephys.locations.NrnSeclistCompLocation(name='soma', seclist_name='somatic', sec_index=0, comp_x=0.5)
stim = ephys.stimuli.NrnSquarePulse(step_amplitude=0.1, step_delay=50, step_duration=50, location=soma_loc, total_duration=150)
hold_stim = ephys.stimuli.NrnSquarePulse(step_amplitude=-0.025, step_delay=0, step_duration=10000, location=soma_loc, total_duration=150)
rec = ephys.recordings.CompRecording(name='Step.soma.v', location=soma_loc, variable='v')
protocol = ephys.protocols.SweepProtocol('Step', [stim, hold_stim], [rec])
nrn = ephys.simulators.NrnSimulator(cvode_active=False)
evaluator = ephys.evaluators.CellEvaluator(cell_model=stochkv3_cell, param_names=[param.name for param in params], fitness_calculator=ephys.objectivescalculators.ObjectivesCalculator(), sim=nrn)
best_param_values = {'gkbar_StochKv3': 0.5}
responses = evaluator.run_protocol(protocol, cell_model=stochkv3_cell, param_values=best_param_values, sim=nrn)
hoc_string = stochkv3_cell.create_hoc(param_values=best_param_values, disable_banner=True)
stochkv3_hoc_cell = ephys.models.HocCellModel('stochkv3_hoc_cell', morphology_path=morph_dir, hoc_string=hoc_string)
nrn.neuron.h.celsius = 34
hoc_responses = protocol.run(stochkv3_hoc_cell, best_param_values, sim=nrn)
evaluator.use_params_for_seed = True
different_seed_responses = evaluator.run_protocol(protocol, cell_model=stochkv3_cell, param_values=best_param_values, sim=nrn)
(stochkv3_responses, stochkv3_hoc_responses, different_seed_responses, stochkv3_hoc_string) = (responses, hoc_responses, different_seed_responses, hoc_string)
with open(stochkv3_hoc_filename(deterministic=deterministic), 'w') as stochkv3_hoc_file:
stochkv3_hoc_file.write(stochkv3_hoc_string)
time = stochkv3_responses['Step.soma.v']['time']
py_voltage = stochkv3_responses['Step.soma.v']['voltage']
hoc_voltage = stochkv3_hoc_responses['Step.soma.v']['voltage']
different_seed_voltage = different_seed_responses['Step.soma.v']['voltage']
plt.figure()
plt.plot(time, py_voltage - hoc_voltage, label='py - hoc diff')
plt.xlabel('time (ms)')
plt.ylabel('voltage diff(mV)')
plt.title('Deterministic' if deterministic else 'Stochastic')
plt.legend()
plt.figure()
plt.plot(time, py_voltage, label='py')
plt.plot(time, hoc_voltage, label='hoc')
plt.xlabel('time (ms)')
plt.ylabel('voltage (mV)')
plt.title('Deterministic' if deterministic else 'Stochastic')
plt.legend()
plt.figure()
plt.plot(time, py_voltage, label='py')
plt.plot(time, different_seed_voltage, label='different seed')
plt.xlabel('time (ms)')
plt.ylabel('voltage (mV)')
plt.title('Deterministic' if deterministic else 'Stochastic')
plt.legend()
plt.show()
|
BluePyOpt
|
positive
|
def tf_unitary_overlap(A: tf.Tensor, B: tf.Tensor, lvls: tf.Tensor=None) -> tf.Tensor:
"""Unitary overlap between two matrices.
Parameters
----------
A : tf.Tensor
Unitary A
B : tf.Tensor
Unitary B
lvls : tf.Tensor, optional
Levels, by default None
Returns
-------
tf.Tensor
Overlap between the two unitaries
Raises
------
TypeError
For errors during cast
ValueError
For errors during matrix multiplicaton
"""
try:
if lvls is None:
lvls = tf.cast(tf.shape(B)[0], B.dtype)
<DeepExtract>
overlap = tf.reshape(tf.cast(tf.math.conj(tf.linalg.trace(tf.matmul(A, tf.linalg.adjoint(B))) / lvls) * tf.linalg.trace(tf.matmul(A, tf.linalg.adjoint(B))) / lvls, dtype=tf.float64), shape=[1])
</DeepExtract>
except TypeError:
raise TypeError('Possible Inconsistent Dimensions while casting tensors')
except ValueError:
raise ValueError('Possible Inconsistent Dimensions during Matrix Multiplication')
return overlap
|
def tf_unitary_overlap(A: tf.Tensor, B: tf.Tensor, lvls: tf.Tensor=None) -> tf.Tensor:
"""Unitary overlap between two matrices.
Parameters
----------
A : tf.Tensor
Unitary A
B : tf.Tensor
Unitary B
lvls : tf.Tensor, optional
Levels, by default None
Returns
-------
tf.Tensor
Overlap between the two unitaries
Raises
------
TypeError
For errors during cast
ValueError
For errors during matrix multiplicaton
"""
try:
if lvls is None:
lvls = tf.cast(tf.shape(B)[0], B.dtype)
overlap = tf.reshape(tf.cast(tf.math.conj(tf.linalg.trace(tf.matmul(A, tf.linalg.adjoint(B))) / lvls) * tf.linalg.trace(tf.matmul(A, tf.linalg.adjoint(B))) / lvls, dtype=tf.float64), shape=[1])
except TypeError:
raise TypeError('Possible Inconsistent Dimensions while casting tensors')
except ValueError:
raise ValueError('Possible Inconsistent Dimensions during Matrix Multiplication')
return overlap
|
c3
|
positive
|
def __init__(self, x, min_limit=-np.inf, max_limit=np.inf, weights=1.0):
self.points = x
self.N = x.size
self.min_limit = min_limit
self.max_limit = max_limit
<DeepExtract>
z = np.append(x, [min_limit, max_limit])
sigma = np.ones(x.shape)
for i in range(x.size):
xleft = z[np.argmin([x[i] - k if k < x[i] else np.inf for k in z])]
xright = z[np.argmin([k - x[i] if k > x[i] else np.inf for k in z])]
sigma[i] = max(x[i] - xleft, xright - x[i])
if sigma[i] == np.inf:
sigma[i] = min(x[i] - xleft, xright - x[i])
if sigma[i] == -np.inf:
sigma[i] = 1.0
self.sigma = sigma
</DeepExtract>
self.weights = 2 / (erf((max_limit - x) / (np.sqrt(2.0) * self.sigma)) - erf((min_limit - x) / (np.sqrt(2.0) * self.sigma))) * weights
self.W_sum = np.sum(self.weights)
|
def __init__(self, x, min_limit=-np.inf, max_limit=np.inf, weights=1.0):
self.points = x
self.N = x.size
self.min_limit = min_limit
self.max_limit = max_limit
z = np.append(x, [min_limit, max_limit])
sigma = np.ones(x.shape)
for i in range(x.size):
xleft = z[np.argmin([x[i] - k if k < x[i] else np.inf for k in z])]
xright = z[np.argmin([k - x[i] if k > x[i] else np.inf for k in z])]
sigma[i] = max(x[i] - xleft, xright - x[i])
if sigma[i] == np.inf:
sigma[i] = min(x[i] - xleft, xright - x[i])
if sigma[i] == -np.inf:
sigma[i] = 1.0
self.sigma = sigma
self.weights = 2 / (erf((max_limit - x) / (np.sqrt(2.0) * self.sigma)) - erf((min_limit - x) / (np.sqrt(2.0) * self.sigma))) * weights
self.W_sum = np.sum(self.weights)
|
brainiak
|
positive
|
def prepare_train_img(idx):
img_info = self.img_infos[idx]
<DeepExtract>
ann_info = self.img_infos[idx]['ann']
</DeepExtract>
results = dict(img_info=img_info, ann_info=ann_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
<DeepExtract>
results['img_prefix'] = self.img_prefix
results['seg_prefix'] = self.seg_prefix
results['proposal_file'] = self.proposal_file
results['bbox_fields'] = []
results['mask_fields'] = []
results['seg_fields'] = []
</DeepExtract>
return self.pipeline(results)
|
def prepare_train_img(idx):
img_info = self.img_infos[idx]
ann_info = self.img_infos[idx]['ann']
results = dict(img_info=img_info, ann_info=ann_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
results['img_prefix'] = self.img_prefix
results['seg_prefix'] = self.seg_prefix
results['proposal_file'] = self.proposal_file
results['bbox_fields'] = []
results['mask_fields'] = []
results['seg_fields'] = []
return self.pipeline(results)
|
DetectoRS
|
positive
|
def _bimpm_forward(field_encoded_record1: torch.Tensor, field_encoded_record2: torch.Tensor, record_mask_record1: torch.Tensor, record_mask_record2: torch.Tensor) -> torch.Tensor:
"""Encodes in a record context, matches encodings, aggregates the matching results, classifies.
We do this in a helper function to reuse it for the `self._compute_attributions` method.
Parameters
----------
field_encoded_record1
Encoded record1 (in a field context)
field_encoded_record2
Encoded record2 (in a field context)
record_mask_record1
Mask for the record encoder for record1
record_mask_record2
Mask for the record encoder for record2
Returns
-------
logits
"""
record_encoded_record1 = self._dropout(self._record_encoder(field_encoded_record1, mask=record_mask_record1))
record_encoded_record2 = self._dropout(self._record_encoder(field_encoded_record2, mask=record_mask_record2))
<DeepExtract>
matching_vector_record1: List[torch.Tensor] = []
matching_vector_record2: List[torch.Tensor] = []
half_hidden_size = None
if self._matcher_backward is not None:
half_hidden_size = self._record_encoder.get_output_dim() // 2
matching_result = self._matcher_forward(record_encoded_record1[:, :, :half_hidden_size], record_mask_record1, record_encoded_record2[:, :, :half_hidden_size], record_mask_record2)
matching_vector_record1.extend(matching_result[0])
matching_vector_record2.extend(matching_result[1])
if self._matcher_backward is not None:
matching_result = self._matcher_backward(record_encoded_record1[:, :, half_hidden_size:], record_mask_record1, record_encoded_record2[:, :, half_hidden_size:], record_mask_record2)
matching_vector_record1.extend(matching_result[0])
matching_vector_record2.extend(matching_result[1])
matching_vector_record1_cat = self._dropout(torch.cat(matching_vector_record1, dim=2))
matching_vector_record2_cat = self._dropout(torch.cat(matching_vector_record2, dim=2))
(matching_vector_record1, matching_vector_record2) = (matching_vector_record1_cat, matching_vector_record2_cat)
</DeepExtract>
aggregated_record1 = self._dropout(self._aggregator(matching_vector_record1, record_mask_record1))
aggregated_record2 = self._dropout(self._aggregator(matching_vector_record2, record_mask_record2))
aggregated_records = torch.cat([aggregated_record1, aggregated_record2], dim=-1)
logits = self._output_layer(self._classifier_feedforward(aggregated_records))
return logits
|
def _bimpm_forward(field_encoded_record1: torch.Tensor, field_encoded_record2: torch.Tensor, record_mask_record1: torch.Tensor, record_mask_record2: torch.Tensor) -> torch.Tensor:
"""Encodes in a record context, matches encodings, aggregates the matching results, classifies.
We do this in a helper function to reuse it for the `self._compute_attributions` method.
Parameters
----------
field_encoded_record1
Encoded record1 (in a field context)
field_encoded_record2
Encoded record2 (in a field context)
record_mask_record1
Mask for the record encoder for record1
record_mask_record2
Mask for the record encoder for record2
Returns
-------
logits
"""
record_encoded_record1 = self._dropout(self._record_encoder(field_encoded_record1, mask=record_mask_record1))
record_encoded_record2 = self._dropout(self._record_encoder(field_encoded_record2, mask=record_mask_record2))
matching_vector_record1: List[torch.Tensor] = []
matching_vector_record2: List[torch.Tensor] = []
half_hidden_size = None
if self._matcher_backward is not None:
half_hidden_size = self._record_encoder.get_output_dim() // 2
matching_result = self._matcher_forward(record_encoded_record1[:, :, :half_hidden_size], record_mask_record1, record_encoded_record2[:, :, :half_hidden_size], record_mask_record2)
matching_vector_record1.extend(matching_result[0])
matching_vector_record2.extend(matching_result[1])
if self._matcher_backward is not None:
matching_result = self._matcher_backward(record_encoded_record1[:, :, half_hidden_size:], record_mask_record1, record_encoded_record2[:, :, half_hidden_size:], record_mask_record2)
matching_vector_record1.extend(matching_result[0])
matching_vector_record2.extend(matching_result[1])
matching_vector_record1_cat = self._dropout(torch.cat(matching_vector_record1, dim=2))
matching_vector_record2_cat = self._dropout(torch.cat(matching_vector_record2, dim=2))
(matching_vector_record1, matching_vector_record2) = (matching_vector_record1_cat, matching_vector_record2_cat)
aggregated_record1 = self._dropout(self._aggregator(matching_vector_record1, record_mask_record1))
aggregated_record2 = self._dropout(self._aggregator(matching_vector_record2, record_mask_record2))
aggregated_records = torch.cat([aggregated_record1, aggregated_record2], dim=-1)
logits = self._output_layer(self._classifier_feedforward(aggregated_records))
return logits
|
biome-text
|
positive
|
def se_resnet50(num_classes=1000, pretrained='imagenet'):
model = SENet(SEResNetBottleneck, [3, 4, 6, 3], groups=1, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['se_resnet50'][pretrained]
<DeepExtract>
assert num_classes == settings['num_classes'], 'num_classes should be {}, but is {}'.format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
</DeepExtract>
return model
|
def se_resnet50(num_classes=1000, pretrained='imagenet'):
model = SENet(SEResNetBottleneck, [3, 4, 6, 3], groups=1, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['se_resnet50'][pretrained]
assert num_classes == settings['num_classes'], 'num_classes should be {}, but is {}'.format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model
|
DCASE2018Task2
|
positive
|
def prepare(job, slices):
<DeepExtract>
char = options.get('newline')
if not char:
lf_char = 256
msg = '%s must be a single iso-8859-1 character (or %s)' % ('newline', specials)
if isinstance(char, bytes):
char = uni(char)
try:
char = char.encode('iso-8859-1')
except UnicodeEncodeError:
raise Exception(msg)
assert len(char) == 1, msg
lf_char = cstuff.backend.char2int(char)
</DeepExtract>
<DeepExtract>
char = options.get('separator')
if not char:
separator = 10 if lf_char == 256 else lf_char
msg = '%s must be a single iso-8859-1 character (or %s)' % ('separator', specials)
if isinstance(char, bytes):
char = uni(char)
try:
char = char.encode('iso-8859-1')
except UnicodeEncodeError:
raise Exception(msg)
assert len(char) == 1, msg
separator = cstuff.backend.char2int(char)
</DeepExtract>
<DeepExtract>
char = options.get('comment')
if not char:
comment_char = 256
msg = '%s must be a single iso-8859-1 character (or %s)' % ('comment', specials)
if isinstance(char, bytes):
char = uni(char)
try:
char = char.encode('iso-8859-1')
except UnicodeEncodeError:
raise Exception(msg)
assert len(char) == 1, msg
comment_char = cstuff.backend.char2int(char)
</DeepExtract>
if options.quotes == 'True':
quote_char = 256
elif options.quotes == 'False':
quote_char = 257
else:
<DeepExtract>
char = options.get('quotes')
if not char:
quote_char = 257
msg = '%s must be a single iso-8859-1 character (or %s)' % ('quotes', 'True/False/empty')
if isinstance(char, bytes):
char = uni(char)
try:
char = char.encode('iso-8859-1')
except UnicodeEncodeError:
raise Exception(msg)
assert len(char) == 1, msg
quote_char = cstuff.backend.char2int(char)
</DeepExtract>
filename = os.path.join(job.input_directory, options.filename)
orig_filename = filename
assert 1 <= options.compression <= 9
open(filename, 'rb').close()
fds = [os.pipe() for _ in range(slices)]
read_fds = [t[0] for t in fds]
write_fds = [t[1] for t in fds]
if options.labelsonfirstline:
(labels_rfd, labels_wfd) = os.pipe()
else:
labels_wfd = -1
success_fh = open('reader.success', 'wb+')
(status_rfd, status_wfd) = os.pipe()
p = Process(target=reader_process, name='reader', args=(slices, filename, write_fds, labels_wfd, success_fh.fileno(), status_wfd, comment_char, lf_char))
p.start()
for fd in write_fds:
os.close(fd)
os.close(status_wfd)
if options.labelsonfirstline:
os.close(labels_wfd)
out_fns = ['labels']
r_num = cstuff.mk_uint64(3)
try:
<DeepExtract>
fn = 'import.success.%d' % (-1,)
fh = open(fn, 'wb+')
real_stderr = os.dup(2)
try:
os.dup2(fh.fileno(), 2)
res = cstuff.backend.import_slice(*cstuff.bytesargs(labels_rfd, -1, -1, -1, out_fns, b'wb1', separator, r_num, quote_char, lf_char, 0, 0))
os.dup2(real_stderr, 2)
fh.seek(0)
msg = fh.read().decode('utf-8', 'replace')
if msg or res:
raise Exception(msg.strip() or 'c backend failed in label parsing')
finally:
os.dup2(real_stderr, 2)
os.close(real_stderr)
fh.close()
os.unlink(fn)
</DeepExtract>
finally:
os.close(labels_rfd)
if os.path.exists('labels'):
with typed_reader('bytes')('labels') as fh:
labels_from_file = [lab.decode('utf-8', 'backslashreplace') for lab in fh]
os.unlink('labels')
else:
labels_from_file = []
else:
labels_from_file = None
labels = options.labels or labels_from_file
if options.allow_extra_empty:
while labels and labels[-1] == '':
labels.pop()
assert labels, 'No labels'
if options.strip_labels:
labels = [x.strip() for x in labels]
labels = [options.rename.get(x, x) for x in labels]
assert len(labels) == len(set(labels)), 'Duplicate labels: %r' % (labels,)
dw = job.datasetwriter(columns={n: 'bytes' for n in labels if n not in options.discard}, filename=orig_filename, caption='csvimport of ' + orig_filename, previous=datasets.previous, meta_only=True)
if options.lineno_label:
dw.add(options.lineno_label, 'int64')
def dsprevious(name):
if datasets.previous and datasets.previous.name == 'default':
from accelerator.error import NoSuchDatasetError
try:
return datasets.previous.job.dataset(name)
except NoSuchDatasetError:
return None
return None
if options.allow_bad:
bad_dw = job.datasetwriter(name='bad', filename=orig_filename, columns=dict(lineno='int64', data='bytes'), caption='bad lines from csvimport of ' + orig_filename, previous=dsprevious('bad'), meta_only=True)
else:
bad_dw = None
if options.comment or options.skip_lines or options.skip_empty_lines:
skipped_dw = job.datasetwriter(name='skipped', filename=orig_filename, columns=dict(lineno='int64', data='bytes'), caption='skipped lines from csvimport of ' + orig_filename, previous=dsprevious('skipped'), meta_only=True)
else:
skipped_dw = None
return (separator, quote_char, lf_char, filename, orig_filename, labels, dw, bad_dw, skipped_dw, read_fds, success_fh, status_rfd)
|
def prepare(job, slices):
char = options.get('newline')
if not char:
lf_char = 256
msg = '%s must be a single iso-8859-1 character (or %s)' % ('newline', specials)
if isinstance(char, bytes):
char = uni(char)
try:
char = char.encode('iso-8859-1')
except UnicodeEncodeError:
raise Exception(msg)
assert len(char) == 1, msg
lf_char = cstuff.backend.char2int(char)
char = options.get('separator')
if not char:
separator = 10 if lf_char == 256 else lf_char
msg = '%s must be a single iso-8859-1 character (or %s)' % ('separator', specials)
if isinstance(char, bytes):
char = uni(char)
try:
char = char.encode('iso-8859-1')
except UnicodeEncodeError:
raise Exception(msg)
assert len(char) == 1, msg
separator = cstuff.backend.char2int(char)
char = options.get('comment')
if not char:
comment_char = 256
msg = '%s must be a single iso-8859-1 character (or %s)' % ('comment', specials)
if isinstance(char, bytes):
char = uni(char)
try:
char = char.encode('iso-8859-1')
except UnicodeEncodeError:
raise Exception(msg)
assert len(char) == 1, msg
comment_char = cstuff.backend.char2int(char)
if options.quotes == 'True':
quote_char = 256
elif options.quotes == 'False':
quote_char = 257
else:
char = options.get('quotes')
if not char:
quote_char = 257
msg = '%s must be a single iso-8859-1 character (or %s)' % ('quotes', 'True/False/empty')
if isinstance(char, bytes):
char = uni(char)
try:
char = char.encode('iso-8859-1')
except UnicodeEncodeError:
raise Exception(msg)
assert len(char) == 1, msg
quote_char = cstuff.backend.char2int(char)
filename = os.path.join(job.input_directory, options.filename)
orig_filename = filename
assert 1 <= options.compression <= 9
open(filename, 'rb').close()
fds = [os.pipe() for _ in range(slices)]
read_fds = [t[0] for t in fds]
write_fds = [t[1] for t in fds]
if options.labelsonfirstline:
(labels_rfd, labels_wfd) = os.pipe()
else:
labels_wfd = -1
success_fh = open('reader.success', 'wb+')
(status_rfd, status_wfd) = os.pipe()
p = Process(target=reader_process, name='reader', args=(slices, filename, write_fds, labels_wfd, success_fh.fileno(), status_wfd, comment_char, lf_char))
p.start()
for fd in write_fds:
os.close(fd)
os.close(status_wfd)
if options.labelsonfirstline:
os.close(labels_wfd)
out_fns = ['labels']
r_num = cstuff.mk_uint64(3)
try:
fn = 'import.success.%d' % (-1,)
fh = open(fn, 'wb+')
real_stderr = os.dup(2)
try:
os.dup2(fh.fileno(), 2)
res = cstuff.backend.import_slice(*cstuff.bytesargs(labels_rfd, -1, -1, -1, out_fns, b'wb1', separator, r_num, quote_char, lf_char, 0, 0))
os.dup2(real_stderr, 2)
fh.seek(0)
msg = fh.read().decode('utf-8', 'replace')
if msg or res:
raise Exception(msg.strip() or 'c backend failed in label parsing')
finally:
os.dup2(real_stderr, 2)
os.close(real_stderr)
fh.close()
os.unlink(fn)
finally:
os.close(labels_rfd)
if os.path.exists('labels'):
with typed_reader('bytes')('labels') as fh:
labels_from_file = [lab.decode('utf-8', 'backslashreplace') for lab in fh]
os.unlink('labels')
else:
labels_from_file = []
else:
labels_from_file = None
labels = options.labels or labels_from_file
if options.allow_extra_empty:
while labels and labels[-1] == '':
labels.pop()
assert labels, 'No labels'
if options.strip_labels:
labels = [x.strip() for x in labels]
labels = [options.rename.get(x, x) for x in labels]
assert len(labels) == len(set(labels)), 'Duplicate labels: %r' % (labels,)
dw = job.datasetwriter(columns={n: 'bytes' for n in labels if n not in options.discard}, filename=orig_filename, caption='csvimport of ' + orig_filename, previous=datasets.previous, meta_only=True)
if options.lineno_label:
dw.add(options.lineno_label, 'int64')
def dsprevious(name):
if datasets.previous and datasets.previous.name == 'default':
from accelerator.error import NoSuchDatasetError
try:
return datasets.previous.job.dataset(name)
except NoSuchDatasetError:
return None
return None
if options.allow_bad:
bad_dw = job.datasetwriter(name='bad', filename=orig_filename, columns=dict(lineno='int64', data='bytes'), caption='bad lines from csvimport of ' + orig_filename, previous=dsprevious('bad'), meta_only=True)
else:
bad_dw = None
if options.comment or options.skip_lines or options.skip_empty_lines:
skipped_dw = job.datasetwriter(name='skipped', filename=orig_filename, columns=dict(lineno='int64', data='bytes'), caption='skipped lines from csvimport of ' + orig_filename, previous=dsprevious('skipped'), meta_only=True)
else:
skipped_dw = None
return (separator, quote_char, lf_char, filename, orig_filename, labels, dw, bad_dw, skipped_dw, read_fds, success_fh, status_rfd)
|
accelerator
|
positive
|
def forward(self, batch_dict):
for cur_module in self.module_list:
batch_dict = cur_module(batch_dict)
if self.training:
<DeepExtract>
disp_dict = {}
(loss_rpn, tb_dict) = self.dense_head.get_loss()
(loss_point, tb_dict) = self.point_head.get_loss(tb_dict)
(loss_rcnn, tb_dict) = self.roi_head.get_loss(tb_dict)
loss = loss_rpn + loss_point + loss_rcnn
(loss, tb_dict, disp_dict) = (loss, tb_dict, disp_dict)
</DeepExtract>
ret_dict = {'loss': loss}
return (ret_dict, tb_dict, disp_dict)
else:
(pred_dicts, recall_dicts) = self.post_processing(batch_dict)
return (pred_dicts, recall_dicts)
|
def forward(self, batch_dict):
for cur_module in self.module_list:
batch_dict = cur_module(batch_dict)
if self.training:
disp_dict = {}
(loss_rpn, tb_dict) = self.dense_head.get_loss()
(loss_point, tb_dict) = self.point_head.get_loss(tb_dict)
(loss_rcnn, tb_dict) = self.roi_head.get_loss(tb_dict)
loss = loss_rpn + loss_point + loss_rcnn
(loss, tb_dict, disp_dict) = (loss, tb_dict, disp_dict)
ret_dict = {'loss': loss}
return (ret_dict, tb_dict, disp_dict)
else:
(pred_dicts, recall_dicts) = self.post_processing(batch_dict)
return (pred_dicts, recall_dicts)
|
CenterPoint-KITTI
|
positive
|
def test_ethtest_fixtures() -> None:
<DeepExtract>
with open(f'{ETHEREUM_TESTS_PATH}/PoWTests/ethash_tests.json') as pow_test_file_handler:
ethereum_tests = [{'nonce': hex_to_bytes8(raw_fixture['nonce']), 'mix_digest': hex_to_bytes32(raw_fixture['mixHash']), 'header': rlp.decode_to(Header, hex_to_bytes(raw_fixture['header'])), 'seed': hex_to_bytes32(raw_fixture['seed']), 'result': hex_to_bytes32(raw_fixture['result']), 'cache_size': Uint(raw_fixture['cache_size']), 'dataset_size': Uint(raw_fixture['full_size']), 'header_hash': hex_to_bytes32(raw_fixture['header_hash']), 'cache_hash': hex_to_bytes32(raw_fixture['cache_hash'])} for raw_fixture in json.load(pow_test_file_handler).values()]
</DeepExtract>
for test in ethereum_tests:
header = test['header']
assert header.nonce == test['nonce']
assert header.mix_digest == test['mix_digest']
assert generate_seed(header.number) == test['seed']
assert cache_size(header.number) == test['cache_size']
assert dataset_size(header.number) == test['dataset_size']
header_hash = generate_header_hash_for_pow(header)
assert header_hash == test['header_hash']
cache = generate_cache(header.number)
cache_hash = keccak256(b''.join((le_uint32_sequence_to_bytes(cache_item) for cache_item in cache)))
assert cache_hash == test['cache_hash']
(mix_digest, result) = hashimoto_light(header_hash, header.nonce, cache, dataset_size(header.number))
assert mix_digest == test['mix_digest']
assert result == test['result']
|
def test_ethtest_fixtures() -> None:
with open(f'{ETHEREUM_TESTS_PATH}/PoWTests/ethash_tests.json') as pow_test_file_handler:
ethereum_tests = [{'nonce': hex_to_bytes8(raw_fixture['nonce']), 'mix_digest': hex_to_bytes32(raw_fixture['mixHash']), 'header': rlp.decode_to(Header, hex_to_bytes(raw_fixture['header'])), 'seed': hex_to_bytes32(raw_fixture['seed']), 'result': hex_to_bytes32(raw_fixture['result']), 'cache_size': Uint(raw_fixture['cache_size']), 'dataset_size': Uint(raw_fixture['full_size']), 'header_hash': hex_to_bytes32(raw_fixture['header_hash']), 'cache_hash': hex_to_bytes32(raw_fixture['cache_hash'])} for raw_fixture in json.load(pow_test_file_handler).values()]
for test in ethereum_tests:
header = test['header']
assert header.nonce == test['nonce']
assert header.mix_digest == test['mix_digest']
assert generate_seed(header.number) == test['seed']
assert cache_size(header.number) == test['cache_size']
assert dataset_size(header.number) == test['dataset_size']
header_hash = generate_header_hash_for_pow(header)
assert header_hash == test['header_hash']
cache = generate_cache(header.number)
cache_hash = keccak256(b''.join((le_uint32_sequence_to_bytes(cache_item) for cache_item in cache)))
assert cache_hash == test['cache_hash']
(mix_digest, result) = hashimoto_light(header_hash, header.nonce, cache, dataset_size(header.number))
assert mix_digest == test['mix_digest']
assert result == test['result']
|
eth1.0-specs
|
positive
|
def find_image(self, name, tag):
"""
    Look up an image (by name and tag) and return the inspection results.
"""
if not name:
return None
<DeepExtract>
pass
</DeepExtract>
<DeepExtract>
try:
response = self.images(name=name)
except Exception as exc:
self.fail('Error searching for image %s - %s' % (name, str(exc)))
images = response
if tag:
lookup = '%s:%s' % (name, tag)
lookup_digest = '%s@%s' % (name, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if tags and lookup in tags or (digests and lookup_digest in digests):
images = [image]
break
images = images
</DeepExtract>
if not images:
(registry, repo_name) = auth.resolve_repository_name(name)
if registry == 'docker.io':
<DeepExtract>
pass
</DeepExtract>
<DeepExtract>
try:
response = self.images(name=repo_name)
except Exception as exc:
self.fail('Error searching for image %s - %s' % (repo_name, str(exc)))
images = response
if tag:
lookup = '%s:%s' % (repo_name, tag)
lookup_digest = '%s@%s' % (repo_name, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if tags and lookup in tags or (digests and lookup_digest in digests):
images = [image]
break
images = images
</DeepExtract>
if not images and repo_name.startswith('library/'):
lookup = repo_name[len('library/'):]
<DeepExtract>
pass
</DeepExtract>
<DeepExtract>
try:
response = self.images(name=lookup)
except Exception as exc:
self.fail('Error searching for image %s - %s' % (lookup, str(exc)))
images = response
if tag:
lookup = '%s:%s' % (lookup, tag)
lookup_digest = '%s@%s' % (lookup, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if tags and lookup in tags or (digests and lookup_digest in digests):
images = [image]
break
images = images
</DeepExtract>
if not images:
lookup = '%s/%s' % (registry, repo_name)
<DeepExtract>
pass
</DeepExtract>
<DeepExtract>
try:
response = self.images(name=lookup)
except Exception as exc:
self.fail('Error searching for image %s - %s' % (lookup, str(exc)))
images = response
if tag:
lookup = '%s:%s' % (lookup, tag)
lookup_digest = '%s@%s' % (lookup, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if tags and lookup in tags or (digests and lookup_digest in digests):
images = [image]
break
images = images
</DeepExtract>
if not images and '/' not in repo_name:
lookup = '%s/library/%s' % (registry, repo_name)
<DeepExtract>
pass
</DeepExtract>
<DeepExtract>
try:
response = self.images(name=lookup)
except Exception as exc:
self.fail('Error searching for image %s - %s' % (lookup, str(exc)))
images = response
if tag:
lookup = '%s:%s' % (lookup, tag)
lookup_digest = '%s@%s' % (lookup, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if tags and lookup in tags or (digests and lookup_digest in digests):
images = [image]
break
images = images
</DeepExtract>
if len(images) > 1:
<DeepExtract>
pass
</DeepExtract>
if len(images) == 1:
try:
inspection = self.inspect_image(images[0]['Id'])
except NotFound:
<DeepExtract>
pass
</DeepExtract>
return None
except Exception as exc:
<DeepExtract>
pass
</DeepExtract>
return inspection
<DeepExtract>
pass
</DeepExtract>
return None
|
def find_image(self, name, tag):
"""
    Look up an image (by name and tag) and return the inspection results.
"""
if not name:
return None
pass
try:
response = self.images(name=name)
except Exception as exc:
self.fail('Error searching for image %s - %s' % (name, str(exc)))
images = response
if tag:
lookup = '%s:%s' % (name, tag)
lookup_digest = '%s@%s' % (name, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if tags and lookup in tags or (digests and lookup_digest in digests):
images = [image]
break
images = images
if not images:
(registry, repo_name) = auth.resolve_repository_name(name)
if registry == 'docker.io':
pass
try:
response = self.images(name=repo_name)
except Exception as exc:
self.fail('Error searching for image %s - %s' % (repo_name, str(exc)))
images = response
if tag:
lookup = '%s:%s' % (repo_name, tag)
lookup_digest = '%s@%s' % (repo_name, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if tags and lookup in tags or (digests and lookup_digest in digests):
images = [image]
break
images = images
if not images and repo_name.startswith('library/'):
lookup = repo_name[len('library/'):]
pass
try:
response = self.images(name=lookup)
except Exception as exc:
self.fail('Error searching for image %s - %s' % (lookup, str(exc)))
images = response
if tag:
lookup = '%s:%s' % (lookup, tag)
lookup_digest = '%s@%s' % (lookup, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if tags and lookup in tags or (digests and lookup_digest in digests):
images = [image]
break
images = images
if not images:
lookup = '%s/%s' % (registry, repo_name)
pass
try:
response = self.images(name=lookup)
except Exception as exc:
self.fail('Error searching for image %s - %s' % (lookup, str(exc)))
images = response
if tag:
lookup = '%s:%s' % (lookup, tag)
lookup_digest = '%s@%s' % (lookup, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if tags and lookup in tags or (digests and lookup_digest in digests):
images = [image]
break
images = images
if not images and '/' not in repo_name:
lookup = '%s/library/%s' % (registry, repo_name)
pass
try:
response = self.images(name=lookup)
except Exception as exc:
self.fail('Error searching for image %s - %s' % (lookup, str(exc)))
images = response
if tag:
lookup = '%s:%s' % (lookup, tag)
lookup_digest = '%s@%s' % (lookup, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if tags and lookup in tags or (digests and lookup_digest in digests):
images = [image]
break
images = images
if len(images) > 1:
pass
if len(images) == 1:
try:
inspection = self.inspect_image(images[0]['Id'])
except NotFound:
pass
return None
except Exception as exc:
pass
return inspection
pass
return None
|
community.docker
|
positive
|
def __init__(self, levels, channels, opt, num_classes=1000, block=BasicBlock, residual_root=False, return_levels=True, pool_size=7, linear_root=False):
super(DLA, self).__init__()
self.channels = channels
self.return_levels = True
self.num_classes = num_classes
self.base_layer = nn.Sequential(nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False), BatchNorm(channels[0]), nn.ReLU(inplace=True))
<DeepExtract>
modules = []
for i in range(levels[0]):
modules.extend([nn.Conv2d(channels[0], channels[0], kernel_size=3, stride=stride if i == 0 else 1, padding=dilation, bias=False, dilation=dilation), BatchNorm(channels[0]), nn.ReLU(inplace=True)])
channels[0] = channels[0]
self.level0 = nn.Sequential(*modules)
</DeepExtract>
<DeepExtract>
modules = []
for i in range(levels[1]):
modules.extend([nn.Conv2d(channels[0], channels[1], kernel_size=3, stride=2 if i == 0 else 1, padding=dilation, bias=False, dilation=dilation), BatchNorm(channels[1]), nn.ReLU(inplace=True)])
channels[0] = channels[1]
self.level1 = nn.Sequential(*modules)
</DeepExtract>
self.level2 = Tree(levels[2], block, channels[1], channels[2], 2, level_root=False, root_residual=residual_root)
self.level3 = Tree(levels[3], block, channels[2], channels[3], 2, level_root=True, root_residual=residual_root)
self.level4 = Tree(levels[4], block, channels[3], channels[4], 2, level_root=True, root_residual=residual_root)
self.level5 = Tree(levels[5], block, channels[4], channels[5], 2, level_root=True, root_residual=residual_root)
if opt.pre_img:
self.pre_img_layer = nn.Sequential(nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False), BatchNorm(channels[0]), nn.ReLU(inplace=True))
if opt.pre_hm:
self.pre_hm_layer = nn.Sequential(nn.Conv2d(1, channels[0], kernel_size=7, stride=1, padding=3, bias=False), BatchNorm(channels[0]), nn.ReLU(inplace=True))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
|
def __init__(self, levels, channels, opt, num_classes=1000, block=BasicBlock, residual_root=False, return_levels=True, pool_size=7, linear_root=False):
super(DLA, self).__init__()
self.channels = channels
self.return_levels = True
self.num_classes = num_classes
self.base_layer = nn.Sequential(nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False), BatchNorm(channels[0]), nn.ReLU(inplace=True))
modules = []
for i in range(levels[0]):
modules.extend([nn.Conv2d(channels[0], channels[0], kernel_size=3, stride=stride if i == 0 else 1, padding=dilation, bias=False, dilation=dilation), BatchNorm(channels[0]), nn.ReLU(inplace=True)])
channels[0] = channels[0]
self.level0 = nn.Sequential(*modules)
modules = []
for i in range(levels[1]):
modules.extend([nn.Conv2d(channels[0], channels[1], kernel_size=3, stride=2 if i == 0 else 1, padding=dilation, bias=False, dilation=dilation), BatchNorm(channels[1]), nn.ReLU(inplace=True)])
channels[0] = channels[1]
self.level1 = nn.Sequential(*modules)
self.level2 = Tree(levels[2], block, channels[1], channels[2], 2, level_root=False, root_residual=residual_root)
self.level3 = Tree(levels[3], block, channels[2], channels[3], 2, level_root=True, root_residual=residual_root)
self.level4 = Tree(levels[4], block, channels[3], channels[4], 2, level_root=True, root_residual=residual_root)
self.level5 = Tree(levels[5], block, channels[4], channels[5], 2, level_root=True, root_residual=residual_root)
if opt.pre_img:
self.pre_img_layer = nn.Sequential(nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False), BatchNorm(channels[0]), nn.ReLU(inplace=True))
if opt.pre_hm:
self.pre_hm_layer = nn.Sequential(nn.Conv2d(1, channels[0], kernel_size=7, stride=1, padding=3, bias=False), BatchNorm(channels[0]), nn.ReLU(inplace=True))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
|
CenterTrack-IOU
|
positive
|
def get_xml_path():
"""Return the alternative path to the UDUNITS2 XMl file"""
default = Path(sys.prefix) / 'share' / 'udunits' / 'udunits2.xml'
<DeepExtract>
value = str(default)
if config.has_option('System', 'udunits2_xml_path'):
value = config.get('System', 'udunits2_xml_path')
path = value
</DeepExtract>
return path.encode()
|
def get_xml_path():
"""Return the alternative path to the UDUNITS2 XMl file"""
default = Path(sys.prefix) / 'share' / 'udunits' / 'udunits2.xml'
value = str(default)
if config.has_option('System', 'udunits2_xml_path'):
value = config.get('System', 'udunits2_xml_path')
path = value
return path.encode()
|
cf-units
|
positive
|
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
        weight initializer: truncated normal and random normal.
"""
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean)
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
<DeepExtract>
if cfg.TRAIN.TRUNCATED:
self.RCNN_rpn.RPN_Conv.weight.data.normal_().fmod_(2).mul_(0.01).add_(0)
else:
self.RCNN_rpn.RPN_Conv.weight.data.normal_(0, 0.01)
self.RCNN_rpn.RPN_Conv.bias.data.zero_()
</DeepExtract>
<DeepExtract>
if cfg.TRAIN.TRUNCATED:
self.RCNN_rpn.RPN_cls_score.weight.data.normal_().fmod_(2).mul_(0.01).add_(0)
else:
self.RCNN_rpn.RPN_cls_score.weight.data.normal_(0, 0.01)
self.RCNN_rpn.RPN_cls_score.bias.data.zero_()
</DeepExtract>
<DeepExtract>
if cfg.TRAIN.TRUNCATED:
self.RCNN_rpn.RPN_bbox_pred.weight.data.normal_().fmod_(2).mul_(0.01).add_(0)
else:
self.RCNN_rpn.RPN_bbox_pred.weight.data.normal_(0, 0.01)
self.RCNN_rpn.RPN_bbox_pred.bias.data.zero_()
</DeepExtract>
<DeepExtract>
if cfg.TRAIN.TRUNCATED:
self.RCNN_cls_score.weight.data.normal_().fmod_(2).mul_(0.01).add_(0)
else:
self.RCNN_cls_score.weight.data.normal_(0, 0.01)
self.RCNN_cls_score.bias.data.zero_()
</DeepExtract>
<DeepExtract>
if cfg.TRAIN.TRUNCATED:
self.RCNN_bbox_pred.weight.data.normal_().fmod_(2).mul_(0.001).add_(0)
else:
self.RCNN_bbox_pred.weight.data.normal_(0, 0.001)
self.RCNN_bbox_pred.bias.data.zero_()
</DeepExtract>
|
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
        weight initializer: truncated normal and random normal.
"""
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean)
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
if cfg.TRAIN.TRUNCATED:
self.RCNN_rpn.RPN_Conv.weight.data.normal_().fmod_(2).mul_(0.01).add_(0)
else:
self.RCNN_rpn.RPN_Conv.weight.data.normal_(0, 0.01)
self.RCNN_rpn.RPN_Conv.bias.data.zero_()
if cfg.TRAIN.TRUNCATED:
self.RCNN_rpn.RPN_cls_score.weight.data.normal_().fmod_(2).mul_(0.01).add_(0)
else:
self.RCNN_rpn.RPN_cls_score.weight.data.normal_(0, 0.01)
self.RCNN_rpn.RPN_cls_score.bias.data.zero_()
if cfg.TRAIN.TRUNCATED:
self.RCNN_rpn.RPN_bbox_pred.weight.data.normal_().fmod_(2).mul_(0.01).add_(0)
else:
self.RCNN_rpn.RPN_bbox_pred.weight.data.normal_(0, 0.01)
self.RCNN_rpn.RPN_bbox_pred.bias.data.zero_()
if cfg.TRAIN.TRUNCATED:
self.RCNN_cls_score.weight.data.normal_().fmod_(2).mul_(0.01).add_(0)
else:
self.RCNN_cls_score.weight.data.normal_(0, 0.01)
self.RCNN_cls_score.bias.data.zero_()
if cfg.TRAIN.TRUNCATED:
self.RCNN_bbox_pred.weight.data.normal_().fmod_(2).mul_(0.001).add_(0)
else:
self.RCNN_bbox_pred.weight.data.normal_(0, 0.001)
self.RCNN_bbox_pred.bias.data.zero_()
|
DatasetCulling
|
positive
|
def rec(node):
res = []
while node:
if _visible(node) or _show_all:
res.append(node)
if node.list and (not node.is_menuconfig):
res += rec(node.list)
elif node.list and isinstance(node.item, Symbol):
<DeepExtract>
res = []
while node.list:
if _visible(node.list) or _show_all:
res.append(node.list)
if node.list.list and (not node.list.is_menuconfig):
res += rec(node.list.list)
elif node.list.list and isinstance(node.list.item, Symbol):
shown_children = rec(node.list.list)
if shown_children:
res.append(node.list)
if not node.list.is_menuconfig:
res += shown_children
node.list = node.list.next
shown_children = res
</DeepExtract>
if shown_children:
res.append(node)
if not node.is_menuconfig:
res += shown_children
node = node.next
return res
|
def rec(node):
res = []
while node:
if _visible(node) or _show_all:
res.append(node)
if node.list and (not node.is_menuconfig):
res += rec(node.list)
elif node.list and isinstance(node.item, Symbol):
res = []
while node.list:
if _visible(node.list) or _show_all:
res.append(node.list)
if node.list.list and (not node.list.is_menuconfig):
res += rec(node.list.list)
elif node.list.list and isinstance(node.list.item, Symbol):
shown_children = rec(node.list.list)
if shown_children:
res.append(node.list)
if not node.list.is_menuconfig:
res += shown_children
node.list = node.list.next
shown_children = res
if shown_children:
res.append(node)
if not node.is_menuconfig:
res += shown_children
node = node.next
return res
|
cello
|
positive
|
def devices_status():
"""Print current devices status."""
<DeepExtract>
label = 'brand'
if 'brand_raw' in cpuinfo.get_cpu_info().keys():
label = 'brand_raw'
print('Cpu count: {:d} \t brand: {:s}'.format(os.cpu_count(), cpuinfo.get_cpu_info()[label]))
print('Avg_load_1m: \t{:.3f}%%;'.format(os.getloadavg()[0]))
print('Avg_load_5m:\t{:.3f}%%;'.format(os.getloadavg()[1]))
print('Avg_load_15m:\t{:.3f}%%;'.format(os.getloadavg()[2]))
</DeepExtract>
<DeepExtract>
if memoryInfo is None:
memoryInfo = psutil.virtual_memory()
print('Current RAM - total:\t {:.3f}GB;'.format(memoryInfo.total / 2.0 ** 30))
print('Current RAM - available:\t{:.3f}GB;'.format(memoryInfo.available / 2.0 ** 30))
print('Current RAM - used:\t{:.3f}GB;'.format(memoryInfo.used / 2.0 ** 30))
print('Current RAM - free:\t{:.3f}GB;'.format(memoryInfo.free / 2.0 ** 30))
</DeepExtract>
<DeepExtract>
if gpu_id is None:
gpu_ids = GPUtil.getAvailable(limit=10)
for gpu_id in gpu_ids:
GPU = GPUtil.getGPUs()[gpu_id]
GPU_load = GPU.load * 100
GPU_memoryUtil = GPU.memoryUtil / 2.0 ** 10
GPU_memoryTotal = GPU.memoryTotal / 2.0 ** 10
GPU_memoryUsed = GPU.memoryUsed / 2.0 ** 10
GPU_memoryFree = GPU.memoryFree / 2.0 ** 10
print('Current GPU (ID:{:d}) name:\t{:s}'.format(gpu_id, GPU.name))
print('Total_GPU_memory:\t{:.3f}GB;'.format(GPU_memoryTotal))
print('GPU_memoryUtil:\t{:.3f}GB;'.format(GPU_memoryUtil))
print('GPU_memoryUsed:\t{:.3f}GB;'.format(GPU_memoryUsed))
print('GPU_memoryFree:\t{:.3f}GB;'.format(GPU_memoryFree))
print('GPU_load:\t{:.3f}GB;'.format(GPU_load))
else:
GPU = GPUtil.getGPUs()[gpu_id]
GPU_load = GPU.load * 100
GPU_memoryUtil = GPU.memoryUtil / 2.0 ** 10
GPU_memoryTotal = GPU.memoryTotal / 2.0 ** 10
GPU_memoryUsed = GPU.memoryUsed / 2.0 ** 10
GPU_memoryFree = GPU.memoryFree / 2.0 ** 10
print('Current GPU (ID:{:d}) name:{:s}'.format(gpu_id, GPU.name))
print('Total_GPU_memory: {:.3f}GB;'.format(GPU_memoryTotal))
print('GPU_memoryUsed:{:.3f}GB;'.format(GPU_memoryUsed))
print('GPU_memoryFree:{:.3f}GB;'.format(GPU_memoryFree))
print('GPU_load:{:.3f}GB;'.format(GPU_load))
</DeepExtract>
|
def devices_status():
"""Print current devices status."""
label = 'brand'
if 'brand_raw' in cpuinfo.get_cpu_info().keys():
label = 'brand_raw'
print('Cpu count: {:d} \t brand: {:s}'.format(os.cpu_count(), cpuinfo.get_cpu_info()[label]))
print('Avg_load_1m: \t{:.3f}%%;'.format(os.getloadavg()[0]))
print('Avg_load_5m:\t{:.3f}%%;'.format(os.getloadavg()[1]))
print('Avg_load_15m:\t{:.3f}%%;'.format(os.getloadavg()[2]))
if memoryInfo is None:
memoryInfo = psutil.virtual_memory()
print('Current RAM - total:\t {:.3f}GB;'.format(memoryInfo.total / 2.0 ** 30))
print('Current RAM - available:\t{:.3f}GB;'.format(memoryInfo.available / 2.0 ** 30))
print('Current RAM - used:\t{:.3f}GB;'.format(memoryInfo.used / 2.0 ** 30))
print('Current RAM - free:\t{:.3f}GB;'.format(memoryInfo.free / 2.0 ** 30))
if gpu_id is None:
gpu_ids = GPUtil.getAvailable(limit=10)
for gpu_id in gpu_ids:
GPU = GPUtil.getGPUs()[gpu_id]
GPU_load = GPU.load * 100
GPU_memoryUtil = GPU.memoryUtil / 2.0 ** 10
GPU_memoryTotal = GPU.memoryTotal / 2.0 ** 10
GPU_memoryUsed = GPU.memoryUsed / 2.0 ** 10
GPU_memoryFree = GPU.memoryFree / 2.0 ** 10
print('Current GPU (ID:{:d}) name:\t{:s}'.format(gpu_id, GPU.name))
print('Total_GPU_memory:\t{:.3f}GB;'.format(GPU_memoryTotal))
print('GPU_memoryUtil:\t{:.3f}GB;'.format(GPU_memoryUtil))
print('GPU_memoryUsed:\t{:.3f}GB;'.format(GPU_memoryUsed))
print('GPU_memoryFree:\t{:.3f}GB;'.format(GPU_memoryFree))
print('GPU_load:\t{:.3f}GB;'.format(GPU_load))
else:
GPU = GPUtil.getGPUs()[gpu_id]
GPU_load = GPU.load * 100
GPU_memoryUtil = GPU.memoryUtil / 2.0 ** 10
GPU_memoryTotal = GPU.memoryTotal / 2.0 ** 10
GPU_memoryUsed = GPU.memoryUsed / 2.0 ** 10
GPU_memoryFree = GPU.memoryFree / 2.0 ** 10
print('Current GPU (ID:{:d}) name:{:s}'.format(gpu_id, GPU.name))
print('Total_GPU_memory: {:.3f}GB;'.format(GPU_memoryTotal))
print('GPU_memoryUsed:{:.3f}GB;'.format(GPU_memoryUsed))
print('GPU_memoryFree:{:.3f}GB;'.format(GPU_memoryFree))
print('GPU_load:{:.3f}GB;'.format(GPU_load))
|
beta-recsys
|
positive
|
def copy_alignments(self, *args):
""" Copy the alignments from the previous or next frame
to the current frame """
if self.get_edit_mode() != 'Edit':
return
frame_id = self.state['navigation']['frame_idx'] + args[1]
if not 0 <= frame_id <= self.state['navigation']['max_frame']:
return
<DeepExtract>
current_frame = self.state['navigation']['frame_name']
</DeepExtract>
get_frame = self.frames.file_list_sorted[frame_id]['frame_fullname']
alignments = self.alignments.get_faces_in_frame(get_frame)
for alignment in alignments:
self.alignments.add_face(current_frame, alignment)
self.state['edit']['updated'] = True
self.state['edit']['update_faces'] = True
<DeepExtract>
self.state['edit']['redraw'] = True
</DeepExtract>
|
def copy_alignments(self, *args):
""" Copy the alignments from the previous or next frame
to the current frame """
if self.get_edit_mode() != 'Edit':
return
frame_id = self.state['navigation']['frame_idx'] + args[1]
if not 0 <= frame_id <= self.state['navigation']['max_frame']:
return
current_frame = self.state['navigation']['frame_name']
get_frame = self.frames.file_list_sorted[frame_id]['frame_fullname']
alignments = self.alignments.get_faces_in_frame(get_frame)
for alignment in alignments:
self.alignments.add_face(current_frame, alignment)
self.state['edit']['updated'] = True
self.state['edit']['update_faces'] = True
self.state['edit']['redraw'] = True
|
DeepFakeTutorial
|
positive
|
def test_retain_set_and_clear(self):
<DeepExtract>
client_id_publisher = f'aws-crt-python-unit-test-{uuid.uuid4()}'
</DeepExtract>
payload = 'HELLO WORLD'
topic_filter = 'test/MQTT5_Binding_Python_' + client_id_publisher
callbacks = Mqtt5TestCallbacks()
client_options = mqtt5.ClientOptions('will be replaced', 0)
client_options.connect_options = mqtt5.ConnectPacket(client_id=client_id_publisher)
<DeepExtract>
config = Config(AuthType.DIRECT)
if client_options is None:
client_options = mqtt5.ClientOptions(host_name=config.endpoint, port=int(config.port))
if client_options.connect_options is None:
client_options.connect_options = mqtt5.ConnectPacket()
client_options.connect_options.client_id = create_client_id()
if AuthType.DIRECT == AuthType.DIRECT or AuthType.DIRECT == AuthType.DIRECT_BASIC_AUTH or AuthType.DIRECT == AuthType.DIRECT_TLS or (AuthType.DIRECT == AuthType.DIRECT_PROXY) or (AuthType.DIRECT == AuthType.WS) or (AuthType.DIRECT == AuthType.WS_BASIC_AUTH) or (AuthType.DIRECT == AuthType.WS_TLS) or (AuthType.DIRECT == AuthType.WS_PROXY) or (AuthType.DIRECT == AuthType.DIRECT_MUTUAL_TLS) or (AuthType.DIRECT == AuthType.DIRECT_BASIC_AUTH_BAD) or (AuthType.DIRECT == AuthType.DOUBLE_CLIENT_ID_FAILURE) or (AuthType.DIRECT == AuthType.DIRECT_HOST_ONLY) or (AuthType.DIRECT == AuthType.WS_BAD_PORT) or (AuthType.DIRECT == AuthType.DIRECT_HOST_AND_PORT_ONLY):
client_options.host_name = config.endpoint
if AuthType.DIRECT == AuthType.DIRECT or AuthType.DIRECT == AuthType.DIRECT_BASIC_AUTH or AuthType.DIRECT == AuthType.DIRECT_TLS or (AuthType.DIRECT == AuthType.DIRECT_PROXY) or (AuthType.DIRECT == AuthType.WS) or (AuthType.DIRECT == AuthType.WS_BASIC_AUTH) or (AuthType.DIRECT == AuthType.WS_TLS) or (AuthType.DIRECT == AuthType.WS_PROXY) or (AuthType.DIRECT == AuthType.DIRECT_MUTUAL_TLS) or (AuthType.DIRECT == AuthType.DIRECT_BASIC_AUTH_BAD) or (AuthType.DIRECT == AuthType.DOUBLE_CLIENT_ID_FAILURE) or (AuthType.DIRECT == AuthType.DIRECT_HOST_AND_PORT_ONLY):
client_options.port = int(config.port)
if AuthType.DIRECT == AuthType.DIRECT_BASIC_AUTH or AuthType.DIRECT == AuthType.WS_BASIC_AUTH:
client_options.connect_options.username = config.username
client_options.connect_options.password = config.password
if AuthType.DIRECT == AuthType.DIRECT_TLS or AuthType.DIRECT == AuthType.WS_TLS or AuthType.DIRECT == AuthType.DIRECT_PROXY:
tls_ctx_options = io.TlsContextOptions()
tls_ctx_options.verify_peer = False
client_options.tls_ctx = io.ClientTlsContext(tls_ctx_options)
if AuthType.DIRECT == AuthType.DIRECT_MUTUAL_TLS:
tls_ctx_options = io.TlsContextOptions.create_client_with_mtls_from_path(config.cert_path, config.key_path)
client_options.tls_ctx = io.ClientTlsContext(tls_ctx_options)
if AuthType.DIRECT == AuthType.WS or AuthType.DIRECT == AuthType.WS_BASIC_AUTH or AuthType.DIRECT == AuthType.WS_TLS or (AuthType.DIRECT == AuthType.WS_PROXY) or (AuthType.DIRECT == AuthType.WS_BAD_PORT):
client_options.websocket_handshake_transform = callbacks.ws_handshake_transform
if AuthType.DIRECT == AuthType.DIRECT_PROXY or AuthType.DIRECT == AuthType.WS_PROXY:
http_proxy_options = http.HttpProxyOptions(host_name=config.proxy_endpoint, port=int(config.proxy_port))
http_proxy_options.connection_type = http.HttpProxyConnectionType.Tunneling
http_proxy_options.auth_type = http.HttpProxyAuthenticationType.Nothing
client_options.http_proxy_options = http_proxy_options
if callbacks is not None:
client_options.on_publish_callback_fn = callbacks.on_publish_received
client_options.on_lifecycle_event_stopped_fn = callbacks.on_lifecycle_stopped
client_options.on_lifecycle_event_attempting_connect_fn = callbacks.on_lifecycle_attempting_connect
client_options.on_lifecycle_event_connection_success_fn = callbacks.on_lifecycle_connection_success
client_options.on_lifecycle_event_connection_failure_fn = callbacks.on_lifecycle_connection_failure
client_options.on_lifecycle_event_disconnection_fn = callbacks.on_lifecycle_disconnection
client = mqtt5.Client(client_options)
client1 = client
</DeepExtract>
client1.start()
callbacks.future_connection_success.result(TIMEOUT)
publish_packet = mqtt5.PublishPacket(payload=payload, topic=topic_filter, retain=True, qos=mqtt5.QoS.AT_LEAST_ONCE)
puback_future1 = client1.publish(publish_packet)
puback_future1.result(TIMEOUT)
<DeepExtract>
client_id_subscriber = f'aws-crt-python-unit-test-{uuid.uuid4()}'
</DeepExtract>
callbacks2 = Mqtt5TestCallbacks()
client_options2 = mqtt5.ClientOptions('will be replaced', 0)
client_options2.connect_options = mqtt5.ConnectPacket(client_id=client_id_subscriber)
client2 = Mqtt5ClientTest._create_client(AuthType.DIRECT, client_options=client_options2, callbacks=callbacks2)
client2.start()
callbacks2.future_connection_success.result(TIMEOUT)
subscriptions = []
subscriptions.append(mqtt5.Subscription(topic_filter=topic_filter, qos=mqtt5.QoS.AT_LEAST_ONCE))
subscribe_packet = mqtt5.SubscribePacket(subscriptions=subscriptions)
subscribe_future1 = client2.subscribe(subscribe_packet=subscribe_packet)
suback_packet1 = subscribe_future1.result(TIMEOUT)
self.assertIsInstance(suback_packet1, mqtt5.SubackPacket)
received_retained_publish1 = callbacks2.future_publish_received.result(TIMEOUT)
self.assertIsInstance(received_retained_publish1, mqtt5.PublishPacket)
client2.stop()
callbacks2.future_stopped.result(TIMEOUT)
publish_packet.payload = None
puback_future2 = client1.publish(publish_packet)
puback_future2.result(TIMEOUT)
client1.stop()
callbacks.future_stopped.result(TIMEOUT)
<DeepExtract>
client_id_subscriber2 = f'aws-crt-python-unit-test-{uuid.uuid4()}'
</DeepExtract>
callbacks3 = Mqtt5TestCallbacks()
client_options3 = mqtt5.ClientOptions('will be replaced', 0)
client_options3.connect_options = mqtt5.ConnectPacket(client_id=client_id_subscriber2)
client3 = Mqtt5ClientTest._create_client(AuthType.DIRECT, client_options=client_options3, callbacks=callbacks3)
client3.start()
callbacks3.future_connection_success.result(TIMEOUT)
subscribe_future2 = client3.subscribe(subscribe_packet=subscribe_packet)
suback_packet2 = subscribe_future2.result(TIMEOUT)
self.assertIsInstance(suback_packet2, mqtt5.SubackPacket)
time.sleep(1)
self.assertEqual(callbacks3.on_publish_received_counter, 0)
client3.stop()
callbacks3.future_stopped.result(TIMEOUT)
|
def test_retain_set_and_clear(self):
client_id_publisher = f'aws-crt-python-unit-test-{uuid.uuid4()}'
payload = 'HELLO WORLD'
topic_filter = 'test/MQTT5_Binding_Python_' + client_id_publisher
callbacks = Mqtt5TestCallbacks()
client_options = mqtt5.ClientOptions('will be replaced', 0)
client_options.connect_options = mqtt5.ConnectPacket(client_id=client_id_publisher)
config = Config(AuthType.DIRECT)
if client_options is None:
client_options = mqtt5.ClientOptions(host_name=config.endpoint, port=int(config.port))
if client_options.connect_options is None:
client_options.connect_options = mqtt5.ConnectPacket()
client_options.connect_options.client_id = create_client_id()
if AuthType.DIRECT == AuthType.DIRECT or AuthType.DIRECT == AuthType.DIRECT_BASIC_AUTH or AuthType.DIRECT == AuthType.DIRECT_TLS or (AuthType.DIRECT == AuthType.DIRECT_PROXY) or (AuthType.DIRECT == AuthType.WS) or (AuthType.DIRECT == AuthType.WS_BASIC_AUTH) or (AuthType.DIRECT == AuthType.WS_TLS) or (AuthType.DIRECT == AuthType.WS_PROXY) or (AuthType.DIRECT == AuthType.DIRECT_MUTUAL_TLS) or (AuthType.DIRECT == AuthType.DIRECT_BASIC_AUTH_BAD) or (AuthType.DIRECT == AuthType.DOUBLE_CLIENT_ID_FAILURE) or (AuthType.DIRECT == AuthType.DIRECT_HOST_ONLY) or (AuthType.DIRECT == AuthType.WS_BAD_PORT) or (AuthType.DIRECT == AuthType.DIRECT_HOST_AND_PORT_ONLY):
client_options.host_name = config.endpoint
if AuthType.DIRECT == AuthType.DIRECT or AuthType.DIRECT == AuthType.DIRECT_BASIC_AUTH or AuthType.DIRECT == AuthType.DIRECT_TLS or (AuthType.DIRECT == AuthType.DIRECT_PROXY) or (AuthType.DIRECT == AuthType.WS) or (AuthType.DIRECT == AuthType.WS_BASIC_AUTH) or (AuthType.DIRECT == AuthType.WS_TLS) or (AuthType.DIRECT == AuthType.WS_PROXY) or (AuthType.DIRECT == AuthType.DIRECT_MUTUAL_TLS) or (AuthType.DIRECT == AuthType.DIRECT_BASIC_AUTH_BAD) or (AuthType.DIRECT == AuthType.DOUBLE_CLIENT_ID_FAILURE) or (AuthType.DIRECT == AuthType.DIRECT_HOST_AND_PORT_ONLY):
client_options.port = int(config.port)
if AuthType.DIRECT == AuthType.DIRECT_BASIC_AUTH or AuthType.DIRECT == AuthType.WS_BASIC_AUTH:
client_options.connect_options.username = config.username
client_options.connect_options.password = config.password
if AuthType.DIRECT == AuthType.DIRECT_TLS or AuthType.DIRECT == AuthType.WS_TLS or AuthType.DIRECT == AuthType.DIRECT_PROXY:
tls_ctx_options = io.TlsContextOptions()
tls_ctx_options.verify_peer = False
client_options.tls_ctx = io.ClientTlsContext(tls_ctx_options)
if AuthType.DIRECT == AuthType.DIRECT_MUTUAL_TLS:
tls_ctx_options = io.TlsContextOptions.create_client_with_mtls_from_path(config.cert_path, config.key_path)
client_options.tls_ctx = io.ClientTlsContext(tls_ctx_options)
if AuthType.DIRECT == AuthType.WS or AuthType.DIRECT == AuthType.WS_BASIC_AUTH or AuthType.DIRECT == AuthType.WS_TLS or (AuthType.DIRECT == AuthType.WS_PROXY) or (AuthType.DIRECT == AuthType.WS_BAD_PORT):
client_options.websocket_handshake_transform = callbacks.ws_handshake_transform
if AuthType.DIRECT == AuthType.DIRECT_PROXY or AuthType.DIRECT == AuthType.WS_PROXY:
http_proxy_options = http.HttpProxyOptions(host_name=config.proxy_endpoint, port=int(config.proxy_port))
http_proxy_options.connection_type = http.HttpProxyConnectionType.Tunneling
http_proxy_options.auth_type = http.HttpProxyAuthenticationType.Nothing
client_options.http_proxy_options = http_proxy_options
if callbacks is not None:
client_options.on_publish_callback_fn = callbacks.on_publish_received
client_options.on_lifecycle_event_stopped_fn = callbacks.on_lifecycle_stopped
client_options.on_lifecycle_event_attempting_connect_fn = callbacks.on_lifecycle_attempting_connect
client_options.on_lifecycle_event_connection_success_fn = callbacks.on_lifecycle_connection_success
client_options.on_lifecycle_event_connection_failure_fn = callbacks.on_lifecycle_connection_failure
client_options.on_lifecycle_event_disconnection_fn = callbacks.on_lifecycle_disconnection
client = mqtt5.Client(client_options)
client1 = client
client1.start()
callbacks.future_connection_success.result(TIMEOUT)
publish_packet = mqtt5.PublishPacket(payload=payload, topic=topic_filter, retain=True, qos=mqtt5.QoS.AT_LEAST_ONCE)
puback_future1 = client1.publish(publish_packet)
puback_future1.result(TIMEOUT)
client_id_subscriber = f'aws-crt-python-unit-test-{uuid.uuid4()}'
callbacks2 = Mqtt5TestCallbacks()
client_options2 = mqtt5.ClientOptions('will be replaced', 0)
client_options2.connect_options = mqtt5.ConnectPacket(client_id=client_id_subscriber)
client2 = Mqtt5ClientTest._create_client(AuthType.DIRECT, client_options=client_options2, callbacks=callbacks2)
client2.start()
callbacks2.future_connection_success.result(TIMEOUT)
subscriptions = []
subscriptions.append(mqtt5.Subscription(topic_filter=topic_filter, qos=mqtt5.QoS.AT_LEAST_ONCE))
subscribe_packet = mqtt5.SubscribePacket(subscriptions=subscriptions)
subscribe_future1 = client2.subscribe(subscribe_packet=subscribe_packet)
suback_packet1 = subscribe_future1.result(TIMEOUT)
self.assertIsInstance(suback_packet1, mqtt5.SubackPacket)
received_retained_publish1 = callbacks2.future_publish_received.result(TIMEOUT)
self.assertIsInstance(received_retained_publish1, mqtt5.PublishPacket)
client2.stop()
callbacks2.future_stopped.result(TIMEOUT)
publish_packet.payload = None
puback_future2 = client1.publish(publish_packet)
puback_future2.result(TIMEOUT)
client1.stop()
callbacks.future_stopped.result(TIMEOUT)
client_id_subscriber2 = f'aws-crt-python-unit-test-{uuid.uuid4()}'
callbacks3 = Mqtt5TestCallbacks()
client_options3 = mqtt5.ClientOptions('will be replaced', 0)
client_options3.connect_options = mqtt5.ConnectPacket(client_id=client_id_subscriber2)
client3 = Mqtt5ClientTest._create_client(AuthType.DIRECT, client_options=client_options3, callbacks=callbacks3)
client3.start()
callbacks3.future_connection_success.result(TIMEOUT)
subscribe_future2 = client3.subscribe(subscribe_packet=subscribe_packet)
suback_packet2 = subscribe_future2.result(TIMEOUT)
self.assertIsInstance(suback_packet2, mqtt5.SubackPacket)
time.sleep(1)
self.assertEqual(callbacks3.on_publish_received_counter, 0)
client3.stop()
callbacks3.future_stopped.result(TIMEOUT)
|
aws-crt-python
|
positive
|
def get_target_conf(self, module):
""" accept that a unit does not exist
and return a unit conf that says 'not-loaded' """
<DeepExtract>
try:
conf = self.load_sysd_unit_conf(module)
if conf is not None:
conf = conf
conf = self.load_sysd_template_conf(module)
if conf is not None:
conf = conf
conf = self.load_sysv_unit_conf(module)
if conf is not None:
conf = conf
except Exception as e:
logg.warning('%s not loaded: %s', module, e)
conf = None
</DeepExtract>
if conf is not None:
return conf
<DeepExtract>
data = UnitConfParser()
data.set(Unit, 'Description', description or 'NOT-FOUND ' + str(module))
conf = SystemctlConf(data, module)
conf._root = self._root
target_conf = conf
</DeepExtract>
if module in target_requires:
target_conf.set(Unit, 'Requires', target_requires[module])
return target_conf
|
def get_target_conf(self, module):
""" accept that a unit does not exist
and return a unit conf that says 'not-loaded' """
try:
conf = self.load_sysd_unit_conf(module)
if conf is not None:
conf = conf
conf = self.load_sysd_template_conf(module)
if conf is not None:
conf = conf
conf = self.load_sysv_unit_conf(module)
if conf is not None:
conf = conf
except Exception as e:
logg.warning('%s not loaded: %s', module, e)
conf = None
if conf is not None:
return conf
data = UnitConfParser()
data.set(Unit, 'Description', description or 'NOT-FOUND ' + str(module))
conf = SystemctlConf(data, module)
conf._root = self._root
target_conf = conf
if module in target_requires:
target_conf.set(Unit, 'Requires', target_requires[module])
return target_conf
|
docker-systemctl-images
|
positive
|
def test_marshalling_task_command() -> None:
<DeepExtract>
js = to_js(SendMessage(Event('test', {'foo': 'hello'})))
again = from_js(js, type(SendMessage(Event('test', {'foo': 'hello'}))))
assert DeepDiff(SendMessage(Event('test', {'foo': 'hello'})), again) == {}, f'Json: {js} serialized as {again}'
</DeepExtract>
<DeepExtract>
js = to_js(ExecuteOnCLI('test', frozendict({'fii': 'bla'})))
again = from_js(js, type(ExecuteOnCLI('test', frozendict({'fii': 'bla'}))))
assert DeepDiff(ExecuteOnCLI('test', frozendict({'fii': 'bla'})), again) == {}, f'Json: {js} serialized as {again}'
</DeepExtract>
|
def test_marshalling_task_command() -> None:
js = to_js(SendMessage(Event('test', {'foo': 'hello'})))
again = from_js(js, type(SendMessage(Event('test', {'foo': 'hello'}))))
assert DeepDiff(SendMessage(Event('test', {'foo': 'hello'})), again) == {}, f'Json: {js} serialized as {again}'
js = to_js(ExecuteOnCLI('test', frozendict({'fii': 'bla'})))
again = from_js(js, type(ExecuteOnCLI('test', frozendict({'fii': 'bla'}))))
assert DeepDiff(ExecuteOnCLI('test', frozendict({'fii': 'bla'})), again) == {}, f'Json: {js} serialized as {again}'
|
cloudkeeper
|
positive
|
def handler(event: Dict[str, Any], context: Optional[Dict[str, Any]]) -> Any:
<DeepExtract>
logger.info(f'Generating kubeconfig in {KUBECONFIG_PATH}')
run_command(f'aws eks update-kubeconfig --name orbit-{ORBIT_ENV} --role-arn arn:aws:iam::{ACCOUNT_ID}:role{ROLE_PREFIX}orbit-{ORBIT_ENV}-{REGION}-admin --kubeconfig {KUBECONFIG_PATH}')
logger.info('Loading kubeconfig')
try:
config.load_kube_config(KUBECONFIG_PATH)
logger.info('Loaded kubeconfig successfully')
except config.ConfigException:
raise Exception('Could not configure kubernetes python client')
</DeepExtract>
api_CoreV1 = client.CoreV1Api()
userspace_dc = dynamic.DynamicClient(client=api_client.ApiClient()).resources.get(group=ORBIT_API_GROUP, api_version=ORBIT_API_VERSION, kind=USERSPACE_CR_KIND)
<DeepExtract>
user_name = cast(str, event.get('user_name'))
user_email = cast(str, event.get('user_email'))
expected_user_namespaces = cast(Dict[str, str], event.get('expected_user_namespaces'))
all_ns_raw = api_CoreV1.list_namespace().to_dict()
all_ns = [item.get('metadata').get('name') for item in all_ns_raw['items'] if item.get('metadata', {}).get('name') and item.get('metadata', {}).get('name').endswith(user_name)]
create_user_namespace(api=api_CoreV1, userspace_dc=userspace_dc, user_name=user_name, user_email=user_email, expected_user_namespaces=expected_user_namespaces, namespaces=all_ns)
delete_user_namespace(api=api_CoreV1, userspace_dc=userspace_dc, user_name=user_name, expected_user_namespaces=expected_user_namespaces, namespaces=all_ns)
</DeepExtract>
|
def handler(event: Dict[str, Any], context: Optional[Dict[str, Any]]) -> Any:
logger.info(f'Generating kubeconfig in {KUBECONFIG_PATH}')
run_command(f'aws eks update-kubeconfig --name orbit-{ORBIT_ENV} --role-arn arn:aws:iam::{ACCOUNT_ID}:role{ROLE_PREFIX}orbit-{ORBIT_ENV}-{REGION}-admin --kubeconfig {KUBECONFIG_PATH}')
logger.info('Loading kubeconfig')
try:
config.load_kube_config(KUBECONFIG_PATH)
logger.info('Loaded kubeconfig successfully')
except config.ConfigException:
raise Exception('Could not configure kubernetes python client')
api_CoreV1 = client.CoreV1Api()
userspace_dc = dynamic.DynamicClient(client=api_client.ApiClient()).resources.get(group=ORBIT_API_GROUP, api_version=ORBIT_API_VERSION, kind=USERSPACE_CR_KIND)
user_name = cast(str, event.get('user_name'))
user_email = cast(str, event.get('user_email'))
expected_user_namespaces = cast(Dict[str, str], event.get('expected_user_namespaces'))
all_ns_raw = api_CoreV1.list_namespace().to_dict()
all_ns = [item.get('metadata').get('name') for item in all_ns_raw['items'] if item.get('metadata', {}).get('name') and item.get('metadata', {}).get('name').endswith(user_name)]
create_user_namespace(api=api_CoreV1, userspace_dc=userspace_dc, user_name=user_name, user_email=user_email, expected_user_namespaces=expected_user_namespaces, namespaces=all_ns)
delete_user_namespace(api=api_CoreV1, userspace_dc=userspace_dc, user_name=user_name, expected_user_namespaces=expected_user_namespaces, namespaces=all_ns)
|
aws-orbit-workbench
|
positive
|
def homogeneous_input(self):
input = self.input0
if self.has_bias():
<DeepExtract>
batch = input.shape[0]
ones = torch.ones(batch, 1, device=input.device)
input = torch.cat([input, ones], dim=1)
</DeepExtract>
return input
|
def homogeneous_input(self):
input = self.input0
if self.has_bias():
batch = input.shape[0]
ones = torch.ones(batch, 1, device=input.device)
input = torch.cat([input, ones], dim=1)
return input
|
backpack
|
positive
|
def test_show_me_the_sum_success():
activity_count = 5
sum_ = 0
for i in range(activity_count):
sum_ += i
sum_results = f"Well that's nice {sum_}!"
context_builder = ContextBuilder('test_fan_out_fan_in_function')
<DeepExtract>
context_builder.add_task_scheduled_event(name='GetActivityCount', id_=0)
context_builder.add_orchestrator_completed_event()
context_builder.add_orchestrator_started_event()
context_builder.add_task_completed_event(id_=0, result=json.dumps(activity_count))
</DeepExtract>
<DeepExtract>
for i in range(activity_count):
if i != failed_index:
add_completed_event(context_builder, 1 + i, 'ParrotValue', i)
else:
add_failed_event(context_builder, 1 + i, 'ParrotValue', failed_reason, failed_details)
</DeepExtract>
<DeepExtract>
context_builder.add_task_scheduled_event(name='ShowMeTheSum', id_=activity_count + 1)
context_builder.add_orchestrator_completed_event()
context_builder.add_orchestrator_started_event()
context_builder.add_task_completed_event(id_=activity_count + 1, result=json.dumps(sum_results))
</DeepExtract>
result = get_orchestration_state_result(context_builder, generator_function)
<DeepExtract>
expected_state = OrchestratorState(is_done=False, actions=[], output=sum_results, replay_schema=replay_schema)
</DeepExtract>
<DeepExtract>
action = CallActivityAction(function_name='GetActivityCount', input_=None)
expected_state.actions.append([action])
</DeepExtract>
<DeepExtract>
actions = []
for i in range(activity_count):
action = CallActivityAction(function_name='ParrotValue', input_=json.dumps(i))
actions.append(action)
expected_state.actions.append(actions)
</DeepExtract>
results = []
for i in range(activity_count):
results.append(i)
<DeepExtract>
action = CallActivityAction(function_name='ShowMeTheSum', input_=results)
expected_state.actions.append([action])
</DeepExtract>
expected_state._is_done = True
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
|
def test_show_me_the_sum_success():
activity_count = 5
sum_ = 0
for i in range(activity_count):
sum_ += i
sum_results = f"Well that's nice {sum_}!"
context_builder = ContextBuilder('test_fan_out_fan_in_function')
context_builder.add_task_scheduled_event(name='GetActivityCount', id_=0)
context_builder.add_orchestrator_completed_event()
context_builder.add_orchestrator_started_event()
context_builder.add_task_completed_event(id_=0, result=json.dumps(activity_count))
for i in range(activity_count):
if i != failed_index:
add_completed_event(context_builder, 1 + i, 'ParrotValue', i)
else:
add_failed_event(context_builder, 1 + i, 'ParrotValue', failed_reason, failed_details)
context_builder.add_task_scheduled_event(name='ShowMeTheSum', id_=activity_count + 1)
context_builder.add_orchestrator_completed_event()
context_builder.add_orchestrator_started_event()
context_builder.add_task_completed_event(id_=activity_count + 1, result=json.dumps(sum_results))
result = get_orchestration_state_result(context_builder, generator_function)
expected_state = OrchestratorState(is_done=False, actions=[], output=sum_results, replay_schema=replay_schema)
action = CallActivityAction(function_name='GetActivityCount', input_=None)
expected_state.actions.append([action])
actions = []
for i in range(activity_count):
action = CallActivityAction(function_name='ParrotValue', input_=json.dumps(i))
actions.append(action)
expected_state.actions.append(actions)
results = []
for i in range(activity_count):
results.append(i)
action = CallActivityAction(function_name='ShowMeTheSum', input_=results)
expected_state.actions.append([action])
expected_state._is_done = True
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
|
azure-functions-durable-python
|
positive
|
def prepro(args):
if not os.path.exists(args.target_dir):
os.makedirs(args.target_dir)
if args.mode == 'full':
<DeepExtract>
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = in_path or os.path.join(args.source_dir, '{}-{}v1.1.json'.format('train', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * start_ratio))
stop_ai = int(round(len(source_data['data']) * stop_ratio))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
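# Convert each character-level answer span to word-level (sentence, token) start/end indices and in-token character offsets.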
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'train')
</DeepExtract>
<DeepExtract>
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('dev', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 1.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'dev')
</DeepExtract>
<DeepExtract>
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('dev', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 1.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'test')
</DeepExtract>
elif args.mode == 'all':
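# 'all' mode: merge train and dev into all-v1.1.json, write empty 'dev' and 'test' shards (0.0-0.0 slices of the dev file), and preprocess the merged file as the 'train' shard.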
<DeepExtract>
out_path = os.path.join(args.source_dir, 'all-v1.1.json')
if os.path.exists(out_path):
return
train_path = os.path.join(args.source_dir, args.train_name)
train_data = json.load(open(train_path, 'r'))
dev_path = os.path.join(args.source_dir, args.dev_name)
dev_data = json.load(open(dev_path, 'r'))
train_data['data'].extend(dev_data['data'])
print('dumping all data ...')
json.dump(train_data, open(out_path, 'w'))
</DeepExtract>
<DeepExtract>
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('dev', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 0.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'dev')
</DeepExtract>
<DeepExtract>
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('dev', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 0.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'test')
</DeepExtract>
<DeepExtract>
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('all', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 1.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'train')
</DeepExtract>
elif args.mode == 'single':
assert len(args.single_path) > 0
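# 'single' mode: preprocess the one JSON file given by args.single_path into a 'single' shard.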
<DeepExtract>
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = args.single_path or os.path.join(args.source_dir, '{}-{}v1.1.json'.format('NULL', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 1.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'single')
</DeepExtract>
else:
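# Default mode: split the train file at args.train_ratio into 'train' and 'dev' shards and use the dev file as the 'test' shard.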
<DeepExtract>
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('train', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * args.train_ratio))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'train')
</DeepExtract>
<DeepExtract>
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('train', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * args.train_ratio))
stop_ai = int(round(len(source_data['data']) * 1.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'dev')
</DeepExtract>
<DeepExtract>
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('dev', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 1.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'test')
</DeepExtract>
|
def prepro(args):
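# Preprocess SQuAD-style v1.1 JSON from args.source_dir into tokenized data/shared dicts written by save().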
if not os.path.exists(args.target_dir):
os.makedirs(args.target_dir)
if args.mode == 'full':
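# 'full' mode: preprocess the train file into a 'train' shard and the dev file into both 'dev' and 'test' shards.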
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('train', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 1.0))
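# Tokenize each paragraph into sentences/words/chars; counters are weighted by the number of questions on that paragraph.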
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
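# Convert each character-level answer span to word-level (sentence, token) start/end indices and in-token character offsets.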
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'train')
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('dev', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 1.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'dev')
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('dev', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 1.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'test')
elif args.mode == 'all':
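# 'all' mode: merge train and dev into all-v1.1.json, write empty 'dev' and 'test' shards (0.0-0.0 slices of the dev file), and preprocess the merged file as the 'train' shard.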
out_path = os.path.join(args.source_dir, 'all-v1.1.json')
if os.path.exists(out_path):
return
train_path = os.path.join(args.source_dir, args.train_name)
train_data = json.load(open(train_path, 'r'))
dev_path = os.path.join(args.source_dir, args.dev_name)
dev_data = json.load(open(dev_path, 'r'))
train_data['data'].extend(dev_data['data'])
print('dumping all data ...')
json.dump(train_data, open(out_path, 'w'))
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('dev', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 0.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'dev')
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('dev', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 0.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'test')
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = os.path.join(args.source_dir, '{}-{}v1.1.json'.format('all', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 1.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'train')
elif args.mode == 'single':
assert len(args.single_path) > 0
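# 'single' mode: preprocess the one JSON file given by args.single_path into a 'single' shard.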
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = args.single_path or os.path.join(args.source_dir, '{}-{}v1.1.json'.format('NULL', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * 1.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'single')
else:
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = in_path or os.path.join(args.source_dir, '{}-{}v1.1.json'.format('train', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * 0.0))
stop_ai = int(round(len(source_data['data']) * args.train_ratio))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'train')
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = in_path or os.path.join(args.source_dir, '{}-{}v1.1.json'.format('train', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * args.train_ratio))
stop_ai = int(round(len(source_data['data']) * 1.0))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'dev')
if args.tokenizer == 'PTB':
import nltk
sent_tokenize = nltk.sent_tokenize
def word_tokenize(tokens):
return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
elif args.tokenizer == 'Stanford':
from my.corenlp_interface import CoreNLPInterface
interface = CoreNLPInterface(args.url, args.port)
sent_tokenize = interface.split_doc
word_tokenize = interface.split_sent
else:
raise Exception()
if not args.split:
sent_tokenize = lambda para: [para]
source_path = in_path or os.path.join(args.source_dir, '{}-{}v1.1.json'.format('dev', args.suffix))
source_data = json.load(open(source_path, 'r'))
(q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
na = []
cy = []
(x, cx) = ([], [])
answerss = []
p = []
(word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
start_ai = int(round(len(source_data['data']) * start_ratio))
stop_ai = int(round(len(source_data['data']) * stop_ratio))
for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
(xp, cxp) = ([], [])
pp = []
x.append(xp)
cx.append(cxp)
p.append(pp)
for (pi, para) in enumerate(article['paragraphs']):
context = para['context']
context = context.replace("''", '" ')
context = context.replace('``', '" ')
xi = list(map(word_tokenize, sent_tokenize(context)))
xi = [process_tokens(tokens) for tokens in xi]
cxi = [[list(xijk) for xijk in xij] for xij in xi]
xp.append(xi)
cxp.append(cxi)
pp.append(context)
for xij in xi:
for xijk in xij:
word_counter[xijk] += len(para['qas'])
lower_word_counter[xijk.lower()] += len(para['qas'])
for xijkl in xijk:
char_counter[xijkl] += len(para['qas'])
rxi = [ai, pi]
assert len(x) - 1 == ai
assert len(x[ai]) - 1 == pi
for qa in para['qas']:
qi = word_tokenize(qa['question'])
qi = process_tokens(qi)
cqi = [list(qij) for qij in qi]
yi = []
cyi = []
answers = []
for answer in qa['answers']:
answer_text = answer['text']
answers.append(answer_text)
answer_start = answer['answer_start']
answer_stop = answer_start + len(answer_text)
(yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
assert len(xi[yi0[0]]) > yi0[1]
assert len(xi[yi1[0]]) >= yi1[1]
w0 = xi[yi0[0]][yi0[1]]
w1 = xi[yi1[0]][yi1[1] - 1]
i0 = get_word_idx(context, xi, yi0)
i1 = get_word_idx(context, xi, (yi1[0], yi1[1] - 1))
cyi0 = answer_start - i0
cyi1 = answer_stop - i1 - 1
assert answer_text[0] == w0[cyi0], (answer_text, w0, cyi0)
assert answer_text[-1] == w1[cyi1]
assert cyi0 < 32, (answer_text, w0)
assert cyi1 < 32, (answer_text, w1)
yi.append([yi0, yi1])
cyi.append([cyi0, cyi1])
if len(qa['answers']) == 0:
yi.append([(0, 0), (0, 1)])
cyi.append([0, 1])
na.append(True)
else:
na.append(False)
for qij in qi:
word_counter[qij] += 1
lower_word_counter[qij.lower()] += 1
for qijk in qij:
char_counter[qijk] += 1
q.append(qi)
cq.append(cqi)
y.append(yi)
cy.append(cyi)
rx.append(rxi)
rcx.append(rxi)
ids.append(qa['id'])
idxs.append(len(idxs))
answerss.append(answers)
if args.debug:
break
word2vec_dict = get_word2vec(args, word_counter)
lower_word2vec_dict = get_word2vec(args, lower_word_counter)
data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
print('saving ...')
save(args, data, shared, 'test')
|
dawn-bench-models
|
positive
|
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v[3] = 1
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
M = numpy.diag([factor, factor, factor, 1.0])
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
<DeepExtract>
if out is None:
direction[:3] = numpy.array(direction[:3], dtype=numpy.float64, copy=True)
if direction[:3].ndim == 1:
direction[:3] /= math.sqrt(numpy.dot(direction[:3], direction[:3]))
direction = direction[:3]
else:
if out is not direction[:3]:
out[:] = numpy.array(direction[:3], copy=False)
direction[:3] = out
length = numpy.atleast_1d(numpy.sum(direction[:3] * direction[:3], axis))
numpy.sqrt(length, length)
if axis is not None:
length = numpy.expand_dims(length, axis)
direction[:3] /= length
if out is None:
direction = direction[:3]
</DeepExtract>
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = factor * numpy.dot(origin[:3], direction) * direction
return M
|
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v[3] = 1
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
M = numpy.diag([factor, factor, factor, 1.0])
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
if out is None:
direction[:3] = numpy.array(direction[:3], dtype=numpy.float64, copy=True)
if direction[:3].ndim == 1:
direction[:3] /= math.sqrt(numpy.dot(direction[:3], direction[:3]))
direction = direction[:3]
else:
if out is not direction[:3]:
out[:] = numpy.array(direction[:3], copy=False)
direction[:3] = out
length = numpy.atleast_1d(numpy.sum(direction[:3] * direction[:3], axis))
numpy.sqrt(length, length)
if axis is not None:
length = numpy.expand_dims(length, axis)
direction[:3] /= length
if out is None:
direction = direction[:3]
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = factor * numpy.dot(origin[:3], direction) * direction
return M
|
alfred
|
positive
|
def generate_sidebar(app, fromdocname):
env = app.builder.env
container = nodes.compound(classes=['toctree-wrapper'])
py = env.get_domain('py')
classes = py.get_objects()
classes_per_group = {'api': ([], None, 'bproc.'), 'internal': ([], 2, 'bproc.python.'), 'modules (deprecated)': ([], 3, '')}
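# classes_per_group maps a sidebar section name to ([collected objects], module-path split
# depth used for sub-grouping (None = flat list), display-name prefix).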
for e in classes:
if e[2] == 'module' and e[3].startswith('blenderproc.api.') or (e[2] == 'class' and (not e[3].startswith('blenderproc.api.'))):
if e[3].startswith('blenderproc.api.'):
group = 'api'
elif e[0].startswith('blenderproc.python.modules.'):
group = 'modules (deprecated)'
else:
group = 'internal'
classes_per_group[group][0].append(e)
<DeepExtract>
tutorials_dir = Path(__file__).absolute().parent.parent / 'docs' / 'tutorials'
tutorials = [('Loading and manipulating objects', 'loader'), ('Configuring the camera', 'camera'), ('Rendering the scene', 'renderer'), ('Writing the results to file', 'writer'), ('How key frames work', 'key_frames'), ('Positioning objects via the physics simulator', 'physics')]
container += nodes.caption('Tutorials', '', *[nodes.Text('Tutorials')])
for tutorial in tutorials:
toc = nodes.bullet_list()
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, 'docs/tutorials/' + tutorial[1])
ref.append(nodes.Text(tutorial[0]))
module_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=['toctree-l1'])
if fromdocname.startswith('docs/tutorials/' + tutorial[1]):
module_item['classes'].append('current')
toc += module_item
container += toc
</DeepExtract>
<DeepExtract>
examples = Path(__file__).absolute().parent.parent / 'examples'
container += nodes.caption('Examples', '', *[nodes.Text('Examples')])
for example_groups in [examples / group for group in ['basics', 'advanced', 'datasets']]:
if example_groups.is_dir():
toc = nodes.bullet_list()
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, 'examples/' + example_groups.name + '/README')
ref.append(nodes.Text(example_groups.name.capitalize()))
module_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=['toctree-l1'])
if fromdocname.startswith('examples/' + example_groups.name):
module_item['classes'].append('current')
toc += module_item
subtree = nodes.bullet_list()
module_item += subtree
for example in sorted(example_groups.rglob('*/README.md'), key=lambda x: x.parent.name):
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, str(example).replace(str(examples), 'examples').replace('README.md', 'README'))
ref.append(nodes.Text(example.parent.name))
class_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=['toctree-l2'])
if fromdocname == ref['refuri'].replace('.html', ''):
class_item['classes'].append('current')
subtree += class_item
container += toc
</DeepExtract>
for (key, items) in classes_per_group.items():
<DeepExtract>
toc = nodes.bullet_list()
toc += nodes.caption(key.capitalize(), '', *[nodes.Text(key.capitalize())])
if items[1] is not None:
entries = defaultdict(list)
prefix = '.'.join(items[0][0][0].split('.')[:items[1]]) + '.'
for e in items[0]:
module = e[0].split('.')[items[1]]
entries[module].append(e)
for (module, class_list) in entries.items():
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, prefix + module)
ref.append(nodes.Text(items[2] + module if items[2] != '' else module.capitalize()))
module_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=['toctree-l1'])
if fromdocname.startswith(prefix + module):
module_item['classes'].append('current')
toc += module_item
subtree = nodes.bullet_list()
module_item += subtree
generate_classlist(app, fromdocname, subtree, class_list, '')
else:
generate_classlist(app, fromdocname, toc, items[0], items[2], level=1)
container += toc
</DeepExtract>
return container
|
def generate_sidebar(app, fromdocname):
env = app.builder.env
container = nodes.compound(classes=['toctree-wrapper'])
py = env.get_domain('py')
classes = py.get_objects()
classes_per_group = {'api': ([], None, 'bproc.'), 'internal': ([], 2, 'bproc.python.'), 'modules (deprecated)': ([], 3, '')}
for e in classes:
if e[2] == 'module' and e[3].startswith('blenderproc.api.') or (e[2] == 'class' and (not e[3].startswith('blenderproc.api.'))):
if e[3].startswith('blenderproc.api.'):
group = 'api'
elif e[0].startswith('blenderproc.python.modules.'):
group = 'modules (deprecated)'
else:
group = 'internal'
classes_per_group[group][0].append(e)
tutorials_dir = Path(__file__).absolute().parent.parent / 'docs' / 'tutorials'
tutorials = [('Loading and manipulating objects', 'loader'), ('Configuring the camera', 'camera'), ('Rendering the scene', 'renderer'), ('Writing the results to file', 'writer'), ('How key frames work', 'key_frames'), ('Positioning objects via the physics simulator', 'physics')]
container += nodes.caption('Tutorials', '', *[nodes.Text('Tutorials')])
for tutorial in tutorials:
toc = nodes.bullet_list()
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, 'docs/tutorials/' + tutorial[1])
ref.append(nodes.Text(tutorial[0]))
module_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=['toctree-l1'])
if fromdocname.startswith('docs/tutorials/' + tutorial[1]):
module_item['classes'].append('current')
toc += module_item
container += toc
examples = Path(__file__).absolute().parent.parent / 'examples'
container += nodes.caption('Examples', '', *[nodes.Text('Examples')])
for example_groups in [examples / group for group in ['basics', 'advanced', 'datasets']]:
if example_groups.is_dir():
toc = nodes.bullet_list()
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, 'examples/' + example_groups.name + '/README')
ref.append(nodes.Text(example_groups.name.capitalize()))
module_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=['toctree-l1'])
if fromdocname.startswith('examples/' + example_groups.name):
module_item['classes'].append('current')
toc += module_item
subtree = nodes.bullet_list()
module_item += subtree
for example in sorted(example_groups.rglob('*/README.md'), key=lambda x: x.parent.name):
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, str(example).replace(str(examples), 'examples').replace('README.md', 'README'))
ref.append(nodes.Text(example.parent.name))
class_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=['toctree-l2'])
if fromdocname == ref['refuri'].replace('.html', ''):
class_item['classes'].append('current')
subtree += class_item
container += toc
for (key, items) in classes_per_group.items():
toc = nodes.bullet_list()
toc += nodes.caption(key.capitalize(), '', *[nodes.Text(key.capitalize())])
if items[1] is not None:
entries = defaultdict(list)
prefix = '.'.join(items[0][0][0].split('.')[:items[1]]) + '.'
for e in items[0]:
module = e[0].split('.')[items[1]]
entries[module].append(e)
for (module, class_list) in entries.items():
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, prefix + module)
ref.append(nodes.Text(items[2] + module if items[2] != '' else module.capitalize()))
module_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=['toctree-l1'])
if fromdocname.startswith(prefix + module):
module_item['classes'].append('current')
toc += module_item
subtree = nodes.bullet_list()
module_item += subtree
generate_classlist(app, fromdocname, subtree, class_list, '')
else:
generate_classlist(app, fromdocname, toc, items[0], items[2], level=1)
container += toc
return container
|
BlenderProc
|
positive
|
def learn(self, batch_size=100, actor_lr_input=0.001, critic_lr_input=0.001):
self.q_optimizer.zero_grad()
data = self.replay_buffer.sample_batch(batch_size)
<DeepExtract>
if self.per_flag:
(tree_idx, batch_memory, ISWeights) = data
(o, a, r, o2, d) = ([], [], [], [], [])
for i in range(len(batch_memory)):
o.append(batch_memory[i][0])
a.append(batch_memory[i][1])
r.append(batch_memory[i][2])
o2.append(batch_memory[i][3])
d.append(batch_memory[i][4])
o = torch.as_tensor(np.array(o), dtype=torch.float32, device=self.device)
a = torch.as_tensor(np.array(a), dtype=torch.float32, device=self.device)
r = torch.as_tensor(np.array(r), dtype=torch.float32, device=self.device)
o2 = torch.as_tensor(np.array(o2), dtype=torch.float32, device=self.device)
d = torch.as_tensor(np.array(d), dtype=torch.float32, device=self.device)
ISWeights = torch.as_tensor(np.array(ISWeights), dtype=torch.float32, device=self.device)
else:
(o, a, r, o2, d) = (data['obs'], data['act'], data['rew'], data['obs2'], data['done'])
q = self.ac.q(o, a)
with torch.no_grad():
pi_targ = self.ac_targ.pi(o2)
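# Target policy smoothing (TD3 style): add clipped Gaussian noise to the target action,
# then clamp the result to the action bound before evaluating the target Q.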
epsilon = torch.randn_like(pi_targ) * self.target_noise
epsilon = torch.clamp(epsilon, -self.noise_clip, self.noise_clip)
a2 = pi_targ + epsilon
a2 = torch.clamp(a2, -self.a_bound, self.a_bound)
q_pi_targ = self.ac_targ.q(o2, a2)
backup = r + self.gamma * (1 - d) * q_pi_targ
backup = torch.clamp(backup, -50.0, 0.0)
loss_q = ((q - backup) ** 2).mean()
loss_info = dict(Q1Vals=q, Q2Vals=q)
if self.per_flag:
loss_q = (ISWeights * (q - backup) ** 2).mean()
abs_errors = torch.abs(backup - q)
loss_info['abs_errors'] = abs_errors.detach().cpu().numpy()
loss_info['tree_idx'] = tree_idx
(loss_q, loss_info) = (loss_q, loss_info)
</DeepExtract>
loss_q.backward()
self.q_optimizer.step()
if self.per_flag:
self.replay_buffer.batch_update(tree_idx=loss_info['tree_idx'], abs_errors=loss_info['abs_errors'])
if self.learn_step % self.policy_delay == 0:
for p in self.ac.q.parameters():
p.requires_grad = False
self.pi_optimizer.zero_grad()
<DeepExtract>
if self.per_flag:
(tree_idx, batch_memory, ISWeights) = data
o = []
for i in range(len(batch_memory)):
o.append(batch_memory[i][0])
o = torch.as_tensor(np.array(o), dtype=torch.float32, device=self.device)
else:
o = data['obs']
q_pi = self.ac.q(o, self.ac.pi(o))
loss_pi = -q_pi.mean()
</DeepExtract>
loss_pi.backward()
self.pi_optimizer.step()
for p in self.ac.q.parameters():
p.requires_grad = True
with torch.no_grad():
for (p, p_targ) in zip(self.ac.parameters(), self.ac_targ.parameters()):
p_targ.data.mul_(self.polyak)
p_targ.data.add_((1 - self.polyak) * p.data)
self.learn_step += 1
return (loss_q, loss_info['Q1Vals'].detach().cpu().numpy(), loss_info['Q2Vals'].detach().cpu().numpy())
|
def learn(self, batch_size=100, actor_lr_input=0.001, critic_lr_input=0.001):
self.q_optimizer.zero_grad()
data = self.replay_buffer.sample_batch(batch_size)
if self.per_flag:
(tree_idx, batch_memory, ISWeights) = data
(o, a, r, o2, d) = ([], [], [], [], [])
for i in range(len(batch_memory)):
o.append(batch_memory[i][0])
a.append(batch_memory[i][1])
r.append(batch_memory[i][2])
o2.append(batch_memory[i][3])
d.append(batch_memory[i][4])
o = torch.as_tensor(np.array(o), dtype=torch.float32, device=self.device)
a = torch.as_tensor(np.array(a), dtype=torch.float32, device=self.device)
r = torch.as_tensor(np.array(r), dtype=torch.float32, device=self.device)
o2 = torch.as_tensor(np.array(o2), dtype=torch.float32, device=self.device)
d = torch.as_tensor(np.array(d), dtype=torch.float32, device=self.device)
ISWeights = torch.as_tensor(np.array(ISWeights), dtype=torch.float32, device=self.device)
else:
(o, a, r, o2, d) = (data['obs'], data['act'], data['rew'], data['obs2'], data['done'])
q = self.ac.q(o, a)
with torch.no_grad():
pi_targ = self.ac_targ.pi(o2)
epsilon = torch.randn_like(pi_targ) * self.target_noise
epsilon = torch.clamp(epsilon, -self.noise_clip, self.noise_clip)
a2 = pi_targ + epsilon
a2 = torch.clamp(a2, -self.a_bound, self.a_bound)
q_pi_targ = self.ac_targ.q(o2, a2)
backup = r + self.gamma * (1 - d) * q_pi_targ
backup = torch.clamp(backup, -50.0, 0.0)
loss_q = ((q - backup) ** 2).mean()
loss_info = dict(Q1Vals=q, Q2Vals=q)
if self.per_flag:
loss_q = (ISWeights * (q - backup) ** 2).mean()
abs_errors = torch.abs(backup - q)
loss_info['abs_errors'] = abs_errors.detach().cpu().numpy()
loss_info['tree_idx'] = tree_idx
(loss_q, loss_info) = (loss_q, loss_info)
loss_q.backward()
self.q_optimizer.step()
if self.per_flag:
self.replay_buffer.batch_update(tree_idx=loss_info['tree_idx'], abs_errors=loss_info['abs_errors'])
if self.learn_step % self.policy_delay == 0:
for p in self.ac.q.parameters():
p.requires_grad = False
self.pi_optimizer.zero_grad()
if self.per_flag:
(tree_idx, batch_memory, ISWeights) = data
o = []
for i in range(len(batch_memory)):
o.append(batch_memory[i][0])
o = torch.as_tensor(np.array(o), dtype=torch.float32, device=self.device)
else:
o = data['obs']
q_pi = self.ac.q(o, self.ac.pi(o))
loss_pi = -q_pi.mean()
loss_pi.backward()
self.pi_optimizer.step()
for p in self.ac.q.parameters():
p.requires_grad = True
with torch.no_grad():
for (p, p_targ) in zip(self.ac.parameters(), self.ac_targ.parameters()):
p_targ.data.mul_(self.polyak)
p_targ.data.add_((1 - self.polyak) * p.data)
self.learn_step += 1
return (loss_q, loss_info['Q1Vals'].detach().cpu().numpy(), loss_info['Q2Vals'].detach().cpu().numpy())
|
DRLib
|
positive
|
def test_alter_part():
def verify_alter(table, attribute_sql):
definition_original = schema.connection.query(f'SHOW CREATE TABLE {table.full_table_name}').fetchone()[1]
table.definition = table.definition_new
table.alter(prompt=False)
definition_new = schema.connection.query(f'SHOW CREATE TABLE {table.full_table_name}').fetchone()[1]
assert re.sub(f'{attribute_sql},\n ', '', definition_new) == definition_original
<DeepExtract>
definition_original = schema.connection.query(f'SHOW CREATE TABLE {Parent.Child.full_table_name}').fetchone()[1]
Parent.Child.definition = Parent.Child.definition_new
Parent.Child.alter(prompt=False)
definition_new = schema.connection.query(f'SHOW CREATE TABLE {Parent.Child.full_table_name}').fetchone()[1]
assert re.sub(f"{'`child_id` .* DEFAULT NULL'},\n ", '', definition_new) == definition_original
</DeepExtract>
<DeepExtract>
definition_original = schema.connection.query(f'SHOW CREATE TABLE {Parent.Grandchild.full_table_name}').fetchone()[1]
Parent.Grandchild.definition = Parent.Grandchild.definition_new
Parent.Grandchild.alter(prompt=False)
definition_new = schema.connection.query(f'SHOW CREATE TABLE {Parent.Grandchild.full_table_name}').fetchone()[1]
assert re.sub(f"{'`grandchild_id` .* DEFAULT NULL'},\n ", '', definition_new) == definition_original
</DeepExtract>
|
def test_alter_part():
def verify_alter(table, attribute_sql):
definition_original = schema.connection.query(f'SHOW CREATE TABLE {table.full_table_name}').fetchone()[1]
table.definition = table.definition_new
table.alter(prompt=False)
definition_new = schema.connection.query(f'SHOW CREATE TABLE {table.full_table_name}').fetchone()[1]
assert re.sub(f'{attribute_sql},\n ', '', definition_new) == definition_original
definition_original = schema.connection.query(f'SHOW CREATE TABLE {Parent.Child.full_table_name}').fetchone()[1]
Parent.Child.definition = Parent.Child.definition_new
Parent.Child.alter(prompt=False)
definition_new = schema.connection.query(f'SHOW CREATE TABLE {Parent.Child.full_table_name}').fetchone()[1]
assert re.sub(f"{'`child_id` .* DEFAULT NULL'},\n ", '', definition_new) == definition_original
definition_original = schema.connection.query(f'SHOW CREATE TABLE {Parent.Grandchild.full_table_name}').fetchone()[1]
Parent.Grandchild.definition = Parent.Grandchild.definition_new
Parent.Grandchild.alter(prompt=False)
definition_new = schema.connection.query(f'SHOW CREATE TABLE {Parent.Grandchild.full_table_name}').fetchone()[1]
assert re.sub(f"{'`grandchild_id` .* DEFAULT NULL'},\n ", '', definition_new) == definition_original
|
datajoint-python
|
positive
|
def __addInitialLinks(self):
self.sliplinks = []
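# Build the initial concept network: successor/predecessor chains over letters and numbers,
# category/property/instance links, and 'opposite' slip links between paired concepts.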
<DeepExtract>
previous = self.letters[0]
for item in self.letters[1:]:
self.__addNonSlipLink(previous, item, label=self.successor)
self.__addNonSlipLink(item, previous, label=self.predecessor)
previous = item
</DeepExtract>
<DeepExtract>
previous = self.numbers[0]
for item in self.numbers[1:]:
self.__addNonSlipLink(previous, item, label=self.successor)
self.__addNonSlipLink(item, previous, label=self.predecessor)
previous = item
</DeepExtract>
for letter in self.letters:
<DeepExtract>
categoryLength = self.letterCategory.conceptualDepth - letter.conceptualDepth
self.__addCategoryLink(letter, self.letterCategory, categoryLength)
link = self.__addLink(self.letterCategory, letter, None, 97.0)
self.letterCategory.instanceLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.samenessGroup, self.letterCategory, None, 50.0)
self.samenessGroup.categoryLinks += [link]
</DeepExtract>
for number in self.numbers:
<DeepExtract>
categoryLength = self.length.conceptualDepth - number.conceptualDepth
self.__addCategoryLink(number, self.length, categoryLength)
link = self.__addLink(self.length, number, None, length)
self.length.instanceLinks += [link]
</DeepExtract>
groups = [self.predecessorGroup, self.successorGroup, self.samenessGroup]
for group in groups:
<DeepExtract>
link = self.__addLink(group, self.length, label, 95.0)
group.lateralNonSlipLinks += [link]
</DeepExtract>
opposites = [(self.first, self.last), (self.leftmost, self.rightmost), (self.left, self.right), (self.successor, self.predecessor), (self.successorGroup, self.predecessorGroup)]
for (a, b) in opposites:
<DeepExtract>
self.__addSlipLink(a, b, label=self.opposite)
self.__addSlipLink(b, a, label=self.opposite)
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.letters[0], self.first, None, 75.0)
self.letters[0].propertyLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.letters[-1], self.last, None, 75.0)
self.letters[-1].propertyLinks += [link]
</DeepExtract>
links = [(self.objectCategory, self.letter), (self.objectCategory, self.group), (self.stringPositionCategory, self.leftmost), (self.stringPositionCategory, self.rightmost), (self.stringPositionCategory, self.middle), (self.stringPositionCategory, self.single), (self.stringPositionCategory, self.whole), (self.alphabeticPositionCategory, self.first), (self.alphabeticPositionCategory, self.last), (self.directionCategory, self.left), (self.directionCategory, self.right), (self.bondCategory, self.predecessor), (self.bondCategory, self.successor), (self.bondCategory, self.sameness), (self.groupCategory, self.predecessorGroup), (self.groupCategory, self.successorGroup), (self.groupCategory, self.samenessGroup), (self.bondFacet, self.letterCategory), (self.bondFacet, self.length)]
for (a, b) in links:
<DeepExtract>
categoryLength = a.conceptualDepth - b.conceptualDepth
self.__addCategoryLink(b, a, categoryLength)
link = self.__addLink(a, b, None, length)
a.instanceLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.sameness, self.samenessGroup, self.groupCategory, 30.0)
self.sameness.lateralNonSlipLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.successor, self.successorGroup, self.groupCategory, 60.0)
self.successor.lateralNonSlipLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.predecessor, self.predecessorGroup, self.groupCategory, 60.0)
self.predecessor.lateralNonSlipLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.samenessGroup, self.sameness, self.bondCategory, 90.0)
self.samenessGroup.lateralNonSlipLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.successorGroup, self.successor, self.bondCategory, 90.0)
self.successorGroup.lateralNonSlipLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.predecessorGroup, self.predecessor, self.bondCategory, 90.0)
self.predecessorGroup.lateralNonSlipLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.letterCategory, self.length, label, 95.0)
self.letterCategory.lateralSlipLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.length, self.letterCategory, label, 95.0)
self.length.lateralSlipLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.letter, self.group, label, 90.0)
self.letter.lateralSlipLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.group, self.letter, label, 90.0)
self.group.lateralSlipLinks += [link]
</DeepExtract>
<DeepExtract>
self.__addNonSlipLink(self.left, self.leftmost, length=90.0)
self.__addNonSlipLink(self.leftmost, self.left, length=90.0)
</DeepExtract>
<DeepExtract>
self.__addNonSlipLink(self.right, self.rightmost, length=90.0)
self.__addNonSlipLink(self.rightmost, self.right, length=90.0)
</DeepExtract>
<DeepExtract>
self.__addNonSlipLink(self.right, self.leftmost, length=100.0)
self.__addNonSlipLink(self.leftmost, self.right, length=100.0)
</DeepExtract>
<DeepExtract>
self.__addNonSlipLink(self.left, self.rightmost, length=100.0)
self.__addNonSlipLink(self.rightmost, self.left, length=100.0)
</DeepExtract>
<DeepExtract>
self.__addNonSlipLink(self.leftmost, self.first, length=100.0)
self.__addNonSlipLink(self.first, self.leftmost, length=100.0)
</DeepExtract>
<DeepExtract>
self.__addNonSlipLink(self.rightmost, self.first, length=100.0)
self.__addNonSlipLink(self.first, self.rightmost, length=100.0)
</DeepExtract>
<DeepExtract>
self.__addNonSlipLink(self.leftmost, self.last, length=100.0)
self.__addNonSlipLink(self.last, self.leftmost, length=100.0)
</DeepExtract>
<DeepExtract>
self.__addNonSlipLink(self.rightmost, self.last, length=100.0)
self.__addNonSlipLink(self.last, self.rightmost, length=100.0)
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.single, self.whole, label, 90.0)
self.single.lateralSlipLinks += [link]
</DeepExtract>
<DeepExtract>
link = self.__addLink(self.whole, self.single, label, 90.0)
self.whole.lateralSlipLinks += [link]
</DeepExtract>
|
def __addInitialLinks(self):
self.sliplinks = []
previous = self.letters[0]
for item in self.letters[1:]:
self.__addNonSlipLink(previous, item, label=self.successor)
self.__addNonSlipLink(item, previous, label=self.predecessor)
previous = item
previous = self.numbers[0]
for item in self.numbers[1:]:
self.__addNonSlipLink(previous, item, label=self.successor)
self.__addNonSlipLink(item, previous, label=self.predecessor)
previous = item
for letter in self.letters:
categoryLength = self.letterCategory.conceptualDepth - letter.conceptualDepth
self.__addCategoryLink(letter, self.letterCategory, categoryLength)
link = self.__addLink(self.letterCategory, letter, None, 97.0)
self.letterCategory.instanceLinks += [link]
link = self.__addLink(self.samenessGroup, self.letterCategory, None, 50.0)
self.samenessGroup.categoryLinks += [link]
for number in self.numbers:
categoryLength = self.length.conceptualDepth - number.conceptualDepth
self.__addCategoryLink(number, self.length, categoryLength)
link = self.__addLink(self.length, number, None, length)
self.length.instanceLinks += [link]
groups = [self.predecessorGroup, self.successorGroup, self.samenessGroup]
for group in groups:
link = self.__addLink(group, self.length, label, 95.0)
group.lateralNonSlipLinks += [link]
opposites = [(self.first, self.last), (self.leftmost, self.rightmost), (self.left, self.right), (self.successor, self.predecessor), (self.successorGroup, self.predecessorGroup)]
for (a, b) in opposites:
self.__addSlipLink(a, b, label=self.opposite)
self.__addSlipLink(b, a, label=self.opposite)
link = self.__addLink(self.letters[0], self.first, None, 75.0)
self.letters[0].propertyLinks += [link]
link = self.__addLink(self.letters[-1], self.last, None, 75.0)
self.letters[-1].propertyLinks += [link]
links = [(self.objectCategory, self.letter), (self.objectCategory, self.group), (self.stringPositionCategory, self.leftmost), (self.stringPositionCategory, self.rightmost), (self.stringPositionCategory, self.middle), (self.stringPositionCategory, self.single), (self.stringPositionCategory, self.whole), (self.alphabeticPositionCategory, self.first), (self.alphabeticPositionCategory, self.last), (self.directionCategory, self.left), (self.directionCategory, self.right), (self.bondCategory, self.predecessor), (self.bondCategory, self.successor), (self.bondCategory, self.sameness), (self.groupCategory, self.predecessorGroup), (self.groupCategory, self.successorGroup), (self.groupCategory, self.samenessGroup), (self.bondFacet, self.letterCategory), (self.bondFacet, self.length)]
for (a, b) in links:
categoryLength = a.conceptualDepth - b.conceptualDepth
self.__addCategoryLink(b, a, categoryLength)
link = self.__addLink(a, b, None, length)
a.instanceLinks += [link]
link = self.__addLink(self.sameness, self.samenessGroup, self.groupCategory, 30.0)
self.sameness.lateralNonSlipLinks += [link]
link = self.__addLink(self.successor, self.successorGroup, self.groupCategory, 60.0)
self.successor.lateralNonSlipLinks += [link]
link = self.__addLink(self.predecessor, self.predecessorGroup, self.groupCategory, 60.0)
self.predecessor.lateralNonSlipLinks += [link]
link = self.__addLink(self.samenessGroup, self.sameness, self.bondCategory, 90.0)
self.samenessGroup.lateralNonSlipLinks += [link]
link = self.__addLink(self.successorGroup, self.successor, self.bondCategory, 90.0)
self.successorGroup.lateralNonSlipLinks += [link]
link = self.__addLink(self.predecessorGroup, self.predecessor, self.bondCategory, 90.0)
self.predecessorGroup.lateralNonSlipLinks += [link]
link = self.__addLink(self.letterCategory, self.length, label, 95.0)
self.letterCategory.lateralSlipLinks += [link]
link = self.__addLink(self.length, self.letterCategory, label, 95.0)
self.length.lateralSlipLinks += [link]
link = self.__addLink(self.letter, self.group, label, 90.0)
self.letter.lateralSlipLinks += [link]
link = self.__addLink(self.group, self.letter, label, 90.0)
self.group.lateralSlipLinks += [link]
self.__addNonSlipLink(self.left, self.leftmost, length=90.0)
self.__addNonSlipLink(self.leftmost, self.left, length=90.0)
self.__addNonSlipLink(self.right, self.rightmost, length=90.0)
self.__addNonSlipLink(self.rightmost, self.right, length=90.0)
self.__addNonSlipLink(self.right, self.leftmost, length=100.0)
self.__addNonSlipLink(self.leftmost, self.right, length=100.0)
self.__addNonSlipLink(self.left, self.rightmost, length=100.0)
self.__addNonSlipLink(self.rightmost, self.left, length=100.0)
self.__addNonSlipLink(self.leftmost, self.first, length=100.0)
self.__addNonSlipLink(self.first, self.leftmost, length=100.0)
self.__addNonSlipLink(self.rightmost, self.first, length=100.0)
self.__addNonSlipLink(self.first, self.rightmost, length=100.0)
self.__addNonSlipLink(self.leftmost, self.last, length=100.0)
self.__addNonSlipLink(self.last, self.leftmost, length=100.0)
self.__addNonSlipLink(self.rightmost, self.last, length=100.0)
self.__addNonSlipLink(self.last, self.rightmost, length=100.0)
link = self.__addLink(self.single, self.whole, label, 90.0)
self.single.lateralSlipLinks += [link]
link = self.__addLink(self.whole, self.single, label, 90.0)
self.whole.lateralSlipLinks += [link]
|
copycat
|
positive
|
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
"""Evaluate bounding box detection."""
logger.info('Evaluating detections')
not_comp = not cfg.TEST.COMPETITION_MODE
if _use_json_dataset_evaluator(dataset):
coco_eval = json_dataset_evaluator.evaluate_boxes(dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp)
<DeepExtract>
res = _empty_box_results()
if coco_eval is not None:
s = coco_eval.stats
res['box']['AP'] = s[COCO_AP]
res['box']['AP50'] = s[COCO_AP50]
res['box']['AP75'] = s[COCO_AP75]
res['box']['APs'] = s[COCO_APS]
res['box']['APm'] = s[COCO_APM]
res['box']['APl'] = s[COCO_APL]
box_results = res
</DeepExtract>
elif _use_cityscapes_evaluator(dataset):
logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
coco_eval = json_dataset_evaluator.evaluate_boxes(dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp)
<DeepExtract>
res = _empty_box_results()
if coco_eval is not None:
s = coco_eval.stats
res['box']['AP'] = s[COCO_AP]
res['box']['AP50'] = s[COCO_AP50]
res['box']['AP75'] = s[COCO_AP75]
res['box']['APs'] = s[COCO_APS]
res['box']['APm'] = s[COCO_APM]
res['box']['APl'] = s[COCO_APL]
box_results = res
</DeepExtract>
elif _use_voc_evaluator(dataset):
voc_eval = voc_dataset_evaluator.evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=use_matlab)
<DeepExtract>
box_results = _empty_box_results()
</DeepExtract>
else:
raise NotImplementedError('No evaluator for dataset: {}'.format(dataset.name))
return OrderedDict([(dataset.name, box_results)])
|
def evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=False):
"""Evaluate bounding box detection."""
logger.info('Evaluating detections')
not_comp = not cfg.TEST.COMPETITION_MODE
if _use_json_dataset_evaluator(dataset):
coco_eval = json_dataset_evaluator.evaluate_boxes(dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp)
res = _empty_box_results()
if coco_eval is not None:
s = coco_eval.stats
res['box']['AP'] = s[COCO_AP]
res['box']['AP50'] = s[COCO_AP50]
res['box']['AP75'] = s[COCO_AP75]
res['box']['APs'] = s[COCO_APS]
res['box']['APm'] = s[COCO_APM]
res['box']['APl'] = s[COCO_APL]
box_results = res
elif _use_cityscapes_evaluator(dataset):
logger.warn('Cityscapes bbox evaluated using COCO metrics/conversions')
coco_eval = json_dataset_evaluator.evaluate_boxes(dataset, all_boxes, output_dir, use_salt=not_comp, cleanup=not_comp)
res = _empty_box_results()
if coco_eval is not None:
s = coco_eval.stats
res['box']['AP'] = s[COCO_AP]
res['box']['AP50'] = s[COCO_AP50]
res['box']['AP75'] = s[COCO_AP75]
res['box']['APs'] = s[COCO_APS]
res['box']['APm'] = s[COCO_APM]
res['box']['APl'] = s[COCO_APL]
box_results = res
elif _use_voc_evaluator(dataset):
voc_eval = voc_dataset_evaluator.evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=use_matlab)
box_results = _empty_box_results()
else:
raise NotImplementedError('No evaluator for dataset: {}'.format(dataset.name))
return OrderedDict([(dataset.name, box_results)])
|
AIC2018_iamai
|
positive
|
@classmethod
def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
(filename, data, content_type) = value
else:
(filename, data) = value
<DeepExtract>
if filename:
content_type = mimetypes.guess_type(filename)[0] or default
content_type = default
</DeepExtract>
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename, header_formatter=header_formatter)
request_param.make_multipart(content_type=content_type)
return request_param
|
@classmethod
def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
(filename, data, content_type) = value
else:
(filename, data) = value
if filename:
content_type = mimetypes.guess_type(filename)[0] or default
content_type = default
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename, header_formatter=header_formatter)
request_param.make_multipart(content_type=content_type)
return request_param
|
alexa-sky-hd
|
positive
|
def __getitem__(self, index):
img_id = self.images[index]
img_info = self.coco.loadImgs(ids=[img_id])[0]
img_path = os.path.join(self.img_dir, img_info['file_name'])
img = cv2.imread(img_path)
(height, width) = (img.shape[0], img.shape[1])
c = np.array([img.shape[1] / 2.0, img.shape[0] / 2.0])
s = max(img.shape[0], img.shape[1]) * 1.0
flipped = False
if self.split == 'train':
if not self.opt.not_rand_crop:
s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
<DeepExtract>
i = 1
while img.shape[1] - 128 // i <= 128 // i:
i *= 2
w_border = 128 // i
</DeepExtract>
<DeepExtract>
i = 1
while img.shape[0] - 128 // i <= 128 // i:
i *= 2
h_border = 128 // i
</DeepExtract>
c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
else:
sf = self.opt.scale
cf = self.opt.shift
s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
c[0] += img.shape[1] * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
c[1] += img.shape[0] * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
if np.random.random() < self.opt.flip:
flipped = True
img = img[:, ::-1, :]
trans_input = get_affine_transform(c, s, 0, [self.opt.input_res, self.opt.input_res])
inp = cv2.warpAffine(img, trans_input, (self.opt.input_res, self.opt.input_res), flags=cv2.INTER_LINEAR)
inp = inp.astype(np.float32) / 255.0
if self.split == 'train' and (not self.opt.no_color_aug):
color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
output_res = self.opt.output_res
num_classes = self.opt.num_classes
trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
num_hm = 1 if self.opt.agnostic_ex else num_classes
hm_t = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_l = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_b = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_r = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_c = np.zeros((num_classes, output_res, output_res), dtype=np.float32)
reg_t = np.zeros((self.max_objs, 2), dtype=np.float32)
reg_l = np.zeros((self.max_objs, 2), dtype=np.float32)
reg_b = np.zeros((self.max_objs, 2), dtype=np.float32)
reg_r = np.zeros((self.max_objs, 2), dtype=np.float32)
ind_t = np.zeros(self.max_objs, dtype=np.int64)
ind_l = np.zeros(self.max_objs, dtype=np.int64)
ind_b = np.zeros(self.max_objs, dtype=np.int64)
ind_r = np.zeros(self.max_objs, dtype=np.int64)
reg_mask = np.zeros(self.max_objs, dtype=np.uint8)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_objs)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian
for k in range(num_objs):
ann = anns[k]
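# ann['extreme_points'] holds four (x, y) points in top, left, bottom, right order;
# each gets its own heatmap (hm_t / hm_l / hm_b / hm_r).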
pts = np.array(ann['extreme_points'], dtype=np.float32).reshape(4, 2)
cls_id = int(self.cat_ids[ann['category_id']])
hm_id = 0 if self.opt.agnostic_ex else cls_id
if flipped:
pts[:, 0] = width - pts[:, 0] - 1
(pts[1], pts[3]) = (pts[3].copy(), pts[1].copy())
for j in range(4):
pts[j] = affine_transform(pts[j], trans_output)
pts = np.clip(pts, 0, self.opt.output_res - 1)
(h, w) = (pts[2, 1] - pts[0, 1], pts[3, 0] - pts[1, 0])
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
pt_int = pts.astype(np.int32)
draw_gaussian(hm_t[hm_id], pt_int[0], radius)
draw_gaussian(hm_l[hm_id], pt_int[1], radius)
draw_gaussian(hm_b[hm_id], pt_int[2], radius)
draw_gaussian(hm_r[hm_id], pt_int[3], radius)
reg_t[k] = pts[0] - pt_int[0]
reg_l[k] = pts[1] - pt_int[1]
reg_b[k] = pts[2] - pt_int[2]
reg_r[k] = pts[3] - pt_int[3]
ind_t[k] = pt_int[0, 1] * output_res + pt_int[0, 0]
ind_l[k] = pt_int[1, 1] * output_res + pt_int[1, 0]
ind_b[k] = pt_int[2, 1] * output_res + pt_int[2, 0]
ind_r[k] = pt_int[3, 1] * output_res + pt_int[3, 0]
ct = [int((pts[3, 0] + pts[1, 0]) / 2), int((pts[0, 1] + pts[2, 1]) / 2)]
draw_gaussian(hm_c[cls_id], ct, radius)
reg_mask[k] = 1
ret = {'input': inp, 'hm_t': hm_t, 'hm_l': hm_l, 'hm_b': hm_b, 'hm_r': hm_r, 'hm_c': hm_c}
if self.opt.reg_offset:
ret.update({'reg_mask': reg_mask, 'reg_t': reg_t, 'reg_l': reg_l, 'reg_b': reg_b, 'reg_r': reg_r, 'ind_t': ind_t, 'ind_l': ind_l, 'ind_b': ind_b, 'ind_r': ind_r})
return ret
|
def __getitem__(self, index):
img_id = self.images[index]
img_info = self.coco.loadImgs(ids=[img_id])[0]
img_path = os.path.join(self.img_dir, img_info['file_name'])
img = cv2.imread(img_path)
(height, width) = (img.shape[0], img.shape[1])
c = np.array([img.shape[1] / 2.0, img.shape[0] / 2.0])
s = max(img.shape[0], img.shape[1]) * 1.0
flipped = False
if self.split == 'train':
if not self.opt.not_rand_crop:
s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
i = 1
while img.shape[1] - 128 // i <= 128 // i:
i *= 2
w_border = 128 // i
i = 1
while img.shape[0] - 128 // i <= 128 // i:
i *= 2
h_border = 128 // i
c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
else:
sf = self.opt.scale
cf = self.opt.shift
s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
c[0] += img.shape[1] * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
c[1] += img.shape[0] * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
if np.random.random() < self.opt.flip:
flipped = True
img = img[:, ::-1, :]
trans_input = get_affine_transform(c, s, 0, [self.opt.input_res, self.opt.input_res])
inp = cv2.warpAffine(img, trans_input, (self.opt.input_res, self.opt.input_res), flags=cv2.INTER_LINEAR)
inp = inp.astype(np.float32) / 255.0
if self.split == 'train' and (not self.opt.no_color_aug):
color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
output_res = self.opt.output_res
num_classes = self.opt.num_classes
trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
num_hm = 1 if self.opt.agnostic_ex else num_classes
hm_t = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_l = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_b = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_r = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_c = np.zeros((num_classes, output_res, output_res), dtype=np.float32)
reg_t = np.zeros((self.max_objs, 2), dtype=np.float32)
reg_l = np.zeros((self.max_objs, 2), dtype=np.float32)
reg_b = np.zeros((self.max_objs, 2), dtype=np.float32)
reg_r = np.zeros((self.max_objs, 2), dtype=np.float32)
ind_t = np.zeros(self.max_objs, dtype=np.int64)
ind_l = np.zeros(self.max_objs, dtype=np.int64)
ind_b = np.zeros(self.max_objs, dtype=np.int64)
ind_r = np.zeros(self.max_objs, dtype=np.int64)
reg_mask = np.zeros(self.max_objs, dtype=np.uint8)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_objs)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian
for k in range(num_objs):
ann = anns[k]
pts = np.array(ann['extreme_points'], dtype=np.float32).reshape(4, 2)
cls_id = int(self.cat_ids[ann['category_id']])
hm_id = 0 if self.opt.agnostic_ex else cls_id
if flipped:
pts[:, 0] = width - pts[:, 0] - 1
(pts[1], pts[3]) = (pts[3].copy(), pts[1].copy())
for j in range(4):
pts[j] = affine_transform(pts[j], trans_output)
pts = np.clip(pts, 0, self.opt.output_res - 1)
(h, w) = (pts[2, 1] - pts[0, 1], pts[3, 0] - pts[1, 0])
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
pt_int = pts.astype(np.int32)
draw_gaussian(hm_t[hm_id], pt_int[0], radius)
draw_gaussian(hm_l[hm_id], pt_int[1], radius)
draw_gaussian(hm_b[hm_id], pt_int[2], radius)
draw_gaussian(hm_r[hm_id], pt_int[3], radius)
reg_t[k] = pts[0] - pt_int[0]
reg_l[k] = pts[1] - pt_int[1]
reg_b[k] = pts[2] - pt_int[2]
reg_r[k] = pts[3] - pt_int[3]
ind_t[k] = pt_int[0, 1] * output_res + pt_int[0, 0]
ind_l[k] = pt_int[1, 1] * output_res + pt_int[1, 0]
ind_b[k] = pt_int[2, 1] * output_res + pt_int[2, 0]
ind_r[k] = pt_int[3, 1] * output_res + pt_int[3, 0]
ct = [int((pts[3, 0] + pts[1, 0]) / 2), int((pts[0, 1] + pts[2, 1]) / 2)]
draw_gaussian(hm_c[cls_id], ct, radius)
reg_mask[k] = 1
ret = {'input': inp, 'hm_t': hm_t, 'hm_l': hm_l, 'hm_b': hm_b, 'hm_r': hm_r, 'hm_c': hm_c}
if self.opt.reg_offset:
ret.update({'reg_mask': reg_mask, 'reg_t': reg_t, 'reg_l': reg_l, 'reg_b': reg_b, 'reg_r': reg_r, 'ind_t': ind_t, 'ind_l': ind_l, 'ind_b': ind_b, 'ind_r': ind_r})
return ret
|
CenterNet-CondInst
|
positive
|
def test_filter_file(get_product_flat):
producer = models.Producer.objects.create()
models.Product.objects.create(name='a', producer=producer, image='fred.jpg')
models.Product.objects.create(name='b', producer=producer, image='bob.jpg')
<DeepExtract>
def helper(queries, *args, **kwargs):
res = 1(queries, *args, **kwargs)
if res['body'] == []:
assert res['cols'] == []
assert res['rows'] == []
data = []
else:
assert res['cols'] == [[]]
data = [r + b for (r, b) in zip(res['rows'], res['body'][0])]
data = helper
</DeepExtract>
<DeepExtract>
assert sorted(data, key=str) == sorted([['b', '/media/bob.jpg']], key=str)
</DeepExtract>
|
def test_filter_file(get_product_flat):
producer = models.Producer.objects.create()
models.Product.objects.create(name='a', producer=producer, image='fred.jpg')
models.Product.objects.create(name='b', producer=producer, image='bob.jpg')
def helper(queries, *args, **kwargs):
res = 1(queries, *args, **kwargs)
if res['body'] == []:
assert res['cols'] == []
assert res['rows'] == []
data = []
else:
assert res['cols'] == [[]]
data = [r + b for (r, b) in zip(res['rows'], res['body'][0])]
data = helper
assert sorted(data, key=str) == sorted([['b', '/media/bob.jpg']], key=str)
|
django-data-browser
|
positive
|
def importable_name(cls):
"""
>>> class Example(object):
... pass
>>> ex = Example()
>>> importable_name(ex.__class__) == 'jsonpickle.util.Example'
True
>>> importable_name(type(25)) == '__builtin__.int'
True
>>> importable_name(None.__class__) == '__builtin__.NoneType'
True
>>> importable_name(False.__class__) == '__builtin__.bool'
True
>>> importable_name(AttributeError) == '__builtin__.AttributeError'
True
"""
name = cls.__name__
<DeepExtract>
if cls.__module__ == 'builtins' or cls.__module__ == 'exceptions':
cls.__module__ = '__builtin__'
else:
cls.__module__ = cls.__module__
</DeepExtract>
return '%s.%s' % (module, name)
|
def importable_name(cls):
"""
>>> class Example(object):
... pass
>>> ex = Example()
>>> importable_name(ex.__class__) == 'jsonpickle.util.Example'
True
>>> importable_name(type(25)) == '__builtin__.int'
True
>>> importable_name(None.__class__) == '__builtin__.NoneType'
True
>>> importable_name(False.__class__) == '__builtin__.bool'
True
>>> importable_name(AttributeError) == '__builtin__.AttributeError'
True
"""
name = cls.__name__
if cls.__module__ == 'builtins' or cls.__module__ == 'exceptions':
cls.__module__ = '__builtin__'
else:
cls.__module__ = cls.__module__
return '%s.%s' % (module, name)
|
appkernel
|
positive
|
def visit_block_quote(self, node):
self.set_first_last(node)
self.sp()
<DeepExtract>
self.context.append('{')
self.last_output_char = '{'
</DeepExtract>
<DeepExtract>
self.indent(by)
self.rindent(by)
self.noindent()
</DeepExtract>
|
def visit_block_quote(self, node):
self.set_first_last(node)
self.sp()
self.context.append('{')
self.last_output_char = '{'
self.indent(by)
self.rindent(by)
self.noindent()
|
ebookmaker
|
positive
|
def ls_tree(reference, path=None, directory=None):
"""
Returns a dictionary of files and folders for a given reference and path.
Implemented using ``git ls-tree``. If an invalid reference and/or path
None is returned.
:param reference: git reference to pull from (branch, tag, or commit)
:param path: tree to list
:param directory: directory in which to run this command
:returns: dict if a directory (or a reference) or None if it does not exist
:raises: subprocess.CalledProcessError if any git calls fail
:raises: RuntimeError if the output from git is not what we expected
"""
<DeepExtract>
if type(reference) == str:
reference = [reference]
debug('track_branches(' + str(reference) + ', ' + str(directory) + ')')
if reference == []:
return
current_branch = get_current_branch(directory)
try:
local_branches = get_branches(local_only=True, directory=directory)
all_branches = get_branches(local_only=False, directory=directory)
untracked_branches = []
for branch in all_branches:
if branch.startswith('remotes/'):
if branch.count('/') >= 2:
branch = '/'.join(branch.split('/')[2:])
if branch not in local_branches:
untracked_branches.append(branch)
if reference is not None:
branches_to_track = []
for untracked in untracked_branches:
if untracked in reference:
branches_to_track.append(untracked)
else:
branches_to_track = untracked_branches
debug('Tracking branches: ' + str(branches_to_track))
for branch in branches_to_track:
checkout(branch, directory=directory)
finally:
if current_branch:
checkout(current_branch, directory=directory)
</DeepExtract>
cmd = 'git ls-tree ' + reference
if path is not None and path != '':
cmd += ':' + path
(retcode, out, err) = execute_command(cmd, autofail=False, silent_error=True, cwd=directory, return_io=True)
if retcode != 0:
return None
items = {}
for line in out.splitlines():
tokens = line.split()
if len(tokens) != 4:
return None
if tokens[1] not in ['blob', 'tree']:
raise RuntimeError('item not a blob or tree')
if tokens[3] in items:
raise RuntimeError('duplicate name in ls tree')
items[tokens[3]] = 'file' if tokens[1] == 'blob' else 'directory'
return items
|
def ls_tree(reference, path=None, directory=None):
"""
Returns a dictionary of files and folders for a given reference and path.
Implemented using ``git ls-tree``. If an invalid reference and/or path
None is returned.
:param reference: git reference to pull from (branch, tag, or commit)
:param path: tree to list
:param directory: directory in which to run this command
:returns: dict if a directory (or a reference) or None if it does not exist
:raises: subprocess.CalledProcessError if any git calls fail
:raises: RuntimeError if the output from git is not what we expected
"""
if type(reference) == str:
reference = [reference]
debug('track_branches(' + str(reference) + ', ' + str(directory) + ')')
if reference == []:
return
current_branch = get_current_branch(directory)
try:
local_branches = get_branches(local_only=True, directory=directory)
all_branches = get_branches(local_only=False, directory=directory)
untracked_branches = []
for branch in all_branches:
if branch.startswith('remotes/'):
if branch.count('/') >= 2:
branch = '/'.join(branch.split('/')[2:])
if branch not in local_branches:
untracked_branches.append(branch)
if reference is not None:
branches_to_track = []
for untracked in untracked_branches:
if untracked in reference:
branches_to_track.append(untracked)
else:
branches_to_track = untracked_branches
debug('Tracking branches: ' + str(branches_to_track))
for branch in branches_to_track:
checkout(branch, directory=directory)
finally:
if current_branch:
checkout(current_branch, directory=directory)
cmd = 'git ls-tree ' + reference
if path is not None and path != '':
cmd += ':' + path
(retcode, out, err) = execute_command(cmd, autofail=False, silent_error=True, cwd=directory, return_io=True)
if retcode != 0:
return None
items = {}
for line in out.splitlines():
tokens = line.split()
if len(tokens) != 4:
return None
if tokens[1] not in ['blob', 'tree']:
raise RuntimeError('item not a blob or tree')
if tokens[3] in items:
raise RuntimeError('duplicate name in ls tree')
items[tokens[3]] = 'file' if tokens[1] == 'blob' else 'directory'
return items
|
bloom
|
positive
|
def main():
""" Main program. """
try:
<DeepExtract>
ap = argparse.ArgumentParser(prog='EbookMaker')
CommonCode.add_common_options(ap, CONFIG_FILES[1])
add_local_options(ap)
CommonCode.set_arg_defaults(ap, CONFIG_FILES[1])
global options
options.update(vars(CommonCode.parse_config_and_args(ap, CONFIG_FILES[0], {'proxies': None, 'xelatex': 'xelatex', 'mobigen': 'ebook-convert', 'mobilang': 'ebook-convert', 'mobikf8': 'ebook-convert', 'groff': 'groff', 'rhyming_dict': None, 'timestamp': datetime.datetime.today().isoformat()[:19]})))
if not re.search('^(https?|file):', options.url):
options.url = os.path.abspath(options.url)
</DeepExtract>
except configparser.Error as what:
error('Error in configuration file: %s', str(what))
return 1
Logger.set_log_level(options.verbose)
options.types = options.types or ['all']
options.types = CommonCode.add_dependencies(options.types, DEPENDENCIES, BUILD_ORDER)
debug('Building types: %s' % ' '.join(options.types))
start_time = datetime.datetime.now()
ParserFactory.load_parsers()
WriterFactory.load_writers()
PackagerFactory.load_packagers()
output_files = dict()
if options.is_job_queue:
job_queue = cPickle.load(sys.stdin.buffer)
else:
job_queue = []
for type_ in options.types:
job = CommonCode.Job(type_)
job.url = options.url
job.ebook = options.ebook
job.outputdir = options.outputdir
job_queue.append(job)
dc = None
for job in job_queue:
try:
info('Job starting for type %s from %s', job.type, job.url)
<DeepExtract>
url = job.url
parser = ParserFactory.ParserFactory.create(url)
try:
parser.parse()
except AttributeError as e:
raise Exception(f'the file {job.url} could not be found or was unparsable')
if options.is_job_queue:
dc = PGDCObject()
dc.load_from_database(job.ebook)
dc.source = job.source
dc.opf_identifier = job.opf_identifier
dc = dc
if options.coverpage_url:
parser._make_coverpage_link(coverpage_url=options.coverpage_url)
dc = DublinCore.GutenbergDublinCore()
try:
dc.load_from_rstheader(parser.unicode_content())
except (ValueError, UnicodeError):
debug('No RST header found.')
try:
dc.load_from_parser(parser)
except (ValueError, AttributeError, UnicodeError):
debug('No HTML header found.')
try:
dc.load_from_pgheader(parser.unicode_content())
except (ValueError, UnicodeError):
debug('No PG header found.')
dc.source = parser.attribs.url
dc.title = options.title or dc.title or 'NA'
if options.author:
dc.add_author(options.author, 'cre')
if not dc.authors:
dc.add_author('NA', 'cre')
dc.project_gutenberg_id = options.ebook or dc.project_gutenberg_id
if dc.project_gutenberg_id:
dc.opf_identifier = '%sebooks/%d' % (gg.PG_URL, dc.project_gutenberg_id)
else:
dc.opf_identifier = 'urn:mybooks:%s' % hashlib.md5(dc.source.encode('utf-8')).hexdigest()
if not dc.languages:
info('no language found, using default')
dc.add_lang_id('en')
dc = dc
</DeepExtract>
job.dc = dc
job.last_updated()
job.outputfile = job.outputfile or make_output_filename(job.type, dc)
output_files[job.type] = job.outputfile
if job.type.startswith('kindle'):
absoutputdir = os.path.abspath(job.outputdir)
if job.type == 'kindle.images' and 'epub.images' in output_files:
job.url = os.path.join(absoutputdir, output_files['epub.images'])
elif job.type == 'kindle.noimages' and 'epub.noimages' in output_files:
job.url = os.path.join(absoutputdir, output_files['epub.noimages'])
if job.type.startswith('kf8') and 'epub3.images' in output_files:
absoutputdir = os.path.abspath(job.outputdir)
job.url = os.path.join(absoutputdir, output_files['epub3.images'])
options.outputdir = job.outputdir
<DeepExtract>
log_handler = None
Logger.ebook = job.ebook
if job.logfile:
log_handler = open_log(os.path.join(os.path.abspath(job.outputdir), job.logfile))
debug('=== Building %s ===' % job.type)
start_time = datetime.datetime.now()
try:
if job.url:
spider = Spider.Spider(job)
for rewrite in options.rewrite:
(from_url, to_url) = rewrite.split('>')
spider.add_redirection(from_url, to_url)
attribs = parsers.ParserAttributes()
attribs.url = parsers.webify_url(job.url)
attribs.id = 'start'
if options.input_mediatype:
attribs.orig_mediatype = attribs.HeaderElement.from_str(options.input_mediatype)
spider.recursive_parse(attribs)
if job.type.split('.')[0] in ('epub', 'epub3', 'html', 'kindle', 'cover', 'pdf'):
elect_coverpage(spider, job.url, job.dc)
job.url = spider.redirect(job.url)
job.base_url = job.url
job.spider = spider
writer = WriterFactory.create(job.maintype)
writer.build(job)
if options.validate:
writer.validate(job)
packager = PackagerFactory.create(options.packager, job.type)
if packager:
packager.package(job)
if job.type == 'html.images':
options.html_images_list = list(job.spider.aux_file_iter())
if job.type.split('.')[0] == 'txt':
ParserFactory.ParserFactory.parsers = {}
except SkipOutputFormat as what:
warning('%s' % what)
except Exception as what:
exception('%s' % what)
end_time = datetime.datetime.now()
info(' %s made in %s' % (job.type, end_time - start_time))
if log_handler:
close_log(log_handler)
</DeepExtract>
if dc and hasattr(dc, 'session') and dc.session:
dc.session.close()
dc.session = None
except Exception as e:
critical('Job failed for type %s from %s', job.type, job.url)
exception(e)
continue
packager = PackagerFactory.create(options.packager, 'push')
if packager:
job = job_queue[0]
job.outputfile = '%d-final.zip' % dc.project_gutenberg_id
packager.package(job)
end_time = datetime.datetime.now()
info(' Finished jobs. Total time: %s' % (end_time - start_time))
return 0
|
def main():
""" Main program. """
try:
ap = argparse.ArgumentParser(prog='EbookMaker')
CommonCode.add_common_options(ap, CONFIG_FILES[1])
add_local_options(ap)
CommonCode.set_arg_defaults(ap, CONFIG_FILES[1])
global options
options.update(vars(CommonCode.parse_config_and_args(ap, CONFIG_FILES[0], {'proxies': None, 'xelatex': 'xelatex', 'mobigen': 'ebook-convert', 'mobilang': 'ebook-convert', 'mobikf8': 'ebook-convert', 'groff': 'groff', 'rhyming_dict': None, 'timestamp': datetime.datetime.today().isoformat()[:19]})))
if not re.search('^(https?|file):', options.url):
options.url = os.path.abspath(options.url)
except configparser.Error as what:
error('Error in configuration file: %s', str(what))
return 1
Logger.set_log_level(options.verbose)
options.types = options.types or ['all']
options.types = CommonCode.add_dependencies(options.types, DEPENDENCIES, BUILD_ORDER)
debug('Building types: %s' % ' '.join(options.types))
start_time = datetime.datetime.now()
ParserFactory.load_parsers()
WriterFactory.load_writers()
PackagerFactory.load_packagers()
output_files = dict()
if options.is_job_queue:
job_queue = cPickle.load(sys.stdin.buffer)
else:
job_queue = []
for type_ in options.types:
job = CommonCode.Job(type_)
job.url = options.url
job.ebook = options.ebook
job.outputdir = options.outputdir
job_queue.append(job)
dc = None
for job in job_queue:
try:
info('Job starting for type %s from %s', job.type, job.url)
url = job.url
parser = ParserFactory.ParserFactory.create(url)
try:
parser.parse()
except AttributeError as e:
raise Exception(f'the file {job.url} could not be found or was unparsable')
if options.is_job_queue:
dc = PGDCObject()
dc.load_from_database(job.ebook)
dc.source = job.source
dc.opf_identifier = job.opf_identifier
dc = dc
if options.coverpage_url:
parser._make_coverpage_link(coverpage_url=options.coverpage_url)
dc = DublinCore.GutenbergDublinCore()
try:
dc.load_from_rstheader(parser.unicode_content())
except (ValueError, UnicodeError):
debug('No RST header found.')
try:
dc.load_from_parser(parser)
except (ValueError, AttributeError, UnicodeError):
debug('No HTML header found.')
try:
dc.load_from_pgheader(parser.unicode_content())
except (ValueError, UnicodeError):
debug('No PG header found.')
dc.source = parser.attribs.url
dc.title = options.title or dc.title or 'NA'
if options.author:
dc.add_author(options.author, 'cre')
if not dc.authors:
dc.add_author('NA', 'cre')
dc.project_gutenberg_id = options.ebook or dc.project_gutenberg_id
if dc.project_gutenberg_id:
dc.opf_identifier = '%sebooks/%d' % (gg.PG_URL, dc.project_gutenberg_id)
else:
dc.opf_identifier = 'urn:mybooks:%s' % hashlib.md5(dc.source.encode('utf-8')).hexdigest()
if not dc.languages:
info('no language found, using default')
dc.add_lang_id('en')
dc = dc
job.dc = dc
job.last_updated()
job.outputfile = job.outputfile or make_output_filename(job.type, dc)
output_files[job.type] = job.outputfile
if job.type.startswith('kindle'):
absoutputdir = os.path.abspath(job.outputdir)
if job.type == 'kindle.images' and 'epub.images' in output_files:
job.url = os.path.join(absoutputdir, output_files['epub.images'])
elif job.type == 'kindle.noimages' and 'epub.noimages' in output_files:
job.url = os.path.join(absoutputdir, output_files['epub.noimages'])
if job.type.startswith('kf8') and 'epub3.images' in output_files:
absoutputdir = os.path.abspath(job.outputdir)
job.url = os.path.join(absoutputdir, output_files['epub3.images'])
options.outputdir = job.outputdir
log_handler = None
Logger.ebook = job.ebook
if job.logfile:
log_handler = open_log(os.path.join(os.path.abspath(job.outputdir), job.logfile))
debug('=== Building %s ===' % job.type)
start_time = datetime.datetime.now()
try:
if job.url:
spider = Spider.Spider(job)
for rewrite in options.rewrite:
(from_url, to_url) = rewrite.split('>')
spider.add_redirection(from_url, to_url)
attribs = parsers.ParserAttributes()
attribs.url = parsers.webify_url(job.url)
attribs.id = 'start'
if options.input_mediatype:
attribs.orig_mediatype = attribs.HeaderElement.from_str(options.input_mediatype)
spider.recursive_parse(attribs)
if job.type.split('.')[0] in ('epub', 'epub3', 'html', 'kindle', 'cover', 'pdf'):
elect_coverpage(spider, job.url, job.dc)
job.url = spider.redirect(job.url)
job.base_url = job.url
job.spider = spider
writer = WriterFactory.create(job.maintype)
writer.build(job)
if options.validate:
writer.validate(job)
packager = PackagerFactory.create(options.packager, job.type)
if packager:
packager.package(job)
if job.type == 'html.images':
options.html_images_list = list(job.spider.aux_file_iter())
if job.type.split('.')[0] == 'txt':
ParserFactory.ParserFactory.parsers = {}
except SkipOutputFormat as what:
warning('%s' % what)
except Exception as what:
exception('%s' % what)
end_time = datetime.datetime.now()
info(' %s made in %s' % (job.type, end_time - start_time))
if log_handler:
close_log(log_handler)
if dc and hasattr(dc, 'session') and dc.session:
dc.session.close()
dc.session = None
except Exception as e:
critical('Job failed for type %s from %s', job.type, job.url)
exception(e)
continue
packager = PackagerFactory.create(options.packager, 'push')
if packager:
job = job_queue[0]
job.outputfile = '%d-final.zip' % dc.project_gutenberg_id
packager.package(job)
end_time = datetime.datetime.now()
info(' Finished jobs. Total time: %s' % (end_time - start_time))
return 0
|
ebookmaker
|
positive
|
def test_reduce_data_dummyatlases():
n_jobs = 1
with tempfile.TemporaryDirectory() as datadir:
for n_timeframes in ([25, 24], [25, 25]):
n_voxels = 10
n_subjects = 2
n_components = 3
n_sessions = len(n_timeframes)
np.random.seed(0)
<DeepExtract>
n_sessions = len(n_timeframes)
cumsum_timeframes = np.cumsum([0] + n_timeframes)
slices_timeframes = [slice(cumsum_timeframes[i], cumsum_timeframes[i + 1]) for i in range(n_sessions)]
theta = np.linspace(-4 * np.pi, 4 * np.pi, int(np.sum(n_timeframes)))
z = np.linspace(-2, 2, int(np.sum(n_timeframes)))
r = z ** 2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
S = np.vstack((x, y, z))
W = []
X = []
for subject in range(n_subjects):
(Q, R) = np.linalg.qr(np.random.random((n_voxels, n_components)))
W.append(Q.T)
X_ = []
for session in range(n_sessions):
S_s = S[:, slices_timeframes[session]]
S_s = S_s - np.mean(S_s, axis=1, keepdims=True)
noise = noise_level * np.random.random((n_voxels, n_timeframes[session]))
noise = noise - np.mean(noise, axis=1, keepdims=True)
data = Q.dot(S_s) + noise
X_.append(data)
X.append(X_)
S = [S[:, s] - np.mean(S[:, s], axis=1, keepdims=True) for s in slices_timeframes]
if input_format == 'array':
paths = to_path(X, datadir)
(paths, _, _) = (paths, W, S)
elif input_format == 'list_of_list':
(paths, _, _) = (X, W, S)
elif input_format == 'list_of_array':
(paths, _, _) = ([np.concatenate([X[i][j].T for j in range(n_sessions)]).T for i in range(n_subjects)], W, S)
else:
raise ValueError('Wrong input_format')
</DeepExtract>
atlas = np.arange(1, n_voxels + 1)
data = reduce_data(paths, atlas=atlas, n_jobs=n_jobs, low_ram=False)
for i in range(n_subjects):
for j in range(n_sessions):
assert_array_almost_equal(data[i, j].T, np.load(paths[i, j]))
atlas = np.ones(n_voxels)
data = reduce_data(paths, atlas=atlas, n_jobs=n_jobs, low_ram=False)
for i in range(n_subjects):
for j in range(n_sessions):
assert_array_almost_equal(data[i, j].T.flatten(), np.mean(np.load(paths[i, j]), axis=0))
|
def test_reduce_data_dummyatlases():
n_jobs = 1
with tempfile.TemporaryDirectory() as datadir:
for n_timeframes in ([25, 24], [25, 25]):
n_voxels = 10
n_subjects = 2
n_components = 3
n_sessions = len(n_timeframes)
np.random.seed(0)
n_sessions = len(n_timeframes)
cumsum_timeframes = np.cumsum([0] + n_timeframes)
slices_timeframes = [slice(cumsum_timeframes[i], cumsum_timeframes[i + 1]) for i in range(n_sessions)]
theta = np.linspace(-4 * np.pi, 4 * np.pi, int(np.sum(n_timeframes)))
z = np.linspace(-2, 2, int(np.sum(n_timeframes)))
r = z ** 2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
S = np.vstack((x, y, z))
W = []
X = []
for subject in range(n_subjects):
(Q, R) = np.linalg.qr(np.random.random((n_voxels, n_components)))
W.append(Q.T)
X_ = []
for session in range(n_sessions):
S_s = S[:, slices_timeframes[session]]
S_s = S_s - np.mean(S_s, axis=1, keepdims=True)
noise = noise_level * np.random.random((n_voxels, n_timeframes[session]))
noise = noise - np.mean(noise, axis=1, keepdims=True)
data = Q.dot(S_s) + noise
X_.append(data)
X.append(X_)
S = [S[:, s] - np.mean(S[:, s], axis=1, keepdims=True) for s in slices_timeframes]
if input_format == 'array':
paths = to_path(X, datadir)
(paths, _, _) = (paths, W, S)
elif input_format == 'list_of_list':
(paths, _, _) = (X, W, S)
elif input_format == 'list_of_array':
(paths, _, _) = ([np.concatenate([X[i][j].T for j in range(n_sessions)]).T for i in range(n_subjects)], W, S)
else:
raise ValueError('Wrong input_format')
atlas = np.arange(1, n_voxels + 1)
data = reduce_data(paths, atlas=atlas, n_jobs=n_jobs, low_ram=False)
for i in range(n_subjects):
for j in range(n_sessions):
assert_array_almost_equal(data[i, j].T, np.load(paths[i, j]))
atlas = np.ones(n_voxels)
data = reduce_data(paths, atlas=atlas, n_jobs=n_jobs, low_ram=False)
for i in range(n_subjects):
for j in range(n_sessions):
assert_array_almost_equal(data[i, j].T.flatten(), np.mean(np.load(paths[i, j]), axis=0))
|
brainiak
|
positive
|
def forward(self, interp, interp_crit):
<DeepExtract>
if self.grad_outputs.size() != interp_crit.size():
self.grad_outputs.resize_(interp_crit.size()).fill_(1.0)
grad_outputs = self.grad_outputs
</DeepExtract>
grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp, grad_outputs=grad_outputs, create_graph=True, retain_graph=True, only_inputs=True)[0]
grad_interp = grad_interp.view(grad_interp.size(0), -1)
grad_interp_norm = grad_interp.norm(2, dim=1)
loss = ((grad_interp_norm - 1) ** 2).mean()
return loss
|
def forward(self, interp, interp_crit):
if self.grad_outputs.size() != interp_crit.size():
self.grad_outputs.resize_(interp_crit.size()).fill_(1.0)
grad_outputs = self.grad_outputs
grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp, grad_outputs=grad_outputs, create_graph=True, retain_graph=True, only_inputs=True)[0]
grad_interp = grad_interp.view(grad_interp.size(0), -1)
grad_interp_norm = grad_interp.norm(2, dim=1)
loss = ((grad_interp_norm - 1) ** 2).mean()
return loss
|
DeFlow
|
positive
|
def _parse_labels(labels_string: str) -> Dict[str, str]:
labels: Dict[str, str] = {}
if '=' not in labels_string:
return labels
escaping = False
if '\\' in labels_string:
escaping = True
sub_labels = labels_string
try:
while sub_labels:
value_start = sub_labels.index('=')
label_name = sub_labels[:value_start]
sub_labels = sub_labels[value_start + 1:].lstrip()
quote_start = sub_labels.index('"') + 1
value_substr = sub_labels[quote_start:]
i = 0
while i < len(value_substr):
i = value_substr.index('"', i)
if not _is_character_escaped(value_substr, i):
break
i += 1
quote_end = i + 1
label_value = sub_labels[quote_start:quote_end]
if escaping:
<DeepExtract>
label_value = ESCAPING_RE.sub(replace_escape_sequence, label_value)
</DeepExtract>
labels[label_name.strip()] = label_value
sub_labels = sub_labels[quote_end + 1:]
next_comma = sub_labels.find(',') + 1
sub_labels = sub_labels[next_comma:].lstrip()
return labels
except ValueError:
raise ValueError('Invalid labels: %s' % labels_string)
|
def _parse_labels(labels_string: str) -> Dict[str, str]:
labels: Dict[str, str] = {}
if '=' not in labels_string:
return labels
escaping = False
if '\\' in labels_string:
escaping = True
sub_labels = labels_string
try:
while sub_labels:
value_start = sub_labels.index('=')
label_name = sub_labels[:value_start]
sub_labels = sub_labels[value_start + 1:].lstrip()
quote_start = sub_labels.index('"') + 1
value_substr = sub_labels[quote_start:]
i = 0
while i < len(value_substr):
i = value_substr.index('"', i)
if not _is_character_escaped(value_substr, i):
break
i += 1
quote_end = i + 1
label_value = sub_labels[quote_start:quote_end]
if escaping:
label_value = ESCAPING_RE.sub(replace_escape_sequence, label_value)
labels[label_name.strip()] = label_value
sub_labels = sub_labels[quote_end + 1:]
next_comma = sub_labels.find(',') + 1
sub_labels = sub_labels[next_comma:].lstrip()
return labels
except ValueError:
raise ValueError('Invalid labels: %s' % labels_string)
|
client_python
|
positive
|
def test_transpose(self):
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
<DeepExtract>
diff = self.M.transpose(FLIP_LEFT_RIGHT).get_mask_tensor() - self.P.transpose(FLIP_LEFT_RIGHT).get_mask_tensor()
diff = torch.sum(torch.abs(diff.float())).item()
diff_hor = diff
</DeepExtract>
<DeepExtract>
diff = self.M.transpose(FLIP_TOP_BOTTOM).get_mask_tensor() - self.P.transpose(FLIP_TOP_BOTTOM).get_mask_tensor()
diff = torch.sum(torch.abs(diff.float())).item()
diff_ver = diff
</DeepExtract>
self.assertTrue(diff_hor <= 53250.0)
self.assertTrue(diff_ver <= 42494.0)
|
def test_transpose(self):
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
diff = self.M.transpose(FLIP_LEFT_RIGHT).get_mask_tensor() - self.P.transpose(FLIP_LEFT_RIGHT).get_mask_tensor()
diff = torch.sum(torch.abs(diff.float())).item()
diff_hor = diff
diff = self.M.transpose(FLIP_TOP_BOTTOM).get_mask_tensor() - self.P.transpose(FLIP_TOP_BOTTOM).get_mask_tensor()
diff = torch.sum(torch.abs(diff.float())).item()
diff_ver = diff
self.assertTrue(diff_hor <= 53250.0)
self.assertTrue(diff_ver <= 42494.0)
|
Box_Discretization_Network
|
positive
|
def udp_server(self, bind_addr, addrinfo, port_bound_event, task=None):
task.set_daemon()
udp_sock = AsyncSocket(socket.socket(addrinfo.family, socket.SOCK_DGRAM))
while 1:
try:
udp_sock.bind((bind_addr, self.port))
except socket.error as exc:
if exc.errno == errno.EADDRINUSE:
logger.warning('Port %s seems to be used by another program ...', self.port)
else:
logger.warning('Error binding to port %s: %s ...', self.port, exc.errno)
yield task.sleep(5)
except Exception:
logger.warning('Could not bind to port %s: %s', self.port, traceback.format_exc())
yield task.sleep(5)
else:
break
if addrinfo.family == socket.AF_INET:
if self.ipv4_udp_multicast:
mreq = socket.inet_aton(addrinfo.broadcast) + socket.inet_aton(addrinfo.ip)
udp_sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
else:
mreq = socket.inet_pton(addrinfo.family, addrinfo.broadcast)
mreq += struct.pack('@I', addrinfo.ifn)
udp_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
try:
udp_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
except Exception:
pass
port_bound_event.set()
del port_bound_event
while 1:
try:
(msg, addr) = (yield udp_sock.recvfrom(1000))
except GeneratorExit:
break
if msg.startswith(b'PING:'):
try:
info = deserialize(msg[len(b'PING:'):])
if info['version'] != _dispy_version:
logger.warning('Ignoring %s due to version mismatch', addr[0])
continue
assert info['port'] > 0
assert info['ip_addr']
except Exception:
logger.debug('Ignoring node %s', addr[0])
continue
<DeepExtract>
auth = hashlib.sha1((self.secret + info['sign']).encode()).hexdigest().encode()
</DeepExtract>
node = self._nodes.get(info['ip_addr'], None)
if node and node.auth == auth:
continue
sock = AsyncSocket(socket.socket(addrinfo.family, socket.SOCK_STREAM), keyfile=self.keyfile, certfile=self.certfile)
sock.settimeout(MsgTimeout)
msg = {'version': _dispy_version, 'port': self.port, 'sign': self.sign, 'node_ip_addr': info['ip_addr']}
msg['ip_addrs'] = self.ip_addrs
try:
yield sock.connect((info['ip_addr'], info['port']))
yield sock.sendall(auth)
yield sock.send_msg(b'PING:' + serialize(msg))
except GeneratorExit:
break
except Exception:
logger.debug(traceback.format_exc())
finally:
sock.close()
elif msg.startswith(b'TERMINATED:'):
try:
info = deserialize(msg[len(b'TERMINATED:'):])
node = self._nodes[info['ip_addr']]
assert node.auth == auth_code(self.secret, info['sign'])
except Exception:
pass
else:
<DeepExtract>
if node.clusters:
dead_jobs = [_job for _job in self._sched_jobs.values() if _job.node is not None and _job.node.ip_addr == node.ip_addr]
clusters = list(node.clusters)
node.clusters.clear()
for cluster in clusters:
dispy_node = cluster._dispy_nodes.pop(node.ip_addr, None)
if not dispy_node:
continue
dispy_node.avail_cpus = dispy_node.cpus = dispy_node.busy = 0
if cluster.cluster_status:
self.worker_Q.put((cluster.cluster_status, (DispyNode.Closed, dispy_node, None)))
self.reschedule_jobs(dead_jobs)
for _job in node.pending_jobs:
cluster = self._clusters[_job.compute_id]
self.finish_job(cluster, _job, DispyJob.Cancelled)
if cluster.cluster_status:
dispy_node = cluster._dispy_nodes.get(node.ip_addr, None)
self.worker_Q.put((cluster.cluster_status, (DispyJob.Cancelled, dispy_node, _job.job)))
node.pending_jobs = []
self._nodes.pop(node.ip_addr, None)
</DeepExtract>
else:
pass
udp_sock.close()
|
def udp_server(self, bind_addr, addrinfo, port_bound_event, task=None):
task.set_daemon()
udp_sock = AsyncSocket(socket.socket(addrinfo.family, socket.SOCK_DGRAM))
while 1:
try:
udp_sock.bind((bind_addr, self.port))
except socket.error as exc:
if exc.errno == errno.EADDRINUSE:
logger.warning('Port %s seems to be used by another program ...', self.port)
else:
logger.warning('Error binding to port %s: %s ...', self.port, exc.errno)
yield task.sleep(5)
except Exception:
logger.warning('Could not bind to port %s: %s', self.port, traceback.format_exc())
yield task.sleep(5)
else:
break
if addrinfo.family == socket.AF_INET:
if self.ipv4_udp_multicast:
mreq = socket.inet_aton(addrinfo.broadcast) + socket.inet_aton(addrinfo.ip)
udp_sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
else:
mreq = socket.inet_pton(addrinfo.family, addrinfo.broadcast)
mreq += struct.pack('@I', addrinfo.ifn)
udp_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
try:
udp_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
except Exception:
pass
port_bound_event.set()
del port_bound_event
while 1:
try:
(msg, addr) = (yield udp_sock.recvfrom(1000))
except GeneratorExit:
break
if msg.startswith(b'PING:'):
try:
info = deserialize(msg[len(b'PING:'):])
if info['version'] != _dispy_version:
logger.warning('Ignoring %s due to version mismatch', addr[0])
continue
assert info['port'] > 0
assert info['ip_addr']
except Exception:
logger.debug('Ignoring node %s', addr[0])
continue
auth = hashlib.sha1((self.secret + info['sign']).encode()).hexdigest().encode()
node = self._nodes.get(info['ip_addr'], None)
if node and node.auth == auth:
continue
sock = AsyncSocket(socket.socket(addrinfo.family, socket.SOCK_STREAM), keyfile=self.keyfile, certfile=self.certfile)
sock.settimeout(MsgTimeout)
msg = {'version': _dispy_version, 'port': self.port, 'sign': self.sign, 'node_ip_addr': info['ip_addr']}
msg['ip_addrs'] = self.ip_addrs
try:
yield sock.connect((info['ip_addr'], info['port']))
yield sock.sendall(auth)
yield sock.send_msg(b'PING:' + serialize(msg))
except GeneratorExit:
break
except Exception:
logger.debug(traceback.format_exc())
finally:
sock.close()
elif msg.startswith(b'TERMINATED:'):
try:
info = deserialize(msg[len(b'TERMINATED:'):])
node = self._nodes[info['ip_addr']]
assert node.auth == auth_code(self.secret, info['sign'])
except Exception:
pass
else:
if node.clusters:
dead_jobs = [_job for _job in self._sched_jobs.values() if _job.node is not None and _job.node.ip_addr == node.ip_addr]
clusters = list(node.clusters)
node.clusters.clear()
for cluster in clusters:
dispy_node = cluster._dispy_nodes.pop(node.ip_addr, None)
if not dispy_node:
continue
dispy_node.avail_cpus = dispy_node.cpus = dispy_node.busy = 0
if cluster.cluster_status:
self.worker_Q.put((cluster.cluster_status, (DispyNode.Closed, dispy_node, None)))
self.reschedule_jobs(dead_jobs)
for _job in node.pending_jobs:
cluster = self._clusters[_job.compute_id]
self.finish_job(cluster, _job, DispyJob.Cancelled)
if cluster.cluster_status:
dispy_node = cluster._dispy_nodes.get(node.ip_addr, None)
self.worker_Q.put((cluster.cluster_status, (DispyJob.Cancelled, dispy_node, _job.job)))
node.pending_jobs = []
self._nodes.pop(node.ip_addr, None)
else:
pass
udp_sock.close()
|
dispy
|
positive
|
def is_playbook(filename: str) -> bool:
"""
Check if the file is a playbook.
Given a filename, it should return true if it looks like a playbook. The
function is not supposed to raise exceptions.
"""
playbooks_keys = {'gather_facts', 'hosts', 'import_playbook', 'post_tasks', 'pre_tasks', 'roles', 'tasks'}
if not isinstance(filename, str):
filename = str(filename)
try:
<DeepExtract>
dataloader = DataLoader()
if hasattr(dataloader, 'set_vault_password'):
dataloader.set_vault_password(DEFAULT_VAULT_PASSWORD)
f = dataloader.load_from_file(filename)
</DeepExtract>
except Exception as exc:
_logger.warning('Failed to load %s with %s, assuming is not a playbook.', filename, exc)
else:
if isinstance(f, AnsibleSequence) and hasattr(next(iter(f), {}), 'keys') and playbooks_keys.intersection(next(iter(f), {}).keys()):
return True
return False
|
def is_playbook(filename: str) -> bool:
"""
Check if the file is a playbook.
Given a filename, it should return true if it looks like a playbook. The
function is not supposed to raise exceptions.
"""
playbooks_keys = {'gather_facts', 'hosts', 'import_playbook', 'post_tasks', 'pre_tasks', 'roles', 'tasks'}
if not isinstance(filename, str):
filename = str(filename)
try:
dataloader = DataLoader()
if hasattr(dataloader, 'set_vault_password'):
dataloader.set_vault_password(DEFAULT_VAULT_PASSWORD)
f = dataloader.load_from_file(filename)
except Exception as exc:
_logger.warning('Failed to load %s with %s, assuming is not a playbook.', filename, exc)
else:
if isinstance(f, AnsibleSequence) and hasattr(next(iter(f), {}), 'keys') and playbooks_keys.intersection(next(iter(f), {}).keys()):
return True
return False
|
ansible-lint
|
positive
|
def __init__(self, backbone, neck=None, head=None, pretrained=None, base_momentum=0.996, **kwargs):
super(BYOL, self).__init__()
self.online_net = nn.Sequential(builder.build_backbone(backbone), builder.build_neck(neck))
self.target_net = nn.Sequential(builder.build_backbone(backbone), builder.build_neck(neck))
self.backbone = self.online_net[0]
for param in self.target_net.parameters():
param.requires_grad = False
self.head = builder.build_head(head)
<DeepExtract>
if pretrained is not None:
print_log('load model from: {}'.format(pretrained), logger='root')
self.online_net[0].init_weights(pretrained=pretrained)
self.online_net[1].init_weights(init_linear='kaiming')
for (param_ol, param_tgt) in zip(self.online_net.parameters(), self.target_net.parameters()):
param_tgt.data.copy_(param_ol.data)
self.head.init_weights()
</DeepExtract>
self.base_momentum = base_momentum
self.momentum = base_momentum
|
def __init__(self, backbone, neck=None, head=None, pretrained=None, base_momentum=0.996, **kwargs):
super(BYOL, self).__init__()
self.online_net = nn.Sequential(builder.build_backbone(backbone), builder.build_neck(neck))
self.target_net = nn.Sequential(builder.build_backbone(backbone), builder.build_neck(neck))
self.backbone = self.online_net[0]
for param in self.target_net.parameters():
param.requires_grad = False
self.head = builder.build_head(head)
if pretrained is not None:
print_log('load model from: {}'.format(pretrained), logger='root')
self.online_net[0].init_weights(pretrained=pretrained)
self.online_net[1].init_weights(init_linear='kaiming')
for (param_ol, param_tgt) in zip(self.online_net.parameters(), self.target_net.parameters()):
param_tgt.data.copy_(param_ol.data)
self.head.init_weights()
self.base_momentum = base_momentum
self.momentum = base_momentum
|
DenseCL
|
positive
|
def delete(self, request, **kwargs):
appSettings = self.get_app().appsettings_set.latest()
addresses = self.get_serializer().validate_whitelist(request.data.get('addresses'))
unfound_addresses = set(addresses) - set(appSettings.whitelist)
if len(unfound_addresses) != 0:
raise UnprocessableEntity('addresses {} does not exist in whitelist'.format(unfound_addresses))
<DeepExtract>
user = self.get_object()
serializer = self.get_serializer(user, many=False)
addresses = Response(serializer.data)
</DeepExtract>
appSettings.new(self.request.user, whitelist=addresses)
return Response(status=status.HTTP_204_NO_CONTENT)
|
def delete(self, request, **kwargs):
appSettings = self.get_app().appsettings_set.latest()
addresses = self.get_serializer().validate_whitelist(request.data.get('addresses'))
unfound_addresses = set(addresses) - set(appSettings.whitelist)
if len(unfound_addresses) != 0:
raise UnprocessableEntity('addresses {} does not exist in whitelist'.format(unfound_addresses))
user = self.get_object()
serializer = self.get_serializer(user, many=False)
addresses = Response(serializer.data)
appSettings.new(self.request.user, whitelist=addresses)
return Response(status=status.HTTP_204_NO_CONTENT)
|
controller
|
positive
|
def register_sigint_handler(self):
def signal_handler(signal, frame):
logger.info('SIGINT: Shutting down RoIDataLoader threads and exiting...')
<DeepExtract>
self.coordinator.request_stop()
self.coordinator.wait_for_stop()
self.close_blobs_queues()
for w in self._workers + self._enqueuers:
w.join()
</DeepExtract>
signal.signal(signal.SIGINT, signal_handler)
|
def register_sigint_handler(self):
def signal_handler(signal, frame):
logger.info('SIGINT: Shutting down RoIDataLoader threads and exiting...')
self.coordinator.request_stop()
self.coordinator.wait_for_stop()
self.close_blobs_queues()
for w in self._workers + self._enqueuers:
w.join()
signal.signal(signal.SIGINT, signal_handler)
|
Detectron-Cascade-RCNN
|
positive
|
def align_neg_y(modal, context, event, keys, func_data):
align_to_axis_normals(modal, 1, -1)
<DeepExtract>
modal._mouse_init = None
modal._mode_cache.clear()
keymap_refresh(modal)
modal._current_tool = modal._basic_tool
return
</DeepExtract>
return
|
def align_neg_y(modal, context, event, keys, func_data):
align_to_axis_normals(modal, 1, -1)
modal._mouse_init = None
modal._mode_cache.clear()
keymap_refresh(modal)
modal._current_tool = modal._basic_tool
return
return
|
Abnormal
|
positive
|
def test_add_consumer_with_valid_parameters_doesnt_fail(self):
<DeepExtract>
list_versions = [{'id': 'v1', 'status': 'CURRENT', 'min_version': '1.0', 'max_version': '1.1', 'links': []}]
self.list_versions.return_value = list_versions
</DeepExtract>
self.key_mgr.add_consumer(self.ctxt, self.secret_ref, self._get_custom_consumer_data())
|
def test_add_consumer_with_valid_parameters_doesnt_fail(self):
list_versions = [{'id': 'v1', 'status': 'CURRENT', 'min_version': '1.0', 'max_version': '1.1', 'links': []}]
self.list_versions.return_value = list_versions
self.key_mgr.add_consumer(self.ctxt, self.secret_ref, self._get_custom_consumer_data())
|
castellan
|
positive
|
@p(('copies', 'used_copies'), copies_pairs)
def test_memory_leak_warning_mock(self, copies, used_copies, monkeypatch):
def warn(w):
warn.warnings_list.append(w)
warn.warnings_list = []
gc.collect()
monkeypatch.setattr(warnings, 'warn', warn)
def apply_test():
types = [int, int, int, float, complex, float, type(thub), type(pytest)]
sig = [4, 3, 2, 7e-18, 9j, 0.2, thub, pytest]
data = thub(sig, copies)
assert isinstance(data, StreamTeeHub)
safe_copies = min(copies, used_copies)
assert all((all(types == data.map(type)) for n in xrange(safe_copies)))
if copies < used_copies:
with pytest.raises(IndexError):
Stream(data)
data.__del__()
data.__del__()
<DeepExtract>
types = [int, int, int, float, complex, float, type(thub), type(pytest)]
sig = [4, 3, 2, 7e-18, 9j, 0.2, thub, pytest]
data = thub(sig, copies)
assert isinstance(data, StreamTeeHub)
safe_copies = min(copies, used_copies)
assert all((all(types == data.map(type)) for n in xrange(safe_copies)))
if copies < used_copies:
with pytest.raises(IndexError):
Stream(data)
data.__del__()
data.__del__()
</DeepExtract>
gc.collect()
if copies > used_copies:
w = warn.warnings_list.pop()
assert isinstance(w, MemoryLeakWarning)
parts = ['StreamTeeHub', str(copies - used_copies)]
assert all((part in str(w) for part in parts))
assert not warn.warnings_list
|
@p(('copies', 'used_copies'), copies_pairs)
def test_memory_leak_warning_mock(self, copies, used_copies, monkeypatch):
def warn(w):
warn.warnings_list.append(w)
warn.warnings_list = []
gc.collect()
monkeypatch.setattr(warnings, 'warn', warn)
def apply_test():
types = [int, int, int, float, complex, float, type(thub), type(pytest)]
sig = [4, 3, 2, 7e-18, 9j, 0.2, thub, pytest]
data = thub(sig, copies)
assert isinstance(data, StreamTeeHub)
safe_copies = min(copies, used_copies)
assert all((all(types == data.map(type)) for n in xrange(safe_copies)))
if copies < used_copies:
with pytest.raises(IndexError):
Stream(data)
data.__del__()
data.__del__()
types = [int, int, int, float, complex, float, type(thub), type(pytest)]
sig = [4, 3, 2, 7e-18, 9j, 0.2, thub, pytest]
data = thub(sig, copies)
assert isinstance(data, StreamTeeHub)
safe_copies = min(copies, used_copies)
assert all((all(types == data.map(type)) for n in xrange(safe_copies)))
if copies < used_copies:
with pytest.raises(IndexError):
Stream(data)
data.__del__()
data.__del__()
gc.collect()
if copies > used_copies:
w = warn.warnings_list.pop()
assert isinstance(w, MemoryLeakWarning)
parts = ['StreamTeeHub', str(copies - used_copies)]
assert all((part in str(w) for part in parts))
assert not warn.warnings_list
|
audiolazy
|
positive
|
def get_input_ids(text):
if isinstance(text, str):
<DeepExtract>
all_special_tokens = self.all_special_tokens
text = self.prepare_for_tokenization(text, **kwargs)
def lowercase_text(t):
escaped_special_toks = [re.escape(s_tok) for s_tok in all_special_tokens]
pattern = '(' + '|'.join(escaped_special_toks) + ')|' + '(.+?)'
tokens = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), t)
if self.init_kwargs.get('do_lower_case', False):
text = lowercase_text(text)
def split_on_token(tok, text):
result = []
split_text = text.split(tok)
for (i, sub_text) in enumerate(split_text):
sub_text = sub_text.rstrip()
if i == 0 and (not sub_text):
result += [tok]
elif i == len(split_text) - 1:
if sub_text:
result += [sub_text]
else:
pass
else:
if sub_text:
result += [sub_text]
result += [tok]
tokens = result
def split_on_tokens(tok_list, text):
if not text.strip():
tokens = []
if not tok_list:
tokens = self._tokenize(text)
tokenized_text = []
text_list = [text]
for tok in tok_list:
tokenized_text = []
for sub_text in text_list:
if sub_text not in self.unique_added_tokens_encoder:
tokenized_text += split_on_token(tok, sub_text)
else:
tokenized_text += [sub_text]
text_list = tokenized_text
tokens = list(itertools.chain.from_iterable((self._tokenize(token) if token not in self.unique_added_tokens_encoder else [token] for token in tokenized_text)))
added_tokens = self.unique_added_tokens_encoder
tokenized_text = split_on_tokens(added_tokens, text)
tokens = tokenized_text
</DeepExtract>
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError('Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers.')
|
def get_input_ids(text):
if isinstance(text, str):
all_special_tokens = self.all_special_tokens
text = self.prepare_for_tokenization(text, **kwargs)
def lowercase_text(t):
escaped_special_toks = [re.escape(s_tok) for s_tok in all_special_tokens]
pattern = '(' + '|'.join(escaped_special_toks) + ')|' + '(.+?)'
tokens = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), t)
if self.init_kwargs.get('do_lower_case', False):
text = lowercase_text(text)
def split_on_token(tok, text):
result = []
split_text = text.split(tok)
for (i, sub_text) in enumerate(split_text):
sub_text = sub_text.rstrip()
if i == 0 and (not sub_text):
result += [tok]
elif i == len(split_text) - 1:
if sub_text:
result += [sub_text]
else:
pass
else:
if sub_text:
result += [sub_text]
result += [tok]
tokens = result
def split_on_tokens(tok_list, text):
if not text.strip():
tokens = []
if not tok_list:
tokens = self._tokenize(text)
tokenized_text = []
text_list = [text]
for tok in tok_list:
tokenized_text = []
for sub_text in text_list:
if sub_text not in self.unique_added_tokens_encoder:
tokenized_text += split_on_token(tok, sub_text)
else:
tokenized_text += [sub_text]
text_list = tokenized_text
tokens = list(itertools.chain.from_iterable((self._tokenize(token) if token not in self.unique_added_tokens_encoder else [token] for token in tokenized_text)))
added_tokens = self.unique_added_tokens_encoder
tokenized_text = split_on_tokens(added_tokens, text)
tokens = tokenized_text
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError('Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers.')
|
Cutoff
|
positive
|
def score_choices(self, choices, ctxs):
assert len(choices) == len(ctxs)
cnts = [int(x) for x in ctxs[0][0::2]]
(agree, scores) = (True, [0 for _ in range(len(ctxs))])
for i in range(len(cnts)):
n = 0
for (agent_id, (choice, ctx)) in enumerate(zip(choices, ctxs)):
<DeepExtract>
try:
taken = int(choice[i][choice[i].find('=') + 1:])
except:
taken = 0
</DeepExtract>
n += taken
scores[agent_id] += int(ctx[2 * i + 1]) * taken
agree = agree and n == 0
return (agree, scores)
|
def score_choices(self, choices, ctxs):
assert len(choices) == len(ctxs)
cnts = [int(x) for x in ctxs[0][0::2]]
(agree, scores) = (True, [0 for _ in range(len(ctxs))])
for i in range(len(cnts)):
n = 0
for (agent_id, (choice, ctx)) in enumerate(zip(choices, ctxs)):
try:
taken = int(choice[i][choice[i].find('=') + 1:])
except:
taken = 0
n += taken
scores[agent_id] += int(ctx[2 * i + 1]) * taken
agree = agree and n == 0
return (agree, scores)
|
end-to-end-negotiator
|
positive
|
def test_mem(self, low1=0, high1=16, low2=0, high2=100000):
self.driver = webdriver.Chrome()
<DeepExtract>
self.driver.get('http://www.emu86.org/')
</DeepExtract>
self.getById('subButton').click()
for i in range(0, NUM_TESTS):
a = random.randint(low2, high2)
b = random.randint(low1, high1)
message = ''
try:
int(hex(b).upper().split('X')[-1])
except Exception:
message = 'Not a valid value for decimal number system'
<DeepExtract>
inputBox = self.getById('memText')
self.enter_inputs(inputBox, hex(a).upper().split('X')[-1])
inputVal = self.getById('valueText')
self.enter_inputs(inputVal, hex(b).upper().split('X')[-1])
self.getById('setMem').click()
</DeepExtract>
try:
alert = self.driver.switch_to.alert
alert_message = alert.text
alert.accept()
self.assertEqual(message, alert_message)
except Exception:
<DeepExtract>
loc_val = self.driver.find_element_by_name(hex(a).upper().split('X')[-1])
</DeepExtract>
val = loc_val.get_attribute('value')
self.assertEqual(val, hex(b).upper().split('X')[-1])
self.assertEqual(message, '')
<DeepExtract>
self.driver.quit()
</DeepExtract>
|
def test_mem(self, low1=0, high1=16, low2=0, high2=100000):
self.driver = webdriver.Chrome()
self.driver.get('http://www.emu86.org/')
self.getById('subButton').click()
for i in range(0, NUM_TESTS):
a = random.randint(low2, high2)
b = random.randint(low1, high1)
message = ''
try:
int(hex(b).upper().split('X')[-1])
except Exception:
message = 'Not a valid value for decimal number system'
inputBox = self.getById('memText')
self.enter_inputs(inputBox, hex(a).upper().split('X')[-1])
inputVal = self.getById('valueText')
self.enter_inputs(inputVal, hex(b).upper().split('X')[-1])
self.getById('setMem').click()
try:
alert = self.driver.switch_to.alert
alert_message = alert.text
alert.accept()
self.assertEqual(message, alert_message)
except Exception:
loc_val = self.driver.find_element_by_name(hex(a).upper().split('X')[-1])
val = loc_val.get_attribute('value')
self.assertEqual(val, hex(b).upper().split('X')[-1])
self.assertEqual(message, '')
self.driver.quit()
|
Emu86
|
positive
|
def filter_scanline(type, line, fo, prev=None):
"""Apply a scanline filter to a scanline. `type` specifies the
filter type (0 to 4); `line` specifies the current (unfiltered)
scanline as a sequence of bytes; `prev` specifies the previous
(unfiltered) scanline as a sequence of bytes. `fo` specifies the
filter offset; normally this is size of a pixel in bytes (the number
of bytes per sample times the number of channels), but when this is
< 1 (for bit depths < 8) then the filter offset is 1.
"""
assert 0 <= type < 5
out = array('B', [type])
if not prev:
if type == 2:
type = 0
elif type == 3:
prev = [0] * len(line)
elif type == 4:
type = 1
if type == 0:
out.extend(line)
elif type == 1:
<DeepExtract>
ai = 0
for i in range(fu, len(result)):
x = scanline[i]
a = result[ai]
result[i] = x + a & 255
ai += 1
</DeepExtract>
elif type == 2:
<DeepExtract>
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = x + b & 255
</DeepExtract>
elif type == 3:
<DeepExtract>
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = x + (a + b >> 1) & 255
ai += 1
</DeepExtract>
else:
<DeepExtract>
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = x + pr & 255
ai += 1
</DeepExtract>
return out
|
def filter_scanline(type, line, fo, prev=None):
"""Apply a scanline filter to a scanline. `type` specifies the
filter type (0 to 4); `line` specifies the current (unfiltered)
scanline as a sequence of bytes; `prev` specifies the previous
(unfiltered) scanline as a sequence of bytes. `fo` specifies the
filter offset; normally this is size of a pixel in bytes (the number
of bytes per sample times the number of channels), but when this is
< 1 (for bit depths < 8) then the filter offset is 1.
"""
assert 0 <= type < 5
out = array('B', [type])
if not prev:
if type == 2:
type = 0
elif type == 3:
prev = [0] * len(line)
elif type == 4:
type = 1
if type == 0:
out.extend(line)
elif type == 1:
ai = 0
for i in range(fu, len(result)):
x = scanline[i]
a = result[ai]
result[i] = x + a & 255
ai += 1
elif type == 2:
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = x + b & 255
elif type == 3:
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = x + (a + b >> 1) & 255
ai += 1
else:
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = x + pr & 255
ai += 1
return out
|
DF-VO
|
positive
|
def test_hst_type_1_quadrangle():
<DeepExtract>
with mock.patch('capytaine.io.mesh_loaders._check_file', lambda foo: None):
with mock.patch('capytaine.io.mesh_loaders.open', mock.mock_open(read_data='\n COORDINATES\n 1 0.0 0.0 0.0\n 2 1.0 0.0 0.0\n 3 1.0 1.0 0.0\n 4 0.0 1.0 0.0\n ENDCOORDINATES\n PANEL TYPE 1\n 1 1 2 3 4\n ENDPANEL\n ')):
mesh = load_HST('mocked/filename')
</DeepExtract>
assert mesh.vertices.shape == (4, 3)
assert mesh.faces.shape == (1, 4)
assert np.allclose(mesh.faces_centers, np.array([[1 / 2, 1 / 2, 0.0]]))
|
def test_hst_type_1_quadrangle():
with mock.patch('capytaine.io.mesh_loaders._check_file', lambda foo: None):
with mock.patch('capytaine.io.mesh_loaders.open', mock.mock_open(read_data='\n COORDINATES\n 1 0.0 0.0 0.0\n 2 1.0 0.0 0.0\n 3 1.0 1.0 0.0\n 4 0.0 1.0 0.0\n ENDCOORDINATES\n PANEL TYPE 1\n 1 1 2 3 4\n ENDPANEL\n ')):
mesh = load_HST('mocked/filename')
assert mesh.vertices.shape == (4, 3)
assert mesh.faces.shape == (1, 4)
assert np.allclose(mesh.faces_centers, np.array([[1 / 2, 1 / 2, 0.0]]))
|
capytaine
|
positive
|
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
f = Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
<DeepExtract>
if self._queue_management_thread is None:
self._queue_management_thread = threading.Thread(target=_queue_manangement_worker, args=(weakref.ref(self), self._processes, self._pending_work_items, self._work_ids, self._call_queue, self._result_queue, self._shutdown_process_event))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_thread_references.add(weakref.ref(self._queue_management_thread))
</DeepExtract>
<DeepExtract>
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(target=_process_worker, args=(self._call_queue, self._result_queue, self._shutdown_process_event))
p.start()
self._processes.add(p)
</DeepExtract>
return f
|
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
f = Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
if self._queue_management_thread is None:
self._queue_management_thread = threading.Thread(target=_queue_manangement_worker, args=(weakref.ref(self), self._processes, self._pending_work_items, self._work_ids, self._call_queue, self._result_queue, self._shutdown_process_event))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_thread_references.add(weakref.ref(self._queue_management_thread))
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(target=_process_worker, args=(self._call_queue, self._result_queue, self._shutdown_process_event))
p.start()
self._processes.add(p)
return f
|
evergreen
|
positive
|
def _pick_at_most_one(self, x, pool=None, rtol=None, atol=None):
"""
Iterates over the pool (full pool if ``pool`` is ``None``) to find the index of a
single element ``x``, using the provided tolerances.
It uses the test function ``self._where_isclose(pool, x, rtol, atol)``, returning
an array of indices of matches.
Tolerances start at the minimum one, and, until an element is found, are
progressively increased until the maximum tolerance is reached.
"""
if pool is None:
pool = self.values
if rtol is None:
rtol = self._adapt_rtol_min
if atol is None:
atol = self._adapt_atol_min
<DeepExtract>
</DeepExtract>
if not len(i):
if rtol > self._adapt_rtol_max and atol > self._adapt_atol_max:
return np.empty(shape=0, dtype=int)
if rtol <= self._adapt_rtol_max:
rtol *= 10
if atol <= self._adapt_atol_max:
atol *= 10
return self._pick_at_most_one(x, pool, rtol, atol)
elif len(i) > 1:
if rtol < self._adapt_rtol_min and atol < self._adapt_atol_min:
return np.empty(shape=0, dtype=int)
if rtol >= self._adapt_rtol_min:
rtol /= 3
if atol >= self._adapt_atol_min:
atol /= 3
return self._pick_at_most_one(x, pool, rtol, atol)
else:
return i
|
def _pick_at_most_one(self, x, pool=None, rtol=None, atol=None):
"""
Iterates over the pool (full pool if ``pool`` is ``None``) to find the index of a
single element ``x``, using the provided tolerances.
It uses the test function ``self._where_isclose(pool, x, rtol, atol)``, returning
an array of indices of matches.
Tolerances start at the minimum one, and, until an element is found, are
progressively increased until the maximum tolerance is reached.
"""
if pool is None:
pool = self.values
if rtol is None:
rtol = self._adapt_rtol_min
if atol is None:
atol = self._adapt_atol_min
if not len(i):
if rtol > self._adapt_rtol_max and atol > self._adapt_atol_max:
return np.empty(shape=0, dtype=int)
if rtol <= self._adapt_rtol_max:
rtol *= 10
if atol <= self._adapt_atol_max:
atol *= 10
return self._pick_at_most_one(x, pool, rtol, atol)
elif len(i) > 1:
if rtol < self._adapt_rtol_min and atol < self._adapt_atol_min:
return np.empty(shape=0, dtype=int)
if rtol >= self._adapt_rtol_min:
rtol /= 3
if atol >= self._adapt_atol_min:
atol /= 3
return self._pick_at_most_one(x, pool, rtol, atol)
else:
return i
|
cobaya
|
positive
|
def create_ppo(self, env_id='Pendulum-v0', name='ppo'):
<DeepExtract>
env = make(env_id)
env_spec = EnvSpec(obs_space=env.observation_space, action_space=env.action_space)
mlp_v = MLPVValueFunc(env_spec=env_spec, name_scope=name + 'mlp_v', name=name + 'mlp_v', mlp_config=[{'ACT': 'RELU', 'B_INIT_VALUE': 0.0, 'NAME': '1', 'N_UNITS': 16, 'L1_NORM': 0.01, 'L2_NORM': 0.01, 'TYPE': 'DENSE', 'W_NORMAL_STDDEV': 0.03}, {'ACT': 'LINEAR', 'B_INIT_VALUE': 0.0, 'NAME': 'OUPTUT', 'N_UNITS': 1, 'TYPE': 'DENSE', 'W_NORMAL_STDDEV': 0.03}])
(mlp_v, local) = (mlp_v, locals())
</DeepExtract>
env_spec = local['env_spec']
env = local['env']
policy = self.create_normal_dist_mlp_policy(env_spec=env_spec, name=name)[0]
ppo = PPO(env_spec=env_spec, config_or_config_dict={'gamma': 0.995, 'value_func_memory_size': 10000, 'lam': 0.98, 'policy_train_iter': 10, 'value_func_train_iter': 10, 'clipping_range': None, 'beta': 1.0, 'eta': 50, 'log_var_init': -1.0, 'kl_target': 0.003, 'policy_lr': 0.01, 'value_func_lr': 0.01, 'value_func_train_batch_size': 10, 'lr_multiplier': 1.0}, value_func=mlp_v, stochastic_policy=policy, name=name)
return (ppo, locals())
|
def create_ppo(self, env_id='Pendulum-v0', name='ppo'):
env = make(env_id)
env_spec = EnvSpec(obs_space=env.observation_space, action_space=env.action_space)
mlp_v = MLPVValueFunc(env_spec=env_spec, name_scope=name + 'mlp_v', name=name + 'mlp_v', mlp_config=[{'ACT': 'RELU', 'B_INIT_VALUE': 0.0, 'NAME': '1', 'N_UNITS': 16, 'L1_NORM': 0.01, 'L2_NORM': 0.01, 'TYPE': 'DENSE', 'W_NORMAL_STDDEV': 0.03}, {'ACT': 'LINEAR', 'B_INIT_VALUE': 0.0, 'NAME': 'OUPTUT', 'N_UNITS': 1, 'TYPE': 'DENSE', 'W_NORMAL_STDDEV': 0.03}])
(mlp_v, local) = (mlp_v, locals())
env_spec = local['env_spec']
env = local['env']
policy = self.create_normal_dist_mlp_policy(env_spec=env_spec, name=name)[0]
ppo = PPO(env_spec=env_spec, config_or_config_dict={'gamma': 0.995, 'value_func_memory_size': 10000, 'lam': 0.98, 'policy_train_iter': 10, 'value_func_train_iter': 10, 'clipping_range': None, 'beta': 1.0, 'eta': 50, 'log_var_init': -1.0, 'kl_target': 0.003, 'policy_lr': 0.01, 'value_func_lr': 0.01, 'value_func_train_batch_size': 10, 'lr_multiplier': 1.0}, value_func=mlp_v, stochastic_policy=policy, name=name)
return (ppo, locals())
|
baconian-project
|
positive
|
def reduce_img_size(path='../data/sm4/images', img_size=1024):
path_new = path + '_reduced'
<DeepExtract>
if os.path.exists(path_new):
shutil.rmtree(path_new)
os.makedirs(path_new)
</DeepExtract>
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
(h, w) = img.shape[:2]
r = img_size / max(h, w)
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)
fnew = f.replace(path, path_new)
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
|
def reduce_img_size(path='../data/sm4/images', img_size=1024):
path_new = path + '_reduced'
if os.path.exists(path_new):
shutil.rmtree(path_new)
os.makedirs(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
(h, w) = img.shape[:2]
r = img_size / max(h, w)
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)
fnew = f.replace(path, path_new)
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
|
easy_detection
|
positive
|
def load(self, f=None):
if self.has_checkpoint():
<DeepExtract>
save_file = os.path.join(self.save_dir, 'last_checkpoint')
try:
with open(save_file, 'r') as f:
last_saved = f.read()
last_saved = last_saved.strip()
except IOError:
last_saved = ''
f = last_saved
</DeepExtract>
if not f:
self.logger.info('No checkpoint found. Initializing model from scratch')
return {}
self.logger.info('Loading checkpoint from {}'.format(f))
<DeepExtract>
checkpoint = torch.load(f, map_location=torch.device('cpu'))
</DeepExtract>
<DeepExtract>
load_state_dict(self.model, checkpoint.pop('model'))
</DeepExtract>
if 'optimizer' in checkpoint and self.optimizer:
self.logger.info('Loading optimizer from {}'.format(f))
self.optimizer.load_state_dict(checkpoint.pop('optimizer'))
if 'scheduler' in checkpoint and self.scheduler:
self.logger.info('Loading scheduler from {}'.format(f))
self.scheduler.load_state_dict(checkpoint.pop('scheduler'))
return checkpoint
|
def load(self, f=None):
if self.has_checkpoint():
save_file = os.path.join(self.save_dir, 'last_checkpoint')
try:
with open(save_file, 'r') as f:
last_saved = f.read()
last_saved = last_saved.strip()
except IOError:
last_saved = ''
f = last_saved
if not f:
self.logger.info('No checkpoint found. Initializing model from scratch')
return {}
self.logger.info('Loading checkpoint from {}'.format(f))
checkpoint = torch.load(f, map_location=torch.device('cpu'))
load_state_dict(self.model, checkpoint.pop('model'))
if 'optimizer' in checkpoint and self.optimizer:
self.logger.info('Loading optimizer from {}'.format(f))
self.optimizer.load_state_dict(checkpoint.pop('optimizer'))
if 'scheduler' in checkpoint and self.scheduler:
self.logger.info('Loading scheduler from {}'.format(f))
self.scheduler.load_state_dict(checkpoint.pop('scheduler'))
return checkpoint
|
DF-Traffic-Sign-Identification
|
positive
|
def kill_task_with_pattern(pattern, agent_host=None, timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res)
def fn():
command = "sudo kill -9 $(ps ax | grep {} | grep -v grep | tr -s ' ' | sed 's/^ *//g' | cut -d ' ' -f 1)".format(pattern)
if agent_host is None:
<DeepExtract>
log.info('(SSH:master) {}'.format(command))
(success, output) = shakedown.run_command_on_master(command)
log.info('Output (success={}):\n{}'.format(success, output))
(exit_status, _) = (success, output)
</DeepExtract>
else:
<DeepExtract>
log.info('(SSH:agent={}) {}'.format(agent_host, command))
(success, output) = shakedown.run_command_on_agent(agent_host, command)
log.info('Output (success={}):\n{}'.format(success, output))
(exit_status, _) = (success, output)
</DeepExtract>
return exit_status
<DeepExtract>
try:
response = dcos.http.request(method, url, verify=verify, **kwargs)
except dcos.errors.DCOSHTTPException as e:
response = e.response
except dcos.errors.DCOSUnprocessableException as e:
response = e.response
log_msg = 'Got {} for {} {}'.format(response.status_code, method.upper(), cluster_path)
if kwargs:
log_msg += ' (args: {})'.format(kwargs if log_args else [e for e in kwargs.keys()])
log.info(log_msg)
if not response.ok:
response_text = response.text
if response_text:
log.info('Response content ({} bytes):\n{}'.format(len(response_text), response_text))
else:
log.info('No response content')
if raise_on_error:
response.raise_for_status()
return response
</DeepExtract>
|
def kill_task_with_pattern(pattern, agent_host=None, timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res)
def fn():
command = "sudo kill -9 $(ps ax | grep {} | grep -v grep | tr -s ' ' | sed 's/^ *//g' | cut -d ' ' -f 1)".format(pattern)
if agent_host is None:
log.info('(SSH:master) {}'.format(command))
(success, output) = shakedown.run_command_on_master(command)
log.info('Output (success={}):\n{}'.format(success, output))
(exit_status, _) = (success, output)
else:
log.info('(SSH:agent={}) {}'.format(agent_host, command))
(success, output) = shakedown.run_command_on_agent(agent_host, command)
log.info('Output (success={}):\n{}'.format(success, output))
(exit_status, _) = (success, output)
return exit_status
try:
response = dcos.http.request(method, url, verify=verify, **kwargs)
except dcos.errors.DCOSHTTPException as e:
response = e.response
except dcos.errors.DCOSUnprocessableException as e:
response = e.response
log_msg = 'Got {} for {} {}'.format(response.status_code, method.upper(), cluster_path)
if kwargs:
log_msg += ' (args: {})'.format(kwargs if log_args else [e for e in kwargs.keys()])
log.info(log_msg)
if not response.ok:
response_text = response.text
if response_text:
log.info('Response content ({} bytes):\n{}'.format(len(response_text), response_text))
else:
log.info('No response content')
if raise_on_error:
response.raise_for_status()
return response
|
dcos-jenkins-service
|
positive
|
def draw_polyline(self, v, **attrs):
<DeepExtract>
geom = PolyLine(v, False)
</DeepExtract>
<DeepExtract>
if 'color' in attrs:
geom.set_color(*attrs['color'])
if 'linewidth' in attrs:
geom.set_linewidth(attrs['linewidth'])
</DeepExtract>
<DeepExtract>
self.onetime_geoms.append(geom)
</DeepExtract>
return geom
|
def draw_polyline(self, v, **attrs):
geom = PolyLine(v, False)
if 'color' in attrs:
geom.set_color(*attrs['color'])
if 'linewidth' in attrs:
geom.set_linewidth(attrs['linewidth'])
self.onetime_geoms.append(geom)
return geom
|
DQN-DDPG_Stock_Trading
|
positive
|
def save_subplots(self, out_path, names, fig=None, png_preview=True, tex_preview=True, override_externals=True, post_process=True):
"""Save subplots of figure into single TikZ figures."""
if fig is None:
fig = plt.gcf()
<DeepExtract>
fig_axes = fig.get_axes()
for ax in fig_axes:
fig.delaxes(ax)
fig_list = []
for ax in fig_axes:
new_fig = plt.figure()
new_fig._axstack.add(new_fig._make_key(ax), ax)
new_fig.axes[0].change_geometry(1, 1, 1)
fig_list.append(new_fig)
axes = fig_list
</DeepExtract>
for (name, subplot) in zip(names, axes):
assert len(subplot.get_axes()) == 1
out_file = os.path.join(out_path, name)
<DeepExtract>
if subplot is not None:
self.set_current(subplot)
tex_file = self._add_extension(out_file, 'tex')
out_dir = os.path.dirname(out_file)
os.makedirs(out_dir, exist_ok=True)
if png_preview:
png_file = self._add_extension('{}-preview'.format(out_file), 'png')
plt.savefig(png_file, bbox_inches='tight')
tikz_save(tex_file, override_externals=override_externals, extra_axis_parameters=self.extra_axis_parameters)
if post_process is True:
self.post_process(out_file)
if tex_preview is True:
tex_preview_file = self._add_extension('{}-preview'.format(out_file), 'tex')
with open(tex_file, 'r') as f:
content = ''.join(f.readlines())
preamble = '\\documentclass[tikz]{standalone}\n\\usepackage{tikz}\n\\usepackage{pgfplots}\n\\usepackage{amssymb}\n\\usetikzlibrary{shapes,\n pgfplots.groupplots,\n shadings,\n calc,\n arrows,\n backgrounds,\n colorbrewer,\n shadows.blur}\n\n% customize "zmystyle" as you wish\n \\pgfkeys{/pgfplots/zmystyle/.style={\n % legend pos = north east,\n % xmin=1, xmax=20,\n % ymin = 1, ymax = 1.2,\n % title = {The title},\n }\n }\n\n\\begin{document}\n'
postamble = '\\end{document}'
preview_content = preamble + content + postamble
with open(tex_preview_file, 'w') as f:
f.write(preview_content)
subprocess.run(['pdflatex', '-output-directory', out_dir, tex_preview_file])
</DeepExtract>
|
def save_subplots(self, out_path, names, fig=None, png_preview=True, tex_preview=True, override_externals=True, post_process=True):
"""Save subplots of figure into single TikZ figures."""
if fig is None:
fig = plt.gcf()
fig_axes = fig.get_axes()
for ax in fig_axes:
fig.delaxes(ax)
fig_list = []
for ax in fig_axes:
new_fig = plt.figure()
new_fig._axstack.add(new_fig._make_key(ax), ax)
new_fig.axes[0].change_geometry(1, 1, 1)
fig_list.append(new_fig)
axes = fig_list
for (name, subplot) in zip(names, axes):
assert len(subplot.get_axes()) == 1
out_file = os.path.join(out_path, name)
if subplot is not None:
self.set_current(subplot)
tex_file = self._add_extension(out_file, 'tex')
out_dir = os.path.dirname(out_file)
os.makedirs(out_dir, exist_ok=True)
if png_preview:
png_file = self._add_extension('{}-preview'.format(out_file), 'png')
plt.savefig(png_file, bbox_inches='tight')
tikz_save(tex_file, override_externals=override_externals, extra_axis_parameters=self.extra_axis_parameters)
if post_process is True:
self.post_process(out_file)
if tex_preview is True:
tex_preview_file = self._add_extension('{}-preview'.format(out_file), 'tex')
with open(tex_file, 'r') as f:
content = ''.join(f.readlines())
preamble = '\\documentclass[tikz]{standalone}\n\\usepackage{tikz}\n\\usepackage{pgfplots}\n\\usepackage{amssymb}\n\\usetikzlibrary{shapes,\n pgfplots.groupplots,\n shadings,\n calc,\n arrows,\n backgrounds,\n colorbrewer,\n shadows.blur}\n\n% customize "zmystyle" as you wish\n \\pgfkeys{/pgfplots/zmystyle/.style={\n % legend pos = north east,\n % xmin=1, xmax=20,\n % ymin = 1, ymax = 1.2,\n % title = {The title},\n }\n }\n\n\\begin{document}\n'
postamble = '\\end{document}'
preview_content = preamble + content + postamble
with open(tex_preview_file, 'w') as f:
f.write(preview_content)
subprocess.run(['pdflatex', '-output-directory', out_dir, tex_preview_file])
|
cockpit
|
positive
|
def _loglikelihood(self, parameters: Float64Array, sigma2: Float64Array, backcast: float | Float64Array, var_bounds: Float64Array, individual: bool=False) -> float | Float64Array:
"""
Computes the log-likelihood using the entire model
Parameters
----------
parameters
sigma2
backcast
individual : bool, optional
Returns
-------
neg_llf : float
Negative of model loglikelihood
"""
_callback_info['count'] += 1
<DeepExtract>
parameters = np.asarray(parameters, dtype=float)
(km, kv) = (int(self.num_params), int(self.volatility.num_params))
(mp, vp, dp) = (parameters[:km], parameters[km:km + kv], parameters[km + kv:])
</DeepExtract>
resids = np.asarray(self.resids(mp), dtype=float)
sigma2 = self.volatility.compute_variance(vp, resids, sigma2, backcast, var_bounds)
llf = self.distribution.loglikelihood(dp, resids, sigma2, individual)
if not individual:
_callback_info['llf'] = llf_f = -float(llf)
return llf_f
return cast(np.ndarray, -llf)
|
def _loglikelihood(self, parameters: Float64Array, sigma2: Float64Array, backcast: float | Float64Array, var_bounds: Float64Array, individual: bool=False) -> float | Float64Array:
"""
Computes the log-likelihood using the entire model
Parameters
----------
parameters
sigma2
backcast
individual : bool, optional
Returns
-------
neg_llf : float
Negative of model loglikelihood
"""
_callback_info['count'] += 1
parameters = np.asarray(parameters, dtype=float)
(km, kv) = (int(self.num_params), int(self.volatility.num_params))
(mp, vp, dp) = (parameters[:km], parameters[km:km + kv], parameters[km + kv:])
resids = np.asarray(self.resids(mp), dtype=float)
sigma2 = self.volatility.compute_variance(vp, resids, sigma2, backcast, var_bounds)
llf = self.distribution.loglikelihood(dp, resids, sigma2, individual)
if not individual:
_callback_info['llf'] = llf_f = -float(llf)
return llf_f
return cast(np.ndarray, -llf)
|
arch
|
positive
|
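The arch pair above first splits one flat parameter vector into mean-model, volatility-model and distribution pieces before scoring the log-likelihood. Here is a stand-alone sketch of just that slicing step, assuming NumPy; the function name and the example counts are made up for illustration.

import numpy as np

def split_parameters(parameters, n_mean, n_vol):
    # Slice a flat optimizer vector into mean, volatility and distribution parts.
    parameters = np.asarray(parameters, dtype=float)
    mp = parameters[:n_mean]
    vp = parameters[n_mean:n_mean + n_vol]
    dp = parameters[n_mean + n_vol:]
    return mp, vp, dp

# e.g. a constant mean (1 parameter), GARCH(1,1) volatility (3), Student-t shape (1)
mp, vp, dp = split_parameters([0.1, 0.02, 0.05, 0.9, 8.0], n_mean=1, n_vol=3)
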
def __init__(self, args):
self._args = args
self._algo = 'retina'
self._metric_tool_dir = './devkit'
self._metric_prob_list = './../templatelists/facescrub_features_list.json'
self._python_tool_dir = PYTHON_TOOL
self._outdir_root = ''
self._emb_outdir_root = ''
self._emb_clean_outdir_root = ''
self._face_scrub_list = './data/facescrub_lst'
self._face_scrub_root = '../data/megaface/facescrub_images'
self._face_scrub = []
self._face_scrub_emb_outdir = ''
self._face_scrub_emb_clean_outdir = ''
self._face_gallery_list = './data/megaface_lst'
self._face_gallery_root = '../data/megaface/megaface_images'
self._face_gallery = []
self._face_gallery_emb_outdir = ''
self._face_gallery_emb_clean_outdir = ''
self._face_scrub_noisy_list = './data/facescrub_noises.txt'
self._face_gallery_noisy_list = './data/megaface_noises.txt'
self._face_scrub_noisy_dict = {}
self._face_gallery_noisy_dict = {}
self._save_badcase = True if args.save_badcase else False
self._badcase_prob = []
self._badcase_gall = []
self._face_badcase_savefile = 'megaface.badcase'
<DeepExtract>
epoch = int(self._args.model_path.strip().split(',')[1])
self._outdir_root = os.path.join('/'.join(self._args.model_path.strip().split('/')[:-1]), 'megaface_%d' % epoch)
self._emb_outdir_root = os.path.join(self._outdir_root, 'embedding')
self._emb_clean_outdir_root = os.path.join(self._outdir_root, 'embedding_clean')
self._face_scrub_emb_outdir = os.path.join(self._emb_outdir_root, 'facescrub')
os.system('mkdir -p %s' % self._face_scrub_emb_outdir)
self._face_scrub_emb_clean_outdir = os.path.join(self._emb_clean_outdir_root, 'facescrub')
os.system('mkdir -p %s' % self._face_scrub_emb_clean_outdir)
self._face_gallery_emb_outdir = os.path.join(self._emb_outdir_root, 'megaface')
os.system('mkdir -p %s' % self._face_gallery_emb_outdir)
self._face_gallery_emb_clean_outdir = os.path.join(self._emb_clean_outdir_root, 'megaface')
os.system('mkdir -p %s' % self._face_gallery_emb_clean_outdir)
self._face_result_filename = 'megaface.%d.result' % epoch
</DeepExtract>
<DeepExtract>
self._face_scrub = open(self._face_scrub_list).readlines()
self._face_gallery = open(self._face_gallery_list).readlines()
for line in open(self._face_scrub_noisy_list, 'r'):
if line.startswith('#'):
continue
line = line.strip()
fname = line.split('.')[0]
p = fname.rfind('_')
fname = fname[0:p]
self._face_scrub_noisy_dict[line] = fname
print('Noisy faces of scrub: %d' % len(self._face_scrub_noisy_dict))
for line in open(self._face_gallery_noisy_list, 'r'):
if line.startswith('#'):
continue
line = line.strip()
_vec = line.split('\t')
if len(_vec) > 1:
line = _vec[1]
self._face_gallery_noisy_dict[line] = 1
print('Noisy faces of gallery: %d' % len(self._face_gallery_noisy_dict))
</DeepExtract>
|
def __init__(self, args):
self._args = args
self._algo = 'retina'
self._metric_tool_dir = './devkit'
self._metric_prob_list = './../templatelists/facescrub_features_list.json'
self._python_tool_dir = PYTHON_TOOL
self._outdir_root = ''
self._emb_outdir_root = ''
self._emb_clean_outdir_root = ''
self._face_scrub_list = './data/facescrub_lst'
self._face_scrub_root = '../data/megaface/facescrub_images'
self._face_scrub = []
self._face_scrub_emb_outdir = ''
self._face_scrub_emb_clean_outdir = ''
self._face_gallery_list = './data/megaface_lst'
self._face_gallery_root = '../data/megaface/megaface_images'
self._face_gallery = []
self._face_gallery_emb_outdir = ''
self._face_gallery_emb_clean_outdir = ''
self._face_scrub_noisy_list = './data/facescrub_noises.txt'
self._face_gallery_noisy_list = './data/megaface_noises.txt'
self._face_scrub_noisy_dict = {}
self._face_gallery_noisy_dict = {}
self._save_badcase = True if args.save_badcase else False
self._badcase_prob = []
self._badcase_gall = []
self._face_badcase_savefile = 'megaface.badcase'
epoch = int(self._args.model_path.strip().split(',')[1])
self._outdir_root = os.path.join('/'.join(self._args.model_path.strip().split('/')[:-1]), 'megaface_%d' % epoch)
self._emb_outdir_root = os.path.join(self._outdir_root, 'embedding')
self._emb_clean_outdir_root = os.path.join(self._outdir_root, 'embedding_clean')
self._face_scrub_emb_outdir = os.path.join(self._emb_outdir_root, 'facescrub')
os.system('mkdir -p %s' % self._face_scrub_emb_outdir)
self._face_scrub_emb_clean_outdir = os.path.join(self._emb_clean_outdir_root, 'facescrub')
os.system('mkdir -p %s' % self._face_scrub_emb_clean_outdir)
self._face_gallery_emb_outdir = os.path.join(self._emb_outdir_root, 'megaface')
os.system('mkdir -p %s' % self._face_gallery_emb_outdir)
self._face_gallery_emb_clean_outdir = os.path.join(self._emb_clean_outdir_root, 'megaface')
os.system('mkdir -p %s' % self._face_gallery_emb_clean_outdir)
self._face_result_filename = 'megaface.%d.result' % epoch
self._face_scrub = open(self._face_scrub_list).readlines()
self._face_gallery = open(self._face_gallery_list).readlines()
for line in open(self._face_scrub_noisy_list, 'r'):
if line.startswith('#'):
continue
line = line.strip()
fname = line.split('.')[0]
p = fname.rfind('_')
fname = fname[0:p]
self._face_scrub_noisy_dict[line] = fname
print('Noisy faces of scrub: %d' % len(self._face_scrub_noisy_dict))
for line in open(self._face_gallery_noisy_list, 'r'):
if line.startswith('#'):
continue
line = line.strip()
_vec = line.split('\t')
if len(_vec) > 1:
line = _vec[1]
self._face_gallery_noisy_dict[line] = 1
print('Noisy faces of gallery: %d' % len(self._face_gallery_noisy_dict))
|
cavaface.pytorch
|
positive
|
def change_role(self, membership_id, role_name):
"""
Changes the role of an application member.
Args:
membership_id (int): the id of the membership that will be changed.
role_name (str): the role name to be granted to the membership.
Examples:
>>> balena.models.application.membership.change_role(55074, 'observer')
'OK'
"""
<DeepExtract>
base_request = BaseRequest()
settings = Settings()
params = {'filter': 'name', 'eq': role_name}
roles = base_request.request('application_membership_role', 'GET', params=params, endpoint=settings.get('pine_endpoint'))['d']
if not roles:
raise exceptions.BalenaApplicationMembershipRoleNotFound(role_name=role_name)
else:
role_id = roles[0]['id']
</DeepExtract>
params = {'filter': 'id', 'eq': membership_id}
data = {'application_membership_role': role_id}
return self.base_request.request(self.RESOURCE, 'PATCH', params=params, data=data, endpoint=self.settings.get('pine_endpoint'))
|
def change_role(self, membership_id, role_name):
"""
Changes the role of an application member.
Args:
membership_id (int): the id of the membership that will be changed.
role_name (str): the role name to be granted to the membership.
Examples:
>>> balena.models.application.membership.change_role(55074, 'observer')
'OK'
"""
base_request = BaseRequest()
settings = Settings()
params = {'filter': 'name', 'eq': role_name}
roles = base_request.request('application_membership_role', 'GET', params=params, endpoint=settings.get('pine_endpoint'))['d']
if not roles:
raise exceptions.BalenaApplicationMembershipRoleNotFound(role_name=role_name)
else:
role_id = roles[0]['id']
params = {'filter': 'id', 'eq': membership_id}
data = {'application_membership_role': role_id}
return self.base_request.request(self.RESOURCE, 'PATCH', params=params, data=data, endpoint=self.settings.get('pine_endpoint'))
|
balena-sdk-python
|
positive
|
def assert_content_manifest(client, request_id, image_contents):
"""
Check that the content manifest is successfully generated and contains correct content.
Checks:
* Check that status of content-manifest request is 200
* Validate content manifest schema
* Check image_contents from content-manifest
:param Client client: the Cachito API client
:param int request_id: The Cachito request id
:param list image_contents: expected image content part from content manifest
"""
content_manifest_response = client.fetch_content_manifest(request_id)
assert content_manifest_response.status == 200, f'#{content_manifest_response.id}: response status {content_manifest_response.status} != 200'
response_data = content_manifest_response.data
<DeepExtract>
icm_spec = response_data['metadata']['icm_spec']
requests_session = get_requests_session()
schema = requests_session.get(icm_spec, timeout=30).json()
assert validate_json(schema, response_data), f"ICM data not valid for schema at {response_data['metadata']['icm_spec']}: {response_data}"
</DeepExtract>
    assert image_contents == content_manifest_response.data['image_contents'], f"#{content_manifest_response.id}: image content in response differs from test expectations.\nResponse image content: {json.dumps(content_manifest_response.data['image_contents'], indent=4, sort_keys=True)},\nTest expectations: {json.dumps(image_contents, indent=4, sort_keys=True)}"
|
def assert_content_manifest(client, request_id, image_contents):
"""
Check that the content manifest is successfully generated and contains correct content.
Checks:
* Check that status of content-manifest request is 200
* Validate content manifest schema
* Check image_contents from content-manifest
:param Client client: the Cachito API client
:param int request_id: The Cachito request id
:param list image_contents: expected image content part from content manifest
"""
content_manifest_response = client.fetch_content_manifest(request_id)
assert content_manifest_response.status == 200, f'#{content_manifest_response.id}: response status {content_manifest_response.status} != 200'
response_data = content_manifest_response.data
icm_spec = response_data['metadata']['icm_spec']
requests_session = get_requests_session()
schema = requests_session.get(icm_spec, timeout=30).json()
assert validate_json(schema, response_data), f"ICM data not valid for schema at {response_data['metadata']['icm_spec']}: {response_data}"
    assert image_contents == content_manifest_response.data['image_contents'], f"#{content_manifest_response.id}: image content in response differs from test expectations.\nResponse image content: {json.dumps(content_manifest_response.data['image_contents'], indent=4, sort_keys=True)},\nTest expectations: {json.dumps(image_contents, indent=4, sort_keys=True)}"
|
cachito
|
positive
|
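The cachito pair above downloads the ICM JSON schema referenced in the response and validates the payload against it. The sketch below shows that validation step with the standard jsonschema package; the schema and document literals are toy values, not Cachito's real ICM spec.

import jsonschema

schema = {
    "type": "object",
    "properties": {"image_contents": {"type": "array"}},
    "required": ["image_contents"],
}
document = {"image_contents": []}

try:
    jsonschema.validate(instance=document, schema=schema)
    print("ICM document matches the schema")
except jsonschema.ValidationError as exc:
    print(f"ICM data not valid: {exc.message}")
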
def dpCountAllGrp(self, *args):
""" Count the number of active All_Grp and return them in a list.
"""
allGrpNodeList = []
allNodeList = cmds.ls(selection=False, type='transform')
for nodeName in allNodeList:
        <DeepExtract>
        allGrp = False
        if cmds.objExists(nodeName):
            if cmds.objExists(nodeName + '.' + self.masterAttr):
                if cmds.getAttr(nodeName + '.' + self.masterAttr) == 1:
                    allGrp = nodeName
        </DeepExtract>
if allGrp:
allGrpNodeList.append(allGrp)
return allGrpNodeList
|
def dpCountAllGrp(self, *args):
""" Count the number of active All_Grp and return them in a list.
"""
allGrpNodeList = []
allNodeList = cmds.ls(selection=False, type='transform')
for nodeName in allNodeList:
        allGrp = False
        if cmds.objExists(nodeName):
            if cmds.objExists(nodeName + '.' + self.masterAttr):
                if cmds.getAttr(nodeName + '.' + self.masterAttr) == 1:
                    allGrp = nodeName
if allGrp:
allGrpNodeList.append(allGrp)
return allGrpNodeList
|
dpAutoRigSystem
|
positive
|
def check_valid_altdisks(module, action, vios, hdisks, vios_key, altdisk_op_tab, err_label):
"""
Check a valid alternate disk that
- exists,
- is an alternate disk
and so can be used.
    sets the altdisk_op_tab accordingly:
altdisk_op_tab[vios_key] = "FAILURE-ALTDC[12] <error message>"
altdisk_op_tab[vios_key] = "SUCCESS-ALTDC"
arguments:
module (dict): The Ansible module
action (str): The action to perform
vios (str): The VIOS name
        hdisks (list): The list of hdisks
        vios_key (str): The key for the altdisk_op_tab status dictionary
altdisk_op_tab (dict): The operation status
err_label (str): The error to set the altdisk_op_tab value with
return:
True if alternate disk is found
False otherwise
"""
module.debug('action: {0}, vios: {1}, hdisks: {2}, vios_key: {3}'.format(action, vios, hdisks, vios_key))
<DeepExtract>
module.debug('get_pvs vios: {0}'.format(vios))
cmd = ['/usr/ios/cli/ioscli', 'lspv']
(ret, stdout, stderr) = nim_exec(module, vios, cmd)
if ret != 0:
msg = 'Failed to get the PV list on {0}, lspv returned: {1} {2}'.format(vios, ret, stderr)
results['meta'][vios]['messages'].append(msg)
module.log(msg)
pvs = None
pvs = {}
for line in stdout.split('\n'):
line = line.rstrip()
match_key = re.match('^(hdisk\\S+)\\s+(\\S+)\\s+(\\S+)\\s*(\\S*)', line)
if match_key:
pvs[match_key.group(1)] = {}
pvs[match_key.group(1)]['pvid'] = match_key.group(2)
pvs[match_key.group(1)]['vg'] = match_key.group(3)
pvs[match_key.group(1)]['status'] = match_key.group(4)
module.debug('List of PVs:')
for key in pvs.keys():
module.debug(' pvs[{0}]: {1}'.format(key, pvs[key]))
pvs = pvs
</DeepExtract>
if pvs is None or not pvs:
msg = 'to get the list of PVs on {0}'.format(vios)
altdisk_op_tab[vios_key] = '{0} {1}'.format(err_label, msg)
results['meta'][vios]['messages'].append('Failed ' + msg)
module.log('ERROR: Failed ' + msg)
return False
if not isinstance(hdisks, list):
msg = 'value is not a list for {0}'.format(vios)
altdisk_op_tab[vios_key] = '{0} {1}'.format(err_label, msg)
results['meta'][vios]['messages'].append('Target dictionary ' + msg)
module.log('ERROR: Target dictionary ' + msg)
return False
if hdisks:
for hdisk in hdisks:
if hdisk not in pvs or pvs[hdisk]['vg'] != 'altinst_rootvg':
msg = 'disk {0} is not an alternate install rootvg on {1}'.format(hdisk, vios)
altdisk_op_tab[vios_key] = '{0} {1}'.format(err_label, msg)
msg = 'Specified ' + msg
results['meta'][vios]['messages'].append(msg)
module.log('ERROR: ' + msg)
return False
else:
for pv in pvs.keys():
if pvs[pv]['vg'] == 'altinst_rootvg':
hdisks.append(pv)
if not hdisks:
msg = 'There is no alternate install rootvg on {0}'.format(vios)
results['meta'][vios]['messages'].append(msg)
module.log('ERROR: ' + msg)
return False
return True
|
def check_valid_altdisks(module, action, vios, hdisks, vios_key, altdisk_op_tab, err_label):
"""
Check a valid alternate disk that
- exists,
- is an alternate disk
and so can be used.
    sets the altdisk_op_tab accordingly:
altdisk_op_tab[vios_key] = "FAILURE-ALTDC[12] <error message>"
altdisk_op_tab[vios_key] = "SUCCESS-ALTDC"
arguments:
module (dict): The Ansible module
action (str): The action to perform
vios (str): The VIOS name
        hdisks (list): The list of hdisks
        vios_key (str): The key for the altdisk_op_tab status dictionary
altdisk_op_tab (dict): The operation status
err_label (str): The error to set the altdisk_op_tab value with
return:
True if alternate disk is found
False otherwise
"""
module.debug('action: {0}, vios: {1}, hdisks: {2}, vios_key: {3}'.format(action, vios, hdisks, vios_key))
module.debug('get_pvs vios: {0}'.format(vios))
cmd = ['/usr/ios/cli/ioscli', 'lspv']
(ret, stdout, stderr) = nim_exec(module, vios, cmd)
if ret != 0:
msg = 'Failed to get the PV list on {0}, lspv returned: {1} {2}'.format(vios, ret, stderr)
results['meta'][vios]['messages'].append(msg)
module.log(msg)
pvs = None
pvs = {}
for line in stdout.split('\n'):
line = line.rstrip()
match_key = re.match('^(hdisk\\S+)\\s+(\\S+)\\s+(\\S+)\\s*(\\S*)', line)
if match_key:
pvs[match_key.group(1)] = {}
pvs[match_key.group(1)]['pvid'] = match_key.group(2)
pvs[match_key.group(1)]['vg'] = match_key.group(3)
pvs[match_key.group(1)]['status'] = match_key.group(4)
module.debug('List of PVs:')
for key in pvs.keys():
module.debug(' pvs[{0}]: {1}'.format(key, pvs[key]))
pvs = pvs
if pvs is None or not pvs:
msg = 'to get the list of PVs on {0}'.format(vios)
altdisk_op_tab[vios_key] = '{0} {1}'.format(err_label, msg)
results['meta'][vios]['messages'].append('Failed ' + msg)
module.log('ERROR: Failed ' + msg)
return False
if not isinstance(hdisks, list):
msg = 'value is not a list for {0}'.format(vios)
altdisk_op_tab[vios_key] = '{0} {1}'.format(err_label, msg)
results['meta'][vios]['messages'].append('Target dictionary ' + msg)
module.log('ERROR: Target dictionary ' + msg)
return False
if hdisks:
for hdisk in hdisks:
if hdisk not in pvs or pvs[hdisk]['vg'] != 'altinst_rootvg':
msg = 'disk {0} is not an alternate install rootvg on {1}'.format(hdisk, vios)
altdisk_op_tab[vios_key] = '{0} {1}'.format(err_label, msg)
msg = 'Specified ' + msg
results['meta'][vios]['messages'].append(msg)
module.log('ERROR: ' + msg)
return False
else:
for pv in pvs.keys():
if pvs[pv]['vg'] == 'altinst_rootvg':
hdisks.append(pv)
if not hdisks:
msg = 'There is no alternate install rootvg on {0}'.format(vios)
results['meta'][vios]['messages'].append(msg)
module.log('ERROR: ' + msg)
return False
return True
|
ansible-power-aix
|
positive
|
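The ansible-power-aix pair above builds its PV table by running ioscli lspv over SSH and scraping each output line with a regular expression. The same parsing step can be exercised offline against a canned sample, as in this sketch; the sample output and PVIDs are fabricated, while the regex mirrors the one in the module.

import re

SAMPLE_LSPV = """hdisk0          00f6db0a1c9f3a4b    rootvg          active
hdisk1          00f6db0a2d8e4c5d    altinst_rootvg
hdisk2          none                None"""

pvs = {}
for line in SAMPLE_LSPV.split('\n'):
    match = re.match(r'^(hdisk\S+)\s+(\S+)\s+(\S+)\s*(\S*)', line.rstrip())
    if match:
        pvs[match.group(1)] = {
            'pvid': match.group(2),
            'vg': match.group(3),
            'status': match.group(4),
        }

alt_disks = [pv for pv, info in pvs.items() if info['vg'] == 'altinst_rootvg']
print(alt_disks)  # ['hdisk1']
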
def load_pretrained_model(path):
<DeepExtract>
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
anchor_generator = AnchorGenerator(sizes=tuple([(4, 8, 16, 32, 64, 128, 256, 512) for _ in range(5)]), aspect_ratios=tuple([(0.25, 0.5, 1.0, 2.0) for _ in range(5)]))
model.rpn.anchor_generator = anchor_generator
model.rpn.head = RPNHead(256, anchor_generator.num_anchors_per_location()[0])
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, len(constants.OBJECTS_DETECTOR) + 1)
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, len(constants.OBJECTS_DETECTOR) + 1)
mask_rcnn = model
</DeepExtract>
mask_rcnn.load_state_dict(torch.load(path))
return mask_rcnn
|
def load_pretrained_model(path):
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
anchor_generator = AnchorGenerator(sizes=tuple([(4, 8, 16, 32, 64, 128, 256, 512) for _ in range(5)]), aspect_ratios=tuple([(0.25, 0.5, 1.0, 2.0) for _ in range(5)]))
model.rpn.anchor_generator = anchor_generator
model.rpn.head = RPNHead(256, anchor_generator.num_anchors_per_location()[0])
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, len(constants.OBJECTS_DETECTOR) + 1)
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, len(constants.OBJECTS_DETECTOR) + 1)
mask_rcnn = model
mask_rcnn.load_state_dict(torch.load(path))
return mask_rcnn
|
alfworld
|
positive
|
def ShowSkills(crew_member):
if not crew_member.alive:
return
selected_skill = 0
exit_menu = False
while not exit_menu:
skill_list = []
for skill in SKILLS:
if len(skill.restrictions) > 0:
if crew_member.position not in skill.restrictions:
continue
if skill.name == 'Gyrostabilizer' and campaign.player_nation != 'USA':
continue
level = 0
for crew_skill in crew_member.skills:
if crew_skill.name == skill.name:
level = crew_skill.level
break
skill_list.append((skill, level))
libtcod.console_set_default_background(menu_con, libtcod.black)
libtcod.console_set_default_foreground(menu_con, libtcod.white)
libtcod.console_clear(menu_con)
libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH - 1, MENU_CON_HEIGHT - 1, clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
libtcod.console_print_ex(menu_con, MENU_CON_XM, 2, libtcod.BKGND_NONE, libtcod.CENTER, 'Crew Skills')
libtcod.console_set_default_foreground(menu_con, libtcod.white)
libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT - 5, libtcod.BKGND_NONE, libtcod.CENTER, '[%cW/S/Up/Down%c] Change Skill Selection' % HIGHLIGHT)
libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT - 4, libtcod.BKGND_NONE, libtcod.CENTER, '[%cEnter%c] Add/Upgrade Skill' % HIGHLIGHT)
libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT - 3, libtcod.BKGND_NONE, libtcod.CENTER, '[%cESC%c] Return/Continue' % HIGHLIGHT)
crew_member.DisplayCrewInfo(menu_con, 29, 4, False)
libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
libtcod.console_print_frame(menu_con, 83, 4, 40, 30, clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
libtcod.console_print(menu_con, 86, 9, 'Description')
libtcod.console_print(menu_con, 86, 25, 'Activation Levels')
libtcod.console_set_default_foreground(menu_con, libtcod.white)
libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
libtcod.console_print_frame(menu_con, 56, 4, 27, 30, clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
libtcod.console_print(menu_con, 59, 5, 'Skills')
libtcod.console_set_default_foreground(menu_con, libtcod.white)
y = 7
n = 0
for (skill, level) in skill_list:
if n == selected_skill:
libtcod.console_set_default_background(menu_con, libtcod.darker_grey)
libtcod.console_rect(menu_con, 57, y, 25, 1, False, flag=libtcod.BKGND_SET)
libtcod.console_set_default_background(menu_con, libtcod.black)
libtcod.console_set_default_foreground(menu_con, libtcod.white)
lines = wrap(skill.desc, 30, subsequent_indent=' ')
y2 = 11
for line in lines:
libtcod.console_print(menu_con, 86, y2, line)
y2 += 1
if skill.levels[0] == 100:
libtcod.console_print(menu_con, 86, 27, 'Always active')
else:
x2 = 86
for skill_level in skill.levels:
if skill_level <= level:
libtcod.console_set_default_foreground(menu_con, libtcod.light_blue)
else:
libtcod.console_set_default_foreground(menu_con, libtcod.white)
libtcod.console_print(menu_con, x2, 27, str(skill_level) + '%')
x2 += len(str(skill_level)) + 2
if skill.name == 'Gyrostabilizer' and (not campaign.gyro_skill_avail):
libtcod.console_set_default_foreground(menu_con, GREYED_COLOR)
elif level > 0:
libtcod.console_set_default_foreground(menu_con, libtcod.light_blue)
else:
libtcod.console_set_default_foreground(menu_con, libtcod.white)
libtcod.console_print(menu_con, 58, y, skill.name)
y += 1
n += 1
libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
libtcod.console_flush()
refresh = False
while not refresh:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
if libtcod.console_is_window_closed():
sys.exit()
if key.vk == libtcod.KEY_ESCAPE:
exit_menu = True
break
key_char = chr(key.c)
if key.vk == libtcod.KEY_UP or key_char in ['w', 'W']:
if selected_skill == 0:
selected_skill = len(skill_list) - 1
else:
selected_skill -= 1
refresh = True
elif key.vk == libtcod.KEY_DOWN or key_char in ['s', 'S']:
if selected_skill == len(skill_list) - 1:
selected_skill = 0
else:
selected_skill += 1
refresh = True
elif key.vk == libtcod.KEY_ENTER:
if not crew_member.NoActions() and crew_member.skill_pts > 0:
n = 0
for (skill, level) in skill_list:
if n == selected_skill:
if skill.name == 'Gyrostabilizer' and (not campaign.gyro_skill_avail):
break
if level == 0:
text = 'Spend a skill point to add skill "' + skill.name + '"'
if PopUp(text, confirm=True):
crew_member.skill_pts -= 1
crew_member.skills.append(SkillRecord(skill.name, skill.levels[0]))
<DeepExtract>
campaign.campaign_journal.append(crew_member.name + ' added a new skill: ' + skill.name + ', now at ' + str(skill.levels[0]) + '% activation')
</DeepExtract>
<DeepExtract>
if not MIXER_ACTIVE:
return
if campaign is not None:
if not campaign.sounds:
return
if not 'new_skill' in SOUNDS:
return
if SOUNDS['new_skill'] is None:
return
mixer.Mix_PlayChannel(-1, SOUNDS['new_skill'], 0)
</DeepExtract>
else:
for skill_level in skill.levels:
if level < skill_level:
text = 'Spend a skill point to upgrade skill "' + skill.name + '"'
if PopUp(text, confirm=True):
crew_member.skill_pts -= 1
crew_member.UpgradeSkill(skill.name, skill_level)
<DeepExtract>
campaign.campaign_journal.append(crew_member.name + ' upgraded a skill: ' + skill.name + ', now at ' + str(skill_level) + '% activation')
</DeepExtract>
<DeepExtract>
if not MIXER_ACTIVE:
return
if campaign is not None:
if not campaign.sounds:
return
if not 'new_skill' in SOUNDS:
return
if SOUNDS['new_skill'] is None:
return
mixer.Mix_PlayChannel(-1, SOUNDS['new_skill'], 0)
</DeepExtract>
break
break
n += 1
refresh = True
libtcod.console_flush()
|
def ShowSkills(crew_member):
if not crew_member.alive:
return
selected_skill = 0
exit_menu = False
while not exit_menu:
skill_list = []
for skill in SKILLS:
if len(skill.restrictions) > 0:
if crew_member.position not in skill.restrictions:
continue
if skill.name == 'Gyrostabilizer' and campaign.player_nation != 'USA':
continue
level = 0
for crew_skill in crew_member.skills:
if crew_skill.name == skill.name:
level = crew_skill.level
break
skill_list.append((skill, level))
libtcod.console_set_default_background(menu_con, libtcod.black)
libtcod.console_set_default_foreground(menu_con, libtcod.white)
libtcod.console_clear(menu_con)
libtcod.console_print_frame(menu_con, 0, 0, MENU_CON_WIDTH - 1, MENU_CON_HEIGHT - 1, clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
libtcod.console_set_default_foreground(menu_con, MENU_TITLE_COLOR)
libtcod.console_print_ex(menu_con, MENU_CON_XM, 2, libtcod.BKGND_NONE, libtcod.CENTER, 'Crew Skills')
libtcod.console_set_default_foreground(menu_con, libtcod.white)
libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT - 5, libtcod.BKGND_NONE, libtcod.CENTER, '[%cW/S/Up/Down%c] Change Skill Selection' % HIGHLIGHT)
libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT - 4, libtcod.BKGND_NONE, libtcod.CENTER, '[%cEnter%c] Add/Upgrade Skill' % HIGHLIGHT)
libtcod.console_print_ex(menu_con, MENU_CON_XM, MENU_CON_HEIGHT - 3, libtcod.BKGND_NONE, libtcod.CENTER, '[%cESC%c] Return/Continue' % HIGHLIGHT)
crew_member.DisplayCrewInfo(menu_con, 29, 4, False)
libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
libtcod.console_print_frame(menu_con, 83, 4, 40, 30, clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
libtcod.console_print(menu_con, 86, 9, 'Description')
libtcod.console_print(menu_con, 86, 25, 'Activation Levels')
libtcod.console_set_default_foreground(menu_con, libtcod.white)
libtcod.console_set_default_foreground(menu_con, libtcod.light_grey)
libtcod.console_print_frame(menu_con, 56, 4, 27, 30, clear=False, flag=libtcod.BKGND_DEFAULT, fmt=0)
libtcod.console_print(menu_con, 59, 5, 'Skills')
libtcod.console_set_default_foreground(menu_con, libtcod.white)
y = 7
n = 0
for (skill, level) in skill_list:
if n == selected_skill:
libtcod.console_set_default_background(menu_con, libtcod.darker_grey)
libtcod.console_rect(menu_con, 57, y, 25, 1, False, flag=libtcod.BKGND_SET)
libtcod.console_set_default_background(menu_con, libtcod.black)
libtcod.console_set_default_foreground(menu_con, libtcod.white)
lines = wrap(skill.desc, 30, subsequent_indent=' ')
y2 = 11
for line in lines:
libtcod.console_print(menu_con, 86, y2, line)
y2 += 1
if skill.levels[0] == 100:
libtcod.console_print(menu_con, 86, 27, 'Always active')
else:
x2 = 86
for skill_level in skill.levels:
if skill_level <= level:
libtcod.console_set_default_foreground(menu_con, libtcod.light_blue)
else:
libtcod.console_set_default_foreground(menu_con, libtcod.white)
libtcod.console_print(menu_con, x2, 27, str(skill_level) + '%')
x2 += len(str(skill_level)) + 2
if skill.name == 'Gyrostabilizer' and (not campaign.gyro_skill_avail):
libtcod.console_set_default_foreground(menu_con, GREYED_COLOR)
elif level > 0:
libtcod.console_set_default_foreground(menu_con, libtcod.light_blue)
else:
libtcod.console_set_default_foreground(menu_con, libtcod.white)
libtcod.console_print(menu_con, 58, y, skill.name)
y += 1
n += 1
libtcod.console_blit(menu_con, 0, 0, MENU_CON_WIDTH, MENU_CON_HEIGHT, 0, MENU_CON_X, MENU_CON_Y)
libtcod.console_flush()
refresh = False
while not refresh:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
if libtcod.console_is_window_closed():
sys.exit()
if key.vk == libtcod.KEY_ESCAPE:
exit_menu = True
break
key_char = chr(key.c)
if key.vk == libtcod.KEY_UP or key_char in ['w', 'W']:
if selected_skill == 0:
selected_skill = len(skill_list) - 1
else:
selected_skill -= 1
refresh = True
elif key.vk == libtcod.KEY_DOWN or key_char in ['s', 'S']:
if selected_skill == len(skill_list) - 1:
selected_skill = 0
else:
selected_skill += 1
refresh = True
elif key.vk == libtcod.KEY_ENTER:
if not crew_member.NoActions() and crew_member.skill_pts > 0:
n = 0
for (skill, level) in skill_list:
if n == selected_skill:
if skill.name == 'Gyrostabilizer' and (not campaign.gyro_skill_avail):
break
if level == 0:
text = 'Spend a skill point to add skill "' + skill.name + '"'
if PopUp(text, confirm=True):
crew_member.skill_pts -= 1
crew_member.skills.append(SkillRecord(skill.name, skill.levels[0]))
campaign.campaign_journal.append(crew_member.name + ' added a new skill: ' + skill.name + ', now at ' + str(skill.levels[0]) + '% activation')
if not MIXER_ACTIVE:
return
if campaign is not None:
if not campaign.sounds:
return
if not 'new_skill' in SOUNDS:
return
if SOUNDS['new_skill'] is None:
return
mixer.Mix_PlayChannel(-1, SOUNDS['new_skill'], 0)
else:
for skill_level in skill.levels:
if level < skill_level:
text = 'Spend a skill point to upgrade skill "' + skill.name + '"'
if PopUp(text, confirm=True):
crew_member.skill_pts -= 1
crew_member.UpgradeSkill(skill.name, skill_level)
campaign.campaign_journal.append(crew_member.name + ' upgraded a skill: ' + skill.name + ', now at ' + str(skill_level) + '% activation')
if not MIXER_ACTIVE:
return
if campaign is not None:
if not campaign.sounds:
return
if not 'new_skill' in SOUNDS:
return
if SOUNDS['new_skill'] is None:
return
mixer.Mix_PlayChannel(-1, SOUNDS['new_skill'], 0)
break
break
n += 1
refresh = True
libtcod.console_flush()
|
armcom
|
positive
|
@attest.test
def custom_values(self):
<DeepExtract>
with open(self.path, 'w') as fp:
fp.write('---\n')
for (k, v) in iteritems(kwargs):
fp.write('%s: %s\n' % (k, v))
fp.write('---\n')
</DeepExtract>
entry = Entry(self.path, conf)
assert 'image' in entry
assert entry.image == '/img/test.png'
|
@attest.test
def custom_values(self):
with open(self.path, 'w') as fp:
fp.write('---\n')
for (k, v) in iteritems(kwargs):
fp.write('%s: %s\n' % (k, v))
fp.write('---\n')
entry = Entry(self.path, conf)
assert 'image' in entry
assert entry.image == '/img/test.png'
|
acrylamid
|
positive
|
def build_graph(self):
<DeepExtract>
self.question = tf.placeholder(tf.int32, [self.batch_size, self.max_input_left], name='input_question')
if self.dataset == 'TREC':
self.input_y = tf.placeholder(tf.float32, [self.batch_size, 6], name='input_y')
else:
self.input_y = tf.placeholder(tf.float32, [self.batch_size, 2], name='input_y')
self.q_position = tf.placeholder(tf.int32, [self.batch_size, self.max_input_left], name='q_position')
self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
</DeepExtract>
<DeepExtract>
with tf.name_scope('embedding'):
if self.is_Embedding_Needed:
W = tf.Variable(np.array(self.embeddings), name='W', dtype='float32', trainable=self.trainable)
W_pos = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], 0, 2 * math.pi), name='W', trainable=self.trainable)
else:
W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), name='W', trainable=self.trainable)
self.embedding_W = W
self.embedding_W_pos = W_pos
(self.embedded_chars_q, self.embedding_chars_q_phase) = self.concat_embedding(self.question, self.q_position)
self.embedded_chars_q = tf.reduce_sum([self.embedded_chars_q, self.embedding_chars_q_phase], 0)
print(self.embedded_chars_q)
self.represent = tf.reduce_mean(self.embedded_chars_q, 1)
print(self.represent)
</DeepExtract>
<DeepExtract>
self.l2_loss = tf.constant(0.0)
with tf.name_scope('output'):
if self.dataset == 'TREC':
W = tf.get_variable('W', shape=[self.embedding_size, 6], initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[6]), name='b')
else:
W = tf.get_variable('W', shape=[self.embedding_size, 2], initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[2]), name='b')
self.l2_loss += tf.nn.l2_loss(W)
self.l2_loss += tf.nn.l2_loss(b)
self.logits = tf.nn.xw_plus_b(self.represent, W, b, name='logits')
self.scores = tf.nn.softmax(self.logits, name='scores')
self.predictions = tf.argmax(self.scores, 1, name='predictions')
</DeepExtract>
<DeepExtract>
with tf.name_scope('loss'):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + self.l2_reg_lambda * self.l2_loss
with tf.name_scope('accuracy'):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'), name='accuracy')
</DeepExtract>
|
def build_graph(self):
self.question = tf.placeholder(tf.int32, [self.batch_size, self.max_input_left], name='input_question')
if self.dataset == 'TREC':
self.input_y = tf.placeholder(tf.float32, [self.batch_size, 6], name='input_y')
else:
self.input_y = tf.placeholder(tf.float32, [self.batch_size, 2], name='input_y')
self.q_position = tf.placeholder(tf.int32, [self.batch_size, self.max_input_left], name='q_position')
self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
with tf.name_scope('embedding'):
if self.is_Embedding_Needed:
W = tf.Variable(np.array(self.embeddings), name='W', dtype='float32', trainable=self.trainable)
W_pos = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], 0, 2 * math.pi), name='W', trainable=self.trainable)
else:
W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), name='W', trainable=self.trainable)
self.embedding_W = W
self.embedding_W_pos = W_pos
(self.embedded_chars_q, self.embedding_chars_q_phase) = self.concat_embedding(self.question, self.q_position)
self.embedded_chars_q = tf.reduce_sum([self.embedded_chars_q, self.embedding_chars_q_phase], 0)
print(self.embedded_chars_q)
self.represent = tf.reduce_mean(self.embedded_chars_q, 1)
print(self.represent)
self.l2_loss = tf.constant(0.0)
with tf.name_scope('output'):
if self.dataset == 'TREC':
W = tf.get_variable('W', shape=[self.embedding_size, 6], initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[6]), name='b')
else:
W = tf.get_variable('W', shape=[self.embedding_size, 2], initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[2]), name='b')
self.l2_loss += tf.nn.l2_loss(W)
self.l2_loss += tf.nn.l2_loss(b)
self.logits = tf.nn.xw_plus_b(self.represent, W, b, name='logits')
self.scores = tf.nn.softmax(self.logits, name='scores')
self.predictions = tf.argmax(self.scores, 1, name='predictions')
with tf.name_scope('loss'):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + self.l2_reg_lambda * self.l2_loss
with tf.name_scope('accuracy'):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'), name='accuracy')
|
complex-order
|
positive
|
def describe_subnets(connection, module):
"""
Describe Subnets.
module : AnsibleAWSModule object
connection : boto3 client connection object
"""
filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
subnet_ids = module.params.get('subnet_ids')
if subnet_ids is None:
subnet_ids = []
subnet_info = list()
try:
<DeepExtract>
response = connection.describe_subnets(SubnetIds=subnet_ids, Filters=filters)
</DeepExtract>
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Failed to describe subnets')
for subnet in response['Subnets']:
subnet['id'] = subnet['SubnetId']
subnet_info.append(camel_dict_to_snake_dict(subnet))
subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', []))
module.exit_json(subnets=subnet_info)
|
def describe_subnets(connection, module):
"""
Describe Subnets.
module : AnsibleAWSModule object
connection : boto3 client connection object
"""
filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
subnet_ids = module.params.get('subnet_ids')
if subnet_ids is None:
subnet_ids = []
subnet_info = list()
try:
response = connection.describe_subnets(SubnetIds=subnet_ids, Filters=filters)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Failed to describe subnets')
for subnet in response['Subnets']:
subnet['id'] = subnet['SubnetId']
subnet_info.append(camel_dict_to_snake_dict(subnet))
subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', []))
module.exit_json(subnets=subnet_info)
|
amazon.aws
|
positive
|
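The amazon.aws pair above relies on ansible_dict_to_boto3_filter_list to turn Ansible's filters dict into the Filters list that boto3 expects. A rough sketch of that conversion follows, with an illustrative helper name rather than the actual module_utils implementation.

def dict_to_boto3_filters(filters_dict):
    # {"vpc-id": "vpc-123"} -> [{"Name": "vpc-id", "Values": ["vpc-123"]}]
    return [
        {"Name": name, "Values": values if isinstance(values, list) else [values]}
        for name, values in filters_dict.items()
    ]

print(dict_to_boto3_filters({"vpc-id": "vpc-123", "state": ["available"]}))
# [{'Name': 'vpc-id', 'Values': ['vpc-123']}, {'Name': 'state', 'Values': ['available']}]
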
def __init__(self, applet: 'BluemanApplet') -> None:
super().__init__(application=applet, title=_('Plugins'), icon_name='blueman', name='PluginDialog', border_width=10, default_width=490, default_height=380, resizable=False, visible=False)
self.set_position(Gtk.WindowPosition.CENTER)
self.applet = applet
builder = Builder('applet-plugins-widget.ui')
self.description = builder.get_widget('description', Gtk.Label)
self.icon = builder.get_widget('icon', Gtk.Image)
self.author_txt = builder.get_widget('author_txt', Gtk.Label)
self.depends_hdr = builder.get_widget('depends_hdr', Gtk.Widget)
self.depends_txt = builder.get_widget('depends_txt', Gtk.Label)
self.conflicts_hdr = builder.get_widget('conflicts_hdr', Gtk.Widget)
self.conflicts_txt = builder.get_widget('conflicts_txt', Gtk.Label)
self.plugin_name = builder.get_widget('name', Gtk.Label)
self.main_container = builder.get_widget('main_container', Gtk.Bin)
self.content_grid = builder.get_widget('content', Gtk.Widget)
self.b_prefs = builder.get_widget('b_prefs', Gtk.ToggleButton)
self.b_prefs.connect('toggled', self.on_prefs_toggled)
self.add(builder.get_widget('all', Gtk.Container))
cr = Gtk.CellRendererToggle()
cr.connect('toggled', self.on_toggled)
data: List[ListDataDict] = [{'id': 'active', 'type': bool, 'renderer': cr, 'render_attrs': {'active': 0, 'activatable': 1, 'visible': 1}}, {'id': 'activatable', 'type': bool}, {'id': 'icon', 'type': str, 'renderer': Gtk.CellRendererPixbuf(), 'render_attrs': {'icon-name': 2}}, {'id': 'desc', 'type': str, 'renderer': Gtk.CellRendererText(), 'render_attrs': {'markup': 3}, 'view_props': {'expand': True}}, {'id': 'name', 'type': str}]
self.list = GenericList(data, headers_visible=False, visible=True)
self.list.liststore.set_sort_column_id(3, Gtk.SortType.ASCENDING)
self.list.liststore.set_sort_func(3, self.list_compare_func)
self.list.selection.connect('changed', self.on_selection_changed)
plugin_list = builder.get_widget('plugin_list', Gtk.ScrolledWindow)
plugin_info = builder.get_widget('main_scrolled_window', Gtk.ScrolledWindow)
plugin_list.add(self.list)
if Gtk.get_minor_version() >= 16:
plugin_list.props.overlay_scrolling = False
plugin_info.props.overlay_scrolling = False
<DeepExtract>
classes: Dict[str, Type[AppletPlugin]] = self.applet.Plugins.get_classes()
loaded = self.applet.Plugins.get_loaded()
for (name, cls) in classes.items():
if cls.is_configurable():
desc = f'<span weight="bold">{name}</span>'
else:
desc = name
self.list.append(active=name in loaded, icon=cls.__icon__, activatable=cls.__unloadable__, name=name, desc=desc)
</DeepExtract>
self.sig_a: int = self.applet.Plugins.connect('plugin-loaded', self.plugin_state_changed, True)
self.sig_b: int = self.applet.Plugins.connect('plugin-unloaded', self.plugin_state_changed, False)
self.connect('delete-event', self._on_close)
self.list.set_cursor(0)
close_action = Gio.SimpleAction.new('close', None)
close_action.connect('activate', lambda x, y: self.close())
self.add_action(close_action)
|
def __init__(self, applet: 'BluemanApplet') -> None:
super().__init__(application=applet, title=_('Plugins'), icon_name='blueman', name='PluginDialog', border_width=10, default_width=490, default_height=380, resizable=False, visible=False)
self.set_position(Gtk.WindowPosition.CENTER)
self.applet = applet
builder = Builder('applet-plugins-widget.ui')
self.description = builder.get_widget('description', Gtk.Label)
self.icon = builder.get_widget('icon', Gtk.Image)
self.author_txt = builder.get_widget('author_txt', Gtk.Label)
self.depends_hdr = builder.get_widget('depends_hdr', Gtk.Widget)
self.depends_txt = builder.get_widget('depends_txt', Gtk.Label)
self.conflicts_hdr = builder.get_widget('conflicts_hdr', Gtk.Widget)
self.conflicts_txt = builder.get_widget('conflicts_txt', Gtk.Label)
self.plugin_name = builder.get_widget('name', Gtk.Label)
self.main_container = builder.get_widget('main_container', Gtk.Bin)
self.content_grid = builder.get_widget('content', Gtk.Widget)
self.b_prefs = builder.get_widget('b_prefs', Gtk.ToggleButton)
self.b_prefs.connect('toggled', self.on_prefs_toggled)
self.add(builder.get_widget('all', Gtk.Container))
cr = Gtk.CellRendererToggle()
cr.connect('toggled', self.on_toggled)
data: List[ListDataDict] = [{'id': 'active', 'type': bool, 'renderer': cr, 'render_attrs': {'active': 0, 'activatable': 1, 'visible': 1}}, {'id': 'activatable', 'type': bool}, {'id': 'icon', 'type': str, 'renderer': Gtk.CellRendererPixbuf(), 'render_attrs': {'icon-name': 2}}, {'id': 'desc', 'type': str, 'renderer': Gtk.CellRendererText(), 'render_attrs': {'markup': 3}, 'view_props': {'expand': True}}, {'id': 'name', 'type': str}]
self.list = GenericList(data, headers_visible=False, visible=True)
self.list.liststore.set_sort_column_id(3, Gtk.SortType.ASCENDING)
self.list.liststore.set_sort_func(3, self.list_compare_func)
self.list.selection.connect('changed', self.on_selection_changed)
plugin_list = builder.get_widget('plugin_list', Gtk.ScrolledWindow)
plugin_info = builder.get_widget('main_scrolled_window', Gtk.ScrolledWindow)
plugin_list.add(self.list)
if Gtk.get_minor_version() >= 16:
plugin_list.props.overlay_scrolling = False
plugin_info.props.overlay_scrolling = False
classes: Dict[str, Type[AppletPlugin]] = self.applet.Plugins.get_classes()
loaded = self.applet.Plugins.get_loaded()
for (name, cls) in classes.items():
if cls.is_configurable():
desc = f'<span weight="bold">{name}</span>'
else:
desc = name
self.list.append(active=name in loaded, icon=cls.__icon__, activatable=cls.__unloadable__, name=name, desc=desc)
self.sig_a: int = self.applet.Plugins.connect('plugin-loaded', self.plugin_state_changed, True)
self.sig_b: int = self.applet.Plugins.connect('plugin-unloaded', self.plugin_state_changed, False)
self.connect('delete-event', self._on_close)
self.list.set_cursor(0)
close_action = Gio.SimpleAction.new('close', None)
close_action.connect('activate', lambda x, y: self.close())
self.add_action(close_action)
|
blueman
|
positive
|
def test_ohem_sampler_empty_gt():
assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False)
bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]])
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.LongTensor([])
gt_bboxes_ignore = torch.Tensor([])
assign_result = assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels)
<DeepExtract>
try:
from test_forward import _get_detector_cfg
except ImportError:
import sys
from os.path import dirname
sys.path.insert(0, dirname(__file__))
from test_forward import _get_detector_cfg
(model, train_cfg, test_cfg) = _get_detector_cfg('faster_rcnn_ohem_r50_fpn_1x.py')
model['pretrained'] = None
model['bbox_roi_extractor']['roi_layer']['use_torchvision'] = True
from mmdet.models import build_detector
context = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
context = context
</DeepExtract>
sampler = OHEMSampler(num=10, pos_fraction=0.5, context=context, neg_pos_ub=-1, add_gt_as_proposals=True)
feats = [torch.rand(1, 256, int(2 ** i), int(2 ** i)) for i in [6, 5, 4, 3, 2]]
sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def test_ohem_sampler_empty_gt():
assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5, ignore_wrt_candidates=False)
bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]])
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.LongTensor([])
gt_bboxes_ignore = torch.Tensor([])
assign_result = assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, gt_labels=gt_labels)
try:
from test_forward import _get_detector_cfg
except ImportError:
import sys
from os.path import dirname
sys.path.insert(0, dirname(__file__))
from test_forward import _get_detector_cfg
(model, train_cfg, test_cfg) = _get_detector_cfg('faster_rcnn_ohem_r50_fpn_1x.py')
model['pretrained'] = None
model['bbox_roi_extractor']['roi_layer']['use_torchvision'] = True
from mmdet.models import build_detector
context = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
context = context
sampler = OHEMSampler(num=10, pos_fraction=0.5, context=context, neg_pos_ub=-1, add_gt_as_proposals=True)
feats = [torch.rand(1, 256, int(2 ** i), int(2 ** i)) for i in [6, 5, 4, 3, 2]]
sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
Dense-RepPoints
|
positive
|
def arrange(self) -> None:
<DeepExtract>
GUIUtilities.clear_layout(self)
</DeepExtract>
if len(self.items) > 0:
row = col = 0
n = max(len(self.items), self.cols)
for idx in range(n):
self.setColumnStretch(col, 1)
self.setRowStretch(row, 1)
if idx < len(self.items):
widget = self.items[idx]
self.addWidget(widget, row, col)
else:
self.addWidget(QWidget(), row, col)
col += 1
if col % self.cols == 0:
row += 1
col = 0
|
def arrange(self) -> None:
GUIUtilities.clear_layout(self)
if len(self.items) > 0:
row = col = 0
n = max(len(self.items), self.cols)
for idx in range(n):
self.setColumnStretch(col, 1)
self.setRowStretch(row, 1)
if idx < len(self.items):
widget = self.items[idx]
self.addWidget(widget, row, col)
else:
self.addWidget(QWidget(), row, col)
col += 1
if col % self.cols == 0:
row += 1
col = 0
|
CvStudio
|
positive
|
def transpose(self, method):
"""
Transpose bounding box (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
"""
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError('Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented')
(image_width, image_height) = self.size
<DeepExtract>
if self.mode == 'xyxy':
(xmin, ymin, xmax, ymax) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmax, ymax)
elif self.mode == 'xywh':
TO_REMOVE = 1
(xmin, ymin, w, h) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmin + (w - TO_REMOVE).clamp(min=0), ymin + (h - TO_REMOVE).clamp(min=0))
else:
raise RuntimeError('Should not be here')
</DeepExtract>
if method == FLIP_LEFT_RIGHT:
TO_REMOVE = 1
transposed_xmin = image_width - xmax - TO_REMOVE
transposed_xmax = image_width - xmin - TO_REMOVE
transposed_ymin = ymin
transposed_ymax = ymax
elif method == FLIP_TOP_BOTTOM:
transposed_xmin = xmin
transposed_xmax = xmax
transposed_ymin = image_height - ymax
transposed_ymax = image_height - ymin
transposed_boxes = torch.cat((transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1)
bbox = BoxList(transposed_boxes, self.size, mode='xyxy')
for (k, v) in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.transpose(method)
bbox.add_field(k, v)
return bbox.convert(self.mode)
|
def transpose(self, method):
"""
Transpose bounding box (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
"""
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError('Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented')
(image_width, image_height) = self.size
if self.mode == 'xyxy':
(xmin, ymin, xmax, ymax) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmax, ymax)
elif self.mode == 'xywh':
TO_REMOVE = 1
(xmin, ymin, w, h) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmin + (w - TO_REMOVE).clamp(min=0), ymin + (h - TO_REMOVE).clamp(min=0))
else:
raise RuntimeError('Should not be here')
if method == FLIP_LEFT_RIGHT:
TO_REMOVE = 1
transposed_xmin = image_width - xmax - TO_REMOVE
transposed_xmax = image_width - xmin - TO_REMOVE
transposed_ymin = ymin
transposed_ymax = ymax
elif method == FLIP_TOP_BOTTOM:
transposed_xmin = xmin
transposed_xmax = xmax
transposed_ymin = image_height - ymax
transposed_ymax = image_height - ymin
transposed_boxes = torch.cat((transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1)
bbox = BoxList(transposed_boxes, self.size, mode='xyxy')
for (k, v) in self.extra_fields.items():
if not isinstance(v, torch.Tensor):
v = v.transpose(method)
bbox.add_field(k, v)
return bbox.convert(self.mode)
|
AE-WTN
|
positive
|
def add_embeddings(self):
with tf.name_scope('embedding'):
if self.is_Embedding_Needed:
W = tf.Variable(np.array(self.embeddings), name='W', dtype='float32', trainable=self.trainable)
W_pos = tf.Variable(tf.random_uniform([500, self.embedding_size], -1.0, 1.0), name='W', trainable=self.trainable)
else:
W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), name='W', trainable=self.trainable)
self.embedding_W = W
self.embedding_W_pos = W_pos
<DeepExtract>
embedded_chars_q = tf.nn.embedding_lookup(self.embedding_W, self.question)
embedding_chars_q_pos = tf.nn.embedding_lookup(self.embedding_W_pos, self.q_position)
(self.embedded_chars_q, self.embedded_chars_q_pos) = (embedded_chars_q, embedding_chars_q_pos)
</DeepExtract>
self.embedded_chars_q = tf.reduce_sum([self.embedded_chars_q, self.embedded_chars_q_pos], 0)
|
def add_embeddings(self):
with tf.name_scope('embedding'):
if self.is_Embedding_Needed:
W = tf.Variable(np.array(self.embeddings), name='W', dtype='float32', trainable=self.trainable)
W_pos = tf.Variable(tf.random_uniform([500, self.embedding_size], -1.0, 1.0), name='W', trainable=self.trainable)
else:
W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), name='W', trainable=self.trainable)
self.embedding_W = W
self.embedding_W_pos = W_pos
embedded_chars_q = tf.nn.embedding_lookup(self.embedding_W, self.question)
embedding_chars_q_pos = tf.nn.embedding_lookup(self.embedding_W_pos, self.q_position)
(self.embedded_chars_q, self.embedded_chars_q_pos) = (embedded_chars_q, embedding_chars_q_pos)
self.embedded_chars_q = tf.reduce_sum([self.embedded_chars_q, self.embedded_chars_q_pos], 0)
|
complex-order
|
positive
|
@pytest.mark.ci
def test_lpq_params():
<DeepExtract>
assert dml_lpq.params['ml_m_z']['d'] == test_values
assert dml_lpq.params['ml_m_d_z0']['d'][0] is None
param_list_1 = [dml_lpq.models['ml_m_z']['d'][0][fold].n_estimators for fold in range(n_folds)]
assert all((param == n_est_test for param in param_list_1))
param_list_2 = [dml_lpq.models['ml_m_d_z0']['d'][0][fold].n_estimators for fold in range(n_folds)]
assert all((param == n_est_default for param in param_list_2))
</DeepExtract>
param_list_2 = [dml_lpq.models['ml_m_d_z1']['d'][0][fold].n_estimators for fold in range(n_folds)]
assert all((param == n_est_default for param in param_list_2))
param_list_2 = [dml_lpq.models['ml_g_du_z0']['d'][0][fold].n_estimators for fold in range(n_folds)]
assert all((param == n_est_default for param in param_list_2))
param_list_2 = [dml_lpq.models['ml_g_du_z1']['d'][0][fold].n_estimators for fold in range(n_folds)]
assert all((param == n_est_default for param in param_list_2))
|
@pytest.mark.ci
def test_lpq_params():
assert dml_lpq.params['ml_m_z']['d'] == test_values
assert dml_lpq.params['ml_m_d_z0']['d'][0] is None
param_list_1 = [dml_lpq.models['ml_m_z']['d'][0][fold].n_estimators for fold in range(n_folds)]
assert all((param == n_est_test for param in param_list_1))
param_list_2 = [dml_lpq.models['ml_m_d_z0']['d'][0][fold].n_estimators for fold in range(n_folds)]
assert all((param == n_est_default for param in param_list_2))
param_list_2 = [dml_lpq.models['ml_m_d_z1']['d'][0][fold].n_estimators for fold in range(n_folds)]
assert all((param == n_est_default for param in param_list_2))
param_list_2 = [dml_lpq.models['ml_g_du_z0']['d'][0][fold].n_estimators for fold in range(n_folds)]
assert all((param == n_est_default for param in param_list_2))
param_list_2 = [dml_lpq.models['ml_g_du_z1']['d'][0][fold].n_estimators for fold in range(n_folds)]
assert all((param == n_est_default for param in param_list_2))
|
doubleml-for-py
|
positive
|
def find_children_by_path(self, from_element, path):
path_elements = path.split('/')
element = from_element
for element_name in path_elements[:-1]:
<DeepExtract>
if element is None:
element = None
ret = self.find(element_name, element)
if ret is None:
reference = self.find(element_name + '-REF', element)
if reference is not None:
raise 'use follow_ref!'
element = ret
</DeepExtract>
<DeepExtract>
if element is None:
children = []
ret = self.findall(path_elements[-1], element)
if not ret:
ret_list = self.findall(path_elements[-1] + '-REF', element)
ret = [self.get_short_name_path(item.text) for item in ret_list]
if len(ret) > 0:
raise 'use follow_all_ref!'
children = ret
</DeepExtract>
return children
|
def find_children_by_path(self, from_element, path):
path_elements = path.split('/')
element = from_element
for element_name in path_elements[:-1]:
if element is None:
element = None
ret = self.find(element_name, element)
if ret is None:
reference = self.find(element_name + '-REF', element)
if reference is not None:
raise 'use follow_ref!'
element = ret
if element is None:
children = []
ret = self.findall(path_elements[-1], element)
if not ret:
ret_list = self.findall(path_elements[-1] + '-REF', element)
ret = [self.get_short_name_path(item.text) for item in ret_list]
if len(ret) > 0:
raise 'use follow_all_ref!'
children = ret
return children
|
canmatrix
|
positive
|
def move(self, dest, src):
<DeepExtract>
s_set = self.lookup.setdefault(src, _CopySet(src))
</DeepExtract>
<DeepExtract>
d_set = self.lookup.setdefault(dest, _CopySet(dest))
</DeepExtract>
if s_set is d_set:
return False
d_set.remove(dest)
s_set.add(dest)
self.lookup[dest] = s_set
return True
|
def move(self, dest, src):
s_set = self.lookup.setdefault(src, _CopySet(src))
d_set = self.lookup.setdefault(dest, _CopySet(dest))
if s_set is d_set:
return False
d_set.remove(dest)
s_set.add(dest)
self.lookup[dest] = s_set
return True
|
Apk-Changer
|
positive
|
def test_basic(self):
"""Test basic types."""
<DeepExtract>
dbus_type = get_dbus_type(Int)
self.assertEqual(dbus_type, 'i')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(Int)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'i')
expected_type = GLib.VariantType.new('i')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('i')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(Int, Tuple) and len(get_type_arguments(Int)) == 1
self.assertEqual(is_tuple_of_one(Int), expected_value)
self.assertEqual(is_tuple_of_one('i'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[Int]))
self.assertTrue(is_tuple_of_one('({})'.format('i')))
</DeepExtract>
<DeepExtract>
dbus_type = get_dbus_type(Bool)
self.assertEqual(dbus_type, 'b')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(Bool)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'b')
expected_type = GLib.VariantType.new('b')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('b')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(Bool, Tuple) and len(get_type_arguments(Bool)) == 1
self.assertEqual(is_tuple_of_one(Bool), expected_value)
self.assertEqual(is_tuple_of_one('b'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[Bool]))
self.assertTrue(is_tuple_of_one('({})'.format('b')))
</DeepExtract>
<DeepExtract>
dbus_type = get_dbus_type(Double)
self.assertEqual(dbus_type, 'd')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(Double)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'd')
expected_type = GLib.VariantType.new('d')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('d')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(Double, Tuple) and len(get_type_arguments(Double)) == 1
self.assertEqual(is_tuple_of_one(Double), expected_value)
self.assertEqual(is_tuple_of_one('d'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[Double]))
self.assertTrue(is_tuple_of_one('({})'.format('d')))
</DeepExtract>
<DeepExtract>
dbus_type = get_dbus_type(Str)
self.assertEqual(dbus_type, 's')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(Str)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 's')
expected_type = GLib.VariantType.new('s')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('s')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(Str, Tuple) and len(get_type_arguments(Str)) == 1
self.assertEqual(is_tuple_of_one(Str), expected_value)
self.assertEqual(is_tuple_of_one('s'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[Str]))
self.assertTrue(is_tuple_of_one('({})'.format('s')))
</DeepExtract>
<DeepExtract>
dbus_type = get_dbus_type(ObjPath)
self.assertEqual(dbus_type, 'o')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(ObjPath)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'o')
expected_type = GLib.VariantType.new('o')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('o')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(ObjPath, Tuple) and len(get_type_arguments(ObjPath)) == 1
self.assertEqual(is_tuple_of_one(ObjPath), expected_value)
self.assertEqual(is_tuple_of_one('o'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[ObjPath]))
self.assertTrue(is_tuple_of_one('({})'.format('o')))
</DeepExtract>
<DeepExtract>
dbus_type = get_dbus_type(UnixFD)
self.assertEqual(dbus_type, 'h')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(UnixFD)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'h')
expected_type = GLib.VariantType.new('h')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('h')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(UnixFD, Tuple) and len(get_type_arguments(UnixFD)) == 1
self.assertEqual(is_tuple_of_one(UnixFD), expected_value)
self.assertEqual(is_tuple_of_one('h'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[UnixFD]))
self.assertTrue(is_tuple_of_one('({})'.format('h')))
</DeepExtract>
<DeepExtract>
dbus_type = get_dbus_type(Variant)
self.assertEqual(dbus_type, 'v')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(Variant)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'v')
expected_type = GLib.VariantType.new('v')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('v')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(Variant, Tuple) and len(get_type_arguments(Variant)) == 1
self.assertEqual(is_tuple_of_one(Variant), expected_value)
self.assertEqual(is_tuple_of_one('v'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[Variant]))
self.assertTrue(is_tuple_of_one('({})'.format('v')))
</DeepExtract>
|
def test_basic(self):
"""Test basic types."""
dbus_type = get_dbus_type(Int)
self.assertEqual(dbus_type, 'i')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(Int)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'i')
expected_type = GLib.VariantType.new('i')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('i')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(Int, Tuple) and len(get_type_arguments(Int)) == 1
self.assertEqual(is_tuple_of_one(Int), expected_value)
self.assertEqual(is_tuple_of_one('i'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[Int]))
self.assertTrue(is_tuple_of_one('({})'.format('i')))
dbus_type = get_dbus_type(Bool)
self.assertEqual(dbus_type, 'b')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(Bool)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'b')
expected_type = GLib.VariantType.new('b')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('b')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(Bool, Tuple) and len(get_type_arguments(Bool)) == 1
self.assertEqual(is_tuple_of_one(Bool), expected_value)
self.assertEqual(is_tuple_of_one('b'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[Bool]))
self.assertTrue(is_tuple_of_one('({})'.format('b')))
dbus_type = get_dbus_type(Double)
self.assertEqual(dbus_type, 'd')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(Double)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'd')
expected_type = GLib.VariantType.new('d')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('d')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(Double, Tuple) and len(get_type_arguments(Double)) == 1
self.assertEqual(is_tuple_of_one(Double), expected_value)
self.assertEqual(is_tuple_of_one('d'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[Double]))
self.assertTrue(is_tuple_of_one('({})'.format('d')))
dbus_type = get_dbus_type(Str)
self.assertEqual(dbus_type, 's')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(Str)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 's')
expected_type = GLib.VariantType.new('s')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('s')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(Str, Tuple) and len(get_type_arguments(Str)) == 1
self.assertEqual(is_tuple_of_one(Str), expected_value)
self.assertEqual(is_tuple_of_one('s'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[Str]))
self.assertTrue(is_tuple_of_one('({})'.format('s')))
dbus_type = get_dbus_type(ObjPath)
self.assertEqual(dbus_type, 'o')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(ObjPath)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'o')
expected_type = GLib.VariantType.new('o')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('o')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(ObjPath, Tuple) and len(get_type_arguments(ObjPath)) == 1
self.assertEqual(is_tuple_of_one(ObjPath), expected_value)
self.assertEqual(is_tuple_of_one('o'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[ObjPath]))
self.assertTrue(is_tuple_of_one('({})'.format('o')))
dbus_type = get_dbus_type(UnixFD)
self.assertEqual(dbus_type, 'h')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(UnixFD)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'h')
expected_type = GLib.VariantType.new('h')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('h')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(UnixFD, Tuple) and len(get_type_arguments(UnixFD)) == 1
self.assertEqual(is_tuple_of_one(UnixFD), expected_value)
self.assertEqual(is_tuple_of_one('h'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[UnixFD]))
self.assertTrue(is_tuple_of_one('({})'.format('h')))
dbus_type = get_dbus_type(Variant)
self.assertEqual(dbus_type, 'v')
self.assertTrue(GLib.VariantType.string_is_valid(dbus_type))
variant_type = get_variant_type(Variant)
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertEqual(variant_type.dup_string(), 'v')
expected_type = GLib.VariantType.new('v')
self.assertTrue(expected_type.equal(variant_type))
variant_type = get_variant_type('v')
self.assertIsInstance(variant_type, GLib.VariantType)
self.assertTrue(expected_type.equal(variant_type))
expected_value = is_base_type(Variant, Tuple) and len(get_type_arguments(Variant)) == 1
self.assertEqual(is_tuple_of_one(Variant), expected_value)
self.assertEqual(is_tuple_of_one('v'), expected_value)
self.assertTrue(is_tuple_of_one(Tuple[Variant]))
self.assertTrue(is_tuple_of_one('({})'.format('v')))
|
dasbus
|
positive
|
def _restart_thread(self):
"""
Restarts subscription threads if their connection drops.
:return:
"""
while self.running['restart_thread']:
try:
endpoint = self.restart_q.get(timeout=0.5)
except TimeoutError:
continue
log.info('_restart_thread(): Restarting Thread for endpoint %s', endpoint)
<DeepExtract>
self.threads_running[endpoint] = False
try:
self.endpoint_threads[endpoint].join(timeout=1)
except TimeoutError:
self.endpoint_threads.pop(endpoint)
self.garbage_collector()
</DeepExtract>
<DeepExtract>
log.debug('GeminiWSS.subscribe(): Starting Thread for endpoint %s', endpoint)
self.threads_running[endpoint] = True
t = Thread(target=self._subscription_thread, args=(endpoint,), name=endpoint)
t.daemon = True
t.start()
self.endpoint_threads[endpoint] = t
</DeepExtract>
|
def _restart_thread(self):
"""
Restarts subscription threads if their connection drops.
:return:
"""
while self.running['restart_thread']:
try:
endpoint = self.restart_q.get(timeout=0.5)
except TimeoutError:
continue
log.info('_restart_thread(): Restarting Thread for endpoint %s', endpoint)
self.threads_running[endpoint] = False
try:
self.endpoint_threads[endpoint].join(timeout=1)
except TimeoutError:
self.endpoint_threads.pop(endpoint)
self.garbage_collector()
log.debug('GeminiWSS.subscribe(): Starting Thread for endpoint %s', endpoint)
self.threads_running[endpoint] = True
t = Thread(target=self._subscription_thread, args=(endpoint,), name=endpoint)
t.daemon = True
t.start()
self.endpoint_threads[endpoint] = t
|
bitex
|
positive
|
def read_atom(stream):
header = stream.read(8)
if not header:
return
assert len(header) == 8
n = 0
size = struct.unpack('>I', header[:4])[0]
assert size > 0
n += 4
type = header[4:8]
n += 4
assert type != b'uuid'
if size == 1:
<DeepExtract>
size = struct.unpack('>Q', stream.read(8))[0]
</DeepExtract>
n += 8
left = size - n
if type in atom_readers:
return atom_readers[type](stream, size, left, type)
raise NotImplementedError('%s: %d' % (type, left))
|
def read_atom(stream):
header = stream.read(8)
if not header:
return
assert len(header) == 8
n = 0
size = struct.unpack('>I', header[:4])[0]
assert size > 0
n += 4
type = header[4:8]
n += 4
assert type != b'uuid'
if size == 1:
size = struct.unpack('>Q', stream.read(8))[0]
n += 8
left = size - n
if type in atom_readers:
return atom_readers[type](stream, size, left, type)
raise NotImplementedError('%s: %d' % (type, left))
|
acmpv
|
positive
|
def process_create_message(message: Message, env: Environment) -> Evm:
"""
Executes a call to create a smart contract.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
evm: :py:class:`~ethereum.berlin.vm.Evm`
Items containing execution specific objects.
"""
begin_transaction(env.state)
destroy_storage(env.state, message.current_target)
increment_nonce(env.state, message.current_target)
<DeepExtract>
if message.depth > STACK_DEPTH_LIMIT:
raise StackDepthLimitError('Stack depth limit reached')
begin_transaction(env.state)
touch_account(env.state, message.current_target)
if message.should_transfer_value and message.value != 0:
move_ether(env.state, message.caller, message.current_target, message.value)
evm = execute_code(message, env)
if evm.has_erred:
rollback_transaction(env.state)
else:
commit_transaction(env.state)
evm = evm
</DeepExtract>
if not evm.has_erred:
contract_code = evm.output
contract_code_gas = len(contract_code) * GAS_CODE_DEPOSIT
try:
charge_gas(evm, contract_code_gas)
ensure(len(contract_code) <= MAX_CODE_SIZE, OutOfGasError)
except ExceptionalHalt:
rollback_transaction(env.state)
evm.gas_left = U256(0)
evm.output = b''
evm.has_erred = True
else:
set_code(env.state, message.current_target, contract_code)
commit_transaction(env.state)
else:
rollback_transaction(env.state)
return evm
|
def process_create_message(message: Message, env: Environment) -> Evm:
"""
Executes a call to create a smart contract.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
evm: :py:class:`~ethereum.berlin.vm.Evm`
Items containing execution specific objects.
"""
begin_transaction(env.state)
destroy_storage(env.state, message.current_target)
increment_nonce(env.state, message.current_target)
if message.depth > STACK_DEPTH_LIMIT:
raise StackDepthLimitError('Stack depth limit reached')
begin_transaction(env.state)
touch_account(env.state, message.current_target)
if message.should_transfer_value and message.value != 0:
move_ether(env.state, message.caller, message.current_target, message.value)
evm = execute_code(message, env)
if evm.has_erred:
rollback_transaction(env.state)
else:
commit_transaction(env.state)
evm = evm
if not evm.has_erred:
contract_code = evm.output
contract_code_gas = len(contract_code) * GAS_CODE_DEPOSIT
try:
charge_gas(evm, contract_code_gas)
ensure(len(contract_code) <= MAX_CODE_SIZE, OutOfGasError)
except ExceptionalHalt:
rollback_transaction(env.state)
evm.gas_left = U256(0)
evm.output = b''
evm.has_erred = True
else:
set_code(env.state, message.current_target, contract_code)
commit_transaction(env.state)
else:
rollback_transaction(env.state)
return evm
|
eth1.0-specs
|
positive
|
def send_reset_password_verification_email_notification(request: Request, user: 'AbstractBaseUser') -> None:
signer = ResetPasswordSigner({'user_id': get_user_verification_id(user)}, request=request)
<DeepExtract>
template_selector = registration_settings.VERIFICATION_TEMPLATES_SELECTOR
notification_method = NotificationMethod.EMAIL
try:
template_config_data = template_selector(request=request, user=user, notification_method=notification_method, notification_type=NotificationType.RESET_PASSWORD_VERIFICATION)
except (VerificationTemplatesNotFound, LookupError):
template_config_data = select_default_templates(request=request, user=user, notification_method=notification_method, notification_type=NotificationType.RESET_PASSWORD_VERIFICATION)
template_config_data = template_config_data
</DeepExtract>
notification_data = {'params_signer': signer}
send_verification_notification(NotificationType.RESET_PASSWORD_VERIFICATION, user, notification_data, template_config_data)
|
def send_reset_password_verification_email_notification(request: Request, user: 'AbstractBaseUser') -> None:
signer = ResetPasswordSigner({'user_id': get_user_verification_id(user)}, request=request)
template_selector = registration_settings.VERIFICATION_TEMPLATES_SELECTOR
notification_method = NotificationMethod.EMAIL
try:
template_config_data = template_selector(request=request, user=user, notification_method=notification_method, notification_type=NotificationType.RESET_PASSWORD_VERIFICATION)
except (VerificationTemplatesNotFound, LookupError):
template_config_data = select_default_templates(request=request, user=user, notification_method=notification_method, notification_type=NotificationType.RESET_PASSWORD_VERIFICATION)
template_config_data = template_config_data
notification_data = {'params_signer': signer}
send_verification_notification(NotificationType.RESET_PASSWORD_VERIFICATION, user, notification_data, template_config_data)
|
django-rest-registration
|
positive
|
@pytest.mark.parametrize('num_branch,num_spks,permute', [pytest.param(2, 2, True), pytest.param(2, 2, False), pytest.param(3, 2, True)])
def test_wa(num_branch, num_spks, permute):
<DeepExtract>
transform = EnhTransform(feats='spectrogram-log-cmvn', frame_len=512, frame_hop=256, center=True, stft_mode='librosa')
base_rnn_cls = aps_sse_nnet('sse@base_rnn')
nnet = base_rnn_cls(enh_transform=transform, num_bins=257, input_size=257, num_layers=2, num_spks=num_branch, hidden=256, training_mode='time')
</DeepExtract>
kwargs = {'permute': permute, 'num_spks': num_spks, 'objf': 'L1'}
task = aps_task('sse@wa', nnet, **kwargs)
<DeepExtract>
(batch_size, chunk_size) = (4, 64000)
egs = {'mix': th.rand(batch_size, chunk_size), 'ref': [th.rand(batch_size, chunk_size) for _ in range(num_branch)]}
if num_branch == 1:
egs['ref'] = egs['ref'][0]
egs = egs
</DeepExtract>
<DeepExtract>
for _ in range(5):
stats = task(egs)
loss = stats['loss']
loss.backward()
norm = clip_grad_norm_(task.parameters(), 20)
assert not math.isnan(loss.item())
assert not math.isnan(norm.item())
</DeepExtract>
|
@pytest.mark.parametrize('num_branch,num_spks,permute', [pytest.param(2, 2, True), pytest.param(2, 2, False), pytest.param(3, 2, True)])
def test_wa(num_branch, num_spks, permute):
transform = EnhTransform(feats='spectrogram-log-cmvn', frame_len=512, frame_hop=256, center=True, stft_mode='librosa')
base_rnn_cls = aps_sse_nnet('sse@base_rnn')
nnet = base_rnn_cls(enh_transform=transform, num_bins=257, input_size=257, num_layers=2, num_spks=num_branch, hidden=256, training_mode='time')
kwargs = {'permute': permute, 'num_spks': num_spks, 'objf': 'L1'}
task = aps_task('sse@wa', nnet, **kwargs)
(batch_size, chunk_size) = (4, 64000)
egs = {'mix': th.rand(batch_size, chunk_size), 'ref': [th.rand(batch_size, chunk_size) for _ in range(num_branch)]}
if num_branch == 1:
egs['ref'] = egs['ref'][0]
egs = egs
for _ in range(5):
stats = task(egs)
loss = stats['loss']
loss.backward()
norm = clip_grad_norm_(task.parameters(), 20)
assert not math.isnan(loss.item())
assert not math.isnan(norm.item())
|
aps
|
positive
|
def response_json_at_path_does_not_contain(response, json_path, value):
"""
"""
<DeepExtract>
results = jsonpath(response.json(), json_path)
if not results:
fail('Match not found at <{path}> for <{body}>'.format(path=json_path, body=response.json()))
values = results
</DeepExtract>
[assert_that(actual_value).does_not_contain(eval(value)) for actual_value in values]
|
def response_json_at_path_does_not_contain(response, json_path, value):
"""
"""
results = jsonpath(response.json(), json_path)
if not results:
fail('Match not found at <{path}> for <{body}>'.format(path=json_path, body=response.json()))
values = results
[assert_that(actual_value).does_not_contain(eval(value)) for actual_value in values]
|
behave-restful
|
positive
|
def overlay_display(self, im_list, color_coding=np.array([[1, 0, 1], [0, 1, 0]]), export_pdf='', show=True, f=False, shift=[0, 0], final_fov=False, interp='gaussian', scale='lin', gamma=0.5, dynamic_range=[1000.0], rescale=True):
"""Overlay primary polarization images of a list of images to compare structures.
Args:
im_list (list): list of images to align to the current image
color_coding (numpy.array): Color coding of each image in the composite
f (matplotlib.pyplot.figure): Figure to overlay on top of
export_pdf (str): path to exported PDF with plot
show (bool): Display the plot if true
shift (list): list of manual image shifts,
otherwise use the shift from maximum cross-correlation
final_fov (float): fov of the comparison image (rad).
If False it is the largestinput image fov
scale (str) : compare images in 'log','lin',or 'gamma' scale
gamma (float): exponent for gamma scale comparison
dynamic_range (float): dynamic range for log and gamma scale comparisons
Returns:
(matplotlib.figure.Figure): figure object with image
"""
if not f:
f = plt.figure()
plt.clf()
if len(dynamic_range) == 1:
dynamic_range = dynamic_range * np.ones(len(im_list) + 1)
if not isinstance(shift, np.ndarray) and (not isinstance(shift, bool)):
shift = matlib.repmat(shift, len(im_list), 1)
psize = self.psize
max_fov = np.max([self.xdim * self.psize, self.ydim * self.psize])
for i in range(0, len(im_list)):
psize = np.min([psize, im_list[i].psize])
max_fov = np.max([max_fov, im_list[i].xdim * im_list[i].psize, im_list[i].ydim * im_list[i].psize])
if not final_fov:
final_fov = max_fov
<DeepExtract>
im0 = self.copy()
if not np.all(im0.polrep == np.array([im.polrep for im in im_list])):
raise Exception('In align_images, all images must have the same polrep!')
if not np.all(im0.pol_prim == np.array([im.pol_prim for im in im_list])):
raise Exception('In find_shift, all images must have the same pol_prim!')
if len(dynamic_range) == 1:
dynamic_range = dynamic_range * np.ones(len(im_list) + 1)
useshift = True
if isinstance(shift, bool):
useshift = False
psize = im0.psize
max_fov = np.max([im0.xdim * im0.psize, im0.ydim * im0.psize])
for i in range(0, len(im_list)):
psize = np.min([psize, im_list[i].psize])
max_fov = np.max([max_fov, im_list[i].xdim * im_list[i].psize, im_list[i].ydim * im_list[i].psize])
if not final_fov:
final_fov = max_fov
im_list_shift = []
shifts = []
for i in range(0, len(im_list)):
(idx, _, im0_pad_orig, im_pad) = im0.find_shift(im_list[i], target_fov=2 * max_fov, psize=psize, pol=pol, scale=scale, gamma=gamma, dynamic_range=dynamic_range[i + 1])
if i == 0:
npix = int(im0_pad_orig.xdim / 2)
im0_pad = im0_pad_orig.regrid_image(final_fov, npix)
if useshift:
idx = shift[i]
tmp = im_pad.shift(idx)
shifts.append(idx)
im_list_shift.append(tmp.regrid_image(final_fov, npix))
(im_list_shift, shifts, im0_pad) = (im_list_shift, shifts, im0_pad)
</DeepExtract>
if scale == 'log':
log_offset = np.max(im0_pad.imvec) / dynamic_range[0]
im0_pad.imvec = np.log10(im0_pad.imvec + log_offset)
for i in range(0, len(im_list)):
log_offset = np.max(im_list_shift[i].imvec) / dynamic_range[i + 1]
im_list_shift[i].imvec = np.log10(im_list_shift[i].imvec + log_offset)
if scale == 'gamma':
log_offset = np.max(im0_pad.imvec) / dynamic_range[0]
im0_pad.imvec = (im0_pad.imvec + log_offset) ** gamma
for i in range(0, len(im_list)):
log_offset = np.max(im_list_shift[i].imvec) / dynamic_range[i + 1]
im_list_shift[i].imvec = (im_list_shift[i].imvec + log_offset) ** gamma
composite_img = np.zeros((im0_pad.ydim, im0_pad.xdim, 3))
for i in range(-1, len(im_list)):
if i == -1:
immtx = im0_pad.imvec.reshape(im0_pad.ydim, im0_pad.xdim)
else:
immtx = im_list_shift[i].imvec.reshape(im0_pad.ydim, im0_pad.xdim)
if rescale:
immtx = immtx - np.min(np.min(immtx))
immtx = immtx / np.max(np.max(immtx))
for c in range(0, 3):
composite_img[:, :, c] = composite_img[:, :, c] + color_coding[i + 1, c] * immtx
if rescale is False:
composite_img = composite_img - np.min(np.min(np.min(composite_img)))
composite_img = composite_img / np.max(np.max(np.max(composite_img)))
plt.subplot(111)
plt.title('%s MJD %i %.2f GHz' % (self.source, self.mjd, self.rf / 1000000000.0), fontsize=20)
plt.imshow(composite_img, interpolation=interp)
xticks = obsh.ticks(im0_pad.xdim, im0_pad.psize / ehc.RADPERAS / 1e-06)
yticks = obsh.ticks(im0_pad.ydim, im0_pad.psize / ehc.RADPERAS / 1e-06)
plt.xticks(xticks[0], xticks[1])
plt.yticks(yticks[0], yticks[1])
plt.xlabel('Relative RA ($\\mu$as)')
plt.ylabel('Relative Dec ($\\mu$as)')
if show:
plt.show(block=False)
if export_pdf != '':
f.savefig(export_pdf, bbox_inches='tight')
return (f, shift)
|
def overlay_display(self, im_list, color_coding=np.array([[1, 0, 1], [0, 1, 0]]), export_pdf='', show=True, f=False, shift=[0, 0], final_fov=False, interp='gaussian', scale='lin', gamma=0.5, dynamic_range=[1000.0], rescale=True):
"""Overlay primary polarization images of a list of images to compare structures.
Args:
im_list (list): list of images to align to the current image
color_coding (numpy.array): Color coding of each image in the composite
f (matplotlib.pyplot.figure): Figure to overlay on top of
export_pdf (str): path to exported PDF with plot
show (bool): Display the plot if true
shift (list): list of manual image shifts,
otherwise use the shift from maximum cross-correlation
final_fov (float): fov of the comparison image (rad).
If False it is the largestinput image fov
scale (str) : compare images in 'log','lin',or 'gamma' scale
gamma (float): exponent for gamma scale comparison
dynamic_range (float): dynamic range for log and gamma scale comparisons
Returns:
(matplotlib.figure.Figure): figure object with image
"""
if not f:
f = plt.figure()
plt.clf()
if len(dynamic_range) == 1:
dynamic_range = dynamic_range * np.ones(len(im_list) + 1)
if not isinstance(shift, np.ndarray) and (not isinstance(shift, bool)):
shift = matlib.repmat(shift, len(im_list), 1)
psize = self.psize
max_fov = np.max([self.xdim * self.psize, self.ydim * self.psize])
for i in range(0, len(im_list)):
psize = np.min([psize, im_list[i].psize])
max_fov = np.max([max_fov, im_list[i].xdim * im_list[i].psize, im_list[i].ydim * im_list[i].psize])
if not final_fov:
final_fov = max_fov
im0 = self.copy()
if not np.all(im0.polrep == np.array([im.polrep for im in im_list])):
raise Exception('In align_images, all images must have the same polrep!')
if not np.all(im0.pol_prim == np.array([im.pol_prim for im in im_list])):
raise Exception('In find_shift, all images must have the same pol_prim!')
if len(dynamic_range) == 1:
dynamic_range = dynamic_range * np.ones(len(im_list) + 1)
useshift = True
if isinstance(shift, bool):
useshift = False
psize = im0.psize
max_fov = np.max([im0.xdim * im0.psize, im0.ydim * im0.psize])
for i in range(0, len(im_list)):
psize = np.min([psize, im_list[i].psize])
max_fov = np.max([max_fov, im_list[i].xdim * im_list[i].psize, im_list[i].ydim * im_list[i].psize])
if not final_fov:
final_fov = max_fov
im_list_shift = []
shifts = []
for i in range(0, len(im_list)):
(idx, _, im0_pad_orig, im_pad) = im0.find_shift(im_list[i], target_fov=2 * max_fov, psize=psize, pol=pol, scale=scale, gamma=gamma, dynamic_range=dynamic_range[i + 1])
if i == 0:
npix = int(im0_pad_orig.xdim / 2)
im0_pad = im0_pad_orig.regrid_image(final_fov, npix)
if useshift:
idx = shift[i]
tmp = im_pad.shift(idx)
shifts.append(idx)
im_list_shift.append(tmp.regrid_image(final_fov, npix))
(im_list_shift, shifts, im0_pad) = (im_list_shift, shifts, im0_pad)
if scale == 'log':
log_offset = np.max(im0_pad.imvec) / dynamic_range[0]
im0_pad.imvec = np.log10(im0_pad.imvec + log_offset)
for i in range(0, len(im_list)):
log_offset = np.max(im_list_shift[i].imvec) / dynamic_range[i + 1]
im_list_shift[i].imvec = np.log10(im_list_shift[i].imvec + log_offset)
if scale == 'gamma':
log_offset = np.max(im0_pad.imvec) / dynamic_range[0]
im0_pad.imvec = (im0_pad.imvec + log_offset) ** gamma
for i in range(0, len(im_list)):
log_offset = np.max(im_list_shift[i].imvec) / dynamic_range[i + 1]
im_list_shift[i].imvec = (im_list_shift[i].imvec + log_offset) ** gamma
composite_img = np.zeros((im0_pad.ydim, im0_pad.xdim, 3))
for i in range(-1, len(im_list)):
if i == -1:
immtx = im0_pad.imvec.reshape(im0_pad.ydim, im0_pad.xdim)
else:
immtx = im_list_shift[i].imvec.reshape(im0_pad.ydim, im0_pad.xdim)
if rescale:
immtx = immtx - np.min(np.min(immtx))
immtx = immtx / np.max(np.max(immtx))
for c in range(0, 3):
composite_img[:, :, c] = composite_img[:, :, c] + color_coding[i + 1, c] * immtx
if rescale is False:
composite_img = composite_img - np.min(np.min(np.min(composite_img)))
composite_img = composite_img / np.max(np.max(np.max(composite_img)))
plt.subplot(111)
plt.title('%s MJD %i %.2f GHz' % (self.source, self.mjd, self.rf / 1000000000.0), fontsize=20)
plt.imshow(composite_img, interpolation=interp)
xticks = obsh.ticks(im0_pad.xdim, im0_pad.psize / ehc.RADPERAS / 1e-06)
yticks = obsh.ticks(im0_pad.ydim, im0_pad.psize / ehc.RADPERAS / 1e-06)
plt.xticks(xticks[0], xticks[1])
plt.yticks(yticks[0], yticks[1])
plt.xlabel('Relative RA ($\\mu$as)')
plt.ylabel('Relative Dec ($\\mu$as)')
if show:
plt.show(block=False)
if export_pdf != '':
f.savefig(export_pdf, bbox_inches='tight')
return (f, shift)
|
eht-imaging
|
positive
|
def test_get_container_link(self):
l = ContainerLinkList()
assert_a = lambda a: self.assertEqual(l.get_type_item(a), ContainerLink('a', None))
assert_b = lambda b: self.assertEqual(l.get_type_item(b), ContainerLink('b', 'b_'))
<DeepExtract>
self.assertEqual(InputConfigIdList('a', map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids('a', maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
</DeepExtract>
<DeepExtract>
self.assertEqual(InputConfigIdList(('a',), map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids(('a',), maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
</DeepExtract>
<DeepExtract>
self.assertEqual(InputConfigIdList({'container': 'a'}, map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids({'container': 'a'}, maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
</DeepExtract>
<DeepExtract>
self.assertEqual(InputConfigIdList(['a', None], map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids(['a', None], maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
</DeepExtract>
<DeepExtract>
six.assertCountEqual(self, InputConfigIdList(('b', 'b_'), map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c', ('i',)), InputConfigId(ItemType.CONTAINER, 'm', 'd', ('i',)), InputConfigId(ItemType.CONTAINER, 'n', 'e', ('i', 'j'))])
six.assertCountEqual(self, get_map_config_ids(('b', 'b_'), maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i'), MapConfigId(ItemType.CONTAINER, 'm', 'd', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'j')])
</DeepExtract>
<DeepExtract>
six.assertCountEqual(self, InputConfigIdList({'container': 'b', 'alias': 'b_'}, map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c', ('i',)), InputConfigId(ItemType.CONTAINER, 'm', 'd', ('i',)), InputConfigId(ItemType.CONTAINER, 'n', 'e', ('i', 'j'))])
six.assertCountEqual(self, get_map_config_ids({'container': 'b', 'alias': 'b_'}, maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i'), MapConfigId(ItemType.CONTAINER, 'm', 'd', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'j')])
</DeepExtract>
|
def test_get_container_link(self):
l = ContainerLinkList()
assert_a = lambda a: self.assertEqual(l.get_type_item(a), ContainerLink('a', None))
assert_b = lambda b: self.assertEqual(l.get_type_item(b), ContainerLink('b', 'b_'))
self.assertEqual(InputConfigIdList('a', map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids('a', maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
self.assertEqual(InputConfigIdList(('a',), map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids(('a',), maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
self.assertEqual(InputConfigIdList({'container': 'a'}, map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids({'container': 'a'}, maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
self.assertEqual(InputConfigIdList(['a', None], map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids(['a', None], maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
six.assertCountEqual(self, InputConfigIdList(('b', 'b_'), map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c', ('i',)), InputConfigId(ItemType.CONTAINER, 'm', 'd', ('i',)), InputConfigId(ItemType.CONTAINER, 'n', 'e', ('i', 'j'))])
six.assertCountEqual(self, get_map_config_ids(('b', 'b_'), maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i'), MapConfigId(ItemType.CONTAINER, 'm', 'd', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'j')])
six.assertCountEqual(self, InputConfigIdList({'container': 'b', 'alias': 'b_'}, map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c', ('i',)), InputConfigId(ItemType.CONTAINER, 'm', 'd', ('i',)), InputConfigId(ItemType.CONTAINER, 'n', 'e', ('i', 'j'))])
six.assertCountEqual(self, get_map_config_ids({'container': 'b', 'alias': 'b_'}, maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i'), MapConfigId(ItemType.CONTAINER, 'm', 'd', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'j')])
|
docker-map
|
positive
|
@api.onchange('contract_template_id')
def _onchange_contract_template_id(self):
"""Update the contract fields with that of the template.
Take special consideration with the `contract_line_ids`,
which must be created using the data from the contract lines. Cascade
deletion ensures that any errant lines that are created are also
deleted.
"""
contract_template_id = self.contract_template_id
if not contract_template_id:
return
for (field_name, field) in contract_template_id._fields.items():
if field.name == 'contract_line_ids':
<DeepExtract>
self.ensure_one()
new_lines = self.env['contract.line']
contract_line_model = self.env['contract.line']
for contract_line in contract_template_id.contract_line_ids:
vals = contract_line._convert_to_write(contract_line.read()[0])
vals.pop('contract_template_id', False)
vals['date_start'] = fields.Date.context_today(contract_line)
vals['recurring_next_date'] = fields.Date.context_today(contract_line)
new_lines += contract_line_model.new(vals)
new_lines._onchange_is_auto_renew()
lines = new_lines
</DeepExtract>
self.contract_line_ids += lines
elif not any((field.compute, field.related, field.automatic, field.readonly, field.company_dependent, field.name in self.NO_SYNC)):
if self.contract_template_id[field_name]:
self[field_name] = self.contract_template_id[field_name]
|
@api.onchange('contract_template_id')
def _onchange_contract_template_id(self):
"""Update the contract fields with that of the template.
Take special consideration with the `contract_line_ids`,
which must be created using the data from the contract lines. Cascade
deletion ensures that any errant lines that are created are also
deleted.
"""
contract_template_id = self.contract_template_id
if not contract_template_id:
return
for (field_name, field) in contract_template_id._fields.items():
if field.name == 'contract_line_ids':
self.ensure_one()
new_lines = self.env['contract.line']
contract_line_model = self.env['contract.line']
for contract_line in contract_template_id.contract_line_ids:
vals = contract_line._convert_to_write(contract_line.read()[0])
vals.pop('contract_template_id', False)
vals['date_start'] = fields.Date.context_today(contract_line)
vals['recurring_next_date'] = fields.Date.context_today(contract_line)
new_lines += contract_line_model.new(vals)
new_lines._onchange_is_auto_renew()
lines = new_lines
self.contract_line_ids += lines
elif not any((field.compute, field.related, field.automatic, field.readonly, field.company_dependent, field.name in self.NO_SYNC)):
if self.contract_template_id[field_name]:
self[field_name] = self.contract_template_id[field_name]
|
contract
|
positive
|
def loss(self, rpn_cls_scores, rpn_bbox_preds, rpn_dir_preds, gt_bboxes, gt_labels, anchors, anchors_mask, cfg):
cls_reg_targets = multi_apply(anchor_target, anchors, anchors_mask, gt_bboxes, gt_labels, target_means=self.target_means, target_stds=self.target_stds, cfg=cfg, sampling=False)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, dir_labels_list, dir_weights_list, pos_inds_list, neg_inds_list) = cls_reg_targets
num_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
<DeepExtract>
labels = torch.cat(labels_list).contiguous().view(-1)
label_weights = torch.cat(label_weights_list).contiguous().view(-1)
if self.use_sigmoid_cls:
rpn_cls_scores = rpn_cls_scores.permute(0, 2, 3, 1).contiguous().view(-1)
criterion = weighted_sigmoid_focal_loss
else:
rpn_cls_scores = rpn_cls_scores.permute(0, 2, 3, 1).contiguous().view(-1, 2)
criterion = weighted_cross_entropy
loss_cls = criterion(rpn_cls_scores, labels, label_weights, avg_factor=num_pos)
bbox_targets = torch.cat(bbox_targets_list).contiguous().view(-1, 7)
bbox_weights = torch.cat(bbox_weights_list).contiguous().view(-1, 7)
rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous().view(-1, 7)
(rpn_bbox_preds, bbox_targets) = add_sin_difference(rpn_bbox_preds, bbox_targets)
loss_reg = weighted_smoothl1(rpn_bbox_preds, bbox_targets, bbox_weights, beta=cfg.smoothl1_beta, avg_factor=num_pos)
dir_logits = rpn_dir_preds.permute(0, 2, 3, 1).contiguous().view(-1, 2)
loss_dir = weighted_cross_entropy(dir_logits, torch.cat(dir_labels_list), torch.cat(dir_weights_list), avg_factor=num_pos)
loss_reg *= 2
loss_dir *= 0.2
(losses_cls, losses_reg, losses_dir) = (loss_cls, loss_reg, loss_dir)
</DeepExtract>
return dict(loss_rpn_cls=losses_cls, loss_rpn_reg=losses_reg, loss_rpn_dir=losses_dir)
|
def loss(self, rpn_cls_scores, rpn_bbox_preds, rpn_dir_preds, gt_bboxes, gt_labels, anchors, anchors_mask, cfg):
cls_reg_targets = multi_apply(anchor_target, anchors, anchors_mask, gt_bboxes, gt_labels, target_means=self.target_means, target_stds=self.target_stds, cfg=cfg, sampling=False)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, dir_labels_list, dir_weights_list, pos_inds_list, neg_inds_list) = cls_reg_targets
num_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
labels = torch.cat(labels_list).contiguous().view(-1)
label_weights = torch.cat(label_weights_list).contiguous().view(-1)
if self.use_sigmoid_cls:
rpn_cls_scores = rpn_cls_scores.permute(0, 2, 3, 1).contiguous().view(-1)
criterion = weighted_sigmoid_focal_loss
else:
rpn_cls_scores = rpn_cls_scores.permute(0, 2, 3, 1).contiguous().view(-1, 2)
criterion = weighted_cross_entropy
loss_cls = criterion(rpn_cls_scores, labels, label_weights, avg_factor=num_pos)
bbox_targets = torch.cat(bbox_targets_list).contiguous().view(-1, 7)
bbox_weights = torch.cat(bbox_weights_list).contiguous().view(-1, 7)
rpn_bbox_preds = rpn_bbox_preds.permute(0, 2, 3, 1).contiguous().view(-1, 7)
(rpn_bbox_preds, bbox_targets) = add_sin_difference(rpn_bbox_preds, bbox_targets)
loss_reg = weighted_smoothl1(rpn_bbox_preds, bbox_targets, bbox_weights, beta=cfg.smoothl1_beta, avg_factor=num_pos)
dir_logits = rpn_dir_preds.permute(0, 2, 3, 1).contiguous().view(-1, 2)
loss_dir = weighted_cross_entropy(dir_logits, torch.cat(dir_labels_list), torch.cat(dir_weights_list), avg_factor=num_pos)
loss_reg *= 2
loss_dir *= 0.2
(losses_cls, losses_reg, losses_dir) = (loss_cls, loss_reg, loss_dir)
return dict(loss_rpn_cls=losses_cls, loss_rpn_reg=losses_reg, loss_rpn_dir=losses_dir)
|
ebms_3dod
|
positive
|