| before (string, 0-955k chars) | after (string, 0-877k chars) | repo (string, 1-74 chars) | type (string, 1 class) |
|---|---|---|---|
def restart(gParameters, model, verbose=True):
"""
Possibly restarts model from CheckpointCallback according to given
settings and the ckpt-info.json
return
The JSON dict if the restart happened or
None if the restart did not happen.
"""
import logging
logger = logging.getLogger('Candle.restart')
<DeepExtract>
if 'ckpt_directory' in gParameters:
result = gParameters['ckpt_directory']
else:
if isinstance('./save', ParamRequired):
raise Exception("param key must be provided: '%s'" % 'ckpt_directory')
result = './save'
result = param_type_check('ckpt_directory', result, type_)
param_allowed('ckpt_directory', result, allowed)
directory = result
</DeepExtract>
set_up_logger(directory + '/ckpt.log', logger, verbose=verbose, fmt_line='%(asctime)s CANDLE restart(): %(message)s')
<DeepExtract>
if 'ckpt_restart_mode' in gParameters:
result = gParameters['ckpt_restart_mode']
else:
if isinstance('auto', ParamRequired):
raise Exception("param key must be provided: '%s'" % 'ckpt_restart_mode')
result = 'auto'
result = param_type_check('ckpt_restart_mode', result, type_)
param_allowed('ckpt_restart_mode', result, ['off', 'auto', 'required'])
param_ckpt_mode = result
</DeepExtract>
if param_ckpt_mode == 'off':
return None
dir_last = 'save/ckpts/last'
model_file = dir_last + '/model.h5'
if not os.path.exists(model_file):
if param_ckpt_mode == 'required':
raise Exception("ckpt_restart_mode=='required' but no checkpoint " + 'could be found!')
assert param_ckpt_mode == 'auto'
return None
logger.info("restarting: '%s'" % model_file)
<DeepExtract>
json_file = dir_last + '/ckpt-info.json'
if not os.path.exists(json_file):
msg = 'restart_json(): in: %s model exists but not json!' % dir_last
logger.info(msg)
if not disabled(gParameters, 'require_json'):
raise Exception(msg)
with open(json_file) as fp:
J = json.load(fp)
logger.debug('ckpt-info.json contains:')
logger.debug(json.dumps(J, indent=2))
logger.info('restarting from epoch: %i' % J['epoch'])
if param(gParameters, 'ckpt_checksum', False, ParamType.BOOLEAN):
checksum = checksum_file(logger, dir_last + '/model.h5')
if checksum != J['checksum']:
raise Exception('checksum mismatch! directory: %s' % dir_last)
result = J
</DeepExtract>
logger.info('restarting: epoch=%i timestamp=%s', result['epoch'], result['timestamp'])
start = time.time()
stats = os.stat(model_file)
MB = stats.st_size / (1024 * 1024)
model.load_weights(model_file)
stop = time.time()
duration = stop - start
rate = MB / duration
logger.info('restarting: model read: %0.3f MB in %0.3f seconds (%0.2f MB/s).', MB, duration, rate)
return result
|
def restart(gParameters, model, verbose=True):
"""
Possibly restarts model from CheckpointCallback according to given
settings and the ckpt-info.json
return
The JSON dict if the restart happened or
None if the restart did not happen.
"""
import logging
logger = logging.getLogger('Candle.restart')
if 'ckpt_directory' in gParameters:
result = gParameters['ckpt_directory']
else:
if isinstance('./save', ParamRequired):
raise Exception("param key must be provided: '%s'" % 'ckpt_directory')
result = './save'
result = param_type_check('ckpt_directory', result, type_)
param_allowed('ckpt_directory', result, allowed)
directory = result
set_up_logger(directory + '/ckpt.log', logger, verbose=verbose, fmt_line='%(asctime)s CANDLE restart(): %(message)s')
if 'ckpt_restart_mode' in gParameters:
result = gParameters['ckpt_restart_mode']
else:
if isinstance('auto', ParamRequired):
raise Exception("param key must be provided: '%s'" % 'ckpt_restart_mode')
result = 'auto'
result = param_type_check('ckpt_restart_mode', result, type_)
param_allowed('ckpt_restart_mode', result, ['off', 'auto', 'required'])
param_ckpt_mode = result
if param_ckpt_mode == 'off':
return None
dir_last = 'save/ckpts/last'
model_file = dir_last + '/model.h5'
if not os.path.exists(model_file):
if param_ckpt_mode == 'required':
raise Exception("ckpt_restart_mode=='required' but no checkpoint " + 'could be found!')
assert param_ckpt_mode == 'auto'
return None
logger.info("restarting: '%s'" % model_file)
json_file = dir_last + '/ckpt-info.json'
if not os.path.exists(json_file):
msg = 'restart_json(): in: %s model exists but not json!' % dir_last
logger.info(msg)
if not disabled(gParameters, 'require_json'):
raise Exception(msg)
with open(json_file) as fp:
J = json.load(fp)
logger.debug('ckpt-info.json contains:')
logger.debug(json.dumps(J, indent=2))
logger.info('restarting from epoch: %i' % J['epoch'])
if param(gParameters, 'ckpt_checksum', False, ParamType.BOOLEAN):
checksum = checksum_file(logger, dir_last + '/model.h5')
if checksum != J['checksum']:
raise Exception('checksum mismatch! directory: %s' % dir_last)
result = J
logger.info('restarting: epoch=%i timestamp=%s', result['epoch'], result['timestamp'])
start = time.time()
stats = os.stat(model_file)
MB = stats.st_size / (1024 * 1024)
model.load_weights(model_file)
stop = time.time()
duration = stop - start
rate = MB / duration
logger.info('restarting: model read: %0.3f MB in %0.3f seconds (%0.2f MB/s).', MB, duration, rate)
return result
|
Benchmarks
|
positive
|
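The pair above follows a restart-from-checkpoint pattern: resolve a checkpoint directory, return early unless a saved model exists, read the accompanying ckpt-info.json, then load the weights and log the timing. A minimal self-contained sketch of that pattern, assuming a Keras-style `model` with `load_weights` and the file names used above (`restart_from_checkpoint` is a hypothetical helper, not CANDLE's API):

```python
import json
import os
import time


def restart_from_checkpoint(model, ckpt_dir="./save/ckpts/last"):
    """Load weights and metadata if a checkpoint exists; return the metadata or None."""
    model_file = os.path.join(ckpt_dir, "model.h5")
    info_file = os.path.join(ckpt_dir, "ckpt-info.json")
    if not os.path.exists(model_file):
        return None  # nothing to restart from
    with open(info_file) as fp:
        info = json.load(fp)
    start = time.time()
    model.load_weights(model_file)  # Keras API assumed; any load routine would do
    print("restored epoch %d in %.2fs" % (info.get("epoch", 0), time.time() - start))
    return info
```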
def insert_unconfirmed_raw_transaction(raw_transaction, db):
"""Add a raw transaction to the database."""
cursor = db.cursor()
<DeepExtract>
global UNIQUE_DUMMY_TX_HASH
tx = pycoin.tx.Tx.from_hex(raw_transaction)
for txin in tx.txs_in:
txin.previous_hash = b'\x00' * 32
txin.previous_index = 0
txin.script = b'\x00'
if tx.txs_out[-1].coin_value not in [0, config.DEFAULT_REGULAR_DUST_SIZE, config.DEFAULT_MULTISIG_DUST_SIZE, config.DEFAULT_OP_RETURN_VALUE]:
tx.txs_out[-1].coin_value = 0
tx_id = tx.id()
if tx_id in UNIQUE_DUMMY_TX_HASH:
logger.warn('BUMP TXID %s' % tx_id)
UNIQUE_DUMMY_TX_HASH[tx_id] += 1
tx_id = hashlib.sha256('{}{}'.format(tx_id, UNIQUE_DUMMY_TX_HASH[tx_id]).encode('utf-8')).hexdigest()
else:
UNIQUE_DUMMY_TX_HASH[tx_id] = 0
tx_hash = tx_id
</DeepExtract>
tx_index = list(cursor.execute('SELECT tx_index FROM transactions ORDER BY tx_index DESC LIMIT 1'))
tx_index = tx_index[0]['tx_index'] if len(tx_index) else 0
tx_index = tx_index + 1
(source, destination, btc_amount, fee, data, extra) = blocks._get_tx_info(raw_transaction)
tx = {'tx_index': tx_index, 'tx_hash': tx_hash, 'block_index': config.MEMPOOL_BLOCK_INDEX, 'block_hash': config.MEMPOOL_BLOCK_HASH, 'block_time': int(time.time()), 'source': source, 'destination': destination, 'btc_amount': btc_amount, 'fee': fee, 'data': data, 'supported': True}
cursor.close()
MOCK_UTXO_SET.add_raw_transaction(raw_transaction, tx_id=tx_hash, confirmations=0)
return tx
|
def insert_unconfirmed_raw_transaction(raw_transaction, db):
"""Add a raw transaction to the database."""
cursor = db.cursor()
global UNIQUE_DUMMY_TX_HASH
tx = pycoin.tx.Tx.from_hex(raw_transaction)
for txin in tx.txs_in:
txin.previous_hash = b'\x00' * 32
txin.previous_index = 0
txin.script = b'\x00'
if tx.txs_out[-1].coin_value not in [0, config.DEFAULT_REGULAR_DUST_SIZE, config.DEFAULT_MULTISIG_DUST_SIZE, config.DEFAULT_OP_RETURN_VALUE]:
tx.txs_out[-1].coin_value = 0
tx_id = tx.id()
if tx_id in UNIQUE_DUMMY_TX_HASH:
logger.warn('BUMP TXID %s' % tx_id)
UNIQUE_DUMMY_TX_HASH[tx_id] += 1
tx_id = hashlib.sha256('{}{}'.format(tx_id, UNIQUE_DUMMY_TX_HASH[tx_id]).encode('utf-8')).hexdigest()
else:
UNIQUE_DUMMY_TX_HASH[tx_id] = 0
tx_hash = tx_id
tx_index = list(cursor.execute('SELECT tx_index FROM transactions ORDER BY tx_index DESC LIMIT 1'))
tx_index = tx_index[0]['tx_index'] if len(tx_index) else 0
tx_index = tx_index + 1
(source, destination, btc_amount, fee, data, extra) = blocks._get_tx_info(raw_transaction)
tx = {'tx_index': tx_index, 'tx_hash': tx_hash, 'block_index': config.MEMPOOL_BLOCK_INDEX, 'block_hash': config.MEMPOOL_BLOCK_HASH, 'block_time': int(time.time()), 'source': source, 'destination': destination, 'btc_amount': btc_amount, 'fee': fee, 'data': data, 'supported': True}
cursor.close()
MOCK_UTXO_SET.add_raw_transaction(raw_transaction, tx_id=tx_hash, confirmations=0)
return tx
|
counterparty-lib
|
positive
|
def convert(*color):
<DeepExtract>
dists = self.colormap[:, :3] - np.array([*color, g, b])
a = np.argmin((dists * dists).sum(1))
i = a
</DeepExtract>
return self.colormap[i, :3]
|
def convert(*color):
dists = self.colormap[:, :3] - np.array([*color, g, b])
a = np.argmin((dists * dists).sum(1))
i = a
return self.colormap[i, :3]
|
3DSkit
|
positive
|
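The pair above picks the colormap row nearest to a colour by minimising squared distance; note that `g` and `b` are free variables left behind by the inlining. A self-contained NumPy version of the same nearest-colour lookup (`nearest_color` is an illustrative helper, not the repo's):

```python
import numpy as np


def nearest_color(colormap, color):
    """Return the colormap row closest (squared Euclidean) to the given RGB colour."""
    dists = colormap[:, :3] - np.asarray(color, dtype=float)
    i = np.argmin((dists * dists).sum(axis=1))
    return colormap[i, :3]


cmap = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=float)
print(nearest_color(cmap, (200, 30, 30)))  # -> [255.   0.   0.]
```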
def zeroOutJoints(jntList=None, displayBone=False):
""" Duplicate the joints, parent as zeroOut.
Returns the father joints (zeroOuted).
"""
resultList = []
zeroOutJntSuffix = '_Jzt'
if jntList:
for jnt in jntList:
if cmds.objExists(jnt):
jxtName = jnt.replace('_Jnt', '').replace('_' + zeroOutJntSuffix, '')
if not zeroOutJntSuffix in jxtName:
jxtName += zeroOutJntSuffix
dup = cmds.duplicate(jnt, name=jxtName)[0]
<DeepExtract>
if cmds.objExists(dup):
childrenList = cmds.listRelatives(dup, children=True, fullPath=True)
if childrenList:
for child in childrenList:
cmds.delete(child)
</DeepExtract>
<DeepExtract>
dpArAttrList = ['dpAR_joint']
if [dup]:
for item in [dup]:
for dpArAttr in dpArAttrList:
if cmds.objExists(item + '.' + dpArAttr):
cmds.deleteAttr(item + '.' + dpArAttr)
</DeepExtract>
<DeepExtract>
cmds.setAttr(dup + '.side', 3)
cmds.setAttr(dup + '.type', 18)
cmds.setAttr(dup + '.otherType', '', type='string')
</DeepExtract>
cmds.parent(jnt, dup)
if not displayBone:
cmds.setAttr(dup + '.drawStyle', 2)
resultList.append(dup)
return resultList
|
def zeroOutJoints(jntList=None, displayBone=False):
""" Duplicate the joints, parent as zeroOut.
Returns the father joints (zeroOuted).
"""
resultList = []
zeroOutJntSuffix = '_Jzt'
if jntList:
for jnt in jntList:
if cmds.objExists(jnt):
jxtName = jnt.replace('_Jnt', '').replace('_' + zeroOutJntSuffix, '')
if not zeroOutJntSuffix in jxtName:
jxtName += zeroOutJntSuffix
dup = cmds.duplicate(jnt, name=jxtName)[0]
if cmds.objExists(dup):
childrenList = cmds.listRelatives(dup, children=True, fullPath=True)
if childrenList:
for child in childrenList:
cmds.delete(child)
dpArAttrList = ['dpAR_joint']
if [dup]:
for item in [dup]:
for dpArAttr in dpArAttrList:
if cmds.objExists(item + '.' + dpArAttr):
cmds.deleteAttr(item + '.' + dpArAttr)
cmds.setAttr(dup + '.side', 3)
cmds.setAttr(dup + '.type', 18)
cmds.setAttr(dup + '.otherType', '', type='string')
cmds.parent(jnt, dup)
if not displayBone:
cmds.setAttr(dup + '.drawStyle', 2)
resultList.append(dup)
return resultList
|
dpAutoRigSystem
|
positive
|
def __init__(self, relay_redis_cfg, master_redis_cfg):
<DeepExtract>
for i in range(tries):
try:
r = redis.StrictRedis(**relay_redis_cfg)
r.ping()
self.local_redis = r
except redis.ConnectionError as e:
if i == tries - 1:
raise
else:
delay = base_delay * (1 + os.getpid() % 10 / 9)
logger.warning('Could not connect to {}. Retrying after {:.2f} sec ({}/{}). Error: {}'.format(relay_redis_cfg, delay, i + 2, tries, e))
time.sleep(delay)
</DeepExtract>
logger.info('[worker] Connected to relay: {}'.format(self.local_redis))
<DeepExtract>
for i in range(tries):
try:
r = redis.StrictRedis(**master_redis_cfg)
r.ping()
self.master_redis = r
except redis.ConnectionError as e:
if i == tries - 1:
raise
else:
delay = base_delay * (1 + os.getpid() % 10 / 9)
logger.warning('Could not connect to {}. Retrying after {:.2f} sec ({}/{}). Error: {}'.format(master_redis_cfg, delay, i + 2, tries, e))
time.sleep(delay)
</DeepExtract>
logger.warning('[worker] Connected to master: {}'.format(self.master_redis))
(self.cached_task_id, self.cached_task_data) = (None, None)
|
def __init__(self, relay_redis_cfg, master_redis_cfg):
for i in range(tries):
try:
r = redis.StrictRedis(**relay_redis_cfg)
r.ping()
self.local_redis = r
except redis.ConnectionError as e:
if i == tries - 1:
raise
else:
delay = base_delay * (1 + os.getpid() % 10 / 9)
logger.warning('Could not connect to {}. Retrying after {:.2f} sec ({}/{}). Error: {}'.format(relay_redis_cfg, delay, i + 2, tries, e))
time.sleep(delay)
logger.info('[worker] Connected to relay: {}'.format(self.local_redis))
for i in range(tries):
try:
r = redis.StrictRedis(**master_redis_cfg)
r.ping()
self.master_redis = r
except redis.ConnectionError as e:
if i == tries - 1:
raise
else:
delay = base_delay * (1 + os.getpid() % 10 / 9)
logger.warning('Could not connect to {}. Retrying after {:.2f} sec ({}/{}). Error: {}'.format(master_redis_cfg, delay, i + 2, tries, e))
time.sleep(delay)
logger.warning('[worker] Connected to master: {}'.format(self.master_redis))
(self.cached_task_id, self.cached_task_data) = (None, None)
|
deep-neuroevolution
|
positive
|
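The pair above repeats a connect-and-ping retry loop for two Redis endpoints, with `tries` and `base_delay` left as free variables by the inlining. A stand-alone sketch of that retry pattern using redis-py, without the per-process jitter used above (`retry_connect` is a hypothetical name):

```python
import time

import redis


def retry_connect(redis_cfg, tries=300, base_delay=4.0):
    """Return a pinged StrictRedis client, retrying with a fixed delay on failure."""
    for i in range(tries):
        try:
            client = redis.StrictRedis(**redis_cfg)
            client.ping()
            return client
        except redis.ConnectionError:
            if i == tries - 1:
                raise
            time.sleep(base_delay)
```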
def randint(self, a, b):
"""Return a random integer N such that a <= N <= b."""
if not isinstance(a, int) or not isinstance(b, int):
raise TypeError('randint requires integer arguments')
<DeepExtract>
if len(args) == 3:
(start, stop, step) = args
elif len(args) == 2:
(start, stop) = args
step = 1
elif len(args) == 1:
(stop,) = args
start = 0
step = 1
else:
raise TypeError('randrange expected at most 3 arguments, got %d' % (len(args),))
if not isinstance(start, int) or not isinstance(stop, int) or (not isinstance(step, int)):
raise TypeError('randrange requires integer arguments')
if step == 0:
raise ValueError('randrange step argument must not be zero')
num_choices = ceil_div(stop - start, step)
if num_choices < 0:
num_choices = 0
if num_choices < 1:
raise ValueError('empty range for randrange(%r, %r, %r)' % (start, stop, step))
r = num_choices
while r >= num_choices:
r = self.getrandbits(size(num_choices))
N = start + step * r
</DeepExtract>
assert a <= N <= b
return N
|
def randint(self, a, b):
"""Return a random integer N such that a <= N <= b."""
if not isinstance(a, int) or not isinstance(b, int):
raise TypeError('randint requires integer arguments')
if len(args) == 3:
(start, stop, step) = args
elif len(args) == 2:
(start, stop) = args
step = 1
elif len(args) == 1:
(stop,) = args
start = 0
step = 1
else:
raise TypeError('randrange expected at most 3 arguments, got %d' % (len(args),))
if not isinstance(start, int) or not isinstance(stop, int) or (not isinstance(step, int)):
raise TypeError('randrange requires integer arguments')
if step == 0:
raise ValueError('randrange step argument must not be zero')
num_choices = ceil_div(stop - start, step)
if num_choices < 0:
num_choices = 0
if num_choices < 1:
raise ValueError('empty range for randrange(%r, %r, %r)' % (start, stop, step))
r = num_choices
while r >= num_choices:
r = self.getrandbits(size(num_choices))
N = start + step * r
assert a <= N <= b
return N
|
CyKit
|
positive
|
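The pair above builds `randint` on a `randrange` that rejection-samples `getrandbits` until the draw falls inside the range, avoiding modulo bias. A compact sketch of the same idea with the standard library's `random.getrandbits`:

```python
import random


def unbiased_randint(a, b):
    """Return N with a <= N <= b, drawing bits and rejecting out-of-range values."""
    num_choices = b - a + 1
    nbits = (num_choices - 1).bit_length() or 1
    r = random.getrandbits(nbits)
    while r >= num_choices:  # reject draws that would skew the distribution
        r = random.getrandbits(nbits)
    return a + r
```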
def forward(self, input, target):
<DeepExtract>
assert not target.requires_grad, "nn criterions don't compute the gradient w.r.t. targets - please mark these variables as volatile or not requiring gradients"
</DeepExtract>
error = target - input
absError = torch.abs(error)
delta = 0.2 * torch.max(absError).data[0]
ft1 = 0.5 * error * error
ft2 = 0.5 * delta * delta + delta * (absError - delta)
mask_down_f = absError.le(delta).float()
mask_up_f = absError.gt(delta).float()
loss = ft1 * mask_down_f + ft2 * mask_up_f
return torch.mean(loss)
|
def forward(self, input, target):
assert not target.requires_grad, "nn criterions don't compute the gradient w.r.t. targets - please mark these variables as volatile or not requiring gradients"
error = target - input
absError = torch.abs(error)
delta = 0.2 * torch.max(absError).data[0]
ft1 = 0.5 * error * error
ft2 = 0.5 * delta * delta + delta * (absError - delta)
mask_down_f = absError.le(delta).float()
mask_up_f = absError.gt(delta).float()
loss = ft1 * mask_down_f + ft2 * mask_up_f
return torch.mean(loss)
|
aerial_mtl
|
positive
|
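The pair above is an adaptive Huber-style loss whose quadratic-to-linear switch point is 20% of the largest absolute error; `.data[0]` is the retired Variable API. The same computation with current PyTorch tensor calls (a sketch, not the repo's module):

```python
import torch


def adaptive_huber(input, target, frac=0.2):
    """Quadratic below delta, linear above, with delta = frac * max(|error|)."""
    error = target - input
    abs_error = torch.abs(error)
    delta = frac * torch.max(abs_error).item()
    quad = 0.5 * error * error
    lin = 0.5 * delta * delta + delta * (abs_error - delta)
    mask_quad = (abs_error <= delta).float()
    return torch.mean(quad * mask_quad + lin * (1.0 - mask_quad))
```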
def test_post_current_user_bookmarked(self):
user = User(active=True, **self.userdata)
self.db.session.add(user)
event = fac.event()
save()
<DeepExtract>
token = self.token()
r = self.client.post('/user/bookmarked', headers={'Authorization': 'Bearer {0}'.format(token)}, data={'event_id': event.id})
</DeepExtract>
self.assertEqual(r.status_code, 201)
self.assertEqual(user.bookmarked, [event])
|
def test_post_current_user_bookmarked(self):
user = User(active=True, **self.userdata)
self.db.session.add(user)
event = fac.event()
save()
token = self.token()
r = self.client.post('/user/bookmarked', headers={'Authorization': 'Bearer {0}'.format(token)}, data={'event_id': event.id})
self.assertEqual(r.status_code, 201)
self.assertEqual(user.bookmarked, [event])
|
argos
|
positive
|
def __des_crypt(self, block, crypt_type):
"""Crypt the block of data through DES bit-manipulation"""
<DeepExtract>
block = list(map(lambda x: block[x], des.__ip))
</DeepExtract>
self.L = block[:32]
self.R = block[32:]
if crypt_type == des.ENCRYPT:
iteration = 0
iteration_adjustment = 1
else:
iteration = 15
iteration_adjustment = -1
i = 0
while i < 16:
tempR = self.R[:]
<DeepExtract>
self.R = list(map(lambda x: self.R[x], des.__expansion_table))
</DeepExtract>
self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration]))
B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]]
j = 0
Bn = [0] * 32
pos = 0
while j < 8:
m = (B[j][0] << 1) + B[j][5]
n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
v = des.__sbox[j][(m << 4) + n]
Bn[pos] = (v & 8) >> 3
Bn[pos + 1] = (v & 4) >> 2
Bn[pos + 2] = (v & 2) >> 1
Bn[pos + 3] = v & 1
pos += 4
j += 1
<DeepExtract>
self.R = list(map(lambda x: Bn[x], des.__p))
</DeepExtract>
self.R = list(map(lambda x, y: x ^ y, self.R, self.L))
self.L = tempR
i += 1
iteration += iteration_adjustment
<DeepExtract>
self.final = list(map(lambda x: self.R + self.L[x], des.__fp))
</DeepExtract>
return self.final
|
def __des_crypt(self, block, crypt_type):
"""Crypt the block of data through DES bit-manipulation"""
block = list(map(lambda x: block[x], des.__ip))
self.L = block[:32]
self.R = block[32:]
if crypt_type == des.ENCRYPT:
iteration = 0
iteration_adjustment = 1
else:
iteration = 15
iteration_adjustment = -1
i = 0
while i < 16:
tempR = self.R[:]
self.R = list(map(lambda x: self.R[x], des.__expansion_table))
self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration]))
B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]]
j = 0
Bn = [0] * 32
pos = 0
while j < 8:
m = (B[j][0] << 1) + B[j][5]
n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
v = des.__sbox[j][(m << 4) + n]
Bn[pos] = (v & 8) >> 3
Bn[pos + 1] = (v & 4) >> 2
Bn[pos + 2] = (v & 2) >> 1
Bn[pos + 3] = v & 1
pos += 4
j += 1
self.R = list(map(lambda x: Bn[x], des.__p))
self.R = list(map(lambda x, y: x ^ y, self.R, self.L))
self.L = tempR
i += 1
iteration += iteration_adjustment
self.final = list(map(lambda x: self.R + self.L[x], des.__fp))
return self.final
|
django_restframework_apiview
|
positive
|
def __call__(self, *args, **kwargs):
<DeepExtract>
entry = self.name
parent = self.parent
m = parent._get_child_mock(name=entry, _new_name=entry, _new_parent=parent)
setattr(parent, entry, m)
_set_return_value(parent, m, entry)
m = m
</DeepExtract>
return m(*args, **kwargs)
|
def __call__(self, *args, **kwargs):
entry = self.name
parent = self.parent
m = parent._get_child_mock(name=entry, _new_name=entry, _new_parent=parent)
setattr(parent, entry, m)
_set_return_value(parent, m, entry)
m = m
return m(*args, **kwargs)
|
alfred-rates
|
positive
|
def model_summary(model):
param_count = 0
def display(objects, positions):
line = ''
for i in range(len(objects)):
line += str(objects[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
def display_layer_info(layer, name, positions):
layer_type = layer.__class__.__name__
output_shape = layer.output_shape
params = layer.count_params()
to_display = ['%s (%s)' % (layer_type, name), output_shape, params]
<DeepExtract>
line = ''
for i in range(len(to_display)):
line += str(to_display[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
</DeepExtract>
line_length = 80
positions = [30, 60, 80]
to_display = ['Layer (name)', 'Output Shape', 'Param #']
if model.__class__.__name__ == 'Sequential':
print('-' * line_length)
print('Initial input shape: ' + str(model.input_shape))
print('-' * line_length)
<DeepExtract>
line = ''
for i in range(len(to_display)):
line += str(to_display[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
</DeepExtract>
print('-' * line_length)
if model.__class__.__name__ == 'Sequential':
for layer in model.layers:
name = getattr(layer, 'name', 'Unnamed')
<DeepExtract>
layer_type = layer.__class__.__name__
output_shape = layer.output_shape
params = layer.count_params()
to_display = ['%s (%s)' % (layer_type, name), output_shape, params]
display(to_display, positions)
</DeepExtract>
param_count += layer.count_params()
elif model.__class__.__name__ == 'Graph':
for name in model.input_order:
layer = model.inputs[name]
<DeepExtract>
layer_type = layer.__class__.__name__
output_shape = layer.output_shape
params = layer.count_params()
to_display = ['%s (%s)' % (layer_type, name), output_shape, params]
display(to_display, positions)
</DeepExtract>
for name in model.nodes:
layer = model.nodes[name]
<DeepExtract>
layer_type = layer.__class__.__name__
output_shape = layer.output_shape
params = layer.count_params()
to_display = ['%s (%s)' % (layer_type, name), output_shape, params]
display(to_display, positions)
</DeepExtract>
param_count += layer.count_params()
for name in model.output_order:
layer = model.outputs[name]
<DeepExtract>
layer_type = layer.__class__.__name__
output_shape = layer.output_shape
params = layer.count_params()
to_display = ['%s (%s)' % (layer_type, name), output_shape, params]
display(to_display, positions)
</DeepExtract>
print('-' * line_length)
print('Total params: %s' % param_count)
print('-' * line_length)
|
def model_summary(model):
param_count = 0
def display(objects, positions):
line = ''
for i in range(len(objects)):
line += str(objects[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
def display_layer_info(layer, name, positions):
layer_type = layer.__class__.__name__
output_shape = layer.output_shape
params = layer.count_params()
to_display = ['%s (%s)' % (layer_type, name), output_shape, params]
line = ''
for i in range(len(to_display)):
line += str(to_display[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
line_length = 80
positions = [30, 60, 80]
to_display = ['Layer (name)', 'Output Shape', 'Param #']
if model.__class__.__name__ == 'Sequential':
print('-' * line_length)
print('Initial input shape: ' + str(model.input_shape))
print('-' * line_length)
line = ''
for i in range(len(to_display)):
line += str(to_display[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
print('-' * line_length)
if model.__class__.__name__ == 'Sequential':
for layer in model.layers:
name = getattr(layer, 'name', 'Unnamed')
layer_type = layer.__class__.__name__
output_shape = layer.output_shape
params = layer.count_params()
to_display = ['%s (%s)' % (layer_type, name), output_shape, params]
display(to_display, positions)
param_count += layer.count_params()
elif model.__class__.__name__ == 'Graph':
for name in model.input_order:
layer = model.inputs[name]
layer_type = layer.__class__.__name__
output_shape = layer.output_shape
params = layer.count_params()
to_display = ['%s (%s)' % (layer_type, name), output_shape, params]
display(to_display, positions)
for name in model.nodes:
layer = model.nodes[name]
layer_type = layer.__class__.__name__
output_shape = layer.output_shape
params = layer.count_params()
to_display = ['%s (%s)' % (layer_type, name), output_shape, params]
display(to_display, positions)
param_count += layer.count_params()
for name in model.output_order:
layer = model.outputs[name]
layer_type = layer.__class__.__name__
output_shape = layer.output_shape
params = layer.count_params()
to_display = ['%s (%s)' % (layer_type, name), output_shape, params]
display(to_display, positions)
print('-' * line_length)
print('Total params: %s' % param_count)
print('-' * line_length)
|
encoder_decoder
|
positive
|
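The pair above prints each summary row by truncating the accumulated line at fixed column stops and padding with spaces. That padding trick on its own, as an illustrative snippet:

```python
def display(objects, positions):
    """Print values left-aligned at fixed column stops (truncate, then pad)."""
    line = ""
    for obj, stop in zip(objects, positions):
        line += str(obj)
        line = line[:stop].ljust(stop)
    print(line)


display(["Layer (name)", "Output Shape", "Param #"], [30, 60, 80])
```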
def test_rdps006_move_on_page(dash_duo):
app = Dash(__name__)
app.layout = html.Div([html.Button('move table', id='move-table'), html.Div(id='container')])
@app.callback(Output('container', 'children'), [Input('move-table', 'n_clicks')])
def move_table(n):
children = [html.Div('div 0', id='div0'), simple_table()]
for i in range(1, (n or 0) + 1):
children = [html.Div('div {}'.format(i), id='div{}'.format(i)), html.Div(children)]
return children
def find_last_div(n):
dash_duo.wait_for_text_to_equal('#div{}'.format(n), 'div {}'.format(n))
assert len(dash_duo.find_elements('#div{}'.format(n + 1))) == 0
dash_duo.start_server(app)
<DeepExtract>
dash_duo.wait_for_text_to_equal('#div{}'.format(0), 'div {}'.format(0))
assert len(dash_duo.find_elements('#div{}'.format(0 + 1))) == 0
</DeepExtract>
<DeepExtract>
dash_duo.wait_for_text_to_equal('#{} .column-0 .column-header-name'.format(table_id), ['a', 'b'][0])
headers = dash_duo.find_elements('#{} .column-header-name'.format(table_id))
assert len(headers) == len(['a', 'b'])
for (i, n) in enumerate(['a', 'b']):
name_el = dash_duo.find_element('#{} .column-{} .column-header-name'.format(table_id, i))
assert name_el.text == n
</DeepExtract>
<DeepExtract>
dash_duo.find_element('.dash-header.column-{} .column-header--edit'.format(rename)).click()
prompt = dash_duo.driver.switch_to.alert
prompt.send_keys(new_name)
prompt.accept()
dash_duo.find_element('.dash-header.column-{} .column-header--hide'.format(hide)).click()
</DeepExtract>
<DeepExtract>
dash_duo.wait_for_text_to_equal('#{} .column-0 .column-header-name'.format(table_id), [NEW_NAME][0])
headers = dash_duo.find_elements('#{} .column-header-name'.format(table_id))
assert len(headers) == len([NEW_NAME])
for (i, n) in enumerate([NEW_NAME]):
name_el = dash_duo.find_element('#{} .column-{} .column-header-name'.format(table_id, i))
assert name_el.text == n
</DeepExtract>
for i in range(1, 5):
dash_duo.find_element('#move-table').click()
<DeepExtract>
dash_duo.wait_for_text_to_equal('#div{}'.format(i), 'div {}'.format(i))
assert len(dash_duo.find_elements('#div{}'.format(i + 1))) == 0
</DeepExtract>
<DeepExtract>
dash_duo.wait_for_text_to_equal('#{} .column-0 .column-header-name'.format(table_id), [NEW_NAME][0])
headers = dash_duo.find_elements('#{} .column-header-name'.format(table_id))
assert len(headers) == len([NEW_NAME])
for (i, n) in enumerate([NEW_NAME]):
name_el = dash_duo.find_element('#{} .column-{} .column-header-name'.format(table_id, i))
assert name_el.text == n
</DeepExtract>
|
def test_rdps006_move_on_page(dash_duo):
app = Dash(__name__)
app.layout = html.Div([html.Button('move table', id='move-table'), html.Div(id='container')])
@app.callback(Output('container', 'children'), [Input('move-table', 'n_clicks')])
def move_table(n):
children = [html.Div('div 0', id='div0'), simple_table()]
for i in range(1, (n or 0) + 1):
children = [html.Div('div {}'.format(i), id='div{}'.format(i)), html.Div(children)]
return children
def find_last_div(n):
dash_duo.wait_for_text_to_equal('#div{}'.format(n), 'div {}'.format(n))
assert len(dash_duo.find_elements('#div{}'.format(n + 1))) == 0
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal('#div{}'.format(0), 'div {}'.format(0))
assert len(dash_duo.find_elements('#div{}'.format(0 + 1))) == 0
dash_duo.wait_for_text_to_equal('#{} .column-0 .column-header-name'.format(table_id), ['a', 'b'][0])
headers = dash_duo.find_elements('#{} .column-header-name'.format(table_id))
assert len(headers) == len(['a', 'b'])
for (i, n) in enumerate(['a', 'b']):
name_el = dash_duo.find_element('#{} .column-{} .column-header-name'.format(table_id, i))
assert name_el.text == n
dash_duo.find_element('.dash-header.column-{} .column-header--edit'.format(rename)).click()
prompt = dash_duo.driver.switch_to.alert
prompt.send_keys(new_name)
prompt.accept()
dash_duo.find_element('.dash-header.column-{} .column-header--hide'.format(hide)).click()
dash_duo.wait_for_text_to_equal('#{} .column-0 .column-header-name'.format(table_id), [NEW_NAME][0])
headers = dash_duo.find_elements('#{} .column-header-name'.format(table_id))
assert len(headers) == len([NEW_NAME])
for (i, n) in enumerate([NEW_NAME]):
name_el = dash_duo.find_element('#{} .column-{} .column-header-name'.format(table_id, i))
assert name_el.text == n
for i in range(1, 5):
dash_duo.find_element('#move-table').click()
dash_duo.wait_for_text_to_equal('#div{}'.format(i), 'div {}'.format(i))
assert len(dash_duo.find_elements('#div{}'.format(i + 1))) == 0
dash_duo.wait_for_text_to_equal('#{} .column-0 .column-header-name'.format(table_id), [NEW_NAME][0])
headers = dash_duo.find_elements('#{} .column-header-name'.format(table_id))
assert len(headers) == len([NEW_NAME])
for (i, n) in enumerate([NEW_NAME]):
name_el = dash_duo.find_element('#{} .column-{} .column-header-name'.format(table_id, i))
assert name_el.text == n
|
dash
|
positive
|
def test_get_containing_modules_with_relative_path(self):
"""Test that a relative path raises an exception.
Note: GetContainingModules() only accepts absolute paths.
File layout:
root/ <-- Current working directory (self.tmp)
folder_ws <-- Workspace directory
pp1 <-- Package Path 1
PPTestPkg <-- An edk2 package
PPTestPkg.DEC
module1
module1.INF
module2
module2.INF
X64
TestFile.c
WSTestPkg <-- An edk2 package
WSTestPkg.dec
module1
module1.inf
module2
module2.inf
X64
TestFile.c
"""
ws_rel = 'folder_ws'
ws_abs = os.path.join(self.tmp, ws_rel)
os.mkdir(ws_abs)
folder_pp_rel = 'pp1'
folder_pp1_abs = os.path.join(ws_abs, folder_pp_rel)
os.mkdir(folder_pp1_abs)
ws_p_name = 'WSTestPkg'
<DeepExtract>
pkgfolder = os.path.join(ws_abs, ws_p_name)
os.makedirs(pkgfolder, exist_ok=True)
pn = ws_p_name + ('.dec' if extension_case_lower else '.DEC')
self._make_file_helper(pkgfolder, pn)
self._make_edk2_module_helper(pkgfolder, 'module1', extension_case_lower=extension_case_lower)
p2 = self._make_edk2_module_helper(pkgfolder, 'module2', extension_case_lower=extension_case_lower)
p3 = os.path.join(p2, 'X64')
os.makedirs(p3)
self._make_file_helper(p3, 'TestFile.c')
return pkgfolder
</DeepExtract>
pp_p_name = 'PPTestPkg'
<DeepExtract>
pkgfolder = os.path.join(folder_pp1_abs, pp_p_name)
os.makedirs(pkgfolder, exist_ok=True)
pn = pp_p_name + ('.dec' if False else '.DEC')
self._make_file_helper(pkgfolder, pn)
self._make_edk2_module_helper(pkgfolder, 'module1', extension_case_lower=False)
p2 = self._make_edk2_module_helper(pkgfolder, 'module2', extension_case_lower=False)
p3 = os.path.join(p2, 'X64')
os.makedirs(p3)
self._make_file_helper(p3, 'TestFile.c')
return pkgfolder
</DeepExtract>
pathobj = Edk2Path(ws_abs, [folder_pp1_abs])
os.chdir(ws_abs)
p = os.path.join('WSTestPkg', 'module2', 'module2.inf')
self.assertRaises(Exception, pathobj.GetContainingModules, p)
p = os.path.join('WSTestPkg', 'module2', 'module3.inf')
self.assertRaises(Exception, pathobj.GetContainingModules, p)
p = os.path.join('WSTestPkg', 'module2', 'X64', 'TestFile.c')
self.assertRaises(Exception, pathobj.GetContainingModules, p)
|
def test_get_containing_modules_with_relative_path(self):
"""Test that a relative path raises an exception.
Note: GetContainingModules() only accepts absolute paths.
File layout:
root/ <-- Current working directory (self.tmp)
folder_ws <-- Workspace directory
pp1 <-- Package Path 1
PPTestPkg <-- An edk2 package
PPTestPkg.DEC
module1
module1.INF
module2
module2.INF
X64
TestFile.c
WSTestPkg <-- An edk2 package
WSTestPkg.dec
module1
module1.inf
module2
module2.inf
X64
TestFile.c
"""
ws_rel = 'folder_ws'
ws_abs = os.path.join(self.tmp, ws_rel)
os.mkdir(ws_abs)
folder_pp_rel = 'pp1'
folder_pp1_abs = os.path.join(ws_abs, folder_pp_rel)
os.mkdir(folder_pp1_abs)
ws_p_name = 'WSTestPkg'
pkgfolder = os.path.join(ws_abs, ws_p_name)
os.makedirs(pkgfolder, exist_ok=True)
pn = ws_p_name + ('.dec' if extension_case_lower else '.DEC')
self._make_file_helper(pkgfolder, pn)
self._make_edk2_module_helper(pkgfolder, 'module1', extension_case_lower=extension_case_lower)
p2 = self._make_edk2_module_helper(pkgfolder, 'module2', extension_case_lower=extension_case_lower)
p3 = os.path.join(p2, 'X64')
os.makedirs(p3)
self._make_file_helper(p3, 'TestFile.c')
return pkgfolder
pp_p_name = 'PPTestPkg'
pkgfolder = os.path.join(folder_pp1_abs, pp_p_name)
os.makedirs(pkgfolder, exist_ok=True)
pn = pp_p_name + ('.dec' if False else '.DEC')
self._make_file_helper(pkgfolder, pn)
self._make_edk2_module_helper(pkgfolder, 'module1', extension_case_lower=False)
p2 = self._make_edk2_module_helper(pkgfolder, 'module2', extension_case_lower=False)
p3 = os.path.join(p2, 'X64')
os.makedirs(p3)
self._make_file_helper(p3, 'TestFile.c')
return pkgfolder
pathobj = Edk2Path(ws_abs, [folder_pp1_abs])
os.chdir(ws_abs)
p = os.path.join('WSTestPkg', 'module2', 'module2.inf')
self.assertRaises(Exception, pathobj.GetContainingModules, p)
p = os.path.join('WSTestPkg', 'module2', 'module3.inf')
self.assertRaises(Exception, pathobj.GetContainingModules, p)
p = os.path.join('WSTestPkg', 'module2', 'X64', 'TestFile.c')
self.assertRaises(Exception, pathobj.GetContainingModules, p)
|
edk2-pytool-library
|
positive
|
def __delete_min(node):
if not node.left:
return None
if not self.__is_red(node.left) and (not self.__is_red(node.left.left)):
<DeepExtract>
assert node is not None
assert self.__is_red(node) and (not self.__is_red(node.left)) and (not self.__is_red(node.left.left))
self.__flip_colors(node)
if self.__is_red(node.right.left):
node.right = self.__rotate_right(node.right)
node = self.__rotate_left(node)
node = node
</DeepExtract>
<DeepExtract>
if not node.left.left:
node.left.left = None
if not self.__is_red(node.left.left) and (not self.__is_red(node.left.left.left)):
node.left = self.__move_red_left(node.left)
node.left.left = self.__delete_min(node.left.left)
node.left.left = self.__balance(node.left)
</DeepExtract>
return self.__balance(node)
|
def __delete_min(node):
if not node.left:
return None
if not self.__is_red(node.left) and (not self.__is_red(node.left.left)):
assert node is not None
assert self.__is_red(node) and (not self.__is_red(node.left)) and (not self.__is_red(node.left.left))
self.__flip_colors(node)
if self.__is_red(node.right.left):
node.right = self.__rotate_right(node.right)
node = self.__rotate_left(node)
node = node
if not node.left.left:
node.left.left = None
if not self.__is_red(node.left.left) and (not self.__is_red(node.left.left.left)):
node.left = self.__move_red_left(node.left)
node.left.left = self.__delete_min(node.left.left)
node.left.left = self.__balance(node.left)
return self.__balance(node)
|
algorithms-sedgewick-python
|
positive
|
def test_transform_structure(self):
input_params = {'Structure': {'TransformMe': self.original_value, 'LeaveAlone': self.original_value}}
input_shape = {'Structure': {'type': 'structure', 'members': {'TransformMe': {'shape': self.target_shape}, 'LeaveAlone': {'shape': 'String'}}}}
<DeepExtract>
self.add_shape(input_shape)
params_shape = self.json_model['shapes']['SampleOperationInputOutput']
shape_name = list(input_shape.keys())[0]
params_shape['members'][shape_name] = {'shape': shape_name}
</DeepExtract>
self.transformer.transform(params=input_params, model=self.operation_model.input_shape, transformation=self.transformation, target_shape=self.target_shape)
assert input_params == {'Structure': {'TransformMe': self.transformed_value, 'LeaveAlone': self.original_value}}
|
def test_transform_structure(self):
input_params = {'Structure': {'TransformMe': self.original_value, 'LeaveAlone': self.original_value}}
input_shape = {'Structure': {'type': 'structure', 'members': {'TransformMe': {'shape': self.target_shape}, 'LeaveAlone': {'shape': 'String'}}}}
self.add_shape(input_shape)
params_shape = self.json_model['shapes']['SampleOperationInputOutput']
shape_name = list(input_shape.keys())[0]
params_shape['members'][shape_name] = {'shape': shape_name}
self.transformer.transform(params=input_params, model=self.operation_model.input_shape, transformation=self.transformation, target_shape=self.target_shape)
assert input_params == {'Structure': {'TransformMe': self.transformed_value, 'LeaveAlone': self.original_value}}
|
boto3
|
positive
|
def cookie_encode(data, key, digestmod=None):
""" Encode and sign a pickle-able object. Return a (byte) string """
<DeepExtract>
text = 'Warning: Use of deprecated feature or API. (Deprecated in Bottle-%d.%d)\nCause: %s\nFix: %s\n' % (0, 13, 'cookie_encode() will be removed soon.', 'Do not use this API directly.')
if DEBUG == 'strict':
raise DeprecationWarning(text)
warnings.warn(text, DeprecationWarning, stacklevel=3)
return DeprecationWarning(text)
</DeepExtract>
digestmod = digestmod or hashlib.sha256
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=digestmod).digest())
return tob('!') + sig + tob('?') + msg
|
def cookie_encode(data, key, digestmod=None):
""" Encode and sign a pickle-able object. Return a (byte) string """
text = 'Warning: Use of deprecated feature or API. (Deprecated in Bottle-%d.%d)\nCause: %s\nFix: %s\n' % (0, 13, 'cookie_encode() will be removed soon.', 'Do not use this API directly.')
if DEBUG == 'strict':
raise DeprecationWarning(text)
warnings.warn(text, DeprecationWarning, stacklevel=3)
return DeprecationWarning(text)
digestmod = digestmod or hashlib.sha256
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=digestmod).digest())
return tob('!') + sig + tob('?') + msg
|
dirty-leds
|
positive
|
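The pair above (Bottle's `cookie_encode`) base64-encodes a pickled payload and signs it with an HMAC before packing both into one byte string. A stdlib-only sketch of that sign-and-pack step, assuming `key` is bytes (`sign_and_pack` is a hypothetical name, not Bottle's API):

```python
import base64
import hashlib
import hmac
import pickle


def sign_and_pack(data, key, digestmod=hashlib.sha256):
    """Return b'!<sig>?<msg>' where sig is an HMAC over the base64-encoded pickle."""
    msg = base64.b64encode(pickle.dumps(data, -1))
    sig = base64.b64encode(hmac.new(key, msg, digestmod=digestmod).digest())
    return b"!" + sig + b"?" + msg


print(sign_and_pack({"user": "alice"}, b"secret-key"))
```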
def knrm(self, bert_output, bert_mask, bert_segments, batch_size):
<DeepExtract>
bert_mask[:, 1:] = tf.cast(bert_mask[:, 1:], bert_output[:, 1:].dtype)
query_mask = bert_mask[:, 1:] * tf.cast(bert_segments[:, 1:] == 0, bert_mask[:, 1:].dtype)
query_mask = tf.expand_dims(query_mask, axis=-1)
padded_query = (query_mask * bert_output[:, 1:])[:, :self.maxqlen]
query_mask = query_mask[:, :self.maxqlen]
doc_mask = bert_mask[:, 1:] * tf.cast(bert_segments[:, 1:] == 1, bert_mask[:, 1:].dtype)
doc_mask = tf.expand_dims(doc_mask, axis=-1)
padded_doc = doc_mask * bert_output[:, 1:]
simmat = new_similarity_matrix_tf(padded_query, padded_doc, query_mask, doc_mask, 0)
(passage_simmats, passage_doc_mask, passage_query_mask) = (simmat, doc_mask, query_mask)
</DeepExtract>
passage_simmats = tf.reshape(passage_simmats, [batch_size, self.num_passages, self.maxqlen, self.maxdoclen])
passage_doc_mask = tf.reshape(passage_doc_mask, [batch_size, self.num_passages, 1, -1])
doc_simmat = tf.concat([passage_simmats[:, PIDX, :, :] for PIDX in range(self.num_passages)], axis=2)
doc_mask = tf.concat([passage_doc_mask[:, PIDX, :, :] for PIDX in range(self.num_passages)], axis=2)
query_mask = tf.reshape(passage_query_mask, [batch_size, self.num_passages, -1, 1])[:, 0, :, :]
prepooled_doc = self.kernels(doc_simmat)
prepooled_doc = prepooled_doc * tf.reshape(doc_mask, [batch_size, 1, 1, -1]) * tf.reshape(query_mask, [batch_size, 1, -1, 1])
knrm_features = tf.reduce_sum(prepooled_doc, axis=3)
knrm_features = tf.math.log(tf.maximum(knrm_features, 1e-06)) * 0.01
knrm_features = tf.reduce_sum(knrm_features, axis=2)
return knrm_features
|
def knrm(self, bert_output, bert_mask, bert_segments, batch_size):
bert_mask[:, 1:] = tf.cast(bert_mask[:, 1:], bert_output[:, 1:].dtype)
query_mask = bert_mask[:, 1:] * tf.cast(bert_segments[:, 1:] == 0, bert_mask[:, 1:].dtype)
query_mask = tf.expand_dims(query_mask, axis=-1)
padded_query = (query_mask * bert_output[:, 1:])[:, :self.maxqlen]
query_mask = query_mask[:, :self.maxqlen]
doc_mask = bert_mask[:, 1:] * tf.cast(bert_segments[:, 1:] == 1, bert_mask[:, 1:].dtype)
doc_mask = tf.expand_dims(doc_mask, axis=-1)
padded_doc = doc_mask * bert_output[:, 1:]
simmat = new_similarity_matrix_tf(padded_query, padded_doc, query_mask, doc_mask, 0)
(passage_simmats, passage_doc_mask, passage_query_mask) = (simmat, doc_mask, query_mask)
passage_simmats = tf.reshape(passage_simmats, [batch_size, self.num_passages, self.maxqlen, self.maxdoclen])
passage_doc_mask = tf.reshape(passage_doc_mask, [batch_size, self.num_passages, 1, -1])
doc_simmat = tf.concat([passage_simmats[:, PIDX, :, :] for PIDX in range(self.num_passages)], axis=2)
doc_mask = tf.concat([passage_doc_mask[:, PIDX, :, :] for PIDX in range(self.num_passages)], axis=2)
query_mask = tf.reshape(passage_query_mask, [batch_size, self.num_passages, -1, 1])[:, 0, :, :]
prepooled_doc = self.kernels(doc_simmat)
prepooled_doc = prepooled_doc * tf.reshape(doc_mask, [batch_size, 1, 1, -1]) * tf.reshape(query_mask, [batch_size, 1, -1, 1])
knrm_features = tf.reduce_sum(prepooled_doc, axis=3)
knrm_features = tf.math.log(tf.maximum(knrm_features, 1e-06)) * 0.01
knrm_features = tf.reduce_sum(knrm_features, axis=2)
return knrm_features
|
capreolus
|
positive
|
def test_release(self):
"""Run the Helm tests corresponding to a release for success (i.e. exit
code 0).
:return: Helm test suite run result
"""
LOG.info('RUNNING: %s tests with timeout=%ds', self.release_id, self.timeout)
try:
<DeepExtract>
labels = get_wait_labels(self.chart)
if labels:
label_selector = label_selectors(labels)
namespace = self.chart['namespace']
list_args = {'namespace': namespace, 'label_selector': label_selector, 'timeout_seconds': self.k8s_timeout}
pod_list = self.helm.k8s.client.list_namespaced_pod(**list_args)
test_pods = [pod for pod in pod_list.items if is_test_pod(pod)]
if test_pods:
LOG.info('Found existing test pods for release with namespace=%s, labels=(%s)', namespace, label_selector)
for test_pod in test_pods:
pod_name = test_pod.metadata.name
LOG.info('Deleting existing test pod: %s', pod_name)
self.helm.k8s.delete_pod_action(pod_name, namespace, timeout=self.k8s_timeout)
</DeepExtract>
except Exception:
LOG.exception('Exception when deleting test pods for release: %s', self.release_id)
self.helm.test_release(self.release_id, timeout=self.timeout)
|
def test_release(self):
"""Run the Helm tests corresponding to a release for success (i.e. exit
code 0).
:return: Helm test suite run result
"""
LOG.info('RUNNING: %s tests with timeout=%ds', self.release_id, self.timeout)
try:
labels = get_wait_labels(self.chart)
if labels:
label_selector = label_selectors(labels)
namespace = self.chart['namespace']
list_args = {'namespace': namespace, 'label_selector': label_selector, 'timeout_seconds': self.k8s_timeout}
pod_list = self.helm.k8s.client.list_namespaced_pod(**list_args)
test_pods = [pod for pod in pod_list.items if is_test_pod(pod)]
if test_pods:
LOG.info('Found existing test pods for release with namespace=%s, labels=(%s)', namespace, label_selector)
for test_pod in test_pods:
pod_name = test_pod.metadata.name
LOG.info('Deleting existing test pod: %s', pod_name)
self.helm.k8s.delete_pod_action(pod_name, namespace, timeout=self.k8s_timeout)
except Exception:
LOG.exception('Exception when deleting test pods for release: %s', self.release_id)
self.helm.test_release(self.release_id, timeout=self.timeout)
|
armada
|
positive
|
def __init__(self, wrapped_module, num_bits, mode=LinearQuantMode.SYMMETRIC, stats=None):
if not isinstance(wrapped_module, nn.Embedding):
raise ValueError(self.__class__.__name__ + ' can only wrap torch.nn.Embedding modules')
super(RangeLinearEmbeddingWrapper, self).__init__()
(self.min_q_val, self.max_q_val) = get_quantized_range(num_bits, signed=mode != LinearQuantMode.ASYMMETRIC_UNSIGNED)
if stats is None:
<DeepExtract>
if per_channel and wrapped_module.weight.dim() not in [2, 4]:
raise ValueError('Per channel quantization possible only with 2D or 4D tensors (linear or conv layer weights)')
if clip == ClipMode.N_STD:
if per_channel:
raise ValueError('N_STD clipping not supported with per-channel quantization')
if num_stds is None:
raise ValueError("Clip mode set top N_STD but 'num_stds' parameter not provided")
dim = 0 if clip == ClipMode.AVG or per_channel else None
sat_fn = _get_saturation_fn(self.mode, clip, num_stds)
if self.mode == LinearQuantMode.SYMMETRIC:
sat_val = sat_fn(wrapped_module.weight, dim)
(scale, zp) = symmetric_linear_quantization_params(num_bits, sat_val)
else:
(sat_min, sat_max) = sat_fn(wrapped_module.weight, dim)
signed = self.mode == LinearQuantMode.ASYMMETRIC_SIGNED
(scale, zp) = asymmetric_linear_quantization_params(num_bits, sat_min, sat_max, signed=signed)
if per_channel:
dims = [scale.shape[0]] + [1] * (wrapped_module.weight.dim() - 1)
scale = scale.view(dims)
zp = zp.view(dims)
if scale_approx_mult_bits is not None:
scale = approx_scale_as_mult_and_shift(scale, scale_approx_mult_bits)
(w_scale, w_zero_point) = (scale, zp)
</DeepExtract>
else:
<DeepExtract>
if clip == ClipMode.N_STD:
if num_stds is None:
raise ValueError("Clip mode set to N_STD but 'num_stds' parameter not provided")
if num_stds <= 0:
raise ValueError('n_stds must be > 0, got {}'.format(num_stds))
prefix = 'avg_' if clip == ClipMode.AVG else ''
sat_min = torch.tensor(float(stats['output'][prefix + 'min']))
sat_max = torch.tensor(float(stats['output'][prefix + 'max']))
if clip == ClipMode.N_STD:
mean = torch.tensor(float(stats['output']['mean']))
std = torch.tensor(float(stats['output']['std']))
sat_min = torch.max(sat_min, mean - num_stds * std)
sat_max = torch.min(sat_max, mean + num_stds * std)
if mode == LinearQuantMode.SYMMETRIC:
(scale, zp) = symmetric_linear_quantization_params(num_bits, torch.max(sat_min.abs_(), sat_max.abs_()))
else:
signed = mode == LinearQuantMode.ASYMMETRIC_SIGNED
(scale, zp) = asymmetric_linear_quantization_params(num_bits, sat_min, sat_max, signed=signed)
if scale_approx_mult_bits is not None:
scale = approx_scale_as_mult_and_shift(scale, scale_approx_mult_bits)
(w_scale, w_zero_point) = (scale, zp)
</DeepExtract>
device = wrapped_module.weight.device
self.register_buffer('w_scale', w_scale.to(device))
self.register_buffer('w_zero_point', w_zero_point.to(device))
linear_quantize_clamp(wrapped_module.weight.data, self.w_scale, self.w_zero_point, self.min_q_val, self.max_q_val, inplace=True)
self.wrapped_module = wrapped_module
|
def __init__(self, wrapped_module, num_bits, mode=LinearQuantMode.SYMMETRIC, stats=None):
if not isinstance(wrapped_module, nn.Embedding):
raise ValueError(self.__class__.__name__ + ' can only wrap torch.nn.Embedding modules')
super(RangeLinearEmbeddingWrapper, self).__init__()
(self.min_q_val, self.max_q_val) = get_quantized_range(num_bits, signed=mode != LinearQuantMode.ASYMMETRIC_UNSIGNED)
if stats is None:
if per_channel and wrapped_module.weight.dim() not in [2, 4]:
raise ValueError('Per channel quantization possible only with 2D or 4D tensors (linear or conv layer weights)')
if clip == ClipMode.N_STD:
if per_channel:
raise ValueError('N_STD clipping not supported with per-channel quantization')
if num_stds is None:
raise ValueError("Clip mode set top N_STD but 'num_stds' parameter not provided")
dim = 0 if clip == ClipMode.AVG or per_channel else None
sat_fn = _get_saturation_fn(self.mode, clip, num_stds)
if self.mode == LinearQuantMode.SYMMETRIC:
sat_val = sat_fn(wrapped_module.weight, dim)
(scale, zp) = symmetric_linear_quantization_params(num_bits, sat_val)
else:
(sat_min, sat_max) = sat_fn(wrapped_module.weight, dim)
signed = self.mode == LinearQuantMode.ASYMMETRIC_SIGNED
(scale, zp) = asymmetric_linear_quantization_params(num_bits, sat_min, sat_max, signed=signed)
if per_channel:
dims = [scale.shape[0]] + [1] * (wrapped_module.weight.dim() - 1)
scale = scale.view(dims)
zp = zp.view(dims)
if scale_approx_mult_bits is not None:
scale = approx_scale_as_mult_and_shift(scale, scale_approx_mult_bits)
(w_scale, w_zero_point) = (scale, zp)
else:
if clip == ClipMode.N_STD:
if num_stds is None:
raise ValueError("Clip mode set to N_STD but 'num_stds' parameter not provided")
if num_stds <= 0:
raise ValueError('n_stds must be > 0, got {}'.format(num_stds))
prefix = 'avg_' if clip == ClipMode.AVG else ''
sat_min = torch.tensor(float(stats['output'][prefix + 'min']))
sat_max = torch.tensor(float(stats['output'][prefix + 'max']))
if clip == ClipMode.N_STD:
mean = torch.tensor(float(stats['output']['mean']))
std = torch.tensor(float(stats['output']['std']))
sat_min = torch.max(sat_min, mean - num_stds * std)
sat_max = torch.min(sat_max, mean + num_stds * std)
if mode == LinearQuantMode.SYMMETRIC:
(scale, zp) = symmetric_linear_quantization_params(num_bits, torch.max(sat_min.abs_(), sat_max.abs_()))
else:
signed = mode == LinearQuantMode.ASYMMETRIC_SIGNED
(scale, zp) = asymmetric_linear_quantization_params(num_bits, sat_min, sat_max, signed=signed)
if scale_approx_mult_bits is not None:
scale = approx_scale_as_mult_and_shift(scale, scale_approx_mult_bits)
(w_scale, w_zero_point) = (scale, zp)
device = wrapped_module.weight.device
self.register_buffer('w_scale', w_scale.to(device))
self.register_buffer('w_zero_point', w_zero_point.to(device))
linear_quantize_clamp(wrapped_module.weight.data, self.w_scale, self.w_zero_point, self.min_q_val, self.max_q_val, inplace=True)
self.wrapped_module = wrapped_module
|
EagleEye
|
positive
|
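The pair above derives per-tensor scale and zero-point and clamps the embedding weights into the quantized range. A sketch of the symmetric case only (zero-point 0, scale chosen so the saturation value maps to the top quantized level), independent of the distiller-style helpers used above:

```python
import torch


def symmetric_quantize(w, num_bits=8):
    """Quantize a weight tensor symmetrically to signed num_bits integers."""
    qmax = 2 ** (num_bits - 1) - 1            # e.g. 127 for 8 bits
    sat_val = w.abs().max().clamp(min=1e-8)   # avoid division by zero
    scale = qmax / sat_val
    q = torch.clamp(torch.round(w * scale), -qmax - 1, qmax)
    return q, scale
```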
def compute_layout(self, force=False):
if not self.layout:
<DeepExtract>
layout = Container(margin=(4, 4, 4, 4))
tab = Icon(c4d.CINEMAN_ROOTFILE, width=self.plus_size)
tab.on_click = self.editor.open_document
layout.children.append(tab)
tab = Icon(c4d.RESOURCEIMAGE_AMDUPLICATE, width=self.plus_size)
tab.on_click = self.editor.new_document
layout.children.append(tab)
tab = Icon(c4d.RESOURCEIMAGE_BROWSER_PLAY, width=self.plus_size)
tab.on_click = self.editor.run_code
layout.children.append(tab)
active_doc = self.editor.get_active_document()
for doc in self.editor.documents:
tab = Container(margin=(8, 4, 8, 4))
name = os.path.basename(doc.filename) if doc.filename else 'untitled'
font = c4d.FONT_BOLD if doc.status == doc.Edited else c4d.FONT_DEFAULT
tab.children.append(Text(name, font=font))
tab.children.append(Icon(c4d.RESOURCEIMAGE_CLEARSELECTION, on_click=partial(self.editor.remove_document, doc), width=self.close_size))
if doc == active_doc:
tab.background_color = c4d.COLOR_BG
tab.children[0].color = c4d.COLOR_TEXTFOCUS
tab.on_click = partial(self.editor.set_active_document, doc)
layout.children.append(tab)
self.layout = layout
self.compute_layout()
self.LayoutChanged()
</DeepExtract>
self.layout.compute_size(self)
self.layout.compute_layout(None, self)
|
def compute_layout(self, force=False):
if not self.layout:
layout = Container(margin=(4, 4, 4, 4))
tab = Icon(c4d.CINEMAN_ROOTFILE, width=self.plus_size)
tab.on_click = self.editor.open_document
layout.children.append(tab)
tab = Icon(c4d.RESOURCEIMAGE_AMDUPLICATE, width=self.plus_size)
tab.on_click = self.editor.new_document
layout.children.append(tab)
tab = Icon(c4d.RESOURCEIMAGE_BROWSER_PLAY, width=self.plus_size)
tab.on_click = self.editor.run_code
layout.children.append(tab)
active_doc = self.editor.get_active_document()
for doc in self.editor.documents:
tab = Container(margin=(8, 4, 8, 4))
name = os.path.basename(doc.filename) if doc.filename else 'untitled'
font = c4d.FONT_BOLD if doc.status == doc.Edited else c4d.FONT_DEFAULT
tab.children.append(Text(name, font=font))
tab.children.append(Icon(c4d.RESOURCEIMAGE_CLEARSELECTION, on_click=partial(self.editor.remove_document, doc), width=self.close_size))
if doc == active_doc:
tab.background_color = c4d.COLOR_BG
tab.children[0].color = c4d.COLOR_TEXTFOCUS
tab.on_click = partial(self.editor.set_active_document, doc)
layout.children.append(tab)
self.layout = layout
self.compute_layout()
self.LayoutChanged()
self.layout.compute_size(self)
self.layout.compute_layout(None, self)
|
c4ddev
|
positive
|
def test_should_override_existing_temporary_table_content(self):
self.dataset_manager.write_tmp('tmp_table', "\n SELECT 'John' AS first_name, 'Smith' AS last_name\n ")
self.dataset_manager.write_tmp('tmp_table', "\n SELECT 'Neo' AS first_name, 'Neo' AS last_name\n ")
<DeepExtract>
results = [r for (_, r) in self.dataset_manager.collect('SELECT * FROM `{tmp_table}`').iterrows()]
</DeepExtract>
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['first_name'], 'Neo')
self.assertEqual(results[0]['last_name'], 'Neo')
|
def test_should_override_existing_temporary_table_content(self):
self.dataset_manager.write_tmp('tmp_table', "\n SELECT 'John' AS first_name, 'Smith' AS last_name\n ")
self.dataset_manager.write_tmp('tmp_table', "\n SELECT 'Neo' AS first_name, 'Neo' AS last_name\n ")
results = [r for (_, r) in self.dataset_manager.collect('SELECT * FROM `{tmp_table}`').iterrows()]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['first_name'], 'Neo')
self.assertEqual(results[0]['last_name'], 'Neo')
|
bigflow
|
positive
|
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
<DeepExtract>
if self._image_set == 'minitrain':
training_split_file = os.path.join(self._data_path, 'train.txt')
if self._image_set == 'smalltrain':
training_split_file = os.path.join(self._data_path, 'train.txt')
if self._image_set == 'minival':
training_split_file = os.path.join(self._data_path, 'val.txt')
if self._image_set == 'smallval':
training_split_file = os.path.join(self._data_path, 'val.txt')
else:
training_split_file = os.path.join(self._data_path, self._image_set + '.txt')
</DeepExtract>
assert os.path.exists(training_split_file), 'Path does not exist: {}'.format(training_split_file)
with open(training_split_file) as f:
metadata = f.readlines()
if self._image_set == 'minitrain':
metadata = metadata[:1000]
elif self._image_set == 'smalltrain':
metadata = metadata[:20000]
elif self._image_set == 'minival':
metadata = metadata[:100]
elif self._image_set == 'smallval':
metadata = metadata[:2000]
image_index = []
id_to_dir = {}
for line in metadata:
(im_file, ann_file) = line.split()
image_id = int(ann_file.split('/')[-1].split('.')[0])
<DeepExtract>
filename = os.path.join(self._data_path, 'xml', str(image_id) + '.xml')
</DeepExtract>
if os.path.exists(filename):
tree = ET.parse(filename)
for obj in tree.findall('object'):
obj_name = obj.find('name').text.lower().strip()
if obj_name in self._class_to_ind:
image_index.append(image_id)
id_to_dir[image_id] = im_file.split('/')[0]
break
return (image_index, id_to_dir)
|
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
if self._image_set == 'minitrain':
training_split_file = os.path.join(self._data_path, 'train.txt')
if self._image_set == 'smalltrain':
training_split_file = os.path.join(self._data_path, 'train.txt')
if self._image_set == 'minival':
training_split_file = os.path.join(self._data_path, 'val.txt')
if self._image_set == 'smallval':
training_split_file = os.path.join(self._data_path, 'val.txt')
else:
training_split_file = os.path.join(self._data_path, self._image_set + '.txt')
assert os.path.exists(training_split_file), 'Path does not exist: {}'.format(training_split_file)
with open(training_split_file) as f:
metadata = f.readlines()
if self._image_set == 'minitrain':
metadata = metadata[:1000]
elif self._image_set == 'smalltrain':
metadata = metadata[:20000]
elif self._image_set == 'minival':
metadata = metadata[:100]
elif self._image_set == 'smallval':
metadata = metadata[:2000]
image_index = []
id_to_dir = {}
for line in metadata:
(im_file, ann_file) = line.split()
image_id = int(ann_file.split('/')[-1].split('.')[0])
filename = os.path.join(self._data_path, 'xml', str(image_id) + '.xml')
if os.path.exists(filename):
tree = ET.parse(filename)
for obj in tree.findall('object'):
obj_name = obj.find('name').text.lower().strip()
if obj_name in self._class_to_ind:
image_index.append(image_id)
id_to_dir[image_id] = im_file.split('/')[0]
break
return (image_index, id_to_dir)
|
cascade-rcnn-fpn-faster_rcnn-pytorch1.0
|
positive
|
def filtButtonClicked(self):
print('Filter with parameters and change sac header')
if self.ptreeItem.onStack:
saclist = [self.gsac.stkdh]
else:
saclist = self.gsac.saclist
for sacdh in saclist:
pdata.setFilterPara(sacdh, self.opts.pppara, self.opts.filterParameters)
if self.opts.filterParameters['apply']:
pdata.seisApplyFilter(saclist, self.opts.filterParameters)
else:
pdata.seisUnApplyFilter(saclist)
if self.ptreeItem.onStack:
<DeepExtract>
self.resetStackCurve(self.stackWaveItem)
self.resetWaveCurve([self.stackWaveItem])
self.resetPick([self.stackWaveItem], self.picklist)
self.resetWindStack()
</DeepExtract>
else:
<DeepExtract>
self.resetWaveCurve(self.traceWaveItemListPlotted)
self.resetWaveLabel(self.traceWaveItemList)
self.resetWind(self.traceWaveItemListPlotted)
self.resetPick(self.traceWaveItemListPlotted, self.picklist)
</DeepExtract>
|
def filtButtonClicked(self):
print('Filter with parameters and change sac header')
if self.ptreeItem.onStack:
saclist = [self.gsac.stkdh]
else:
saclist = self.gsac.saclist
for sacdh in saclist:
pdata.setFilterPara(sacdh, self.opts.pppara, self.opts.filterParameters)
if self.opts.filterParameters['apply']:
pdata.seisApplyFilter(saclist, self.opts.filterParameters)
else:
pdata.seisUnApplyFilter(saclist)
if self.ptreeItem.onStack:
self.resetStackCurve(self.stackWaveItem)
self.resetWaveCurve([self.stackWaveItem])
self.resetPick([self.stackWaveItem], self.picklist)
self.resetWindStack()
else:
self.resetWaveCurve(self.traceWaveItemListPlotted)
self.resetWaveLabel(self.traceWaveItemList)
self.resetWind(self.traceWaveItemListPlotted)
self.resetPick(self.traceWaveItemListPlotted, self.picklist)
|
aimbat
|
positive
|
def handle_data(self, server, session, envelope, data):
mx_rcpt = {}
for rcpt in envelope.rcpt_tos:
(_, _, domain) = rcpt.partition('@')
<DeepExtract>
        records = dns.resolver.resolve(domain, 'MX')
        if not records:
            mx = None
        else:
            records = sorted(records, key=lambda r: r.preference)
            mx = str(records[0].exchange)
</DeepExtract>
if mx is None:
continue
mx_rcpt.setdefault(mx, []).append(rcpt)
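    # Relay one copy of the original message per mail exchanger, addressed to all of that MX's recipients.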
for (mx, rcpts) in mx_rcpt.items():
with SMTPCLient(mx, 25) as client:
client.sendmail(from_addr=envelope.mail_from, to_addrs=rcpts, msg=envelope.original_content)
|
def handle_data(self, server, session, envelope, data):
mx_rcpt = {}
for rcpt in envelope.rcpt_tos:
(_, _, domain) = rcpt.partition('@')
        records = dns.resolver.resolve(domain, 'MX')
        if not records:
            mx = None
        else:
            records = sorted(records, key=lambda r: r.preference)
            mx = str(records[0].exchange)
if mx is None:
continue
mx_rcpt.setdefault(mx, []).append(rcpt)
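    # Relay one copy of the original message per mail exchanger, addressed to all of that MX's recipients.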
for (mx, rcpts) in mx_rcpt.items():
with SMTPCLient(mx, 25) as client:
client.sendmail(from_addr=envelope.mail_from, to_addrs=rcpts, msg=envelope.original_content)
|
aiosmtpd
|
positive
|
def finalize_on_234(spec, state, epoch, sufficient_support):
assert epoch > 4
transition_to(spec, state, spec.SLOTS_PER_EPOCH * epoch - 1)
<DeepExtract>
c1 = None if epoch < 1 else spec.Checkpoint(epoch=epoch - 1, root=b'\xaa' * 32)
c2 = None if epoch < 2 else spec.Checkpoint(epoch=epoch - 2, root=b'\xbb' * 32)
c3 = None if epoch < 3 else spec.Checkpoint(epoch=epoch - 3, root=b'\xcc' * 32)
c4 = None if epoch < 4 else spec.Checkpoint(epoch=epoch - 4, root=b'\xdd' * 32)
c5 = None if epoch < 5 else spec.Checkpoint(epoch=epoch - 5, root=b'\xee' * 32)
(c1, c2, c3, c4, _) = (c1, c2, c3, c4, c5)
</DeepExtract>
<DeepExtract>
for c in [c1, c2, c3, c4]:
state.block_roots[spec.compute_start_slot_at_epoch(c.epoch) % spec.SLOTS_PER_HISTORICAL_ROOT] = c.root
</DeepExtract>
old_finalized = state.finalized_checkpoint
state.previous_justified_checkpoint = c4
state.current_justified_checkpoint = c3
state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
state.justification_bits[1:3] = [1, 1]
<DeepExtract>
assert (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0
previous_epoch = spec.get_previous_epoch(state)
current_epoch = spec.get_current_epoch(state)
if not is_post_altair(spec):
if current_epoch == epoch - 2:
attestations = state.current_epoch_attestations
elif previous_epoch == epoch - 2:
attestations = state.previous_epoch_attestations
else:
raise Exception(f'cannot include attestations in epoch ${epoch - 2} from epoch ${current_epoch}')
elif current_epoch == epoch - 2:
epoch_participation = state.current_epoch_participation
elif previous_epoch == epoch - 2:
epoch_participation = state.previous_epoch_participation
else:
raise Exception(f'cannot include attestations in epoch ${epoch - 2} from epoch ${current_epoch}')
total_balance = spec.get_total_active_balance(state)
remaining_balance = int(total_balance * 2 // 3)
start_slot = spec.compute_start_slot_at_epoch(epoch - 2)
    committees_per_slot = spec.get_committee_count_per_slot(state, epoch - 2)
    messed_up_target = False  # finalize_on_234 never mangles the target root; assume the inlined helper's default
for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH):
for index in range(committees_per_slot):
if remaining_balance < 0:
return
committee = spec.get_beacon_committee(state, slot, index)
aggregation_bits = [0] * len(committee)
for v in range(len(committee) * 2 // 3 + 1):
if remaining_balance > 0:
remaining_balance -= int(state.validators[v].effective_balance)
aggregation_bits[v] = 1
else:
break
if not sufficient_support:
for i in range(max(len(committee) // 5, 1)):
aggregation_bits[i] = 0
if not is_post_altair(spec):
attestations.append(spec.PendingAttestation(aggregation_bits=aggregation_bits, data=spec.AttestationData(slot=slot, beacon_block_root=b'\xff' * 32, source=c4, target=c2, index=index), inclusion_delay=1))
if messed_up_target:
attestations[len(attestations) - 1].data.target.root = b'\x99' * 32
else:
for (i, index) in enumerate(committee):
if aggregation_bits[i]:
epoch_participation[index] |= spec.ParticipationFlags(2 ** spec.TIMELY_HEAD_FLAG_INDEX)
epoch_participation[index] |= spec.ParticipationFlags(2 ** spec.TIMELY_SOURCE_FLAG_INDEX)
if not messed_up_target:
epoch_participation[index] |= spec.ParticipationFlags(2 ** spec.TIMELY_TARGET_FLAG_INDEX)
</DeepExtract>
yield from run_process_just_and_fin(spec, state)
assert state.previous_justified_checkpoint == c3
if sufficient_support:
assert state.current_justified_checkpoint == c2
assert state.finalized_checkpoint == c4
else:
assert state.current_justified_checkpoint == c3
assert state.finalized_checkpoint == old_finalized
|
def finalize_on_234(spec, state, epoch, sufficient_support):
assert epoch > 4
transition_to(spec, state, spec.SLOTS_PER_EPOCH * epoch - 1)
c1 = None if epoch < 1 else spec.Checkpoint(epoch=epoch - 1, root=b'\xaa' * 32)
c2 = None if epoch < 2 else spec.Checkpoint(epoch=epoch - 2, root=b'\xbb' * 32)
c3 = None if epoch < 3 else spec.Checkpoint(epoch=epoch - 3, root=b'\xcc' * 32)
c4 = None if epoch < 4 else spec.Checkpoint(epoch=epoch - 4, root=b'\xdd' * 32)
c5 = None if epoch < 5 else spec.Checkpoint(epoch=epoch - 5, root=b'\xee' * 32)
(c1, c2, c3, c4, _) = (c1, c2, c3, c4, c5)
for c in [c1, c2, c3, c4]:
state.block_roots[spec.compute_start_slot_at_epoch(c.epoch) % spec.SLOTS_PER_HISTORICAL_ROOT] = c.root
old_finalized = state.finalized_checkpoint
state.previous_justified_checkpoint = c4
state.current_justified_checkpoint = c3
state.justification_bits = spec.Bitvector[spec.JUSTIFICATION_BITS_LENGTH]()
state.justification_bits[1:3] = [1, 1]
assert (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0
previous_epoch = spec.get_previous_epoch(state)
current_epoch = spec.get_current_epoch(state)
if not is_post_altair(spec):
if current_epoch == epoch - 2:
attestations = state.current_epoch_attestations
elif previous_epoch == epoch - 2:
attestations = state.previous_epoch_attestations
else:
raise Exception(f'cannot include attestations in epoch ${epoch - 2} from epoch ${current_epoch}')
elif current_epoch == epoch - 2:
epoch_participation = state.current_epoch_participation
elif previous_epoch == epoch - 2:
epoch_participation = state.previous_epoch_participation
else:
raise Exception(f'cannot include attestations in epoch ${epoch - 2} from epoch ${current_epoch}')
total_balance = spec.get_total_active_balance(state)
remaining_balance = int(total_balance * 2 // 3)
start_slot = spec.compute_start_slot_at_epoch(epoch - 2)
    committees_per_slot = spec.get_committee_count_per_slot(state, epoch - 2)
    messed_up_target = False  # finalize_on_234 never mangles the target root; assume the inlined helper's default
for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH):
for index in range(committees_per_slot):
if remaining_balance < 0:
return
committee = spec.get_beacon_committee(state, slot, index)
aggregation_bits = [0] * len(committee)
for v in range(len(committee) * 2 // 3 + 1):
if remaining_balance > 0:
remaining_balance -= int(state.validators[v].effective_balance)
aggregation_bits[v] = 1
else:
break
if not sufficient_support:
for i in range(max(len(committee) // 5, 1)):
aggregation_bits[i] = 0
if not is_post_altair(spec):
attestations.append(spec.PendingAttestation(aggregation_bits=aggregation_bits, data=spec.AttestationData(slot=slot, beacon_block_root=b'\xff' * 32, source=c4, target=c2, index=index), inclusion_delay=1))
if messed_up_target:
attestations[len(attestations) - 1].data.target.root = b'\x99' * 32
else:
for (i, index) in enumerate(committee):
if aggregation_bits[i]:
epoch_participation[index] |= spec.ParticipationFlags(2 ** spec.TIMELY_HEAD_FLAG_INDEX)
epoch_participation[index] |= spec.ParticipationFlags(2 ** spec.TIMELY_SOURCE_FLAG_INDEX)
if not messed_up_target:
epoch_participation[index] |= spec.ParticipationFlags(2 ** spec.TIMELY_TARGET_FLAG_INDEX)
yield from run_process_just_and_fin(spec, state)
assert state.previous_justified_checkpoint == c3
if sufficient_support:
assert state.current_justified_checkpoint == c2
assert state.finalized_checkpoint == c4
else:
assert state.current_justified_checkpoint == c3
assert state.finalized_checkpoint == old_finalized
|
eth2.0-specs
|
positive
|
def addAttributes(self, eid, parent):
"""adds the attributes of the given IFC entity under the given QTreeWidgetITem"""
import ifcopenshell
from PySide import QtCore, QtGui
entity = self.ifc[eid]
i = 0
while True:
try:
argname = entity.attribute_name(i)
except RuntimeError:
break
else:
try:
argvalue = getattr(entity, argname)
except AttributeError:
FreeCAD.Console.PrintError(translate('BIM', 'Error in entity') + ' ' + self.tostr(entity) + '\n')
break
else:
if argname not in ['Id', 'GlobalId']:
colored = False
if isinstance(argvalue, ifcopenshell.entity_instance):
if argvalue.id() == 0:
<DeepExtract>
import six
if six.PY2:
if isinstance(argvalue, unicode):
t = argvalue.encode('utf8')
else:
t = str(argvalue)
elif isinstance(argvalue, str):
t = argvalue
else:
t = str(argvalue)
</DeepExtract>
else:
colored = True
t = '#' + self.tostr(argvalue.id()) + ': ' + self.tostr(argvalue.is_a())
elif isinstance(argvalue, (list, tuple)):
t = ''
else:
<DeepExtract>
import six
if six.PY2:
if isinstance(argvalue, unicode):
t = argvalue.encode('utf8')
else:
t = str(argvalue)
elif isinstance(argvalue, str):
t = argvalue
else:
t = str(argvalue)
</DeepExtract>
item = QtGui.QTreeWidgetItem(parent)
item.setText(0, self.tostr(argname))
if t and t != 'None':
item.setText(1, t)
if colored:
item.setForeground(1, self.linkbrush)
item.setFont(1, self.linkfont)
if argname == 'Name':
item.setFont(1, self.bold)
if isinstance(argvalue, (list, tuple)):
j = 0
for argitem in argvalue:
colored = False
if isinstance(argitem, ifcopenshell.entity_instance):
if argitem.id() == 0:
<DeepExtract>
import six
if six.PY2:
if isinstance(argitem, unicode):
t = argitem.encode('utf8')
else:
t = str(argitem)
elif isinstance(argitem, str):
t = argitem
else:
t = str(argitem)
</DeepExtract>
else:
colored = True
t = '#' + self.tostr(argitem.id()) + ': ' + self.tostr(argitem.is_a())
else:
t = argitem
<DeepExtract>
import six
if six.PY2:
if isinstance(t, unicode):
t = t.encode('utf8')
else:
t = str(t)
elif isinstance(t, str):
t = t
else:
t = str(t)
</DeepExtract>
if j == 0:
item.setText(1, t)
if colored:
item.setForeground(1, self.linkbrush)
item.setFont(1, self.linkfont)
else:
subitem = QtGui.QTreeWidgetItem(item)
subitem.setText(1, t)
if colored:
subitem.setForeground(1, self.linkbrush)
subitem.setFont(1, self.linkfont)
j += 1
i += 1
|
def addAttributes(self, eid, parent):
"""adds the attributes of the given IFC entity under the given QTreeWidgetITem"""
import ifcopenshell
from PySide import QtCore, QtGui
entity = self.ifc[eid]
i = 0
while True:
try:
argname = entity.attribute_name(i)
except RuntimeError:
break
else:
try:
argvalue = getattr(entity, argname)
except AttributeError:
FreeCAD.Console.PrintError(translate('BIM', 'Error in entity') + ' ' + self.tostr(entity) + '\n')
break
else:
if argname not in ['Id', 'GlobalId']:
colored = False
if isinstance(argvalue, ifcopenshell.entity_instance):
if argvalue.id() == 0:
import six
if six.PY2:
if isinstance(argvalue, unicode):
t = argvalue.encode('utf8')
else:
t = str(argvalue)
elif isinstance(argvalue, str):
t = argvalue
else:
t = str(argvalue)
else:
colored = True
t = '#' + self.tostr(argvalue.id()) + ': ' + self.tostr(argvalue.is_a())
elif isinstance(argvalue, (list, tuple)):
t = ''
else:
import six
if six.PY2:
if isinstance(argvalue, unicode):
t = argvalue.encode('utf8')
else:
t = str(argvalue)
elif isinstance(argvalue, str):
t = argvalue
else:
t = str(argvalue)
item = QtGui.QTreeWidgetItem(parent)
item.setText(0, self.tostr(argname))
if t and t != 'None':
item.setText(1, t)
if colored:
item.setForeground(1, self.linkbrush)
item.setFont(1, self.linkfont)
if argname == 'Name':
item.setFont(1, self.bold)
if isinstance(argvalue, (list, tuple)):
j = 0
for argitem in argvalue:
colored = False
if isinstance(argitem, ifcopenshell.entity_instance):
if argitem.id() == 0:
import six
if six.PY2:
if isinstance(argitem, unicode):
t = argitem.encode('utf8')
else:
t = str(argitem)
elif isinstance(argitem, str):
t = argitem
else:
t = str(argitem)
else:
colored = True
t = '#' + self.tostr(argitem.id()) + ': ' + self.tostr(argitem.is_a())
else:
t = argitem
import six
if six.PY2:
if isinstance(t, unicode):
t = t.encode('utf8')
else:
t = str(t)
elif isinstance(t, str):
t = t
else:
t = str(t)
if j == 0:
item.setText(1, t)
if colored:
item.setForeground(1, self.linkbrush)
item.setFont(1, self.linkfont)
else:
subitem = QtGui.QTreeWidgetItem(item)
subitem.setText(1, t)
if colored:
subitem.setForeground(1, self.linkbrush)
subitem.setFont(1, self.linkfont)
j += 1
i += 1
|
BIM_Workbench
|
positive
|
def find_line(varname):
<DeepExtract>
ls = list(filter(lambda l: varname in l, list(open('python_lab.py'))))
</DeepExtract>
return ls[0] if len(ls) else None
|
def find_line(varname):
ls = list(filter(lambda l: varname in l, list(open('python_lab.py'))))
return ls[0] if len(ls) else None
|
coding-the-matrix
|
positive
|
def testDQN(self):
try:
<DeepExtract>
class MyDQNModel(DQNModel):
def _encode_obs(self, input_obs, scope='encode_obs'):
with tf.variable_scope(name_or_scope=scope):
h1 = tf.layers.dense(input_obs, units=64, activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=0))
h2 = tf.layers.dense(h1, units=64, activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=0))
logits = tf.layers.dense(h2, units=2 * DQN_MODEL_CONFIG['num_atoms'], activation=None, kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=0))
return logits
env = gym.make('CartPole-v0')
dqn_g = tf.Graph()
with dqn_g.as_default():
agent = agents[DQN_AGENT_CONFIG['type']](env.observation_space, env.action_space, DQN_AGENT_CONFIG, DQN_MODEL_CONFIG, distributed_spec={}, custom_model=MyDQNModel)
ob = env.reset()
(action, results) = agent.act([ob], deterministic=False, use_perturbed_action=False)
(next_ob, reward, done, info) = env.step(action[0])
</DeepExtract>
except Exception as ex:
        self.fail('doTestDQN raised {} unexpectedly!'.format(ex))
finally:
pass
|
def testDQN(self):
try:
class MyDQNModel(DQNModel):
def _encode_obs(self, input_obs, scope='encode_obs'):
with tf.variable_scope(name_or_scope=scope):
h1 = tf.layers.dense(input_obs, units=64, activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=0))
h2 = tf.layers.dense(h1, units=64, activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=0))
logits = tf.layers.dense(h2, units=2 * DQN_MODEL_CONFIG['num_atoms'], activation=None, kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=0))
return logits
env = gym.make('CartPole-v0')
dqn_g = tf.Graph()
with dqn_g.as_default():
agent = agents[DQN_AGENT_CONFIG['type']](env.observation_space, env.action_space, DQN_AGENT_CONFIG, DQN_MODEL_CONFIG, distributed_spec={}, custom_model=MyDQNModel)
ob = env.reset()
(action, results) = agent.act([ob], deterministic=False, use_perturbed_action=False)
(next_ob, reward, done, info) = env.step(action[0])
except Exception as ex:
        self.fail('doTestDQN raised {} unexpectedly!'.format(ex))
finally:
pass
|
EasyRL
|
positive
|
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--job-id', default=None, type=JobIdClickType(), help=JobIdClickType.help)
@click.option('--json-file', default=None, type=click.Path(), help='File containing partial JSON request to POST to /api/2.*/jobs/reset. For more, read full help message.')
@click.option('--json', default=None, type=JsonClickType(), help='Partial JSON string to POST to /api/2.*/jobs/reset. For more, read full help message.')
@api_version_option
@debug_option
@profile_option
@eat_exceptions
@provide_api_client
def reset_cli(api_client, json_file, json, job_id, version):
"""
Resets (edits) the definition of a job.
    The specification for the json option can be found at
https://docs.databricks.com/api/latest/jobs.html#jobsjobsettings
"""
<DeepExtract>
    # Warn only when the client is still on Jobs API 2.0 and no explicit --version was given
    # (the original helper returned early here; a bare return would abort reset_cli itself).
    if version is None and api_client.jobs_api_version != '2.1':
        click.echo(click.style('WARN', fg='yellow') + ': Your CLI is configured ' + 'to use Jobs API 2.0. In order to use the latest Jobs features ' + "please upgrade to 2.1: 'databricks jobs configure --version=2.1'. " + 'Future versions of this CLI will default to the new Jobs API. ' + 'Learn more at https://docs.databricks.com/dev-tools/cli/jobs-cli.html', err=True)
</DeepExtract>
if not bool(json_file) ^ bool(json):
raise RuntimeError('Either --json-file or --json should be provided')
if json_file:
with open(json_file, 'r') as f:
json = f.read()
deser_json = json_loads(json)
new_settings = deser_json['new_settings'] if 'new_settings' in deser_json else deser_json
if job_id is None:
if 'job_id' in deser_json:
job_id = deser_json['job_id']
else:
raise RuntimeError('Either --job-id or a root-level json key "job_id" should be provided')
request_body = {'job_id': job_id, 'new_settings': new_settings}
JobsApi(api_client).reset_job(request_body, version=version)
|
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('--job-id', default=None, type=JobIdClickType(), help=JobIdClickType.help)
@click.option('--json-file', default=None, type=click.Path(), help='File containing partial JSON request to POST to /api/2.*/jobs/reset. For more, read full help message.')
@click.option('--json', default=None, type=JsonClickType(), help='Partial JSON string to POST to /api/2.*/jobs/reset. For more, read full help message.')
@api_version_option
@debug_option
@profile_option
@eat_exceptions
@provide_api_client
def reset_cli(api_client, json_file, json, job_id, version):
"""
Resets (edits) the definition of a job.
    The specification for the json option can be found at
https://docs.databricks.com/api/latest/jobs.html#jobsjobsettings
"""
    # Warn only when the client is still on Jobs API 2.0 and no explicit --version was given
    # (the original helper returned early here; a bare return would abort reset_cli itself).
    if version is None and api_client.jobs_api_version != '2.1':
        click.echo(click.style('WARN', fg='yellow') + ': Your CLI is configured ' + 'to use Jobs API 2.0. In order to use the latest Jobs features ' + "please upgrade to 2.1: 'databricks jobs configure --version=2.1'. " + 'Future versions of this CLI will default to the new Jobs API. ' + 'Learn more at https://docs.databricks.com/dev-tools/cli/jobs-cli.html', err=True)
if not bool(json_file) ^ bool(json):
raise RuntimeError('Either --json-file or --json should be provided')
if json_file:
with open(json_file, 'r') as f:
json = f.read()
deser_json = json_loads(json)
new_settings = deser_json['new_settings'] if 'new_settings' in deser_json else deser_json
if job_id is None:
if 'job_id' in deser_json:
job_id = deser_json['job_id']
else:
raise RuntimeError('Either --job-id or a root-level json key "job_id" should be provided')
request_body = {'job_id': job_id, 'new_settings': new_settings}
JobsApi(api_client).reset_job(request_body, version=version)
|
databricks-cli
|
positive
|
def __init__(self):
os.mkdir('train')
os.mkdir('test')
for i in range(1000):
os.mkdir('train/' + str(i))
os.mkdir('test/' + str(i))
print(i)
<DeepExtract>
    idx = 0  # running image index used to build the file names
    files = os.listdir('raw_data/Imagenet64_train_npz')
img_size2 = img_size * img_size
for file in files:
f = np.load('raw_data/Imagenet64_train_npz' + '/' + file)
x = np.array(f['data'])
y = np.array(f['labels']) - 1
x = np.dstack((x[:, :img_size2], x[:, img_size2:2 * img_size2], x[:, 2 * img_size2:]))
x = x.reshape((x.shape[0], img_size, img_size, 3))
for (i, img) in enumerate(x):
img = Image.fromarray(img.reshape(img_size, img_size, 3))
            name = str(int(idx)).zfill(9)
            label = str(y[i])
            print(idx, 'train/' + label + '/' + name + '_label_' + label.zfill(4) + '.png')
            img.save('train/' + label + '/' + name + '_label_' + label.zfill(4) + '.png')
            idx += 1
</DeepExtract>
<DeepExtract>
    idx = 100000000  # test images continue numbering from 1e8
    files = os.listdir('raw_data/Imagenet64_val_npz')
img_size2 = img_size * img_size
for file in files:
f = np.load('raw_data/Imagenet64_val_npz' + '/' + file)
x = np.array(f['data'])
y = np.array(f['labels']) - 1
x = np.dstack((x[:, :img_size2], x[:, img_size2:2 * img_size2], x[:, 2 * img_size2:]))
x = x.reshape((x.shape[0], img_size, img_size, 3))
for (i, img) in enumerate(x):
img = Image.fromarray(img.reshape(img_size, img_size, 3))
            name = str(int(idx)).zfill(9)
            label = str(y[i])
            print(idx, 'test/' + label + '/' + name + '_label_' + label.zfill(4) + '.png')
            img.save('test/' + label + '/' + name + '_label_' + label.zfill(4) + '.png')
            idx += 1
</DeepExtract>
|
def __init__(self):
os.mkdir('train')
os.mkdir('test')
for i in range(1000):
os.mkdir('train/' + str(i))
os.mkdir('test/' + str(i))
print(i)
    idx = 0  # running image index used to build the file names
    files = os.listdir('raw_data/Imagenet64_train_npz')
img_size2 = img_size * img_size
for file in files:
f = np.load('raw_data/Imagenet64_train_npz' + '/' + file)
x = np.array(f['data'])
y = np.array(f['labels']) - 1
x = np.dstack((x[:, :img_size2], x[:, img_size2:2 * img_size2], x[:, 2 * img_size2:]))
x = x.reshape((x.shape[0], img_size, img_size, 3))
for (i, img) in enumerate(x):
img = Image.fromarray(img.reshape(img_size, img_size, 3))
            name = str(int(idx)).zfill(9)
            label = str(y[i])
            print(idx, 'train/' + label + '/' + name + '_label_' + label.zfill(4) + '.png')
            img.save('train/' + label + '/' + name + '_label_' + label.zfill(4) + '.png')
            idx += 1
    idx = 100000000  # test images continue numbering from 1e8
    files = os.listdir('raw_data/Imagenet64_val_npz')
img_size2 = img_size * img_size
for file in files:
f = np.load('raw_data/Imagenet64_val_npz' + '/' + file)
x = np.array(f['data'])
y = np.array(f['labels']) - 1
x = np.dstack((x[:, :img_size2], x[:, img_size2:2 * img_size2], x[:, 2 * img_size2:]))
x = x.reshape((x.shape[0], img_size, img_size, 3))
for (i, img) in enumerate(x):
img = Image.fromarray(img.reshape(img_size, img_size, 3))
            name = str(int(idx)).zfill(9)
            label = str(y[i])
            print(idx, 'test/' + label + '/' + name + '_label_' + label.zfill(4) + '.png')
            img.save('test/' + label + '/' + name + '_label_' + label.zfill(4) + '.png')
            idx += 1
|
auto_LiRPA
|
positive
|
def address(self, address, sock=None, get_address=False):
"""Decorate functions to bind them from their definition.
`address` is the osc address to bind to the callback.
if `get_address` is set to True, the first parameter the
callback will receive will be the address that matched (useful
with advanced matching).
example:
server = OSCThreadServer()
server.listen('localhost', 8000, default=True)
@server.address(b'/printer')
def printer(values):
print(values)
send_message(b'/printer', [b'hello world'])
note:
This won't work on methods as it'll call them as normal
functions, and the callback won't get a `self` argument.
To bind a method use the `address_method` decorator.
"""
def decorator(callback):
<DeepExtract>
if not sock and self.default_socket:
sock = self.default_socket
elif not sock:
raise RuntimeError('no default socket yet and no socket provided')
if isinstance(address, UNICODE) and self.encoding:
address = address.encode(self.encoding, errors=self.encoding_errors)
if self.advanced_matching:
address = self.create_smart_address(address)
callbacks = self.addresses.get((sock, address), [])
cb = (callback, get_address)
if cb not in callbacks:
callbacks.append(cb)
self.addresses[sock, address] = callbacks
</DeepExtract>
return callback
return decorator
|
def address(self, address, sock=None, get_address=False):
"""Decorate functions to bind them from their definition.
`address` is the osc address to bind to the callback.
if `get_address` is set to True, the first parameter the
callback will receive will be the address that matched (useful
with advanced matching).
example:
server = OSCThreadServer()
server.listen('localhost', 8000, default=True)
@server.address(b'/printer')
def printer(values):
print(values)
send_message(b'/printer', [b'hello world'])
note:
This won't work on methods as it'll call them as normal
functions, and the callback won't get a `self` argument.
To bind a method use the `address_method` decorator.
"""
def decorator(callback):
if not sock and self.default_socket:
sock = self.default_socket
elif not sock:
raise RuntimeError('no default socket yet and no socket provided')
if isinstance(address, UNICODE) and self.encoding:
address = address.encode(self.encoding, errors=self.encoding_errors)
if self.advanced_matching:
address = self.create_smart_address(address)
callbacks = self.addresses.get((sock, address), [])
cb = (callback, get_address)
if cb not in callbacks:
callbacks.append(cb)
self.addresses[sock, address] = callbacks
return callback
return decorator
|
blender.NodeOSC
|
positive
|
def _stringify_row(self, row, options):
for (index, field, value, width) in zip(range(0, len(row)), self._field_names, row, self._widths):
lines = value.split('\n')
new_lines = []
for line in lines:
if _str_block_width(line) > width:
line = textwrap.fill(line, width)
new_lines.append(line)
lines = new_lines
value = '\n'.join(lines)
row[index] = value
row_height = 0
for c in row:
h = _get_size(c)[1]
if h > row_height:
row_height = h
bits = []
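    # Resolve cell padding: explicit left/right padding widths take precedence over the shared padding_width.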
<DeepExtract>
if options['left_padding_width'] is not None:
lpad = options['left_padding_width']
else:
lpad = options['padding_width']
if options['right_padding_width'] is not None:
rpad = options['right_padding_width']
else:
rpad = options['padding_width']
</DeepExtract>
for y in range(0, row_height):
bits.append([])
if options['border']:
if options['vrules'] in (ALL, FRAME):
bits[y].append(self.vertical_char)
else:
bits[y].append(' ')
for (field, value, width) in zip(self._field_names, row, self._widths):
valign = self._valign[field]
lines = value.split('\n')
dHeight = row_height - len(lines)
if dHeight:
if valign == 'm':
lines = [''] * int(dHeight / 2) + lines + [''] * (dHeight - int(dHeight / 2))
elif valign == 'b':
lines = [''] * dHeight + lines
else:
lines = lines + [''] * dHeight
y = 0
for l in lines:
if options['fields'] and field not in options['fields']:
continue
bits[y].append(' ' * lpad + self._justify(l, width, self._align[field]) + ' ' * rpad)
if options['border']:
if options['vrules'] == ALL:
bits[y].append(self.vertical_char)
else:
bits[y].append(' ')
y += 1
for y in range(0, row_height):
if options['border'] and options['vrules'] == FRAME:
bits[y].pop()
bits[y].append(options['vertical_char'])
if options['border'] and options['hrules'] == ALL:
bits[row_height - 1].append('\n')
bits[row_height - 1].append(self._hrule)
for y in range(0, row_height):
bits[y] = ''.join(bits[y])
return '\n'.join(bits)
|
def _stringify_row(self, row, options):
for (index, field, value, width) in zip(range(0, len(row)), self._field_names, row, self._widths):
lines = value.split('\n')
new_lines = []
for line in lines:
if _str_block_width(line) > width:
line = textwrap.fill(line, width)
new_lines.append(line)
lines = new_lines
value = '\n'.join(lines)
row[index] = value
row_height = 0
for c in row:
h = _get_size(c)[1]
if h > row_height:
row_height = h
bits = []
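    # Resolve cell padding: explicit left/right padding widths take precedence over the shared padding_width.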
if options['left_padding_width'] is not None:
lpad = options['left_padding_width']
else:
lpad = options['padding_width']
if options['right_padding_width'] is not None:
rpad = options['right_padding_width']
else:
rpad = options['padding_width']
for y in range(0, row_height):
bits.append([])
if options['border']:
if options['vrules'] in (ALL, FRAME):
bits[y].append(self.vertical_char)
else:
bits[y].append(' ')
for (field, value, width) in zip(self._field_names, row, self._widths):
valign = self._valign[field]
lines = value.split('\n')
dHeight = row_height - len(lines)
if dHeight:
if valign == 'm':
lines = [''] * int(dHeight / 2) + lines + [''] * (dHeight - int(dHeight / 2))
elif valign == 'b':
lines = [''] * dHeight + lines
else:
lines = lines + [''] * dHeight
y = 0
for l in lines:
if options['fields'] and field not in options['fields']:
continue
bits[y].append(' ' * lpad + self._justify(l, width, self._align[field]) + ' ' * rpad)
if options['border']:
if options['vrules'] == ALL:
bits[y].append(self.vertical_char)
else:
bits[y].append(' ')
y += 1
for y in range(0, row_height):
if options['border'] and options['vrules'] == FRAME:
bits[y].pop()
bits[y].append(options['vertical_char'])
if options['border'] and options['hrules'] == ALL:
bits[row_height - 1].append('\n')
bits[row_height - 1].append(self._hrule)
for y in range(0, row_height):
bits[y] = ''.join(bits[y])
return '\n'.join(bits)
|
C--Compiler
|
positive
|
def acquire_userips(self, username, num=1):
logger.info('acquire user ips of %s' % username)
if not self.has_user(username):
return [False, 'username not exists in users set']
<DeepExtract>
[status, userdata] = self.etcd.getkey('network/users/' + username)
usercopy = json.loads(userdata)
user = UserPool(copy=usercopy)
logger.debug('load user into dict')
self.users[username] = user
</DeepExtract>
result = self.users[username].acquire(num)
<DeepExtract>
logger.debug('dump user into etcd')
self.etcd.setkey('network/users/' + username, json.dumps({'info': self.users[username].info, 'gateway': self.users[username].gateway, 'pool': self.users[username].pool}))
</DeepExtract>
del self.users[username]
return result
|
def acquire_userips(self, username, num=1):
logger.info('acquire user ips of %s' % username)
if not self.has_user(username):
return [False, 'username not exists in users set']
[status, userdata] = self.etcd.getkey('network/users/' + username)
usercopy = json.loads(userdata)
user = UserPool(copy=usercopy)
logger.debug('load user into dict')
self.users[username] = user
result = self.users[username].acquire(num)
logger.debug('dump user into etcd')
self.etcd.setkey('network/users/' + username, json.dumps({'info': self.users[username].info, 'gateway': self.users[username].gateway, 'pool': self.users[username].pool}))
del self.users[username]
return result
|
docklet
|
positive
|
def main():
"""See README for instructions on calling analysis.
"""
description = 'Analyze results from aggregated studies'
args = parse_args(general_parser(description))
leaderboard_metric = cc.VISIBLE_TO_OPT
logger.setLevel(logging.INFO)
if args[CmdArgs.verbose]:
logger.addHandler(logging.StreamHandler())
(perf_ds, meta) = XRSerializer.load_derived(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.EVAL_RESULTS)
logger.info('Meta data from source file: %s' % str(meta['args']))
if cc.BASELINE not in XRSerializer.get_derived_keys(args[CmdArgs.db_root], db=args[CmdArgs.db]):
warnings.warn('Baselines not found. Need to construct baseline.')
do_baseline(args)
(baseline_ds, meta_ref) = XRSerializer.load_derived(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.BASELINE)
logger.info('baseline data from source ref file: %s' % str(meta_ref['args']))
(sig_errs, signatures) = analyze_signature_pair(meta['signature'], meta_ref['signature'])
logger.info('Signature errors:\n%s' % sig_errs.to_string())
print(json.dumps({'exp-anal sig errors': sig_errs.T.to_dict()}))
test_cases_run = perf_ds.coords[TEST_CASE].values.tolist()
assert set(test_cases_run) <= set(baseline_ds.coords[TEST_CASE].values.tolist()), 'Data set contains test cases not found in baseline.'
baseline_ds = baseline_ds.sel({TEST_CASE: test_cases_run})
iters_run = perf_ds.coords[ITER].values.tolist()
assert set(iters_run) <= set(baseline_ds.coords[ITER].values.tolist()), 'Data set not same batch size or too many iters compared to baseline.'
baseline_ds = baseline_ds.sel({ITER: iters_run})
perf_visible = perf_ds[cc.VISIBLE_TO_OPT]
agg_result = OrderedDict()
summary = OrderedDict()
for metric_for_scoring in sorted(perf_ds):
perf_da = perf_ds[metric_for_scoring]
baseline_ds_ = baseline_ds.sel({OBJECTIVE: metric_for_scoring}, drop=True)
<DeepExtract>
validate_agg_perf(perf_da, min_trial=1)
assert isinstance(baseline_ds_, xr.Dataset)
assert tuple(baseline_ds_[PERF_BEST].dims) == (TEST_CASE,)
assert tuple(baseline_ds_[PERF_CLIP].dims) == (TEST_CASE,)
assert tuple(baseline_ds_[PERF_MED].dims) == (ITER, TEST_CASE)
assert tuple(baseline_ds_[PERF_MEAN].dims) == (ITER, TEST_CASE)
assert xru.coord_compat((perf_da, baseline_ds_), (ITER, TEST_CASE))
assert not any((np.any(np.isnan(baseline_ds_[kk].values)) for kk in baseline_ds_))
agg_result = xru.ds_like(perf_da, (PERF_MED, LB_MED, UB_MED, NORMED_MED, PERF_MEAN, LB_MEAN, UB_MEAN, NORMED_MEAN), (ITER, METHOD, TEST_CASE))
baseline_mean_da = xru.only_dataarray(xru.ds_like(perf_da, ['ref'], (ITER, TEST_CASE)))
for func_name in perf_da.coords[TEST_CASE].values:
rand_perf_med = baseline_ds_[PERF_MED].sel({TEST_CASE: func_name}, drop=True).values
rand_perf_mean = baseline_ds_[PERF_MEAN].sel({TEST_CASE: func_name}, drop=True).values
best_opt = baseline_ds_[PERF_BEST].sel({TEST_CASE: func_name}, drop=True).values
base_clip_val = baseline_ds_[PERF_CLIP].sel({TEST_CASE: func_name}, drop=True).values
assert np.all(np.diff(rand_perf_med) <= 0), 'Baseline should be decreasing with iteration'
assert np.all(np.diff(rand_perf_mean) <= 0), 'Baseline should be decreasing with iteration'
assert np.all(rand_perf_med > best_opt)
assert np.all(rand_perf_mean > best_opt)
assert np.all(rand_perf_mean <= base_clip_val)
baseline_mean_da.loc[{TEST_CASE: func_name}] = linear_rescale(rand_perf_mean, best_opt, base_clip_val, 0.0, 1.0, enforce_bounds=False)
for method_name in perf_da.coords[METHOD].values:
curr_da = perf_da.sel({METHOD: method_name, TEST_CASE: func_name}, drop=True)
assert curr_da.dims == (ITER, SUGGEST, TRIAL)
if perf_visible is None:
perf_array = get_perf_array(curr_da.values, curr_da.values)
curr_da_ = perf_da.sel({METHOD: method_name, TEST_CASE: func_name}, drop=True).min(dim=SUGGEST)
assert curr_da_.dims == (ITER, TRIAL)
perf_array_ = np.minimum.accumulate(curr_da_.values, axis=0)
assert np.allclose(perf_array, perf_array_)
else:
curr_visible_da = perf_visible.sel({METHOD: method_name, TEST_CASE: func_name}, drop=True)
assert curr_visible_da.dims == (ITER, SUGGEST, TRIAL)
perf_array = get_perf_array(curr_da.values, curr_visible_da.values)
(med_perf, LB, UB) = qt.quantile_and_CI(perf_array, EVAL_Q, alpha=ALPHA)
assert med_perf.shape == rand_perf_med.shape
agg_result[PERF_MED].loc[{TEST_CASE: func_name, METHOD: method_name}] = med_perf
agg_result[LB_MED].loc[{TEST_CASE: func_name, METHOD: method_name}] = LB
agg_result[UB_MED].loc[{TEST_CASE: func_name, METHOD: method_name}] = UB
normed = linear_rescale(med_perf, best_opt, rand_perf_med, 0.0, 1.0, enforce_bounds=False)
agg_result[NORMED_MED].loc[{TEST_CASE: func_name, METHOD: method_name}] = normed
normed = linear_rescale(perf_array, best_opt, base_clip_val, 0.0, 1.0, enforce_bounds=False)
normed = np.clip(normed, -1.0, 1.0)
normed = np.mean(normed, axis=1)
agg_result[NORMED_MEAN].loc[{TEST_CASE: func_name, METHOD: method_name}] = normed
perf_array = np.minimum(base_clip_val, perf_array)
mean_perf = np.mean(perf_array, axis=1)
assert mean_perf.shape == rand_perf_mean.shape
EB = t_EB(perf_array, alpha=ALPHA, axis=1)
assert EB.shape == rand_perf_mean.shape
agg_result[PERF_MEAN].loc[{TEST_CASE: func_name, METHOD: method_name}] = mean_perf
agg_result[LB_MEAN].loc[{TEST_CASE: func_name, METHOD: method_name}] = mean_perf - EB
agg_result[UB_MEAN].loc[{TEST_CASE: func_name, METHOD: method_name}] = mean_perf + EB
assert not any((np.any(np.isnan(agg_result[kk].values)) for kk in agg_result))
summary = xru.ds_like(perf_da, (PERF_MED, LB_MED, UB_MED, PERF_MEAN, LB_MEAN, UB_MEAN, NORMED_MEAN, LB_NORMED_MEAN, UB_NORMED_MEAN), (ITER, METHOD))
(summary[PERF_MED], summary[LB_MED], summary[UB_MED]) = xr.apply_ufunc(qt.quantile_and_CI, agg_result[NORMED_MED], input_core_dims=[[TEST_CASE]], kwargs={'q': EVAL_Q, 'alpha': ALPHA}, output_core_dims=[[], [], []])
summary[PERF_MEAN] = agg_result[NORMED_MEAN].mean(dim=TEST_CASE)
EB = xr.apply_ufunc(t_EB, agg_result[NORMED_MEAN], input_core_dims=[[TEST_CASE]])
summary[LB_MEAN] = summary[PERF_MEAN] - EB
summary[UB_MEAN] = summary[PERF_MEAN] + EB
normalizer = baseline_mean_da.mean(dim=TEST_CASE)
summary[NORMED_MEAN] = summary[PERF_MEAN] / normalizer
summary[LB_NORMED_MEAN] = summary[LB_MEAN] / normalizer
summary[UB_NORMED_MEAN] = summary[UB_MEAN] / normalizer
assert all((tuple(summary[kk].dims) == (ITER, METHOD) for kk in summary))
(agg_result[metric_for_scoring,], summary[metric_for_scoring,]) = (agg_result, summary)
</DeepExtract>
agg_result = xru.ds_concat(agg_result, dims=(cc.OBJECTIVE,))
summary = xru.ds_concat(summary, dims=(cc.OBJECTIVE,))
for metric_for_scoring in sorted(perf_ds):
print('Scores by problem (JSON):\n')
agg_df = agg_result[NORMED_MEAN].sel({cc.OBJECTIVE: metric_for_scoring}, drop=True)[{ITER: -1}].to_pandas().T
print(json.dumps({metric_for_scoring: agg_df.to_dict()}))
print('\n')
final_score = summary[PERF_MED].sel({cc.OBJECTIVE: metric_for_scoring}, drop=True)[{ITER: -1}]
logger.info('median score @ %d:\n%s' % (summary.sizes[ITER], xru.da_to_string(final_score)))
final_score = summary[PERF_MEAN].sel({cc.OBJECTIVE: metric_for_scoring}, drop=True)[{ITER: -1}]
logger.info('mean score @ %d:\n%s' % (summary.sizes[ITER], xru.da_to_string(final_score)))
print('Final scores (JSON):\n')
print(json.dumps({metric_for_scoring: final_score.to_series().to_dict()}))
print('\n')
final_score = summary[NORMED_MEAN].sel({cc.OBJECTIVE: metric_for_scoring}, drop=True)[{ITER: -1}]
logger.info('normed mean score @ %d:\n%s' % (summary.sizes[ITER], xru.da_to_string(final_score)))
meta = {'args': serializable_dict(args), 'signature': signatures}
XRSerializer.save_derived(agg_result, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.PERF_RESULTS)
XRSerializer.save_derived(summary, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.MEAN_SCORE)
final_msg = xru.da_to_string(100 * (1.0 - summary[PERF_MEAN].sel({cc.OBJECTIVE: leaderboard_metric}, drop=True)[{ITER: -1}]))
logger.info('-' * 20)
logger.info('Final score `100 x (1-loss)` for leaderboard:\n%s' % final_msg)
|
def main():
"""See README for instructions on calling analysis.
"""
description = 'Analyze results from aggregated studies'
args = parse_args(general_parser(description))
leaderboard_metric = cc.VISIBLE_TO_OPT
logger.setLevel(logging.INFO)
if args[CmdArgs.verbose]:
logger.addHandler(logging.StreamHandler())
(perf_ds, meta) = XRSerializer.load_derived(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.EVAL_RESULTS)
logger.info('Meta data from source file: %s' % str(meta['args']))
if cc.BASELINE not in XRSerializer.get_derived_keys(args[CmdArgs.db_root], db=args[CmdArgs.db]):
warnings.warn('Baselines not found. Need to construct baseline.')
do_baseline(args)
(baseline_ds, meta_ref) = XRSerializer.load_derived(args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.BASELINE)
logger.info('baseline data from source ref file: %s' % str(meta_ref['args']))
(sig_errs, signatures) = analyze_signature_pair(meta['signature'], meta_ref['signature'])
logger.info('Signature errors:\n%s' % sig_errs.to_string())
print(json.dumps({'exp-anal sig errors': sig_errs.T.to_dict()}))
test_cases_run = perf_ds.coords[TEST_CASE].values.tolist()
assert set(test_cases_run) <= set(baseline_ds.coords[TEST_CASE].values.tolist()), 'Data set contains test cases not found in baseline.'
baseline_ds = baseline_ds.sel({TEST_CASE: test_cases_run})
iters_run = perf_ds.coords[ITER].values.tolist()
assert set(iters_run) <= set(baseline_ds.coords[ITER].values.tolist()), 'Data set not same batch size or too many iters compared to baseline.'
baseline_ds = baseline_ds.sel({ITER: iters_run})
perf_visible = perf_ds[cc.VISIBLE_TO_OPT]
agg_result = OrderedDict()
summary = OrderedDict()
for metric_for_scoring in sorted(perf_ds):
perf_da = perf_ds[metric_for_scoring]
baseline_ds_ = baseline_ds.sel({OBJECTIVE: metric_for_scoring}, drop=True)
validate_agg_perf(perf_da, min_trial=1)
assert isinstance(baseline_ds_, xr.Dataset)
assert tuple(baseline_ds_[PERF_BEST].dims) == (TEST_CASE,)
assert tuple(baseline_ds_[PERF_CLIP].dims) == (TEST_CASE,)
assert tuple(baseline_ds_[PERF_MED].dims) == (ITER, TEST_CASE)
assert tuple(baseline_ds_[PERF_MEAN].dims) == (ITER, TEST_CASE)
assert xru.coord_compat((perf_da, baseline_ds_), (ITER, TEST_CASE))
assert not any((np.any(np.isnan(baseline_ds_[kk].values)) for kk in baseline_ds_))
agg_result = xru.ds_like(perf_da, (PERF_MED, LB_MED, UB_MED, NORMED_MED, PERF_MEAN, LB_MEAN, UB_MEAN, NORMED_MEAN), (ITER, METHOD, TEST_CASE))
baseline_mean_da = xru.only_dataarray(xru.ds_like(perf_da, ['ref'], (ITER, TEST_CASE)))
for func_name in perf_da.coords[TEST_CASE].values:
rand_perf_med = baseline_ds_[PERF_MED].sel({TEST_CASE: func_name}, drop=True).values
rand_perf_mean = baseline_ds_[PERF_MEAN].sel({TEST_CASE: func_name}, drop=True).values
best_opt = baseline_ds_[PERF_BEST].sel({TEST_CASE: func_name}, drop=True).values
base_clip_val = baseline_ds_[PERF_CLIP].sel({TEST_CASE: func_name}, drop=True).values
assert np.all(np.diff(rand_perf_med) <= 0), 'Baseline should be decreasing with iteration'
assert np.all(np.diff(rand_perf_mean) <= 0), 'Baseline should be decreasing with iteration'
assert np.all(rand_perf_med > best_opt)
assert np.all(rand_perf_mean > best_opt)
assert np.all(rand_perf_mean <= base_clip_val)
baseline_mean_da.loc[{TEST_CASE: func_name}] = linear_rescale(rand_perf_mean, best_opt, base_clip_val, 0.0, 1.0, enforce_bounds=False)
for method_name in perf_da.coords[METHOD].values:
curr_da = perf_da.sel({METHOD: method_name, TEST_CASE: func_name}, drop=True)
assert curr_da.dims == (ITER, SUGGEST, TRIAL)
if perf_visible is None:
perf_array = get_perf_array(curr_da.values, curr_da.values)
curr_da_ = perf_da.sel({METHOD: method_name, TEST_CASE: func_name}, drop=True).min(dim=SUGGEST)
assert curr_da_.dims == (ITER, TRIAL)
perf_array_ = np.minimum.accumulate(curr_da_.values, axis=0)
assert np.allclose(perf_array, perf_array_)
else:
curr_visible_da = perf_visible.sel({METHOD: method_name, TEST_CASE: func_name}, drop=True)
assert curr_visible_da.dims == (ITER, SUGGEST, TRIAL)
perf_array = get_perf_array(curr_da.values, curr_visible_da.values)
(med_perf, LB, UB) = qt.quantile_and_CI(perf_array, EVAL_Q, alpha=ALPHA)
assert med_perf.shape == rand_perf_med.shape
agg_result[PERF_MED].loc[{TEST_CASE: func_name, METHOD: method_name}] = med_perf
agg_result[LB_MED].loc[{TEST_CASE: func_name, METHOD: method_name}] = LB
agg_result[UB_MED].loc[{TEST_CASE: func_name, METHOD: method_name}] = UB
normed = linear_rescale(med_perf, best_opt, rand_perf_med, 0.0, 1.0, enforce_bounds=False)
agg_result[NORMED_MED].loc[{TEST_CASE: func_name, METHOD: method_name}] = normed
normed = linear_rescale(perf_array, best_opt, base_clip_val, 0.0, 1.0, enforce_bounds=False)
normed = np.clip(normed, -1.0, 1.0)
normed = np.mean(normed, axis=1)
agg_result[NORMED_MEAN].loc[{TEST_CASE: func_name, METHOD: method_name}] = normed
perf_array = np.minimum(base_clip_val, perf_array)
mean_perf = np.mean(perf_array, axis=1)
assert mean_perf.shape == rand_perf_mean.shape
EB = t_EB(perf_array, alpha=ALPHA, axis=1)
assert EB.shape == rand_perf_mean.shape
agg_result[PERF_MEAN].loc[{TEST_CASE: func_name, METHOD: method_name}] = mean_perf
agg_result[LB_MEAN].loc[{TEST_CASE: func_name, METHOD: method_name}] = mean_perf - EB
agg_result[UB_MEAN].loc[{TEST_CASE: func_name, METHOD: method_name}] = mean_perf + EB
assert not any((np.any(np.isnan(agg_result[kk].values)) for kk in agg_result))
summary = xru.ds_like(perf_da, (PERF_MED, LB_MED, UB_MED, PERF_MEAN, LB_MEAN, UB_MEAN, NORMED_MEAN, LB_NORMED_MEAN, UB_NORMED_MEAN), (ITER, METHOD))
(summary[PERF_MED], summary[LB_MED], summary[UB_MED]) = xr.apply_ufunc(qt.quantile_and_CI, agg_result[NORMED_MED], input_core_dims=[[TEST_CASE]], kwargs={'q': EVAL_Q, 'alpha': ALPHA}, output_core_dims=[[], [], []])
summary[PERF_MEAN] = agg_result[NORMED_MEAN].mean(dim=TEST_CASE)
EB = xr.apply_ufunc(t_EB, agg_result[NORMED_MEAN], input_core_dims=[[TEST_CASE]])
summary[LB_MEAN] = summary[PERF_MEAN] - EB
summary[UB_MEAN] = summary[PERF_MEAN] + EB
normalizer = baseline_mean_da.mean(dim=TEST_CASE)
summary[NORMED_MEAN] = summary[PERF_MEAN] / normalizer
summary[LB_NORMED_MEAN] = summary[LB_MEAN] / normalizer
summary[UB_NORMED_MEAN] = summary[UB_MEAN] / normalizer
assert all((tuple(summary[kk].dims) == (ITER, METHOD) for kk in summary))
(agg_result[metric_for_scoring,], summary[metric_for_scoring,]) = (agg_result, summary)
agg_result = xru.ds_concat(agg_result, dims=(cc.OBJECTIVE,))
summary = xru.ds_concat(summary, dims=(cc.OBJECTIVE,))
for metric_for_scoring in sorted(perf_ds):
print('Scores by problem (JSON):\n')
agg_df = agg_result[NORMED_MEAN].sel({cc.OBJECTIVE: metric_for_scoring}, drop=True)[{ITER: -1}].to_pandas().T
print(json.dumps({metric_for_scoring: agg_df.to_dict()}))
print('\n')
final_score = summary[PERF_MED].sel({cc.OBJECTIVE: metric_for_scoring}, drop=True)[{ITER: -1}]
logger.info('median score @ %d:\n%s' % (summary.sizes[ITER], xru.da_to_string(final_score)))
final_score = summary[PERF_MEAN].sel({cc.OBJECTIVE: metric_for_scoring}, drop=True)[{ITER: -1}]
logger.info('mean score @ %d:\n%s' % (summary.sizes[ITER], xru.da_to_string(final_score)))
print('Final scores (JSON):\n')
print(json.dumps({metric_for_scoring: final_score.to_series().to_dict()}))
print('\n')
final_score = summary[NORMED_MEAN].sel({cc.OBJECTIVE: metric_for_scoring}, drop=True)[{ITER: -1}]
logger.info('normed mean score @ %d:\n%s' % (summary.sizes[ITER], xru.da_to_string(final_score)))
meta = {'args': serializable_dict(args), 'signature': signatures}
XRSerializer.save_derived(agg_result, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.PERF_RESULTS)
XRSerializer.save_derived(summary, meta, args[CmdArgs.db_root], db=args[CmdArgs.db], key=cc.MEAN_SCORE)
final_msg = xru.da_to_string(100 * (1.0 - summary[PERF_MEAN].sel({cc.OBJECTIVE: leaderboard_metric}, drop=True)[{ITER: -1}]))
logger.info('-' * 20)
logger.info('Final score `100 x (1-loss)` for leaderboard:\n%s' % final_msg)
|
bayesmark
|
positive
|
def get_env_builder(t_prof):
<DeepExtract>
    for b in ALL_BUILDERS:
        if t_prof.env_builder_cls_str == b.__name__:
            ENV_BUILDER = b
            break
    else:
        # only raise when no registered builder matched
        raise ValueError(t_prof.env_builder_cls_str, 'is not registered or does not exist.')
</DeepExtract>
return ENV_BUILDER(env_cls=get_env_cls_from_str(t_prof.game_cls_str), env_args=t_prof.module_args['env'])
|
def get_env_builder(t_prof):
    for b in ALL_BUILDERS:
        if t_prof.env_builder_cls_str == b.__name__:
            ENV_BUILDER = b
            break
    else:
        # only raise when no registered builder matched
        raise ValueError(t_prof.env_builder_cls_str, 'is not registered or does not exist.')
return ENV_BUILDER(env_cls=get_env_cls_from_str(t_prof.game_cls_str), env_args=t_prof.module_args['env'])
|
DREAM
|
positive
|
@force_fp32(apply_to=('feats',), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].out_size
num_levels = len(feats)
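    # Map each RoI to a pyramid level from its scale (log2 of the box's sqrt-area relative to finest_scale).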
<DeepExtract>
scale = torch.sqrt((rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-06))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
</DeepExtract>
roi_feats = feats[0].new_zeros(rois.size(0), self.out_channels, *out_size)
if roi_scale_factor is not None:
<DeepExtract>
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1] + 1
h = rois[:, 4] - rois[:, 2] + 1
new_w = w * roi_scale_factor
new_h = h * roi_scale_factor
x1 = cx - new_w * 0.5 + 0.5
x2 = cx + new_w * 0.5 - 0.5
y1 = cy - new_h * 0.5 + 0.5
y2 = cy + new_h * 0.5 - 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
rois = new_rois
</DeepExtract>
for i in range(num_levels):
inds = target_lvls == i
if inds.any():
rois_ = rois[inds, :]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
return roi_feats
|
@force_fp32(apply_to=('feats',), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].out_size
num_levels = len(feats)
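    # Map each RoI to a pyramid level from its scale (log2 of the box's sqrt-area relative to finest_scale).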
scale = torch.sqrt((rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-06))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
roi_feats = feats[0].new_zeros(rois.size(0), self.out_channels, *out_size)
if roi_scale_factor is not None:
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1] + 1
h = rois[:, 4] - rois[:, 2] + 1
new_w = w * roi_scale_factor
new_h = h * roi_scale_factor
x1 = cx - new_w * 0.5 + 0.5
x2 = cx + new_w * 0.5 - 0.5
y1 = cy - new_h * 0.5 + 0.5
y2 = cy + new_h * 0.5 - 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
rois = new_rois
for i in range(num_levels):
inds = target_lvls == i
if inds.any():
rois_ = rois[inds, :]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
return roi_feats
|
D2Det
|
positive
|
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg, rescale=False):
"""
Transform network output for a batch into labeled boxes.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
img_metas (list[dict]): size / scale info for each image
cfg (mmcv.Config): test / postprocessing configuration
rescale (bool): if True, return boxes in original image space
Returns:
list[tuple[Tensor, Tensor]]: each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1. The second item is a
(n,) tensor where each item is the class index of the
corresponding box.
Example:
>>> import mmcv
>>> self = AnchorHead(num_classes=9, in_channels=1)
>>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
>>> cfg = mmcv.Config(dict(
>>> score_thr=0.00,
>>> nms=dict(type='nms', iou_thr=1.0),
>>> max_per_img=10))
>>> feat = torch.rand(1, 1, 3, 3)
>>> cls_score, bbox_pred = self.forward_single(feat)
>>> # note the input lists are over different levels, not images
>>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
>>> result_list = self.get_bboxes(cls_scores, bbox_preds,
>>> img_metas, cfg)
>>> det_bboxes, det_labels = result_list[0]
>>> assert len(result_list) == 1
>>> assert det_bboxes.shape[1] == 5
>>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
"""
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
device = cls_scores[0].device
mlvl_anchors = [self.anchor_generators[i].grid_anchors(cls_scores[i].size()[-2:], self.anchor_strides[i], device=device) for i in range(num_levels)]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [cls_scores[i][img_id].detach() for i in range(num_levels)]
bbox_pred_list = [bbox_preds[i][img_id].detach() for i in range(num_levels)]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
<DeepExtract>
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
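        # Decode boxes level by level, keeping only the top nms_pre scoring anchors per level before the final NMS.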
for (cls_score, bbox_pred, anchors) in zip(cls_score_list, bbox_pred_list, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
if self.use_sigmoid_cls:
(max_scores, _) = scores.max(dim=1)
else:
(max_scores, _) = scores[:, 1:].max(dim=1)
(_, topk_inds) = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = delta2bbox(anchors, bbox_pred, self.target_means, self.target_stds, img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
(det_bboxes, det_labels) = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img)
proposals = (det_bboxes, det_labels)
</DeepExtract>
result_list.append(proposals)
return result_list
|
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg, rescale=False):
"""
Transform network output for a batch into labeled boxes.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
img_metas (list[dict]): size / scale info for each image
cfg (mmcv.Config): test / postprocessing configuration
rescale (bool): if True, return boxes in original image space
Returns:
list[tuple[Tensor, Tensor]]: each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1. The second item is a
(n,) tensor where each item is the class index of the
corresponding box.
Example:
>>> import mmcv
>>> self = AnchorHead(num_classes=9, in_channels=1)
>>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
>>> cfg = mmcv.Config(dict(
>>> score_thr=0.00,
>>> nms=dict(type='nms', iou_thr=1.0),
>>> max_per_img=10))
>>> feat = torch.rand(1, 1, 3, 3)
>>> cls_score, bbox_pred = self.forward_single(feat)
>>> # note the input lists are over different levels, not images
>>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
>>> result_list = self.get_bboxes(cls_scores, bbox_preds,
>>> img_metas, cfg)
>>> det_bboxes, det_labels = result_list[0]
>>> assert len(result_list) == 1
>>> assert det_bboxes.shape[1] == 5
>>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
"""
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
device = cls_scores[0].device
mlvl_anchors = [self.anchor_generators[i].grid_anchors(cls_scores[i].size()[-2:], self.anchor_strides[i], device=device) for i in range(num_levels)]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [cls_scores[i][img_id].detach() for i in range(num_levels)]
bbox_pred_list = [bbox_preds[i][img_id].detach() for i in range(num_levels)]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
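        # Decode boxes level by level, keeping only the top nms_pre scoring anchors per level before the final NMS.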
for (cls_score, bbox_pred, anchors) in zip(cls_score_list, bbox_pred_list, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
if self.use_sigmoid_cls:
(max_scores, _) = scores.max(dim=1)
else:
(max_scores, _) = scores[:, 1:].max(dim=1)
(_, topk_inds) = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = delta2bbox(anchors, bbox_pred, self.target_means, self.target_stds, img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
(det_bboxes, det_labels) = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img)
proposals = (det_bboxes, det_labels)
result_list.append(proposals)
return result_list
|
Dense-RepPoints
|
positive
|
def test_post_request_increments(self):
<DeepExtract>
ingr_comps = IngredientComposition.objects.filter(user=self.user_1)
ingr_comps_uuids = ingr_comps.values_list('uuid', flat=True)
ingr_comps_uuids = [{'uuid': str(item)} for item in ingr_comps_uuids]
request_parameters = {'name': 'Glutamine', 'ingredient_compositions': ingr_comps_uuids}
</DeepExtract>
super().test_post_request_increments(request_parameters)
|
def test_post_request_increments(self):
ingr_comps = IngredientComposition.objects.filter(user=self.user_1)
ingr_comps_uuids = ingr_comps.values_list('uuid', flat=True)
ingr_comps_uuids = [{'uuid': str(item)} for item in ingr_comps_uuids]
request_parameters = {'name': 'Glutamine', 'ingredient_compositions': ingr_comps_uuids}
request_parameters = request_parameters
super().test_post_request_increments(request_parameters)
|
betterself
|
positive
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='../data/glue_data/', type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
parser.add_argument('--model_type', default='bert', type=str, required=True, help='Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys()))
parser.add_argument('--model_name_or_path', default='../model/pytorch_bert_base_uncased/pytorch_model.bin', type=str, required=True, help='Path to pre-trained model or shortcut name selected in the list: ' + ', '.join(ALL_MODELS))
parser.add_argument('--task_name', default='MRPC', type=str, required=True, help='The name of the task to train selected in the list: ' + ', '.join(processors.keys()))
parser.add_argument('--output_dir', default='../model/MRPC/pytorch_output/pytorch_model.bin', type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
parser.add_argument('--do_predict', action='store_true', help='Whether to run predict on the test set.')
parser.add_argument('--evaluate_during_training', action='store_true', help='Run evaluation during training at each logging step.')
parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
parser.add_argument('--per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--max_steps', default=-1, type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--logging_steps', type=int, default=500, help='Log every X updates steps.')
parser.add_argument('--save_steps', type=int, default=500, help='Save checkpoint every X updates steps.')
parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number')
parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--local_rank', type=int, default=-1, help='For distributed training: local_rank')
parser.add_argument('--server_ip', type=str, default='', help='For distant debugging.')
parser.add_argument('--server_port', type=str, default='', help='For distant debugging.')
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir):
raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
if args.server_ip and args.server_port:
import ptvsd
print('Waiting for debugger attach')
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
<DeepExtract>
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
</DeepExtract>
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError('Task not found: %s' % args.task_name)
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
args.model_type = args.model_type.lower()
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier()
model.to(args.device)
logger.info('Training/evaluation parameters %s', args)
if args.do_train:
<DeepExtract>
if args.local_rank not in [-1, 0] and (not evaluate):
torch.distributed.barrier()
processor = processors[args.task_name]()
output_mode = output_modes[args.task_name]
if evaluate:
mode = 'dev'
elif predict:
mode = 'test'
else:
mode = 'train'
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(mode, list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(args.task_name)))
if os.path.exists(cached_features_file) and (not args.overwrite_cache):
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info('Creating features from dataset file at %s', args.data_dir)
label_list = processor.get_labels()
if args.task_name in ['mnli', 'mnli-mm'] and args.model_type in ['roberta', 'xlmroberta']:
(label_list[1], label_list[2]) = (label_list[2], label_list[1])
if evaluate:
examples = processor.get_dev_examples(args.data_dir)
elif predict:
examples = processor.get_test_examples(args.data_dir)
else:
examples = processor.get_train_examples(args.data_dir)
features = convert_examples_to_features(examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, pad_on_left=bool(args.model_type in ['xlnet']), pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
if args.local_rank in [-1, 0]:
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and (not evaluate):
torch.distributed.barrier()
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == 'classification':
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == 'regression':
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
train_dataset = dataset
</DeepExtract>
<DeepExtract>
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ['bias', 'LayerNorm.weight']
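# exclude biases and LayerNorm weights from weight decay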
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt')):
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt')))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt')))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
(model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
logger.info('***** Running training *****')
logger.info(' Num examples = %d', len(train_dataset))
logger.info(' Num Epochs = %d', args.num_train_epochs)
logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
logger.info(' Total optimization steps = %d', t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
if os.path.exists(args.model_name_or_path):
try:
global_step = int(args.model_name_or_path.split('-')[-1].split('/')[0])
except ValueError:
global_step = 0
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(' Continuing training from checkpoint, will skip to saved global_step')
logger.info(' Continuing training from epoch %d', epochs_trained)
logger.info(' Continuing training from global step %d', global_step)
logger.info(' Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
(tr_loss, logging_loss) = (0.0, 0.0)
model.zero_grad()
train_iterator = range(epochs_trained, int(args.num_train_epochs))
set_seed(args)
for i in train_iterator:
logger.info('epoch {}/{}'.format(i, int(args.num_train_epochs)))
for (step, batch) in enumerate(train_dataloader):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple((t.to(args.device) for t in batch))
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert'] else None
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and (global_step % args.logging_steps == 0):
logs = {}
if args.local_rank == -1 and args.evaluate_during_training:
results = evaluate(args, model, tokenizer)
for (key, value) in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for (key, value) in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
if args.local_rank in [-1, 0] and args.save_steps > 0 and (global_step % args.save_steps == 0):
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info('Saving model checkpoint to %s', output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
logger.info('Saving optimizer and scheduler states to %s', output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
(global_step, tr_loss) = (global_step, tr_loss / global_step)
</DeepExtract>
logger.info(' global_step = %s, average loss = %s', global_step, tr_loss)
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info('Saving model checkpoint to %s', args.output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list((os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))))
logging.getLogger('transformers.modeling_utils').setLevel(logging.WARN)
logger.info('Evaluate the following checkpoints: %s', checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ''
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ''
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
<DeepExtract>
eval_task_names = ('mnli', 'mnli-mm') if args.task_name == 'mnli' else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == 'mnli' else (args.output_dir,)
results = {}
for (eval_task, eval_output_dir) in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if args.n_gpu > 1 and (not isinstance(model, torch.nn.DataParallel)):
model = torch.nn.DataParallel(model)
logger.info('***** Running evaluation {} *****'.format(prefix))
logger.info(' Num examples = %d', len(eval_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc='Evaluating'):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert'] else None
outputs = model(**inputs)
(tmp_eval_loss, logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == 'classification':
preds = np.argmax(preds, axis=1)
elif args.output_mode == 'regression':
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, 'eval_results.txt')
with open(output_eval_file, 'w') as writer:
logger.info('***** Eval results {} *****'.format(prefix))
for key in sorted(result.keys()):
logger.info(' %s = %s', key, str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
result = results
</DeepExtract>
result = dict(((k + '_{}'.format(global_step), v) for (k, v) in result.items()))
results.update(result)
if args.do_predict and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoint = args.output_dir
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
<DeepExtract>
pred_task_names = ('mnli', 'mnli-mm') if args.task_name == 'mnli' else (args.task_name,)
pred_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == 'mnli' else (args.output_dir,)
for (pred_task, pred_output_dir) in zip(pred_task_names, pred_outputs_dirs):
pred_dataset = load_and_cache_examples(args, pred_task, tokenizer, predict=True)
processor = processors[pred_task]()
if not os.path.exists(pred_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(pred_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
pred_sampler = SequentialSampler(pred_dataset)
pred_dataloader = DataLoader(pred_dataset, sampler=pred_sampler, batch_size=args.eval_batch_size)
if args.n_gpu > 1 and (not isinstance(model, torch.nn.DataParallel)):
model = torch.nn.DataParallel(model)
logger.info('***** Running prediction *****')
logger.info(' Num examples = %d', len(pred_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(pred_dataloader, desc='Evaluating'):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert'] else None
outputs = model(**inputs)
(tmp_eval_loss, logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
if args.output_mode == 'classification':
preds = np.argmax(preds, axis=1)
elif args.output_mode == 'regression':
preds = np.squeeze(preds)
label_list = processor.get_labels()
label_map = {i: label for (i, label) in enumerate(label_list)}
output_pred_file = os.path.join(pred_output_dir, pred_task.upper() + '.tsv')
with open(output_pred_file, 'w') as writer:
logger.info('***** predict results *****')
writer.write('index\tprediction\n')
for (index, pred) in enumerate(tqdm(preds)):
if pred_task == 'sts-b':
pred = round(pred, 3)
else:
pred = label_map[pred]
writer.write('%s\t%s\n' % (index, str(pred)))
</DeepExtract>
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='../data/glue_data/', type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
parser.add_argument('--model_type', default='bert', type=str, required=True, help='Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys()))
parser.add_argument('--model_name_or_path', default='../model/pytorch_bert_base_uncased/pytorch_model.bin', type=str, required=True, help='Path to pre-trained model or shortcut name selected in the list: ' + ', '.join(ALL_MODELS))
parser.add_argument('--task_name', default='MRPC', type=str, required=True, help='The name of the task to train selected in the list: ' + ', '.join(processors.keys()))
parser.add_argument('--output_dir', default='../model/MRPC/pytorch_output/pytorch_model.bin', type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
parser.add_argument('--do_predict', action='store_true', help='Whether to run predict on the test set.')
parser.add_argument('--evaluate_during_training', action='store_true', help='Run evaluation during training at each logging step.')
parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
parser.add_argument('--per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--max_steps', default=-1, type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--logging_steps', type=int, default=500, help='Log every X updates steps.')
parser.add_argument('--save_steps', type=int, default=500, help='Save checkpoint every X updates steps.')
parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number')
parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--local_rank', type=int, default=-1, help='For distributed training: local_rank')
parser.add_argument('--server_ip', type=str, default='', help='For distant debugging.')
parser.add_argument('--server_port', type=str, default='', help='For distant debugging.')
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir):
raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
if args.server_ip and args.server_port:
import ptvsd
print('Waiting for debugger attach')
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError('Task not found: %s' % args.task_name)
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
args.model_type = args.model_type.lower()
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier()
model.to(args.device)
logger.info('Training/evaluation parameters %s', args)
if args.do_train:
if args.local_rank not in [-1, 0] and (not evaluate):
torch.distributed.barrier()
processor = processors[args.task_name]()
output_mode = output_modes[args.task_name]
if evaluate:
mode = 'dev'
elif predict:
mode = 'test'
else:
mode = 'train'
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(mode, list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(args.task_name)))
if os.path.exists(cached_features_file) and (not args.overwrite_cache):
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info('Creating features from dataset file at %s', args.data_dir)
label_list = processor.get_labels()
if args.task_name in ['mnli', 'mnli-mm'] and args.model_type in ['roberta', 'xlmroberta']:
(label_list[1], label_list[2]) = (label_list[2], label_list[1])
if evaluate:
examples = processor.get_dev_examples(args.data_dir)
elif predict:
examples = processor.get_test_examples(args.data_dir)
else:
examples = processor.get_train_examples(args.data_dir)
features = convert_examples_to_features(examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, pad_on_left=bool(args.model_type in ['xlnet']), pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
if args.local_rank in [-1, 0]:
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and (not evaluate):
torch.distributed.barrier()
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == 'classification':
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == 'regression':
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
train_dataset = dataset
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ['bias', 'LayerNorm.weight']
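# exclude biases and LayerNorm weights from weight decay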
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt')):
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt')))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt')))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
(model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
logger.info('***** Running training *****')
logger.info(' Num examples = %d', len(train_dataset))
logger.info(' Num Epochs = %d', args.num_train_epochs)
logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
logger.info(' Total optimization steps = %d', t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
if os.path.exists(args.model_name_or_path):
try:
global_step = int(args.model_name_or_path.split('-')[-1].split('/')[0])
except ValueError:
global_step = 0
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(' Continuing training from checkpoint, will skip to saved global_step')
logger.info(' Continuing training from epoch %d', epochs_trained)
logger.info(' Continuing training from global step %d', global_step)
logger.info(' Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
(tr_loss, logging_loss) = (0.0, 0.0)
model.zero_grad()
train_iterator = range(epochs_trained, int(args.num_train_epochs))
set_seed(args)
for i in train_iterator:
logger.info('epoch {}/{}'.format(i, int(args.num_train_epochs)))
for (step, batch) in enumerate(train_dataloader):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple((t.to(args.device) for t in batch))
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert'] else None
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and (global_step % args.logging_steps == 0):
logs = {}
if args.local_rank == -1 and args.evaluate_during_training:
results = evaluate(args, model, tokenizer)
for (key, value) in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for (key, value) in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
if args.local_rank in [-1, 0] and args.save_steps > 0 and (global_step % args.save_steps == 0):
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info('Saving model checkpoint to %s', output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
logger.info('Saving optimizer and scheduler states to %s', output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
(global_step, tr_loss) = (global_step, tr_loss / global_step)
logger.info(' global_step = %s, average loss = %s', global_step, tr_loss)
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info('Saving model checkpoint to %s', args.output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list((os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))))
logging.getLogger('transformers.modeling_utils').setLevel(logging.WARN)
logger.info('Evaluate the following checkpoints: %s', checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ''
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ''
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
eval_task_names = ('mnli', 'mnli-mm') if args.task_name == 'mnli' else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == 'mnli' else (args.output_dir,)
results = {}
for (eval_task, eval_output_dir) in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if args.n_gpu > 1 and (not isinstance(model, torch.nn.DataParallel)):
model = torch.nn.DataParallel(model)
logger.info('***** Running evaluation {} *****'.format(prefix))
logger.info(' Num examples = %d', len(eval_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc='Evaluating'):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert'] else None
outputs = model(**inputs)
(tmp_eval_loss, logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == 'classification':
preds = np.argmax(preds, axis=1)
elif args.output_mode == 'regression':
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, 'eval_results.txt')
with open(output_eval_file, 'w') as writer:
logger.info('***** Eval results {} *****'.format(prefix))
for key in sorted(result.keys()):
logger.info(' %s = %s', key, str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
result = results
result = dict(((k + '_{}'.format(global_step), v) for (k, v) in result.items()))
results.update(result)
if args.do_predict and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoint = args.output_dir
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
pred_task_names = ('mnli', 'mnli-mm') if args.task_name == 'mnli' else (args.task_name,)
pred_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == 'mnli' else (args.output_dir,)
for (pred_task, pred_output_dir) in zip(pred_task_names, pred_outputs_dirs):
pred_dataset = load_and_cache_examples(args, pred_task, tokenizer, predict=True)
processor = processors[pred_task]()
if not os.path.exists(pred_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(pred_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
pred_sampler = SequentialSampler(pred_dataset)
pred_dataloader = DataLoader(pred_dataset, sampler=pred_sampler, batch_size=args.eval_batch_size)
if args.n_gpu > 1 and (not isinstance(model, torch.nn.DataParallel)):
model = torch.nn.DataParallel(model)
logger.info('***** Running prediction *****')
logger.info(' Num examples = %d', len(pred_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(pred_dataloader, desc='Evaluating'):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert'] else None
outputs = model(**inputs)
(tmp_eval_loss, logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
if args.output_mode == 'classification':
preds = np.argmax(preds, axis=1)
elif args.output_mode == 'regression':
preds = np.squeeze(preds)
label_list = processor.get_labels()
label_map = {i: label for (i, label) in enumerate(label_list)}
output_pred_file = os.path.join(pred_output_dir, pred_task.upper() + '.tsv')
with open(output_pred_file, 'w') as writer:
logger.info('***** predict results *****')
writer.write('index\tprediction\n')
for (index, pred) in enumerate(tqdm(preds)):
if pred_task == 'sts-b':
pred = round(pred, 3)
else:
pred = label_map[pred]
writer.write('%s\t%s\n' % (index, str(pred)))
|
BERT-EMD
|
positive
|
@classmethod
def teardown_class(self):
<DeepExtract>
try:
os.remove(self.BIN_LOAD)
except OSError:
pass
</DeepExtract>
<DeepExtract>
try:
os.remove(self.BIN_IROM)
except OSError:
pass
</DeepExtract>
|
@classmethod
def teardown_class(self):
try:
os.remove(self.BIN_LOAD)
except OSError:
pass
try:
os.remove(self.BIN_IROM)
except OSError:
pass
|
esptool
|
positive
|
def __init__(self, vobj):
<DeepExtract>
FreeCAD.Console.PrintMessage(str('__init__' + str(self)) + '\n')
</DeepExtract>
self.Object = vobj.Object
vobj.Proxy = self
|
def __init__(self, vobj):
FreeCAD.Console.PrintMessage(str('__init__' + str(self)) + '\n')
self.Object = vobj.Object
vobj.Proxy = self
|
Animation
|
positive
|
def read(self, path=DEFAULT_CONFIG_PATH):
"""Read the configuration from a file
Args:
path (str): The path to the configuration file
Raises:
IOError: If unable to read config file
"""
config = configparser.ConfigParser()
with open(path, 'r') as configfile:
config.read_file(configfile)
if config.has_section('GRASS'):
if config.has_option('GRASS', 'GRASS_DATABASE'):
self.GRASS_DATABASE = config.get('GRASS', 'GRASS_DATABASE')
if config.has_option('GRASS', 'GRASS_USER_DATABASE'):
self.GRASS_USER_DATABASE = config.get('GRASS', 'GRASS_USER_DATABASE')
if config.has_option('GRASS', 'GRASS_DEFAULT_LOCATION'):
self.GRASS_DEFAULT_LOCATION = config.get('GRASS', 'GRASS_DEFAULT_LOCATION')
if config.has_option('GRASS', 'GRASS_TMP_DATABASE'):
self.GRASS_TMP_DATABASE = config.get('GRASS', 'GRASS_TMP_DATABASE')
if config.has_option('GRASS', 'GRASS_RESOURCE_DIR'):
self.GRASS_RESOURCE_DIR = config.get('GRASS', 'GRASS_RESOURCE_DIR')
if config.has_option('GRASS', 'GRASS_RESOURCE_QUOTA'):
self.GRASS_RESOURCE_QUOTA = config.getint('GRASS', 'GRASS_RESOURCE_QUOTA')
if config.has_option('GRASS', 'GRASS_GIS_BASE'):
self.GRASS_GIS_BASE = config.get('GRASS', 'GRASS_GIS_BASE')
if config.has_option('GRASS', 'GRASS_GIS_START_SCRIPT'):
self.GRASS_GIS_START_SCRIPT = config.get('GRASS', 'GRASS_GIS_START_SCRIPT')
if config.has_option('GRASS', 'GRASS_ADDON_PATH'):
self.GRASS_ADDON_PATH = config.get('GRASS', 'GRASS_ADDON_PATH')
if config.has_option('GRASS', 'GRASS_MODULES_XML_PATH'):
self.GRASS_MODULES_XML_PATH = config.get('GRASS', 'GRASS_MODULES_XML_PATH')
if config.has_option('GRASS', 'GRASS_VENV'):
self.GRASS_VENV = config.get('GRASS', 'GRASS_VENV')
if config.has_option('MANAGEMENT', 'ADDITIONAL_ALLOWED_MODULES'):
self.ADDITIONAL_ALLOWED_MODULES = ast.literal_eval(config.get('MANAGEMENT', 'ADDITIONAL_ALLOWED_MODULES'))
self.MODULE_ALLOW_LIST.extend(self.ADDITIONAL_ALLOWED_MODULES)
self.MODULE_ALLOW_LIST = list(set(self.MODULE_ALLOW_LIST))
if config.has_section('LIMITS'):
if config.has_option('LIMITS', 'MAX_CELL_LIMIT'):
self.MAX_CELL_LIMIT = config.getint('LIMITS', 'MAX_CELL_LIMIT')
if config.has_option('LIMITS', 'PROCESS_TIME_LIMT'):
self.PROCESS_TIME_LIMT = config.getint('LIMITS', 'PROCESS_TIME_LIMT')
if config.has_option('LIMITS', 'PROCESS_NUM_LIMIT'):
self.PROCESS_NUM_LIMIT = config.getint('LIMITS', 'PROCESS_NUM_LIMIT')
if config.has_section('API'):
if config.has_option('API', 'CHECK_CREDENTIALS'):
self.CHECK_CREDENTIALS = config.getboolean('API', 'CHECK_CREDENTIALS')
if config.has_option('API', 'CHECK_LIMITS'):
self.CHECK_LIMITS = config.getboolean('API', 'CHECK_LIMITS')
if config.has_option('API', 'LOG_API_CALL'):
self.LOG_API_CALL = config.getboolean('API', 'LOG_API_CALL')
if config.has_option('API', 'LOGIN_REQUIRED'):
self.LOGIN_REQUIRED = config.getboolean('API', 'LOGIN_REQUIRED')
if config.has_option('API', 'FORCE_HTTPS_URLS'):
self.FORCE_HTTPS_URLS = config.getboolean('API', 'FORCE_HTTPS_URLS')
if config.has_option('API', 'PLUGINS'):
self.PLUGINS = ast.literal_eval(config.get('API', 'PLUGINS'))
if config.has_option('API', 'ENDPOINTS_CONFIG'):
self.ENDPOINTS_CONFIG = config.get('API', 'ENDPOINTS_CONFIG')
if config.has_section('KEYCLOAK'):
if config.has_option('KEYCLOAK', 'CONFIG_PATH'):
keycloak_cfg_path = config.get('KEYCLOAK', 'CONFIG_PATH')
if os.path.isfile(keycloak_cfg_path):
self.KEYCLOAK_CONFIG_PATH = keycloak_cfg_path
<DeepExtract>
if key_cloak_config_path is None:
key_cloak_config_path = self.KEYCLOAK_CONFIG_PATH
if os.path.isfile(key_cloak_config_path):
with open(key_cloak_config_path) as f:
keycloak_cfg = json_load(f)
self.KEYCLOAK_URL = keycloak_cfg['auth-server-url']
self.KEYCLOAK_REALM = keycloak_cfg['realm']
self.KEYCLOAK_CLIENT_ID = keycloak_cfg['resource']
self.KEYCLOAK_CLIENT_SECRET_KEY = keycloak_cfg['credentials']['secret']
else:
raise Exception('KEYCLOAK_CONFIG_PATH is not a valid keycloak configuration for actinia')
</DeepExtract>
else:
print('Keycloak is configured, but configfile is not an existing file! Using Redis for user management.')
if config.has_option('KEYCLOAK', 'GROUP_PREFIX'):
self.KEYCLOAK_GROUP_PREFIX = config.get('KEYCLOAK', 'GROUP_PREFIX')
if config.has_option('KEYCLOAK', 'ATTR_PREFIX'):
self.KEYCLOAK_ATTR_PREFIX = config.get('KEYCLOAK', 'ATTR_PREFIX')
if config.has_section('REDIS'):
if config.has_option('REDIS', 'REDIS_SERVER_URL'):
self.REDIS_SERVER_URL = config.get('REDIS', 'REDIS_SERVER_URL')
if config.has_option('REDIS', 'REDIS_SERVER_PORT'):
self.REDIS_SERVER_PORT = config.getint('REDIS', 'REDIS_SERVER_PORT')
if config.has_option('REDIS', 'REDIS_SERVER_PW'):
self.REDIS_SERVER_PW = config.get('REDIS', 'REDIS_SERVER_PW')
if config.has_option('REDIS', 'REDIS_RESOURCE_EXPIRE_TIME'):
self.REDIS_RESOURCE_EXPIRE_TIME = config.getint('REDIS', 'REDIS_RESOURCE_EXPIRE_TIME')
if config.has_option('REDIS', 'WORKER_LOGFILE'):
self.WORKER_LOGFILE = config.get('REDIS', 'WORKER_LOGFILE')
if config.has_section('QUEUE'):
if config.has_option('QUEUE', 'NUMBER_OF_WORKERS'):
self.NUMBER_OF_WORKERS = config.getint('QUEUE', 'NUMBER_OF_WORKERS')
if config.has_option('QUEUE', 'REDIS_QUEUE_SERVER_URL'):
self.REDIS_QUEUE_SERVER_URL = config.get('QUEUE', 'REDIS_QUEUE_SERVER_URL')
if config.has_option('QUEUE', 'REDIS_QUEUE_SERVER_PORT'):
self.REDIS_QUEUE_SERVER_PORT = config.get('QUEUE', 'REDIS_QUEUE_SERVER_PORT')
if config.has_option('QUEUE', 'REDIS_QUEUE_SERVER_PASSWORD'):
self.REDIS_QUEUE_SERVER_PASSWORD = config.get('QUEUE', 'REDIS_QUEUE_SERVER_PASSWORD')
if config.has_option('QUEUE', 'REDIS_QUEUE_JOB_TTL'):
self.REDIS_QUEUE_JOB_TTL = config.get('QUEUE', 'REDIS_QUEUE_JOB_TTL')
if config.has_option('QUEUE', 'WORKER_QUEUE_PREFIX'):
self.WORKER_QUEUE_PREFIX = config.get('QUEUE', 'WORKER_QUEUE_PREFIX')
if config.has_option('QUEUE', 'QUEUE_TYPE'):
self.QUEUE_TYPE = config.get('QUEUE', 'QUEUE_TYPE')
if config.has_option('QUEUE', 'QUEUE_TYPE_OVERWRITE'):
self.QUEUE_TYPE_OVERWRITE = config.get('QUEUE', 'QUEUE_TYPE_OVERWRITE')
if config.has_section('MISC'):
if config.has_option('MISC', 'DOWNLOAD_CACHE'):
self.DOWNLOAD_CACHE = config.get('MISC', 'DOWNLOAD_CACHE')
if config.has_option('MISC', 'DOWNLOAD_CACHE_QUOTA'):
self.DOWNLOAD_CACHE_QUOTA = config.getint('MISC', 'DOWNLOAD_CACHE_QUOTA')
if config.has_option('MISC', 'TMP_WORKDIR'):
self.TMP_WORKDIR = config.get('MISC', 'TMP_WORKDIR')
if config.has_option('MISC', 'SECRET_KEY'):
self.SECRET_KEY = config.get('MISC', 'SECRET_KEY')
if config.has_option('MISC', 'LOG_LEVEL'):
self.LOG_LEVEL = config.getint('MISC', 'LOG_LEVEL')
if config.has_option('MISC', 'SAVE_INTERIM_RESULTS'):
save_interim = config.get('MISC', 'SAVE_INTERIM_RESULTS')
if save_interim == 'True':
self.SAVE_INTERIM_RESULTS = True
elif save_interim == 'False':
self.SAVE_INTERIM_RESULTS = False
else:
self.SAVE_INTERIM_RESULTS = save_interim
if config.has_option('MISC', 'SAVE_INTERIM_RESULTS_ENDPOINTS_CFG'):
cfg = config.get('MISC', 'SAVE_INTERIM_RESULTS_ENDPOINTS_CFG')
if os.path.isfile(cfg):
self.SAVE_INTERIM_RESULTS_ENDPOINTS_CFG = cfg
with open(cfg, mode='r') as inp:
reader = csv.reader(inp, delimiter=';')
endpoints_dict = {row[0].lower(): row[1] for row in reader if len(row) == 2}
self.INTERIM_SAVING_ENDPOINTS.update(endpoints_dict)
if config.has_option('MISC', 'INCLUDE_ADDITIONAL_MAPSET_PATTERN'):
self.INCLUDE_ADDITIONAL_MAPSET_PATTERN = config.get('MISC', 'INCLUDE_ADDITIONAL_MAPSET_PATTERN')
if config.has_section('LOGGING'):
if config.has_option('LOGGING', 'LOG_INTERFACE'):
self.LOG_INTERFACE = config.get('LOGGING', 'LOG_INTERFACE')
if config.has_option('LOGGING', 'LOG_STDOUT_FORMAT'):
self.LOG_STDOUT_FORMAT = config.get('LOGGING', 'LOG_STDOUT_FORMAT')
if config.has_option('LOGGING', 'LOG_FILE_FORMAT'):
self.LOG_FILE_FORMAT = config.get('LOGGING', 'LOG_FILE_FORMAT')
if config.has_option('LOGGING', 'LOG_STDERR_FORMAT'):
self.LOG_STDERR_FORMAT = config.get('LOGGING', 'LOG_STDERR_FORMAT')
if config.has_option('LOGGING', 'LOG_FLUENT_HOST'):
self.LOG_FLUENT_HOST = config.get('LOGGING', 'LOG_FLUENT_HOST')
if config.has_option('LOGGING', 'LOG_FLUENT_PORT'):
self.LOG_FLUENT_PORT = config.getint('LOGGING', 'LOG_FLUENT_PORT')
if config.has_option('LOGGING', 'LOG_LEVEL'):
self.LOG_LEVEL = config.getint('LOGGING', 'LOG_LEVEL')
if config.has_section('MANAGEMENT'):
if config.has_option('MANAGEMENT', 'DEFAULT_USER'):
self.DEFAULT_USER = config.get('MANAGEMENT', 'DEFAULT_USER')
if config.has_option('MANAGEMENT', 'DEFAULT_USER_GROUP'):
self.DEFAULT_USER_GROUP = config.get('MANAGEMENT', 'DEFAULT_USER_GROUP')
if config.has_section('GCS'):
if config.has_option('GCS', 'GOOGLE_APPLICATION_CREDENTIALS'):
self.GOOGLE_APPLICATION_CREDENTIALS = config.get('GCS', 'GOOGLE_APPLICATION_CREDENTIALS')
if config.has_option('GCS', 'GCS_RESOURCE_BUCKET'):
self.GCS_RESOURCE_BUCKET = config.get('GCS', 'GCS_RESOURCE_BUCKET')
if config.has_option('GCS', 'GOOGLE_CLOUD_PROJECT'):
self.GOOGLE_CLOUD_PROJECT = config.get('GCS', 'GOOGLE_CLOUD_PROJECT')
if config.has_section('AWS_S3'):
if config.has_option('AWS_S3', 'S3_AWS_ACCESS_KEY_ID'):
self.S3_AWS_ACCESS_KEY_ID = config.get('AWS_S3', 'S3_AWS_ACCESS_KEY_ID')
if config.has_option('AWS_S3', 'S3_AWS_SECRET_ACCESS_KEY'):
self.S3_AWS_SECRET_ACCESS_KEY = config.get('AWS_S3', 'S3_AWS_SECRET_ACCESS_KEY')
if config.has_option('AWS_S3', 'S3_AWS_DEFAULT_REGION'):
self.S3_AWS_DEFAULT_REGION = config.get('AWS_S3', 'S3_AWS_DEFAULT_REGION')
if config.has_option('AWS_S3', 'S3_AWS_RESOURCE_BUCKET'):
self.S3_AWS_RESOURCE_BUCKET = config.get('AWS_S3', 'S3_AWS_RESOURCE_BUCKET')
if config.has_section('WEBHOOK'):
if config.has_option('WEBHOOK', 'WEBHOOK_RETRIES'):
self.WEBHOOK_RETRIES = config.get('WEBHOOK', 'WEBHOOK_RETRIES')
if config.has_option('WEBHOOK', 'WEBHOOK_SLEEP'):
self.WEBHOOK_SLEEP = config.get('WEBHOOK', 'WEBHOOK_SLEEP')
def print_warning(cfg_section, cfg_key, file_val=None, env_val=None):
if env_val is None:
env_val = os.environ[cfg_key]
if config.has_option(cfg_section, cfg_key):
if file_val is None:
file_val = config.get(cfg_section, cfg_key)
print("Config for %s from config file with value '%s' will be overwritten by environment variable with value '%s'." % (cfg_key, file_val, env_val))
if os.environ.get('REDIS_SERVER_URL'):
<DeepExtract>
if env_val is None:
env_val = os.environ['REDIS_SERVER_URL']
if config.has_option('REDIS', 'REDIS_SERVER_URL'):
if file_val is None:
file_val = config.get('REDIS', 'REDIS_SERVER_URL')
print("Config for %s from config file with value '%s' will be overwritten by environment variable with value '%s'." % ('REDIS_SERVER_URL', file_val, env_val))
</DeepExtract>
self.REDIS_SERVER_URL = os.environ['REDIS_SERVER_URL']
if os.environ.get('REDIS_SERVER_PORT'):
<DeepExtract>
if env_val is None:
env_val = os.environ['REDIS_SERVER_PORT']
if config.has_option('REDIS', 'REDIS_SERVER_PORT'):
if file_val is None:
file_val = config.get('REDIS', 'REDIS_SERVER_PORT')
print("Config for %s from config file with value '%s' will be overwritten by environment variable with value '%s'." % ('REDIS_SERVER_PORT', file_val, env_val))
</DeepExtract>
self.REDIS_SERVER_PORT = os.environ['REDIS_SERVER_PORT']
if os.environ.get('REDIS_SERVER_PW'):
<DeepExtract>
# mask the password values so they are not printed in the override warning
file_val = 'XXX'
env_val = 'XXX'
if config.has_option('REDIS', 'REDIS_SERVER_PW'):
    print("Config for %s from config file with value '%s' will be overwritten by environment variable with value '%s'." % ('REDIS_SERVER_PW', file_val, env_val))
</DeepExtract>
self.REDIS_SERVER_PW = os.environ['REDIS_SERVER_PW']
|
def read(self, path=DEFAULT_CONFIG_PATH):
"""Read the configuration from a file
Args:
path (str): The path to the configuration file
Raises:
IOError: If unable to read config file
"""
config = configparser.ConfigParser()
with open(path, 'r') as configfile:
config.read_file(configfile)
if config.has_section('GRASS'):
if config.has_option('GRASS', 'GRASS_DATABASE'):
self.GRASS_DATABASE = config.get('GRASS', 'GRASS_DATABASE')
if config.has_option('GRASS', 'GRASS_USER_DATABASE'):
self.GRASS_USER_DATABASE = config.get('GRASS', 'GRASS_USER_DATABASE')
if config.has_option('GRASS', 'GRASS_DEFAULT_LOCATION'):
self.GRASS_DEFAULT_LOCATION = config.get('GRASS', 'GRASS_DEFAULT_LOCATION')
if config.has_option('GRASS', 'GRASS_TMP_DATABASE'):
self.GRASS_TMP_DATABASE = config.get('GRASS', 'GRASS_TMP_DATABASE')
if config.has_option('GRASS', 'GRASS_RESOURCE_DIR'):
self.GRASS_RESOURCE_DIR = config.get('GRASS', 'GRASS_RESOURCE_DIR')
if config.has_option('GRASS', 'GRASS_RESOURCE_QUOTA'):
self.GRASS_RESOURCE_QUOTA = config.getint('GRASS', 'GRASS_RESOURCE_QUOTA')
if config.has_option('GRASS', 'GRASS_GIS_BASE'):
self.GRASS_GIS_BASE = config.get('GRASS', 'GRASS_GIS_BASE')
if config.has_option('GRASS', 'GRASS_GIS_START_SCRIPT'):
self.GRASS_GIS_START_SCRIPT = config.get('GRASS', 'GRASS_GIS_START_SCRIPT')
if config.has_option('GRASS', 'GRASS_ADDON_PATH'):
self.GRASS_ADDON_PATH = config.get('GRASS', 'GRASS_ADDON_PATH')
if config.has_option('GRASS', 'GRASS_MODULES_XML_PATH'):
self.GRASS_MODULES_XML_PATH = config.get('GRASS', 'GRASS_MODULES_XML_PATH')
if config.has_option('GRASS', 'GRASS_VENV'):
self.GRASS_VENV = config.get('GRASS', 'GRASS_VENV')
if config.has_option('MANAGEMENT', 'ADDITIONAL_ALLOWED_MODULES'):
self.ADDITIONAL_ALLOWED_MODULES = ast.literal_eval(config.get('MANAGEMENT', 'ADDITIONAL_ALLOWED_MODULES'))
self.MODULE_ALLOW_LIST.extend(self.ADDITIONAL_ALLOWED_MODULES)
self.MODULE_ALLOW_LIST = list(set(self.MODULE_ALLOW_LIST))
if config.has_section('LIMITS'):
if config.has_option('LIMITS', 'MAX_CELL_LIMIT'):
self.MAX_CELL_LIMIT = config.getint('LIMITS', 'MAX_CELL_LIMIT')
if config.has_option('LIMITS', 'PROCESS_TIME_LIMT'):
self.PROCESS_TIME_LIMT = config.getint('LIMITS', 'PROCESS_TIME_LIMT')
if config.has_option('LIMITS', 'PROCESS_NUM_LIMIT'):
self.PROCESS_NUM_LIMIT = config.getint('LIMITS', 'PROCESS_NUM_LIMIT')
if config.has_section('API'):
if config.has_option('API', 'CHECK_CREDENTIALS'):
self.CHECK_CREDENTIALS = config.getboolean('API', 'CHECK_CREDENTIALS')
if config.has_option('API', 'CHECK_LIMITS'):
self.CHECK_LIMITS = config.getboolean('API', 'CHECK_LIMITS')
if config.has_option('API', 'LOG_API_CALL'):
self.LOG_API_CALL = config.getboolean('API', 'LOG_API_CALL')
if config.has_option('API', 'LOGIN_REQUIRED'):
self.LOGIN_REQUIRED = config.getboolean('API', 'LOGIN_REQUIRED')
if config.has_option('API', 'FORCE_HTTPS_URLS'):
self.FORCE_HTTPS_URLS = config.getboolean('API', 'FORCE_HTTPS_URLS')
if config.has_option('API', 'PLUGINS'):
self.PLUGINS = ast.literal_eval(config.get('API', 'PLUGINS'))
if config.has_option('API', 'ENDPOINTS_CONFIG'):
self.ENDPOINTS_CONFIG = config.get('API', 'ENDPOINTS_CONFIG')
if config.has_section('KEYCLOAK'):
if config.has_option('KEYCLOAK', 'CONFIG_PATH'):
keycloak_cfg_path = config.get('KEYCLOAK', 'CONFIG_PATH')
if os.path.isfile(keycloak_cfg_path):
self.KEYCLOAK_CONFIG_PATH = keycloak_cfg_path
key_cloak_config_path = self.KEYCLOAK_CONFIG_PATH
if os.path.isfile(key_cloak_config_path):
with open(key_cloak_config_path) as f:
keycloak_cfg = json_load(f)
self.KEYCLOAK_URL = keycloak_cfg['auth-server-url']
self.KEYCLOAK_REALM = keycloak_cfg['realm']
self.KEYCLOAK_CLIENT_ID = keycloak_cfg['resource']
self.KEYCLOAK_CLIENT_SECRET_KEY = keycloak_cfg['credentials']['secret']
else:
raise Exception('KEYCLOAK_CONFIG_PATH is not a valid keycloak configuration for actinia')
else:
print('Keycloak is configured, but configfile is not an existing file! Using Redis for user management.')
if config.has_option('KEYCLOAK', 'GROUP_PREFIX'):
self.KEYCLOAK_GROUP_PREFIX = config.get('KEYCLOAK', 'GROUP_PREFIX')
if config.has_option('KEYCLOAK', 'ATTR_PREFIX'):
self.KEYCLOAK_ATTR_PREFIX = config.get('KEYCLOAK', 'ATTR_PREFIX')
if config.has_section('REDIS'):
if config.has_option('REDIS', 'REDIS_SERVER_URL'):
self.REDIS_SERVER_URL = config.get('REDIS', 'REDIS_SERVER_URL')
if config.has_option('REDIS', 'REDIS_SERVER_PORT'):
self.REDIS_SERVER_PORT = config.getint('REDIS', 'REDIS_SERVER_PORT')
if config.has_option('REDIS', 'REDIS_SERVER_PW'):
self.REDIS_SERVER_PW = config.get('REDIS', 'REDIS_SERVER_PW')
if config.has_option('REDIS', 'REDIS_RESOURCE_EXPIRE_TIME'):
self.REDIS_RESOURCE_EXPIRE_TIME = config.getint('REDIS', 'REDIS_RESOURCE_EXPIRE_TIME')
if config.has_option('REDIS', 'WORKER_LOGFILE'):
self.WORKER_LOGFILE = config.get('REDIS', 'WORKER_LOGFILE')
if config.has_section('QUEUE'):
if config.has_option('QUEUE', 'NUMBER_OF_WORKERS'):
self.NUMBER_OF_WORKERS = config.getint('QUEUE', 'NUMBER_OF_WORKERS')
if config.has_option('QUEUE', 'REDIS_QUEUE_SERVER_URL'):
self.REDIS_QUEUE_SERVER_URL = config.get('QUEUE', 'REDIS_QUEUE_SERVER_URL')
if config.has_option('QUEUE', 'REDIS_QUEUE_SERVER_PORT'):
self.REDIS_QUEUE_SERVER_PORT = config.get('QUEUE', 'REDIS_QUEUE_SERVER_PORT')
if config.has_option('QUEUE', 'REDIS_QUEUE_SERVER_PASSWORD'):
self.REDIS_QUEUE_SERVER_PASSWORD = config.get('QUEUE', 'REDIS_QUEUE_SERVER_PASSWORD')
if config.has_option('QUEUE', 'REDIS_QUEUE_JOB_TTL'):
self.REDIS_QUEUE_JOB_TTL = config.get('QUEUE', 'REDIS_QUEUE_JOB_TTL')
if config.has_option('QUEUE', 'WORKER_QUEUE_PREFIX'):
self.WORKER_QUEUE_PREFIX = config.get('QUEUE', 'WORKER_QUEUE_PREFIX')
if config.has_option('QUEUE', 'QUEUE_TYPE'):
self.QUEUE_TYPE = config.get('QUEUE', 'QUEUE_TYPE')
if config.has_option('QUEUE', 'QUEUE_TYPE_OVERWRITE'):
self.QUEUE_TYPE_OVERWRITE = config.get('QUEUE', 'QUEUE_TYPE_OVERWRITE')
if config.has_section('MISC'):
if config.has_option('MISC', 'DOWNLOAD_CACHE'):
self.DOWNLOAD_CACHE = config.get('MISC', 'DOWNLOAD_CACHE')
if config.has_option('MISC', 'DOWNLOAD_CACHE_QUOTA'):
self.DOWNLOAD_CACHE_QUOTA = config.getint('MISC', 'DOWNLOAD_CACHE_QUOTA')
if config.has_option('MISC', 'TMP_WORKDIR'):
self.TMP_WORKDIR = config.get('MISC', 'TMP_WORKDIR')
if config.has_option('MISC', 'SECRET_KEY'):
self.SECRET_KEY = config.get('MISC', 'SECRET_KEY')
if config.has_option('MISC', 'LOG_LEVEL'):
self.LOG_LEVEL = config.getint('MISC', 'LOG_LEVEL')
if config.has_option('MISC', 'SAVE_INTERIM_RESULTS'):
save_interim = config.get('MISC', 'SAVE_INTERIM_RESULTS')
if save_interim == 'True':
self.SAVE_INTERIM_RESULTS = True
elif save_interim == 'False':
self.SAVE_INTERIM_RESULTS = False
else:
self.SAVE_INTERIM_RESULTS = save_interim
if config.has_option('MISC', 'SAVE_INTERIM_RESULTS_ENDPOINTS_CFG'):
cfg = config.get('MISC', 'SAVE_INTERIM_RESULTS_ENDPOINTS_CFG')
if os.path.isfile(cfg):
self.SAVE_INTERIM_RESULTS_ENDPOINTS_CFG = cfg
with open(cfg, mode='r') as inp:
reader = csv.reader(inp, delimiter=';')
endpoints_dict = {row[0].lower(): row[1] for row in reader if len(row) == 2}
self.INTERIM_SAVING_ENDPOINTS.update(endpoints_dict)
if config.has_option('MISC', 'INCLUDE_ADDITIONAL_MAPSET_PATTERN'):
self.INCLUDE_ADDITIONAL_MAPSET_PATTERN = config.get('MISC', 'INCLUDE_ADDITIONAL_MAPSET_PATTERN')
if config.has_section('LOGGING'):
if config.has_option('LOGGING', 'LOG_INTERFACE'):
self.LOG_INTERFACE = config.get('LOGGING', 'LOG_INTERFACE')
if config.has_option('LOGGING', 'LOG_STDOUT_FORMAT'):
self.LOG_STDOUT_FORMAT = config.get('LOGGING', 'LOG_STDOUT_FORMAT')
if config.has_option('LOGGING', 'LOG_FILE_FORMAT'):
self.LOG_FILE_FORMAT = config.get('LOGGING', 'LOG_FILE_FORMAT')
if config.has_option('LOGGING', 'LOG_STDERR_FORMAT'):
self.LOG_STDERR_FORMAT = config.get('LOGGING', 'LOG_STDERR_FORMAT')
if config.has_option('LOGGING', 'LOG_FLUENT_HOST'):
self.LOG_FLUENT_HOST = config.get('LOGGING', 'LOG_FLUENT_HOST')
if config.has_option('LOGGING', 'LOG_FLUENT_PORT'):
self.LOG_FLUENT_PORT = config.getint('LOGGING', 'LOG_FLUENT_PORT')
if config.has_option('LOGGING', 'LOG_LEVEL'):
self.LOG_LEVEL = config.getint('LOGGING', 'LOG_LEVEL')
if config.has_section('MANAGEMENT'):
if config.has_option('MANAGEMENT', 'DEFAULT_USER'):
self.DEFAULT_USER = config.get('MANAGEMENT', 'DEFAULT_USER')
if config.has_option('MANAGEMENT', 'DEFAULT_USER_GROUP'):
self.DEFAULT_USER_GROUP = config.get('MANAGEMENT', 'DEFAULT_USER_GROUP')
if config.has_section('GCS'):
if config.has_option('GCS', 'GOOGLE_APPLICATION_CREDENTIALS'):
self.GOOGLE_APPLICATION_CREDENTIALS = config.get('GCS', 'GOOGLE_APPLICATION_CREDENTIALS')
if config.has_option('GCS', 'GCS_RESOURCE_BUCKET'):
self.GCS_RESOURCE_BUCKET = config.get('GCS', 'GCS_RESOURCE_BUCKET')
if config.has_option('GCS', 'GOOGLE_CLOUD_PROJECT'):
self.GOOGLE_CLOUD_PROJECT = config.get('GCS', 'GOOGLE_CLOUD_PROJECT')
if config.has_section('AWS_S3'):
if config.has_option('AWS_S3', 'S3_AWS_ACCESS_KEY_ID'):
self.S3_AWS_ACCESS_KEY_ID = config.get('AWS_S3', 'S3_AWS_ACCESS_KEY_ID')
if config.has_option('AWS_S3', 'S3_AWS_SECRET_ACCESS_KEY'):
self.S3_AWS_SECRET_ACCESS_KEY = config.get('AWS_S3', 'S3_AWS_SECRET_ACCESS_KEY')
if config.has_option('AWS_S3', 'S3_AWS_DEFAULT_REGION'):
self.S3_AWS_DEFAULT_REGION = config.get('AWS_S3', 'S3_AWS_DEFAULT_REGION')
if config.has_option('AWS_S3', 'S3_AWS_RESOURCE_BUCKET'):
self.S3_AWS_RESOURCE_BUCKET = config.get('AWS_S3', 'S3_AWS_RESOURCE_BUCKET')
if config.has_section('WEBHOOK'):
if config.has_option('WEBHOOK', 'WEBHOOK_RETRIES'):
self.WEBHOOK_RETRIES = config.get('WEBHOOK', 'WEBHOOK_RETRIES')
if config.has_option('WEBHOOK', 'WEBHOOK_SLEEP'):
self.WEBHOOK_SLEEP = config.get('WEBHOOK', 'WEBHOOK_SLEEP')
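# Environment variables, when set, take precedence over values read from the config file; a warning is printed whenever an option is overridden.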
def print_warning(cfg_section, cfg_key, file_val=None, env_val=None):
if env_val is None:
env_val = os.environ[cfg_key]
if config.has_option(cfg_section, cfg_key):
if file_val is None:
file_val = config.get(cfg_section, cfg_key)
print("Config for %s from config file with value '%s' will be overwritten by environment variable with value '%s'." % (cfg_key, file_val, env_val))
if os.environ.get('REDIS_SERVER_URL'):
env_val = os.environ['REDIS_SERVER_URL']
if config.has_option('REDIS', 'REDIS_SERVER_URL'):
    file_val = config.get('REDIS', 'REDIS_SERVER_URL')
    print("Config for %s from config file with value '%s' will be overwritten by environment variable with value '%s'." % ('REDIS_SERVER_URL', file_val, env_val))
self.REDIS_SERVER_URL = os.environ['REDIS_SERVER_URL']
if os.environ.get('REDIS_SERVER_PORT'):
env_val = os.environ['REDIS_SERVER_PORT']
if config.has_option('REDIS', 'REDIS_SERVER_PORT'):
    file_val = config.get('REDIS', 'REDIS_SERVER_PORT')
    print("Config for %s from config file with value '%s' will be overwritten by environment variable with value '%s'." % ('REDIS_SERVER_PORT', file_val, env_val))
self.REDIS_SERVER_PORT = os.environ['REDIS_SERVER_PORT']
if os.environ.get('REDIS_SERVER_PW'):
if config.has_option('REDIS', 'REDIS_SERVER_PW'):
    print("Config for %s from config file with value '%s' will be overwritten by environment variable with value '%s'." % ('REDIS_SERVER_PW', 'XXX', 'XXX'))
self.REDIS_SERVER_PW = os.environ['REDIS_SERVER_PW']
|
actinia_core
|
positive
|
def _safe_get_and_call(self, dataset_id, action, callback=None, exceptions=(), success_status_code=DEFAULT_SUCCESS_STATUS_CODE, error=DEFAULT_ERROR_MESSAGE, content_type=JSON):
"""Find dataset and call action with it and kwargs.
Finds the dataset by `dataset_id` then calls function `action` and
catches any passed in exceptions as well as a set of standard
exceptions. Passes the result, error and callback to dump_or_error and
returns the resulting string.
:param dataset_id: The dataset ID to fetch.
:param action: A function to call within a try block that takes a
dataset and any kwargs.
:param callback: A JSONP callback that is passed through to
dump_or_error.
:param exceptions: A set of exceptions to additionally catch.
:param success_status_code: The HTTP status code to return, default is
DEFAULT_SUCCESS_STATUS_CODE.
:param error: Default error string.
:param kwargs: A set of keyword arguments that are passed to the
action.
:returns: A string that is the result of calling action or an error
caught when calling action.
"""
exceptions += (ArgumentError, JSONError, ValueError)
dataset = Dataset.find_one(dataset_id) if dataset_id else None
result = None
try:
if dataset is None or dataset.record:
result = action(dataset)
except exceptions as err:
error = err.__str__()
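# The success status code is used only when the action produced a result; otherwise the error status code is returned along with the error message.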
<DeepExtract>
cherrypy.response.headers['Content-Type'] = content_type
cherrypy.response.status = success_status_code if result is not None else self.ERROR_STATUS_CODE
</DeepExtract>
return self._dump_or_error(result, error, callback)
|
def _safe_get_and_call(self, dataset_id, action, callback=None, exceptions=(), success_status_code=DEFAULT_SUCCESS_STATUS_CODE, error=DEFAULT_ERROR_MESSAGE, content_type=JSON):
"""Find dataset and call action with it and kwargs.
Finds the dataset by `dataset_id` then calls function `action` and
catches any passed in exceptions as well as a set of standard
exceptions. Passes the result, error and callback to dump_or_error and
returns the resulting string.
:param dataset_id: The dataset ID to fetch.
:param action: A function to call within a try block that takes a
dataset and any kwargs.
:param callback: A JSONP callback that is passed through to
dump_or_error.
:param exceptions: A set of exceptions to additionally catch.
:param success_status_code: The HTTP status code to return, default is
DEFAULT_SUCCESS_STATUS_CODE.
:param error: Default error string.
:param kwargs: A set of keyword arguments that are passed to the
action.
:returns: A string that is the result of calling action or an error
caught when calling action.
"""
exceptions += (ArgumentError, JSONError, ValueError)
dataset = Dataset.find_one(dataset_id) if dataset_id else None
result = None
try:
if dataset is None or dataset.record:
result = action(dataset)
except exceptions as err:
error = err.__str__()
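# The success status code is used only when the action produced a result; otherwise the error status code is returned along with the error message.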
cherrypy.response.headers['Content-Type'] = content_type
cherrypy.response.status = success_status_code if result is not None else self.ERROR_STATUS_CODE
return self._dump_or_error(result, error, callback)
|
bamboo
|
positive
|
def test_squareform_isfc():
n_subjects = 20
n_TRs = 60
n_voxels = 30
random_state = 42
logger.info('Testing ISC options')
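# Simulated data: one shared signal plus independent Gaussian noise per subject, stacked into a single (TRs x voxels x subjects) array.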
<DeepExtract>
prng = np.random.RandomState(random_state)
noise = 1  # assumed noise amplitude; the inlined helper's default is not visible here
if n_voxels:
signal = prng.randn(n_TRs, n_voxels)
prng = np.random.RandomState(prng.randint(0, 2 ** 32 - 1))
data = [signal + prng.randn(n_TRs, n_voxels) * noise for subject in np.arange(n_subjects)]
elif not n_voxels:
signal = prng.randn(n_TRs)
prng = np.random.RandomState(prng.randint(0, 2 ** 32 - 1))
data = [signal + prng.randn(n_TRs) * noise for subject in np.arange(n_subjects)]
if 'array' == 'array':
if n_voxels:
data = np.dstack(data)
elif not n_voxels:
data = np.column_stack(data)
data = data
</DeepExtract>
isfcs_r = isfc(data, vectorize_isfcs=False)
assert isfcs_r.shape == (n_subjects, n_voxels, n_voxels)
(isfcs_c, iscs_c) = squareform_isfc(isfcs_r)
assert isfcs_c.shape == (n_subjects, n_voxels * (n_voxels - 1) / 2)
assert iscs_c.shape == (n_subjects, n_voxels)
isfcs_new = squareform_isfc(isfcs_c, iscs_c)
assert np.array_equal(isfcs_r, isfcs_new)
assert np.allclose(isc(data), iscs_c, rtol=0.001)
isfcs_r = isfc(data[..., :2], vectorize_isfcs=False)
assert isfcs_r.shape == (n_voxels, n_voxels)
(isfcs_c, iscs_c) = squareform_isfc(isfcs_r)
assert isfcs_c.shape == (n_voxels * (n_voxels - 1) / 2,)
assert iscs_c.shape == (n_voxels,)
assert np.array_equal(isfcs_r, squareform_isfc(isfcs_c, iscs_c))
|
def test_squareform_isfc():
n_subjects = 20
n_TRs = 60
n_voxels = 30
random_state = 42
logger.info('Testing ISC options')
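# Simulated data: one shared signal plus independent Gaussian noise per subject, stacked into a single (TRs x voxels x subjects) array.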
prng = np.random.RandomState(random_state)
noise = 1  # assumed noise amplitude; the inlined helper's default is not visible here
if n_voxels:
signal = prng.randn(n_TRs, n_voxels)
prng = np.random.RandomState(prng.randint(0, 2 ** 32 - 1))
data = [signal + prng.randn(n_TRs, n_voxels) * noise for subject in np.arange(n_subjects)]
elif not n_voxels:
signal = prng.randn(n_TRs)
prng = np.random.RandomState(prng.randint(0, 2 ** 32 - 1))
data = [signal + prng.randn(n_TRs) * noise for subject in np.arange(n_subjects)]
if 'array' == 'array':
if n_voxels:
data = np.dstack(data)
elif not n_voxels:
data = np.column_stack(data)
data = data
isfcs_r = isfc(data, vectorize_isfcs=False)
assert isfcs_r.shape == (n_subjects, n_voxels, n_voxels)
(isfcs_c, iscs_c) = squareform_isfc(isfcs_r)
assert isfcs_c.shape == (n_subjects, n_voxels * (n_voxels - 1) / 2)
assert iscs_c.shape == (n_subjects, n_voxels)
isfcs_new = squareform_isfc(isfcs_c, iscs_c)
assert np.array_equal(isfcs_r, isfcs_new)
assert np.allclose(isc(data), iscs_c, rtol=0.001)
isfcs_r = isfc(data[..., :2], vectorize_isfcs=False)
assert isfcs_r.shape == (n_voxels, n_voxels)
(isfcs_c, iscs_c) = squareform_isfc(isfcs_r)
assert isfcs_c.shape == (n_voxels * (n_voxels - 1) / 2,)
assert iscs_c.shape == (n_voxels,)
assert np.array_equal(isfcs_r, squareform_isfc(isfcs_c, iscs_c))
|
brainiak
|
positive
|
@click.command()
@click.option('-c', '--config-file', required=True, type=click.File('r'))
def main(config_file: TextIO) -> None:
config = ExperimentConfig(**yaml.safe_load(config_file))
logger.debug(f'Loaded experiment config: {yaml.dump(config.dict(), sort_keys=False, indent=2)}')
site_names = {site.id: (site.name, site.path.name) for site in Site.objects.filter(id=config.site_ids)}
assert len(site_names) == len(config.site_ids), f'Config specified site_ids {config.site_ids} but API only found {len(site_names)} of them.'
apps_by_site = {site_id: App.objects.get(site_id=site_id, class_path=config.app_name) for site_id in config.site_ids}
job_factory = JobFactory(config.experiment_tag, config.xpcs_datasets, config.eig_datasets, config.site_cpu_map)
if config.submission_mode == 'const-backlog':
submit_method = submit_const_backlog
elif config.submission_mode == 'round-robin':
submit_method = submit_round_robin
elif config.submission_mode == 'shortest-backlog':
submit_method = submit_shortest_backlog
else:
raise ValueError('Invalid submission mode')
start = datetime.utcnow()
logger.info(f'Starting experiment at {start}')
logger.info(f'Total duration will be {config.experiment_duration_min} minutes at most')
while datetime.utcnow() - start < timedelta(minutes=config.experiment_duration_min):
time.sleep(config.submit_period)
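# Snapshot each site's backlog (runnable, ready, or staged-in jobs) before deciding where to submit next.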
<DeepExtract>
backlogs: Dict[int, int] = {}
for site_id in config.site_ids:
qs = Job.objects.filter(site_id=site_id, state=set([*RUNNABLE_STATES, JobState.ready, JobState.staged_in]))
count = qs.count()
assert count is not None
backlogs[site_id] = count
backlogs = backlogs
</DeepExtract>
submit_method(job_factory, apps_by_site, backlogs, config.submit_batch_size, config.max_site_backlog)
logger.info('Reached experiment max duration, exiting.')
|
@click.command()
@click.option('-c', '--config-file', required=True, type=click.File('r'))
def main(config_file: TextIO) -> None:
config = ExperimentConfig(**yaml.safe_load(config_file))
logger.debug(f'Loaded experiment config: {yaml.dump(config.dict(), sort_keys=False, indent=2)}')
site_names = {site.id: (site.name, site.path.name) for site in Site.objects.filter(id=config.site_ids)}
assert len(site_names) == len(config.site_ids), f'Config specified site_ids {config.site_ids} but API only found {len(site_names)} of them.'
apps_by_site = {site_id: App.objects.get(site_id=site_id, class_path=config.app_name) for site_id in config.site_ids}
job_factory = JobFactory(config.experiment_tag, config.xpcs_datasets, config.eig_datasets, config.site_cpu_map)
if config.submission_mode == 'const-backlog':
submit_method = submit_const_backlog
elif config.submission_mode == 'round-robin':
submit_method = submit_round_robin
elif config.submission_mode == 'shortest-backlog':
submit_method = submit_shortest_backlog
else:
raise ValueError('Invalid submission mode')
start = datetime.utcnow()
logger.info(f'Starting experiment at {start}')
logger.info(f'Total duration will be {config.experiment_duration_min} minutes at most')
while datetime.utcnow() - start < timedelta(minutes=config.experiment_duration_min):
time.sleep(config.submit_period)
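# Snapshot each site's backlog (runnable, ready, or staged-in jobs) before deciding where to submit next.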
backlogs: Dict[int, int] = {}
for site_id in config.site_ids:
qs = Job.objects.filter(site_id=site_id, state=set([*RUNNABLE_STATES, JobState.ready, JobState.staged_in]))
count = qs.count()
assert count is not None
backlogs[site_id] = count
backlogs = backlogs
submit_method(job_factory, apps_by_site, backlogs, config.submit_batch_size, config.max_site_backlog)
logger.info('Reached experiment max duration, exiting.')
|
balsam
|
positive
|
def realize_molecule(data, name, smiles=None, first=-1, subtract_nonbonded=True):
elements = data['z'].tolist()
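# Per-element energy offsets are summed here and subtracted from the reference energies below.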
<DeepExtract>
offset = sum([OFFSETS[element] for element in elements])
</DeepExtract>
g = esp.data.utils.infer_mol_from_coordinates(data['R'][0], elements, smiles)
g.nodes['n1'].data['xyz'] = torch.tensor(Quantity(data['R'].transpose(1, 0, 2), unit.angstrom).value_in_unit(esp.units.DISTANCE_UNIT), requires_grad=True)[:, :first, :]
g.nodes['g'].data['u_ref'] = torch.tensor(Quantity(data['E'], unit.kilocalorie_per_mole).value_in_unit(esp.units.ENERGY_UNIT)).transpose(1, 0)[:, :first] - offset
g.nodes['n1'].data['u_ref_prime'] = torch.tensor(Quantity(data['F'], unit.kilocalorie_per_mole / unit.angstrom).value_in_unit(esp.units.FORCE_UNIT)).transpose(1, 0)[:, :first, :]
if subtract_nonbonded is True:
g = esp.data.md.subtract_nonbonded_force(g)
return g
|
def realize_molecule(data, name, smiles=None, first=-1, subtract_nonbonded=True):
elements = data['z'].tolist()
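# Per-element energy offsets are summed here and subtracted from the reference energies below.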
offset = sum([OFFSETS[element] for element in elements])
g = esp.data.utils.infer_mol_from_coordinates(data['R'][0], elements, smiles)
g.nodes['n1'].data['xyz'] = torch.tensor(Quantity(data['R'].transpose(1, 0, 2), unit.angstrom).value_in_unit(esp.units.DISTANCE_UNIT), requires_grad=True)[:, :first, :]
g.nodes['g'].data['u_ref'] = torch.tensor(Quantity(data['E'], unit.kilocalorie_per_mole).value_in_unit(esp.units.ENERGY_UNIT)).transpose(1, 0)[:, :first] - offset
g.nodes['n1'].data['u_ref_prime'] = torch.tensor(Quantity(data['F'], unit.kilocalorie_per_mole / unit.angstrom).value_in_unit(esp.units.FORCE_UNIT)).transpose(1, 0)[:, :first, :]
if subtract_nonbonded is True:
g = esp.data.md.subtract_nonbonded_force(g)
return g
|
espaloma
|
positive
|
def main():
opts = args.get_argparser().parse_args()
args.print_args(opts)
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
vis = Visualizer(port=opts.vis_port, env=opts.vis_env) if opts.enable_vis else None
if vis is not None:
vis.vis_table('Options', vars(opts))
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Device: %s' % device)
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
if opts.dataset == 'voc' and (not opts.crop_val):
opts.val_batch_size = 1
(train_dst, val_dst) = args.get_dataset(opts)
train_loader = data.DataLoader(train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=2)
val_loader = data.DataLoader(val_dst, batch_size=opts.val_batch_size, shuffle=True, num_workers=2)
print('Dataset: %s, Train set: %d, Val set: %d' % (opts.dataset, len(train_dst), len(val_dst)))
model_map = {'deeplabv3_resnet50': network.deeplabv3_resnet50, 'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50, 'deeplabv3_resnet101': network.deeplabv3_resnet101, 'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101, 'deeplabv3_mobilenet': network.deeplabv3_mobilenet, 'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet}
model = model_map[opts.model](num_classes=opts.num_classes, output_stride=opts.output_stride)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
metrics = StreamSegMetrics(opts.num_classes)
optimizer = torch.optim.SGD(params=[{'params': model.backbone.parameters(), 'lr': 0.1 * opts.lr}, {'params': model.classifier.parameters(), 'lr': opts.lr}], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
if opts.lr_policy == 'poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy == 'step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
if opts.loss_type == 'focal_loss':
criterion = utils.FocalLoss(ignore_index=255, size_average=True)
elif opts.loss_type == 'cross_entropy':
criterion = nn.CrossEntropyLoss(ignore_index=255, reduction='mean')
def save_ckpt(path):
""" save current model
"""
torch.save({'cur_itrs': cur_itrs, 'model_state': model.module.state_dict(), 'optimizer_state': optimizer.state_dict(), 'scheduler_state': scheduler.state_dict(), 'best_score': best_score}, path)
print('Model saved as %s' % path)
writer = SummaryWriter(log_dir='runs/' + opts.exp)
utils.mkdir('checkpoints/' + opts.exp)
best_score = 0.0
cur_itrs = 0
cur_epochs = 0
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['model_state'])
model = nn.DataParallel(model)
model.to(device)
if opts.continue_training:
optimizer.load_state_dict(checkpoint['optimizer_state'])
scheduler.load_state_dict(checkpoint['scheduler_state'])
cur_itrs = checkpoint['cur_itrs']
best_score = checkpoint['best_score']
print('Training state restored from %s' % opts.ckpt)
print('Model restored from %s' % opts.ckpt)
del checkpoint
else:
print('[!] Retrain')
model = nn.DataParallel(model)
model.to(device)
OUTDIR = 'seed{}_imp_simclr_outbackbone'.format(opts.random_seed)
if not os.path.exists(OUTDIR):
os.makedirs(OUTDIR)
vis_sample_id = np.random.randint(0, len(val_loader), opts.vis_num_samples, np.int32) if opts.enable_vis else None
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if opts.test_only:
model.eval()
(val_score, ret_samples) = args.validate(opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, ret_samples_ids=vis_sample_id)
print(metrics.to_str(val_score))
return
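# Initialize the backbone with SimCLR pre-trained weights before running iterative magnitude pruning (IMP).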
sim_ckpt = torch.load('./simclr_pretrain.pth')['state_dict']
print('load simclr weight!')
print('Simclr 100% Model')
model_state_dict = model.module.backbone.state_dict()
update_state_dict = {k: v for (k, v) in sim_ckpt.items() if k in model_state_dict.keys()}
model_state_dict.update(update_state_dict)
model.module.backbone.load_state_dict(model_state_dict)
print('Load SimCLR model:[{}/{}]'.format(len(update_state_dict), len(model_state_dict)))
if opts.imp_num == 0:
save_name = './{}/seg_backbone_imp{}.pth'.format(OUTDIR, opts.imp_num)
torch.save({'first_conv': model.module.backbone.conv1.state_dict(), 'backbone': model.module.backbone.state_dict(), 'classifier': model.module.classifier.state_dict()}, save_name)
print('Beginning IMP:[{}]'.format(opts.imp_num))
print('INFO: Begin Training Model...' + '-' * 80)
else:
print('Beginning IMP:[{}]'.format(opts.imp_num))
load_name = './{}/seg_backbone_imp{}.pth'.format(OUTDIR, opts.imp_num)
print('Load Mask Dir: {}'.format(load_name))
mask_ckpt = torch.load(load_name, map_location='cuda')
load_name_rewind = './{}/seg_backbone_imp{}.pth'.format(OUTDIR, 0)
rewind_ckpt = torch.load(load_name_rewind, map_location='cuda')
print('Rewind first conv1 and other things')
model.module.backbone.conv1.load_state_dict(rewind_ckpt['first_conv'])
model.module.classifier.load_state_dict(rewind_ckpt['classifier'])
mask_dict = pruning.extract_mask(mask_ckpt['backbone'])
print('Mask Dict Len:[{}]'.format(len(mask_dict.keys())))
pruning.imagenet_pruning_model_custom_res50v1(model.module.backbone, mask_dict)
pruning.see_zero_rate(model.module.backbone)
print('INFO: Begin Training Model...' + '-' * 80)
interval_loss = 0
total_time = 0
while True:
model.train()
cur_epochs += 1
for (images, labels) in train_loader:
t0 = time.time()
cur_itrs += 1
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
np_loss = loss.detach().cpu().numpy()
interval_loss += np_loss
if vis is not None:
vis.vis_scalar('Loss', cur_itrs, np_loss)
if cur_itrs % 10 == 0:
interval_loss = interval_loss / 10
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + ' | ' + 'Epoch:[{}], Itrs:[{}/{}], Loss:[{:.4f}], Time:[{:.4f} min]'.format(cur_epochs, cur_itrs, int(opts.total_itrs), interval_loss, total_time / 60))
writer.add_scalar('Loss/train', interval_loss, cur_itrs)
interval_loss = 0.0
total_time = 0.0
if cur_itrs % opts.val_interval == 0 and cur_itrs >= opts.total_itrs / 2:
<DeepExtract>
torch.save({'cur_itrs': cur_itrs, 'model_state': model.module.state_dict(), 'optimizer_state': optimizer.state_dict(), 'scheduler_state': scheduler.state_dict(), 'best_score': best_score}, 'checkpoints/' + opts.exp + '/latest_%s_%s_os%d.pth' % (opts.model, opts.dataset, opts.output_stride))
print('Model saved as %s' % 'checkpoints/' + opts.exp + '/latest_%s_%s_os%d.pth' % (opts.model, opts.dataset, opts.output_stride))
</DeepExtract>
print('validation...')
model.eval()
(val_score, ret_samples) = args.validate(opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, ret_samples_ids=vis_sample_id)
print(metrics.to_str(val_score))
writer.add_scalar('mIOU/test', val_score['Mean IoU'], cur_itrs)
if val_score['Mean IoU'] > best_score:
best_score = val_score['Mean IoU']
<DeepExtract>
torch.save({'cur_itrs': cur_itrs, 'model_state': model.module.state_dict(), 'optimizer_state': optimizer.state_dict(), 'scheduler_state': scheduler.state_dict(), 'best_score': best_score}, 'checkpoints/' + opts.exp + '/best_%s_%s_os%d.pth' % (opts.model, opts.dataset, opts.output_stride))
print('Model saved as %s' % 'checkpoints/' + opts.exp + '/best_%s_%s_os%d.pth' % (opts.model, opts.dataset, opts.output_stride))
</DeepExtract>
if vis is not None:
vis.vis_scalar('[Val] Overall Acc', cur_itrs, val_score['Overall Acc'])
vis.vis_scalar('[Val] Mean IoU', cur_itrs, val_score['Mean IoU'])
vis.vis_table('[Val] Class IoU', val_score['Class IoU'])
for (k, (img, target, lbl)) in enumerate(ret_samples):
img = (denorm(img) * 255).astype(np.uint8)
target = train_dst.decode_target(target).transpose(2, 0, 1).astype(np.uint8)
lbl = train_dst.decode_target(lbl).transpose(2, 0, 1).astype(np.uint8)
concat_img = np.concatenate((img, target, lbl), axis=2)
vis.vis_image('Sample %d' % k, concat_img)
model.train()
scheduler.step()
t1 = time.time()
total_time += t1 - t0
if cur_itrs >= opts.total_itrs:
print('syd ------------[imp]--------------')
print('syd Last IOU:[{:.6f}]'.format(val_score['Mean IoU']))
print('syd Best IOU:[{:.6f}]'.format(best_score))
print('syd --------------------------')
writer.close()
print('INFO: Begin Pruning Model...' + '-' * 80)
pruning.pruning_model(model.module.backbone, 0.2, exclude_first=True)
pruning.see_zero_rate(model.module.backbone)
save_name = './{}/seg_backbone_imp{}.pth'.format(OUTDIR, opts.imp_num + 1)
torch.save({'first_conv': model.module.backbone.conv1.state_dict(), 'backbone': model.module.backbone.state_dict(), 'classifier': model.module.classifier.state_dict()}, save_name)
print('INFO: Save Dir [{}]'.format(save_name))
return
|
def main():
opts = args.get_argparser().parse_args()
args.print_args(opts)
if opts.dataset.lower() == 'voc':
opts.num_classes = 21
elif opts.dataset.lower() == 'cityscapes':
opts.num_classes = 19
vis = Visualizer(port=opts.vis_port, env=opts.vis_env) if opts.enable_vis else None
if vis is not None:
vis.vis_table('Options', vars(opts))
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Device: %s' % device)
torch.manual_seed(opts.random_seed)
np.random.seed(opts.random_seed)
random.seed(opts.random_seed)
if opts.dataset == 'voc' and (not opts.crop_val):
opts.val_batch_size = 1
(train_dst, val_dst) = args.get_dataset(opts)
train_loader = data.DataLoader(train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=2)
val_loader = data.DataLoader(val_dst, batch_size=opts.val_batch_size, shuffle=True, num_workers=2)
print('Dataset: %s, Train set: %d, Val set: %d' % (opts.dataset, len(train_dst), len(val_dst)))
model_map = {'deeplabv3_resnet50': network.deeplabv3_resnet50, 'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50, 'deeplabv3_resnet101': network.deeplabv3_resnet101, 'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101, 'deeplabv3_mobilenet': network.deeplabv3_mobilenet, 'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet}
model = model_map[opts.model](num_classes=opts.num_classes, output_stride=opts.output_stride)
if opts.separable_conv and 'plus' in opts.model:
network.convert_to_separable_conv(model.classifier)
utils.set_bn_momentum(model.backbone, momentum=0.01)
metrics = StreamSegMetrics(opts.num_classes)
optimizer = torch.optim.SGD(params=[{'params': model.backbone.parameters(), 'lr': 0.1 * opts.lr}, {'params': model.classifier.parameters(), 'lr': opts.lr}], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
if opts.lr_policy == 'poly':
scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
elif opts.lr_policy == 'step':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)
if opts.loss_type == 'focal_loss':
criterion = utils.FocalLoss(ignore_index=255, size_average=True)
elif opts.loss_type == 'cross_entropy':
criterion = nn.CrossEntropyLoss(ignore_index=255, reduction='mean')
def save_ckpt(path):
""" save current model
"""
torch.save({'cur_itrs': cur_itrs, 'model_state': model.module.state_dict(), 'optimizer_state': optimizer.state_dict(), 'scheduler_state': scheduler.state_dict(), 'best_score': best_score}, path)
print('Model saved as %s' % path)
writer = SummaryWriter(log_dir='runs/' + opts.exp)
utils.mkdir('checkpoints/' + opts.exp)
best_score = 0.0
cur_itrs = 0
cur_epochs = 0
if opts.ckpt is not None and os.path.isfile(opts.ckpt):
checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['model_state'])
model = nn.DataParallel(model)
model.to(device)
if opts.continue_training:
optimizer.load_state_dict(checkpoint['optimizer_state'])
scheduler.load_state_dict(checkpoint['scheduler_state'])
cur_itrs = checkpoint['cur_itrs']
best_score = checkpoint['best_score']
print('Training state restored from %s' % opts.ckpt)
print('Model restored from %s' % opts.ckpt)
del checkpoint
else:
print('[!] Retrain')
model = nn.DataParallel(model)
model.to(device)
OUTDIR = 'seed{}_imp_simclr_outbackbone'.format(opts.random_seed)
if not os.path.exists(OUTDIR):
os.makedirs(OUTDIR)
vis_sample_id = np.random.randint(0, len(val_loader), opts.vis_num_samples, np.int32) if opts.enable_vis else None
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if opts.test_only:
model.eval()
(val_score, ret_samples) = args.validate(opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, ret_samples_ids=vis_sample_id)
print(metrics.to_str(val_score))
return
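# Initialize the backbone with SimCLR pre-trained weights before running iterative magnitude pruning (IMP).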
sim_ckpt = torch.load('./simclr_pretrain.pth')['state_dict']
print('load simclr weight!')
print('Simclr 100% Model')
model_state_dict = model.module.backbone.state_dict()
update_state_dict = {k: v for (k, v) in sim_ckpt.items() if k in model_state_dict.keys()}
model_state_dict.update(update_state_dict)
model.module.backbone.load_state_dict(model_state_dict)
print('Load SimCLR model:[{}/{}]'.format(len(update_state_dict), len(model_state_dict)))
if opts.imp_num == 0:
save_name = './{}/seg_backbone_imp{}.pth'.format(OUTDIR, opts.imp_num)
torch.save({'first_conv': model.module.backbone.conv1.state_dict(), 'backbone': model.module.backbone.state_dict(), 'classifier': model.module.classifier.state_dict()}, save_name)
print('Beginning IMP:[{}]'.format(opts.imp_num))
print('INFO: Begin Training Model...' + '-' * 80)
else:
print('Beginning IMP:[{}]'.format(opts.imp_num))
load_name = './{}/seg_backbone_imp{}.pth'.format(OUTDIR, opts.imp_num)
print('Load Mask Dir: {}'.format(load_name))
mask_ckpt = torch.load(load_name, map_location='cuda')
load_name_rewind = './{}/seg_backbone_imp{}.pth'.format(OUTDIR, 0)
rewind_ckpt = torch.load(load_name_rewind, map_location='cuda')
print('Rewind first conv1 and other things')
model.module.backbone.conv1.load_state_dict(rewind_ckpt['first_conv'])
model.module.classifier.load_state_dict(rewind_ckpt['classifier'])
mask_dict = pruning.extract_mask(mask_ckpt['backbone'])
print('Mask Dict Len:[{}]'.format(len(mask_dict.keys())))
pruning.imagenet_pruning_model_custom_res50v1(model.module.backbone, mask_dict)
pruning.see_zero_rate(model.module.backbone)
print('INFO: Begin Training Model...' + '-' * 80)
interval_loss = 0
total_time = 0
while True:
model.train()
cur_epochs += 1
for (images, labels) in train_loader:
t0 = time.time()
cur_itrs += 1
images = images.to(device, dtype=torch.float32)
labels = labels.to(device, dtype=torch.long)
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
np_loss = loss.detach().cpu().numpy()
interval_loss += np_loss
if vis is not None:
vis.vis_scalar('Loss', cur_itrs, np_loss)
if cur_itrs % 10 == 0:
interval_loss = interval_loss / 10
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + ' | ' + 'Epoch:[{}], Itrs:[{}/{}], Loss:[{:.4f}], Time:[{:.4f} min]'.format(cur_epochs, cur_itrs, int(opts.total_itrs), interval_loss, total_time / 60))
writer.add_scalar('Loss/train', interval_loss, cur_itrs)
interval_loss = 0.0
total_time = 0.0
if cur_itrs % opts.val_interval == 0 and cur_itrs >= opts.total_itrs / 2:
torch.save({'cur_itrs': cur_itrs, 'model_state': model.module.state_dict(), 'optimizer_state': optimizer.state_dict(), 'scheduler_state': scheduler.state_dict(), 'best_score': best_score}, 'checkpoints/' + opts.exp + '/latest_%s_%s_os%d.pth' % (opts.model, opts.dataset, opts.output_stride))
print('Model saved as %s' % 'checkpoints/' + opts.exp + '/latest_%s_%s_os%d.pth' % (opts.model, opts.dataset, opts.output_stride))
print('validation...')
model.eval()
(val_score, ret_samples) = args.validate(opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, ret_samples_ids=vis_sample_id)
print(metrics.to_str(val_score))
writer.add_scalar('mIOU/test', val_score['Mean IoU'], cur_itrs)
if val_score['Mean IoU'] > best_score:
best_score = val_score['Mean IoU']
torch.save({'cur_itrs': cur_itrs, 'model_state': model.module.state_dict(), 'optimizer_state': optimizer.state_dict(), 'scheduler_state': scheduler.state_dict(), 'best_score': best_score}, 'checkpoints/' + opts.exp + '/best_%s_%s_os%d.pth' % (opts.model, opts.dataset, opts.output_stride))
print('Model saved as %s' % 'checkpoints/' + opts.exp + '/best_%s_%s_os%d.pth' % (opts.model, opts.dataset, opts.output_stride))
if vis is not None:
vis.vis_scalar('[Val] Overall Acc', cur_itrs, val_score['Overall Acc'])
vis.vis_scalar('[Val] Mean IoU', cur_itrs, val_score['Mean IoU'])
vis.vis_table('[Val] Class IoU', val_score['Class IoU'])
for (k, (img, target, lbl)) in enumerate(ret_samples):
img = (denorm(img) * 255).astype(np.uint8)
target = train_dst.decode_target(target).transpose(2, 0, 1).astype(np.uint8)
lbl = train_dst.decode_target(lbl).transpose(2, 0, 1).astype(np.uint8)
concat_img = np.concatenate((img, target, lbl), axis=2)
vis.vis_image('Sample %d' % k, concat_img)
model.train()
scheduler.step()
t1 = time.time()
total_time += t1 - t0
if cur_itrs >= opts.total_itrs:
print('syd ------------[imp]--------------')
print('syd Last IOU:[{:.6f}]'.format(val_score['Mean IoU']))
print('syd Best IOU:[{:.6f}]'.format(best_score))
print('syd --------------------------')
writer.close()
print('INFO: Begin Pruning Model...' + '-' * 80)
pruning.pruning_model(model.module.backbone, 0.2, exclude_first=True)
pruning.see_zero_rate(model.module.backbone)
save_name = './{}/seg_backbone_imp{}.pth'.format(OUTDIR, opts.imp_num + 1)
torch.save({'first_conv': model.module.backbone.conv1.state_dict(), 'backbone': model.module.backbone.state_dict(), 'classifier': model.module.classifier.state_dict()}, save_name)
print('INFO: Save Dir [{}]'.format(save_name))
return
|
CV_LTH_Pre-training
|
positive
|
def _score(self, Y, design, beta, scan_onsets, beta0, rho_e, sigma_e, rho_X0, sigma2_X0):
""" Given the data Y, and the spatial pattern beta0
of nuisance time series, return the cross-validated score
of the data Y given all parameters of the subject estimated
during the first step.
It is assumed that the user has design matrix built for the
data Y. Both beta and beta0 are posterior expectation estimated
from training data with the estimated covariance matrix U and
SNR serving as prior. We marginalize X0 instead of fitting
it in this function because this function is for the purpose
of evaluating the model on new data. We should avoid doing any
additional fitting when performing cross-validation.
The hypothetical response to the task will be subtracted, and
the unknown nuisance activity which contributes to the data
through beta0 will be marginalized.
"""
logger.info('Estimating cross-validated score for new data.')
n_T = Y.shape[0]
if design is not None:
Y = Y - np.dot(design, beta)
T_X = np.diag(rho_X0)
Var_X = sigma2_X0 / (1 - rho_X0 ** 2)
Var_dX = sigma2_X0
sigma2_e = sigma_e ** 2
scan_onsets = np.setdiff1d(scan_onsets, n_T).astype(int)
n_scan = scan_onsets.size
total_log_p = 0
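# Accumulate the log likelihood scan by scan; the nuisance time course X0 is marginalized via a forward recursion.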
for (scan, onset) in enumerate(scan_onsets):
if scan == n_scan - 1:
offset = n_T
else:
offset = scan_onsets[scan + 1]
<DeepExtract>
if Var_X.ndim == 1:
inv_Var_X = np.diag(1 / Var_X)
half_log_det_Var_X = np.sum(np.log(Var_X)) / 2.0
Var_X = np.diag(Var_X)
else:
half_log_det_Var_X = self._half_log_det(Var_X)
inv_Var_X = np.linalg.inv(Var_X)
if Var_dX.ndim == 1:
inv_Var_dX = np.diag(1 / Var_dX)
half_log_det_Var_dX = np.sum(np.log(Var_dX)) / 2.0
Var_dX = np.diag(Var_dX)
else:
inv_Var_dX = np.linalg.inv(Var_dX)
half_log_det_Var_dX = self._half_log_det(Var_dX)
if T_X.ndim == 1:
T_X = np.diag(T_X)
[n_T, n_V] = np.shape(Y[onset:offset, :])
mu = [None] * n_T
Gamma_inv = [None] * n_T
mu_Gamma_inv = [None] * n_T
log_p_data = -np.log(np.pi * 2) * (n_T * n_V) / 2 - half_log_det_Var_X - np.sum(np.log(sigma2_e)) * n_T / 2.0 + np.sum(np.log(1 - rho_e ** 2)) / 2.0 - half_log_det_Var_dX * (n_T - 1)
Lambda_0 = np.dot(T_X, np.dot(inv_Var_dX, T_X.T)) + np.dot(beta0 * rho_e ** 2 / sigma2_e, beta0.T)
H = np.dot(inv_Var_dX, T_X.T) + np.dot(beta0 * rho_e / sigma2_e, beta0.T)
Lambda_1 = inv_Var_dX + np.dot(beta0 / sigma2_e, beta0.T)
Gamma_inv[0] = inv_Var_X + np.dot(beta0 * (1 - rho_e ** 2) / sigma2_e, beta0.T)
mu_Gamma_inv[0] = np.dot(Y[onset:offset, :][0, :] * (1 - rho_e ** 2) / sigma2_e, beta0.T)
mu[0] = np.linalg.solve(Gamma_inv[0], mu_Gamma_inv[0])
log_p_data -= 0.5 * np.sum(Y[onset:offset, :][0, :] ** 2 * (1 - rho_e ** 2) / sigma2_e)
deltaY = Y[onset:offset, :][1:, :] - rho_e * Y[onset:offset, :][:-1, :]
deltaY_sigma2inv_rho_weightT = np.dot(deltaY / sigma2_e * rho_e, beta0.T)
for t in np.arange(1, n_T):
Gamma_tilde_inv = Lambda_0 + Gamma_inv[t - 1]
tmp = np.linalg.solve(Gamma_tilde_inv, H.T)
Gamma_inv[t] = Lambda_1 - np.dot(H, tmp)
mu_Gamma_inv[t] = np.dot(deltaY[t - 1, :] / sigma2_e, beta0.T) + np.dot(mu_Gamma_inv[t - 1] - deltaY_sigma2inv_rho_weightT[t - 1, :], tmp)
mu[t] = np.linalg.solve(Gamma_inv[t], mu_Gamma_inv[t])
tmp2 = mu_Gamma_inv[t - 1] - deltaY_sigma2inv_rho_weightT[t - 1, :]
log_p_data += -self._half_log_det(Gamma_tilde_inv) + np.dot(tmp2, np.linalg.solve(Gamma_tilde_inv, tmp2)) / 2.0
log_p_data += -self._half_log_det(Gamma_inv[-1]) + np.dot(mu_Gamma_inv[-1], mu[-1]) / 2.0 - np.sum(deltaY ** 2 / sigma2_e) / 2.0
(_, _, _, log_p_data, _, _, _, _, _) = (mu, mu_Gamma_inv, Gamma_inv, log_p_data, Lambda_0, Lambda_1, H, deltaY, deltaY_sigma2inv_rho_weightT)
</DeepExtract>
total_log_p += log_p_data
return total_log_p
|
def _score(self, Y, design, beta, scan_onsets, beta0, rho_e, sigma_e, rho_X0, sigma2_X0):
""" Given the data Y, and the spatial pattern beta0
of nuisance time series, return the cross-validated score
of the data Y given all parameters of the subject estimated
during the first step.
It is assumed that the user has design matrix built for the
data Y. Both beta and beta0 are posterior expectation estimated
from training data with the estimated covariance matrix U and
SNR serving as prior. We marginalize X0 instead of fitting
it in this function because this function is for the purpose
of evaluating the model on new data. We should avoid doing any
additional fitting when performing cross-validation.
The hypothetical response to the task will be subtracted, and
the unknown nuisance activity which contributes to the data
through beta0 will be marginalized.
"""
logger.info('Estimating cross-validated score for new data.')
n_T = Y.shape[0]
if design is not None:
Y = Y - np.dot(design, beta)
T_X = np.diag(rho_X0)
Var_X = sigma2_X0 / (1 - rho_X0 ** 2)
Var_dX = sigma2_X0
sigma2_e = sigma_e ** 2
scan_onsets = np.setdiff1d(scan_onsets, n_T).astype(int)
n_scan = scan_onsets.size
total_log_p = 0
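# Accumulate the log likelihood scan by scan; the nuisance time course X0 is marginalized via a forward recursion.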
for (scan, onset) in enumerate(scan_onsets):
if scan == n_scan - 1:
offset = n_T
else:
offset = scan_onsets[scan + 1]
if Var_X.ndim == 1:
inv_Var_X = np.diag(1 / Var_X)
half_log_det_Var_X = np.sum(np.log(Var_X)) / 2.0
Var_X = np.diag(Var_X)
else:
half_log_det_Var_X = self._half_log_det(Var_X)
inv_Var_X = np.linalg.inv(Var_X)
if Var_dX.ndim == 1:
inv_Var_dX = np.diag(1 / Var_dX)
half_log_det_Var_dX = np.sum(np.log(Var_dX)) / 2.0
Var_dX = np.diag(Var_dX)
else:
inv_Var_dX = np.linalg.inv(Var_dX)
half_log_det_Var_dX = self._half_log_det(Var_dX)
if T_X.ndim == 1:
T_X = np.diag(T_X)
[n_T, n_V] = np.shape(Y[onset:offset, :])
mu = [None] * n_T
Gamma_inv = [None] * n_T
mu_Gamma_inv = [None] * n_T
log_p_data = -np.log(np.pi * 2) * (n_T * n_V) / 2 - half_log_det_Var_X - np.sum(np.log(sigma2_e)) * n_T / 2.0 + np.sum(np.log(1 - rho_e ** 2)) / 2.0 - half_log_det_Var_dX * (n_T - 1)
Lambda_0 = np.dot(T_X, np.dot(inv_Var_dX, T_X.T)) + np.dot(beta0 * rho_e ** 2 / sigma2_e, beta0.T)
H = np.dot(inv_Var_dX, T_X.T) + np.dot(beta0 * rho_e / sigma2_e, beta0.T)
Lambda_1 = inv_Var_dX + np.dot(beta0 / sigma2_e, beta0.T)
Gamma_inv[0] = inv_Var_X + np.dot(beta0 * (1 - rho_e ** 2) / sigma2_e, beta0.T)
mu_Gamma_inv[0] = np.dot(Y[onset:offset, :][0, :] * (1 - rho_e ** 2) / sigma2_e, beta0.T)
mu[0] = np.linalg.solve(Gamma_inv[0], mu_Gamma_inv[0])
log_p_data -= 0.5 * np.sum(Y[onset:offset, :][0, :] ** 2 * (1 - rho_e ** 2) / sigma2_e)
deltaY = Y[onset:offset, :][1:, :] - rho_e * Y[onset:offset, :][:-1, :]
deltaY_sigma2inv_rho_weightT = np.dot(deltaY / sigma2_e * rho_e, beta0.T)
for t in np.arange(1, n_T):
Gamma_tilde_inv = Lambda_0 + Gamma_inv[t - 1]
tmp = np.linalg.solve(Gamma_tilde_inv, H.T)
Gamma_inv[t] = Lambda_1 - np.dot(H, tmp)
mu_Gamma_inv[t] = np.dot(deltaY[t - 1, :] / sigma2_e, beta0.T) + np.dot(mu_Gamma_inv[t - 1] - deltaY_sigma2inv_rho_weightT[t - 1, :], tmp)
mu[t] = np.linalg.solve(Gamma_inv[t], mu_Gamma_inv[t])
tmp2 = mu_Gamma_inv[t - 1] - deltaY_sigma2inv_rho_weightT[t - 1, :]
log_p_data += -self._half_log_det(Gamma_tilde_inv) + np.dot(tmp2, np.linalg.solve(Gamma_tilde_inv, tmp2)) / 2.0
log_p_data += -self._half_log_det(Gamma_inv[-1]) + np.dot(mu_Gamma_inv[-1], mu[-1]) / 2.0 - np.sum(deltaY ** 2 / sigma2_e) / 2.0
(_, _, _, log_p_data, _, _, _, _, _) = (mu, mu_Gamma_inv, Gamma_inv, log_p_data, Lambda_0, Lambda_1, H, deltaY, deltaY_sigma2inv_rho_weightT)
total_log_p += log_p_data
return total_log_p
|
brainiak
|
positive
|
def _dismiss_initial_dialog(self):
<DeepExtract>
self.wait_for_element_to_be_clickable((By.ID, 'play_button'))
self.browser.find_element_by_id('play_button').click()
self.wait_for_element_to_be_invisible((By.ID, 'play_button'))
</DeepExtract>
return self
|
def _dismiss_initial_dialog(self):
self.wait_for_element_to_be_clickable((By.ID, 'play_button'))
self.browser.find_element_by_id('play_button').click()
self.wait_for_element_to_be_invisible((By.ID, 'play_button'))
return self
|
codeforlife-portal
|
positive
|
def _quit_dialog():
if not _conf_changed:
return "No changes to save (for '{}')".format(_conf_filename)
while True:
<DeepExtract>
win = _styled_win('body')
win.keypad(True)
_resize_key_dialog(win, ' Save configuration?\n\n(Y)es (N)o (C)ancel')
while True:
_draw_main()
_draw_key_dialog(win, 'Quit', ' Save configuration?\n\n(Y)es (N)o (C)ancel')
curses.doupdate()
c = _getch_compat(win)
if c == curses.KEY_RESIZE:
_resize_main()
_resize_key_dialog(win, ' Save configuration?\n\n(Y)es (N)o (C)ancel')
elif c == '\x1b':
    c = None
    break
elif isinstance(c, str):
    c = c.lower()
    if c in 'ync':
        break
</DeepExtract>
if c is None or c == 'c':
return None
if c == 'y':
<DeepExtract>
try:
msg = _kconf.write_config(_conf_filename)
except EnvironmentError as e:
_error("Error saving {} to '{}'\n\n{} (errno: {})".format('configuration', e.filename, e.strerror, errno.errorcode[e.errno]))
msg = None
</DeepExtract>
if msg:
return msg
elif c == 'n':
return 'Configuration ({}) was not saved'.format(_conf_filename)
|
def _quit_dialog():
if not _conf_changed:
return "No changes to save (for '{}')".format(_conf_filename)
while True:
win = _styled_win('body')
win.keypad(True)
_resize_key_dialog(win, ' Save configuration?\n\n(Y)es (N)o (C)ancel')
while True:
_draw_main()
_draw_key_dialog(win, 'Quit', ' Save configuration?\n\n(Y)es (N)o (C)ancel')
curses.doupdate()
c = _getch_compat(win)
if c == curses.KEY_RESIZE:
_resize_main()
_resize_key_dialog(win, ' Save configuration?\n\n(Y)es (N)o (C)ancel')
elif c == '\x1b':
    c = None
    break
elif isinstance(c, str):
    c = c.lower()
    if c in 'ync':
        break
if c is None or c == 'c':
return None
if c == 'y':
try:
msg = _kconf.write_config(_conf_filename)
except EnvironmentError as e:
_error("Error saving {} to '{}'\n\n{} (errno: {})".format('configuration', e.filename, e.strerror, errno.errorcode[e.errno]))
msg = None
if msg:
return msg
elif c == 'n':
return 'Configuration ({}) was not saved'.format(_conf_filename)
|
cello
|
positive
|
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading ImageNet.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)}
items_to_handlers = {'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'), 'label': slim.tfexample_decoder.Tensor('image/class/label'), 'label_text': slim.tfexample_decoder.Tensor('image/class/text'), 'object/bbox': slim.tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'), 'object/label': slim.tfexample_decoder.Tensor('image/object/class/label')}
decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
else:
<DeepExtract>
base_url = 'https://raw.githubusercontent.com/tensorflow/models/master/research/inception/inception/data/'
synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url)
synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url)
(filename, _) = urllib.request.urlretrieve(synset_url)
synset_list = [s.strip() for s in open(filename).readlines()]
num_synsets_in_ilsvrc = len(synset_list)
assert num_synsets_in_ilsvrc == 1000
(filename, _) = urllib.request.urlretrieve(synset_to_human_url)
synset_to_human_list = open(filename).readlines()
num_synsets_in_all_imagenet = len(synset_to_human_list)
assert num_synsets_in_all_imagenet == 21842
synset_to_human = {}
for s in synset_to_human_list:
parts = s.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
label_index = 1
labels_to_names = {0: 'background'}
for synset in synset_list:
name = synset_to_human[synset]
labels_to_names[label_index] = name
label_index += 1
labels_to_names = labels_to_names
</DeepExtract>
dataset_utils.write_label_file(labels_to_names, dataset_dir)
return slim.dataset.Dataset(data_sources=file_pattern, reader=reader, decoder=decoder, num_samples=_SPLITS_TO_SIZES[split_name], items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, num_classes=_NUM_CLASSES, labels_to_names=labels_to_names)
|
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading ImageNet.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)}
items_to_handlers = {'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'), 'label': slim.tfexample_decoder.Tensor('image/class/label'), 'label_text': slim.tfexample_decoder.Tensor('image/class/text'), 'object/bbox': slim.tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'), 'object/label': slim.tfexample_decoder.Tensor('image/object/class/label')}
decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
else:
base_url = 'https://raw.githubusercontent.com/tensorflow/models/master/research/inception/inception/data/'
synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url)
synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url)
(filename, _) = urllib.request.urlretrieve(synset_url)
synset_list = [s.strip() for s in open(filename).readlines()]
num_synsets_in_ilsvrc = len(synset_list)
assert num_synsets_in_ilsvrc == 1000
(filename, _) = urllib.request.urlretrieve(synset_to_human_url)
synset_to_human_list = open(filename).readlines()
num_synsets_in_all_imagenet = len(synset_to_human_list)
assert num_synsets_in_all_imagenet == 21842
synset_to_human = {}
for s in synset_to_human_list:
parts = s.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
label_index = 1
labels_to_names = {0: 'background'}
for synset in synset_list:
name = synset_to_human[synset]
labels_to_names[label_index] = name
label_index += 1
labels_to_names = labels_to_names
dataset_utils.write_label_file(labels_to_names, dataset_dir)
return slim.dataset.Dataset(data_sources=file_pattern, reader=reader, decoder=decoder, num_samples=_SPLITS_TO_SIZES[split_name], items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, num_classes=_NUM_CLASSES, labels_to_names=labels_to_names)
|
CVTron
|
positive
|
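A hedged usage sketch for the split factory above (assumptions: TensorFlow 1.x with tf.contrib.slim available, ImageNet TFRecords already written under dataset_dir, and the '/path/to/...' placeholder replaced with a real directory):
import tensorflow as tf

slim = tf.contrib.slim

# Build the Dataset description and read decoded examples with a slim provider.
dataset = get_split('train', '/path/to/imagenet_tfrecords')
provider = slim.dataset_data_provider.DatasetDataProvider(dataset, num_readers=4, shuffle=True)
# The item names mirror the items_to_handlers defined in get_split above.
image, label, label_text = provider.get(['image', 'label', 'label_text'])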
def _setup_criteria_area(self):
def on_mapped(widget, event):
if self._vp_criteria_area.allocation.x < 0:
twisted.internet.reactor.callLater(0.1, widget.emit, 'map-event', event)
return
if widget.handler_is_connected(handle):
widget.disconnect(handle)
self._vp_criteria_area.set_data('was_mapped', True)
if self._plugin.config['common']['label_options_pane_pos'] > -1:
self._vp_criteria_area.set_position(self._vp_criteria_area.allocation.height - self._plugin.config['common']['label_options_pane_pos'])
<DeepExtract>
handle_size = self._vp_criteria_area.allocation.height - self._vp_criteria_area.get_property('max-position')
max_dist = self._hb_test_criteria.allocation.height + handle_size * 2
threshold = max_dist / 2
if self._vp_criteria_area.allocation.height - self._vp_criteria_area.get_position() > threshold:
twisted.internet.reactor.callLater(0.1, self._vp_criteria_area.set_position, self._vp_criteria_area.allocation.height - max_dist)
else:
twisted.internet.reactor.callLater(0.1, self._vp_criteria_area.set_position, self._vp_criteria_area.allocation.height)
</DeepExtract>
def clamp_position(widget, *args):
handle_size = widget.allocation.height - widget.get_property('max-position')
max_dist = self._hb_test_criteria.allocation.height + handle_size * 2
threshold = max_dist / 2
if widget.allocation.height - widget.get_position() > threshold:
twisted.internet.reactor.callLater(0.1, widget.set_position, widget.allocation.height - max_dist)
else:
twisted.internet.reactor.callLater(0.1, widget.set_position, widget.allocation.height)
handle = self._eb_criteria_area.connect('map-event', on_mapped)
self._vp_criteria_area.connect('button-release-event', clamp_position)
self._vp_criteria_area.connect('accept-position', clamp_position)
|
def _setup_criteria_area(self):
def on_mapped(widget, event):
if self._vp_criteria_area.allocation.x < 0:
twisted.internet.reactor.callLater(0.1, widget.emit, 'map-event', event)
return
if widget.handler_is_connected(handle):
widget.disconnect(handle)
self._vp_criteria_area.set_data('was_mapped', True)
if self._plugin.config['common']['label_options_pane_pos'] > -1:
self._vp_criteria_area.set_position(self._vp_criteria_area.allocation.height - self._plugin.config['common']['label_options_pane_pos'])
handle_size = self._vp_criteria_area.allocation.height - self._vp_criteria_area.get_property('max-position')
max_dist = self._hb_test_criteria.allocation.height + handle_size * 2
threshold = max_dist / 2
if self._vp_criteria_area.allocation.height - self._vp_criteria_area.get_position() > threshold:
twisted.internet.reactor.callLater(0.1, self._vp_criteria_area.set_position, self._vp_criteria_area.allocation.height - max_dist)
else:
twisted.internet.reactor.callLater(0.1, self._vp_criteria_area.set_position, self._vp_criteria_area.allocation.height)
def clamp_position(widget, *args):
handle_size = widget.allocation.height - widget.get_property('max-position')
max_dist = self._hb_test_criteria.allocation.height + handle_size * 2
threshold = max_dist / 2
if widget.allocation.height - widget.get_position() > threshold:
twisted.internet.reactor.callLater(0.1, widget.set_position, widget.allocation.height - max_dist)
else:
twisted.internet.reactor.callLater(0.1, widget.set_position, widget.allocation.height)
handle = self._eb_criteria_area.connect('map-event', on_mapped)
self._vp_criteria_area.connect('button-release-event', clamp_position)
self._vp_criteria_area.connect('accept-position', clamp_position)
|
deluge-labelplus
|
positive
|
def roaming(value=None):
"""
    Retrieves the current roaming configuration. If the value parameter is set, the roaming service is set to that value.
Possible values:
- False - (bool) Roaming is disabled
- True - (bool) Roaming is enabled
- 'auto' - (string) Roaming is set to Auto mode
"""
DISABLED = 1
ENABLED = 2
AUTO = 255
if value == None:
<DeepExtract>
res = client.send_sync(_msg_pack('AT+QCFG="roamservice"', cooldown_delay=cooldown_delay, **kwargs))
</DeepExtract>
roaming_value = int(_parse_dict(res.pop('data'))['+QCFG'].split(',')[1])
if roaming_value == DISABLED:
res['value'] = False
elif roaming_value == ENABLED:
res['value'] = True
elif roaming_value == AUTO:
res['value'] = 'auto'
else:
raise ValueError("Got unknown roaming value of '{}'".format(roaming_value))
return res
if value == False:
<DeepExtract>
res = client.send_sync(_msg_pack('AT+QCFG="roamservice",1,1', cooldown_delay=cooldown_delay, **kwargs))
</DeepExtract>
elif value == True:
<DeepExtract>
res = client.send_sync(_msg_pack('AT+QCFG="roamservice",2,1', cooldown_delay=cooldown_delay, **kwargs))
</DeepExtract>
elif value == 'auto':
<DeepExtract>
res = client.send_sync(_msg_pack('AT+QCFG="roamservice",255,1', cooldown_delay=cooldown_delay, **kwargs))
</DeepExtract>
else:
raise ValueError('Value of {} is not supported by this function'.format(value))
return res
|
def roaming(value=None):
"""
    Retrieves the current roaming configuration. If the value parameter is set, the roaming service is set to that value.
Possible values:
- False - (bool) Roaming is disabled
- True - (bool) Roaming is enabled
- 'auto' - (string) Roaming is set to Auto mode
"""
DISABLED = 1
ENABLED = 2
AUTO = 255
if value == None:
res = client.send_sync(_msg_pack('AT+QCFG="roamservice"', cooldown_delay=cooldown_delay, **kwargs))
roaming_value = int(_parse_dict(res.pop('data'))['+QCFG'].split(',')[1])
if roaming_value == DISABLED:
res['value'] = False
elif roaming_value == ENABLED:
res['value'] = True
elif roaming_value == AUTO:
res['value'] = 'auto'
else:
raise ValueError("Got unknown roaming value of '{}'".format(roaming_value))
return res
if value == False:
res = client.send_sync(_msg_pack('AT+QCFG="roamservice",1,1', cooldown_delay=cooldown_delay, **kwargs))
elif value == True:
res = client.send_sync(_msg_pack('AT+QCFG="roamservice",2,1', cooldown_delay=cooldown_delay, **kwargs))
elif value == 'auto':
res = client.send_sync(_msg_pack('AT+QCFG="roamservice",255,1', cooldown_delay=cooldown_delay, **kwargs))
else:
raise ValueError('Value of {} is not supported by this function'.format(value))
return res
|
autopi-core
|
positive
|
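Since client, _msg_pack and _parse_dict are not shown here, the integer-to-value mapping used by roaming() above can be illustrated with a self-contained sketch that parses a raw reply line (assumption: the modem answers with something like '+QCFG: "roamservice",2'):
def parse_roamservice(reply_line):
    # Same mode constants as in roaming() above.
    DISABLED, ENABLED, AUTO = 1, 2, 255
    mode = int(reply_line.split(',')[1])
    if mode == DISABLED:
        return False
    if mode == ENABLED:
        return True
    if mode == AUTO:
        return 'auto'
    raise ValueError("Got unknown roaming value of '{}'".format(mode))

assert parse_roamservice('+QCFG: "roamservice",255') == 'auto'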
def forward(self, data):
"""
Args:
data: (batch_size, num_samples, num_features) or (num_samples, num_features)
Returns:
output: (batch_size, num_samples, num_features) or (num_samples, num_features) in PCA domain.
"""
n_dims = data.dim()
    assert n_dims in [2, 3], 'data is expected to be a 2D or 3D tensor.'
if n_dims == 2:
data = data.unsqueeze(dim=0)
if self.training:
if self.standardize:
(self.mean, self.std) = (torch.mean(data, dim=1), torch.std(data, dim=1))
<DeepExtract>
standardized = (data - self.mean.unsqueeze(dim=1)) / self.std.unsqueeze(dim=1)
</DeepExtract>
else:
standardized = data
cov = torch.bmm(standardized.permute(0, 2, 1), standardized) / standardized.size(1)
(_, proj_matrix) = torch.linalg.eigh(cov)
self.proj_matrix = torch.flip(proj_matrix, dims=(-1,))
else:
if self.proj_matrix is None:
            raise RuntimeError('proj_matrix must be computed in advance.')
if self.standardize:
<DeepExtract>
standardized = (data - self.mean.unsqueeze(dim=1)) / self.std.unsqueeze(dim=1)
</DeepExtract>
else:
standardized = data
output = torch.bmm(standardized, self.proj_matrix)
if n_dims == 2:
output = output.squeeze(dim=0)
return output
|
def forward(self, data):
"""
Args:
data: (batch_size, num_samples, num_features) or (num_samples, num_features)
Returns:
output: (batch_size, num_samples, num_features) or (num_samples, num_features) in PCA domain.
"""
n_dims = data.dim()
    assert n_dims in [2, 3], 'data is expected to be a 2D or 3D tensor.'
if n_dims == 2:
data = data.unsqueeze(dim=0)
if self.training:
if self.standardize:
(self.mean, self.std) = (torch.mean(data, dim=1), torch.std(data, dim=1))
standardized = (data - self.mean.unsqueeze(dim=1)) / self.std.unsqueeze(dim=1)
else:
standardized = data
cov = torch.bmm(standardized.permute(0, 2, 1), standardized) / standardized.size(1)
(_, proj_matrix) = torch.linalg.eigh(cov)
self.proj_matrix = torch.flip(proj_matrix, dims=(-1,))
else:
if self.proj_matrix is None:
            raise RuntimeError('proj_matrix must be computed in advance.')
if self.standardize:
standardized = (data - self.mean.unsqueeze(dim=1)) / self.std.unsqueeze(dim=1)
else:
standardized = data
output = torch.bmm(standardized, self.proj_matrix)
if n_dims == 2:
output = output.squeeze(dim=0)
return output
|
DNN-based_source_separation
|
positive
|
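The training branch of forward() above is a plain eigendecomposition PCA; a standalone sketch of the same math, without the module state (assumption: PyTorch >= 1.8 for torch.linalg.eigh):
import torch

def pca_fit_transform(data):
    # data: (batch_size, num_samples, num_features)
    mean = torch.mean(data, dim=1, keepdim=True)
    std = torch.std(data, dim=1, keepdim=True)
    standardized = (data - mean) / std
    # Covariance over samples, then eigenvectors reordered so the largest-variance
    # components come first (eigh returns ascending eigenvalues, hence the flip).
    cov = torch.bmm(standardized.permute(0, 2, 1), standardized) / standardized.size(1)
    _, proj_matrix = torch.linalg.eigh(cov)
    proj_matrix = torch.flip(proj_matrix, dims=(-1,))
    return torch.bmm(standardized, proj_matrix)

out = pca_fit_transform(torch.randn(2, 128, 8))  # -> shape (2, 128, 8)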
def test(self, test_dataset_container: DatasetContainer, batch_size: int=32, num_workers: int=1, callbacks: Union[List, None]=None, seed: int=42, verbose: Union[None, bool]=None) -> Dict:
"""
Method to test a retrained or a pretrained model using a dataset with the default tags. If you test a
retrained model with different prediction tags, we will use those tags.
Args:
test_dataset_container (~deepparse.dataset_container.DatasetContainer):
The test dataset container of the data to use.
batch_size (int): The size of the batch (by default, ``32``).
num_workers (int): Number of workers to use for the data loader (by default, ``1`` worker).
callbacks (Union[list, None]): List of callbacks to use during training.
See Poutyne `callback <https://poutyne.org/callbacks.html#callback-class>`_ for more information.
By default, we set no callback.
seed (int): Seed to use (by default, ``42``).
verbose (Union[None, bool]): To override the AddressParser verbosity for the test. When set to True or
False, it will override (but it does not change the AddressParser verbosity) the test verbosity.
If set to the default value None, the AddressParser verbosity is used as the test verbosity.
Return:
A dictionary with the stats (see `Experiment class
<https://poutyne.org/experiment.html#poutyne.Experiment.train>`_ for details).
Note:
We use NLL loss and accuracy as in the `article <https://arxiv.org/abs/2006.16152>`_.
Examples:
.. code-block:: python
address_parser = AddressParser(device=0, verbose=True) # On GPU device 0
data_path = "path_to_a_pickle_test_dataset.p"
test_container = PickleDatasetContainer(data_path, is_training_container=False)
# We test the model on the data, and we override the test verbosity
address_parser.test(test_container, verbose=False)
You can also test your fine-tuned model
.. code-block:: python
address_components = {"ATag":0, "AnotherTag": 1, "EOS": 2}
address_parser = AddressParser(device=0) # On GPU device 0
# Train phase
data_path = "path_to_a_pickle_train_dataset.p"
train_container = PickleDatasetContainer(data_path)
address_parser.retrain(container, train_ratio=0.8, epochs=1, batch_size=128,
prediction_tags=address_components)
# Test phase
data_path = "path_to_a_pickle_test_dataset.p"
test_container = PickleDatasetContainer(data_path, is_training_container=False)
address_parser.test(test_container) # Test the retrained model
"""
<DeepExtract>
if system() == 'Windows' and 'fasttext' in self.model_type and (num_workers > 0):
        raise FastTextModelError("On Windows system, we cannot use FastText-like models with parallelism workers since FastText objects are not pickleable with the parallelism process used by Windows. Thus, you need to set num_workers to 0 since 1 also means 'parallelism'.")
if system() == 'Darwin' and 'fasttext' in self.model_type and (num_workers > 0):
torch.multiprocessing.set_start_method('fork')
warnings.warn("On MacOS system, we cannot use FastText-like models with parallelism out-of-the-box since FastText objects are not pickleable with the parallelism process used by default by MacOS. Thus, we have set it to the 'fork' (i.e. torch.multiprocessing.set_start_method('fork')) to allow torch parallelism.", category=UserWarning)
</DeepExtract>
if 'fasttext-light' in self.model_type:
raise FastTextModelError("It's not possible to test a fasttext-light due to pymagnitude problem. See the Retrain method doc for more details.")
if not isinstance(test_dataset_container, DatasetContainer):
raise ValueError('The test_dataset_container has to be a DatasetContainer. Read the docs at https://deepparse.org/ for more details.')
if not test_dataset_container.is_a_train_container():
raise ValueError('The dataset container is not a train container.')
callbacks = [] if callbacks is None else callbacks
test_generator = DataLoader(test_dataset_container, collate_fn=partial(self.processor.process_for_training, teacher_forcing=False), batch_size=batch_size, num_workers=num_workers)
exp = Experiment('./checkpoint', self.model, device=self.device, loss_function=nll_loss, batch_metrics=[accuracy], logging=False)
if verbose is None:
verbose = self.verbose
test_res = exp.test(test_generator, seed=seed, callbacks=callbacks, verbose=verbose)
return test_res
|
def test(self, test_dataset_container: DatasetContainer, batch_size: int=32, num_workers: int=1, callbacks: Union[List, None]=None, seed: int=42, verbose: Union[None, bool]=None) -> Dict:
"""
Method to test a retrained or a pretrained model using a dataset with the default tags. If you test a
retrained model with different prediction tags, we will use those tags.
Args:
test_dataset_container (~deepparse.dataset_container.DatasetContainer):
The test dataset container of the data to use.
batch_size (int): The size of the batch (by default, ``32``).
num_workers (int): Number of workers to use for the data loader (by default, ``1`` worker).
callbacks (Union[list, None]): List of callbacks to use during training.
See Poutyne `callback <https://poutyne.org/callbacks.html#callback-class>`_ for more information.
By default, we set no callback.
seed (int): Seed to use (by default, ``42``).
verbose (Union[None, bool]): To override the AddressParser verbosity for the test. When set to True or
False, it will override (but it does not change the AddressParser verbosity) the test verbosity.
If set to the default value None, the AddressParser verbosity is used as the test verbosity.
Return:
A dictionary with the stats (see `Experiment class
<https://poutyne.org/experiment.html#poutyne.Experiment.train>`_ for details).
Note:
We use NLL loss and accuracy as in the `article <https://arxiv.org/abs/2006.16152>`_.
Examples:
.. code-block:: python
address_parser = AddressParser(device=0, verbose=True) # On GPU device 0
data_path = "path_to_a_pickle_test_dataset.p"
test_container = PickleDatasetContainer(data_path, is_training_container=False)
# We test the model on the data, and we override the test verbosity
address_parser.test(test_container, verbose=False)
You can also test your fine-tuned model
.. code-block:: python
address_components = {"ATag":0, "AnotherTag": 1, "EOS": 2}
address_parser = AddressParser(device=0) # On GPU device 0
# Train phase
data_path = "path_to_a_pickle_train_dataset.p"
train_container = PickleDatasetContainer(data_path)
address_parser.retrain(container, train_ratio=0.8, epochs=1, batch_size=128,
prediction_tags=address_components)
# Test phase
data_path = "path_to_a_pickle_test_dataset.p"
test_container = PickleDatasetContainer(data_path, is_training_container=False)
address_parser.test(test_container) # Test the retrained model
"""
if system() == 'Windows' and 'fasttext' in self.model_type and (num_workers > 0):
        raise FastTextModelError("On Windows system, we cannot use FastText-like models with parallelism workers since FastText objects are not pickleable with the parallelism process used by Windows. Thus, you need to set num_workers to 0 since 1 also means 'parallelism'.")
if system() == 'Darwin' and 'fasttext' in self.model_type and (num_workers > 0):
torch.multiprocessing.set_start_method('fork')
warnings.warn("On MacOS system, we cannot use FastText-like models with parallelism out-of-the-box since FastText objects are not pickleable with the parallelism process used by default by MacOS. Thus, we have set it to the 'fork' (i.e. torch.multiprocessing.set_start_method('fork')) to allow torch parallelism.", category=UserWarning)
if 'fasttext-light' in self.model_type:
raise FastTextModelError("It's not possible to test a fasttext-light due to pymagnitude problem. See the Retrain method doc for more details.")
if not isinstance(test_dataset_container, DatasetContainer):
raise ValueError('The test_dataset_container has to be a DatasetContainer. Read the docs at https://deepparse.org/ for more details.')
if not test_dataset_container.is_a_train_container():
raise ValueError('The dataset container is not a train container.')
callbacks = [] if callbacks is None else callbacks
test_generator = DataLoader(test_dataset_container, collate_fn=partial(self.processor.process_for_training, teacher_forcing=False), batch_size=batch_size, num_workers=num_workers)
exp = Experiment('./checkpoint', self.model, device=self.device, loss_function=nll_loss, batch_metrics=[accuracy], logging=False)
if verbose is None:
verbose = self.verbose
test_res = exp.test(test_generator, seed=seed, callbacks=callbacks, verbose=verbose)
return test_res
|
deepparse
|
positive
|
def evaluate_dist_scores(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefix='') -> Dict:
eval_output_dir = args.output_dir
<DeepExtract>
if generate:
cs_len = args.gen_cs_len
hs_len = args.gen_hs_len
tis_len = args.gen_tis_len
else:
cs_len = args.cs_len
hs_len = args.hs_len
tis_len = args.tis_len
if file_path is None:
file_path = args.eval_data_file if True else args.train_data_file
if line_by_line:
logger.info('Creating LineByLineTextDataset')
eval_dataset = LineByLineTextDataset(tokenizer, args, file_path=file_path, block_size=args.block_size, prepend_bos_token=prepend_bos_token)
elif True:
logger.info('Creating JsonlCoconDataset for eval')
eval_dataset = JsonlCoconDataset(tokenizer, args, file_path=file_path, block_size=args.block_size, text_json_key=args.text_json_key, cs_len=cs_len, hs_len=hs_len, tis_len=tis_len, evaluate=True, prepended_text_to_remove=args.prepended_text_to_remove)
else:
eval_dataset = JsonlCoconDataset(tokenizer, args, file_path=file_path, cs_len=cs_len, hs_len=hs_len, tis_len=tis_len)
</DeepExtract>
if args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir, exist_ok=True)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
def collate(examples: List[torch.Tensor]):
if tokenizer._pad_token is None:
return pad_sequence(examples, batch_first=True)
return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info('***** Running evaluation {} *****'.format(prefix))
logger.info(' Num examples = %d', len(eval_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
dist_eval_samples = []
num_tokens = 0
for batch in tqdm(eval_dataloader, desc='Evaluating'):
sample_flattened = batch.reshape(-1)
dist_eval_samples.append(sample_flattened.tolist())
num_tokens += len(sample_flattened)
nb_eval_steps += 1
if nb_eval_steps == args.dist_eval_max_samples:
logger.info('breaking iteration @ sample # {}'.format(nb_eval_steps))
break
dist1_score = count_ngram(dist_eval_samples, 1) / float(num_tokens)
dist2_score = count_ngram(dist_eval_samples, 2) / float(num_tokens)
dist3_score = count_ngram(dist_eval_samples, 3) / float(num_tokens)
result = {'Dist-1': dist1_score, 'Dist-2': dist2_score, 'Dist-3': dist3_score}
output_filename = 'distK_' + args.eval_output_filename
output_eval_file = os.path.join(eval_output_dir, prefix, output_filename)
with open(output_eval_file, 'w') as writer:
logger.info('***** Dist-1,2,3 Eval results {} *****'.format(prefix))
for key in sorted(result.keys()):
logger.info(' %s = %s', key, str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
return result
|
def evaluate_dist_scores(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefix='') -> Dict:
eval_output_dir = args.output_dir
if generate:
cs_len = args.gen_cs_len
hs_len = args.gen_hs_len
tis_len = args.gen_tis_len
else:
cs_len = args.cs_len
hs_len = args.hs_len
tis_len = args.tis_len
if file_path is None:
file_path = args.eval_data_file if True else args.train_data_file
if line_by_line:
logger.info('Creating LineByLineTextDataset')
eval_dataset = LineByLineTextDataset(tokenizer, args, file_path=file_path, block_size=args.block_size, prepend_bos_token=prepend_bos_token)
elif True:
logger.info('Creating JsonlCoconDataset for eval')
eval_dataset = JsonlCoconDataset(tokenizer, args, file_path=file_path, block_size=args.block_size, text_json_key=args.text_json_key, cs_len=cs_len, hs_len=hs_len, tis_len=tis_len, evaluate=True, prepended_text_to_remove=args.prepended_text_to_remove)
else:
eval_dataset = JsonlCoconDataset(tokenizer, args, file_path=file_path, cs_len=cs_len, hs_len=hs_len, tis_len=tis_len)
if args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir, exist_ok=True)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
def collate(examples: List[torch.Tensor]):
if tokenizer._pad_token is None:
return pad_sequence(examples, batch_first=True)
return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info('***** Running evaluation {} *****'.format(prefix))
logger.info(' Num examples = %d', len(eval_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
dist_eval_samples = []
num_tokens = 0
for batch in tqdm(eval_dataloader, desc='Evaluating'):
sample_flattened = batch.reshape(-1)
dist_eval_samples.append(sample_flattened.tolist())
num_tokens += len(sample_flattened)
nb_eval_steps += 1
if nb_eval_steps == args.dist_eval_max_samples:
logger.info('breaking iteration @ sample # {}'.format(nb_eval_steps))
break
dist1_score = count_ngram(dist_eval_samples, 1) / float(num_tokens)
dist2_score = count_ngram(dist_eval_samples, 2) / float(num_tokens)
dist3_score = count_ngram(dist_eval_samples, 3) / float(num_tokens)
result = {'Dist-1': dist1_score, 'Dist-2': dist2_score, 'Dist-3': dist3_score}
output_filename = 'distK_' + args.eval_output_filename
output_eval_file = os.path.join(eval_output_dir, prefix, output_filename)
with open(output_eval_file, 'w') as writer:
logger.info('***** Dist-1,2,3 Eval results {} *****'.format(prefix))
for key in sorted(result.keys()):
logger.info(' %s = %s', key, str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
return result
|
COCON_ICLR2021
|
positive
|
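count_ngram is not shown above, so, as a hedged reference, the Dist-1/2/3 scores are conventionally the number of distinct n-grams divided by the total token count; a minimal sketch under that assumption:
def distinct_ngrams(samples, n):
    # samples: list of token-id lists, as collected in dist_eval_samples above.
    ngrams = set()
    for tokens in samples:
        for i in range(len(tokens) - n + 1):
            ngrams.add(tuple(tokens[i:i + n]))
    return len(ngrams)

samples = [[1, 2, 3, 2, 3], [4, 5]]
num_tokens = sum(len(s) for s in samples)
dist1 = distinct_ngrams(samples, 1) / float(num_tokens)  # 5 unique tokens / 7 tokens
dist2 = distinct_ngrams(samples, 2) / float(num_tokens)  # 4 unique bigrams / 7 tokens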
def test_changelist(self):
url = reverse('admin:app_normal_changelist')
request = self.request_factory.get(url)
<DeepExtract>
normaladmin = admin.site._registry[Normal]
</DeepExtract>
with translation.override('en'):
queryset = normaladmin.get_queryset(request)
self.assertEqual(queryset.count(), self.normal_count)
|
def test_changelist(self):
url = reverse('admin:app_normal_changelist')
request = self.request_factory.get(url)
normaladmin = admin.site._registry[Normal]
with translation.override('en'):
queryset = normaladmin.get_queryset(request)
self.assertEqual(queryset.count(), self.normal_count)
|
django-hvad
|
positive
|
def predict_proba_from_restored(X_test, file_path):
"""
    Predicts y for the given X_test. The path under which the model was stored has to be
    provided.
    :param X_test: The test features
    :param file_path: The path of the saved model (resolved inside the mlp_models folder)
:return: The prediction y for X_test
"""
folder_name = file_path.split('mlp_models/')[1]
mlp_folder_path = '%s/data/fnc-1/mlp_models/' % path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
file_path = mlp_folder_path + folder_name
with tf.Graph().as_default() as g:
with tf.Session(config=self.config) as sess:
<DeepExtract>
self.x = tf.placeholder(tf.float32, shape=[None, len(X_test[0])])
self.y = tf.placeholder(tf.float32, shape=[None, self.n_classes])
self.keep_prob = tf.placeholder(tf.float32)
self.momentum = tf.placeholder(tf.float32)
self.learning_rate_tensor = tf.placeholder(tf.float32)
self.n_nodes_hl_list = [len(X_test[0])]
for layer_size in self.hidden_layers:
self.n_nodes_hl_list.append(layer_size)
self.n_nodes_hl_list.append(self.n_classes)
def get_layer(input_length, n_nodes_hl, hl_no, layer_input):
"""
Returns a layer with the given parameters
:param input_length: Size of the input of the layer
:param n_nodes_hl: Number of nodes the layer should have
:param hl_no: Index of the hidden layer, e.g. 1, 2, 3...
:param weight_init: Method how the weights should be initialized;
'xavier', 'sqrt_n' or leave empty for truncated normal with stddev of 0.1
:param layer_input: The input for the layer; either a variable holding X or the previous layer
:return: A layer with the given parameters
"""
def weight_variable(shape, name):
if self.weight_init == 'xavier':
(prediction, prob) = tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer(seed=self.seed))
if self.weight_init == 'sqrt_n':
(prediction, prob) = tf.get_variable(name, shape, initializer=tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False, seed=self.seed))
else:
(prediction, prob) = tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
def bias_variable(shape, name):
initial = tf.constant(self.bias_init, shape=shape)
(prediction, prob) = tf.Variable(initial, name=name)
W = weight_variable([input_length, n_nodes_hl], 'weight' + str(hl_no))
self.weight_var_test = W
b = bias_variable([n_nodes_hl], 'bias' + str(hl_no))
if i == len(self.n_nodes_hl_list) - 2:
layer = tf.add(tf.matmul(layer_input, W), b, name='prediction')
(prediction, prob) = layer
else:
if self.activation_function == 'relu6':
layer = tf.nn.relu6(tf.add(tf.matmul(layer_input, W), b))
elif self.activation_function == 'crelu':
layer = tf.nn.crelu(tf.add(tf.matmul(layer_input, W), b))
elif self.activation_function == 'elu':
layer = tf.nn.elu(tf.add(tf.matmul(layer_input, W), b))
elif self.activation_function == 'softplus':
layer = tf.nn.softplus(tf.add(tf.matmul(layer_input, W), b))
elif self.activation_function == 'softsign':
layer = tf.nn.softsign(tf.add(tf.matmul(layer_input, W), b))
elif self.activation_function == 'pelu':
layer = self.pelu(tf.add(tf.matmul(layer_input, W), b))
else:
layer = tf.nn.relu(tf.add(tf.matmul(layer_input, W), b))
dropout_layer = tf.nn.dropout(layer, self.keep_prob)
(prediction, prob) = dropout_layer
layer_input = self.x
for i in range(len(self.n_nodes_hl_list) - 1):
layer_input = get_layer(self.n_nodes_hl_list[i], self.n_nodes_hl_list[i + 1], i, layer_input)
prob = tf.nn.softmax(layer_input)
(prediction, prob) = (layer_input, prob)
</DeepExtract>
saver = tf.train.Saver()
saver.restore(sess, file_path)
value = sess.run(prob, feed_dict={self.x: X_test, self.keep_prob: self.keep_prob_const})
return value
|
def predict_proba_from_restored(X_test, file_path):
"""
    Predicts y for the given X_test. The path under which the model was stored has to be
    provided.
    :param X_test: The test features
    :param file_path: The path of the saved model (resolved inside the mlp_models folder)
:return: The prediction y for X_test
"""
folder_name = file_path.split('mlp_models/')[1]
mlp_folder_path = '%s/data/fnc-1/mlp_models/' % path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
file_path = mlp_folder_path + folder_name
with tf.Graph().as_default() as g:
with tf.Session(config=self.config) as sess:
self.x = tf.placeholder(tf.float32, shape=[None, len(X_test[0])])
self.y = tf.placeholder(tf.float32, shape=[None, self.n_classes])
self.keep_prob = tf.placeholder(tf.float32)
self.momentum = tf.placeholder(tf.float32)
self.learning_rate_tensor = tf.placeholder(tf.float32)
self.n_nodes_hl_list = [len(X_test[0])]
for layer_size in self.hidden_layers:
self.n_nodes_hl_list.append(layer_size)
self.n_nodes_hl_list.append(self.n_classes)
def get_layer(input_length, n_nodes_hl, hl_no, layer_input):
"""
Returns a layer with the given parameters
:param input_length: Size of the input of the layer
:param n_nodes_hl: Number of nodes the layer should have
:param hl_no: Index of the hidden layer, e.g. 1, 2, 3...
:param weight_init: Method how the weights should be initialized;
'xavier', 'sqrt_n' or leave empty for truncated normal with stddev of 0.1
:param layer_input: The input for the layer; either a variable holding X or the previous layer
:return: A layer with the given parameters
"""
def weight_variable(shape, name):
if self.weight_init == 'xavier':
(prediction, prob) = tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer(seed=self.seed))
if self.weight_init == 'sqrt_n':
(prediction, prob) = tf.get_variable(name, shape, initializer=tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False, seed=self.seed))
else:
(prediction, prob) = tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
def bias_variable(shape, name):
initial = tf.constant(self.bias_init, shape=shape)
(prediction, prob) = tf.Variable(initial, name=name)
W = weight_variable([input_length, n_nodes_hl], 'weight' + str(hl_no))
self.weight_var_test = W
b = bias_variable([n_nodes_hl], 'bias' + str(hl_no))
if i == len(self.n_nodes_hl_list) - 2:
layer = tf.add(tf.matmul(layer_input, W), b, name='prediction')
(prediction, prob) = layer
else:
if self.activation_function == 'relu6':
layer = tf.nn.relu6(tf.add(tf.matmul(layer_input, W), b))
elif self.activation_function == 'crelu':
layer = tf.nn.crelu(tf.add(tf.matmul(layer_input, W), b))
elif self.activation_function == 'elu':
layer = tf.nn.elu(tf.add(tf.matmul(layer_input, W), b))
elif self.activation_function == 'softplus':
layer = tf.nn.softplus(tf.add(tf.matmul(layer_input, W), b))
elif self.activation_function == 'softsign':
layer = tf.nn.softsign(tf.add(tf.matmul(layer_input, W), b))
elif self.activation_function == 'pelu':
layer = self.pelu(tf.add(tf.matmul(layer_input, W), b))
else:
layer = tf.nn.relu(tf.add(tf.matmul(layer_input, W), b))
dropout_layer = tf.nn.dropout(layer, self.keep_prob)
(prediction, prob) = dropout_layer
layer_input = self.x
for i in range(len(self.n_nodes_hl_list) - 1):
layer_input = get_layer(self.n_nodes_hl_list[i], self.n_nodes_hl_list[i + 1], i, layer_input)
prob = tf.nn.softmax(layer_input)
(prediction, prob) = (layer_input, prob)
saver = tf.train.Saver()
saver.restore(sess, file_path)
value = sess.run(prob, feed_dict={self.x: X_test, self.keep_prob: self.keep_prob_const})
return value
|
coling2018_fake-news-challenge
|
positive
|
def get_word_span(context, wordss, start, stop):
<DeepExtract>
spanss = []
cur_idx = 0
for tokens in wordss:
spans = []
for token in tokens:
if context.find(token, cur_idx) < 0:
print(tokens)
print('{} {} {}'.format(token, cur_idx, context))
raise Exception()
cur_idx = context.find(token, cur_idx)
spans.append((cur_idx, cur_idx + len(token)))
cur_idx += len(token)
spanss.append(spans)
spanss = spanss
</DeepExtract>
idxs = []
for (sent_idx, spans) in enumerate(spanss):
for (word_idx, span) in enumerate(spans):
if not (stop <= span[0] or start >= span[1]):
idxs.append((sent_idx, word_idx))
assert len(idxs) > 0, '{} {} {} {}'.format(context, spanss, start, stop)
return (idxs[0], (idxs[-1][0], idxs[-1][1] + 1))
|
def get_word_span(context, wordss, start, stop):
spanss = []
cur_idx = 0
for tokens in wordss:
spans = []
for token in tokens:
if context.find(token, cur_idx) < 0:
print(tokens)
print('{} {} {}'.format(token, cur_idx, context))
raise Exception()
cur_idx = context.find(token, cur_idx)
spans.append((cur_idx, cur_idx + len(token)))
cur_idx += len(token)
spanss.append(spans)
spanss = spanss
idxs = []
for (sent_idx, spans) in enumerate(spanss):
for (word_idx, span) in enumerate(spans):
if not (stop <= span[0] or start >= span[1]):
idxs.append((sent_idx, word_idx))
assert len(idxs) > 0, '{} {} {} {}'.format(context, spanss, start, stop)
return (idxs[0], (idxs[-1][0], idxs[-1][1] + 1))
|
dawn-bench-models
|
positive
|
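A tiny worked example for get_word_span() above, mapping a character span back to (sentence, word) indices (wordss is a list of token lists, one per sentence):
context = 'The cat sat on the mat.'
wordss = [['The', 'cat', 'sat', 'on', 'the', 'mat', '.']]
# Characters 8..14 cover 'sat on', i.e. words 2 and 3 of sentence 0.
start_idx, stop_idx = get_word_span(context, wordss, 8, 14)
print(start_idx, stop_idx)  # (0, 2) (0, 4) -- the stop word index is exclusive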
def getClosestPoint(poly, pt):
closest = (-1, -1)
distTh = 4.0
dist = 1000000000.0
for i in range(poly.size()):
<DeepExtract>
line = QtCore.QLineF(poly[i], pt)
lineLength = line.length()
curDist = lineLength
</DeepExtract>
if curDist < dist:
closest = (i, i)
dist = curDist
if dist <= distTh:
return closest
if self.drawPolyClosed and poly.size() >= 2:
for i in range(poly.size()):
pt1 = poly[i]
j = i + 1
if j == poly.size():
j = 0
pt2 = poly[j]
edge = QtCore.QLineF(pt1, pt2)
normal = edge.normalVector()
normalThroughMouse = QtCore.QLineF(pt.x(), pt.y(), pt.x() + normal.dx(), pt.y() + normal.dy())
intersectionPt = QtCore.QPointF()
intersectionType = edge.intersect(normalThroughMouse, intersectionPt)
if intersectionType == QtCore.QLineF.BoundedIntersection:
<DeepExtract>
line = QtCore.QLineF(intersectionPt, pt)
lineLength = line.length()
curDist = lineLength
</DeepExtract>
if curDist < dist:
closest = (i, j)
dist = curDist
if dist <= distTh:
return closest
return (-1, -1)
|
def getClosestPoint(poly, pt):
closest = (-1, -1)
distTh = 4.0
dist = 1000000000.0
for i in range(poly.size()):
line = QtCore.QLineF(poly[i], pt)
lineLength = line.length()
curDist = lineLength
if curDist < dist:
closest = (i, i)
dist = curDist
if dist <= distTh:
return closest
if self.drawPolyClosed and poly.size() >= 2:
for i in range(poly.size()):
pt1 = poly[i]
j = i + 1
if j == poly.size():
j = 0
pt2 = poly[j]
edge = QtCore.QLineF(pt1, pt2)
normal = edge.normalVector()
normalThroughMouse = QtCore.QLineF(pt.x(), pt.y(), pt.x() + normal.dx(), pt.y() + normal.dy())
intersectionPt = QtCore.QPointF()
intersectionType = edge.intersect(normalThroughMouse, intersectionPt)
if intersectionType == QtCore.QLineF.BoundedIntersection:
line = QtCore.QLineF(intersectionPt, pt)
lineLength = line.length()
curDist = lineLength
if curDist < dist:
closest = (i, j)
dist = curDist
if dist <= distTh:
return closest
return (-1, -1)
|
CBST
|
positive
|
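The Qt-specific edge check in getClosestPoint() above (normal line through the mouse point, BoundedIntersection) amounts to asking whether the perpendicular foot of the point lies on the segment; a plain-math sketch of that test and the resulting distance:
def point_to_segment(px, py, x1, y1, x2, y2):
    dx, dy = x2 - x1, y2 - y1
    seg_len_sq = dx * dx + dy * dy
    if seg_len_sq == 0.0:
        # Degenerate edge: fall back to point-to-point distance.
        return ((px - x1) ** 2 + (py - y1) ** 2) ** 0.5, False
    t = ((px - x1) * dx + (py - y1) * dy) / seg_len_sq
    on_segment = 0.0 <= t <= 1.0  # analogue of QLineF.BoundedIntersection
    fx, fy = x1 + t * dx, y1 + t * dy
    return ((px - fx) ** 2 + (py - fy) ** 2) ** 0.5, on_segment

dist, bounded = point_to_segment(2.0, 1.0, 0.0, 0.0, 4.0, 0.0)  # -> (1.0, True)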
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
<DeepExtract>
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
</DeepExtract>
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
<DeepExtract>
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
</DeepExtract>
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
|
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
|
Danesfield
|
positive
|
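The forward pass of this block is not shown above; assuming standard ResNet BasicBlock wiring, it would look roughly like the sketch below (note that the canonical BasicBlock applies stride only in the first convolution, whereas the inlined code above passes stride to both):
def basic_block_forward(block, x):
    # Residual wiring assumed from the layers created in __init__ above.
    identity = x
    out = block.relu(block.bn1(block.conv1(x)))
    out = block.bn2(block.conv2(out))
    if block.downsample is not None:
        identity = block.downsample(x)
    return block.relu(out + identity)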
def deploy_application(self, app_id, app_info):
<DeepExtract>
df_dir = provided_df_dir
if not df_dir:
df_dir = self._get_path_for_dfs(app_info)
if not os.path.exists(df_dir + '/aws-creds'):
shutil.copytree(home_dir + '/.aws', df_dir + '/aws-creds')
</DeepExtract>
env_vars = common_functions.resolve_environment(app_id, app_info)
app_details = {}
app_data = {}
app_details['task-familyName'] = app_info['app_name']
app_data['status'] = 'registering-task-definition'
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
tagged_image = common_functions.get_image_uri(app_info)
app_ports = common_functions.get_app_port(app_info)
container_port = int(app_ports[0])
host_port = int(app_ports[1])
<DeepExtract>
if not cont_name:
cont_name = app_info['app_name'] + '-' + app_info['app_version']
memory = 250
mem1 = common_functions.get_app_memory(app_info)
if mem1:
memory = int(mem1)
family_name = app_info['app_name']
task_def_arn = ''
revision = str(int(round(time.time() * 1000)))
family_name = family_name + '-' + revision
env_list = []
for (key, value) in env_vars.iteritems():
environment_dict = {}
environment_dict['name'] = key
environment_dict['value'] = value
env_list.append(environment_dict)
if host_port != 80:
env_obj = env_db.Environment().get(app_info['env_id'])
env_output_config = ast.literal_eval(env_obj.output_config)
sec_group_name = env_output_config['http-and-ssh-group-name']
sec_group_id = env_output_config['http-and-ssh-group-id']
vpc_id = env_output_config['vpc_id']
vpc_traffic_block = []
internet_traffic = '0.0.0.0/0'
vpc_traffic_block.append(internet_traffic)
port_list = [host_port]
ECSHandler.awshelper.setup_security_group(vpc_id, vpc_traffic_block, sec_group_id, sec_group_name, port_list)
try:
resp = self.ecs_client.register_task_definition(family=family_name, containerDefinitions=[{'name': cont_name, 'image': tagged_image, 'memory': memory, 'portMappings': [{'containerPort': container_port, 'hostPort': host_port, 'protocol': 'tcp'}], 'environment': env_list}])
task_def_arn = resp['taskDefinition']['taskDefinitionArn']
except Exception as e:
fmlogger.error('Exception encountered in trying to register task definition:%s' % e)
fmlogger.debug('Done registering task definition.')
(task_def_arn, cont_name) = (task_def_arn, cont_name)
</DeepExtract>
app_details['task_def_arn'] = [task_def_arn]
app_details['cont_name'] = cont_name
<DeepExtract>
resource_obj = res_db.Resource().get_resource_for_env_by_type(app_info['env_id'], 'ecs-cluster')
cluster_name = resource_obj.cloud_resource_id
app_details['cluster_name'] = cluster_name
</DeepExtract>
app_details['image_name'] = [tagged_image]
app_details['memory'] = common_functions.get_app_memory(app_info)
app_details['app_folder_name'] = app_info['app_folder_name']
app_details['env_name'] = app_info['env_name']
app_details['container_port'] = container_port
app_details['host_port'] = host_port
app_data['status'] = 'creating-ecs-app-service'
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
app_url = app_ip_url = lb_arn = target_group_arn = listener_arn = ''
try:
<DeepExtract>
env_obj = env_db.Environment().get(app_info['env_id'])
env_output_config = ast.literal_eval(env_obj.output_config)
subnet_string = env_output_config['subnets']
subnet_list = subnet_string.split(',')
sec_group_id = env_output_config['http-and-ssh-group-id']
vpc_id = env_output_config['vpc_id']
cluster_name = self._get_cluster_name(app_info['env_id'])
app_ports = common_functions.get_app_port(app_info)
container_port = app_ports[0]
host_port = app_ports[1]
(app_url, lb_arn, target_group_arn, listener_arn) = ECSHandler.awshelper.create_service(app_info['app_name'], container_port, host_port, vpc_id, subnet_list, sec_group_id, cluster_name, task_def_arn, cont_name)
app_ip_url = self._get_app_url(app_info, cluster_name, host_port)
if not app_url:
app_url = app_ip_url
else:
app_url = 'http://' + app_url
fmlogger.debug('App URL:%s' % app_url)
fmlogger.debug('App IP URL:%s' % app_ip_url)
(app_url, app_ip_url, lb_arn, target_group_arn, listener_arn) = (app_url, app_ip_url, lb_arn, target_group_arn, listener_arn)
</DeepExtract>
except Exception as e:
fmlogger.error(e)
app_details['error'] = str(e)
app_data = {}
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
return
app_details['lb_arn'] = lb_arn
app_details['target_group_arn'] = target_group_arn
app_details['listener_arn'] = listener_arn
app_details['app_url'] = app_url
app_details['app_ip_url'] = app_ip_url
app_data['status'] = 'ecs-app-service-created'
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
app_data['status'] = 'waiting-for-app-to-get-ready'
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
<DeepExtract>
app_status = ''
if common_functions.is_app_ready(app_ip_url, app_id=app_id):
fmlogger.debug('Application is ready.')
app_status = constants.APP_DEPLOYMENT_COMPLETE + ':' + constants.APP_IP_IS_RESPONSIVE
else:
fmlogger.debug('Application could not start properly.')
app_status = constants.APP_LB_NOT_YET_READY + ':' + constants.USE_APP_IP_URL
status = app_status
</DeepExtract>
fmlogger.debug('Application URL:%s' % app_url)
app_data['status'] = status
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
|
def deploy_application(self, app_id, app_info):
df_dir = provided_df_dir
if not df_dir:
df_dir = self._get_path_for_dfs(app_info)
if not os.path.exists(df_dir + '/aws-creds'):
shutil.copytree(home_dir + '/.aws', df_dir + '/aws-creds')
env_vars = common_functions.resolve_environment(app_id, app_info)
app_details = {}
app_data = {}
app_details['task-familyName'] = app_info['app_name']
app_data['status'] = 'registering-task-definition'
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
tagged_image = common_functions.get_image_uri(app_info)
app_ports = common_functions.get_app_port(app_info)
container_port = int(app_ports[0])
host_port = int(app_ports[1])
if not cont_name:
cont_name = app_info['app_name'] + '-' + app_info['app_version']
memory = 250
mem1 = common_functions.get_app_memory(app_info)
if mem1:
memory = int(mem1)
family_name = app_info['app_name']
task_def_arn = ''
revision = str(int(round(time.time() * 1000)))
family_name = family_name + '-' + revision
env_list = []
for (key, value) in env_vars.iteritems():
environment_dict = {}
environment_dict['name'] = key
environment_dict['value'] = value
env_list.append(environment_dict)
if host_port != 80:
env_obj = env_db.Environment().get(app_info['env_id'])
env_output_config = ast.literal_eval(env_obj.output_config)
sec_group_name = env_output_config['http-and-ssh-group-name']
sec_group_id = env_output_config['http-and-ssh-group-id']
vpc_id = env_output_config['vpc_id']
vpc_traffic_block = []
internet_traffic = '0.0.0.0/0'
vpc_traffic_block.append(internet_traffic)
port_list = [host_port]
ECSHandler.awshelper.setup_security_group(vpc_id, vpc_traffic_block, sec_group_id, sec_group_name, port_list)
try:
resp = self.ecs_client.register_task_definition(family=family_name, containerDefinitions=[{'name': cont_name, 'image': tagged_image, 'memory': memory, 'portMappings': [{'containerPort': container_port, 'hostPort': host_port, 'protocol': 'tcp'}], 'environment': env_list}])
task_def_arn = resp['taskDefinition']['taskDefinitionArn']
except Exception as e:
fmlogger.error('Exception encountered in trying to register task definition:%s' % e)
fmlogger.debug('Done registering task definition.')
(task_def_arn, cont_name) = (task_def_arn, cont_name)
app_details['task_def_arn'] = [task_def_arn]
app_details['cont_name'] = cont_name
resource_obj = res_db.Resource().get_resource_for_env_by_type(app_info['env_id'], 'ecs-cluster')
cluster_name = resource_obj.cloud_resource_id
app_details['cluster_name'] = cluster_name
app_details['image_name'] = [tagged_image]
app_details['memory'] = common_functions.get_app_memory(app_info)
app_details['app_folder_name'] = app_info['app_folder_name']
app_details['env_name'] = app_info['env_name']
app_details['container_port'] = container_port
app_details['host_port'] = host_port
app_data['status'] = 'creating-ecs-app-service'
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
app_url = app_ip_url = lb_arn = target_group_arn = listener_arn = ''
try:
env_obj = env_db.Environment().get(app_info['env_id'])
env_output_config = ast.literal_eval(env_obj.output_config)
subnet_string = env_output_config['subnets']
subnet_list = subnet_string.split(',')
sec_group_id = env_output_config['http-and-ssh-group-id']
vpc_id = env_output_config['vpc_id']
cluster_name = self._get_cluster_name(app_info['env_id'])
app_ports = common_functions.get_app_port(app_info)
container_port = app_ports[0]
host_port = app_ports[1]
(app_url, lb_arn, target_group_arn, listener_arn) = ECSHandler.awshelper.create_service(app_info['app_name'], container_port, host_port, vpc_id, subnet_list, sec_group_id, cluster_name, task_def_arn, cont_name)
app_ip_url = self._get_app_url(app_info, cluster_name, host_port)
if not app_url:
app_url = app_ip_url
else:
app_url = 'http://' + app_url
fmlogger.debug('App URL:%s' % app_url)
fmlogger.debug('App IP URL:%s' % app_ip_url)
(app_url, app_ip_url, lb_arn, target_group_arn, listener_arn) = (app_url, app_ip_url, lb_arn, target_group_arn, listener_arn)
except Exception as e:
fmlogger.error(e)
app_details['error'] = str(e)
app_data = {}
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
return
app_details['lb_arn'] = lb_arn
app_details['target_group_arn'] = target_group_arn
app_details['listener_arn'] = listener_arn
app_details['app_url'] = app_url
app_details['app_ip_url'] = app_ip_url
app_data['status'] = 'ecs-app-service-created'
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
app_data['status'] = 'waiting-for-app-to-get-ready'
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
app_status = ''
if common_functions.is_app_ready(app_ip_url, app_id=app_id):
fmlogger.debug('Application is ready.')
app_status = constants.APP_DEPLOYMENT_COMPLETE + ':' + constants.APP_IP_IS_RESPONSIVE
else:
fmlogger.debug('Application could not start properly.')
app_status = constants.APP_LB_NOT_YET_READY + ':' + constants.USE_APP_IP_URL
status = app_status
fmlogger.debug('Application URL:%s' % app_url)
app_data['status'] = status
app_data['output_config'] = str(app_details)
app_db.App().update(app_id, app_data)
|
caastle
|
positive
|
def finalize_stage(self):
"""
finalizes the observation space of the environment after adding all
the objects and visuals in the stage.
:return:
"""
if self._observation_mode == 'pixel':
self._stage_observations = StageObservations(self._rigid_objects, self._visual_objects, self._observation_mode, self._normalize_observations, cameras=self._cameras, camera_indicies=self._camera_indicies)
<DeepExtract>
self._goal_image = self._stage_observations.get_current_goal_image()
return
</DeepExtract>
else:
self._stage_observations = StageObservations(self._rigid_objects, self._visual_objects, self._observation_mode, self._normalize_observations)
return
|
def finalize_stage(self):
"""
finalizes the observation space of the environment after adding all
the objects and visuals in the stage.
:return:
"""
if self._observation_mode == 'pixel':
self._stage_observations = StageObservations(self._rigid_objects, self._visual_objects, self._observation_mode, self._normalize_observations, cameras=self._cameras, camera_indicies=self._camera_indicies)
self._goal_image = self._stage_observations.get_current_goal_image()
return
else:
self._stage_observations = StageObservations(self._rigid_objects, self._visual_objects, self._observation_mode, self._normalize_observations)
return
|
CausalWorld
|
positive
|
def testVlogMisuse(self):
<DeepExtract>
self.assertEquals('', self.PerformSingleLineLint('VLOG(1)'))
</DeepExtract>
<DeepExtract>
self.assertEquals('', self.PerformSingleLineLint('VLOG(99)'))
</DeepExtract>
<DeepExtract>
self.assertEquals('', self.PerformSingleLineLint('LOG(ERROR)'))
</DeepExtract>
<DeepExtract>
self.assertEquals('', self.PerformSingleLineLint('LOG(INFO)'))
</DeepExtract>
<DeepExtract>
self.assertEquals('', self.PerformSingleLineLint('LOG(WARNING)'))
</DeepExtract>
<DeepExtract>
self.assertEquals('', self.PerformSingleLineLint('LOG(FATAL)'))
</DeepExtract>
<DeepExtract>
self.assertEquals('', self.PerformSingleLineLint('LOG(DFATAL)'))
</DeepExtract>
<DeepExtract>
self.assertEquals('', self.PerformSingleLineLint('VLOG(SOMETHINGWEIRD)'))
</DeepExtract>
<DeepExtract>
self.assertEquals('', self.PerformSingleLineLint('MYOWNVLOG(ERROR)'))
</DeepExtract>
errmsg = 'VLOG() should be used with numeric verbosity level. Use LOG() if you want symbolic severity levels. [runtime/vlog] [5]'
<DeepExtract>
self.assertEquals(errmsg, self.PerformSingleLineLint('VLOG(ERROR)'))
</DeepExtract>
<DeepExtract>
self.assertEquals(errmsg, self.PerformSingleLineLint('VLOG(INFO)'))
</DeepExtract>
<DeepExtract>
self.assertEquals(errmsg, self.PerformSingleLineLint('VLOG(WARNING)'))
</DeepExtract>
<DeepExtract>
self.assertEquals(errmsg, self.PerformSingleLineLint('VLOG(FATAL)'))
</DeepExtract>
<DeepExtract>
self.assertEquals(errmsg, self.PerformSingleLineLint('VLOG(DFATAL)'))
</DeepExtract>
<DeepExtract>
self.assertEquals(errmsg, self.PerformSingleLineLint(' VLOG(ERROR)'))
</DeepExtract>
<DeepExtract>
self.assertEquals(errmsg, self.PerformSingleLineLint(' VLOG(INFO)'))
</DeepExtract>
<DeepExtract>
self.assertEquals(errmsg, self.PerformSingleLineLint(' VLOG(WARNING)'))
</DeepExtract>
<DeepExtract>
self.assertEquals(errmsg, self.PerformSingleLineLint(' VLOG(FATAL)'))
</DeepExtract>
<DeepExtract>
self.assertEquals(errmsg, self.PerformSingleLineLint(' VLOG(DFATAL)'))
</DeepExtract>
|
def testVlogMisuse(self):
self.assertEquals('', self.PerformSingleLineLint('VLOG(1)'))
self.assertEquals('', self.PerformSingleLineLint('VLOG(99)'))
self.assertEquals('', self.PerformSingleLineLint('LOG(ERROR)'))
self.assertEquals('', self.PerformSingleLineLint('LOG(INFO)'))
self.assertEquals('', self.PerformSingleLineLint('LOG(WARNING)'))
self.assertEquals('', self.PerformSingleLineLint('LOG(FATAL)'))
self.assertEquals('', self.PerformSingleLineLint('LOG(DFATAL)'))
self.assertEquals('', self.PerformSingleLineLint('VLOG(SOMETHINGWEIRD)'))
self.assertEquals('', self.PerformSingleLineLint('MYOWNVLOG(ERROR)'))
errmsg = 'VLOG() should be used with numeric verbosity level. Use LOG() if you want symbolic severity levels. [runtime/vlog] [5]'
self.assertEquals(errmsg, self.PerformSingleLineLint('VLOG(ERROR)'))
self.assertEquals(errmsg, self.PerformSingleLineLint('VLOG(INFO)'))
self.assertEquals(errmsg, self.PerformSingleLineLint('VLOG(WARNING)'))
self.assertEquals(errmsg, self.PerformSingleLineLint('VLOG(FATAL)'))
self.assertEquals(errmsg, self.PerformSingleLineLint('VLOG(DFATAL)'))
self.assertEquals(errmsg, self.PerformSingleLineLint(' VLOG(ERROR)'))
self.assertEquals(errmsg, self.PerformSingleLineLint(' VLOG(INFO)'))
self.assertEquals(errmsg, self.PerformSingleLineLint(' VLOG(WARNING)'))
self.assertEquals(errmsg, self.PerformSingleLineLint(' VLOG(FATAL)'))
self.assertEquals(errmsg, self.PerformSingleLineLint(' VLOG(DFATAL)'))
|
cpplint
|
positive
|
def __init__(self, base_name, heads, pretrained=True, down_ratio=4, head_conv=256):
super(DLASeg, self).__init__()
assert down_ratio in [2, 4, 8, 16]
self.heads = heads
self.first_level = int(np.log2(down_ratio))
self.base = globals()[base_name](pretrained=pretrained, return_levels=True)
channels = self.base.channels
scales = [2 ** i for i in range(len(channels[self.first_level:]))]
self.dla_up = DLAUp(channels[self.first_level:], scales=scales)
'\n self.fc = nn.Sequential(\n nn.Conv2d(channels[self.first_level], classes, kernel_size=1,\n stride=1, padding=0, bias=True)\n )\n '
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(nn.Conv2d(channels[self.first_level], head_conv, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), nn.Conv2d(head_conv, classes, kernel_size=1, stride=1, padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
<DeepExtract>
for m in fc.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
</DeepExtract>
else:
fc = nn.Conv2d(channels[self.first_level], classes, kernel_size=1, stride=1, padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
<DeepExtract>
for m in fc.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
</DeepExtract>
self.__setattr__(head, fc)
'\n up_factor = 2 ** self.first_level\n if up_factor > 1:\n up = nn.ConvTranspose2d(classes, classes, up_factor * 2,\n stride=up_factor, padding=up_factor // 2,\n output_padding=0, groups=classes,\n bias=False)\n fill_up_weights(up)\n up.weight.requires_grad = False\n else:\n up = Identity()\n self.up = up\n self.softmax = nn.LogSoftmax(dim=1)\n \n\n for m in self.fc.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, BatchNorm):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n '
|
def __init__(self, base_name, heads, pretrained=True, down_ratio=4, head_conv=256):
super(DLASeg, self).__init__()
assert down_ratio in [2, 4, 8, 16]
self.heads = heads
self.first_level = int(np.log2(down_ratio))
self.base = globals()[base_name](pretrained=pretrained, return_levels=True)
channels = self.base.channels
scales = [2 ** i for i in range(len(channels[self.first_level:]))]
self.dla_up = DLAUp(channels[self.first_level:], scales=scales)
'\n self.fc = nn.Sequential(\n nn.Conv2d(channels[self.first_level], classes, kernel_size=1,\n stride=1, padding=0, bias=True)\n )\n '
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(nn.Conv2d(channels[self.first_level], head_conv, kernel_size=3, padding=1, bias=True), nn.ReLU(inplace=True), nn.Conv2d(head_conv, classes, kernel_size=1, stride=1, padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
for m in fc.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
else:
fc = nn.Conv2d(channels[self.first_level], classes, kernel_size=1, stride=1, padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
for m in fc.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
self.__setattr__(head, fc)
'\n up_factor = 2 ** self.first_level\n if up_factor > 1:\n up = nn.ConvTranspose2d(classes, classes, up_factor * 2,\n stride=up_factor, padding=up_factor // 2,\n output_padding=0, groups=classes,\n bias=False)\n fill_up_weights(up)\n up.weight.requires_grad = False\n else:\n up = Identity()\n self.up = up\n self.softmax = nn.LogSoftmax(dim=1)\n \n\n for m in self.fc.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, BatchNorm):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n '
|
CenterNet
|
positive
|
def test_msr(text, msr, expected_value, shift=0, mask=~0, highbit=63, lowbit=0):
"""Test the value of an MSR.
Fails if any CPU does not match expected_value. Pass
expected_value=None to expect a GPF."""
<DeepExtract>
if (highbit != 63 or lowbit != 0) and (shift != 0 or mask != ~0):
raise ValueError('Input parameter usage is limited to "highbit and lowbit" OR "shift and mask".')
uniques = {}
for cpu in bits.cpus():
value = bits.rdmsr(cpu, msr)
if value is not None:
if highbit != 63 or lowbit != 0:
value = (value & (1 << highbit + 1) - 1) >> lowbit
else:
value = value >> shift & mask
uniques.setdefault(value, []).append(cpu)
msr_desc = 'MSR {:#x}'.format(msr)
if shift == 0 and mask == ~0:
if highbit == lowbit:
msr_desc += ' [{:d}]'.format(highbit)
else:
msr_desc += ' [{:d}:{:d}]'.format(highbit, lowbit)
else:
if shift != 0:
msr_desc += ' >> {}'.format(shift)
if mask != ~0:
msr_desc += ' & {:#x}'.format(mask)
desc = []
if len(uniques) > 1 and None not in uniques:
mask = testutil.find_common_mask(uniques.iterkeys(), 64)
desc.append('MSR value is not unique across all logical processors')
desc.append('Common bits for all processors = {0:#018x}'.format(uniques.keys()[0] & mask))
desc.append('Mask of common bits = {0:#018x}'.format(mask))
for value in sorted(uniques.iterkeys()):
cpus = uniques[value]
desc.append(msr_desc + ' = ' + ('GPF' if value is None else '{0:#x}'.format(value)))
desc.append('On {0} CPUs: {1}'.format(len(cpus), testutil.apicid_list(cpus)))
(uniques, desc) = (uniques, desc)
</DeepExtract>
if expected_value is None:
desc[0] += ' (Expected GPF)'
else:
desc[0] += ' (Expected {:#x})'.format(expected_value)
if text:
desc.insert(0, text)
status = testsuite.test(desc[0], len(uniques) == 1 and uniques.keys()[0] == expected_value)
for line in desc[1:]:
testsuite.print_detail(line)
return status
|
def test_msr(text, msr, expected_value, shift=0, mask=~0, highbit=63, lowbit=0):
"""Test the value of an MSR.
Fails if any CPU does not match expected_value. Pass
expected_value=None to expect a GPF."""
if (highbit != 63 or lowbit != 0) and (shift != 0 or mask != ~0):
raise ValueError('Input parameter usage is limited to "highbit and lowbit" OR "shift and mask".')
uniques = {}
for cpu in bits.cpus():
value = bits.rdmsr(cpu, msr)
if value is not None:
if highbit != 63 or lowbit != 0:
value = (value & (1 << highbit + 1) - 1) >> lowbit
else:
value = value >> shift & mask
uniques.setdefault(value, []).append(cpu)
msr_desc = 'MSR {:#x}'.format(msr)
if shift == 0 and mask == ~0:
if highbit == lowbit:
msr_desc += ' [{:d}]'.format(highbit)
else:
msr_desc += ' [{:d}:{:d}]'.format(highbit, lowbit)
else:
if shift != 0:
msr_desc += ' >> {}'.format(shift)
if mask != ~0:
msr_desc += ' & {:#x}'.format(mask)
desc = []
if len(uniques) > 1 and None not in uniques:
mask = testutil.find_common_mask(uniques.iterkeys(), 64)
desc.append('MSR value is not unique across all logical processors')
desc.append('Common bits for all processors = {0:#018x}'.format(uniques.keys()[0] & mask))
desc.append('Mask of common bits = {0:#018x}'.format(mask))
for value in sorted(uniques.iterkeys()):
cpus = uniques[value]
desc.append(msr_desc + ' = ' + ('GPF' if value is None else '{0:#x}'.format(value)))
desc.append('On {0} CPUs: {1}'.format(len(cpus), testutil.apicid_list(cpus)))
(uniques, desc) = (uniques, desc)
if expected_value is None:
desc[0] += ' (Expected GPF)'
else:
desc[0] += ' (Expected {:#x})'.format(expected_value)
if text:
desc.insert(0, text)
status = testsuite.test(desc[0], len(uniques) == 1 and uniques.keys()[0] == expected_value)
for line in desc[1:]:
testsuite.print_detail(line)
return status
|
bits
|
positive
|
def fix_unity_reflection_ps_variant_ps(tree, args):
try:
<DeepExtract>
for line in range(tree.shader_start):
for token in tree[line]:
if not isinstance(token, CPPStyleComment):
continue
if not isinstance(unity_WorldSpaceCameraPos, (tuple, list)):
unity_WorldSpaceCameraPos = (unity_WorldSpaceCameraPos,)
for comment_pattern in unity_WorldSpaceCameraPos:
match = comment_pattern.match(token)
if match is not None:
match = match
raise KeyError()
</DeepExtract>
except KeyError:
<DeepExtract>
if verbosity >= 0:
return debug(*args, **kwargs)
</DeepExtract>
return
_WorldSpaceCameraPos = Register('c' + match.group('constant'))
inv_mvp0 = tree._find_free_reg('c', VS3, desired=180)
inv_mvp1 = Register('c%i' % (inv_mvp0.num + 1))
inv_mvp2 = Register('c%i' % (inv_mvp0.num + 2))
inv_mvp3 = Register('c%i' % (inv_mvp0.num + 3))
try:
<DeepExtract>
for (t, r) in tree.declared:
if t == 'dcl_texcoord8':
if prefix and (not r.startswith(prefix)):
continue
d = r
raise IndexError()
</DeepExtract>
except:
pass
else:
<DeepExtract>
if verbosity >= 0:
return debug(*args, **kwargs)
</DeepExtract>
raise NoFreeRegisters('texcoord8')
try:
<DeepExtract>
for (t, r) in tree.declared:
if t == 'dcl_texcoord9':
if prefix and (not r.startswith(prefix)):
continue
d = r
raise IndexError()
</DeepExtract>
except:
pass
else:
<DeepExtract>
if verbosity >= 0:
return debug(*args, **kwargs)
</DeepExtract>
raise NoFreeRegisters('texcoord9')
try:
<DeepExtract>
for (t, r) in tree.declared:
if t == 'dcl_texcoord10':
if prefix and (not r.startswith(prefix)):
continue
d = r
raise IndexError()
</DeepExtract>
except:
pass
else:
<DeepExtract>
if verbosity >= 0:
return debug(*args, **kwargs)
</DeepExtract>
raise NoFreeRegisters('texcoord10')
t = tree._find_free_reg('r', PS3, desired=31)
_Object2World0 = tree._find_free_reg('v', PS3)
_Object2World1 = tree._find_free_reg('v', PS3)
_Object2World2 = tree._find_free_reg('v', PS3)
tree.insert_decl()
tree.insert_decl('dcl_texcoord8', ['%s' % _Object2World0], comment='New input with _Object2World[0]')
tree.insert_decl('dcl_texcoord9', ['%s' % _Object2World1], comment='New input with _Object2World[1]')
tree.insert_decl('dcl_texcoord10', ['%s' % _Object2World2], comment='New input with _Object2World[2]')
<DeepExtract>
if hasattr(tree, 'stereo_const'):
(stereo_const, _) = (tree.stereo_const, 0)
if isinstance(tree, VertexShader) and args.use_nv_stereo_reg_vs:
tree.stereo_sampler = None
tree.nv_stereo_reg = Register(args.use_nv_stereo_reg_vs)
elif isinstance(tree, VertexShader) and args.stereo_sampler_vs:
tree.stereo_sampler = args.stereo_sampler_vs
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif isinstance(tree, PixelShader) and args.stereo_sampler_ps:
tree.stereo_sampler = args.stereo_sampler_ps
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif 's' in tree.reg_types and tree.def_stereo_sampler in tree.reg_types['s']:
tree.stereo_sampler = tree._find_free_reg('s', None)
debug('WARNING: SHADER ALREADY USES %s! USING %s FOR STEREO SAMPLER INSTEAD!' % (tree.def_stereo_sampler, tree.stereo_sampler))
if isinstance(tree, VertexShader):
acronym = 'VS'
quirk = 257
elif isinstance(tree, PixelShader):
acronym = 'PS'
quirk = 0
else:
raise AssertionError()
if not hasattr(tree, 'ini'):
tree.ini = []
tree.ini.append(('Def%sSampler' % acronym, str(quirk + tree.stereo_sampler.num), 'Shader already uses %s, so use %s instead:' % (tree.def_stereo_sampler, tree.stereo_sampler)))
else:
tree.stereo_sampler = tree.def_stereo_sampler
if args.adjust_multiply and args.adjust_multiply != -1:
w = args.adjust_multiply
tree.stereo_const = tree._find_free_reg('c', None, desired=preferred_stereo_const)
offset = 0
offset += tree.insert_decl()
offset += tree.insert_decl('def', [tree.stereo_const, x, y, z, w])
if tree.stereo_sampler is not None:
offset += tree.insert_decl('dcl_2d', [tree.stereo_sampler])
offset += tree.insert_decl()
(stereo_const, _) = (tree.stereo_const, offset)
</DeepExtract>
pos = tree.decl_end
pos += tree.insert_instr(pos, NewInstruction('texldl', [t, stereo_const.z, tree.stereo_sampler]))
separation = t.x
convergence = t.y
repl_cam_pos = tree._find_free_reg('r', PS3)
clip_space_adj = tree._find_free_reg('r', PS3)
local_space_adj = tree._find_free_reg('r', PS3)
world_space_adj = clip_space_adj
replace_regs = {_WorldSpaceCameraPos: repl_cam_pos}
tree.do_replacements(replace_regs, False)
pos += insert_vanity_comment(args, tree, pos, 'Unity reflection/specular fix (object *pixel* shader variant) inserted with')
pos += tree.insert_instr(pos, NewInstruction('mov', [repl_cam_pos, _WorldSpaceCameraPos]))
pos += tree.insert_instr(pos, NewInstruction('mov', [clip_space_adj, tree.stereo_const.x]))
pos += tree.insert_instr(pos, NewInstruction('mul', [clip_space_adj.x, separation, -convergence]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [local_space_adj.x, inv_mvp0, clip_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [local_space_adj.y, inv_mvp1, clip_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [local_space_adj.z, inv_mvp2, clip_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [local_space_adj.w, inv_mvp3, clip_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [world_space_adj.x, _Object2World0, local_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [world_space_adj.y, _Object2World1, local_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [world_space_adj.z, _Object2World2, local_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('add', [repl_cam_pos.xyz, repl_cam_pos, -world_space_adj]))
pos += tree.insert_instr(pos)
if not hasattr(tree, 'ini'):
tree.ini = []
tree.ini.append(('UseMatrix1', 'true', 'Inverse MVP matrix for _WorldSpaceCameraPos adjustment (pixel shader variant):'))
tree.ini.append(('MatrixReg1', str(inv_mvp0.num), None))
tree.autofixed = True
|
def fix_unity_reflection_ps_variant_ps(tree, args):
try:
for line in range(tree.shader_start):
for token in tree[line]:
if not isinstance(token, CPPStyleComment):
continue
if not isinstance(unity_WorldSpaceCameraPos, (tuple, list)):
unity_WorldSpaceCameraPos = (unity_WorldSpaceCameraPos,)
for comment_pattern in unity_WorldSpaceCameraPos:
match = comment_pattern.match(token)
if match is not None:
match = match
raise KeyError()
except KeyError:
if verbosity >= 0:
return debug(*args, **kwargs)
return
_WorldSpaceCameraPos = Register('c' + match.group('constant'))
inv_mvp0 = tree._find_free_reg('c', VS3, desired=180)
inv_mvp1 = Register('c%i' % (inv_mvp0.num + 1))
inv_mvp2 = Register('c%i' % (inv_mvp0.num + 2))
inv_mvp3 = Register('c%i' % (inv_mvp0.num + 3))
try:
for (t, r) in tree.declared:
if t == 'dcl_texcoord8':
if prefix and (not r.startswith(prefix)):
continue
d = r
raise IndexError()
except:
pass
else:
if verbosity >= 0:
return debug(*args, **kwargs)
raise NoFreeRegisters('texcoord8')
try:
for (t, r) in tree.declared:
if t == 'dcl_texcoord9':
if prefix and (not r.startswith(prefix)):
continue
d = r
raise IndexError()
except:
pass
else:
if verbosity >= 0:
return debug(*args, **kwargs)
raise NoFreeRegisters('texcoord9')
try:
for (t, r) in tree.declared:
if t == 'dcl_texcoord10':
if prefix and (not r.startswith(prefix)):
continue
d = r
raise IndexError()
except:
pass
else:
if verbosity >= 0:
return debug(*args, **kwargs)
raise NoFreeRegisters('texcoord10')
t = tree._find_free_reg('r', PS3, desired=31)
_Object2World0 = tree._find_free_reg('v', PS3)
_Object2World1 = tree._find_free_reg('v', PS3)
_Object2World2 = tree._find_free_reg('v', PS3)
tree.insert_decl()
tree.insert_decl('dcl_texcoord8', ['%s' % _Object2World0], comment='New input with _Object2World[0]')
tree.insert_decl('dcl_texcoord9', ['%s' % _Object2World1], comment='New input with _Object2World[1]')
tree.insert_decl('dcl_texcoord10', ['%s' % _Object2World2], comment='New input with _Object2World[2]')
if hasattr(tree, 'stereo_const'):
(stereo_const, _) = (tree.stereo_const, 0)
if isinstance(tree, VertexShader) and args.use_nv_stereo_reg_vs:
tree.stereo_sampler = None
tree.nv_stereo_reg = Register(args.use_nv_stereo_reg_vs)
elif isinstance(tree, VertexShader) and args.stereo_sampler_vs:
tree.stereo_sampler = args.stereo_sampler_vs
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif isinstance(tree, PixelShader) and args.stereo_sampler_ps:
tree.stereo_sampler = args.stereo_sampler_ps
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif 's' in tree.reg_types and tree.def_stereo_sampler in tree.reg_types['s']:
tree.stereo_sampler = tree._find_free_reg('s', None)
debug('WARNING: SHADER ALREADY USES %s! USING %s FOR STEREO SAMPLER INSTEAD!' % (tree.def_stereo_sampler, tree.stereo_sampler))
if isinstance(tree, VertexShader):
acronym = 'VS'
quirk = 257
elif isinstance(tree, PixelShader):
acronym = 'PS'
quirk = 0
else:
raise AssertionError()
if not hasattr(tree, 'ini'):
tree.ini = []
tree.ini.append(('Def%sSampler' % acronym, str(quirk + tree.stereo_sampler.num), 'Shader already uses %s, so use %s instead:' % (tree.def_stereo_sampler, tree.stereo_sampler)))
else:
tree.stereo_sampler = tree.def_stereo_sampler
if args.adjust_multiply and args.adjust_multiply != -1:
w = args.adjust_multiply
tree.stereo_const = tree._find_free_reg('c', None, desired=preferred_stereo_const)
offset = 0
offset += tree.insert_decl()
offset += tree.insert_decl('def', [tree.stereo_const, x, y, z, w])
if tree.stereo_sampler is not None:
offset += tree.insert_decl('dcl_2d', [tree.stereo_sampler])
offset += tree.insert_decl()
(stereo_const, _) = (tree.stereo_const, offset)
pos = tree.decl_end
pos += tree.insert_instr(pos, NewInstruction('texldl', [t, stereo_const.z, tree.stereo_sampler]))
separation = t.x
convergence = t.y
repl_cam_pos = tree._find_free_reg('r', PS3)
clip_space_adj = tree._find_free_reg('r', PS3)
local_space_adj = tree._find_free_reg('r', PS3)
world_space_adj = clip_space_adj
replace_regs = {_WorldSpaceCameraPos: repl_cam_pos}
tree.do_replacements(replace_regs, False)
pos += insert_vanity_comment(args, tree, pos, 'Unity reflection/specular fix (object *pixel* shader variant) inserted with')
pos += tree.insert_instr(pos, NewInstruction('mov', [repl_cam_pos, _WorldSpaceCameraPos]))
pos += tree.insert_instr(pos, NewInstruction('mov', [clip_space_adj, tree.stereo_const.x]))
pos += tree.insert_instr(pos, NewInstruction('mul', [clip_space_adj.x, separation, -convergence]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [local_space_adj.x, inv_mvp0, clip_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [local_space_adj.y, inv_mvp1, clip_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [local_space_adj.z, inv_mvp2, clip_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [local_space_adj.w, inv_mvp3, clip_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [world_space_adj.x, _Object2World0, local_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [world_space_adj.y, _Object2World1, local_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('dp4', [world_space_adj.z, _Object2World2, local_space_adj]))
pos += tree.insert_instr(pos, NewInstruction('add', [repl_cam_pos.xyz, repl_cam_pos, -world_space_adj]))
pos += tree.insert_instr(pos)
if not hasattr(tree, 'ini'):
tree.ini = []
tree.ini.append(('UseMatrix1', 'true', 'Inverse MVP matrix for _WorldSpaceCameraPos adjustment (pixel shader variant):'))
tree.ini.append(('MatrixReg1', str(inv_mvp0.num), None))
tree.autofixed = True
|
3d-fixes
|
positive
|
def ensure_bucket_has_no_public_access(bucket_name: str, region: str) -> None:
<DeepExtract>
if region in S3CLIENTS:
s3_client = S3CLIENTS[region]
s3_client = boto3.client('s3', region_name=region)
S3CLIENTS[region] = s3_client
s3_client = s3_client
</DeepExtract>
s3_client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration={'BlockPublicAcls': True, 'IgnorePublicAcls': True, 'BlockPublicPolicy': True, 'RestrictPublicBuckets': True})
|
def ensure_bucket_has_no_public_access(bucket_name: str, region: str) -> None:
if region in S3CLIENTS:
s3_client = S3CLIENTS[region]
s3_client = boto3.client('s3', region_name=region)
S3CLIENTS[region] = s3_client
s3_client = s3_client
s3_client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration={'BlockPublicAcls': True, 'IgnorePublicAcls': True, 'BlockPublicPolicy': True, 'RestrictPublicBuckets': True})
|
aws-deployment-framework
|
positive
|
def DataDecoding(self):
<DeepExtract>
self.logger.info('Decoding extracted data...') if info else logger.debug('Decoding extracted data...')
if self.updater:
self.updater.set('Decoding extracted data...')
self.updater._root.update()
</DeepExtract>
self.logger.debug(self.DOWNLOADS)
<DeepExtract>
self.WB = engines.Workbook(self.work_dir, 'REPORT')
self.summary_sheet = self.WB.add_sheet('Summary')
self.WB.write_header(self.summary_sheet, ['Extraction Summary'])
workbook = self.WB
</DeepExtract>
for file_name in filter(None, self.DOWNLOADS):
if self.registry.has_target(file_name):
for deco_class in self.registry.decoders_target(file_name):
file_path = os.path.join(self.output_dir, file_name)
try:
self.logger.info(f'Decoding {file_name} using {deco_class.__name__}')
deco = deco_class(self.work_dir, file_path)
if not deco.template_name:
continue
self.DECODED.append([deco.report_html(), f'{deco.title} ({len(deco.DATA)})'])
deco.report_xlsx(workbook=workbook)
except Exception as e:
logger.error(f'Decoding error for `{os.path.basename(file_name)}`: {e}')
logger.exception(str(e))
|
def DataDecoding(self):
self.logger.info('Decoding extracted data...') if info else logger.debug('Decoding extracted data...')
if self.updater:
self.updater.set('Decoding extracted data...')
self.updater._root.update()
self.logger.debug(self.DOWNLOADS)
self.WB = engines.Workbook(self.work_dir, 'REPORT')
self.summary_sheet = self.WB.add_sheet('Summary')
self.WB.write_header(self.summary_sheet, ['Extraction Summary'])
workbook = self.WB
for file_name in filter(None, self.DOWNLOADS):
if self.registry.has_target(file_name):
for deco_class in self.registry.decoders_target(file_name):
file_path = os.path.join(self.output_dir, file_name)
try:
self.logger.info(f'Decoding {file_name} using {deco_class.__name__}')
deco = deco_class(self.work_dir, file_path)
if not deco.template_name:
continue
self.DECODED.append([deco.report_html(), f'{deco.title} ({len(deco.DATA)})'])
deco.report_xlsx(workbook=workbook)
except Exception as e:
logger.error(f'Decoding error for `{os.path.basename(file_name)}`: {e}')
logger.exception(str(e))
|
andriller
|
positive
|
def __enter__(self):
gc.collect()
self._mem_used = None
<DeepExtract>
self._mem_start = getrusage(RUSAGE_SELF).ru_maxrss
</DeepExtract>
return self
|
def __enter__(self):
gc.collect()
self._mem_used = None
self._mem_start = getrusage(RUSAGE_SELF).ru_maxrss
return self
|
coax
|
positive
|
def baidu_pan_protected_share(url):
print('This share is protected by password!')
inpwd = input('Please provide unlock password: ')
inpwd = inpwd.replace(' ', '').replace('\t', '')
print('Please wait...')
post_pwd = {'pwd': inpwd, 'vcode': None, 'vstr': None}
from http import cookiejar
import time
cookiejar = cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cookiejar))
resp = opener.open('http://pan.baidu.com')
resp = opener.open(url)
init_url = resp.geturl()
verify_url = 'http://pan.baidu.com/share/verify?%s&t=%s&channel=chunlei&clienttype=0&web=1' % (init_url.split('?', 1)[1], int(time.time()))
refer_url = init_url
fake_headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'UTF-8,*;q=0.5', 'Accept-Encoding': 'gzip,deflate,sdch', 'Accept-Language': 'en-US,en;q=0.8', 'Host': 'pan.baidu.com', 'Origin': 'http://pan.baidu.com', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2500.0 Safari/537.36', 'Referer': refer_url}
<DeepExtract>
out_triplet = []
for i in fake_headers:
out_triplet.append((i, fake_headers[i]))
opener.addheaders = out_triplet
</DeepExtract>
pwd_resp = opener.open(verify_url, bytes(parse.urlencode(post_pwd), 'utf-8'))
pwd_resp_str = ungzip(pwd_resp.read()).decode('utf-8')
pwd_res = json.loads(pwd_resp_str)
if pwd_res['errno'] != 0:
raise AssertionError('Server returned an error: %s (Incorrect password?)' % pwd_res['errno'])
pg_resp = opener.open('http://pan.baidu.com/share/link?%s' % init_url.split('?', 1)[1])
content = ungzip(pg_resp.read()).decode('utf-8')
<DeepExtract>
sign_patt = 'sign":"([^"]+)"'
timestamp_patt = 'timestamp":([^"]+),'
appid_patt = 'app_id":"([^"]+)"'
bdstoken_patt = 'bdstoken":"([^"]+)"'
fs_id_patt = 'fs_id":([^"]+),'
uk_patt = 'uk":([^"]+),'
errno_patt = 'errno":([^"]+),'
primary_id_patt = 'shareid":([^"]+),'
sign = match1(content, sign_patt)
timestamp = match1(content, timestamp_patt)
appid = match1(content, appid_patt)
bdstoken = match1(content, bdstoken_patt)
fs_id = match1(content, fs_id_patt)
uk = match1(content, uk_patt)
primary_id = match1(content, primary_id_patt)
(sign, timestamp, bdstoken, appid, primary_id, fs_id, uk) = (sign, timestamp, bdstoken, appid, primary_id, fs_id, uk)
</DeepExtract>
<DeepExtract>
for i in cookiejar:
if i.name == 'BDCLND':
psk = i.value
</DeepExtract>
psk = parse.unquote(psk)
<DeepExtract>
cookie_str = ''
for i in cookiejar:
cookie_str = cookie_str + i.name + '=' + i.value + ';'
fake_headers['Cookie'] = cookie_str[:-1]
</DeepExtract>
return (sign, timestamp, bdstoken, appid, primary_id, fs_id, uk, fake_headers, psk)
|
def baidu_pan_protected_share(url):
print('This share is protected by password!')
inpwd = input('Please provide unlock password: ')
inpwd = inpwd.replace(' ', '').replace('\t', '')
print('Please wait...')
post_pwd = {'pwd': inpwd, 'vcode': None, 'vstr': None}
from http import cookiejar
import time
cookiejar = cookiejar.CookieJar()
opener = request.build_opener(request.HTTPCookieProcessor(cookiejar))
resp = opener.open('http://pan.baidu.com')
resp = opener.open(url)
init_url = resp.geturl()
verify_url = 'http://pan.baidu.com/share/verify?%s&t=%s&channel=chunlei&clienttype=0&web=1' % (init_url.split('?', 1)[1], int(time.time()))
refer_url = init_url
fake_headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'UTF-8,*;q=0.5', 'Accept-Encoding': 'gzip,deflate,sdch', 'Accept-Language': 'en-US,en;q=0.8', 'Host': 'pan.baidu.com', 'Origin': 'http://pan.baidu.com', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2500.0 Safari/537.36', 'Referer': refer_url}
out_triplet = []
for i in fake_headers:
out_triplet.append((i, fake_headers[i]))
opener.addheaders = out_triplet
pwd_resp = opener.open(verify_url, bytes(parse.urlencode(post_pwd), 'utf-8'))
pwd_resp_str = ungzip(pwd_resp.read()).decode('utf-8')
pwd_res = json.loads(pwd_resp_str)
if pwd_res['errno'] != 0:
raise AssertionError('Server returned an error: %s (Incorrect password?)' % pwd_res['errno'])
pg_resp = opener.open('http://pan.baidu.com/share/link?%s' % init_url.split('?', 1)[1])
content = ungzip(pg_resp.read()).decode('utf-8')
sign_patt = 'sign":"([^"]+)"'
timestamp_patt = 'timestamp":([^"]+),'
appid_patt = 'app_id":"([^"]+)"'
bdstoken_patt = 'bdstoken":"([^"]+)"'
fs_id_patt = 'fs_id":([^"]+),'
uk_patt = 'uk":([^"]+),'
errno_patt = 'errno":([^"]+),'
primary_id_patt = 'shareid":([^"]+),'
sign = match1(content, sign_patt)
timestamp = match1(content, timestamp_patt)
appid = match1(content, appid_patt)
bdstoken = match1(content, bdstoken_patt)
fs_id = match1(content, fs_id_patt)
uk = match1(content, uk_patt)
primary_id = match1(content, primary_id_patt)
(sign, timestamp, bdstoken, appid, primary_id, fs_id, uk) = (sign, timestamp, bdstoken, appid, primary_id, fs_id, uk)
for i in cookiejar:
if i.name == 'BDCLND':
psk = i.value
psk = parse.unquote(psk)
cookie_str = ''
for i in cookiejar:
cookie_str = cookie_str + i.name + '=' + i.value + ';'
fake_headers['Cookie'] = cookie_str[:-1]
return (sign, timestamp, bdstoken, appid, primary_id, fs_id, uk, fake_headers, psk)
|
acmpv
|
positive
|
def get_overlap_tracks(self, layer_id: int, lower: int, upper: int, half_track: bool=True) -> Tuple[OptHalfIntType, OptHalfIntType]:
""" Returns the first and last track index that overlaps with the given range.
Parameters
----------
layer_id : int
the layer ID.
lower : int
the lower coordinate.
upper : int
the upper coordinate.
half_track : bool
True to allow half-integer tracks.
Returns
-------
start_track : OptHalfIntType
the first track index. None if no solution.
end_track : OptHalfIntType
the last track index. None if no solution.
"""
even = not half_track
<DeepExtract>
lower_tr = HalfInt(self.find_next_htr(layer_id, lower, tr_width, RoundMode.LESS_EQ, not half_track))
</DeepExtract>
lower_tr = lower_tr.up().up_even(even)
<DeepExtract>
upper_tr = HalfInt(self.find_next_htr(layer_id, upper, tr_width, RoundMode.GREATER_EQ, not half_track))
</DeepExtract>
upper_tr = upper_tr.down().down_even(even)
if upper_tr < lower_tr:
return (None, None)
return (lower_tr, upper_tr)
|
def get_overlap_tracks(self, layer_id: int, lower: int, upper: int, half_track: bool=True) -> Tuple[OptHalfIntType, OptHalfIntType]:
""" Returns the first and last track index that overlaps with the given range.
Parameters
----------
layer_id : int
the layer ID.
lower : int
the lower coordinate.
upper : int
the upper coordinate.
half_track : bool
True to allow half-integer tracks.
Returns
-------
start_track : OptHalfIntType
the first track index. None if no solution.
end_track : OptHalfIntType
the last track index. None if no solution.
"""
even = not half_track
lower_tr = HalfInt(self.find_next_htr(layer_id, lower, tr_width, RoundMode.LESS_EQ, not half_track))
lower_tr = lower_tr.up().up_even(even)
upper_tr = HalfInt(self.find_next_htr(layer_id, upper, tr_width, RoundMode.GREATER_EQ, not half_track))
upper_tr = upper_tr.down().down_even(even)
if upper_tr < lower_tr:
return (None, None)
return (lower_tr, upper_tr)
|
bag
|
positive
|
def fit_box_height_to_text_lines(self):
"""
fit height of box to match text
"""
blf.size(0, self.text_size, self.text_dpi)
<DeepExtract>
line_height = blf.dimensions(0, 'A')[1]
</DeepExtract>
line_count = len(self.text_lines)
self.height = line_count * (line_height + self.spacer) + 2 * self.border
|
def fit_box_height_to_text_lines(self):
"""
fit height of box to match text
"""
blf.size(0, self.text_size, self.text_dpi)
line_height = blf.dimensions(0, 'A')[1]
line_count = len(self.text_lines)
self.height = line_count * (line_height + self.spacer) + 2 * self.border
|
BlenderPro
|
positive
|
def test_import_process(self):
"""Verify importer inserts the correct google bookmarks"""
<DeepExtract>
loc = os.path.dirname(__file__)
del_file = os.path.join(loc, 'googlebookmarks.html')
good_file = open(del_file)
</DeepExtract>
imp = Importer(good_file, username=u'admin')
imp.process()
<DeepExtract>
res = Bmark.query.all()
self.assertEqual(len(res), 9, 'We should have 9 results, we got: ' + str(len(res)))
check_url = 'http://www.alistapart.com/'
check_url_hashed = generate_hash(check_url)
url_description = u'A List Apart "Test"'
found = Bmark.query.filter(Bmark.hash_id == check_url_hashed).one()
self.assertTrue(found.hashed.url == check_url, 'The url should match our search')
self.assertEqual(len(found.tags), 4, 'We should have gotten 4 tags, got: ' + str(len(found.tags)))
self.assertTrue('html' in found.tag_string(), 'html should be a valid tag in the bookmark')
self.assertTrue('make websites' in found.extended, "'make websites' should be in the extended description")
self.assertEqual(url_description, found.description, 'The description of URL should not have any XML/HTML entities')
</DeepExtract>
|
def test_import_process(self):
"""Verify importer inserts the correct google bookmarks"""
loc = os.path.dirname(__file__)
del_file = os.path.join(loc, 'googlebookmarks.html')
good_file = open(del_file)
imp = Importer(good_file, username=u'admin')
imp.process()
res = Bmark.query.all()
self.assertEqual(len(res), 9, 'We should have 9 results, we got: ' + str(len(res)))
check_url = 'http://www.alistapart.com/'
check_url_hashed = generate_hash(check_url)
url_description = u'A List Apart "Test"'
found = Bmark.query.filter(Bmark.hash_id == check_url_hashed).one()
self.assertTrue(found.hashed.url == check_url, 'The url should match our search')
self.assertEqual(len(found.tags), 4, 'We should have gotten 4 tags, got: ' + str(len(found.tags)))
self.assertTrue('html' in found.tag_string(), 'html should be a valid tag in the bookmark')
self.assertTrue('make websites' in found.extended, "'make websites' should be in the extended description")
self.assertEqual(url_description, found.description, 'The description of URL should not have any XML/HTML entities')
|
Bookie
|
positive
|
def cache(join=None):
<DeepExtract>
infoPath = os.path.abspath('./info.plist')
if os.path.exists(infoPath):
info = plistlib.readPlist(infoPath)
try:
bundleID = info['bundleid']
except KeyError:
raise Exception('Bundle ID not defined or readable from info.plist.')
else:
raise Exception('info.plist missing.')
bundleID = bundleID
</DeepExtract>
vPath = os.path.expanduser(os.path.join('~/Library/Caches/com.runningwithcrayons.Alfred-2/Workflow Data/', bundleID))
if not os.path.exists(vPath):
os.makedirs(vPath)
if join:
vPath = os.path.join(vPath, join)
return vPath
|
def cache(join=None):
infoPath = os.path.abspath('./info.plist')
if os.path.exists(infoPath):
info = plistlib.readPlist(infoPath)
try:
bundleID = info['bundleid']
except KeyError:
raise Exception('Bundle ID not defined or readable from info.plist.')
else:
raise Exception('info.plist missing.')
bundleID = bundleID
vPath = os.path.expanduser(os.path.join('~/Library/Caches/com.runningwithcrayons.Alfred-2/Workflow Data/', bundleID))
if not os.path.exists(vPath):
os.makedirs(vPath)
if join:
vPath = os.path.join(vPath, join)
return vPath
|
alfred2-workflow-help
|
positive
|
def test_untrimmed_paired_output(tmp_path, run_paired):
untrimmed1 = os.fspath(tmp_path / 'untrimmed.1.fastq')
untrimmed2 = os.fspath(tmp_path / 'untrimmed.2.fastq')
<DeepExtract>
def _run(params, in1, in2, expected1, expected2, cores):
if type(params) is str:
params = params.split()
params += ['--cores', str(1), '--buffer-size=512']
params += ['--json', os.fspath(['-a', 'TTAGACATAT', '--pair-filter=first', '--untrimmed-output', untrimmed1, '--untrimmed-paired-output', untrimmed2] / 'stats.cutadapt.json')]
(['-a', 'TTAGACATAT', '--pair-filter=first', '--untrimmed-output', untrimmed1, '--untrimmed-paired-output', untrimmed2] / 'r1').mkdir()
(['-a', 'TTAGACATAT', '--pair-filter=first', '--untrimmed-output', untrimmed1, '--untrimmed-paired-output', untrimmed2] / 'r2').mkdir()
path1 = os.fspath(['-a', 'TTAGACATAT', '--pair-filter=first', '--untrimmed-output', untrimmed1, '--untrimmed-paired-output', untrimmed2] / 'r1' / 'paired-trimmed.1.fastq')
path2 = os.fspath(['-a', 'TTAGACATAT', '--pair-filter=first', '--untrimmed-output', untrimmed1, '--untrimmed-paired-output', untrimmed2] / 'r2' / 'paired-trimmed.2.fastq')
params += ['-o', path1, '-p', path2]
params += [datapath('paired.1.fastq'), datapath('paired.2.fastq')]
stats = main(params)
assert_files_equal(cutpath('paired-trimmed.1.fastq'), path1)
assert_files_equal(cutpath('paired-trimmed.2.fastq'), path2)
return stats
return _run
</DeepExtract>
assert_files_equal(cutpath('paired-untrimmed.1.fastq'), untrimmed1)
assert_files_equal(cutpath('paired-untrimmed.2.fastq'), untrimmed2)
|
def test_untrimmed_paired_output(tmp_path, run_paired):
untrimmed1 = os.fspath(tmp_path / 'untrimmed.1.fastq')
untrimmed2 = os.fspath(tmp_path / 'untrimmed.2.fastq')
def _run(params, in1, in2, expected1, expected2, cores):
if type(params) is str:
params = params.split()
params += ['--cores', str(1), '--buffer-size=512']
params += ['--json', os.fspath(['-a', 'TTAGACATAT', '--pair-filter=first', '--untrimmed-output', untrimmed1, '--untrimmed-paired-output', untrimmed2] / 'stats.cutadapt.json')]
(['-a', 'TTAGACATAT', '--pair-filter=first', '--untrimmed-output', untrimmed1, '--untrimmed-paired-output', untrimmed2] / 'r1').mkdir()
(['-a', 'TTAGACATAT', '--pair-filter=first', '--untrimmed-output', untrimmed1, '--untrimmed-paired-output', untrimmed2] / 'r2').mkdir()
path1 = os.fspath(['-a', 'TTAGACATAT', '--pair-filter=first', '--untrimmed-output', untrimmed1, '--untrimmed-paired-output', untrimmed2] / 'r1' / 'paired-trimmed.1.fastq')
path2 = os.fspath(['-a', 'TTAGACATAT', '--pair-filter=first', '--untrimmed-output', untrimmed1, '--untrimmed-paired-output', untrimmed2] / 'r2' / 'paired-trimmed.2.fastq')
params += ['-o', path1, '-p', path2]
params += [datapath('paired.1.fastq'), datapath('paired.2.fastq')]
stats = main(params)
assert_files_equal(cutpath('paired-trimmed.1.fastq'), path1)
assert_files_equal(cutpath('paired-trimmed.2.fastq'), path2)
return stats
return _run
assert_files_equal(cutpath('paired-untrimmed.1.fastq'), untrimmed1)
assert_files_equal(cutpath('paired-untrimmed.2.fastq'), untrimmed2)
|
cutadapt
|
positive
|
def execute_shell(url, cmd, cve, check_header, filename, os_shell_option):
<DeepExtract>
def check_for_shell(url, cmd, cve, check_header, filename):
try:
TAG = ''.join((random.choice(string.ascii_uppercase) for i in range(6)))
cmd = 'echo ' + TAG + '$(' + cmd + ')' + TAG
payload = shellshock_exploitation(cve, cmd)
debug_msg = "Executing the '" + cmd + "' command. "
if settings.VERBOSITY_LEVEL != 0:
sys.stdout.write(settings.print_debug_msg(debug_msg))
if settings.VERBOSITY_LEVEL != 0:
print(settings.SINGLE_WHITESPACE)
print(settings.print_payload(payload))
header = {check_header: payload}
request = _urllib.request.Request(url, None, header)
if check_header == 'User-Agent':
menu.options.agent = payload
log_http_headers.do_check(request)
log_http_headers.check_http_traffic(request)
if menu.options.proxy:
response = proxy.use_proxy(request)
elif menu.options.tor:
response = tor.use_tor(request)
else:
response = _urllib.request.urlopen(request, timeout=settings.TIMEOUT)
if check_header == 'User-Agent':
menu.options.agent = default_user_agent
shell = checks.page_encoding(response, action='decode').rstrip().replace('\n', ' ')
shell = re.findall('' + TAG + '(.*)' + TAG, shell)
shell = ''.join(shell)
(shell, payload) = (shell, payload)
except _urllib.error.URLError as err_msg:
print(settings.SINGLE_WHITESPACE)
print(settings.print_critical_msg(err_msg))
raise SystemExit()
(shell, payload) = check_for_shell(url, cmd, cve, check_header, filename)
if len(shell) == 0:
cmd = '/bin/' + cmd
(shell, payload) = check_for_shell(url, cmd, cve, check_header, filename)
if len(shell) > 0:
pass
elif len(shell) == 0:
cmd = '/usr' + cmd
(shell, payload) = check_for_shell(url, cmd, cve, check_header, filename)
if len(shell) > 0:
pass
(shell, payload) = (shell, payload)
</DeepExtract>
err_msg = 'The ' + os_shell_option.split('_')[0] + ' '
err_msg += os_shell_option.split('_')[1].upper() + ' connection has failed.'
print(settings.print_critical_msg(err_msg))
|
def execute_shell(url, cmd, cve, check_header, filename, os_shell_option):
def check_for_shell(url, cmd, cve, check_header, filename):
try:
TAG = ''.join((random.choice(string.ascii_uppercase) for i in range(6)))
cmd = 'echo ' + TAG + '$(' + cmd + ')' + TAG
payload = shellshock_exploitation(cve, cmd)
debug_msg = "Executing the '" + cmd + "' command. "
if settings.VERBOSITY_LEVEL != 0:
sys.stdout.write(settings.print_debug_msg(debug_msg))
if settings.VERBOSITY_LEVEL != 0:
print(settings.SINGLE_WHITESPACE)
print(settings.print_payload(payload))
header = {check_header: payload}
request = _urllib.request.Request(url, None, header)
if check_header == 'User-Agent':
menu.options.agent = payload
log_http_headers.do_check(request)
log_http_headers.check_http_traffic(request)
if menu.options.proxy:
response = proxy.use_proxy(request)
elif menu.options.tor:
response = tor.use_tor(request)
else:
response = _urllib.request.urlopen(request, timeout=settings.TIMEOUT)
if check_header == 'User-Agent':
menu.options.agent = default_user_agent
shell = checks.page_encoding(response, action='decode').rstrip().replace('\n', ' ')
shell = re.findall('' + TAG + '(.*)' + TAG, shell)
shell = ''.join(shell)
(shell, payload) = (shell, payload)
except _urllib.error.URLError as err_msg:
print(settings.SINGLE_WHITESPACE)
print(settings.print_critical_msg(err_msg))
raise SystemExit()
(shell, payload) = check_for_shell(url, cmd, cve, check_header, filename)
if len(shell) == 0:
cmd = '/bin/' + cmd
(shell, payload) = check_for_shell(url, cmd, cve, check_header, filename)
if len(shell) > 0:
pass
elif len(shell) == 0:
cmd = '/usr' + cmd
(shell, payload) = check_for_shell(url, cmd, cve, check_header, filename)
if len(shell) > 0:
pass
(shell, payload) = (shell, payload)
err_msg = 'The ' + os_shell_option.split('_')[0] + ' '
err_msg += os_shell_option.split('_')[1].upper() + ' connection has failed.'
print(settings.print_critical_msg(err_msg))
|
commix
|
positive
|
def run_workflow(args, argv=None, pipeline=None):
"""run workflow given options in args.
argv is kept for backwards compatibility.
"""
logger = logging.getLogger('cgatcore.pipeline')
logger.debug('starting run_workflow with action {}'.format(args.pipeline_action))
if args.force_run:
if args.force_run == 'all':
forcedtorun_tasks = ruffus.pipeline_get_task_names()
else:
forcedtorun_tasks = args.pipeline_targets
else:
forcedtorun_tasks = []
if not os.path.exists(get_params()['tmpdir']):
logger.warn('local temporary directory {} did not exist - created'.format(get_params()['tmpdir']))
try:
os.makedirs(get_params()['tmpdir'])
except OSError:
pass
logger.info('temporary directory is {}'.format(get_params()['tmpdir']))
run_on_cluster = HAS_DRMAA is True and (not args.without_cluster)
if args.multiprocess is None:
if not run_on_cluster:
args.multiprocess = int(math.ceil(multiprocessing.cpu_count() / 2.0))
else:
args.multiprocess = 40
if args.input_validation:
input_validation(get_params(), sys.argv[0])
elif args.pipeline_action == 'debug':
start_session()
method_name = args.pipeline_targets[0]
caller = get_caller()
method = getattr(caller, method_name)
method(*args.pipeline_targets[1:])
elif args.pipeline_action in ('make', 'show', 'state', 'svg', 'plot', 'dot', 'touch', 'regenerate'):
messenger = None
try:
with cache_os_functions():
if args.pipeline_action == 'make':
if not args.without_cluster and (not HAS_DRMAA) and (not get_params()['testing']):
E.critical('DRMAA API not found so cannot talk to a cluster.')
E.critical('Please use --local to run the pipeline on this host: {}'.format(os.uname()[1]))
sys.exit(-1)
stream = StringIO()
ruffus.pipeline_printout(stream, args.pipeline_targets, verbose=5, pipeline=pipeline, checksum_level=args.ruffus_checksums_level)
messenger = LoggingFilterProgress(stream.getvalue())
logger.addFilter(messenger)
global task
if args.without_cluster:
opts = {'multithread': args.multiprocess}
else:
opts = {'multiprocess': args.multiprocess, 'pool_manager': 'gevent'}
start_session()
logger.info('current directory is {}'.format(os.getcwd()))
ruffus.pipeline_run(args.pipeline_targets, forcedtorun_tasks=forcedtorun_tasks, logger=logger, verbose=args.loglevel, log_exceptions=args.log_exceptions, exceptions_terminate_immediately=args.exceptions_terminate_immediately, checksum_level=args.ruffus_checksums_level, pipeline=pipeline, one_second_per_job=False, **opts)
close_session()
elif args.pipeline_action == 'show':
ruffus.pipeline_printout(args.stdout, args.pipeline_targets, forcedtorun_tasks=forcedtorun_tasks, verbose=args.loglevel, pipeline=pipeline, checksum_level=args.ruffus_checksums_level)
elif args.pipeline_action == 'touch':
ruffus.pipeline_run(args.pipeline_targets, touch_files_only=True, verbose=args.loglevel, pipeline=pipeline, checksum_level=args.ruffus_checksums_level)
elif args.pipeline_action == 'regenerate':
ruffus.pipeline_run(args.pipeline_targets, touch_files_only=args.ruffus_checksums_level, pipeline=pipeline, verbose=args.loglevel)
elif args.pipeline_action == 'svg':
ruffus.pipeline_printout_graph(args.stdout.buffer, args.pipeline_format, args.pipeline_targets, forcedtorun_tasks=forcedtorun_tasks, pipeline=pipeline, checksum_level=args.ruffus_checksums_level)
elif args.pipeline_action == 'state':
<DeepExtract>
(args.ruffus_checksums_level, job_history, pipeline, runtime_data, args.pipeline_targets, forcedtorun_tasks) = _pipeline_prepare_to_run(args.ruffus_checksums_level, history_file, pipeline, runtime_data, args.pipeline_targets, forcedtorun_tasks)
(incomplete_tasks, self_terminated_nodes, dag_violating_edges, dag_violating_nodes) = topologically_sorted_nodes(args.pipeline_targets, forcedtorun_tasks, gnu_make_maximal_rebuild_mode, extra_data_for_signal=[t_verbose_logger(0, 0, None, runtime_data), job_history], signal_callback=is_node_up_to_date)
args.stdout.write('function\tactive\toutput_files\tparents\n')
stack = args.pipeline_targets + forcedtorun_tasks
visited = set()
while stack:
t = stack.pop(0)
visited.add(t)
args.stdout.write('\t'.join(map(str, [t.func_name, t.is_active, ','.join(t.output_filenames) if t.output_filenames else '', ','.join((x.func_name for x in t._get_inward()))])) + '\n')
for tt in t._get_inward():
if tt not in visited:
stack.append(tt)
</DeepExtract>
elif args.pipeline_action == 'plot':
(outf, filename) = tempfile.mkstemp()
ruffus.pipeline_printout_graph(os.fdopen(outf, 'wb'), args.pipeline_format, args.pipeline_targets, pipeline=pipeline, checksum_level=args.ruffus_checksums_level)
execute('inkscape %s' % filename)
os.unlink(filename)
except ruffus.ruffus_exceptions.RethrownJobError as ex:
if not args.debug:
E.error('%i tasks with errors, please see summary below:' % len(ex.args))
for (idx, e) in enumerate(ex.args):
(task, job, error, msg, traceback) = e
if task is None:
msg = str(msg)
else:
task = re.sub('__main__.', '', task)
job = re.sub('\\s', '', job)
if len([x for x in msg.split('\n') if x != '']) > 1:
msg = ''
E.error('%i: Task=%s Error=%s %s: %s' % (idx, task, error, job, msg))
E.error('full traceback is in %s' % args.pipeline_logfile)
logger.error('start of all error messages')
logger.error(ex)
logger.error('end of all error messages')
raise ValueError('pipeline failed with errors') from ex
else:
raise
elif args.pipeline_action == 'dump':
args.stdout.write(json.dumps(get_params()) + '\n')
elif args.pipeline_action == 'printconfig':
E.info('printing out pipeline parameters: ')
p = get_params()
for k in sorted(get_params()):
print(k, '=', p[k])
<DeepExtract>
filenames = get_params()['pipeline_yml']
print('\n List of .yml files used to configure the pipeline')
s = len(filenames)
if s == 0:
print(' No yml files passed!')
elif s >= 1:
print(' %-11s: %s ' % ('Priority', 'File'))
for f in filenames:
if s == 1:
print(' (highest) %s: %s\n' % (s, f))
else:
print(' %-11s: %s ' % (s, f))
s -= 1
</DeepExtract>
elif args.pipeline_action == 'config':
f = sys._getframe(2)
caller = f.f_globals['__file__']
pipeline_path = os.path.splitext(caller)[0]
general_path = os.path.join(os.path.dirname(pipeline_path), 'configuration')
<DeepExtract>
paths = [pipeline_path, general_path]
config_files = ['pipeline.yml']
for dest in config_files:
if os.path.exists(dest):
E.warn('file `%s` already exists - skipped' % dest)
continue
for path in paths:
src = os.path.join(path, dest)
if os.path.exists(src):
shutil.copyfile(src, dest)
E.info('created new configuration file `%s` ' % dest)
break
else:
raise ValueError('default config file `%s` not found in %s' % (config_files, paths))
</DeepExtract>
elif args.pipeline_action == 'clone':
<DeepExtract>
if destdir is None:
destdir = os.path.curdir
get_logger().info('cloning pipeline from %s to %s' % (args.pipeline_targets[0], destdir))
copy_files = ('conf.py', 'pipeline.yml', 'benchmark.yml', 'csvdb')
ignore_prefix = ('report', '_cache', 'export', 'tmp', 'ctmp', '_static', '_templates', 'shell.log', 'pipeline.log', 'results.commit')
def _ignore(p):
for x in ignore_prefix:
if p.startswith(x):
return True
return False
for (root, dirs, files) in os.walk(args.pipeline_targets[0]):
relpath = os.path.relpath(root, args.pipeline_targets[0])
if _ignore(relpath):
continue
for d in dirs:
if _ignore(d):
continue
dest = os.path.join(os.path.join(destdir, relpath, d))
os.mkdir(dest)
s = os.stat(os.path.join(root, d))
os.utime(dest, (s.st_atime, s.st_mtime))
for f in files:
if _ignore(f):
continue
fn = os.path.join(root, f)
dest_fn = os.path.join(destdir, relpath, f)
if f in copy_files:
shutil.copyfile(fn, dest_fn)
else:
os.symlink(os.path.realpath(fn), dest_fn)
</DeepExtract>
else:
raise ValueError('unknown pipeline action %s' % args.pipeline_action)
E.stop(logger=get_logger())
|
def run_workflow(args, argv=None, pipeline=None):
"""run workflow given options in args.
argv is kept for backwards compatibility.
"""
logger = logging.getLogger('cgatcore.pipeline')
logger.debug('starting run_workflow with action {}'.format(args.pipeline_action))
if args.force_run:
if args.force_run == 'all':
forcedtorun_tasks = ruffus.pipeline_get_task_names()
else:
forcedtorun_tasks = args.pipeline_targets
else:
forcedtorun_tasks = []
if not os.path.exists(get_params()['tmpdir']):
logger.warn('local temporary directory {} did not exist - created'.format(get_params()['tmpdir']))
try:
os.makedirs(get_params()['tmpdir'])
except OSError:
pass
logger.info('temporary directory is {}'.format(get_params()['tmpdir']))
run_on_cluster = HAS_DRMAA is True and (not args.without_cluster)
if args.multiprocess is None:
if not run_on_cluster:
args.multiprocess = int(math.ceil(multiprocessing.cpu_count() / 2.0))
else:
args.multiprocess = 40
if args.input_validation:
input_validation(get_params(), sys.argv[0])
elif args.pipeline_action == 'debug':
start_session()
method_name = args.pipeline_targets[0]
caller = get_caller()
method = getattr(caller, method_name)
method(*args.pipeline_targets[1:])
elif args.pipeline_action in ('make', 'show', 'state', 'svg', 'plot', 'dot', 'touch', 'regenerate'):
messenger = None
try:
with cache_os_functions():
if args.pipeline_action == 'make':
if not args.without_cluster and (not HAS_DRMAA) and (not get_params()['testing']):
E.critical('DRMAA API not found so cannot talk to a cluster.')
E.critical('Please use --local to run the pipeline on this host: {}'.format(os.uname()[1]))
sys.exit(-1)
stream = StringIO()
ruffus.pipeline_printout(stream, args.pipeline_targets, verbose=5, pipeline=pipeline, checksum_level=args.ruffus_checksums_level)
messenger = LoggingFilterProgress(stream.getvalue())
logger.addFilter(messenger)
global task
if args.without_cluster:
opts = {'multithread': args.multiprocess}
else:
opts = {'multiprocess': args.multiprocess, 'pool_manager': 'gevent'}
start_session()
logger.info('current directory is {}'.format(os.getcwd()))
ruffus.pipeline_run(args.pipeline_targets, forcedtorun_tasks=forcedtorun_tasks, logger=logger, verbose=args.loglevel, log_exceptions=args.log_exceptions, exceptions_terminate_immediately=args.exceptions_terminate_immediately, checksum_level=args.ruffus_checksums_level, pipeline=pipeline, one_second_per_job=False, **opts)
close_session()
elif args.pipeline_action == 'show':
ruffus.pipeline_printout(args.stdout, args.pipeline_targets, forcedtorun_tasks=forcedtorun_tasks, verbose=args.loglevel, pipeline=pipeline, checksum_level=args.ruffus_checksums_level)
elif args.pipeline_action == 'touch':
ruffus.pipeline_run(args.pipeline_targets, touch_files_only=True, verbose=args.loglevel, pipeline=pipeline, checksum_level=args.ruffus_checksums_level)
elif args.pipeline_action == 'regenerate':
ruffus.pipeline_run(args.pipeline_targets, touch_files_only=args.ruffus_checksums_level, pipeline=pipeline, verbose=args.loglevel)
elif args.pipeline_action == 'svg':
ruffus.pipeline_printout_graph(args.stdout.buffer, args.pipeline_format, args.pipeline_targets, forcedtorun_tasks=forcedtorun_tasks, pipeline=pipeline, checksum_level=args.ruffus_checksums_level)
elif args.pipeline_action == 'state':
(args.ruffus_checksums_level, job_history, pipeline, runtime_data, args.pipeline_targets, forcedtorun_tasks) = _pipeline_prepare_to_run(args.ruffus_checksums_level, history_file, pipeline, runtime_data, args.pipeline_targets, forcedtorun_tasks)
(incomplete_tasks, self_terminated_nodes, dag_violating_edges, dag_violating_nodes) = topologically_sorted_nodes(args.pipeline_targets, forcedtorun_tasks, gnu_make_maximal_rebuild_mode, extra_data_for_signal=[t_verbose_logger(0, 0, None, runtime_data), job_history], signal_callback=is_node_up_to_date)
args.stdout.write('function\tactive\toutput_files\tparents\n')
stack = args.pipeline_targets + forcedtorun_tasks
visited = set()
while stack:
t = stack.pop(0)
visited.add(t)
args.stdout.write('\t'.join(map(str, [t.func_name, t.is_active, ','.join(t.output_filenames) if t.output_filenames else '', ','.join((x.func_name for x in t._get_inward()))])) + '\n')
for tt in t._get_inward():
if tt not in visited:
stack.append(tt)
elif args.pipeline_action == 'plot':
(outf, filename) = tempfile.mkstemp()
ruffus.pipeline_printout_graph(os.fdopen(outf, 'wb'), args.pipeline_format, args.pipeline_targets, pipeline=pipeline, checksum_level=args.ruffus_checksums_level)
execute('inkscape %s' % filename)
os.unlink(filename)
except ruffus.ruffus_exceptions.RethrownJobError as ex:
if not args.debug:
E.error('%i tasks with errors, please see summary below:' % len(ex.args))
for (idx, e) in enumerate(ex.args):
(task, job, error, msg, traceback) = e
if task is None:
msg = str(msg)
else:
task = re.sub('__main__.', '', task)
job = re.sub('\\s', '', job)
if len([x for x in msg.split('\n') if x != '']) > 1:
msg = ''
E.error('%i: Task=%s Error=%s %s: %s' % (idx, task, error, job, msg))
E.error('full traceback is in %s' % args.pipeline_logfile)
logger.error('start of all error messages')
logger.error(ex)
logger.error('end of all error messages')
raise ValueError('pipeline failed with errors') from ex
else:
raise
elif args.pipeline_action == 'dump':
args.stdout.write(json.dumps(get_params()) + '\n')
elif args.pipeline_action == 'printconfig':
E.info('printing out pipeline parameters: ')
p = get_params()
for k in sorted(get_params()):
print(k, '=', p[k])
filenames = get_params()['pipeline_yml']
print('\n List of .yml files used to configure the pipeline')
s = len(filenames)
if s == 0:
print(' No yml files passed!')
elif s >= 1:
print(' %-11s: %s ' % ('Priority', 'File'))
for f in filenames:
if s == 1:
print(' (highest) %s: %s\n' % (s, f))
else:
print(' %-11s: %s ' % (s, f))
s -= 1
elif args.pipeline_action == 'config':
f = sys._getframe(2)
caller = f.f_globals['__file__']
pipeline_path = os.path.splitext(caller)[0]
general_path = os.path.join(os.path.dirname(pipeline_path), 'configuration')
paths = [pipeline_path, general_path]
config_files = ['pipeline.yml']
for dest in config_files:
if os.path.exists(dest):
E.warn('file `%s` already exists - skipped' % dest)
continue
for path in paths:
src = os.path.join(path, dest)
if os.path.exists(src):
shutil.copyfile(src, dest)
E.info('created new configuration file `%s` ' % dest)
break
else:
raise ValueError('default config file `%s` not found in %s' % (config_files, paths))
elif args.pipeline_action == 'clone':
if destdir is None:
destdir = os.path.curdir
get_logger().info('cloning pipeline from %s to %s' % (args.pipeline_targets[0], destdir))
copy_files = ('conf.py', 'pipeline.yml', 'benchmark.yml', 'csvdb')
ignore_prefix = ('report', '_cache', 'export', 'tmp', 'ctmp', '_static', '_templates', 'shell.log', 'pipeline.log', 'results.commit')
def _ignore(p):
for x in ignore_prefix:
if p.startswith(x):
return True
return False
for (root, dirs, files) in os.walk(args.pipeline_targets[0]):
relpath = os.path.relpath(root, args.pipeline_targets[0])
if _ignore(relpath):
continue
for d in dirs:
if _ignore(d):
continue
dest = os.path.join(os.path.join(destdir, relpath, d))
os.mkdir(dest)
s = os.stat(os.path.join(root, d))
os.utime(dest, (s.st_atime, s.st_mtime))
for f in files:
if _ignore(f):
continue
fn = os.path.join(root, f)
dest_fn = os.path.join(destdir, relpath, f)
if f in copy_files:
shutil.copyfile(fn, dest_fn)
else:
os.symlink(os.path.realpath(fn), dest_fn)
else:
raise ValueError('unknown pipeline action %s' % args.pipeline_action)
E.stop(logger=get_logger())
|
cgat-core
|
positive
|
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
<DeepExtract>
label = label if label is not None else self.label
cdf = Cdf(self, label=label)
</DeepExtract>
return cdf.CredibleInterval(percentage)
|
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
label = label if label is not None else self.label
cdf = Cdf(self, label=label)
return cdf.CredibleInterval(percentage)
|
DataExploration
|
positive
|
def LinearSpectrumDict(self, peptide):
<DeepExtract>
massTable = '\n G 57\n A 71\n S 87\n P 97\n V 99\n T 101\n C 103\n I 113\n L 113\n N 114\n D 115\n K 128\n Q 128\n E 129\n M 131\n H 137\n F 147\n R 156\n Y 163\n W 186'
mass = massTable.split()
massDict = {mass[i]: int(mass[i + 1]) for i in range(0, len(mass), 2)}
</DeepExtract>
n = len(peptide)
PrefixMass = [0]
for i in range(n):
PrefixMass.append(PrefixMass[i] + massDict[peptide[i]])
lSpectrumDict = {0: 1}
for i in range(n):
for j in range(i + 1, n + 1):
s = PrefixMass[j] - PrefixMass[i]
lSpectrumDict[s] = lSpectrumDict.get(s, 0) + 1
return lSpectrumDict
|
def LinearSpectrumDict(self, peptide):
massTable = '\n G 57\n A 71\n S 87\n P 97\n V 99\n T 101\n C 103\n I 113\n L 113\n N 114\n D 115\n K 128\n Q 128\n E 129\n M 131\n H 137\n F 147\n R 156\n Y 163\n W 186'
mass = massTable.split()
massDict = {mass[i]: int(mass[i + 1]) for i in range(0, len(mass), 2)}
n = len(peptide)
PrefixMass = [0]
for i in range(n):
PrefixMass.append(PrefixMass[i] + massDict[peptide[i]])
lSpectrumDict = {0: 1}
for i in range(n):
for j in range(i + 1, n + 1):
s = PrefixMass[j] - PrefixMass[i]
lSpectrumDict[s] = lSpectrumDict.get(s, 0) + 1
return lSpectrumDict
|
Coursera-Bioinformatics
|
positive
|
def get_bboxes(self, cls_scores, pts_preds_init, pts_preds_refine, img_metas, cfg, rescale=False, nms=True):
assert len(cls_scores) == len(pts_preds_refine)
bbox_preds_refine = [self.points2bbox(pts_pred_refine) for pts_pred_refine in pts_preds_refine]
num_levels = len(cls_scores)
mlvl_points = [self.point_generators[i].grid_points(cls_scores[i].size()[-2:], self.point_strides[i]) for i in range(num_levels)]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [cls_scores[i][img_id].detach() for i in range(num_levels)]
bbox_pred_list = [bbox_preds_refine[i][img_id].detach() for i in range(num_levels)]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
<DeepExtract>
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
for (i_lvl, (cls_score, bbox_pred, points)) in enumerate(zip(cls_score_list, bbox_pred_list, mlvl_points)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
if self.use_sigmoid_cls:
(max_scores, _) = scores.max(dim=1)
else:
(max_scores, _) = scores[:, 1:].max(dim=1)
(_, topk_inds) = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center
x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1])
y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0])
x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1])
y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0])
bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
if nms:
(det_bboxes, det_labels) = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img)
proposals = (det_bboxes, det_labels)
else:
proposals = (mlvl_bboxes, mlvl_scores)
</DeepExtract>
result_list.append(proposals)
return result_list
|
def get_bboxes(self, cls_scores, pts_preds_init, pts_preds_refine, img_metas, cfg, rescale=False, nms=True):
assert len(cls_scores) == len(pts_preds_refine)
bbox_preds_refine = [self.points2bbox(pts_pred_refine) for pts_pred_refine in pts_preds_refine]
num_levels = len(cls_scores)
mlvl_points = [self.point_generators[i].grid_points(cls_scores[i].size()[-2:], self.point_strides[i]) for i in range(num_levels)]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [cls_scores[i][img_id].detach() for i in range(num_levels)]
bbox_pred_list = [bbox_preds_refine[i][img_id].detach() for i in range(num_levels)]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
for (i_lvl, (cls_score, bbox_pred, points)) in enumerate(zip(cls_score_list, bbox_pred_list, mlvl_points)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
if self.use_sigmoid_cls:
(max_scores, _) = scores.max(dim=1)
else:
(max_scores, _) = scores[:, 1:].max(dim=1)
(_, topk_inds) = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center
x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1])
y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0])
x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1])
y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0])
bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
if nms:
(det_bboxes, det_labels) = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img)
proposals = (det_bboxes, det_labels)
else:
proposals = (mlvl_bboxes, mlvl_scores)
result_list.append(proposals)
return result_list
|
ACSL
|
positive
|
def _bucket_delitem(self, j, k):
<DeepExtract>
firstAvail = None
while True:
if self._is_available(j):
if firstAvail is None:
firstAvail = j
if self._table[j] is None:
(found, s) = (False, firstAvail)
elif k == self._table[j]._key:
(found, s) = (True, j)
j = (j + 1) % len(self._table)
</DeepExtract>
if not found:
raise KeyError('Key Error: ' + repr(k))
self._table[s] = ProbeHashMap._AVAIL
|
def _bucket_delitem(self, j, k):
firstAvail = None
while True:
if self._is_available(j):
if firstAvail is None:
firstAvail = j
if self._table[j] is None:
(found, s) = (False, firstAvail)
elif k == self._table[j]._key:
(found, s) = (True, j)
j = (j + 1) % len(self._table)
if not found:
raise KeyError('Key Error: ' + repr(k))
self._table[s] = ProbeHashMap._AVAIL
|
code-catalog-python
|
positive
|
def fit_transform(raw_documents: List[str]) -> (List[List[str]], sp.csr_matrix):
"""Build the vocabulary and return term-document matrix.
Parameters
----------
raw_documents : List[str]
Returns
-------
(sequences, X) :
sequences: List[List[str]
Tokenized sequences of raw_documents
X: array, [n_samples, n_features]
Document-term matrix.
"""
sequences = self.tokenizer.batch_tokenize(raw_documents)
fixed_vocab = self.vocab is not None
if self.vocab is None:
self.vocab = Vocabulary.from_sequences(sequences)
<DeepExtract>
data = []
indices = []
indptr = [0]
for sequence in sequences:
feature_counter = Counter()
for token in sequence:
if token not in self.vocab.tok2idx.keys():
continue
idx = self.vocab.tok2idx[token]
if self.vocab.use_special_tokens:
idx -= len(SPECIAL_TOKENS)
feature_counter[idx] += 1
indices.extend(feature_counter.keys())
data.extend(feature_counter.values())
indptr.append(len(indices))
feature_dim = self.vocab.size
if self.vocab.use_special_tokens:
feature_dim -= len(SPECIAL_TOKENS)
X = sp.csr_matrix((data, indices, indptr), shape=(len(sequences), feature_dim), dtype=np.int64)
X.sort_indices()
X = X
</DeepExtract>
if self.binary:
X.data.fill(1)
if not fixed_vocab:
n_docs = X.shape[0]
max_doc_count = self.max_doc_freq if isinstance(self.max_doc_freq, int) else int(self.max_doc_freq * n_docs)
min_doc_count = self.min_doc_freq if isinstance(self.min_doc_freq, int) else int(self.min_doc_freq * n_docs)
<DeepExtract>
if max_doc_count >= X.shape[0] and min_doc_count <= 1 and (self.max_features is None):
X = X
doc_freq = np.bincount(X.indices, minlength=X.shape[1])
term_indices = np.arange(X.shape[1])
mask = np.ones(len(doc_freq), dtype=bool)
if max_doc_count < X.shape[0]:
mask &= doc_freq <= max_doc_count
if min_doc_count > 1:
mask &= doc_freq >= min_doc_count
if self.max_features is not None and mask.sum() > self.max_features:
mask_indices = term_indices[mask][:self.max_features]
new_mask = np.zeros(len(doc_freq), dtype=bool)
new_mask[mask_indices] = True
mask = new_mask
for index in np.sort(np.where(np.logical_not(mask))[0])[::-1]:
del self.vocab.idx2tok[index]
self.vocab.build_tok2idx()
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError('After pruning, no terms remain. Try a lower min_freq or a higher max_doc_freq.')
X = X[:, kept_indices]
</DeepExtract>
return (sequences, X)
|
def fit_transform(raw_documents: List[str]) -> (List[List[str]], sp.csr_matrix):
"""Build the vocabulary and return term-document matrix.
Parameters
----------
raw_documents : List[str]
Returns
-------
(sequences, X) :
sequences: List[List[str]
Tokenized sequences of raw_documents
X: array, [n_samples, n_features]
Document-term matrix.
"""
sequences = self.tokenizer.batch_tokenize(raw_documents)
fixed_vocab = self.vocab is not None
if self.vocab is None:
self.vocab = Vocabulary.from_sequences(sequences)
data = []
indices = []
indptr = [0]
for sequence in sequences:
feature_counter = Counter()
for token in sequence:
if token not in self.vocab.tok2idx.keys():
continue
idx = self.vocab.tok2idx[token]
if self.vocab.use_special_tokens:
idx -= len(SPECIAL_TOKENS)
feature_counter[idx] += 1
indices.extend(feature_counter.keys())
data.extend(feature_counter.values())
indptr.append(len(indices))
feature_dim = self.vocab.size
if self.vocab.use_special_tokens:
feature_dim -= len(SPECIAL_TOKENS)
X = sp.csr_matrix((data, indices, indptr), shape=(len(sequences), feature_dim), dtype=np.int64)
X.sort_indices()
X = X
if self.binary:
X.data.fill(1)
if not fixed_vocab:
n_docs = X.shape[0]
max_doc_count = self.max_doc_freq if isinstance(self.max_doc_freq, int) else int(self.max_doc_freq * n_docs)
min_doc_count = self.min_doc_freq if isinstance(self.min_doc_freq, int) else int(self.min_doc_freq * n_docs)
if max_doc_count >= X.shape[0] and min_doc_count <= 1 and (self.max_features is None):
X = X
doc_freq = np.bincount(X.indices, minlength=X.shape[1])
term_indices = np.arange(X.shape[1])
mask = np.ones(len(doc_freq), dtype=bool)
if max_doc_count < X.shape[0]:
mask &= doc_freq <= max_doc_count
if min_doc_count > 1:
mask &= doc_freq >= min_doc_count
if self.max_features is not None and mask.sum() > self.max_features:
mask_indices = term_indices[mask][:self.max_features]
new_mask = np.zeros(len(doc_freq), dtype=bool)
new_mask[mask_indices] = True
mask = new_mask
for index in np.sort(np.where(np.logical_not(mask))[0])[::-1]:
del self.vocab.idx2tok[index]
self.vocab.build_tok2idx()
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError('After pruning, no terms remain. Try a lower min_freq or a higher max_doc_freq.')
X = X[:, kept_indices]
return (sequences, X)
|
cornac
|
positive
|
def get_nondefault_flags_as_str():
"""Returns flags as a string that can be passed as command line arguments.
E.g., returns: "--batch_size=256 --use_synthetic_data" for the following code
block:
```
flags.FLAGS.batch_size = 256
flags.FLAGS.use_synthetic_data = True
print(get_nondefault_flags_as_str())
```
Only flags with nondefault values are returned, as passing default flags as
command line arguments has no effect.
Returns:
A string with the flags, that can be passed as command line arguments to a
program to use the flags.
"""
<DeepExtract>
nondefault_flags = {}
for flag_name in flags.FLAGS:
flag_value = getattr(flags.FLAGS, flag_name)
if flag_name != flags.FLAGS[flag_name].short_name and flag_value != flags.FLAGS[flag_name].default:
nondefault_flags[flag_name] = flag_value
nondefault_flags = nondefault_flags
</DeepExtract>
flag_strings = []
for (name, value) in sorted(nondefault_flags.items()):
if isinstance(value, bool):
flag_str = '--{}'.format(name) if value else '--no{}'.format(name)
elif isinstance(value, list):
flag_str = '--{}={}'.format(name, ','.join(value))
else:
flag_str = '--{}={}'.format(name, value)
flag_strings.append(flag_str)
return ' '.join((shlex_quote(flag_str) for flag_str in flag_strings))
|
def get_nondefault_flags_as_str():
"""Returns flags as a string that can be passed as command line arguments.
E.g., returns: "--batch_size=256 --use_synthetic_data" for the following code
block:
```
flags.FLAGS.batch_size = 256
flags.FLAGS.use_synthetic_data = True
print(get_nondefault_flags_as_str())
```
Only flags with nondefault values are returned, as passing default flags as
command line arguments has no effect.
Returns:
A string with the flags, that can be passed as command line arguments to a
program to use the flags.
"""
nondefault_flags = {}
for flag_name in flags.FLAGS:
flag_value = getattr(flags.FLAGS, flag_name)
if flag_name != flags.FLAGS[flag_name].short_name and flag_value != flags.FLAGS[flag_name].default:
nondefault_flags[flag_name] = flag_value
nondefault_flags = nondefault_flags
flag_strings = []
for (name, value) in sorted(nondefault_flags.items()):
if isinstance(value, bool):
flag_str = '--{}'.format(name) if value else '--no{}'.format(name)
elif isinstance(value, list):
flag_str = '--{}={}'.format(name, ','.join(value))
else:
flag_str = '--{}={}'.format(name, value)
flag_strings.append(flag_str)
return ' '.join((shlex_quote(flag_str) for flag_str in flag_strings))
|
autodist
|
positive
|
def mod_inv(a: int, m: int) -> int:
"""Return the inverse of a (mod m).
m does not have to be a prime.
Based on Extended Euclidean Algorithm, see:
- https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
"""
a %= m
<DeepExtract>
(x0, x1, y0, y1) = (0, 1, 1, 0)
while a != 0:
(q, m, a) = (m // a, a, m % a)
(y0, y1) = (y1, y0 - q * y1)
(x0, x1) = (x1, x0 - q * x1)
(g, x, _) = (m, x0, y0)
</DeepExtract>
if g == 1:
return x % m
err_msg = 'no inverse for '
err_msg += f'{hex_string(a)}' if a > 4294967295 else f'{a}'
err_msg += ' mod '
err_msg += f'{hex_string(m)}' if m > 4294967295 else f'{m}'
raise BTClibValueError(err_msg)
|
def mod_inv(a: int, m: int) -> int:
"""Return the inverse of a (mod m).
m does not have to be a prime.
Based on Extended Euclidean Algorithm, see:
- https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
"""
a %= m
(x0, x1, y0, y1) = (0, 1, 1, 0)
while a != 0:
(q, m, a) = (m // a, a, m % a)
(y0, y1) = (y1, y0 - q * y1)
(x0, x1) = (x1, x0 - q * x1)
(g, x, _) = (m, x0, y0)
if g == 1:
return x % m
err_msg = 'no inverse for '
err_msg += f'{hex_string(a)}' if a > 4294967295 else f'{a}'
err_msg += ' mod '
err_msg += f'{hex_string(m)}' if m > 4294967295 else f'{m}'
raise BTClibValueError(err_msg)
|
btclib
|
positive
|
def ensure_bucket_policy(bucket_name: str, region: str, policy: MutableMapping) -> None:
partition = get_partition(region)
<DeepExtract>
if region in S3CLIENTS:
s3_client = S3CLIENTS[region]
s3_client = boto3.client('s3', region_name=region)
S3CLIENTS[region] = s3_client
s3_client = s3_client
</DeepExtract>
for action in policy['Statement']:
action['Resource'] = [f'arn:{partition}:s3:::{bucket_name}', f'arn:{partition}:s3:::{bucket_name}/*']
s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
|
def ensure_bucket_policy(bucket_name: str, region: str, policy: MutableMapping) -> None:
partition = get_partition(region)
if region in S3CLIENTS:
s3_client = S3CLIENTS[region]
s3_client = boto3.client('s3', region_name=region)
S3CLIENTS[region] = s3_client
s3_client = s3_client
for action in policy['Statement']:
action['Resource'] = [f'arn:{partition}:s3:::{bucket_name}', f'arn:{partition}:s3:::{bucket_name}/*']
s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
|
aws-deployment-framework
|
positive
|
def _tranpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
<DeepExtract>
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
feat = feat
</DeepExtract>
return feat
|
def _tranpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
feat = feat
return feat
|
centerNet-deep-sort
|
positive
|
def __init__(self, root, split_id=0, num_val=100, download=True):
super(New_Complete_Aicity_Car, self).__init__(root, split_id=split_id)
if download:
<DeepExtract>
if self._check_integrity():
print('Files already downloaded and verified')
return
import re
import hashlib
import shutil
from glob import glob
from zipfile import ZipFile
raw_dir = osp.join(self.root, 'raw')
mkdir_if_missing(raw_dir)
fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip')
if osp.isfile(fpath) and hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5:
print('Using downloaded file: ' + fpath)
else:
raise RuntimeError('Please download the dataset manually from {} to {}'.format(self.url, fpath))
exdir = osp.join(raw_dir, 'Market-1501-v15.09.15')
if not osp.isdir(exdir):
print('Extracting zip file')
with ZipFile(fpath) as z:
z.extractall(path=raw_dir)
images_dir = osp.join(self.root, 'images')
mkdir_if_missing(images_dir)
identities = [[[] for _ in range(6)] for _ in range(1502)]
def register(subdir, pattern=re.compile('([-\\d]+)_c(\\d)')):
fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg')))
pids = set()
for fpath in fpaths:
fname = osp.basename(fpath)
(pid, cam) = map(int, pattern.search(fname).groups())
if pid == -1:
continue
assert 0 <= pid <= 1501
assert 1 <= cam <= 6
cam -= 1
pids.add(pid)
fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(identities[pid][cam]))
identities[pid][cam].append(fname)
shutil.copy(fpath, osp.join(images_dir, fname))
return pids
trainval_pids = register('bounding_box_train')
gallery_pids = register('bounding_box_test')
query_pids = register('query')
assert query_pids <= gallery_pids
assert trainval_pids.isdisjoint(gallery_pids)
meta = {'name': 'New_Complete_Aicity_Car', 'shot': 'multiple', 'num_cameras': 41, 'identities': identities}
write_json(meta, osp.join(self.root, 'meta.json'))
splits = [{'trainval': sorted(list(trainval_pids)), 'query': sorted(list(query_pids)), 'gallery': sorted(list(gallery_pids))}]
write_json(splits, osp.join(self.root, 'splits.json'))
</DeepExtract>
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted. ' + 'You can use download=True to download it.')
self.load(num_val)
|
def __init__(self, root, split_id=0, num_val=100, download=True):
super(New_Complete_Aicity_Car, self).__init__(root, split_id=split_id)
if download:
if self._check_integrity():
print('Files already downloaded and verified')
return
import re
import hashlib
import shutil
from glob import glob
from zipfile import ZipFile
raw_dir = osp.join(self.root, 'raw')
mkdir_if_missing(raw_dir)
fpath = osp.join(raw_dir, 'Market-1501-v15.09.15.zip')
if osp.isfile(fpath) and hashlib.md5(open(fpath, 'rb').read()).hexdigest() == self.md5:
print('Using downloaded file: ' + fpath)
else:
raise RuntimeError('Please download the dataset manually from {} to {}'.format(self.url, fpath))
exdir = osp.join(raw_dir, 'Market-1501-v15.09.15')
if not osp.isdir(exdir):
print('Extracting zip file')
with ZipFile(fpath) as z:
z.extractall(path=raw_dir)
images_dir = osp.join(self.root, 'images')
mkdir_if_missing(images_dir)
identities = [[[] for _ in range(6)] for _ in range(1502)]
def register(subdir, pattern=re.compile('([-\\d]+)_c(\\d)')):
fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpg')))
pids = set()
for fpath in fpaths:
fname = osp.basename(fpath)
(pid, cam) = map(int, pattern.search(fname).groups())
if pid == -1:
continue
assert 0 <= pid <= 1501
assert 1 <= cam <= 6
cam -= 1
pids.add(pid)
fname = '{:08d}_{:02d}_{:04d}.jpg'.format(pid, cam, len(identities[pid][cam]))
identities[pid][cam].append(fname)
shutil.copy(fpath, osp.join(images_dir, fname))
return pids
trainval_pids = register('bounding_box_train')
gallery_pids = register('bounding_box_test')
query_pids = register('query')
assert query_pids <= gallery_pids
assert trainval_pids.isdisjoint(gallery_pids)
meta = {'name': 'New_Complete_Aicity_Car', 'shot': 'multiple', 'num_cameras': 41, 'identities': identities}
write_json(meta, osp.join(self.root, 'meta.json'))
splits = [{'trainval': sorted(list(trainval_pids)), 'query': sorted(list(query_pids)), 'gallery': sorted(list(gallery_pids))}]
write_json(splits, osp.join(self.root, 'splits.json'))
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted. ' + 'You can use download=True to download it.')
self.load(num_val)
|
AIC2020_ReID
|
positive
|
def render_template(template: str, props: Json, more_props: Iterable[Json]=(), tags: TagsTuple=default_tags) -> str:
"""
Render given provided template with given property values.
:param template: the template string.
:param props: the properties to populate.
:param more_props: additional property maps
:param tags: the tags to identify the template
:return: the rendered template string.
"""
def json_stringify(data: Any, text: bool=False) -> Union[bytes, ByteString]:
if isinstance(data, ByteString) and (not text):
return data
elif isinstance(data, str):
return data.encode()
elif isinstance(data, (list, dict)):
return json.dumps(data).encode()
else:
return f'{data}'.encode()
<DeepExtract>
</DeepExtract>
return cast(str, rendered)
|
def render_template(template: str, props: Json, more_props: Iterable[Json]=(), tags: TagsTuple=default_tags) -> str:
"""
Render given provided template with given property values.
:param template: the template string.
:param props: the properties to populate.
:param more_props: additional property maps
:param tags: the tags to identify the template
:return: the rendered template string.
"""
def json_stringify(data: Any, text: bool=False) -> Union[bytes, ByteString]:
if isinstance(data, ByteString) and (not text):
return data
elif isinstance(data, str):
return data.encode()
elif isinstance(data, (list, dict)):
return json.dumps(data).encode()
else:
return f'{data}'.encode()
return cast(str, rendered)
|
cloudkeeper
|
positive
|
def call_without_this(this, *args):
inargs = []
for a in args_in_idx:
inargs.append(args[a])
try:
result = mth(*inargs)
if args_out == 1:
args[args_out_idx[0]][0] = result
elif args_out != 0:
if len(result) != args_out:
msg = 'Method should have returned a %s-tuple' % args_out
raise ValueError(msg)
for (i, value) in enumerate(result):
args[args_out_idx[i]][0] = value
except ReturnHRESULT as err:
(hresult, text) = err.args
return ReportError(text, iid=interface._iid_, clsid=clsid, hresult=hresult)
except COMError as err:
(hr, text, details) = err.args
_error('Exception in %s.%s implementation:', interface.__name__, mthname, exc_info=True)
try:
(descr, source, helpfile, helpcontext, progid) = details
except (ValueError, TypeError):
msg = str(details)
else:
msg = '%s: %s' % (source, descr)
<DeepExtract>
if hr is None:
hr = 2147483648
if hr & 2147483648:
hr = hr
hr = hr & 65535 | 2147942400
</DeepExtract>
return ReportError(msg, iid=interface._iid_, clsid=clsid, hresult=hr)
except WindowsError as details:
_error('Exception in %s.%s implementation:', interface.__name__, mthname, exc_info=True)
<DeepExtract>
if winerror(details) is None:
hr = 2147483648
if winerror(details) & 2147483648:
hr = winerror(details)
hr = winerror(details) & 65535 | 2147942400
</DeepExtract>
return ReportException(hr, interface._iid_, clsid=clsid)
except E_NotImplemented:
_warning('Unimplemented method %s.%s called', interface.__name__, mthname)
return E_NOTIMPL
except:
_error('Exception in %s.%s implementation:', interface.__name__, mthname, exc_info=True)
return ReportException(E_FAIL, interface._iid_, clsid=clsid)
return S_OK
|
def call_without_this(this, *args):
inargs = []
for a in args_in_idx:
inargs.append(args[a])
try:
result = mth(*inargs)
if args_out == 1:
args[args_out_idx[0]][0] = result
elif args_out != 0:
if len(result) != args_out:
msg = 'Method should have returned a %s-tuple' % args_out
raise ValueError(msg)
for (i, value) in enumerate(result):
args[args_out_idx[i]][0] = value
except ReturnHRESULT as err:
(hresult, text) = err.args
return ReportError(text, iid=interface._iid_, clsid=clsid, hresult=hresult)
except COMError as err:
(hr, text, details) = err.args
_error('Exception in %s.%s implementation:', interface.__name__, mthname, exc_info=True)
try:
(descr, source, helpfile, helpcontext, progid) = details
except (ValueError, TypeError):
msg = str(details)
else:
msg = '%s: %s' % (source, descr)
if hr is None:
hr = 2147483648
if hr & 2147483648:
hr = hr
hr = hr & 65535 | 2147942400
return ReportError(msg, iid=interface._iid_, clsid=clsid, hresult=hr)
except WindowsError as details:
_error('Exception in %s.%s implementation:', interface.__name__, mthname, exc_info=True)
if winerror(details) is None:
hr = 2147483648
if winerror(details) & 2147483648:
hr = winerror(details)
hr = winerror(details) & 65535 | 2147942400
return ReportException(hr, interface._iid_, clsid=clsid)
except E_NotImplemented:
_warning('Unimplemented method %s.%s called', interface.__name__, mthname)
return E_NOTIMPL
except:
_error('Exception in %s.%s implementation:', interface.__name__, mthname, exc_info=True)
return ReportException(E_FAIL, interface._iid_, clsid=clsid)
return S_OK
|
comtypes
|
positive
|
@add_to_dict(static_method, ('boost::intrusive::bhtraits', 'to_value_ptr'))
def f(vtt, node_rptr):
def get_hook_type(value_t, tag_t):
"""Get a base hook type of a type value_t corresponding to a type tag_t"""
value_base_types = (field.type for field in value_t.fields() if field.is_base_class)
for value_base_type in value_base_types:
for field in value_base_type.fields():
if field.is_base_class and template_name(field.type) == 'boost::intrusive::generic_hook':
hooktags_struct = get_inner_type(field.type, 'hooktags')
hook_tag = get_inner_type(hooktags_struct, 'tag')
if get_basic_type(hook_tag) == get_basic_type(tag_t):
return value_base_type
assert False, 'no subclass hook with tag: ' + str(tag_t.strip_typedefs())
value_type = vtt.template_argument(0)
tag_type = vtt.template_argument(3)
<DeepExtract>
value_base_types = (field.type for field in value_type.fields() if field.is_base_class)
for value_base_type in value_base_types:
for field in value_base_type.fields():
if field.is_base_class and template_name(field.type) == 'boost::intrusive::generic_hook':
hooktags_struct = get_inner_type(field.type, 'hooktags')
hook_tag = get_inner_type(hooktags_struct, 'tag')
if get_basic_type(hook_tag) == get_basic_type(tag_type):
hook_type = value_base_type
assert False, 'no subclass hook with tag: ' + str(tag_type.strip_typedefs())
</DeepExtract>
hook_ptr = node_rptr.cast(hook_type.pointer())
value_ptr = hook_ptr.cast(value_type.pointer())
return value_ptr
|
@add_to_dict(static_method, ('boost::intrusive::bhtraits', 'to_value_ptr'))
def f(vtt, node_rptr):
def get_hook_type(value_t, tag_t):
"""Get a base hook type of a type value_t corresponding to a type tag_t"""
value_base_types = (field.type for field in value_t.fields() if field.is_base_class)
for value_base_type in value_base_types:
for field in value_base_type.fields():
if field.is_base_class and template_name(field.type) == 'boost::intrusive::generic_hook':
hooktags_struct = get_inner_type(field.type, 'hooktags')
hook_tag = get_inner_type(hooktags_struct, 'tag')
if get_basic_type(hook_tag) == get_basic_type(tag_t):
return value_base_type
assert False, 'no subclass hook with tag: ' + str(tag_t.strip_typedefs())
value_type = vtt.template_argument(0)
tag_type = vtt.template_argument(3)
value_base_types = (field.type for field in value_type.fields() if field.is_base_class)
for value_base_type in value_base_types:
for field in value_base_type.fields():
if field.is_base_class and template_name(field.type) == 'boost::intrusive::generic_hook':
hooktags_struct = get_inner_type(field.type, 'hooktags')
hook_tag = get_inner_type(hooktags_struct, 'tag')
if get_basic_type(hook_tag) == get_basic_type(tag_type):
hook_type = value_base_type
assert False, 'no subclass hook with tag: ' + str(tag_type.strip_typedefs())
hook_ptr = node_rptr.cast(hook_type.pointer())
value_ptr = hook_ptr.cast(value_type.pointer())
return value_ptr
|
Boost-Pretty-Printer
|
positive
|
def _inc_path(self):
""":returns: The path of the next sibling of a given node path."""
newpos = self._str2int(self.path[-self.steplen:]) + 1
<DeepExtract>
key = newpos.numconv_obj().int2str(num)
</DeepExtract>
if len(key) > self.steplen:
raise PathOverflow(_("Path Overflow from: '%s'" % (self.path,)))
return '{0}{1}{2}'.format(self.path[:-self.steplen], self.alphabet[0] * (self.steplen - len(key)), key)
|
def _inc_path(self):
""":returns: The path of the next sibling of a given node path."""
newpos = self._str2int(self.path[-self.steplen:]) + 1
key = newpos.numconv_obj().int2str(num)
if len(key) > self.steplen:
raise PathOverflow(_("Path Overflow from: '%s'" % (self.path,)))
return '{0}{1}{2}'.format(self.path[:-self.steplen], self.alphabet[0] * (self.steplen - len(key)), key)
|
django-treebeard
|
positive
|
def event(self, event):
if event.type() == QtCore.QEvent.ToolTip:
<DeepExtract>
x = event.pos().x()
color = self._color_at_x(x)
if color is not None:
QtGui.QToolTip.showText(event.globalPos(), color.verbose())
</DeepExtract>
return True
else:
return super(Scratchpad, self).event(event)
|
def event(self, event):
if event.type() == QtCore.QEvent.ToolTip:
x = event.pos().x()
color = self._color_at_x(x)
if color is not None:
QtGui.QToolTip.showText(event.globalPos(), color.verbose())
return True
else:
return super(Scratchpad, self).event(event)
|
color-palette
|
positive
|
def op_mul(self, i):
<DeepExtract>
l = i.args.split(', ', 2)
if len(l) == 3:
(dst, src0, src1) = l
else:
(dst, src0, src1) = [l[0]] + l
</DeepExtract>
rn = self.reg_numbers[src0]
<DeepExtract>
l = src1.split(', ', 1)
if len(l) == 2:
t = l[1].split(' ')
if len(t) == 1:
a = self._reg_or_literal(l[0])
b = int(t[0], 0) & 4294967295
sF = lambda : ror(a(), b)
a = self._reg_or_literal(l[0])
b = self._reg_or_literal(t[1])
if t[0] == 'lsl':
sF = lambda : lsl(a(), b())
if t[0] == 'lsr':
sF = lambda : lsr(a(), b())
if t[0] == 'asr':
sF = lambda : asr(a(), b())
if t[0] == 'rol':
sF = lambda : rol(a(), b())
if t[0] == 'ror':
sF = lambda : ror(a(), b())
a = self._reg_or_literal(src1)
sF = lambda : (a(), 0)
</DeepExtract>
<DeepExtract>
if dst == 'pc':
def fn(r):
self._branch = r & 4294967294
self.thumb = r & 1
dF = fn
else:
rn = self.reg_numbers[dst]
def fn(r):
self.regs[rn] = r & 4294967295
dF = fn
</DeepExtract>
def fn():
dF(self.regs[rn] * sF()[0])
return fn
|
def op_mul(self, i):
l = i.args.split(', ', 2)
if len(l) == 3:
(dst, src0, src1) = l
else:
(dst, src0, src1) = [l[0]] + l
rn = self.reg_numbers[src0]
l = src1.split(', ', 1)
if len(l) == 2:
t = l[1].split(' ')
if len(t) == 1:
a = self._reg_or_literal(l[0])
b = int(t[0], 0) & 4294967295
sF = lambda : ror(a(), b)
a = self._reg_or_literal(l[0])
b = self._reg_or_literal(t[1])
if t[0] == 'lsl':
sF = lambda : lsl(a(), b())
if t[0] == 'lsr':
sF = lambda : lsr(a(), b())
if t[0] == 'asr':
sF = lambda : asr(a(), b())
if t[0] == 'rol':
sF = lambda : rol(a(), b())
if t[0] == 'ror':
sF = lambda : ror(a(), b())
a = self._reg_or_literal(src1)
sF = lambda : (a(), 0)
if dst == 'pc':
def fn(r):
self._branch = r & 4294967294
self.thumb = r & 1
dF = fn
else:
rn = self.reg_numbers[dst]
def fn(r):
self.regs[rn] = r & 4294967295
dF = fn
def fn():
dF(self.regs[rn] * sF()[0])
return fn
|
coastermelt
|
positive
|
@BACKBONE_REGISTRY.register()
def alexnet(pretrained=True, **kwargs):
model = AlexNet()
if pretrained:
<DeepExtract>
pretrain_dict = model_zoo.load_url(model_urls['alexnet'])
model.load_state_dict(pretrain_dict, strict=False)
</DeepExtract>
return model
|
@BACKBONE_REGISTRY.register()
def alexnet(pretrained=True, **kwargs):
model = AlexNet()
if pretrained:
pretrain_dict = model_zoo.load_url(model_urls['alexnet'])
model.load_state_dict(pretrain_dict, strict=False)
return model
|
Dassl.pytorch
|
positive
|
def extract_column_names(text):
<DeepExtract>
level = 0
0 = text.find('(', 0)
if 0 == -1:
(begin, end) = (-1, -1)
begin_pos = 0
while 0 < len(text):
if text[0] == '(':
level += 1
elif text[0] == ')':
level -= 1
if level == 0:
(begin, end) = (begin_pos, 0)
0 += 1
(begin, end) = (begin_pos, -1)
</DeepExtract>
while end != -1:
text = text[:begin] + text[end + 1:]
<DeepExtract>
level = 0
begin = text.find('(', begin)
if begin == -1:
(begin, end) = (-1, -1)
begin_pos = begin
while begin < len(text):
if text[begin] == '(':
level += 1
elif text[begin] == ')':
level -= 1
if level == 0:
(begin, end) = (begin_pos, begin)
begin += 1
(begin, end) = (begin_pos, -1)
</DeepExtract>
keywords = ['check', 'constraint']
columns = []
for statement in text.split(','):
if statement.split()[0] in keywords:
continue
s = statement.split()
colname = s[0]
<DeepExtract>
s[1] = s[1].lower()
types = [('integer', 'int'), ('uuid', 'uuid'), ('varchar', 'str'), ('text', 'str'), ('char', 'str'), ('serial', 'int'), ('timestamp', 'datetime'), ('date', 'date'), ('time', 'time'), ('smallint', 'int'), ('int', 'int'), ('boolean', 'bool')]
for (old, new) in types:
if s[1].startswith(old):
coltype = new
print('Type not found for', s[1], '. Assuming str')
coltype = 'str'
</DeepExtract>
columns.append((colname, coltype))
return columns
|
def extract_column_names(text):
level = 0
0 = text.find('(', 0)
if 0 == -1:
(begin, end) = (-1, -1)
begin_pos = 0
while 0 < len(text):
if text[0] == '(':
level += 1
elif text[0] == ')':
level -= 1
if level == 0:
(begin, end) = (begin_pos, 0)
0 += 1
(begin, end) = (begin_pos, -1)
while end != -1:
text = text[:begin] + text[end + 1:]
level = 0
begin = text.find('(', begin)
if begin == -1:
(begin, end) = (-1, -1)
begin_pos = begin
while begin < len(text):
if text[begin] == '(':
level += 1
elif text[begin] == ')':
level -= 1
if level == 0:
(begin, end) = (begin_pos, begin)
begin += 1
(begin, end) = (begin_pos, -1)
keywords = ['check', 'constraint']
columns = []
for statement in text.split(','):
if statement.split()[0] in keywords:
continue
s = statement.split()
colname = s[0]
s[1] = s[1].lower()
types = [('integer', 'int'), ('uuid', 'uuid'), ('varchar', 'str'), ('text', 'str'), ('char', 'str'), ('serial', 'int'), ('timestamp', 'datetime'), ('date', 'date'), ('time', 'time'), ('smallint', 'int'), ('int', 'int'), ('boolean', 'bool')]
for (old, new) in types:
if s[1].startswith(old):
coltype = new
print('Type not found for', s[1], '. Assuming str')
coltype = 'str'
columns.append((colname, coltype))
return columns
|
bard
|
positive
|
def from_config(item: Any) -> Any:
"""Instantiate item from config.
Raises
------
ValueError
If item is not a valid config (unexpected eval method)
"""
if isinstance(item, dict):
mode = item.get(EVAL, CALL)
params = {key: value for (key, value) in item.items() if key != EVAL}
if mode == CALL:
if TYPE in params:
<DeepExtract>
parts = params[TYPE].split('.')
module = '.'.join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
cls_or_fn = m
</DeepExtract>
<DeepExtract>
if isinstance(params.get(POSITIONAL), dict):
mode = params.get(POSITIONAL).get(EVAL, CALL)
params = {key: value for (key, value) in params.get(POSITIONAL).items() if key != EVAL}
if mode == CALL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
try:
args = cls_or_fn(*args, **kwargs) if args else cls_or_fn(**kwargs)
except (ValueError, TypeError) as e:
raise type(e)(f'Error while calling {cls_or_fn})') from e
else:
args = {key: from_config(value) for (key, value) in params.items()}
if mode == PARTIAL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
args = functools.partial(cls_or_fn, *args, **kwargs) if args else functools.partial(cls_or_fn, **kwargs)
else:
instantiated = from_config(params)
def _partial(**kwargs):
args = {**instantiated, **kwargs}
args = _partial
if mode is None:
args = params
raise ValueError(f"Unexpected evaluation mode: '{mode}' in item {params.get(POSITIONAL)}")
if isinstance(params.get(POSITIONAL), list):
args = [from_config(it) for it in params.get(POSITIONAL)]
if isinstance(params.get(POSITIONAL), tuple):
args = tuple((from_config(it) for it in params.get(POSITIONAL)))
args = params.get(POSITIONAL)
</DeepExtract>
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
try:
return cls_or_fn(*args, **kwargs) if args else cls_or_fn(**kwargs)
except (ValueError, TypeError) as e:
raise type(e)(f'Error while calling {cls_or_fn})') from e
else:
return {key: from_config(value) for (key, value) in params.items()}
if mode == PARTIAL:
if TYPE in params:
<DeepExtract>
parts = params[TYPE].split('.')
module = '.'.join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
cls_or_fn = m
</DeepExtract>
<DeepExtract>
if isinstance(params.get(POSITIONAL), dict):
mode = params.get(POSITIONAL).get(EVAL, CALL)
params = {key: value for (key, value) in params.get(POSITIONAL).items() if key != EVAL}
if mode == CALL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
try:
args = cls_or_fn(*args, **kwargs) if args else cls_or_fn(**kwargs)
except (ValueError, TypeError) as e:
raise type(e)(f'Error while calling {cls_or_fn})') from e
else:
args = {key: from_config(value) for (key, value) in params.items()}
if mode == PARTIAL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
args = functools.partial(cls_or_fn, *args, **kwargs) if args else functools.partial(cls_or_fn, **kwargs)
else:
instantiated = from_config(params)
def _partial(**kwargs):
args = {**instantiated, **kwargs}
args = _partial
if mode is None:
args = params
raise ValueError(f"Unexpected evaluation mode: '{mode}' in item {params.get(POSITIONAL)}")
if isinstance(params.get(POSITIONAL), list):
args = [from_config(it) for it in params.get(POSITIONAL)]
if isinstance(params.get(POSITIONAL), tuple):
args = tuple((from_config(it) for it in params.get(POSITIONAL)))
args = params.get(POSITIONAL)
</DeepExtract>
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
return functools.partial(cls_or_fn, *args, **kwargs) if args else functools.partial(cls_or_fn, **kwargs)
else:
<DeepExtract>
if isinstance(params, dict):
mode = params.get(EVAL, CALL)
params = {key: value for (key, value) in params.items() if key != EVAL}
if mode == CALL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
try:
instantiated = cls_or_fn(*args, **kwargs) if args else cls_or_fn(**kwargs)
except (ValueError, TypeError) as e:
raise type(e)(f'Error while calling {cls_or_fn})') from e
else:
instantiated = {key: from_config(value) for (key, value) in params.items()}
if mode == PARTIAL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
instantiated = functools.partial(cls_or_fn, *args, **kwargs) if args else functools.partial(cls_or_fn, **kwargs)
else:
instantiated = from_config(params)
def _partial(**kwargs):
instantiated = {**instantiated, **kwargs}
instantiated = _partial
if mode is None:
instantiated = params
raise ValueError(f"Unexpected evaluation mode: '{mode}' in item {params}")
if isinstance(params, list):
instantiated = [from_config(it) for it in params]
if isinstance(params, tuple):
instantiated = tuple((from_config(it) for it in params))
instantiated = params
</DeepExtract>
def _partial(**kwargs):
return {**instantiated, **kwargs}
return _partial
if mode is None:
return params
raise ValueError(f"Unexpected evaluation mode: '{mode}' in item {item}")
if isinstance(item, list):
return [from_config(it) for it in item]
if isinstance(item, tuple):
return tuple((from_config(it) for it in item))
return item
|
def from_config(item: Any) -> Any:
"""Instantiate item from config.
Raises
------
ValueError
If item is not a valid config (unexpected eval method)
"""
if isinstance(item, dict):
mode = item.get(EVAL, CALL)
params = {key: value for (key, value) in item.items() if key != EVAL}
if mode == CALL:
if TYPE in params:
parts = params[TYPE].split('.')
module = '.'.join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
cls_or_fn = m
if isinstance(params.get(POSITIONAL), dict):
mode = params.get(POSITIONAL).get(EVAL, CALL)
params = {key: value for (key, value) in params.get(POSITIONAL).items() if key != EVAL}
if mode == CALL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
try:
args = cls_or_fn(*args, **kwargs) if args else cls_or_fn(**kwargs)
except (ValueError, TypeError) as e:
raise type(e)(f'Error while calling {cls_or_fn})') from e
else:
args = {key: from_config(value) for (key, value) in params.items()}
if mode == PARTIAL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
args = functools.partial(cls_or_fn, *args, **kwargs) if args else functools.partial(cls_or_fn, **kwargs)
else:
instantiated = from_config(params)
def _partial(**kwargs):
args = {**instantiated, **kwargs}
args = _partial
if mode is None:
args = params
raise ValueError(f"Unexpected evaluation mode: '{mode}' in item {params.get(POSITIONAL)}")
if isinstance(params.get(POSITIONAL), list):
args = [from_config(it) for it in params.get(POSITIONAL)]
if isinstance(params.get(POSITIONAL), tuple):
args = tuple((from_config(it) for it in params.get(POSITIONAL)))
args = params.get(POSITIONAL)
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
try:
return cls_or_fn(*args, **kwargs) if args else cls_or_fn(**kwargs)
except (ValueError, TypeError) as e:
raise type(e)(f'Error while calling {cls_or_fn})') from e
else:
return {key: from_config(value) for (key, value) in params.items()}
if mode == PARTIAL:
if TYPE in params:
parts = params[TYPE].split('.')
module = '.'.join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
cls_or_fn = m
if isinstance(params.get(POSITIONAL), dict):
mode = params.get(POSITIONAL).get(EVAL, CALL)
params = {key: value for (key, value) in params.get(POSITIONAL).items() if key != EVAL}
if mode == CALL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
try:
args = cls_or_fn(*args, **kwargs) if args else cls_or_fn(**kwargs)
except (ValueError, TypeError) as e:
raise type(e)(f'Error while calling {cls_or_fn})') from e
else:
args = {key: from_config(value) for (key, value) in params.items()}
if mode == PARTIAL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
args = functools.partial(cls_or_fn, *args, **kwargs) if args else functools.partial(cls_or_fn, **kwargs)
else:
instantiated = from_config(params)
def _partial(**kwargs):
args = {**instantiated, **kwargs}
args = _partial
if mode is None:
args = params
raise ValueError(f"Unexpected evaluation mode: '{mode}' in item {params.get(POSITIONAL)}")
if isinstance(params.get(POSITIONAL), list):
args = [from_config(it) for it in params.get(POSITIONAL)]
if isinstance(params.get(POSITIONAL), tuple):
args = tuple((from_config(it) for it in params.get(POSITIONAL)))
args = params.get(POSITIONAL)
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
return functools.partial(cls_or_fn, *args, **kwargs) if args else functools.partial(cls_or_fn, **kwargs)
else:
if isinstance(params, dict):
mode = params.get(EVAL, CALL)
params = {key: value for (key, value) in params.items() if key != EVAL}
if mode == CALL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
try:
instantiated = cls_or_fn(*args, **kwargs) if args else cls_or_fn(**kwargs)
except (ValueError, TypeError) as e:
raise type(e)(f'Error while calling {cls_or_fn})') from e
else:
instantiated = {key: from_config(value) for (key, value) in params.items()}
if mode == PARTIAL:
if TYPE in params:
cls_or_fn = _import(params[TYPE])
args = from_config(params.get(POSITIONAL))
kwargs = {key: from_config(value) for (key, value) in params.items() if key not in {TYPE, POSITIONAL}}
instantiated = functools.partial(cls_or_fn, *args, **kwargs) if args else functools.partial(cls_or_fn, **kwargs)
else:
instantiated = from_config(params)
def _partial(**kwargs):
instantiated = {**instantiated, **kwargs}
instantiated = _partial
if mode is None:
instantiated = params
raise ValueError(f"Unexpected evaluation mode: '{mode}' in item {params}")
if isinstance(params, list):
instantiated = [from_config(it) for it in params]
if isinstance(params, tuple):
instantiated = tuple((from_config(it) for it in params))
instantiated = params
def _partial(**kwargs):
return {**instantiated, **kwargs}
return _partial
if mode is None:
return params
raise ValueError(f"Unexpected evaluation mode: '{mode}' in item {item}")
if isinstance(item, list):
return [from_config(it) for it in item]
if isinstance(item, tuple):
return tuple((from_config(it) for it in item))
return item
|
deepr
|
positive
|
ahkab
|
positive
|
||
def test_component_generate(tmp_trestle_dir: pathlib.Path, monkeypatch: MonkeyPatch) -> None:
"""Test component generate."""
def _assemble_and_check_ac1_contents(assem_name: str, assemble_cmd: str, new_prose: str, add_comp: bool) -> None:
test_utils.execute_command_and_assert(assemble_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
(assem_component, _) = model_utils.ModelUtils.load_model_for_class(tmp_trestle_dir, assem_name, comp.ComponentDefinition)
for ii in range(3):
assert assem_component.components[0].control_implementations[0].implemented_requirements[0].set_parameters[0].values[ii] == f'inserted value {ii}'
statement = assem_component.components[0].control_implementations[0].implemented_requirements[0].statements[0]
assert statement.description == new_prose
assert ControlInterface.get_status_from_props(statement).state == const.STATUS_IMPLEMENTED
if add_comp:
assert assem_component.components[2].title == 'comp_new'
comp_name = test_utils.setup_component_generate(tmp_trestle_dir)
ac1_path = tmp_trestle_dir / 'md_comp/comp_aa/comp_prof_aa/ac/ac-1.md'
(orig_component, _) = model_utils.ModelUtils.load_model_for_class(tmp_trestle_dir, comp_name, comp.ComponentDefinition)
generate_cmd = f'trestle author component-generate -n {comp_name} -o {md_path}'
test_utils.execute_command_and_assert(generate_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
<DeepExtract>
assert test_utils.confirm_text_in_file(ac1_path, 'enter one of:', 'ac-1 from comp aa')
assert test_utils.confirm_text_in_file(ac1_path, 'ac-1 from comp aa', 'Status: implemented')
assert test_utils.confirm_text_in_file(ac1_path, '- comp_rule_aa_1', 'Status: partial')
assert test_utils.confirm_text_in_file(ac1_path, 'ac-1_prm_3:', '- set by comp aa imp req')
markdown_processor = MarkdownProcessor()
(header, _) = markdown_processor.read_markdown_wo_processing(ac1_path)
assert header[const.PARAM_VALUES_TAG]['ac-1_prm_1'] == ['prof_aa val 1']
rules = header[const.COMP_DEF_RULES_TAG]['comp_aa']
assert len(rules) == 2
assert rules[0] == {'name': 'top_shared_rule_1', 'description': 'top shared rule 1 in aa'}
assert rules[1] == {'name': 'comp_rule_aa_1', 'description': 'comp rule aa 1'}
vals = header[const.COMP_DEF_RULES_PARAM_VALS_TAG]['comp_aa']
assert len(vals) == 1
assert vals[0]['name'] == 'shared_param_1'
assert vals[0]['values'] == ['shared_param_1_aa_opt_1']
check_common_contents(header)
</DeepExtract>
file_checker = test_utils.FileChecker(tmp_trestle_dir / md_path)
generate_cmd = f'trestle author component-generate -n {comp_name} -o {md_path}'
test_utils.execute_command_and_assert(generate_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
assert file_checker.files_unchanged()
assem_name = 'assem_comp'
assemble_cmd = f'trestle author component-assemble -m {md_path} -n {comp_name} -o {assem_name}'
test_utils.execute_command_and_assert(assemble_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
(assem_component, assem_comp_path) = model_utils.ModelUtils.load_model_for_class(tmp_trestle_dir, assem_name, comp.ComponentDefinition)
creation_time = assem_comp_path.stat().st_mtime
orig_component.components[1].control_implementations[0].implemented_requirements[0].statements.pop()
assert model_utils.ModelUtils.models_are_equivalent(orig_component, assem_component, True)
test_utils.execute_command_and_assert(assemble_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
assert creation_time == assem_comp_path.stat().st_mtime
new_text = ' component-values:\n - inserted value 0\n - inserted value 1\n - inserted value 2\n'
file_utils.insert_text_in_file(ac1_path, '- shared_param_1_aa_opt_1', new_text)
test_utils.substitute_text_in_file(ac1_path, '### Implementation Status: partial', f'### Implementation Status: {const.STATUS_IMPLEMENTED}')
new_prose = 'new prose\nmultiline too'
test_utils.substitute_text_in_file(ac1_path, 'statement prose for part a. from comp aa', new_prose)
<DeepExtract>
test_utils.execute_command_and_assert(assemble_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
(assem_component, _) = model_utils.ModelUtils.load_model_for_class(tmp_trestle_dir, assem_name, comp.ComponentDefinition)
for ii in range(3):
assert assem_component.components[0].control_implementations[0].implemented_requirements[0].set_parameters[0].values[ii] == f'inserted value {ii}'
statement = assem_component.components[0].control_implementations[0].implemented_requirements[0].statements[0]
assert statement.description == new_prose
assert ControlInterface.get_status_from_props(statement).state == const.STATUS_IMPLEMENTED
if False:
assert assem_component.components[2].title == 'comp_new'
</DeepExtract>
<DeepExtract>
ac_path = tmp_trestle_dir / 'md_comp' / 'comp_new/ac'
ac_path.mkdir(parents=True, exist_ok=True)
new_ac1_path = ac_path / 'ac-1.md'
shutil.copyfile(str(ac1_path), str(new_ac1_path))
</DeepExtract>
<DeepExtract>
test_utils.execute_command_and_assert(assemble_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
(assem_component, _) = model_utils.ModelUtils.load_model_for_class(tmp_trestle_dir, assem_name, comp.ComponentDefinition)
for ii in range(3):
assert assem_component.components[0].control_implementations[0].implemented_requirements[0].set_parameters[0].values[ii] == f'inserted value {ii}'
statement = assem_component.components[0].control_implementations[0].implemented_requirements[0].statements[0]
assert statement.description == new_prose
assert ControlInterface.get_status_from_props(statement).state == const.STATUS_IMPLEMENTED
if True:
assert assem_component.components[2].title == 'comp_new'
</DeepExtract>
|
def test_component_generate(tmp_trestle_dir: pathlib.Path, monkeypatch: MonkeyPatch) -> None:
"""Test component generate."""
def _assemble_and_check_ac1_contents(assem_name: str, assemble_cmd: str, new_prose: str, add_comp: bool) -> None:
test_utils.execute_command_and_assert(assemble_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
(assem_component, _) = model_utils.ModelUtils.load_model_for_class(tmp_trestle_dir, assem_name, comp.ComponentDefinition)
for ii in range(3):
assert assem_component.components[0].control_implementations[0].implemented_requirements[0].set_parameters[0].values[ii] == f'inserted value {ii}'
statement = assem_component.components[0].control_implementations[0].implemented_requirements[0].statements[0]
assert statement.description == new_prose
assert ControlInterface.get_status_from_props(statement).state == const.STATUS_IMPLEMENTED
if add_comp:
assert assem_component.components[2].title == 'comp_new'
comp_name = test_utils.setup_component_generate(tmp_trestle_dir)
ac1_path = tmp_trestle_dir / 'md_comp/comp_aa/comp_prof_aa/ac/ac-1.md'
(orig_component, _) = model_utils.ModelUtils.load_model_for_class(tmp_trestle_dir, comp_name, comp.ComponentDefinition)
generate_cmd = f'trestle author component-generate -n {comp_name} -o {md_path}'
test_utils.execute_command_and_assert(generate_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
assert test_utils.confirm_text_in_file(ac1_path, 'enter one of:', 'ac-1 from comp aa')
assert test_utils.confirm_text_in_file(ac1_path, 'ac-1 from comp aa', 'Status: implemented')
assert test_utils.confirm_text_in_file(ac1_path, '- comp_rule_aa_1', 'Status: partial')
assert test_utils.confirm_text_in_file(ac1_path, 'ac-1_prm_3:', '- set by comp aa imp req')
markdown_processor = MarkdownProcessor()
(header, _) = markdown_processor.read_markdown_wo_processing(ac1_path)
assert header[const.PARAM_VALUES_TAG]['ac-1_prm_1'] == ['prof_aa val 1']
rules = header[const.COMP_DEF_RULES_TAG]['comp_aa']
assert len(rules) == 2
assert rules[0] == {'name': 'top_shared_rule_1', 'description': 'top shared rule 1 in aa'}
assert rules[1] == {'name': 'comp_rule_aa_1', 'description': 'comp rule aa 1'}
vals = header[const.COMP_DEF_RULES_PARAM_VALS_TAG]['comp_aa']
assert len(vals) == 1
assert vals[0]['name'] == 'shared_param_1'
assert vals[0]['values'] == ['shared_param_1_aa_opt_1']
check_common_contents(header)
file_checker = test_utils.FileChecker(tmp_trestle_dir / md_path)
generate_cmd = f'trestle author component-generate -n {comp_name} -o {md_path}'
test_utils.execute_command_and_assert(generate_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
assert file_checker.files_unchanged()
assem_name = 'assem_comp'
assemble_cmd = f'trestle author component-assemble -m {md_path} -n {comp_name} -o {assem_name}'
test_utils.execute_command_and_assert(assemble_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
(assem_component, assem_comp_path) = model_utils.ModelUtils.load_model_for_class(tmp_trestle_dir, assem_name, comp.ComponentDefinition)
creation_time = assem_comp_path.stat().st_mtime
orig_component.components[1].control_implementations[0].implemented_requirements[0].statements.pop()
assert model_utils.ModelUtils.models_are_equivalent(orig_component, assem_component, True)
test_utils.execute_command_and_assert(assemble_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
assert creation_time == assem_comp_path.stat().st_mtime
new_text = ' component-values:\n - inserted value 0\n - inserted value 1\n - inserted value 2\n'
file_utils.insert_text_in_file(ac1_path, '- shared_param_1_aa_opt_1', new_text)
test_utils.substitute_text_in_file(ac1_path, '### Implementation Status: partial', f'### Implementation Status: {const.STATUS_IMPLEMENTED}')
new_prose = 'new prose\nmultiline too'
test_utils.substitute_text_in_file(ac1_path, 'statement prose for part a. from comp aa', new_prose)
test_utils.execute_command_and_assert(assemble_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
(assem_component, _) = model_utils.ModelUtils.load_model_for_class(tmp_trestle_dir, assem_name, comp.ComponentDefinition)
for ii in range(3):
assert assem_component.components[0].control_implementations[0].implemented_requirements[0].set_parameters[0].values[ii] == f'inserted value {ii}'
statement = assem_component.components[0].control_implementations[0].implemented_requirements[0].statements[0]
assert statement.description == new_prose
assert ControlInterface.get_status_from_props(statement).state == const.STATUS_IMPLEMENTED
if False:
assert assem_component.components[2].title == 'comp_new'
ac_path = tmp_trestle_dir / 'md_comp' / 'comp_new/ac'
ac_path.mkdir(parents=True, exist_ok=True)
new_ac1_path = ac_path / 'ac-1.md'
shutil.copyfile(str(ac1_path), str(new_ac1_path))
test_utils.execute_command_and_assert(assemble_cmd, CmdReturnCodes.SUCCESS.value, monkeypatch)
(assem_component, _) = model_utils.ModelUtils.load_model_for_class(tmp_trestle_dir, assem_name, comp.ComponentDefinition)
for ii in range(3):
assert assem_component.components[0].control_implementations[0].implemented_requirements[0].set_parameters[0].values[ii] == f'inserted value {ii}'
statement = assem_component.components[0].control_implementations[0].implemented_requirements[0].statements[0]
assert statement.description == new_prose
assert ControlInterface.get_status_from_props(statement).state == const.STATUS_IMPLEMENTED
if True:
assert assem_component.components[2].title == 'comp_new'
|
compliance-trestle
|
positive
|
def fit(self, generator, steps, batch_size):
num_samples = steps * batch_size
centroid_indices = sorted(self.random_generator.permutation(steps * batch_size)[:self.num_treatments + 1])
if self.with_exposure:
self.dosage_centroids = []
for treatment_idx in range(self.num_treatments):
dosage_centroid_indices = sorted(self.random_generator.permutation(num_samples)[:self.num_archetypes_per_treatment])
self.dosage_centroids.append(self.get_from_generator_with_offsets(generator, dosage_centroid_indices))
for dosage_idx in range(self.num_archetypes_per_treatment):
min_response = self.random_generator.normal(0.0, 0.1)
self.dosage_centroids[treatment_idx][dosage_idx] += (min_response,)
<DeepExtract>
from drnet.data_access.generator import get_last_id_set
(centroids_tmp, current_idx) = ([], 0)
while len(centroid_indices) != 0:
(x, _) = next(generator)
ids = get_last_id_set()
while len(centroid_indices) != 0 and centroid_indices[0] <= current_idx + len(x[0]):
next_index = centroid_indices[0]
del centroid_indices[0]
is_last_treatment = len(centroid_indices) == 0
if is_last_treatment and True:
response_mean_of_mean = 1 - self.response_mean_of_mean
else:
response_mean_of_mean = self.response_mean_of_mean
response_mean = clip_percentage(self.random_generator.normal(response_mean_of_mean, self.response_std_of_mean))
response_std = clip_percentage(self.random_generator.normal(self.response_mean_of_std, self.response_std_of_std)) + 0.025
gene_loci_indices = np.arange(len(x[0][next_index]))
rnaseq_data = self.data_access.get_entry_with_id(ids[next_index])[1]['rnaseq'][1]
centroid_data = (gene_loci_indices, rnaseq_data[gene_loci_indices], response_mean, response_std)
centroids_tmp.append(centroid_data)
current_idx += len(x[0])
self.centroids = centroids_tmp
</DeepExtract>
self.assignment_cache = {}
|
def fit(self, generator, steps, batch_size):
num_samples = steps * batch_size
centroid_indices = sorted(self.random_generator.permutation(steps * batch_size)[:self.num_treatments + 1])
if self.with_exposure:
self.dosage_centroids = []
for treatment_idx in range(self.num_treatments):
dosage_centroid_indices = sorted(self.random_generator.permutation(num_samples)[:self.num_archetypes_per_treatment])
self.dosage_centroids.append(self.get_from_generator_with_offsets(generator, dosage_centroid_indices))
for dosage_idx in range(self.num_archetypes_per_treatment):
min_response = self.random_generator.normal(0.0, 0.1)
self.dosage_centroids[treatment_idx][dosage_idx] += (min_response,)
from drnet.data_access.generator import get_last_id_set
(centroids_tmp, current_idx) = ([], 0)
while len(centroid_indices) != 0:
(x, _) = next(generator)
ids = get_last_id_set()
while len(centroid_indices) != 0 and centroid_indices[0] <= current_idx + len(x[0]):
next_index = centroid_indices[0]
del centroid_indices[0]
is_last_treatment = len(centroid_indices) == 0
if is_last_treatment and True:
response_mean_of_mean = 1 - self.response_mean_of_mean
else:
response_mean_of_mean = self.response_mean_of_mean
response_mean = clip_percentage(self.random_generator.normal(response_mean_of_mean, self.response_std_of_mean))
response_std = clip_percentage(self.random_generator.normal(self.response_mean_of_std, self.response_std_of_std)) + 0.025
gene_loci_indices = np.arange(len(x[0][next_index]))
rnaseq_data = self.data_access.get_entry_with_id(ids[next_index])[1]['rnaseq'][1]
centroid_data = (gene_loci_indices, rnaseq_data[gene_loci_indices], response_mean, response_std)
centroids_tmp.append(centroid_data)
current_idx += len(x[0])
self.centroids = centroids_tmp
self.assignment_cache = {}
|
drnet
|
positive
|