| before (string, lengths 0–955k) | after (string, lengths 0–877k) | repo (string, lengths 1–74) | type (1 class: positive) |
|---|---|---|---|
def decode_zset_value(self, ldb_key):
<DeepExtract>
(type_id, key_length) = self.KEY_PREFIX_STRUCT.unpack(ldb_key[:self.KEY_PREFIX_LENGTH])
key_value = ldb_key[self.KEY_PREFIX_LENGTH:]
(_, length, key_name) = (type_id, key_length, key_value)
</DeepExtract>
return key_name[length + self.ZSET_SCORE_FORMAT_LENGTH:]
|
def decode_zset_value(self, ldb_key):
(type_id, key_length) = self.KEY_PREFIX_STRUCT.unpack(ldb_key[:self.KEY_PREFIX_LENGTH])
key_value = ldb_key[self.KEY_PREFIX_LENGTH:]
(_, length, key_name) = (type_id, key_length, key_value)
return key_name[length + self.ZSET_SCORE_FORMAT_LENGTH:]
|
dredis
|
positive
|
def _window_adjust(self, m):
nbytes = m.get_int()
self.lock.acquire()
try:
if self.ultra_debug:
<DeepExtract>
self.logger.log(DEBUG, '[chan ' + self._name + '] ' + 'window up {}'.format(nbytes), *args)
</DeepExtract>
self.out_window_size += nbytes
self.out_buffer_cv.notifyAll()
finally:
self.lock.release()
|
def _window_adjust(self, m):
nbytes = m.get_int()
self.lock.acquire()
try:
if self.ultra_debug:
self.logger.log(DEBUG, '[chan ' + self._name + '] ' + 'window up {}'.format(nbytes), *args)
self.out_window_size += nbytes
self.out_buffer_cv.notifyAll()
finally:
self.lock.release()
|
cerbrutus
|
positive
|
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
false_byte = chr(0)
true_byte = chr(1)
if is_packed:
<DeepExtract>
tag_bytes = _VarintBytes(wire_format.PackTag(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED))
</DeepExtract>
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
<DeepExtract>
tag_bytes = _VarintBytes(wire_format.PackTag(field_number, wire_format.WIRETYPE_VARINT))
</DeepExtract>
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
<DeepExtract>
tag_bytes = _VarintBytes(wire_format.PackTag(field_number, wire_format.WIRETYPE_VARINT))
</DeepExtract>
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField
|
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
false_byte = chr(0)
true_byte = chr(1)
if is_packed:
tag_bytes = _VarintBytes(wire_format.PackTag(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED))
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
tag_bytes = _VarintBytes(wire_format.PackTag(field_number, wire_format.WIRETYPE_VARINT))
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
tag_bytes = _VarintBytes(wire_format.PackTag(field_number, wire_format.WIRETYPE_VARINT))
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField
|
burp-protobuf-decoder
|
positive
|
def deploy_new_app_sync(self, project_id: str, cluster_name: str, app_directory: str, app_name: str, image_name: str, secrets: Dict[str, Dict[str, str]], region: str='us-west1', zone: str='us-west1-a') -> str:
"""Deploy a Django app to gke.
Args:
project_id: GCP project id.
cluster_name: Name of the cluster to host the app.
app_directory: Absolute path of the directory of your Django app.
app_name: Name of the Django app.
image_name: Tag of the docker image of the app.
secrets: Secrets necessary to run the app.
region: Where do you want to host the cluster.
zone: Name of the Google Compute Engine zone in which the cluster
resides.
Raises:
DeployNewAppError: If unable to deploy the app.
Returns:
The url of the deployed Django app.
"""
self._container_client.create_cluster_sync(project_id, cluster_name, region, zone)
self._container_client.build_docker_image(image_name, app_directory)
self._container_client.push_docker_image(image_name)
yaml_file_path = os.path.join(app_directory, app_name + '.yaml')
with open(yaml_file_path) as yaml_file:
for data in yaml.load_all(yaml_file, Loader=yaml.FullLoader):
if data['kind'] == 'Deployment':
deployment_data = data
elif data['kind'] == 'Service':
service_data = data
if not deployment_data or not service_data:
raise DeployNewAppError('Invalid kubernetes configuration file for Django app "{}" in "{}"'.format(app_name, app_directory))
kube_config = self._container_client.create_kubernetes_configuration(self._credentials, project_id, cluster_name, zone)
for (secret_name, secret) in secrets.items():
for (key, value) in secret.items():
if isinstance(value, str):
value = value.encode('utf-8')
secret[key] = base64.standard_b64encode(value).decode('utf-8')
secret_data = kubernetes.client.V1Secret(api_version='v1', data=secret, kind='Secret', metadata={'name': secret_name})
self._container_client.create_secret(secret_data, kube_config)
self._container_client.create_deployment(deployment_data, kube_config)
<DeepExtract>
api_client = kubernetes.client.ApiClient(kube_config)
api = kubernetes.client.ExtensionsV1beta1Api(api_client)
label_selector = '='.join(['app', app_name])
self._try_get_ready_replicas(api, label_selector)
</DeepExtract>
self._container_client.create_service(service_data, kube_config)
<DeepExtract>
api_client = kubernetes.client.ApiClient(kube_config)
api = kubernetes.client.CoreV1Api(api_client)
ingress_url = self._try_get_ingress_url(api)
</DeepExtract>
return ingress_url
|
def deploy_new_app_sync(self, project_id: str, cluster_name: str, app_directory: str, app_name: str, image_name: str, secrets: Dict[str, Dict[str, str]], region: str='us-west1', zone: str='us-west1-a') -> str:
"""Deploy a Django app to gke.
Args:
project_id: GCP project id.
cluster_name: Name of the cluster to host the app.
app_directory: Absolute path of the directory of your Django app.
app_name: Name of the Django app.
image_name: Tag of the docker image of the app.
secrets: Secrets necessary to run the app.
region: Where do you want to host the cluster.
zone: Name of the Google Compute Engine zone in which the cluster
resides.
Raises:
DeployNewAppError: If unable to deploy the app.
Returns:
The url of the deployed Django app.
"""
self._container_client.create_cluster_sync(project_id, cluster_name, region, zone)
self._container_client.build_docker_image(image_name, app_directory)
self._container_client.push_docker_image(image_name)
yaml_file_path = os.path.join(app_directory, app_name + '.yaml')
with open(yaml_file_path) as yaml_file:
for data in yaml.load_all(yaml_file, Loader=yaml.FullLoader):
if data['kind'] == 'Deployment':
deployment_data = data
elif data['kind'] == 'Service':
service_data = data
if not deployment_data or not service_data:
raise DeployNewAppError('Invalid kubernetes configuration file for Django app "{}" in "{}"'.format(app_name, app_directory))
kube_config = self._container_client.create_kubernetes_configuration(self._credentials, project_id, cluster_name, zone)
for (secret_name, secret) in secrets.items():
for (key, value) in secret.items():
if isinstance(value, str):
value = value.encode('utf-8')
secret[key] = base64.standard_b64encode(value).decode('utf-8')
secret_data = kubernetes.client.V1Secret(api_version='v1', data=secret, kind='Secret', metadata={'name': secret_name})
self._container_client.create_secret(secret_data, kube_config)
self._container_client.create_deployment(deployment_data, kube_config)
api_client = kubernetes.client.ApiClient(kube_config)
api = kubernetes.client.ExtensionsV1beta1Api(api_client)
label_selector = '='.join(['app', app_name])
self._try_get_ready_replicas(api, label_selector)
self._container_client.create_service(service_data, kube_config)
api_client = kubernetes.client.ApiClient(kube_config)
api = kubernetes.client.CoreV1Api(api_client)
ingress_url = self._try_get_ingress_url(api)
return ingress_url
|
django-cloud-deploy
|
positive
|
@pytest.mark.skipif(arg_chip != 'esp32', reason='ESP32-only')
def test_burn_block_data_with_offset_for_3_key_blocks(self):
offset = 1
<DeepExtract>
full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', f'burn_block_data --offset {offset} BLOCK0 {IMAGES_DIR}/192bit'])
output = self._run_command(full_cmd, check_msg, ret_code)
self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0)
print(output)
return output
</DeepExtract>
offset = 4
<DeepExtract>
full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', f'burn_block_data --offset {offset} BLOCK1 {IMAGES_DIR}/192bit_1'])
output = self._run_command(full_cmd, check_msg, ret_code)
self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0)
print(output)
return output
</DeepExtract>
<DeepExtract>
with open(f'{IMAGES_DIR}/192bit_1', 'rb') as f:
data = BitStream('0x00') * offset + BitStream(f)
blk = data.readlist(f'{data.len // 8}*uint:8')
blk = blk[::-1] if reverse_order else blk
hex_blk = ' '.join((f'{num:02x}' for num in blk))
assert repeat == self.espefuse_py('summary -d').count(hex_blk)
</DeepExtract>
offset = 6
<DeepExtract>
full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', f'burn_block_data --offset {offset} BLOCK2 {IMAGES_DIR}/192bit_2'])
output = self._run_command(full_cmd, check_msg, ret_code)
self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0)
print(output)
return output
</DeepExtract>
<DeepExtract>
with open(f'{IMAGES_DIR}/192bit_2', 'rb') as f:
data = BitStream('0x00') * offset + BitStream(f)
blk = data.readlist(f'{data.len // 8}*uint:8')
blk = blk[::-1] if reverse_order else blk
hex_blk = ' '.join((f'{num:02x}' for num in blk))
assert repeat == self.espefuse_py('summary -d').count(hex_blk)
</DeepExtract>
offset = 8
<DeepExtract>
full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', f'burn_block_data --offset {offset} BLOCK3 {IMAGES_DIR}/192bit_2'])
output = self._run_command(full_cmd, check_msg, ret_code)
self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0)
print(output)
return output
</DeepExtract>
<DeepExtract>
with open(f'{IMAGES_DIR}/192bit_2', 'rb') as f:
data = BitStream('0x00') * offset + BitStream(f)
blk = data.readlist(f'{data.len // 8}*uint:8')
blk = blk[::-1] if reverse_order else blk
hex_blk = ' '.join((f'{num:02x}' for num in blk))
assert repeat == self.espefuse_py('summary -d').count(hex_blk)
</DeepExtract>
|
@pytest.mark.skipif(arg_chip != 'esp32', reason='ESP32-only')
def test_burn_block_data_with_offset_for_3_key_blocks(self):
offset = 1
full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', f'burn_block_data --offset {offset} BLOCK0 {IMAGES_DIR}/192bit'])
output = self._run_command(full_cmd, check_msg, ret_code)
self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0)
print(output)
return output
offset = 4
full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', f'burn_block_data --offset {offset} BLOCK1 {IMAGES_DIR}/192bit_1'])
output = self._run_command(full_cmd, check_msg, ret_code)
self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0)
print(output)
return output
with open(f'{IMAGES_DIR}/192bit_1', 'rb') as f:
data = BitStream('0x00') * offset + BitStream(f)
blk = data.readlist(f'{data.len // 8}*uint:8')
blk = blk[::-1] if reverse_order else blk
hex_blk = ' '.join((f'{num:02x}' for num in blk))
assert repeat == self.espefuse_py('summary -d').count(hex_blk)
offset = 6
full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', f'burn_block_data --offset {offset} BLOCK2 {IMAGES_DIR}/192bit_2'])
output = self._run_command(full_cmd, check_msg, ret_code)
self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0)
print(output)
return output
with open(f'{IMAGES_DIR}/192bit_2', 'rb') as f:
data = BitStream('0x00') * offset + BitStream(f)
blk = data.readlist(f'{data.len // 8}*uint:8')
blk = blk[::-1] if reverse_order else blk
hex_blk = ' '.join((f'{num:02x}' for num in blk))
assert repeat == self.espefuse_py('summary -d').count(hex_blk)
offset = 8
full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', f'burn_block_data --offset {offset} BLOCK3 {IMAGES_DIR}/192bit_2'])
output = self._run_command(full_cmd, check_msg, ret_code)
self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0)
print(output)
return output
with open(f'{IMAGES_DIR}/192bit_2', 'rb') as f:
data = BitStream('0x00') * offset + BitStream(f)
blk = data.readlist(f'{data.len // 8}*uint:8')
blk = blk[::-1] if reverse_order else blk
hex_blk = ' '.join((f'{num:02x}' for num in blk))
assert repeat == self.espefuse_py('summary -d').count(hex_blk)
|
esptool
|
positive
|
def test_DataSetBetter(self):
<DeepExtract>
metric_set_1 = [[float(i), float(i * 1) + offset] for i in (10, 20, 30, 40)]
</DeepExtract>
<DeepExtract>
metric_set_2 = [[float(i), float(i * 2) + offset] for i in (10, 20, 30, 40)]
</DeepExtract>
<DeepExtract>
metric_set_3 = [[float(i), float(i * 1) + 2] for i in (10, 20, 30, 40)]
</DeepExtract>
self.assertAlmostEqual(100.0, visual_metrics.DataSetBetter(metric_set_1, metric_set_2, 'avg'))
self.assertAlmostEqual(100.0, visual_metrics.DataSetBetter(metric_set_1, metric_set_2, 'bdrate'), delta=2.0)
self.assertAlmostEqual(2.0, visual_metrics.DataSetBetter(metric_set_1, metric_set_3, 'dsnr'))
|
def test_DataSetBetter(self):
metric_set_1 = [[float(i), float(i * 1) + offset] for i in (10, 20, 30, 40)]
metric_set_2 = [[float(i), float(i * 2) + offset] for i in (10, 20, 30, 40)]
metric_set_3 = [[float(i), float(i * 1) + 2] for i in (10, 20, 30, 40)]
self.assertAlmostEqual(100.0, visual_metrics.DataSetBetter(metric_set_1, metric_set_2, 'avg'))
self.assertAlmostEqual(100.0, visual_metrics.DataSetBetter(metric_set_1, metric_set_2, 'bdrate'), delta=2.0)
self.assertAlmostEqual(2.0, visual_metrics.DataSetBetter(metric_set_1, metric_set_3, 'dsnr'))
|
compare-codecs
|
positive
|
def process_mvhd(self, data):
"""Set nextTrackId and time and movie timescale."""
<DeepExtract>
version = data[8]
output = data[:12]
if version == 1:
if self.creation_modfication_time:
output += uint64_to_str(self.creation_modfication_time)
output += uint64_to_str(self.creation_modfication_time)
else:
output += data[12:28]
output += uint32_to_str(self.timescale)
output += uint64_to_str(0)
else:
if self.creation_modfication_time:
output += uint32_to_str(self.creation_modfication_time)
output += uint32_to_str(self.creation_modfication_time)
else:
output += data[12:20]
output += uint32_to_str(self.timescale)
output += uint32_to_str(0)
output = output
</DeepExtract>
pos = len(output)
output += data[pos:-4]
output += uint32_to_str(self.track_id + 1)
return output
|
def process_mvhd(self, data):
"""Set nextTrackId and time and movie timescale."""
version = data[8]
output = data[:12]
if version == 1:
if self.creation_modfication_time:
output += uint64_to_str(self.creation_modfication_time)
output += uint64_to_str(self.creation_modfication_time)
else:
output += data[12:28]
output += uint32_to_str(self.timescale)
output += uint64_to_str(0)
else:
if self.creation_modfication_time:
output += uint32_to_str(self.creation_modfication_time)
output += uint32_to_str(self.creation_modfication_time)
else:
output += data[12:20]
output += uint32_to_str(self.timescale)
output += uint32_to_str(0)
output = output
pos = len(output)
output += data[pos:-4]
output += uint32_to_str(self.track_id + 1)
return output
|
dash-live-source-simulator
|
positive
|
def main(argv):
options = 'e:u:p:P:R:C:'
longOptions = ['endpoint=', 'user=', 'password=', 'pwdfile=', 'resourcename=', 'cookie=']
try:
(opts, args) = getopt.getopt(argv, options, longOptions)
<DeepExtract>
moduleArgs = {}
moduleArgs['endpoint'] = None
moduleArgs['user'] = None
moduleArgs['password'] = None
moduleArgs['pwdfile'] = None
moduleArgs['cookie'] = None
moduleArgs['resourcename'] = None
for (opt, arg) in opts:
if opt in ('-e', '--endpoint'):
moduleArgs['endpoint'] = arg
elif opt in ('-u', '--user'):
moduleArgs['user'] = arg
elif opt in ('-p', '--password'):
moduleArgs['password'] = arg
elif opt in ('-P', '--pwdfile'):
moduleArgs['pwdfile'] = arg
elif opt in ('-R', '--resourcename'):
moduleArgs['resourcename'] = arg
elif opt in ('-C', '--cookie'):
moduleArgs['cookie'] = arg
moduleArgs = moduleArgs
</DeepExtract>
if moduleArgs['cookie'] is None and moduleArgs['endpoint'] is not None and (moduleArgs['user'] is not None):
if moduleArgs['password'] is None and moduleArgs['pwdfile'] is None:
moduleArgs['password'] = getPassword(moduleArgs['user'])
elif moduleArgs['pwdfile'] is not None:
with open(moduleArgs['pwdfile'], 'r') as f:
moduleArgs['password'] = f.read().rstrip('\n')
moduleArgs['cookie'] = authenticate(moduleArgs['endpoint'], moduleArgs['user'], moduleArgs['password'])
if moduleArgs['cookie'] is not None:
<DeepExtract>
basepath = '/machineimage/'
params = None
data = None
response = callRESTApi(moduleArgs['endpoint'], basepath, moduleArgs['resourcename'], data, 'GET', params, moduleArgs['cookie'])
jsonResponse = json.loads(response.text)
jsonObj = jsonResponse
</DeepExtract>
printJSON(jsonObj)
else:
print('Incorrect parameters')
except getopt.GetoptError:
usage()
except Exception as e:
print('Unknown Exception please check log file')
logging.exception(e)
sys.exit(1)
return
|
def main(argv):
options = 'e:u:p:P:R:C:'
longOptions = ['endpoint=', 'user=', 'password=', 'pwdfile=', 'resourcename=', 'cookie=']
try:
(opts, args) = getopt.getopt(argv, options, longOptions)
moduleArgs = {}
moduleArgs['endpoint'] = None
moduleArgs['user'] = None
moduleArgs['password'] = None
moduleArgs['pwdfile'] = None
moduleArgs['cookie'] = None
moduleArgs['resourcename'] = None
for (opt, arg) in opts:
if opt in ('-e', '--endpoint'):
moduleArgs['endpoint'] = arg
elif opt in ('-u', '--user'):
moduleArgs['user'] = arg
elif opt in ('-p', '--password'):
moduleArgs['password'] = arg
elif opt in ('-P', '--pwdfile'):
moduleArgs['pwdfile'] = arg
elif opt in ('-R', '--resourcename'):
moduleArgs['resourcename'] = arg
elif opt in ('-C', '--cookie'):
moduleArgs['cookie'] = arg
moduleArgs = moduleArgs
if moduleArgs['cookie'] is None and moduleArgs['endpoint'] is not None and (moduleArgs['user'] is not None):
if moduleArgs['password'] is None and moduleArgs['pwdfile'] is None:
moduleArgs['password'] = getPassword(moduleArgs['user'])
elif moduleArgs['pwdfile'] is not None:
with open(moduleArgs['pwdfile'], 'r') as f:
moduleArgs['password'] = f.read().rstrip('\n')
moduleArgs['cookie'] = authenticate(moduleArgs['endpoint'], moduleArgs['user'], moduleArgs['password'])
if moduleArgs['cookie'] is not None:
basepath = '/machineimage/'
params = None
data = None
response = callRESTApi(moduleArgs['endpoint'], basepath, moduleArgs['resourcename'], data, 'GET', params, moduleArgs['cookie'])
jsonResponse = json.loads(response.text)
jsonObj = jsonResponse
printJSON(jsonObj)
else:
print('Incorrect parameters')
except getopt.GetoptError:
usage()
except Exception as e:
print('Unknown Exception please check log file')
logging.exception(e)
sys.exit(1)
return
|
atg-commerce-iaas
|
positive
|
def __init__(self, path):
self.dictionary = Dictionary()
<DeepExtract>
assert os.path.exists(os.path.join(path, 'train.txt'))
with open(os.path.join(path, 'train.txt'), 'r', encoding='utf-8') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
with open(os.path.join(path, 'train.txt'), 'r', encoding='utf-8') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
self.train = ids
</DeepExtract>
<DeepExtract>
assert os.path.exists(os.path.join(path, 'valid.txt'))
with open(os.path.join(path, 'valid.txt'), 'r', encoding='utf-8') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
with open(os.path.join(path, 'valid.txt'), 'r', encoding='utf-8') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
self.valid = ids
</DeepExtract>
<DeepExtract>
assert os.path.exists(os.path.join(path, 'test.txt'))
with open(os.path.join(path, 'test.txt'), 'r', encoding='utf-8') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
with open(os.path.join(path, 'test.txt'), 'r', encoding='utf-8') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
self.test = ids
</DeepExtract>
|
def __init__(self, path):
self.dictionary = Dictionary()
assert os.path.exists(os.path.join(path, 'train.txt'))
with open(os.path.join(path, 'train.txt'), 'r', encoding='utf-8') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
with open(os.path.join(path, 'train.txt'), 'r', encoding='utf-8') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
self.train = ids
assert os.path.exists(os.path.join(path, 'valid.txt'))
with open(os.path.join(path, 'valid.txt'), 'r', encoding='utf-8') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
with open(os.path.join(path, 'valid.txt'), 'r', encoding='utf-8') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
self.valid = ids
assert os.path.exists(os.path.join(path, 'test.txt'))
with open(os.path.join(path, 'test.txt'), 'r', encoding='utf-8') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
with open(os.path.join(path, 'test.txt'), 'r', encoding='utf-8') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
self.test = ids
|
eval-nas
|
positive
|
def minced(self, nb_slices=(8, 8, 4)):
"""Experimental method decomposing the mesh as a hierarchical structure.
Parameters
----------
nb_slices: Tuple[int, int, int]
The number of slices in each of the x, y and z directions.
Only powers of 2 are supported at the moment.
Returns
-------
FloatingBody
"""
<DeepExtract>
new_body = copy.deepcopy(self)
if name is None:
new_body.name = f'copy_of_{self.name}'
LOG.debug(f'Copy {self.name}.')
else:
new_body.name = name
LOG.debug(f'Copy {self.name} under the name {name}.')
minced_body = new_body
</DeepExtract>
(x_min, x_max, y_min, y_max, z_min, z_max) = self.mesh.axis_aligned_bbox
sizes = [(x_min, x_max), (y_min, y_max), (z_min, z_max)]
directions = [np.array(d) for d in [(1, 0, 0), (0, 1, 0), (0, 0, 1)]]
def _slice_positions_at_depth(i):
"""Helper function.
Returns a list of floats as follows:
i=1 -> [1/2]
i=2 -> [1/4, 3/4]
i=3 -> [1/8, 3/8, 5/8, 7/8]
...
"""
denominator = 2 ** i
return [numerator / denominator for numerator in range(1, denominator, 2)]
planes = []
for (direction, nb_slices_in_dir, (min_coord, max_coord)) in zip(directions, nb_slices, sizes):
planes_in_dir = []
depth_of_treelike_structure = int(np.log2(nb_slices_in_dir))
for i_depth in range(1, depth_of_treelike_structure + 1):
planes_in_dir_at_depth = []
for relative_position in _slice_positions_at_depth(i_depth):
slice_position = (min_coord + relative_position * (max_coord - min_coord)) * direction
plane = Plane(normal=direction, point=slice_position)
planes_in_dir_at_depth.append(plane)
planes_in_dir.append(planes_in_dir_at_depth)
planes.append(planes_in_dir)
intermingled_x_y_z = chain.from_iterable(zip_longest(*planes))
for planes in intermingled_x_y_z:
if planes is not None:
for plane in planes:
minced_body = minced_body.sliced_by_plane(plane)
return minced_body
|
def minced(self, nb_slices=(8, 8, 4)):
"""Experimental method decomposing the mesh as a hierarchical structure.
Parameters
----------
nb_slices: Tuple[int, int, int]
The number of slices in each of the x, y and z directions.
Only powers of 2 are supported at the moment.
Returns
-------
FloatingBody
"""
new_body = copy.deepcopy(self)
if name is None:
new_body.name = f'copy_of_{self.name}'
LOG.debug(f'Copy {self.name}.')
else:
new_body.name = name
LOG.debug(f'Copy {self.name} under the name {name}.')
minced_body = new_body
(x_min, x_max, y_min, y_max, z_min, z_max) = self.mesh.axis_aligned_bbox
sizes = [(x_min, x_max), (y_min, y_max), (z_min, z_max)]
directions = [np.array(d) for d in [(1, 0, 0), (0, 1, 0), (0, 0, 1)]]
def _slice_positions_at_depth(i):
"""Helper function.
Returns a list of floats as follows:
i=1 -> [1/2]
i=2 -> [1/4, 3/4]
i=3 -> [1/8, 3/8, 5/8, 7/8]
...
"""
denominator = 2 ** i
return [numerator / denominator for numerator in range(1, denominator, 2)]
planes = []
for (direction, nb_slices_in_dir, (min_coord, max_coord)) in zip(directions, nb_slices, sizes):
planes_in_dir = []
depth_of_treelike_structure = int(np.log2(nb_slices_in_dir))
for i_depth in range(1, depth_of_treelike_structure + 1):
planes_in_dir_at_depth = []
for relative_position in _slice_positions_at_depth(i_depth):
slice_position = (min_coord + relative_position * (max_coord - min_coord)) * direction
plane = Plane(normal=direction, point=slice_position)
planes_in_dir_at_depth.append(plane)
planes_in_dir.append(planes_in_dir_at_depth)
planes.append(planes_in_dir)
intermingled_x_y_z = chain.from_iterable(zip_longest(*planes))
for planes in intermingled_x_y_z:
if planes is not None:
for plane in planes:
minced_body = minced_body.sliced_by_plane(plane)
return minced_body
|
capytaine
|
positive
|
@app.route('/add_to_db', methods=['POST'])
def add_to_db():
print('Received request.')
print(request.form['message'])
msg = request.form['message']
<DeepExtract>
db = os.environ.get('DB', None) or os.environ.get('database', None)
username = os.environ.get('USER', None) or os.environ.get('username', None)
password = os.environ.get('PASSWORD', None) or os.environ.get('password', None)
hostname = os.environ.get('HOST', None) or os.environ.get('dbhost', None)
(db, username, password, hostname) = (db, username, password, hostname)
</DeepExtract>
cnx = ''
try:
cnx = mysql.connector.connect(user=username, password=password, host=hostname, database=db)
except Exception as exp:
print(exp)
import MySQLdb
cnx = MySQLdb.connect(unix_socket=hostname, user=username, passwd=password, db=db)
cur = cnx.cursor()
cur.execute("INSERT INTO message (greeting) values ('" + msg + "')")
cnx.commit()
return hello()
|
@app.route('/add_to_db', methods=['POST'])
def add_to_db():
print('Received request.')
print(request.form['message'])
msg = request.form['message']
db = os.environ.get('DB', None) or os.environ.get('database', None)
username = os.environ.get('USER', None) or os.environ.get('username', None)
password = os.environ.get('PASSWORD', None) or os.environ.get('password', None)
hostname = os.environ.get('HOST', None) or os.environ.get('dbhost', None)
(db, username, password, hostname) = (db, username, password, hostname)
cnx = ''
try:
cnx = mysql.connector.connect(user=username, password=password, host=hostname, database=db)
except Exception as exp:
print(exp)
import MySQLdb
cnx = MySQLdb.connect(unix_socket=hostname, user=username, passwd=password, db=db)
cur = cnx.cursor()
cur.execute("INSERT INTO message (greeting) values ('" + msg + "')")
cnx.commit()
return hello()
|
caastle
|
positive
|
def CloseClick(self, button='left', pressed='', coords=(0, 0), double=False):
"""Peform a click action that should make the window go away
The only difference from Click is that there are extra delays
before and after the click action.
"""
time.sleep(Timings.before_closeclick_wait)
<DeepExtract>
self.VerifyActionable()
if isinstance(coords, win32structures.RECT):
coords = (coords.left, coords.top)
msgs = []
if not double:
if button.lower() == 'left':
if button_down:
msgs.append(win32defines.WM_LBUTTONDOWN)
if button_up:
msgs.append(win32defines.WM_LBUTTONUP)
elif button.lower() == 'middle':
if button_down:
msgs.append(win32defines.WM_MBUTTONDOWN)
if button_up:
msgs.append(win32defines.WM_MBUTTONUP)
elif button.lower() == 'right':
if button_down:
msgs.append(win32defines.WM_RBUTTONDOWN)
if button_up:
msgs.append(win32defines.WM_RBUTTONUP)
elif button.lower() == 'left':
msgs = (win32defines.WM_LBUTTONDOWN, win32defines.WM_LBUTTONUP, win32defines.WM_LBUTTONDBLCLK, win32defines.WM_LBUTTONUP)
elif button.lower() == 'middle':
msgs = (win32defines.WM_MBUTTONDOWN, win32defines.WM_MBUTTONUP, win32defines.WM_MBUTTONDBLCLK, win32defines.WM_MBUTTONUP)
elif button.lower() == 'right':
msgs = (win32defines.WM_RBUTTONDOWN, win32defines.WM_RBUTTONUP, win32defines.WM_RBUTTONDBLCLK, win32defines.WM_RBUTTONUP)
(flags, click_point) = _calc_flags_and_coords(pressed, coords)
win32functions.AttachThreadInput(win32functions.GetCurrentThreadId(), self.ProcessID(), 1)
for msg in msgs:
self.PostMessage(msg, flags, click_point)
time.sleep(Timings.sendmessagetimeout_timeout)
win32functions.WaitGuiThreadIdle(self)
win32functions.AttachThreadInput(win32functions.GetCurrentThreadId(), self.ProcessID(), 0)
time.sleep(Timings.after_click_wait)
</DeepExtract>
def has_closed():
return not (win32functions.IsWindow(self) or win32functions.IsWindow(self.Parent()))
timings.WaitUntil(Timings.closeclick_dialog_close_wait, Timings.closeclick_retry, has_closed)
time.sleep(Timings.after_closeclick_wait)
return self
|
def CloseClick(self, button='left', pressed='', coords=(0, 0), double=False):
"""Peform a click action that should make the window go away
The only difference from Click is that there are extra delays
before and after the click action.
"""
time.sleep(Timings.before_closeclick_wait)
self.VerifyActionable()
if isinstance(coords, win32structures.RECT):
coords = (coords.left, coords.top)
msgs = []
if not double:
if button.lower() == 'left':
if button_down:
msgs.append(win32defines.WM_LBUTTONDOWN)
if button_up:
msgs.append(win32defines.WM_LBUTTONUP)
elif button.lower() == 'middle':
if button_down:
msgs.append(win32defines.WM_MBUTTONDOWN)
if button_up:
msgs.append(win32defines.WM_MBUTTONUP)
elif button.lower() == 'right':
if button_down:
msgs.append(win32defines.WM_RBUTTONDOWN)
if button_up:
msgs.append(win32defines.WM_RBUTTONUP)
elif button.lower() == 'left':
msgs = (win32defines.WM_LBUTTONDOWN, win32defines.WM_LBUTTONUP, win32defines.WM_LBUTTONDBLCLK, win32defines.WM_LBUTTONUP)
elif button.lower() == 'middle':
msgs = (win32defines.WM_MBUTTONDOWN, win32defines.WM_MBUTTONUP, win32defines.WM_MBUTTONDBLCLK, win32defines.WM_MBUTTONUP)
elif button.lower() == 'right':
msgs = (win32defines.WM_RBUTTONDOWN, win32defines.WM_RBUTTONUP, win32defines.WM_RBUTTONDBLCLK, win32defines.WM_RBUTTONUP)
(flags, click_point) = _calc_flags_and_coords(pressed, coords)
win32functions.AttachThreadInput(win32functions.GetCurrentThreadId(), self.ProcessID(), 1)
for msg in msgs:
self.PostMessage(msg, flags, click_point)
time.sleep(Timings.sendmessagetimeout_timeout)
win32functions.WaitGuiThreadIdle(self)
win32functions.AttachThreadInput(win32functions.GetCurrentThreadId(), self.ProcessID(), 0)
time.sleep(Timings.after_click_wait)
def has_closed():
return not (win32functions.IsWindow(self) or win32functions.IsWindow(self.Parent()))
timings.WaitUntil(Timings.closeclick_dialog_close_wait, Timings.closeclick_retry, has_closed)
time.sleep(Timings.after_closeclick_wait)
return self
|
BrowserRefresh-Sublime
|
positive
|
def FilenameCheckHash(filename, literalfilename):
if literalfilename:
return (FCH_FILENAME, filename)
elif filename.startswith('#h#'):
<DeepExtract>
if len(filename[3:].replace(' ', '')) % 2 == 1:
filename[3:].replace(' ', '') = '0' + filename[3:].replace(' ', '')
try:
result = binascii.a2b_hex(filename[3:].replace(' ', ''))
except:
result = None
</DeepExtract>
if result == None:
return (FCH_ERROR, 'hexadecimal')
else:
return (FCH_DATA, result)
elif filename.startswith('#b#'):
try:
return (FCH_DATA, binascii.a2b_base64(filename[3:]))
except:
return (FCH_ERROR, 'base64')
elif filename.startswith('#e#'):
<DeepExtract>
functioncalls = Parse(filename[3:])
if functioncalls == None:
result = None
decoded = ''
for functioncall in functioncalls:
(functionname, arguments) = functioncall
if functionname == FUNCTIONNAME_REPEAT:
if CheckFunction(functionname, arguments, 2):
result = None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
result = None
bytes = InterpretBytes(arguments[1])
if bytes == None:
print('Error: argument should be a byte sequence: %s' % arguments[1][1])
result = None
decoded += number * bytes
elif functionname == FUNCTIONNAME_RANDOM:
if CheckFunction(functionname, arguments, 1):
result = None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
result = None
decoded += ''.join([chr(random.randint(0, 255)) for x in range(number)])
elif functionname == FUNCTIONNAME_LOREMIPSUM:
if CheckFunction(functionname, arguments, 1):
result = None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
result = None
decoded += LoremIpsum(number)
elif functionname == FUNCTIONNAME_CHR:
if CheckFunction(functionname, arguments, 1, 2):
result = None
number = CheckNumber(arguments[0], minimum=0, maximum=255)
if number == None:
result = None
if len(arguments) == 1:
decoded += chr(number)
else:
number2 = CheckNumber(arguments[1], minimum=0, maximum=255)
if number2 == None:
result = None
if number < number2:
decoded += ''.join([chr(n) for n in range(number, number2 + 1)])
else:
decoded += ''.join([chr(n) for n in range(number, number2 - 1, -1)])
else:
print('Error: unknown function: %s' % functionname)
result = None
result = decoded
</DeepExtract>
if result == None:
return (FCH_ERROR, 'expression')
else:
return (FCH_DATA, C2BIP3(result))
elif filename.startswith('#p#'):
<DeepExtract>
try:
(packFormat, pythonExpression) = filename[3:].split('#', 1)
filename[3:] = struct.pack(packFormat, int(pythonExpression))
result = filename[3:]
except:
result = None
</DeepExtract>
if result == None:
return (FCH_ERROR, 'pack')
else:
return (FCH_DATA, result)
elif filename.startswith('#'):
return (FCH_DATA, C2BIP3(filename[1:]))
else:
return (FCH_FILENAME, filename)
|
def FilenameCheckHash(filename, literalfilename):
if literalfilename:
return (FCH_FILENAME, filename)
elif filename.startswith('#h#'):
if len(filename[3:].replace(' ', '')) % 2 == 1:
filename[3:].replace(' ', '') = '0' + filename[3:].replace(' ', '')
try:
result = binascii.a2b_hex(filename[3:].replace(' ', ''))
except:
result = None
if result == None:
return (FCH_ERROR, 'hexadecimal')
else:
return (FCH_DATA, result)
elif filename.startswith('#b#'):
try:
return (FCH_DATA, binascii.a2b_base64(filename[3:]))
except:
return (FCH_ERROR, 'base64')
elif filename.startswith('#e#'):
functioncalls = Parse(filename[3:])
if functioncalls == None:
result = None
decoded = ''
for functioncall in functioncalls:
(functionname, arguments) = functioncall
if functionname == FUNCTIONNAME_REPEAT:
if CheckFunction(functionname, arguments, 2):
result = None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
result = None
bytes = InterpretBytes(arguments[1])
if bytes == None:
print('Error: argument should be a byte sequence: %s' % arguments[1][1])
result = None
decoded += number * bytes
elif functionname == FUNCTIONNAME_RANDOM:
if CheckFunction(functionname, arguments, 1):
result = None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
result = None
decoded += ''.join([chr(random.randint(0, 255)) for x in range(number)])
elif functionname == FUNCTIONNAME_LOREMIPSUM:
if CheckFunction(functionname, arguments, 1):
result = None
number = CheckNumber(arguments[0], minimum=1)
if number == None:
result = None
decoded += LoremIpsum(number)
elif functionname == FUNCTIONNAME_CHR:
if CheckFunction(functionname, arguments, 1, 2):
result = None
number = CheckNumber(arguments[0], minimum=0, maximum=255)
if number == None:
result = None
if len(arguments) == 1:
decoded += chr(number)
else:
number2 = CheckNumber(arguments[1], minimum=0, maximum=255)
if number2 == None:
result = None
if number < number2:
decoded += ''.join([chr(n) for n in range(number, number2 + 1)])
else:
decoded += ''.join([chr(n) for n in range(number, number2 - 1, -1)])
else:
print('Error: unknown function: %s' % functionname)
result = None
result = decoded
if result == None:
return (FCH_ERROR, 'expression')
else:
return (FCH_DATA, C2BIP3(result))
elif filename.startswith('#p#'):
try:
(packFormat, pythonExpression) = filename[3:].split('#', 1)
filename[3:] = struct.pack(packFormat, int(pythonExpression))
result = filename[3:]
except:
result = None
if result == None:
return (FCH_ERROR, 'pack')
else:
return (FCH_DATA, result)
elif filename.startswith('#'):
return (FCH_DATA, C2BIP3(filename[1:]))
else:
return (FCH_FILENAME, filename)
|
Beta
|
positive
|
def __call__(self, eval_desc='syntax-vae', step=None, **kwargs):
"""
Args:
eval_desc:
Returns: eval the multi-bleu for machine translation
"""
args = self.model.args
ret_track = {}
if args.task_type == 'SyntaxVAE2':
<DeepExtract>
model = self.model
args = self.model.args
training = model.training
model.eval()
step = eval_step if eval_step is not None else 2 * args.x0
ret_track = {}
(batch_examples, _) = batchify_examples(examples=self.eval_set, batch_size=self.batch_size, sort=False)
for batch in batch_examples:
ret_loss = model.get_loss(batch, step)
ret_track = update_tracker(ret_loss, ret_track)
if self.write_down:
write_result(ret_track, fname=os.path.join(self.out_dir, eval_desc + '.score'))
model.training = training
ret_track = ret_track
</DeepExtract>
<DeepExtract>
model = self.model
args = self.model.args
training = model.training
model.eval()
eval_results = new_evaluate(examples=self.eval_set, model=model, sort_key=self.sort_key, eval_tgt=self.eval_tgt, batch_size=self.batch_size, out_dir=os.path.join(self.out_dir, eval_desc) if self.write_down is not None else None)
tgt_bleu = eval_results['accuracy']
model.training = training
rec_ret = {'BLEU': tgt_bleu, 'EVAL TIME': eval_results['use_time'], 'EVAL SPEED': len(self.eval_set) / eval_results['use_time']}
</DeepExtract>
ret_track.update(**rec_ret)
if args.dev_item is None:
<DeepExtract>
if args.task_type == 'EnhanceSyntaxVAE':
self.score_item = 'ELBO'
if bleu < 50.0:
self.score_item = 'TGT BLEU'
else:
self.score_item = 'TGT_ORI_RATE'
</DeepExtract>
else:
self.score_item = args.dev_item
elif args.task_type == 'SyntaxPara':
<DeepExtract>
model = self.model
args = self.model.args
training = model.training
model.eval()
eval_results = new_evaluate(examples=self.eval_set, model=model, sort_key=self.sort_key, eval_tgt=self.eval_tgt, batch_size=self.batch_size, out_dir=os.path.join(self.out_dir, eval_desc) if self.write_down is not None else None)
tgt_bleu = eval_results['accuracy']
ori_bleu = BleuScoreMetric.evaluate_file(pred_file=eval_results['pred_file'], gold_files=eval_results['input_file'])
model.training = training
ret_track = {'TGT BLEU': tgt_bleu, 'ORI BLEU': ori_bleu, 'TGT_ORI_RATE': tgt_bleu / ori_bleu, 'EVAL TIME': eval_results['use_time'], 'EVAL SPEED': len(self.eval_set) / eval_results['use_time']}
</DeepExtract>
if args.dev_item is None:
<DeepExtract>
if args.task_type == 'EnhanceSyntaxVAE':
self.score_item = 'ELBO'
if ret_track['TGT BLEU'] < 50.0:
self.score_item = 'TGT BLEU'
else:
self.score_item = 'TGT_ORI_RATE'
</DeepExtract>
else:
self.score_item = args.dev_item
return ret_track
|
def __call__(self, eval_desc='syntax-vae', step=None, **kwargs):
"""
Args:
eval_desc:
Returns: eval the multi-bleu for machine translation
"""
args = self.model.args
ret_track = {}
if args.task_type == 'SyntaxVAE2':
model = self.model
args = self.model.args
training = model.training
model.eval()
step = eval_step if eval_step is not None else 2 * args.x0
ret_track = {}
(batch_examples, _) = batchify_examples(examples=self.eval_set, batch_size=self.batch_size, sort=False)
for batch in batch_examples:
ret_loss = model.get_loss(batch, step)
ret_track = update_tracker(ret_loss, ret_track)
if self.write_down:
write_result(ret_track, fname=os.path.join(self.out_dir, eval_desc + '.score'))
model.training = training
ret_track = ret_track
model = self.model
args = self.model.args
training = model.training
model.eval()
eval_results = new_evaluate(examples=self.eval_set, model=model, sort_key=self.sort_key, eval_tgt=self.eval_tgt, batch_size=self.batch_size, out_dir=os.path.join(self.out_dir, eval_desc) if self.write_down is not None else None)
tgt_bleu = eval_results['accuracy']
model.training = training
rec_ret = {'BLEU': tgt_bleu, 'EVAL TIME': eval_results['use_time'], 'EVAL SPEED': len(self.eval_set) / eval_results['use_time']}
ret_track.update(**rec_ret)
if args.dev_item is None:
if args.task_type == 'EnhanceSyntaxVAE':
self.score_item = 'ELBO'
if bleu < 50.0:
self.score_item = 'TGT BLEU'
else:
self.score_item = 'TGT_ORI_RATE'
else:
self.score_item = args.dev_item
elif args.task_type == 'SyntaxPara':
model = self.model
args = self.model.args
training = model.training
model.eval()
eval_results = new_evaluate(examples=self.eval_set, model=model, sort_key=self.sort_key, eval_tgt=self.eval_tgt, batch_size=self.batch_size, out_dir=os.path.join(self.out_dir, eval_desc) if self.write_down is not None else None)
tgt_bleu = eval_results['accuracy']
ori_bleu = BleuScoreMetric.evaluate_file(pred_file=eval_results['pred_file'], gold_files=eval_results['input_file'])
model.training = training
ret_track = {'TGT BLEU': tgt_bleu, 'ORI BLEU': ori_bleu, 'TGT_ORI_RATE': tgt_bleu / ori_bleu, 'EVAL TIME': eval_results['use_time'], 'EVAL SPEED': len(self.eval_set) / eval_results['use_time']}
if args.dev_item is None:
if args.task_type == 'EnhanceSyntaxVAE':
self.score_item = 'ELBO'
if ret_track['TGT BLEU'] < 50.0:
self.score_item = 'TGT BLEU'
else:
self.score_item = 'TGT_ORI_RATE'
else:
self.score_item = args.dev_item
return ret_track
|
DSS-VAE
|
positive
|
def draw_controlled_gate(backend, positions, node, **params):
""" Draws a :class:`discopy.quantum.gates.Controlled` gate. """
(box, depth) = (node.box, node.depth)
distance = box.distance
c_size = len(box.controlled.dom)
index = (0, distance) if distance > 0 else (c_size - distance - 1, 0)
dom = Node('dom', obj=box.dom.inside[0], i=index[0], depth=depth)
cod = Node('cod', obj=box.cod.inside[0], i=index[0], depth=depth)
middle = (positions[dom][0], (positions[dom][1] + positions[cod][1]) / 2)
controlled_box = box.controlled.to_drawing()
controlled = Node('box', box=controlled_box, depth=depth)
c_dom = Node('dom', obj=box.dom.inside[0], i=index[1], depth=depth)
c_cod = Node('cod', obj=box.cod.inside[0], i=index[1], depth=depth)
c_middle = (positions[c_dom][0], (positions[c_dom][1] + positions[c_cod][1]) / 2)
target = (positions[c_dom][0] + (c_size - 1) / 2, (positions[c_dom][1] + positions[c_cod][1]) / 2)
target_boundary = target
if controlled_box.name == 'X':
backend.draw_wire(positions[c_dom], positions[c_cod])
eps = 1e-10
perturbed_target = (target[0], target[1] + eps)
backend.draw_node(*perturbed_target, shape='circle', color='white', edgecolor='black', nodesize=2 * params.get('nodesize', 1))
backend.draw_node(*target, shape='plus', nodesize=2 * params.get('nodesize', 1))
else:
fake_positions = {controlled: target}
for i in range(c_size):
dom_node = Node('dom', obj=box.dom.inside[i], i=i, depth=depth)
(x, y) = (positions[c_dom][0] + i, positions[c_dom][1])
fake_positions[dom_node] = (x, y)
cod_node = Node('cod', obj=box.cod.inside[i], i=i, depth=depth)
(x, y) = (positions[c_cod][0] + i, positions[c_cod][1])
fake_positions[cod_node] = (x, y)
shift_boundary = True
if hasattr(box.controlled, 'draw_as_controlled'):
<DeepExtract>
(box, depth) = (controlled.box, controlled.depth)
distance = box.distance
c_size = len(box.controlled.dom)
index = (0, distance) if distance > 0 else (c_size - distance - 1, 0)
dom = Node('dom', obj=box.dom.inside[0], i=index[0], depth=depth)
cod = Node('cod', obj=box.cod.inside[0], i=index[0], depth=depth)
middle = (fake_positions[dom][0], (fake_positions[dom][1] + fake_positions[cod][1]) / 2)
controlled_box = box.controlled.to_drawing()
controlled = Node('box', box=controlled_box, depth=depth)
c_dom = Node('dom', obj=box.dom.inside[0], i=index[1], depth=depth)
c_cod = Node('cod', obj=box.cod.inside[0], i=index[1], depth=depth)
c_middle = (fake_positions[c_dom][0], (fake_positions[c_dom][1] + fake_positions[c_cod][1]) / 2)
target = (fake_positions[c_dom][0] + (c_size - 1) / 2, (fake_positions[c_dom][1] + fake_positions[c_cod][1]) / 2)
target_boundary = target
if controlled_box.name == 'X':
backend.draw_wire(fake_positions[c_dom], fake_positions[c_cod])
eps = 1e-10
perturbed_target = (target[0], target[1] + eps)
backend.draw_node(*perturbed_target, shape='circle', color='white', edgecolor='black', nodesize=2 * params.get('nodesize', 1))
backend.draw_node(*target, shape='plus', nodesize=2 * params.get('nodesize', 1))
else:
fake_positions = {controlled: target}
for i in range(c_size):
dom_node = Node('dom', obj=box.dom.inside[i], i=i, depth=depth)
(x, y) = (fake_positions[c_dom][0] + i, fake_positions[c_dom][1])
fake_positions[dom_node] = (x, y)
cod_node = Node('cod', obj=box.cod.inside[i], i=i, depth=depth)
(x, y) = (fake_positions[c_cod][0] + i, fake_positions[c_cod][1])
fake_positions[cod_node] = (x, y)
shift_boundary = True
if hasattr(box.controlled, 'draw_as_controlled'):
backend = draw_controlled_gate(backend, fake_positions, controlled, **params)
next_box = box.controlled
while hasattr(next_box, 'controlled'):
if controlled_box.distance * next_box.distance < 0:
shift_boundary = False
break
next_box = next_box.controlled
if next_box.name == 'X':
shift_boundary = False
else:
backend = draw_box(backend, fake_positions, controlled, **params)
if shift_boundary:
if box.distance > 0:
target_boundary = (c_middle[0] - 0.25, c_middle[1])
else:
target_boundary = (c_middle[0] + c_size - 1 + 0.25, c_middle[1])
elif box.distance > 0:
target_boundary = (c_middle[0], c_middle[1])
else:
target_boundary = (c_middle[0] + c_size - 1, c_middle[1])
backend.draw_wire(fake_positions[dom], fake_positions[cod])
extra_offset = 1 if distance > 0 else len(box.controlled.dom)
for i in range(extra_offset, extra_offset + abs(distance) - 1):
node1 = Node('dom', obj=box.dom.inside[i], i=i, depth=depth)
node2 = Node('cod', obj=box.cod.inside[i], i=i, depth=depth)
backend.draw_wire(fake_positions[node1], fake_positions[node2])
backend.draw_wire(middle, target_boundary, bend_in=True, bend_out=True)
backend.draw_node(*middle, color='black', shape='circle', nodesize=params.get('nodesize', 1))
backend = backend
</DeepExtract>
next_box = box.controlled
while hasattr(next_box, 'controlled'):
if controlled_box.distance * next_box.distance < 0:
shift_boundary = False
break
next_box = next_box.controlled
if next_box.name == 'X':
shift_boundary = False
else:
backend = draw_box(backend, fake_positions, controlled, **params)
if shift_boundary:
if box.distance > 0:
target_boundary = (c_middle[0] - 0.25, c_middle[1])
else:
target_boundary = (c_middle[0] + c_size - 1 + 0.25, c_middle[1])
elif box.distance > 0:
target_boundary = (c_middle[0], c_middle[1])
else:
target_boundary = (c_middle[0] + c_size - 1, c_middle[1])
backend.draw_wire(positions[dom], positions[cod])
extra_offset = 1 if distance > 0 else len(box.controlled.dom)
for i in range(extra_offset, extra_offset + abs(distance) - 1):
node1 = Node('dom', obj=box.dom.inside[i], i=i, depth=depth)
node2 = Node('cod', obj=box.cod.inside[i], i=i, depth=depth)
backend.draw_wire(positions[node1], positions[node2])
backend.draw_wire(middle, target_boundary, bend_in=True, bend_out=True)
backend.draw_node(*middle, color='black', shape='circle', nodesize=params.get('nodesize', 1))
return backend
|
def draw_controlled_gate(backend, positions, node, **params):
""" Draws a :class:`discopy.quantum.gates.Controlled` gate. """
(box, depth) = (node.box, node.depth)
distance = box.distance
c_size = len(box.controlled.dom)
index = (0, distance) if distance > 0 else (c_size - distance - 1, 0)
dom = Node('dom', obj=box.dom.inside[0], i=index[0], depth=depth)
cod = Node('cod', obj=box.cod.inside[0], i=index[0], depth=depth)
middle = (positions[dom][0], (positions[dom][1] + positions[cod][1]) / 2)
controlled_box = box.controlled.to_drawing()
controlled = Node('box', box=controlled_box, depth=depth)
c_dom = Node('dom', obj=box.dom.inside[0], i=index[1], depth=depth)
c_cod = Node('cod', obj=box.cod.inside[0], i=index[1], depth=depth)
c_middle = (positions[c_dom][0], (positions[c_dom][1] + positions[c_cod][1]) / 2)
target = (positions[c_dom][0] + (c_size - 1) / 2, (positions[c_dom][1] + positions[c_cod][1]) / 2)
target_boundary = target
if controlled_box.name == 'X':
backend.draw_wire(positions[c_dom], positions[c_cod])
eps = 1e-10
perturbed_target = (target[0], target[1] + eps)
backend.draw_node(*perturbed_target, shape='circle', color='white', edgecolor='black', nodesize=2 * params.get('nodesize', 1))
backend.draw_node(*target, shape='plus', nodesize=2 * params.get('nodesize', 1))
else:
fake_positions = {controlled: target}
for i in range(c_size):
dom_node = Node('dom', obj=box.dom.inside[i], i=i, depth=depth)
(x, y) = (positions[c_dom][0] + i, positions[c_dom][1])
fake_positions[dom_node] = (x, y)
cod_node = Node('cod', obj=box.cod.inside[i], i=i, depth=depth)
(x, y) = (positions[c_cod][0] + i, positions[c_cod][1])
fake_positions[cod_node] = (x, y)
shift_boundary = True
if hasattr(box.controlled, 'draw_as_controlled'):
(box, depth) = (controlled.box, controlled.depth)
distance = box.distance
c_size = len(box.controlled.dom)
index = (0, distance) if distance > 0 else (c_size - distance - 1, 0)
dom = Node('dom', obj=box.dom.inside[0], i=index[0], depth=depth)
cod = Node('cod', obj=box.cod.inside[0], i=index[0], depth=depth)
middle = (fake_positions[dom][0], (fake_positions[dom][1] + fake_positions[cod][1]) / 2)
controlled_box = box.controlled.to_drawing()
controlled = Node('box', box=controlled_box, depth=depth)
c_dom = Node('dom', obj=box.dom.inside[0], i=index[1], depth=depth)
c_cod = Node('cod', obj=box.cod.inside[0], i=index[1], depth=depth)
c_middle = (fake_positions[c_dom][0], (fake_positions[c_dom][1] + fake_positions[c_cod][1]) / 2)
target = (fake_positions[c_dom][0] + (c_size - 1) / 2, (fake_positions[c_dom][1] + fake_positions[c_cod][1]) / 2)
target_boundary = target
if controlled_box.name == 'X':
backend.draw_wire(fake_positions[c_dom], fake_positions[c_cod])
eps = 1e-10
perturbed_target = (target[0], target[1] + eps)
backend.draw_node(*perturbed_target, shape='circle', color='white', edgecolor='black', nodesize=2 * params.get('nodesize', 1))
backend.draw_node(*target, shape='plus', nodesize=2 * params.get('nodesize', 1))
else:
fake_positions = {controlled: target}
for i in range(c_size):
dom_node = Node('dom', obj=box.dom.inside[i], i=i, depth=depth)
(x, y) = (fake_positions[c_dom][0] + i, fake_positions[c_dom][1])
fake_positions[dom_node] = (x, y)
cod_node = Node('cod', obj=box.cod.inside[i], i=i, depth=depth)
(x, y) = (fake_positions[c_cod][0] + i, fake_positions[c_cod][1])
fake_positions[cod_node] = (x, y)
shift_boundary = True
if hasattr(box.controlled, 'draw_as_controlled'):
backend = draw_controlled_gate(backend, fake_positions, controlled, **params)
next_box = box.controlled
while hasattr(next_box, 'controlled'):
if controlled_box.distance * next_box.distance < 0:
shift_boundary = False
break
next_box = next_box.controlled
if next_box.name == 'X':
shift_boundary = False
else:
backend = draw_box(backend, fake_positions, controlled, **params)
if shift_boundary:
if box.distance > 0:
target_boundary = (c_middle[0] - 0.25, c_middle[1])
else:
target_boundary = (c_middle[0] + c_size - 1 + 0.25, c_middle[1])
elif box.distance > 0:
target_boundary = (c_middle[0], c_middle[1])
else:
target_boundary = (c_middle[0] + c_size - 1, c_middle[1])
backend.draw_wire(fake_positions[dom], fake_positions[cod])
extra_offset = 1 if distance > 0 else len(box.controlled.dom)
for i in range(extra_offset, extra_offset + abs(distance) - 1):
node1 = Node('dom', obj=box.dom.inside[i], i=i, depth=depth)
node2 = Node('cod', obj=box.cod.inside[i], i=i, depth=depth)
backend.draw_wire(fake_positions[node1], fake_positions[node2])
backend.draw_wire(middle, target_boundary, bend_in=True, bend_out=True)
backend.draw_node(*middle, color='black', shape='circle', nodesize=params.get('nodesize', 1))
backend = backend
next_box = box.controlled
while hasattr(next_box, 'controlled'):
if controlled_box.distance * next_box.distance < 0:
shift_boundary = False
break
next_box = next_box.controlled
if next_box.name == 'X':
shift_boundary = False
else:
backend = draw_box(backend, fake_positions, controlled, **params)
if shift_boundary:
if box.distance > 0:
target_boundary = (c_middle[0] - 0.25, c_middle[1])
else:
target_boundary = (c_middle[0] + c_size - 1 + 0.25, c_middle[1])
elif box.distance > 0:
target_boundary = (c_middle[0], c_middle[1])
else:
target_boundary = (c_middle[0] + c_size - 1, c_middle[1])
backend.draw_wire(positions[dom], positions[cod])
extra_offset = 1 if distance > 0 else len(box.controlled.dom)
for i in range(extra_offset, extra_offset + abs(distance) - 1):
node1 = Node('dom', obj=box.dom.inside[i], i=i, depth=depth)
node2 = Node('cod', obj=box.cod.inside[i], i=i, depth=depth)
backend.draw_wire(positions[node1], positions[node2])
backend.draw_wire(middle, target_boundary, bend_in=True, bend_out=True)
backend.draw_node(*middle, color='black', shape='circle', nodesize=params.get('nodesize', 1))
return backend
|
discopy
|
positive
|
def _check_conflict(action):
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
if confl_optionals:
<DeepExtract>
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
conflict_handler = getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
</DeepExtract>
conflict_handler(action, confl_optionals)
|
def _check_conflict(action):
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
if confl_optionals:
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
conflict_handler = getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
conflict_handler(action, confl_optionals)
|
BioNLP-2016
|
positive
|
def random_operation(self):
self.canary_core.stat_total_operations += 1
operation = random.randint(0, 100)
if self.stopped:
<DeepExtract>
if not self.stopped:
return
self.client.start()
self.canary_core.stat_total_starts += 1
self.stopped = False
</DeepExtract>
elif operation < 10:
<DeepExtract>
self.canary_core.stat_subscribes_attempted += 1
if topic_filter is None:
topic_filter = str(time.time()) + self.client_id
self.canary_core.subscriptions.append(topic_filter)
subscribe_packet = mqtt5.SubscribePacket(subscriptions=[mqtt5.Subscription(topic_filter=topic_filter, qos=qos)])
try:
self.client.subscribe(subscribe_packet=subscribe_packet)
self.canary_core.stat_subscribes_succeeded += 1
except BaseException:
pass
</DeepExtract>
elif operation < 20:
<DeepExtract>
if len(self.canary_core.subscriptions) < 1:
return
self.canary_core.stat_unsubscribes_attempted += 1
unsubscribe_packet = mqtt5.UnsubscribePacket(topic_filters=[self.canary_core.subscriptions.pop()])
try:
self.client.unsubscribe(unsubscribe_packet=unsubscribe_packet)
self.canary_core.stat_unsubscribes_succeeded += 1
except BaseException:
pass
</DeepExtract>
elif operation < 99:
<DeepExtract>
self.canary_core.stat_publishes_attempted += 1
if len(self.canary_core.subscriptions) > 0:
topic_filter = self.canary_core.subscriptions[0]
else:
topic_filter = str(time.time()) + self.client_id
publish_packet = mqtt5.PublishPacket(topic=topic_filter, qos=random.randint(0, 1), payload=bytearray(os.urandom(random.randint(0, 10000))))
if random.getrandbits(1):
publish_packet.user_properties = self.user_properties
try:
self.client.publish(publish_packet=publish_packet)
self.canary_core.stat_publishes_succeeded += 1
except BaseException:
pass
</DeepExtract>
elif not self.stopped:
<DeepExtract>
if self.stopped:
return
self.stopped = True
self.client.stop()
self.canary_core.stat_total_stops += 1
</DeepExtract>
|
def random_operation(self):
self.canary_core.stat_total_operations += 1
operation = random.randint(0, 100)
if self.stopped:
if not self.stopped:
return
self.client.start()
self.canary_core.stat_total_starts += 1
self.stopped = False
elif operation < 10:
self.canary_core.stat_subscribes_attempted += 1
if topic_filter is None:
topic_filter = str(time.time()) + self.client_id
self.canary_core.subscriptions.append(topic_filter)
subscribe_packet = mqtt5.SubscribePacket(subscriptions=[mqtt5.Subscription(topic_filter=topic_filter, qos=qos)])
try:
self.client.subscribe(subscribe_packet=subscribe_packet)
self.canary_core.stat_subscribes_succeeded += 1
except BaseException:
pass
elif operation < 20:
if len(self.canary_core.subscriptions) < 1:
return
self.canary_core.stat_unsubscribes_attempted += 1
unsubscribe_packet = mqtt5.UnsubscribePacket(topic_filters=[self.canary_core.subscriptions.pop()])
try:
self.client.unsubscribe(unsubscribe_packet=unsubscribe_packet)
self.canary_core.stat_unsubscribes_succeeded += 1
except BaseException:
pass
elif operation < 99:
self.canary_core.stat_publishes_attempted += 1
if len(self.canary_core.subscriptions) > 0:
topic_filter = self.canary_core.subscriptions[0]
else:
topic_filter = str(time.time()) + self.client_id
publish_packet = mqtt5.PublishPacket(topic=topic_filter, qos=random.randint(0, 1), payload=bytearray(os.urandom(random.randint(0, 10000))))
if random.getrandbits(1):
publish_packet.user_properties = self.user_properties
try:
self.client.publish(publish_packet=publish_packet)
self.canary_core.stat_publishes_succeeded += 1
except BaseException:
pass
elif not self.stopped:
if self.stopped:
return
self.stopped = True
self.client.stop()
self.canary_core.stat_total_stops += 1
|
aws-crt-python
|
positive
|
def draw(self, stepCount, stepDelay):
<DeepExtract>
(x1, y1) = self.robotPos
</DeepExtract>
x1 = x1 % self.totWidth
if y1 != self.groundY:
raise 'Flying Robot!!'
<DeepExtract>
(armCos, armSin) = self.__getCosAndSin(self.armAngle)
(handCos, handSin) = self.__getCosAndSin(self.handAngle)
x = self.armLength * armCos + self.handLength * handCos + self.robotWidth
y = self.armLength * armSin + self.handLength * handSin + self.robotHeight
if y < 0:
rotationAngle = math.atan(-y / x)
rotationAngle = 0.0
</DeepExtract>
<DeepExtract>
(cosRot, sinRot) = (math.cos(rotationAngle), math.sin(rotationAngle))
</DeepExtract>
x2 = x1 + self.robotWidth * cosRot
y2 = y1 - self.robotWidth * sinRot
x3 = x1 - self.robotHeight * sinRot
y3 = y1 - self.robotHeight * cosRot
x4 = x3 + cosRot * self.robotWidth
y4 = y3 - sinRot * self.robotWidth
self.canvas.coords(self.robotBody, x1, y1, x2, y2, x4, y4, x3, y3)
<DeepExtract>
(armCos, armSin) = (math.cos(rotationAngle + self.armAngle), math.sin(rotationAngle + self.armAngle))
</DeepExtract>
xArm = x4 + self.armLength * armCos
yArm = y4 - self.armLength * armSin
self.canvas.coords(self.robotArm, x4, y4, xArm, yArm)
<DeepExtract>
(handCos, handSin) = (math.cos(self.handAngle + rotationAngle), math.sin(self.handAngle + rotationAngle))
</DeepExtract>
xHand = xArm + self.handLength * handCos
yHand = yArm - self.handLength * handSin
self.canvas.coords(self.robotHand, xArm, yArm, xHand, yHand)
steps = stepCount - self.lastStep
if steps == 0:
return
pos = self.positions[-1]
velocity = pos - self.positions[-2]
vel2 = (pos - self.positions[0]) / len(self.positions)
self.velAvg = 0.9 * self.velAvg + 0.1 * vel2
velMsg = '100-step Avg Velocity: %.2f' % self.velAvg
velocityMsg = 'Velocity: %.2f' % velocity
positionMsg = 'Position: %2.f' % pos
stepMsg = 'Step: %d' % stepCount
if 'vel_msg' in dir(self):
self.canvas.delete(self.vel_msg)
self.canvas.delete(self.pos_msg)
self.canvas.delete(self.step_msg)
self.canvas.delete(self.velavg_msg)
self.velavg_msg = self.canvas.create_text(650, 190, text=velMsg)
self.vel_msg = self.canvas.create_text(450, 190, text=velocityMsg)
self.pos_msg = self.canvas.create_text(250, 190, text=positionMsg)
self.step_msg = self.canvas.create_text(50, 190, text=stepMsg)
self.lastStep = stepCount
|
def draw(self, stepCount, stepDelay):
(x1, y1) = self.robotPos
x1 = x1 % self.totWidth
if y1 != self.groundY:
raise 'Flying Robot!!'
(armCos, armSin) = self.__getCosAndSin(self.armAngle)
(handCos, handSin) = self.__getCosAndSin(self.handAngle)
x = self.armLength * armCos + self.handLength * handCos + self.robotWidth
y = self.armLength * armSin + self.handLength * handSin + self.robotHeight
if y < 0:
rotationAngle = math.atan(-y / x)
rotationAngle = 0.0
(cosRot, sinRot) = (math.cos(rotationAngle), math.sin(rotationAngle))
x2 = x1 + self.robotWidth * cosRot
y2 = y1 - self.robotWidth * sinRot
x3 = x1 - self.robotHeight * sinRot
y3 = y1 - self.robotHeight * cosRot
x4 = x3 + cosRot * self.robotWidth
y4 = y3 - sinRot * self.robotWidth
self.canvas.coords(self.robotBody, x1, y1, x2, y2, x4, y4, x3, y3)
(armCos, armSin) = (math.cos(rotationAngle + self.armAngle), math.sin(rotationAngle + self.armAngle))
xArm = x4 + self.armLength * armCos
yArm = y4 - self.armLength * armSin
self.canvas.coords(self.robotArm, x4, y4, xArm, yArm)
(handCos, handSin) = (math.cos(self.handAngle + rotationAngle), math.sin(self.handAngle + rotationAngle))
xHand = xArm + self.handLength * handCos
yHand = yArm - self.handLength * handSin
self.canvas.coords(self.robotHand, xArm, yArm, xHand, yHand)
steps = stepCount - self.lastStep
if steps == 0:
return
pos = self.positions[-1]
velocity = pos - self.positions[-2]
vel2 = (pos - self.positions[0]) / len(self.positions)
self.velAvg = 0.9 * self.velAvg + 0.1 * vel2
velMsg = '100-step Avg Velocity: %.2f' % self.velAvg
velocityMsg = 'Velocity: %.2f' % velocity
positionMsg = 'Position: %2.f' % pos
stepMsg = 'Step: %d' % stepCount
if 'vel_msg' in dir(self):
self.canvas.delete(self.vel_msg)
self.canvas.delete(self.pos_msg)
self.canvas.delete(self.step_msg)
self.canvas.delete(self.velavg_msg)
self.velavg_msg = self.canvas.create_text(650, 190, text=velMsg)
self.vel_msg = self.canvas.create_text(450, 190, text=velocityMsg)
self.pos_msg = self.canvas.create_text(250, 190, text=positionMsg)
self.step_msg = self.canvas.create_text(50, 190, text=stepMsg)
self.lastStep = stepCount
|
comp90054-cheat
|
positive
|
def test_egonet_splitter(self):
if karateclub is None:
return
<DeepExtract>
g = nx.karate_club_graph()
node_map = {}
for n in g.nodes():
node_map[n] = '$%s$' % n
nx.relabel_nodes(g, node_map, False)
g = g
</DeepExtract>
coms = algorithms.egonet_splitter(g)
self.assertEqual(type(coms.communities), list)
if len(coms.communities) > 0:
self.assertEqual(type(coms.communities[0]), list)
self.assertEqual(type(coms.communities[0][0]), str)
|
def test_egonet_splitter(self):
if karateclub is None:
return
g = nx.karate_club_graph()
node_map = {}
for n in g.nodes():
node_map[n] = '$%s$' % n
nx.relabel_nodes(g, node_map, False)
g = g
coms = algorithms.egonet_splitter(g)
self.assertEqual(type(coms.communities), list)
if len(coms.communities) > 0:
self.assertEqual(type(coms.communities[0]), list)
self.assertEqual(type(coms.communities[0][0]), str)
|
cdlib
|
positive
|
def cmd_pull(self):
result = dict(changed=False, actions=[])
if not self.check_mode:
for service in self.project.get_services(self.services, include_deps=False):
if 'image' not in service.options:
continue
self.log('Pulling image for service %s' % service.name)
old_image_id = ''
try:
image = service.image()
if image and image.get('Id'):
old_image_id = image['Id']
except NoSuchImageError:
pass
except Exception as exc:
self.client.fail('Error: service image lookup failed - %s' % to_native(exc))
<DeepExtract>
(dummy, out_redir_name) = tempfile.mkstemp(prefix='ansible')
(dummy, err_redir_name) = tempfile.mkstemp(prefix='ansible')
(out_redir_name, err_redir_name) = (out_redir_name, err_redir_name)
</DeepExtract>
try:
with stdout_redirector(out_redir_name):
with stderr_redirector(err_redir_name):
service.pull(ignore_pull_failures=False)
except Exception as exc:
<DeepExtract>
if err_redir_name is None:
stderr = []
else:
stderr = get_redirected_output(err_redir_name)
stdout = get_redirected_output(out_redir_name)
reason = attempt_extract_errors(str(exc), stdout, stderr)
reason['msg'] = 'Error: pull failed with %s' % reason['msg']
fail_reason = reason
</DeepExtract>
self.client.fail(**fail_reason)
else:
<DeepExtract>
for i in [out_redir_name, err_redir_name]:
os.remove(i)
</DeepExtract>
new_image_id = ''
try:
image = service.image()
if image and image.get('Id'):
new_image_id = image['Id']
except NoSuchImageError as exc:
self.client.fail('Error: service image lookup failed after pull - %s' % to_native(exc))
if new_image_id != old_image_id:
result['changed'] = True
result['actions'].append(dict(service=service.name, pulled_image=dict(name=service.image_name, id=new_image_id)))
return result
|
def cmd_pull(self):
result = dict(changed=False, actions=[])
if not self.check_mode:
for service in self.project.get_services(self.services, include_deps=False):
if 'image' not in service.options:
continue
self.log('Pulling image for service %s' % service.name)
old_image_id = ''
try:
image = service.image()
if image and image.get('Id'):
old_image_id = image['Id']
except NoSuchImageError:
pass
except Exception as exc:
self.client.fail('Error: service image lookup failed - %s' % to_native(exc))
(dummy, out_redir_name) = tempfile.mkstemp(prefix='ansible')
(dummy, err_redir_name) = tempfile.mkstemp(prefix='ansible')
(out_redir_name, err_redir_name) = (out_redir_name, err_redir_name)
try:
with stdout_redirector(out_redir_name):
with stderr_redirector(err_redir_name):
service.pull(ignore_pull_failures=False)
except Exception as exc:
if err_redir_name is None:
stderr = []
else:
stderr = get_redirected_output(err_redir_name)
stdout = get_redirected_output(out_redir_name)
reason = attempt_extract_errors(str(exc), stdout, stderr)
reason['msg'] = 'Error: pull failed with %s' % reason['msg']
fail_reason = reason
self.client.fail(**fail_reason)
else:
for i in [out_redir_name, err_redir_name]:
os.remove(i)
new_image_id = ''
try:
image = service.image()
if image and image.get('Id'):
new_image_id = image['Id']
except NoSuchImageError as exc:
self.client.fail('Error: service image lookup failed after pull - %s' % to_native(exc))
if new_image_id != old_image_id:
result['changed'] = True
result['actions'].append(dict(service=service.name, pulled_image=dict(name=service.image_name, id=new_image_id)))
return result
|
community.docker
|
positive
|
def validate_proof_of_work(header: Header) -> None:
"""
Validates the Proof of Work constraints.
In order to verify that a miner's proof-of-work is valid for a block, a
``mix-digest`` and ``result`` are calculated using the ``hashimoto_light``
hash function. The mix digest is a hash of the header and the nonce that
is passed through and it confirms whether or not proof-of-work was done
on the correct block. The result is the actual hash value of the block.
Parameters
----------
header :
Header of interest.
"""
<DeepExtract>
header_data_without_pow_artefacts = [header.parent_hash, header.ommers_hash, header.coinbase, header.state_root, header.transactions_root, header.receipt_root, header.bloom, header.difficulty, header.number, header.gas_limit, header.gas_used, header.timestamp, header.extra_data]
header_hash = rlp.rlp_hash(header_data_without_pow_artefacts)
</DeepExtract>
cache = generate_cache(header.number)
(mix_digest, result) = hashimoto_light(header_hash, header.nonce, cache, dataset_size(header.number))
ensure(mix_digest == header.mix_digest, InvalidBlock)
ensure(Uint.from_be_bytes(result) <= U256_CEIL_VALUE // header.difficulty, InvalidBlock)
|
def validate_proof_of_work(header: Header) -> None:
"""
Validates the Proof of Work constraints.
In order to verify that a miner's proof-of-work is valid for a block, a
``mix-digest`` and ``result`` are calculated using the ``hashimoto_light``
hash function. The mix digest is a hash of the header and the nonce that
is passed through and it confirms whether or not proof-of-work was done
on the correct block. The result is the actual hash value of the block.
Parameters
----------
header :
Header of interest.
"""
header_data_without_pow_artefacts = [header.parent_hash, header.ommers_hash, header.coinbase, header.state_root, header.transactions_root, header.receipt_root, header.bloom, header.difficulty, header.number, header.gas_limit, header.gas_used, header.timestamp, header.extra_data]
header_hash = rlp.rlp_hash(header_data_without_pow_artefacts)
cache = generate_cache(header.number)
(mix_digest, result) = hashimoto_light(header_hash, header.nonce, cache, dataset_size(header.number))
ensure(mix_digest == header.mix_digest, InvalidBlock)
ensure(Uint.from_be_bytes(result) <= U256_CEIL_VALUE // header.difficulty, InvalidBlock)
|
eth1.0-specs
|
positive
|
def _as_chunk(self):
"""
Parse the contents of a primitive BitString encoding as an integer value.
Allows reconstructing indefinite length values.
:raises:
ValueError - when an invalid value is passed
:return:
A list with one tuple (value, bits, unused_bits) where value is an integer
with the value of the BitString, bits is the bit count of value and
unused_bits is a tuple of 1s and 0s.
"""
if self._indefinite:
return []
unused_bits_len = ord(self.contents[0]) if _PY2 else self.contents[0]
value = int_from_bytes(self.contents[1:])
bits = (len(self.contents) - 1) * 8
if not unused_bits_len:
return [(value, bits, ())]
if len(self.contents) == 1:
raise ValueError('Empty bit string has {0} unused bits'.format(unused_bits_len))
if unused_bits_len > 7:
raise ValueError('Bit string has {0} unused bits'.format(unused_bits_len))
<DeepExtract>
if not value & (1 << unused_bits_len) - 1 and (not unused_bits_len):
unused_bits = ()
result = tuple(map(int, format(value & (1 << unused_bits_len) - 1, '0{0}b'.format(unused_bits_len))))
if len(result) != unused_bits_len:
raise ValueError('Result too large: {0} > {1}'.format(len(result), unused_bits_len))
unused_bits = result
</DeepExtract>
value >>= unused_bits_len
bits -= unused_bits_len
return [(value, bits, unused_bits)]
|
def _as_chunk(self):
"""
Parse the contents of a primitive BitString encoding as an integer value.
Allows reconstructing indefinite length values.
:raises:
ValueError - when an invalid value is passed
:return:
A list with one tuple (value, bits, unused_bits) where value is an integer
with the value of the BitString, bits is the bit count of value and
unused_bits is a tuple of 1s and 0s.
"""
if self._indefinite:
return []
unused_bits_len = ord(self.contents[0]) if _PY2 else self.contents[0]
value = int_from_bytes(self.contents[1:])
bits = (len(self.contents) - 1) * 8
if not unused_bits_len:
return [(value, bits, ())]
if len(self.contents) == 1:
raise ValueError('Empty bit string has {0} unused bits'.format(unused_bits_len))
if unused_bits_len > 7:
raise ValueError('Bit string has {0} unused bits'.format(unused_bits_len))
if not value & (1 << unused_bits_len) - 1 and (not unused_bits_len):
unused_bits = ()
result = tuple(map(int, format(value & (1 << unused_bits_len) - 1, '0{0}b'.format(unused_bits_len))))
if len(result) != unused_bits_len:
raise ValueError('Result too large: {0} > {1}'.format(len(result), unused_bits_len))
unused_bits = result
value >>= unused_bits_len
bits -= unused_bits_len
return [(value, bits, unused_bits)]
|
asn1crypto
|
positive
|
def _match_long_opt(self, opt: str, explicit_value: t.Optional[str], state: ParsingState) -> None:
if opt not in self._long_opt:
from difflib import get_close_matches
possibilities = get_close_matches(opt, self._long_opt)
raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
option = self._long_opt[opt]
if option.takes_value:
if explicit_value is not None:
state.rargs.insert(0, explicit_value)
<DeepExtract>
nargs = option.nargs
if len(state.rargs) < nargs:
if option.obj._flag_needs_value:
value = _flag_needs_value
else:
raise BadOptionUsage(opt, ngettext('Option {name!r} requires an argument.', 'Option {name!r} requires {nargs} arguments.', nargs).format(name=opt, nargs=nargs))
elif nargs == 1:
next_rarg = state.rargs[0]
if option.obj._flag_needs_value and isinstance(next_rarg, str) and (next_rarg[:1] in self._opt_prefixes) and (len(next_rarg) > 1):
value = _flag_needs_value
else:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
value = value
</DeepExtract>
elif explicit_value is not None:
raise BadOptionUsage(opt, _('Option {name!r} does not take a value.').format(name=opt))
else:
value = None
option.process(value, state)
|
def _match_long_opt(self, opt: str, explicit_value: t.Optional[str], state: ParsingState) -> None:
if opt not in self._long_opt:
from difflib import get_close_matches
possibilities = get_close_matches(opt, self._long_opt)
raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
option = self._long_opt[opt]
if option.takes_value:
if explicit_value is not None:
state.rargs.insert(0, explicit_value)
nargs = option.nargs
if len(state.rargs) < nargs:
if option.obj._flag_needs_value:
value = _flag_needs_value
else:
raise BadOptionUsage(opt, ngettext('Option {name!r} requires an argument.', 'Option {name!r} requires {nargs} arguments.', nargs).format(name=opt, nargs=nargs))
elif nargs == 1:
next_rarg = state.rargs[0]
if option.obj._flag_needs_value and isinstance(next_rarg, str) and (next_rarg[:1] in self._opt_prefixes) and (len(next_rarg) > 1):
value = _flag_needs_value
else:
value = state.rargs.pop(0)
else:
value = tuple(state.rargs[:nargs])
del state.rargs[:nargs]
value = value
elif explicit_value is not None:
raise BadOptionUsage(opt, _('Option {name!r} does not take a value.').format(name=opt))
else:
value = None
option.process(value, state)
|
click
|
positive
|
@patch(fqname(Outlet), spec=Outlet)
@patch(fqname(Inlet), spec=Inlet)
def test_processors_many(self, inlet, outlet):
records = [2, 3]
<DeepExtract>
if records is None:
records = [object()]
async def pull_coro(_):
inlet._pull = records
inlet._pull = MagicMock(side_effect=pull_coro)
</DeepExtract>
processorA = MagicMock(side_effect=lambda x: x)
processorB = MagicMock(side_effect=lambda x: x)
link = Link(inlet, outlet, interval=0.01, processors=[processorA, processorB])
link.transfer()
processorA.assert_called_with(records)
processorB.assert_called_with(records)
outlet._push.assert_called_with(records, mock.ANY)
|
@patch(fqname(Outlet), spec=Outlet)
@patch(fqname(Inlet), spec=Inlet)
def test_processors_many(self, inlet, outlet):
records = [2, 3]
if records is None:
records = [object()]
async def pull_coro(_):
inlet._pull = records
inlet._pull = MagicMock(side_effect=pull_coro)
processorA = MagicMock(side_effect=lambda x: x)
processorB = MagicMock(side_effect=lambda x: x)
link = Link(inlet, outlet, interval=0.01, processors=[processorA, processorB])
link.transfer()
processorA.assert_called_with(records)
processorB.assert_called_with(records)
outlet._push.assert_called_with(records, mock.ANY)
|
databay
|
positive
|
@memo.BSEMemoize
def _family_notes_path(family, data_dir):
"""Form a path to the notes for a family"""
<DeepExtract>
data_dir = _default_data_dir if data_dir is None else data_dir
</DeepExtract>
family = family.lower()
if family not in get_families(data_dir):
raise RuntimeError("Family '{}' does not exist".format(family))
file_name = 'NOTES.' + family.lower()
file_path = os.path.join(data_dir, file_name)
return file_path
|
@memo.BSEMemoize
def _family_notes_path(family, data_dir):
"""Form a path to the notes for a family"""
data_dir = _default_data_dir if data_dir is None else data_dir
family = family.lower()
if family not in get_families(data_dir):
raise RuntimeError("Family '{}' does not exist".format(family))
file_name = 'NOTES.' + family.lower()
file_path = os.path.join(data_dir, file_name)
return file_path
|
basis_set_exchange
|
positive
|
def evaluate_keypoints(json_dataset, all_boxes, all_keypoints, output_dir, use_salt=True, cleanup=False):
res_file = os.path.join(output_dir, 'keypoints_' + json_dataset.name + '_results')
if use_salt:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
<DeepExtract>
results = []
for (cls_ind, cls) in enumerate(json_dataset.classes):
if cls == '__background__':
continue
if cls_ind >= len(all_keypoints):
break
logger.info('Collecting {} results ({:d}/{:d})'.format(cls, cls_ind, len(all_keypoints) - 1))
cat_id = json_dataset.category_to_id_map[cls]
results.extend(_coco_kp_results_one_category(json_dataset, all_boxes[cls_ind], all_keypoints[cls_ind], cat_id))
logger.info('Writing keypoint results json to: {}'.format(os.path.abspath(res_file)))
with open(res_file, 'w') as fid:
json.dump(results, fid)
</DeepExtract>
if json_dataset.name.find('test') == -1:
<DeepExtract>
ann_type = 'keypoints'
imgIds = json_dataset.COCO.getImgIds()
imgIds.sort()
coco_dt = json_dataset.COCO.loadRes(res_file)
coco_eval = COCOeval(json_dataset.COCO, coco_dt, ann_type)
coco_eval.params.imgIds = imgIds
coco_eval.evaluate()
coco_eval.accumulate()
eval_file = os.path.join(output_dir, 'keypoint_results.pkl')
save_object(coco_eval, eval_file)
logger.info('Wrote json eval results to: {}'.format(eval_file))
coco_eval.summarize()
coco_eval = coco_eval
</DeepExtract>
else:
coco_eval = None
if cleanup:
os.remove(res_file)
return coco_eval
|
def evaluate_keypoints(json_dataset, all_boxes, all_keypoints, output_dir, use_salt=True, cleanup=False):
res_file = os.path.join(output_dir, 'keypoints_' + json_dataset.name + '_results')
if use_salt:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
results = []
for (cls_ind, cls) in enumerate(json_dataset.classes):
if cls == '__background__':
continue
if cls_ind >= len(all_keypoints):
break
logger.info('Collecting {} results ({:d}/{:d})'.format(cls, cls_ind, len(all_keypoints) - 1))
cat_id = json_dataset.category_to_id_map[cls]
results.extend(_coco_kp_results_one_category(json_dataset, all_boxes[cls_ind], all_keypoints[cls_ind], cat_id))
logger.info('Writing keypoint results json to: {}'.format(os.path.abspath(res_file)))
with open(res_file, 'w') as fid:
json.dump(results, fid)
if json_dataset.name.find('test') == -1:
ann_type = 'keypoints'
imgIds = json_dataset.COCO.getImgIds()
imgIds.sort()
coco_dt = json_dataset.COCO.loadRes(res_file)
coco_eval = COCOeval(json_dataset.COCO, coco_dt, ann_type)
coco_eval.params.imgIds = imgIds
coco_eval.evaluate()
coco_eval.accumulate()
eval_file = os.path.join(output_dir, 'keypoint_results.pkl')
save_object(coco_eval, eval_file)
logger.info('Wrote json eval results to: {}'.format(eval_file))
coco_eval.summarize()
coco_eval = coco_eval
else:
coco_eval = None
if cleanup:
os.remove(res_file)
return coco_eval
|
AIC2018_iamai
|
positive
|
def Vigenere(self, plaintext):
<DeepExtract>
tab = dict()
for (counter, i) in enumerate(self.group):
tab[self.group[counter]] = counter
real_key = []
for i in self.random_key(plaintext):
real_key.append(tab[i])
self.random_key(plaintext) = real_key
</DeepExtract>
cipheycore.vigenere_encrypt(plaintext, key, self.group)
|
def Vigenere(self, plaintext):
tab = dict()
for (counter, i) in enumerate(self.group):
tab[self.group[counter]] = counter
real_key = []
for i in self.random_key(plaintext):
real_key.append(tab[i])
self.random_key(plaintext) = real_key
cipheycore.vigenere_encrypt(plaintext, key, self.group)
|
Ciphey
|
positive
|
@timing
def get_conf_matrix(rh, incs):
"""
Iterates through runhistory to get a matrix of configurations (in
vector representation), a list of configurations and the number of
runs per configuration in a quantiled manner.
Parameters
----------
rh: RunHistory
smac.runhistory
incs: List[List[Configuration]]
incumbents of configurator runs, last entry is final incumbent
Returns
-------
conf_matrix: np.array
matrix of configurations in vector representation
conf_list: np.array
list of all Configuration objects that appeared in runhistory
the order of this list is used to determine all kinds of properties
in the plotting (but is arbitrarily determined)
runs_per_quantile: np.array
numpy array of runs per configuration per quantile
labels: List[str]
labels for timeslider (i.e. wallclock-times)
"""
conf_list = []
conf_matrix = []
for c in rh.get_all_configs():
if c not in conf_list:
conf_matrix.append(c.get_array())
conf_list.append(c)
for inc in [a for b in incs for a in b]:
if inc not in conf_list:
conf_matrix.append(inc.get_array())
conf_list.append(inc)
if self.num_quantiles >= len(conf_list):
self.logger.info('Number of quantiles %d bigger than number of configs %d, reducing to %d quantiles', self.num_quantiles, len(conf_list), len(conf_list) - 1)
self.num_quantiles = len(conf_list) - 1
<DeepExtract>
runs_total = len(rh.data)
(labels, last_time_seen) = ([], -1)
r_p_q_p_c = []
as_list = list(rh.data.items())
scale = np.geomspace if self.timeslider_log else np.linspace
timestamps = None
try:
as_list = sorted(as_list, key=lambda x: x[1].additional_info['timestamps']['finished'])
timestamps = [x[1].additional_info['timestamps']['finished'] for x in as_list]
time_ranges = scale(timestamps[0], timestamps[-1], num=self.num_quantiles + 1, endpoint=True)
ranges = []
idx = 0
for (time_idx, time) in enumerate(time_ranges):
while len(timestamps) - 1 > idx and (timestamps[idx] < time or idx <= time_idx):
idx += 1
ranges.append(idx)
except (KeyError, TypeError) as err:
self.logger.debug(err)
self.logger.debug('Failed to sort by timestamps... only a reason to worry if this is BOHB-analysis')
ranges = [int(x) for x in scale(1, runs_total, num=self.num_quantiles + 1)]
ranges[0] = 0
ranges[-1] = len(as_list)
self.logger.debug('Creating %d quantiles with a total number of runs of %d', self.num_quantiles, runs_total)
self.logger.debug('Ranges: %s', str(ranges))
for r in range(len(ranges))[1:]:
if ranges[r] <= ranges[r - 1]:
if ranges[r - 1] + 1 >= len(as_list):
raise RuntimeError('There was a problem with the quantiles of the configuration footprint. Please report this Error on "https://github.com/automl/CAVE/issues" and provide the debug.txt-file.')
ranges[r] = ranges[r - 1] + 1
self.logger.debug('Fixed ranges to: %s', str(ranges))
if not ranges[0] == 0 or not ranges[-1] == len(as_list) or (not len(ranges) == self.num_quantiles + 1):
raise RuntimeError('Sanity check on range-creation in configurator footprint went wrong. Please report this Error on "https://github.com/automl/CAVE/issues" and provide the debug.txt-file.')
tmp_rh = RunHistory()
for (i, j) in zip(ranges[:-1], ranges[1:]):
for idx in range(i, j):
(k, v) = as_list[idx]
tmp_rh.add(config=rh.ids_config[k.config_id], cost=v.cost, time=v.time, status=v.status, instance_id=k.instance_id, seed=k.seed, additional_info=v.additional_info)
if timestamps:
labels.append('{0:.2f}'.format(timestamps[j - 1]))
r_p_q_p_c.append([len(tmp_rh.get_runs_for_config(c, only_max_observed_budget=False)) for c in conf_list])
self.logger.debug('Labels: ' + str(labels))
(labels, runs_per_quantile) = (labels, r_p_q_p_c)
</DeepExtract>
assert len(runs_per_quantile) == self.num_quantiles
self.min_runs_per_conf = min([i for i in runs_per_quantile[-1] if i > 0])
self.max_runs_per_conf = max(runs_per_quantile[-1])
self.logger.debug('Min runs per conf: %d, Max runs per conf: %d', self.min_runs_per_conf, self.max_runs_per_conf)
self.logger.debug('Gathered %d configurations from 1 runhistories.' % len(conf_list))
runs_per_quantile = np.array([np.array(run) for run in runs_per_quantile])
return (np.array(conf_matrix), np.array(conf_list), runs_per_quantile, labels)
|
@timing
def get_conf_matrix(rh, incs):
"""
Iterates through runhistory to get a matrix of configurations (in
vector representation), a list of configurations and the number of
runs per configuration in a quantiled manner.
Parameters
----------
rh: RunHistory
smac.runhistory
incs: List[List[Configuration]]
incumbents of configurator runs, last entry is final incumbent
Returns
-------
conf_matrix: np.array
matrix of configurations in vector representation
conf_list: np.array
list of all Configuration objects that appeared in runhistory
the order of this list is used to determine all kinds of properties
in the plotting (but is arbitrarily determined)
runs_per_quantile: np.array
numpy array of runs per configuration per quantile
labels: List[str]
labels for timeslider (i.e. wallclock-times)
"""
conf_list = []
conf_matrix = []
for c in rh.get_all_configs():
if c not in conf_list:
conf_matrix.append(c.get_array())
conf_list.append(c)
for inc in [a for b in incs for a in b]:
if inc not in conf_list:
conf_matrix.append(inc.get_array())
conf_list.append(inc)
if self.num_quantiles >= len(conf_list):
self.logger.info('Number of quantiles %d bigger than number of configs %d, reducing to %d quantiles', self.num_quantiles, len(conf_list), len(conf_list) - 1)
self.num_quantiles = len(conf_list) - 1
runs_total = len(rh.data)
(labels, last_time_seen) = ([], -1)
r_p_q_p_c = []
as_list = list(rh.data.items())
scale = np.geomspace if self.timeslider_log else np.linspace
timestamps = None
try:
as_list = sorted(as_list, key=lambda x: x[1].additional_info['timestamps']['finished'])
timestamps = [x[1].additional_info['timestamps']['finished'] for x in as_list]
time_ranges = scale(timestamps[0], timestamps[-1], num=self.num_quantiles + 1, endpoint=True)
ranges = []
idx = 0
for (time_idx, time) in enumerate(time_ranges):
while len(timestamps) - 1 > idx and (timestamps[idx] < time or idx <= time_idx):
idx += 1
ranges.append(idx)
except (KeyError, TypeError) as err:
self.logger.debug(err)
self.logger.debug('Failed to sort by timestamps... only a reason to worry if this is BOHB-analysis')
ranges = [int(x) for x in scale(1, runs_total, num=self.num_quantiles + 1)]
ranges[0] = 0
ranges[-1] = len(as_list)
self.logger.debug('Creating %d quantiles with a total number of runs of %d', self.num_quantiles, runs_total)
self.logger.debug('Ranges: %s', str(ranges))
for r in range(len(ranges))[1:]:
if ranges[r] <= ranges[r - 1]:
if ranges[r - 1] + 1 >= len(as_list):
raise RuntimeError('There was a problem with the quantiles of the configuration footprint. Please report this Error on "https://github.com/automl/CAVE/issues" and provide the debug.txt-file.')
ranges[r] = ranges[r - 1] + 1
self.logger.debug('Fixed ranges to: %s', str(ranges))
if not ranges[0] == 0 or not ranges[-1] == len(as_list) or (not len(ranges) == self.num_quantiles + 1):
raise RuntimeError('Sanity check on range-creation in configurator footprint went wrong. Please report this Error on "https://github.com/automl/CAVE/issues" and provide the debug.txt-file.')
tmp_rh = RunHistory()
for (i, j) in zip(ranges[:-1], ranges[1:]):
for idx in range(i, j):
(k, v) = as_list[idx]
tmp_rh.add(config=rh.ids_config[k.config_id], cost=v.cost, time=v.time, status=v.status, instance_id=k.instance_id, seed=k.seed, additional_info=v.additional_info)
if timestamps:
labels.append('{0:.2f}'.format(timestamps[j - 1]))
r_p_q_p_c.append([len(tmp_rh.get_runs_for_config(c, only_max_observed_budget=False)) for c in conf_list])
self.logger.debug('Labels: ' + str(labels))
(labels, runs_per_quantile) = (labels, r_p_q_p_c)
assert len(runs_per_quantile) == self.num_quantiles
self.min_runs_per_conf = min([i for i in runs_per_quantile[-1] if i > 0])
self.max_runs_per_conf = max(runs_per_quantile[-1])
self.logger.debug('Min runs per conf: %d, Max runs per conf: %d', self.min_runs_per_conf, self.max_runs_per_conf)
self.logger.debug('Gathered %d configurations from 1 runhistories.' % len(conf_list))
runs_per_quantile = np.array([np.array(run) for run in runs_per_quantile])
return (np.array(conf_matrix), np.array(conf_list), runs_per_quantile, labels)
|
CAVE
|
positive
|
@mock.patch('boto3.resources.collection.ResourceHandler')
def test_filter_does_not_clobber_existing_list_values(self, handler):
self.collection_def = {'request': {'operation': 'GetFrobs', 'params': [{'target': 'Filters[0].Name', 'source': 'string', 'value': 'frob-id'}, {'target': 'Filters[0].Values[0]', 'source': 'identifier', 'name': 'Id'}]}, 'resource': {'type': 'Frob', 'identifiers': [{'target': 'Id', 'source': 'response', 'path': 'Frobs[].Id'}]}}
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = []
handler.return_value.return_value = []
<DeepExtract>
resource_defs = {'Frob': {'identifiers': []}}
resource_def = self.collection_def.get('resource', {})
for identifier in resource_def.get('identifiers', []):
resource_defs['Frob']['identifiers'].append({'name': identifier['target']})
collection_model = Collection('test', self.collection_def, resource_defs)
collection = CollectionManager(collection_model=collection_model, parent=self.parent, factory=self.factory, service_context=ServiceContext(service_name='test', service_model=self.service_model, resource_json_definitions=resource_defs, service_waiter_model=None))
collection = collection
</DeepExtract>
self.parent.id = 'my-id'
list(collection.filter(Filters=[{'Name': 'another-filter', 'Values': ['foo']}]))
paginator = self.client.get_paginator.return_value
paginator.paginate.assert_called_with(PaginationConfig={'PageSize': None, 'MaxItems': None}, Filters=[{'Values': ['my-id'], 'Name': 'frob-id'}, {'Values': ['foo'], 'Name': 'another-filter'}])
|
@mock.patch('boto3.resources.collection.ResourceHandler')
def test_filter_does_not_clobber_existing_list_values(self, handler):
self.collection_def = {'request': {'operation': 'GetFrobs', 'params': [{'target': 'Filters[0].Name', 'source': 'string', 'value': 'frob-id'}, {'target': 'Filters[0].Values[0]', 'source': 'identifier', 'name': 'Id'}]}, 'resource': {'type': 'Frob', 'identifiers': [{'target': 'Id', 'source': 'response', 'path': 'Frobs[].Id'}]}}
self.client.can_paginate.return_value = True
self.client.get_paginator.return_value.paginate.return_value = []
handler.return_value.return_value = []
resource_defs = {'Frob': {'identifiers': []}}
resource_def = self.collection_def.get('resource', {})
for identifier in resource_def.get('identifiers', []):
resource_defs['Frob']['identifiers'].append({'name': identifier['target']})
collection_model = Collection('test', self.collection_def, resource_defs)
collection = CollectionManager(collection_model=collection_model, parent=self.parent, factory=self.factory, service_context=ServiceContext(service_name='test', service_model=self.service_model, resource_json_definitions=resource_defs, service_waiter_model=None))
collection = collection
self.parent.id = 'my-id'
list(collection.filter(Filters=[{'Name': 'another-filter', 'Values': ['foo']}]))
paginator = self.client.get_paginator.return_value
paginator.paginate.assert_called_with(PaginationConfig={'PageSize': None, 'MaxItems': None}, Filters=[{'Values': ['my-id'], 'Name': 'frob-id'}, {'Values': ['foo'], 'Name': 'another-filter'}])
|
boto3
|
positive
|
def convert_hfr2row(self, hfr):
"""
Convert a HyperFrameRecord into a tuple (row). The user can input either a tuple (x,y,z), in which case we
fabricate column names. Or the user may pass a dictionary. If there are multiple values to unpack then we
will store them into Python lists. Note, if the names are generic, we return the tuple form.
Args:
hfr:
Returns:
"""
frames = hfr.get_frames(self)
row = []
for fr in frames:
if fr.is_local_fs_link_frame() or fr.is_s3_link_frame():
<DeepExtract>
file_set = []
if not (fr.is_local_fs_link_frame() or fr.is_s3_link_frame()):
_logger.error('actualize_link_urls called on non-link frame.')
raise ValueError('actualize_link_urls called on non-link frame.')
urls = fr.get_link_urls()
assert urllib.parse.urlparse(urls[0]).scheme == common.BUNDLE_URI_SCHEME.replace('://', '')
local_dir = self.get_object_dir()
local_file_set = [os.path.join(local_dir, fr.hframe_uuid, f.replace(common.BUNDLE_URI_SCHEME, '')) for f in urls]
for (lf, rurl) in zip(local_file_set, urls):
if os.path.isfile(lf):
if not True:
lf = urllib.parse.urljoin('file:', lf)
file_set.append(lf)
else:
remote_dir = self.get_remote_object_dir()
if remote_dir is not None:
file_set.append(os.path.join(remote_dir, fr.hframe_uuid, rurl.replace(common.BUNDLE_URI_SCHEME, '')))
else:
_logger.info('actualize_link_urls: Files are not local, and no remote context bound.')
raise Exception('actualize_link_urls: Files are not local, and no remote context bound.')
src_paths = file_set
</DeepExtract>
if len(src_paths) == 1:
row.append((fr.pb.name, src_paths[0]))
else:
row.append((fr.pb.name, np.array(src_paths)))
elif fr.pb.shape[0] == 1:
row.append((fr.pb.name, fr.to_ndarray().item()))
else:
row.append((fr.pb.name, fr.to_ndarray()))
if common.DEFAULT_FRAME_NAME in frames[0].pb.name:
tuple_of_lists = tuple([r[1] for r in row])
if len(tuple_of_lists) == 1:
return tuple(tuple_of_lists[0])
return tuple_of_lists
else:
d = {t[0]: t[1] if isinstance(t[1], (tuple, list, np.ndarray)) else [t[1]] for t in row}
return d
|
def convert_hfr2row(self, hfr):
"""
Convert a HyperFrameRecord into a tuple (row). The user can input either a tuple (x,y,z), in which case we
fabricate column names. Or the user may pass a dictionary. If there are multiple values to unpack then we
will store them into Python lists. Note, if the names are generic, we return the tuple form.
Args:
hfr:
Returns:
"""
frames = hfr.get_frames(self)
row = []
for fr in frames:
if fr.is_local_fs_link_frame() or fr.is_s3_link_frame():
file_set = []
if not (fr.is_local_fs_link_frame() or fr.is_s3_link_frame()):
_logger.error('actualize_link_urls called on non-link frame.')
raise ValueError('actualize_link_urls called on non-link frame.')
urls = fr.get_link_urls()
assert urllib.parse.urlparse(urls[0]).scheme == common.BUNDLE_URI_SCHEME.replace('://', '')
local_dir = self.get_object_dir()
local_file_set = [os.path.join(local_dir, fr.hframe_uuid, f.replace(common.BUNDLE_URI_SCHEME, '')) for f in urls]
for (lf, rurl) in zip(local_file_set, urls):
if os.path.isfile(lf):
if not True:
lf = urllib.parse.urljoin('file:', lf)
file_set.append(lf)
else:
remote_dir = self.get_remote_object_dir()
if remote_dir is not None:
file_set.append(os.path.join(remote_dir, fr.hframe_uuid, rurl.replace(common.BUNDLE_URI_SCHEME, '')))
else:
_logger.info('actualize_link_urls: Files are not local, and no remote context bound.')
raise Exception('actualize_link_urls: Files are not local, and no remote context bound.')
src_paths = file_set
if len(src_paths) == 1:
row.append((fr.pb.name, src_paths[0]))
else:
row.append((fr.pb.name, np.array(src_paths)))
elif fr.pb.shape[0] == 1:
row.append((fr.pb.name, fr.to_ndarray().item()))
else:
row.append((fr.pb.name, fr.to_ndarray()))
if common.DEFAULT_FRAME_NAME in frames[0].pb.name:
tuple_of_lists = tuple([r[1] for r in row])
if len(tuple_of_lists) == 1:
return tuple(tuple_of_lists[0])
return tuple_of_lists
else:
d = {t[0]: t[1] if isinstance(t[1], (tuple, list, np.ndarray)) else [t[1]] for t in row}
return d
|
disdat
|
positive
|
def test_interpolate_1d_linear_extrapolate_linear(self):
"""1D linear interpolation. Test values in the extrapolation areas"""
<DeepExtract>
if x is None:
x = self.x
if data is None:
data = self.data
self.interp_data = np.array([1.0, 0.827344627425, 0.65468925485, 0.482033882274, 0.19022089727, -0.135637119857, -0.461495136984, -0.671036971053, -0.787525858675, -0.904014746296, -0.907509439898, -0.685015745458, -0.462522051019, -0.229870470166, 0.084044201987, 0.397958874141, 0.711873546294, 0.796577712584, 0.852630565642, 0.908683418699, 0.751249583376, 0.487072403865, 0.222895224353, -0.052965755187, -0.343431484761, -0.633897214335, -0.858384419526, -0.851946789376, -0.845509159226, -0.839071529076], dtype=np.float64)
self.extrap_data_nea = np.array([1.0, 1.0, -0.839071529076, -0.839071529076], dtype=np.float64)
self.extrap_data_lin = np.array([1.4005604643743956, 1.2002802321871977, -0.831603878102657, -0.8241362271288615], dtype=np.float64)
self.interp_func = interpolators1d.Interpolate1DLinear(x, data, extrapolate=True, extrapolation_range=extrapolation_range, extrapolation_type='linear', tolerate_single_value=tolerate_single_value)
</DeepExtract>
for i in range(len(self.xsamples_extrapol)):
x = self.xsamples_extrapol[i]
self.assertAlmostEqual(self.interp_func(x), self.extrap_data_lin[i], delta=1e-08)
for order in range(1, 4):
self.assertAlmostEqual(self.interp_func.derivative(x, order), self.derivative(self.interp_func, x, 0.001, order), delta=1e-06)
|
def test_interpolate_1d_linear_extrapolate_linear(self):
"""1D linear interpolation. Test values in the extrapolation areas"""
if x is None:
x = self.x
if data is None:
data = self.data
self.interp_data = np.array([1.0, 0.827344627425, 0.65468925485, 0.482033882274, 0.19022089727, -0.135637119857, -0.461495136984, -0.671036971053, -0.787525858675, -0.904014746296, -0.907509439898, -0.685015745458, -0.462522051019, -0.229870470166, 0.084044201987, 0.397958874141, 0.711873546294, 0.796577712584, 0.852630565642, 0.908683418699, 0.751249583376, 0.487072403865, 0.222895224353, -0.052965755187, -0.343431484761, -0.633897214335, -0.858384419526, -0.851946789376, -0.845509159226, -0.839071529076], dtype=np.float64)
self.extrap_data_nea = np.array([1.0, 1.0, -0.839071529076, -0.839071529076], dtype=np.float64)
self.extrap_data_lin = np.array([1.4005604643743956, 1.2002802321871977, -0.831603878102657, -0.8241362271288615], dtype=np.float64)
self.interp_func = interpolators1d.Interpolate1DLinear(x, data, extrapolate=True, extrapolation_range=extrapolation_range, extrapolation_type='linear', tolerate_single_value=tolerate_single_value)
for i in range(len(self.xsamples_extrapol)):
x = self.xsamples_extrapol[i]
self.assertAlmostEqual(self.interp_func(x), self.extrap_data_lin[i], delta=1e-08)
for order in range(1, 4):
self.assertAlmostEqual(self.interp_func.derivative(x, order), self.derivative(self.interp_func, x, 0.001, order), delta=1e-06)
|
core
|
positive
|
def _load_coco_person_detection_results(self):
all_boxes = None
with open(self.bbox_file, 'r') as f:
all_boxes = json.load(f)
if not all_boxes:
logger.error('=> Load %s fail!' % self.bbox_file)
return None
logger.info('=> Total boxes: {}'.format(len(all_boxes)))
kpt_db = []
num_boxes = 0
for n_img in range(0, len(all_boxes)):
det_res = all_boxes[n_img]
if det_res['category_id'] != 1:
continue
<DeepExtract>
file_name = '%012d.jpg' % det_res['image_id']
if '2014' in self.image_set:
file_name = 'COCO_%s_' % self.image_set + file_name
prefix = 'test2017' if 'test' in self.image_set else self.image_set
data_name = prefix + '.zip@' if self.data_format == 'zip' else prefix
image_path = os.path.join(self.root, 'images', data_name, file_name)
img_name = image_path
</DeepExtract>
box = det_res['bbox']
score = det_res['score']
if score < self.image_thre:
continue
num_boxes = num_boxes + 1
<DeepExtract>
(x, y, w, h) = box[:4]
(center, scale) = self._xywh2cs(x, y, w, h)
</DeepExtract>
joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)
joints_3d_vis = np.ones((self.num_joints, 3), dtype=np.float)
kpt_db.append({'image': img_name, 'center': center, 'scale': scale, 'score': score, 'joints_3d': joints_3d, 'joints_3d_vis': joints_3d_vis})
logger.info('=> Total boxes after fliter low score@{}: {}'.format(self.image_thre, num_boxes))
return kpt_db
|
def _load_coco_person_detection_results(self):
all_boxes = None
with open(self.bbox_file, 'r') as f:
all_boxes = json.load(f)
if not all_boxes:
logger.error('=> Load %s fail!' % self.bbox_file)
return None
logger.info('=> Total boxes: {}'.format(len(all_boxes)))
kpt_db = []
num_boxes = 0
for n_img in range(0, len(all_boxes)):
det_res = all_boxes[n_img]
if det_res['category_id'] != 1:
continue
file_name = '%012d.jpg' % det_res['image_id']
if '2014' in self.image_set:
file_name = 'COCO_%s_' % self.image_set + file_name
prefix = 'test2017' if 'test' in self.image_set else self.image_set
data_name = prefix + '.zip@' if self.data_format == 'zip' else prefix
image_path = os.path.join(self.root, 'images', data_name, file_name)
img_name = image_path
box = det_res['bbox']
score = det_res['score']
if score < self.image_thre:
continue
num_boxes = num_boxes + 1
(x, y, w, h) = box[:4]
(center, scale) = self._xywh2cs(x, y, w, h)
joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)
joints_3d_vis = np.ones((self.num_joints, 3), dtype=np.float)
kpt_db.append({'image': img_name, 'center': center, 'scale': scale, 'score': score, 'joints_3d': joints_3d, 'joints_3d_vis': joints_3d_vis})
logger.info('=> Total boxes after fliter low score@{}: {}'.format(self.image_thre, num_boxes))
return kpt_db
|
cvToolkit
|
positive
|
def predict_proba(self, X):
"""
Make class probability predictions.
Parameters
----------
X : 1D or 2D list-like of strings
Input text or text pairs
Returns
----------
probs: numpy 2D array of floats
probability estimates for each class
"""
(texts_a, texts_b) = unpack_data(X)
<DeepExtract>
config = model2config(self)
(_, device) = prepare_model_and_device(self.model, config)
config.device = device
dataloader = get_test_dl(texts_a, texts_b, None, config)
self.model.eval()
(dataloader, config) = (dataloader, config)
</DeepExtract>
device = config.device
probs = []
batch_iter = tqdm(dataloader, desc='Predicting', leave=False)
for batch in batch_iter:
batch = tuple((t.to(device) for t in batch))
with torch.no_grad():
logits = self.model(*batch)
prob = F.softmax(logits, dim=-1)
prob = prob.detach().cpu().numpy()
probs.append(prob)
return np.vstack(tuple(probs))
|
def predict_proba(self, X):
"""
Make class probability predictions.
Parameters
----------
X : 1D or 2D list-like of strings
Input text or text pairs
Returns
----------
probs: numpy 2D array of floats
probability estimates for each class
"""
(texts_a, texts_b) = unpack_data(X)
config = model2config(self)
(_, device) = prepare_model_and_device(self.model, config)
config.device = device
dataloader = get_test_dl(texts_a, texts_b, None, config)
self.model.eval()
(dataloader, config) = (dataloader, config)
device = config.device
probs = []
batch_iter = tqdm(dataloader, desc='Predicting', leave=False)
for batch in batch_iter:
batch = tuple((t.to(device) for t in batch))
with torch.no_grad():
logits = self.model(*batch)
prob = F.softmax(logits, dim=-1)
prob = prob.detach().cpu().numpy()
probs.append(prob)
return np.vstack(tuple(probs))
|
Chinese-clinical-NER
|
positive
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
parser.add_argument('--bert_model', default=None, type=str, required=True, help='Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.')
parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train.')
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the experimental results will be written.')
parser.add_argument('--model_file', default=None, type=str, required=True, help='The model file which will be evaluated.')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
parser.add_argument('--do_lower_case', default=False, action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--no_cuda', default=False, action='store_true', help='Whether not to use CUDA when available')
parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--batch_size', default=16, type=int, help='Total batch size for cut.')
parser.add_argument('--num_batch', default=4, type=int, help='Num batch of an example.')
parser.add_argument('--zero_baseline', default=False, action='store_true', help='If use zero atteniton matrix as the baseline.')
parser.add_argument('--start_exp', default=0, type=int, help='The start index of training examples.')
parser.add_argument('--num_exp', default=500, type=int, help='The number of training examples for finding patterns.')
parser.add_argument('--data_type', default='train', type=str, help='Patterns from dev_set or training_set.')
args = parser.parse_args()
args.zero_baseline = True
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info('device: {} n_gpu: {}, distributed training: {}'.format(device, n_gpu, bool(args.local_rank != -1)))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError('Task not found: %s' % task_name)
processor = processors[task_name]()
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
logger.info('***** CUDA.empty_cache() *****')
torch.cuda.empty_cache()
if args.task_name == 'sts-b':
lbl_type = torch.float
else:
lbl_type = torch.long
model_state_dict = torch.load(args.model_file)
model = BertForSequenceClassification.from_pretrained(args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.data_type == 'dev':
eval_segment = 'dev_matched' if args.task_name == 'mnli' else 'dev'
eval_examples = processor.get_dev_examples(args.data_dir, segment=eval_segment)[args.start_exp:args.start_exp + args.num_exp]
else:
eval_segment = 'train'
eval_examples = processor.get_train_examples(args.data_dir)[args.start_exp:args.start_exp + args.num_exp]
model.eval()
if args.bert_model.find('base') != -1:
(num_head, num_layer) = (12, 12)
elif args.bert_model.find('large') != -1:
(num_head, num_layer) = (16, 24)
(eval_loss, eval_result) = (0, 0)
(nb_eval_steps, nb_eval_examples) = (0, 0)
(all_logits, all_label_ids) = ([], [])
seg_result_dict = {}
saved_res = []
<DeepExtract>
if label_list:
label_map = {label: i for (i, label) in enumerate(label_list)}
else:
label_map = None
features = []
tokenslist = []
for (ex_index, example) in enumerate(eval_examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
_truncate_seq_pair(tokens_a, tokens_b, args.max_seq_length - 3)
elif len(tokens_a) > args.max_seq_length - 2:
tokens_a = tokens_a[:args.max_seq_length - 2]
tokens = ['[CLS]'] + tokens_a + ['[SEP]']
base_tokens = ['[UNK]'] + ['[UNK]'] * len(tokens_a) + ['[UNK]']
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ['[SEP]']
base_tokens += ['[UNK]'] * len(tokens_b) + ['[UNK]']
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)
input_mask = [1] * len(input_ids)
padding = [0] * (args.max_seq_length - len(input_ids))
input_ids += padding
baseline_ids += padding
input_mask += padding
segment_ids += padding
assert len(baseline_ids) == args.max_seq_length
assert len(input_ids) == args.max_seq_length
assert len(input_mask) == args.max_seq_length
assert len(segment_ids) == args.max_seq_length
if label_map:
label_id = label_map[example.label]
else:
label_id = float(example.label)
if ex_index < 2:
logger.debug('*** Example ***')
logger.debug('guid: %s' % example.guid)
logger.debug('tokens: %s' % ' '.join([str(x) for x in tokens]))
logger.debug('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
logger.debug('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
logger.debug('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
logger.debug('label: %s (id = %d)' % (example.label, label_id))
if args.task_name == 'sst-2' and len(tokens) < 15:
continue
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, tokens=tokens, baseline_ids=baseline_ids))
tokenslist.append({'token': tokens, 'golden_label': example.label, 'pred_label': None})
(eval_features, _) = (features, tokenslist)
</DeepExtract>
logger.info('***** Running evaluation: %s *****', eval_segment)
logger.info(' Num examples = %d', len(eval_examples))
all_baseline_ids = torch.tensor([f.baseline_ids for f in eval_features], dtype=torch.long)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=lbl_type)
all_tokens = [f.tokens for f in eval_features]
eval_data = TensorDataset(all_baseline_ids, all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)
model.eval()
(eval_loss, eval_result) = (0, 0)
(nb_eval_steps, nb_eval_examples) = (0, 0)
(all_logits, all_label_ids) = ([], [])
index_count = 0
for (baseline_ids, input_ids, input_mask, segment_ids, label_ids) in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
input_len = int(input_mask[0].sum())
tokens = all_tokens[index_count]
seg_pos = tokens.index('[SEP]')
tar_head_attr = None
with torch.no_grad():
(tmp_eval_loss, baseline_logits) = model(input_ids, 'res', segment_ids, input_mask, label_ids)
pred_label = int(torch.argmax(baseline_logits))
attr_max = None
for tar_layer in range(0, num_layer):
with torch.no_grad():
(att, _) = model(input_ids, 'att', segment_ids, input_mask, label_ids, tar_layer)
att = att[0]
baseline = None
<DeepExtract>
if baseline is None:
baseline = torch.zeros_like(att.data)
num_points = args.batch_size * args.num_batch
scale = 1.0 / num_points
step = (att.data.unsqueeze(0) - baseline.unsqueeze(0)) * scale
res = torch.cat([torch.add(baseline.unsqueeze(0), step * i) for i in range(num_points)], dim=0)
(scale_att, step) = (res, step[0])
</DeepExtract>
scale_att.requires_grad_(True)
attr_all = None
for j_batch in range(args.num_batch):
one_batch_att = scale_att[j_batch * args.batch_size:(j_batch + 1) * args.batch_size]
(tar_prob, grad) = model(input_ids, 'att', segment_ids, input_mask, label_ids, tar_layer, one_batch_att, pred_label=pred_label)
grad = grad.sum(dim=0)
attr_all = grad if attr_all is None else torch.add(attr_all, grad)
attr_all = attr_all[:, 0:input_len, 0:input_len] * step[:, 0:input_len, 0:input_len]
tar_head_index = int(torch.argmax(attr_all.reshape(num_head * input_len * input_len))) // (input_len * input_len)
if tar_head_attr is None:
tar_head_attr = attr_all[tar_head_index]
elif attr_all[tar_head_index].max() > tar_head_attr.max():
tar_head_attr = attr_all[tar_head_index]
attr_max = tar_head_attr.cpu().numpy().reshape(input_len * input_len)
attr_sorted_index = np.argsort(attr_max)[::-1]
saved_res.append({'max_combined_attr': [float(attr_max[attr_sorted_index[0]]), float(attr_max[attr_sorted_index[1]])], 'top1pattern': [tokens[attr_sorted_index[0] // input_len], tokens[attr_sorted_index[0] % input_len]], 'top2pattern': [tokens[attr_sorted_index[1] // input_len], tokens[attr_sorted_index[1] % input_len]], 'top1position': [int(attr_sorted_index[0] // input_len), int(attr_sorted_index[0] % input_len)], 'top2position': [int(attr_sorted_index[1] // input_len), int(attr_sorted_index[1] % input_len)], 'target_label': pred_label, 'golden_label': int(label_ids[0]), 'seg_pos': seg_pos, 'tokens': tokens})
logits = baseline_logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
all_logits.append(logits)
all_label_ids.append(label_ids)
eval_loss += tmp_eval_loss.mean().item()
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
index_count += 1
eval_loss = eval_loss / nb_eval_steps
all_logits = np.concatenate(all_logits, axis=0)
all_label_ids = np.concatenate(all_label_ids, axis=0)
metric_func = processor.get_metric_func()
eval_result = metric_func(all_logits, all_label_ids)
result = {'eval_loss': eval_loss, 'eval_result': eval_result, 'task_name': args.task_name, 'eval_segment': eval_segment}
if eval_segment not in seg_result_dict:
seg_result_dict[eval_segment] = []
seg_result_dict[eval_segment].append(result)
logger.info('***** Eval results ({0}) *****'.format(eval_segment))
for key in sorted(result.keys()):
logger.info(' %s = %s', key, str(result[key]))
saved_res = sorted(saved_res, key=lambda a: sum(a['max_combined_attr']), reverse=True)
with open(os.path.join(args.output_dir, '{0}_adver_pattern_exp{1}-{2}.json'.format(args.data_type, args.start_exp, args.start_exp + args.num_exp)), 'w') as fout:
fout.write(json.dumps(saved_res, indent=2) + '\n')
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
parser.add_argument('--bert_model', default=None, type=str, required=True, help='Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.')
parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train.')
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the experimental results will be written.')
parser.add_argument('--model_file', default=None, type=str, required=True, help='The model file which will be evaluated.')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
parser.add_argument('--do_lower_case', default=False, action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--no_cuda', default=False, action='store_true', help='Whether not to use CUDA when available')
parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--batch_size', default=16, type=int, help='Total batch size for cut.')
parser.add_argument('--num_batch', default=4, type=int, help='Num batch of an example.')
parser.add_argument('--zero_baseline', default=False, action='store_true', help='If use zero atteniton matrix as the baseline.')
parser.add_argument('--start_exp', default=0, type=int, help='The start index of training examples.')
parser.add_argument('--num_exp', default=500, type=int, help='The number of training examples for finding patterns.')
parser.add_argument('--data_type', default='train', type=str, help='Patterns from dev_set or training_set.')
args = parser.parse_args()
args.zero_baseline = True
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info('device: {} n_gpu: {}, distributed training: {}'.format(device, n_gpu, bool(args.local_rank != -1)))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
os.makedirs(args.output_dir, exist_ok=True)
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError('Task not found: %s' % task_name)
processor = processors[task_name]()
num_labels = num_labels_task[task_name]
label_list = processor.get_labels()
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
logger.info('***** CUDA.empty_cache() *****')
torch.cuda.empty_cache()
if args.task_name == 'sts-b':
lbl_type = torch.float
else:
lbl_type = torch.long
model_state_dict = torch.load(args.model_file)
model = BertForSequenceClassification.from_pretrained(args.bert_model, state_dict=model_state_dict, num_labels=num_labels)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.data_type == 'dev':
eval_segment = 'dev_matched' if args.task_name == 'mnli' else 'dev'
eval_examples = processor.get_dev_examples(args.data_dir, segment=eval_segment)[args.start_exp:args.start_exp + args.num_exp]
else:
eval_segment = 'train'
eval_examples = processor.get_train_examples(args.data_dir)[args.start_exp:args.start_exp + args.num_exp]
model.eval()
if args.bert_model.find('base') != -1:
(num_head, num_layer) = (12, 12)
elif args.bert_model.find('large') != -1:
(num_head, num_layer) = (16, 24)
(eval_loss, eval_result) = (0, 0)
(nb_eval_steps, nb_eval_examples) = (0, 0)
(all_logits, all_label_ids) = ([], [])
seg_result_dict = {}
saved_res = []
if label_list:
label_map = {label: i for (i, label) in enumerate(label_list)}
else:
label_map = None
features = []
tokenslist = []
for (ex_index, example) in enumerate(eval_examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
_truncate_seq_pair(tokens_a, tokens_b, args.max_seq_length - 3)
elif len(tokens_a) > args.max_seq_length - 2:
tokens_a = tokens_a[:args.max_seq_length - 2]
tokens = ['[CLS]'] + tokens_a + ['[SEP]']
base_tokens = ['[UNK]'] + ['[UNK]'] * len(tokens_a) + ['[UNK]']
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ['[SEP]']
base_tokens += ['[UNK]'] * len(tokens_b) + ['[UNK]']
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)
input_mask = [1] * len(input_ids)
padding = [0] * (args.max_seq_length - len(input_ids))
input_ids += padding
baseline_ids += padding
input_mask += padding
segment_ids += padding
assert len(baseline_ids) == args.max_seq_length
assert len(input_ids) == args.max_seq_length
assert len(input_mask) == args.max_seq_length
assert len(segment_ids) == args.max_seq_length
if label_map:
label_id = label_map[example.label]
else:
label_id = float(example.label)
if ex_index < 2:
logger.debug('*** Example ***')
logger.debug('guid: %s' % example.guid)
logger.debug('tokens: %s' % ' '.join([str(x) for x in tokens]))
logger.debug('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
logger.debug('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
logger.debug('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
logger.debug('label: %s (id = %d)' % (example.label, label_id))
if args.task_name == 'sst-2' and len(tokens) < 15:
continue
features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, tokens=tokens, baseline_ids=baseline_ids))
tokenslist.append({'token': tokens, 'golden_label': example.label, 'pred_label': None})
(eval_features, _) = (features, tokenslist)
logger.info('***** Running evaluation: %s *****', eval_segment)
logger.info(' Num examples = %d', len(eval_examples))
all_baseline_ids = torch.tensor([f.baseline_ids for f in eval_features], dtype=torch.long)
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=lbl_type)
all_tokens = [f.tokens for f in eval_features]
eval_data = TensorDataset(all_baseline_ids, all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1)
model.eval()
(eval_loss, eval_result) = (0, 0)
(nb_eval_steps, nb_eval_examples) = (0, 0)
(all_logits, all_label_ids) = ([], [])
index_count = 0
for (baseline_ids, input_ids, input_mask, segment_ids, label_ids) in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
input_len = int(input_mask[0].sum())
tokens = all_tokens[index_count]
seg_pos = tokens.index('[SEP]')
tar_head_attr = None
with torch.no_grad():
(tmp_eval_loss, baseline_logits) = model(input_ids, 'res', segment_ids, input_mask, label_ids)
pred_label = int(torch.argmax(baseline_logits))
attr_max = None
for tar_layer in range(0, num_layer):
with torch.no_grad():
(att, _) = model(input_ids, 'att', segment_ids, input_mask, label_ids, tar_layer)
att = att[0]
baseline = None
if baseline is None:
baseline = torch.zeros_like(att.data)
num_points = args.batch_size * args.num_batch
scale = 1.0 / num_points
step = (att.data.unsqueeze(0) - baseline.unsqueeze(0)) * scale
res = torch.cat([torch.add(baseline.unsqueeze(0), step * i) for i in range(num_points)], dim=0)
(scale_att, step) = (res, step[0])
scale_att.requires_grad_(True)
attr_all = None
for j_batch in range(args.num_batch):
one_batch_att = scale_att[j_batch * args.batch_size:(j_batch + 1) * args.batch_size]
(tar_prob, grad) = model(input_ids, 'att', segment_ids, input_mask, label_ids, tar_layer, one_batch_att, pred_label=pred_label)
grad = grad.sum(dim=0)
attr_all = grad if attr_all is None else torch.add(attr_all, grad)
attr_all = attr_all[:, 0:input_len, 0:input_len] * step[:, 0:input_len, 0:input_len]
tar_head_index = int(torch.argmax(attr_all.reshape(num_head * input_len * input_len))) // (input_len * input_len)
if tar_head_attr is None:
tar_head_attr = attr_all[tar_head_index]
elif attr_all[tar_head_index].max() > tar_head_attr.max():
tar_head_attr = attr_all[tar_head_index]
attr_max = tar_head_attr.cpu().numpy().reshape(input_len * input_len)
attr_sorted_index = np.argsort(attr_max)[::-1]
saved_res.append({'max_combined_attr': [float(attr_max[attr_sorted_index[0]]), float(attr_max[attr_sorted_index[1]])], 'top1pattern': [tokens[attr_sorted_index[0] // input_len], tokens[attr_sorted_index[0] % input_len]], 'top2pattern': [tokens[attr_sorted_index[1] // input_len], tokens[attr_sorted_index[1] % input_len]], 'top1position': [int(attr_sorted_index[0] // input_len), int(attr_sorted_index[0] % input_len)], 'top2position': [int(attr_sorted_index[1] // input_len), int(attr_sorted_index[1] % input_len)], 'target_label': pred_label, 'golden_label': int(label_ids[0]), 'seg_pos': seg_pos, 'tokens': tokens})
logits = baseline_logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
all_logits.append(logits)
all_label_ids.append(label_ids)
eval_loss += tmp_eval_loss.mean().item()
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
index_count += 1
eval_loss = eval_loss / nb_eval_steps
all_logits = np.concatenate(all_logits, axis=0)
all_label_ids = np.concatenate(all_label_ids, axis=0)
metric_func = processor.get_metric_func()
eval_result = metric_func(all_logits, all_label_ids)
result = {'eval_loss': eval_loss, 'eval_result': eval_result, 'task_name': args.task_name, 'eval_segment': eval_segment}
if eval_segment not in seg_result_dict:
seg_result_dict[eval_segment] = []
seg_result_dict[eval_segment].append(result)
logger.info('***** Eval results ({0}) *****'.format(eval_segment))
for key in sorted(result.keys()):
logger.info(' %s = %s', key, str(result[key]))
saved_res = sorted(saved_res, key=lambda a: sum(a['max_combined_attr']), reverse=True)
with open(os.path.join(args.output_dir, '{0}_adver_pattern_exp{1}-{2}.json'.format(args.data_type, args.start_exp, args.start_exp + args.num_exp)), 'w') as fout:
fout.write(json.dumps(saved_res, indent=2) + '\n')
|
attattr
|
positive
|
def _test(config):
test_data = read_data(config, 'test', True)
update_config(config, [test_data])
<DeepExtract>
if config.debug:
config.num_steps = 2
config.eval_period = 1
config.log_period = 1
config.save_period = 1
config.val_num_batches = 2
config.test_num_batches = 2
</DeepExtract>
if config.use_glove_for_unk:
word2vec_dict = test_data.shared['lower_word2vec'] if config.lower_word else test_data.shared['word2vec']
new_word2idx_dict = test_data.shared['new_word2idx']
idx2vec_dict = {idx: word2vec_dict[word] for (word, idx) in new_word2idx_dict.items()}
new_emb_mat = np.array([idx2vec_dict[idx] for idx in range(len(idx2vec_dict))], dtype='float32')
config.new_emb_mat = new_emb_mat
pprint(config.__flags, indent=2)
models = get_multi_gpu_models(config)
model = models[0]
evaluator = MultiGPUF1Evaluator(config, models, tensor_dict=models[0].tensor_dict if config.vis else None)
graph_handler = GraphHandler(config, model)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
graph_handler.initialize(sess)
num_steps = math.ceil(test_data.num_examples / (config.batch_size * config.num_gpus))
if 0 < config.test_num_batches < num_steps:
num_steps = config.test_num_batches
e = None
for multi_batch in tqdm(test_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps, cluster=config.cluster), total=num_steps):
ei = evaluator.get_evaluation(sess, multi_batch)
e = ei if e is None else e + ei
if config.vis:
eval_subdir = os.path.join(config.eval_dir, '{}-{}'.format(ei.data_type, str(ei.global_step).zfill(6)))
if not os.path.exists(eval_subdir):
os.mkdir(eval_subdir)
path = os.path.join(eval_subdir, str(ei.idxs[0]).zfill(8))
graph_handler.dump_eval(ei, path=path)
print(e)
if config.dump_answer:
print('dumping answer ...')
graph_handler.dump_answer(e)
if config.dump_eval:
print('dumping eval ...')
graph_handler.dump_eval(e)
|
def _test(config):
test_data = read_data(config, 'test', True)
update_config(config, [test_data])
if config.debug:
config.num_steps = 2
config.eval_period = 1
config.log_period = 1
config.save_period = 1
config.val_num_batches = 2
config.test_num_batches = 2
if config.use_glove_for_unk:
word2vec_dict = test_data.shared['lower_word2vec'] if config.lower_word else test_data.shared['word2vec']
new_word2idx_dict = test_data.shared['new_word2idx']
idx2vec_dict = {idx: word2vec_dict[word] for (word, idx) in new_word2idx_dict.items()}
new_emb_mat = np.array([idx2vec_dict[idx] for idx in range(len(idx2vec_dict))], dtype='float32')
config.new_emb_mat = new_emb_mat
pprint(config.__flags, indent=2)
models = get_multi_gpu_models(config)
model = models[0]
evaluator = MultiGPUF1Evaluator(config, models, tensor_dict=models[0].tensor_dict if config.vis else None)
graph_handler = GraphHandler(config, model)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
graph_handler.initialize(sess)
num_steps = math.ceil(test_data.num_examples / (config.batch_size * config.num_gpus))
if 0 < config.test_num_batches < num_steps:
num_steps = config.test_num_batches
e = None
for multi_batch in tqdm(test_data.get_multi_batches(config.batch_size, config.num_gpus, num_steps=num_steps, cluster=config.cluster), total=num_steps):
ei = evaluator.get_evaluation(sess, multi_batch)
e = ei if e is None else e + ei
if config.vis:
eval_subdir = os.path.join(config.eval_dir, '{}-{}'.format(ei.data_type, str(ei.global_step).zfill(6)))
if not os.path.exists(eval_subdir):
os.mkdir(eval_subdir)
path = os.path.join(eval_subdir, str(ei.idxs[0]).zfill(8))
graph_handler.dump_eval(ei, path=path)
print(e)
if config.dump_answer:
print('dumping answer ...')
graph_handler.dump_answer(e)
if config.dump_eval:
print('dumping eval ...')
graph_handler.dump_eval(e)
|
dawn-bench-models
|
positive
|
@pytest.fixture
def setup_branched_reaction_tree(setup_branched_mcts):
def wrapper(exclude_from_stock=None):
<DeepExtract>
def wrapper(exclude_from_stock=None):
exclude_from_stock = exclude_from_stock or []
stock = [smi for smi in ['c1ccccc1', 'O', 'Oc1ccccc1', 'NC1CCCCC1', 'C1=CCC=C1'] if smi not in exclude_from_stock]
exclude_from_stock or [](None, *stock)
for smi in exclude_from_stock:
get_branched_expansion[smi] = {'smiles': '', 'prior': 1}
(_, node) = setup_expanded_mcts(get_branched_expansion)
(_, node) = wrapper
</DeepExtract>
return node.to_reaction_tree()
return wrapper
|
@pytest.fixture
def setup_branched_reaction_tree(setup_branched_mcts):
def wrapper(exclude_from_stock=None):
def wrapper(exclude_from_stock=None):
exclude_from_stock = exclude_from_stock or []
stock = [smi for smi in ['c1ccccc1', 'O', 'Oc1ccccc1', 'NC1CCCCC1', 'C1=CCC=C1'] if smi not in exclude_from_stock]
exclude_from_stock or [](None, *stock)
for smi in exclude_from_stock:
get_branched_expansion[smi] = {'smiles': '', 'prior': 1}
(_, node) = setup_expanded_mcts(get_branched_expansion)
(_, node) = wrapper
return node.to_reaction_tree()
return wrapper
|
aizynthfinder
|
positive
|
def copy_dict(d):
ret = {}
for (key, value) in d.items():
if isinstance(value, dict):
<DeepExtract>
ret = {}
for (key, value) in value.items():
if isinstance(value, dict):
ret[key] = copy_dict(value)
else:
ret[key] = value
del value
ret[key] = ret
</DeepExtract>
else:
ret[key] = value
del d
return ret
|
def copy_dict(d):
ret = {}
for (key, value) in d.items():
if isinstance(value, dict):
ret = {}
for (key, value) in value.items():
if isinstance(value, dict):
ret[key] = copy_dict(value)
else:
ret[key] = value
del value
ret[key] = ret
else:
ret[key] = value
del d
return ret
|
CAPTRA
|
positive
|
def parse_ib_txt(f, load_indices):
for line in map(str.strip, f):
if line.startswith('byte offset:'):
self.offset = int(line[13:])
if line.startswith('first index:'):
self.first = int(line[13:])
elif line.startswith('index count:'):
self.index_count = int(line[13:])
elif line.startswith('topology:'):
self.topology = line[10:]
if line != 'topology: trianglelist':
raise Fatal('"%s" is not yet supported' % line)
elif line.startswith('format:'):
self.format = line[8:]
elif line == '':
if not load_indices:
return
<DeepExtract>
for line in map(str.strip, f):
face = tuple(map(int, line.split()))
assert len(face) == 3
self.faces.append(face)
</DeepExtract>
assert len(self.faces) * 3 == self.index_count
|
def parse_ib_txt(f, load_indices):
for line in map(str.strip, f):
if line.startswith('byte offset:'):
self.offset = int(line[13:])
if line.startswith('first index:'):
self.first = int(line[13:])
elif line.startswith('index count:'):
self.index_count = int(line[13:])
elif line.startswith('topology:'):
self.topology = line[10:]
if line != 'topology: trianglelist':
raise Fatal('"%s" is not yet supported' % line)
elif line.startswith('format:'):
self.format = line[8:]
elif line == '':
if not load_indices:
return
for line in map(str.strip, f):
face = tuple(map(int, line.split()))
assert len(face) == 3
self.faces.append(face)
assert len(self.faces) * 3 == self.index_count
|
3d-fixes
|
positive
|
def __call__(self, results):
<DeepExtract>
if self.size is not None:
padded_img = mmcv.impad(results['img'], self.size)
elif self.size_divisor is not None:
padded_img = mmcv.impad_to_multiple(results['img'], self.size_divisor, pad_val=self.pad_val)
results['img'] = padded_img
results['pad_shape'] = padded_img.shape
results['pad_fixed_size'] = self.size
results['pad_size_divisor'] = self.size_divisor
</DeepExtract>
<DeepExtract>
pad_shape = results['pad_shape'][:2]
for key in results.get('mask_fields', []):
padded_masks = [mmcv.impad(mask, pad_shape, pad_val=self.pad_val) for mask in results[key]]
results[key] = np.stack(padded_masks, axis=0)
</DeepExtract>
return results
|
def __call__(self, results):
if self.size is not None:
padded_img = mmcv.impad(results['img'], self.size)
elif self.size_divisor is not None:
padded_img = mmcv.impad_to_multiple(results['img'], self.size_divisor, pad_val=self.pad_val)
results['img'] = padded_img
results['pad_shape'] = padded_img.shape
results['pad_fixed_size'] = self.size
results['pad_size_divisor'] = self.size_divisor
pad_shape = results['pad_shape'][:2]
for key in results.get('mask_fields', []):
padded_masks = [mmcv.impad(mask, pad_shape, pad_val=self.pad_val) for mask in results[key]]
results[key] = np.stack(padded_masks, axis=0)
return results
|
DNL-Object-Detection
|
positive
|
def test_bitarray_chain(self):
<DeepExtract>
a = bitarray(0, endian)
a.frombytes(os.urandom(bits2bytes(64)))
del a[64:]
a = a
</DeepExtract>
d = {0: a}
for n in range(1, 100):
d[n] = bitarray(buffer=d[n - 1])
self.assertEqual(d[99], a)
a.setall(0)
self.assertEqual(d[99], zeros(64))
a[:] = 1
self.assertTrue(d[99].all())
for c in d.values():
<DeepExtract>
self.assertIsInstance(c, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = c.buffer_info()
self.assertEqual(size, bits2bytes(len(c)))
self.assertEqual(padbits, 8 * size - len(c))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, c.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(c.nbytes, size)
self.assertEqual(c.padbits, padbits)
self.assertEqual(c.readonly, readonly)
self.assertEqual(len(c) + c.padbits, 8 * c.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(c) % 8, 0)
self.assertEqual(len(c), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(c).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(c.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
</DeepExtract>
|
def test_bitarray_chain(self):
a = bitarray(0, endian)
a.frombytes(os.urandom(bits2bytes(64)))
del a[64:]
a = a
d = {0: a}
for n in range(1, 100):
d[n] = bitarray(buffer=d[n - 1])
self.assertEqual(d[99], a)
a.setall(0)
self.assertEqual(d[99], zeros(64))
a[:] = 1
self.assertTrue(d[99].all())
for c in d.values():
self.assertIsInstance(c, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = c.buffer_info()
self.assertEqual(size, bits2bytes(len(c)))
self.assertEqual(padbits, 8 * size - len(c))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, c.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(c.nbytes, size)
self.assertEqual(c.padbits, padbits)
self.assertEqual(c.readonly, readonly)
self.assertEqual(len(c) + c.padbits, 8 * c.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(c) % 8, 0)
self.assertEqual(len(c), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(c).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(c.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
|
bitarray
|
positive
|
def warn(msg, delayed=None):
if delayed:
<DeepExtract>
if isinstance(delayed, functools.partial):
delayed = delayed.func
if not has_override(delayed, 'warnings'):
warnings = []
warnings = delayed._spectacular_annotation['warnings']
</DeepExtract>
warnings.append(msg)
<DeepExtract>
if not hasattr(delayed, '_spectacular_annotation'):
delayed._spectacular_annotation = {}
elif '_spectacular_annotation' not in delayed.__dict__:
delayed._spectacular_annotation = delayed._spectacular_annotation.copy()
delayed._spectacular_annotation['warnings'] = warnings
return delayed
</DeepExtract>
else:
GENERATOR_STATS.emit(msg, 'warning')
|
def warn(msg, delayed=None):
if delayed:
if isinstance(delayed, functools.partial):
delayed = delayed.func
if not has_override(delayed, 'warnings'):
warnings = []
warnings = delayed._spectacular_annotation['warnings']
warnings.append(msg)
if not hasattr(delayed, '_spectacular_annotation'):
delayed._spectacular_annotation = {}
elif '_spectacular_annotation' not in delayed.__dict__:
delayed._spectacular_annotation = delayed._spectacular_annotation.copy()
delayed._spectacular_annotation['warnings'] = warnings
return delayed
else:
GENERATOR_STATS.emit(msg, 'warning')
|
drf-spectacular
|
positive
|
def main(dt_file_path):
makefile = os.path.join(settings.PRODUCTS_ROOT, 'makefile')
with open(makefile, 'w') as f:
f.write('all: {}\n'.format(settings.DETECTION_EXE))
f.write('{}: ../codalab/evalwrap.cpp ../cppapi/eval_tools.hpp\n'.format(settings.DETECTION_EXE))
f.write('\tg++ -std=c++11 -O2 $< -o $@')
args = ['make', '-f', makefile]
print(*args)
p = subprocess.Popen(args)
assert 0 == p.wait()
with open(settings.TEST_DETECTION_GT) as f:
gt = f.read()
args = [settings.DETECTION_EXE, dt_file_path]
print(*args)
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
report_str = p.communicate(gt.encode('utf-8'))[0].decode('utf-8')
assert 0 == p.wait()
report = json.loads(report_str)
assert 0 == report['error'], report['msg']
with codecs.open(settings.PROPOSAL_REPORT if proposal else settings.DETECTION_REPORT, 'w', 'utf-8') as f:
json.dump(report, f, ensure_ascii=False, indent=2, sort_keys=True)
<DeepExtract>
jdata = [{'model_name': 'YOLO_v2', 'performance': {szname: {'attributes': [{'n': o['n'], 'recalls': {1: o['recall']}} for o in szattr['attributes']]} for (szname, szattr) in report['performance'].items()}}]
with open('explore_cls.template.html') as f:
template = Template(f.read())
with codecs.open(settings.PROPOSAL_EXPLORE if proposal else settings.DETECTION_EXPLORE, 'w', 'utf-8') as f:
f.write(template.render({'title': 'Explore detection performance', 'chartjs': get_chartjs(), 'performance_all': json.dumps(jdata, sort_keys=True), 'attributes': settings.ATTRIBUTES}))
</DeepExtract>
<DeepExtract>
def percentage(x, digit=1):
fmt = {1: '{:4.1f}%', 2: '{:5.2f}%'}
return fmt[digit].format(x * 100)
with open(settings.STAT_FREQUENCY) as f:
frequency = json.load(f)
freq_order = [o['text'] for o in frequency]
performance = report['performance']
for (szname, stat) in sorted(performance.items()):
print(szname)
for k in ('n', 'mAP', 'AP', 'mAP_micro'):
x = stat[k]
if isinstance(x, float):
x = percentage(x)
print('{:>4s}'.format(k), '=', x)
for (i, attr) in zip(range(-1, len(settings.ATTRIBUTES)), ['__all__'] + settings.ATTRIBUTES):
n = 0
rc = 0
for (k, o) in enumerate(performance[szname]['attributes']):
if i == -1 or int(k) & 2 ** i:
n += o['n']
rc += o['recall']
r = 0.0 if n == 0 else rc / n
print('{:13s}'.format(attr), 'n', '=', '{:6d}'.format(n), ',', 'recall', '=', percentage(r))
for char in freq_order[:10]:
print(char, percentage(performance[szname]['texts'].get(char, {'AP': 0.0})['AP']))
print()
</DeepExtract>
<DeepExtract>
def attr_recall(attr_perfs, attr_id):
m = len(settings.ATTRIBUTES)
n = rc = 0
for (k, o) in enumerate(attr_perfs):
if attr_id == -1 or (attr_id < m and 0 != int(k) & 2 ** attr_id) or (m <= attr_id and 0 == int(k) & 2 ** (attr_id - m)):
n += o['n']
rc += o['recall']
return 0.0 if n == 0 else rc / n
data = [[{'legend': szname, 'data': [attr_recall(report['performance'][szname]['attributes'], i) for i in range(-1, 2 * len(settings.ATTRIBUTES))]}] for (szname, _) in settings.SIZE_RANGES]
labels = ['all'] + settings.ATTRIBUTES + list(map('~{}'.format, settings.ATTRIBUTES))
with plt.style.context({'figure.subplot.left': 0.05, 'figure.subplot.right': 0.98, 'figure.subplot.top': 0.96, 'pdf.fonttype': 42, 'legend.loc': 'upper center'}):
plt.figure(figsize=(12, 3))
plt.xlim((0.3, 0.7 + len(labels)))
plt.ylim((0.0, 1.0))
plt.grid(which='major', axis='y', linestyle='dotted')
plot_tools.draw_bar(data, labels, width=0.18, legend_kwargs={'ncol': len(settings.SIZE_RANGES)})
plt.ylabel('recall')
plt.savefig(os.path.join(settings.PLOTS_DIR, ('pro' if proposal else 'det') + '_recall_by_attr_size.pdf'))
plt.close()
with plt.style.context({'figure.subplot.left': 0.1, 'figure.subplot.right': 0.97, 'figure.subplot.bottom': 0.1, 'figure.subplot.top': 0.97, 'pdf.fonttype': 42, 'legend.loc': 'upper right'}):
plt.figure(figsize=(5.5, 5.5))
plt.xlim((0.0, 1.0))
plt.ylim((0.0, 1.0))
plt.grid(which='major', axis='both', linestyle='dotted')
for (szname, stat) in sorted(report['performance'].items()):
y = [1.0] + stat['AP_curve'] + [0.0] * (stat['n'] - len(stat['AP_curve']))
x = np.linspace(0, 1, len(y))
plt.plot(x, y, label=szname)
plt.legend()
plt.xlabel('recall')
plt.ylabel('precision')
plt.savefig(os.path.join(settings.PLOTS_DIR, ('pro' if proposal else 'det') + '_AP_curve.pdf'))
plt.close()
with plt.style.context({'figure.subplot.left': 0.1, 'figure.subplot.right': 0.97, 'figure.subplot.bottom': 0.1, 'figure.subplot.top': 0.97, 'pdf.fonttype': 42, 'legend.loc': 'upper right'}):
plt.figure(figsize=(5.5, 5.5))
plt.xlim((0.0, 1.0))
plt.ylim((0.0, 1.0))
plt.grid(which='major', axis='both', linestyle='dotted')
for (szname, stat) in sorted(report['performance'].items()):
if stat['mAP_curve']:
(x, y) = zip(*stat['mAP_curve'])
x = [0.0] + list(x) + [x[-1]]
y = [y[0]] + list(y) + [0.0]
else:
(x, y) = ([0.0, 1.0], [0.0, 0.0])
plt.plot(x, y, label=szname)
plt.legend()
plt.xlabel('recall')
plt.ylabel('precision')
plt.savefig(os.path.join(settings.PLOTS_DIR, ('pro' if proposal else 'det') + '_mAP_curve.pdf'))
plt.close()
</DeepExtract>
|
def main(dt_file_path):
makefile = os.path.join(settings.PRODUCTS_ROOT, 'makefile')
with open(makefile, 'w') as f:
f.write('all: {}\n'.format(settings.DETECTION_EXE))
f.write('{}: ../codalab/evalwrap.cpp ../cppapi/eval_tools.hpp\n'.format(settings.DETECTION_EXE))
f.write('\tg++ -std=c++11 -O2 $< -o $@')
args = ['make', '-f', makefile]
print(*args)
p = subprocess.Popen(args)
assert 0 == p.wait()
with open(settings.TEST_DETECTION_GT) as f:
gt = f.read()
args = [settings.DETECTION_EXE, dt_file_path]
print(*args)
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
report_str = p.communicate(gt.encode('utf-8'))[0].decode('utf-8')
assert 0 == p.wait()
report = json.loads(report_str)
assert 0 == report['error'], report['msg']
with codecs.open(settings.PROPOSAL_REPORT if proposal else settings.DETECTION_REPORT, 'w', 'utf-8') as f:
json.dump(report, f, ensure_ascii=False, indent=2, sort_keys=True)
jdata = [{'model_name': 'YOLO_v2', 'performance': {szname: {'attributes': [{'n': o['n'], 'recalls': {1: o['recall']}} for o in szattr['attributes']]} for (szname, szattr) in report['performance'].items()}}]
with open('explore_cls.template.html') as f:
template = Template(f.read())
with codecs.open(settings.PROPOSAL_EXPLORE if proposal else settings.DETECTION_EXPLORE, 'w', 'utf-8') as f:
f.write(template.render({'title': 'Explore detection performance', 'chartjs': get_chartjs(), 'performance_all': json.dumps(jdata, sort_keys=True), 'attributes': settings.ATTRIBUTES}))
def percentage(x, digit=1):
fmt = {1: '{:4.1f}%', 2: '{:5.2f}%'}
return fmt[digit].format(x * 100)
with open(settings.STAT_FREQUENCY) as f:
frequency = json.load(f)
freq_order = [o['text'] for o in frequency]
performance = report['performance']
for (szname, stat) in sorted(performance.items()):
print(szname)
for k in ('n', 'mAP', 'AP', 'mAP_micro'):
x = stat[k]
if isinstance(x, float):
x = percentage(x)
print('{:>4s}'.format(k), '=', x)
for (i, attr) in zip(range(-1, len(settings.ATTRIBUTES)), ['__all__'] + settings.ATTRIBUTES):
n = 0
rc = 0
for (k, o) in enumerate(performance[szname]['attributes']):
if i == -1 or int(k) & 2 ** i:
n += o['n']
rc += o['recall']
r = 0.0 if n == 0 else rc / n
print('{:13s}'.format(attr), 'n', '=', '{:6d}'.format(n), ',', 'recall', '=', percentage(r))
for char in freq_order[:10]:
print(char, percentage(performance[szname]['texts'].get(char, {'AP': 0.0})['AP']))
print()
def attr_recall(attr_perfs, attr_id):
m = len(settings.ATTRIBUTES)
n = rc = 0
for (k, o) in enumerate(attr_perfs):
if attr_id == -1 or (attr_id < m and 0 != int(k) & 2 ** attr_id) or (m <= attr_id and 0 == int(k) & 2 ** (attr_id - m)):
n += o['n']
rc += o['recall']
return 0.0 if n == 0 else rc / n
data = [[{'legend': szname, 'data': [attr_recall(report['performance'][szname]['attributes'], i) for i in range(-1, 2 * len(settings.ATTRIBUTES))]}] for (szname, _) in settings.SIZE_RANGES]
labels = ['all'] + settings.ATTRIBUTES + list(map('~{}'.format, settings.ATTRIBUTES))
with plt.style.context({'figure.subplot.left': 0.05, 'figure.subplot.right': 0.98, 'figure.subplot.top': 0.96, 'pdf.fonttype': 42, 'legend.loc': 'upper center'}):
plt.figure(figsize=(12, 3))
plt.xlim((0.3, 0.7 + len(labels)))
plt.ylim((0.0, 1.0))
plt.grid(which='major', axis='y', linestyle='dotted')
plot_tools.draw_bar(data, labels, width=0.18, legend_kwargs={'ncol': len(settings.SIZE_RANGES)})
plt.ylabel('recall')
plt.savefig(os.path.join(settings.PLOTS_DIR, ('pro' if proposal else 'det') + '_recall_by_attr_size.pdf'))
plt.close()
with plt.style.context({'figure.subplot.left': 0.1, 'figure.subplot.right': 0.97, 'figure.subplot.bottom': 0.1, 'figure.subplot.top': 0.97, 'pdf.fonttype': 42, 'legend.loc': 'upper right'}):
plt.figure(figsize=(5.5, 5.5))
plt.xlim((0.0, 1.0))
plt.ylim((0.0, 1.0))
plt.grid(which='major', axis='both', linestyle='dotted')
for (szname, stat) in sorted(report['performance'].items()):
y = [1.0] + stat['AP_curve'] + [0.0] * (stat['n'] - len(stat['AP_curve']))
x = np.linspace(0, 1, len(y))
plt.plot(x, y, label=szname)
plt.legend()
plt.xlabel('recall')
plt.ylabel('precision')
plt.savefig(os.path.join(settings.PLOTS_DIR, ('pro' if proposal else 'det') + '_AP_curve.pdf'))
plt.close()
with plt.style.context({'figure.subplot.left': 0.1, 'figure.subplot.right': 0.97, 'figure.subplot.bottom': 0.1, 'figure.subplot.top': 0.97, 'pdf.fonttype': 42, 'legend.loc': 'upper right'}):
plt.figure(figsize=(5.5, 5.5))
plt.xlim((0.0, 1.0))
plt.ylim((0.0, 1.0))
plt.grid(which='major', axis='both', linestyle='dotted')
for (szname, stat) in sorted(report['performance'].items()):
if stat['mAP_curve']:
(x, y) = zip(*stat['mAP_curve'])
x = [0.0] + list(x) + [x[-1]]
y = [y[0]] + list(y) + [0.0]
else:
(x, y) = ([0.0, 1.0], [0.0, 0.0])
plt.plot(x, y, label=szname)
plt.legend()
plt.xlabel('recall')
plt.ylabel('precision')
plt.savefig(os.path.join(settings.PLOTS_DIR, ('pro' if proposal else 'det') + '_mAP_curve.pdf'))
plt.close()
|
ctw-baseline
|
positive
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
n = args[2]
sc1 = extend_to_32_bits((d_a >> 16 == 32768) & (d_b >> 16 == 32768) & (n == 1).cast_to(Type.int_32))
sc0 = extend_to_32_bits((d_a & 65535 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32))
mul_res1 = 2147483647 & sc1 | extract_16s(d_a, 1) * extract_16s(d_b, 1) << n.value & (sc1 ^ 4294967295)
mul_res0 = 2147483647 & sc0 | extract_16s(d_a, 0) * extract_16s(d_b, 0) << n.value & (sc0 ^ 4294967295)
e_d_0 = self.get('d{0}'.format(self.data['d']), Type.int_32)
e_d_1 = self.get('d{0}'.format(self.data['d'] + 1), Type.int_32)
result_hw0 = (e_d_0 + mul_res0 + 32768).cast_to(Type.int_64)
result_hw1 = (e_d_1 + mul_res1 + 32768).cast_to(Type.int_64)
result_hw0_ssov = ssov32(result_hw0, self.max_pos, self.max_neg)
result_hw1_ssov = ssov32(result_hw1, self.max_pos, self.max_neg)
result = result_hw1_ssov & 4294901760 | result_hw0_ssov >> 16 & 65535
c = 0
v = overflow(result).cast_to(Type.int_32)
av = advanced_overflow(result).cast_to(Type.int_32)
<DeepExtract>
psw = self.get('psw', Type.int_32)
</DeepExtract>
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
return result
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
n = args[2]
sc1 = extend_to_32_bits((d_a >> 16 == 32768) & (d_b >> 16 == 32768) & (n == 1).cast_to(Type.int_32))
sc0 = extend_to_32_bits((d_a & 65535 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32))
mul_res1 = 2147483647 & sc1 | extract_16s(d_a, 1) * extract_16s(d_b, 1) << n.value & (sc1 ^ 4294967295)
mul_res0 = 2147483647 & sc0 | extract_16s(d_a, 0) * extract_16s(d_b, 0) << n.value & (sc0 ^ 4294967295)
e_d_0 = self.get('d{0}'.format(self.data['d']), Type.int_32)
e_d_1 = self.get('d{0}'.format(self.data['d'] + 1), Type.int_32)
result_hw0 = (e_d_0 + mul_res0 + 32768).cast_to(Type.int_64)
result_hw1 = (e_d_1 + mul_res1 + 32768).cast_to(Type.int_64)
result_hw0_ssov = ssov32(result_hw0, self.max_pos, self.max_neg)
result_hw1_ssov = ssov32(result_hw1, self.max_pos, self.max_neg)
result = result_hw1_ssov & 4294901760 | result_hw0_ssov >> 16 & 65535
c = 0
v = overflow(result).cast_to(Type.int_32)
av = advanced_overflow(result).cast_to(Type.int_32)
psw = self.get('psw', Type.int_32)
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
return result
|
angr-platforms
|
positive
|
def output(self, step, num_steps, learning_rate, start):
"""Write out statistics to stdout.
Args:
step (int): current step
n_batch (int): total batches
start (int): start time of step.
"""
<DeepExtract>
t = time.time() - self.start_time
</DeepExtract>
step_fmt = '%2d' % step
if num_steps > 0:
step_fmt = '%s/%5d' % (step_fmt, num_steps)
logger.info(('Step %s; acc: %6.2f; ppl: %5.2f; xent: %4.2f; ' + 'lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec') % (step_fmt, self.accuracy(), self.ppl(), self.xent(), learning_rate, self.n_src_words / (t + 1e-05), self.n_words / (t + 1e-05), time.time() - start))
sys.stdout.flush()
|
def output(self, step, num_steps, learning_rate, start):
"""Write out statistics to stdout.
Args:
step (int): current step
n_batch (int): total batches
start (int): start time of step.
"""
t = time.time() - self.start_time
step_fmt = '%2d' % step
if num_steps > 0:
step_fmt = '%s/%5d' % (step_fmt, num_steps)
logger.info(('Step %s; acc: %6.2f; ppl: %5.2f; xent: %4.2f; ' + 'lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec') % (step_fmt, self.accuracy(), self.ppl(), self.xent(), learning_rate, self.n_src_words / (t + 1e-05), self.n_words / (t + 1e-05), time.time() - start))
sys.stdout.flush()
|
disambiguate
|
positive
|
def valid_flags(self, featmap_size, valid_size, device='cuda'):
(feat_h, feat_w) = featmap_size
(valid_h, valid_w) = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
<DeepExtract>
xx = valid_x.repeat(len(valid_y))
yy = valid_y.view(-1, 1).repeat(1, len(valid_x)).view(-1)
if row_major:
(valid_xx, valid_yy) = (xx, yy)
else:
(valid_xx, valid_yy) = (yy, xx)
</DeepExtract>
valid = valid_xx & valid_yy
return valid
|
def valid_flags(self, featmap_size, valid_size, device='cuda'):
(feat_h, feat_w) = featmap_size
(valid_h, valid_w) = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
xx = valid_x.repeat(len(valid_y))
yy = valid_y.view(-1, 1).repeat(1, len(valid_x)).view(-1)
if row_major:
(valid_xx, valid_yy) = (xx, yy)
else:
(valid_xx, valid_yy) = (yy, xx)
valid = valid_xx & valid_yy
return valid
|
ACSL
|
positive
|
def __sort(seq, low, high):
length = high - low + 1
if length <= INSERTION_SORT_LENGTH:
<DeepExtract>
for i in range(low + 1, high + 1):
j = i
while j > low and seq[j] < seq[j - 1]:
(seq[j], seq[j - 1]) = (seq[j - 1], seq[j])
j -= 1
</DeepExtract>
return
<DeepExtract>
index = self.five_sample(seq, low, high)
(seq[low], seq[index]) = (seq[index], seq[low])
(i, j) = (low + 1, high)
val = seq[low]
while 1:
while i < high and seq[i] <= val:
i += 1
while j > low and seq[j] >= val:
j -= 1
if i >= j:
break
(seq[i], seq[j]) = (seq[j], seq[i])
(seq[low], seq[j]) = (seq[j], seq[low])
index = j
</DeepExtract>
<DeepExtract>
length = index - low + 1
if length <= INSERTION_SORT_LENGTH:
self.insertion_sort(seq, low, index)
return
index = self.partition(seq, low, index)
self.__sort(seq, low, index)
self.__sort(seq, index + 1, index)
</DeepExtract>
<DeepExtract>
length = high - index + 1 + 1
if length <= INSERTION_SORT_LENGTH:
self.insertion_sort(seq, index + 1, high)
return
index = self.partition(seq, index + 1, high)
self.__sort(seq, index + 1, index)
self.__sort(seq, index + 1, high)
</DeepExtract>
|
def __sort(seq, low, high):
length = high - low + 1
if length <= INSERTION_SORT_LENGTH:
for i in range(low + 1, high + 1):
j = i
while j > low and seq[j] < seq[j - 1]:
(seq[j], seq[j - 1]) = (seq[j - 1], seq[j])
j -= 1
return
index = self.five_sample(seq, low, high)
(seq[low], seq[index]) = (seq[index], seq[low])
(i, j) = (low + 1, high)
val = seq[low]
while 1:
while i < high and seq[i] <= val:
i += 1
while j > low and seq[j] >= val:
j -= 1
if i >= j:
break
(seq[i], seq[j]) = (seq[j], seq[i])
(seq[low], seq[j]) = (seq[j], seq[low])
index = j
length = index - low + 1
if length <= INSERTION_SORT_LENGTH:
self.insertion_sort(seq, low, index)
return
index = self.partition(seq, low, index)
self.__sort(seq, low, index)
self.__sort(seq, index + 1, index)
length = high - index + 1 + 1
if length <= INSERTION_SORT_LENGTH:
self.insertion_sort(seq, index + 1, high)
return
index = self.partition(seq, index + 1, high)
self.__sort(seq, index + 1, index)
self.__sort(seq, index + 1, high)
|
algorithms-sedgewick-python
|
positive
|
def get_user_by_lookup_dict(lookup_dict: Dict[str, Any], default: Union[_DefaultT, Literal[DefaultValues.RAISE_EXCEPTION]]=DefaultValues.RAISE_EXCEPTION, require_verified: bool=True) -> Union['AbstractBaseUser', _DefaultT]:
verification_enabled = registration_settings.REGISTER_VERIFICATION_ENABLED
<DeepExtract>
setting_name = 'USER_{name}'.format(name='VERIFICATION_FLAG_FIELD')
user_class = get_user_model()
placeholder = object()
value = getattr(user_class, 'VERIFICATION_FLAG_FIELD', placeholder)
if value is placeholder:
value = getattr(registration_settings, setting_name)
verification_flag_field = value
</DeepExtract>
user_class = get_user_model()
kwargs = {}
kwargs.update(lookup_dict)
if require_verified and verification_enabled and verification_flag_field:
kwargs[verification_flag_field] = True
try:
queryset = user_class.objects.all()
<DeepExtract>
try:
user = _get_object_or_404(queryset, *filter_args, **filter_kwargs)
except (TypeError, ValueError, ValidationError):
raise Http404 from None
</DeepExtract>
except Http404:
if default is DefaultValues.RAISE_EXCEPTION:
raise UserNotFound() from None
return default
return user
|
def get_user_by_lookup_dict(lookup_dict: Dict[str, Any], default: Union[_DefaultT, Literal[DefaultValues.RAISE_EXCEPTION]]=DefaultValues.RAISE_EXCEPTION, require_verified: bool=True) -> Union['AbstractBaseUser', _DefaultT]:
verification_enabled = registration_settings.REGISTER_VERIFICATION_ENABLED
setting_name = 'USER_{name}'.format(name='VERIFICATION_FLAG_FIELD')
user_class = get_user_model()
placeholder = object()
value = getattr(user_class, 'VERIFICATION_FLAG_FIELD', placeholder)
if value is placeholder:
value = getattr(registration_settings, setting_name)
verification_flag_field = value
user_class = get_user_model()
kwargs = {}
kwargs.update(lookup_dict)
if require_verified and verification_enabled and verification_flag_field:
kwargs[verification_flag_field] = True
try:
queryset = user_class.objects.all()
try:
user = _get_object_or_404(queryset, *filter_args, **filter_kwargs)
except (TypeError, ValueError, ValidationError):
raise Http404 from None
except Http404:
if default is DefaultValues.RAISE_EXCEPTION:
raise UserNotFound() from None
return default
return user
|
django-rest-registration
|
positive
|
def get_history(self, name):
<DeepExtract>
if name not in self.metrics:
raise ValueError('Unknown metric: %s' % (name,))
</DeepExtract>
return self.metrics[name].get_history()
|
def get_history(self, name):
if name not in self.metrics:
raise ValueError('Unknown metric: %s' % (name,))
return self.metrics[name].get_history()
|
AutoRec
|
positive
|
def voc_eval(detpath, annopath, imagesetfile, classname, cachedir, ovthresh=0.5, use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
imageset = os.path.splitext(os.path.basename(imagesetfile))[0]
cachefile = os.path.join(cachedir, imageset + '_annots.pkl')
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
recs = {}
for (i, imagename) in enumerate(imagenames):
<DeepExtract>
tree = ET.parse(annopath.format(imagename))
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text), int(bbox.find('ymin').text), int(bbox.find('xmax').text), int(bbox.find('ymax').text)]
objects.append(obj_struct)
recs[imagename] = objects
</DeepExtract>
if i % 100 == 0:
logger.info('Reading annotation for {:d}/{:d}'.format(i + 1, len(imagenames)))
logger.info('Saving cached annotations to {:s}'.format(cachefile))
save_object(recs, cachefile)
else:
recs = load_object(cachefile)
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox, 'difficult': difficult, 'det': det}
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
uni = (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.0
R['det'][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
<DeepExtract>
if use_07_metric:
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
ap = ap
</DeepExtract>
return (rec, prec, ap)
|
def voc_eval(detpath, annopath, imagesetfile, classname, cachedir, ovthresh=0.5, use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
imageset = os.path.splitext(os.path.basename(imagesetfile))[0]
cachefile = os.path.join(cachedir, imageset + '_annots.pkl')
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
recs = {}
for (i, imagename) in enumerate(imagenames):
tree = ET.parse(annopath.format(imagename))
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text), int(bbox.find('ymin').text), int(bbox.find('xmax').text), int(bbox.find('ymax').text)]
objects.append(obj_struct)
recs[imagename] = objects
if i % 100 == 0:
logger.info('Reading annotation for {:d}/{:d}'.format(i + 1, len(imagenames)))
logger.info('Saving cached annotations to {:s}'.format(cachefile))
save_object(recs, cachefile)
else:
recs = load_object(cachefile)
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox, 'difficult': difficult, 'det': det}
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
uni = (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.0
R['det'][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
if use_07_metric:
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
ap = ap
return (rec, prec, ap)
|
Detectron-DA-Faster-RCNN
|
positive
|
def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
model = OSNet(num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2], channels=[32, 128, 192, 256], loss=loss, **kwargs)
if pretrained:
<DeepExtract>
import os
import errno
import gdown
from collections import OrderedDict
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(os.getenv(ENV_TORCH_HOME, os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch')))
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
filename = 'osnet_x0_5' + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls['osnet_x0_5'], cached_file, quiet=False)
state_dict = torch.load(cached_file)
model_dict = model.state_dict()
new_state_dict = OrderedDict()
(matched_layers, discarded_layers) = ([], [])
for (k, v) in state_dict.items():
if k.startswith('module.'):
k = k[7:]
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn('The pretrained weights from "{}" cannot be loaded, please check the key names manually (** ignored and continue **)'.format(cached_file))
else:
print('Successfully loaded imagenet pretrained weights from "{}"'.format(cached_file))
if len(discarded_layers) > 0:
print('** The following layers are discarded due to unmatched keys or layer size: {}'.format(discarded_layers))
</DeepExtract>
return model
|
def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
model = OSNet(num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2], channels=[32, 128, 192, 256], loss=loss, **kwargs)
if pretrained:
import os
import errno
import gdown
from collections import OrderedDict
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(os.getenv(ENV_TORCH_HOME, os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch')))
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
filename = 'osnet_x0_5' + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls['osnet_x0_5'], cached_file, quiet=False)
state_dict = torch.load(cached_file)
model_dict = model.state_dict()
new_state_dict = OrderedDict()
(matched_layers, discarded_layers) = ([], [])
for (k, v) in state_dict.items():
if k.startswith('module.'):
k = k[7:]
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn('The pretrained weights from "{}" cannot be loaded, please check the key names manually (** ignored and continue **)'.format(cached_file))
else:
print('Successfully loaded imagenet pretrained weights from "{}"'.format(cached_file))
if len(discarded_layers) > 0:
print('** The following layers are discarded due to unmatched keys or layer size: {}'.format(discarded_layers))
return model
|
deep-person-reid
|
positive
|
def global_shape(self, grid_space, scales):
<DeepExtract>
shape = np.array([int(np.ceil(s * n)) for (s, n) in zip(scales, self.shape)])
shape[np.array(self.shape) == 1] = 1
grid_shape = tuple(shape)
</DeepExtract>
if grid_space[0]:
if self.mmax == 0:
return (1, grid_shape[1])
else:
return grid_shape
elif grid_space[1]:
shape = list(grid_shape)
if self.mmax > 0:
shape[0] = self.shape[0]
elif self.dtype == np.complex128:
shape[0] = 1
elif self.dtype == np.float64:
shape[0] = 2
return tuple(shape)
else:
Nphi = self.shape[0]
Lmax = self.Lmax
if self.mmax > 0:
if self.dtype == np.complex128:
return (Nphi // 2, Lmax + 1 + max(0, Lmax + 1 - Nphi // 2))
elif self.dtype == np.float64:
return (Nphi // 2, Lmax + 1 + max(0, Lmax + 2 - Nphi // 2))
elif self.dtype == np.complex128:
return (1, Lmax + 1)
elif self.dtype == np.float64:
return (2, Lmax + 1)
|
def global_shape(self, grid_space, scales):
shape = np.array([int(np.ceil(s * n)) for (s, n) in zip(scales, self.shape)])
shape[np.array(self.shape) == 1] = 1
grid_shape = tuple(shape)
if grid_space[0]:
if self.mmax == 0:
return (1, grid_shape[1])
else:
return grid_shape
elif grid_space[1]:
shape = list(grid_shape)
if self.mmax > 0:
shape[0] = self.shape[0]
elif self.dtype == np.complex128:
shape[0] = 1
elif self.dtype == np.float64:
shape[0] = 2
return tuple(shape)
else:
Nphi = self.shape[0]
Lmax = self.Lmax
if self.mmax > 0:
if self.dtype == np.complex128:
return (Nphi // 2, Lmax + 1 + max(0, Lmax + 1 - Nphi // 2))
elif self.dtype == np.float64:
return (Nphi // 2, Lmax + 1 + max(0, Lmax + 2 - Nphi // 2))
elif self.dtype == np.complex128:
return (1, Lmax + 1)
elif self.dtype == np.float64:
return (2, Lmax + 1)
|
dedalus
|
positive
|
def get_kernel_availability():
"""Return a tuple - a list of installed kernel versions and a list of
available kernel versions.
"""
<DeepExtract>
if ['--showduplicates', 'kernel'] is None:
['--showduplicates', 'kernel'] = []
cmd = ['yum', 'list', '-y']
repos_to_disable = []
if isinstance(disable_repos, list):
repos_to_disable = disable_repos
else:
repos_to_disable = tool_opts.disablerepo
for repo in repos_to_disable:
cmd.append('--disablerepo=%s' % repo)
if set_releasever and system_info.releasever:
cmd.append('--releasever=%s' % system_info.releasever)
if system_info.version.major == 8:
cmd.append('--setopt=module_platform_id=platform:el8')
repos_to_enable = []
if isinstance(enable_repos, list):
repos_to_enable = enable_repos
else:
repos_to_enable = system_info.get_enabled_rhel_repos()
for repo in repos_to_enable:
cmd.append('--enablerepo=%s' % repo)
cmd.extend(['--showduplicates', 'kernel'])
(stdout, returncode) = utils.run_subprocess(cmd, print_output=False)
nothing_to_do_error_exists = stdout.endswith('Error: Nothing to do\n')
if returncode == 1 and nothing_to_do_error_exists:
loggerinst.debug('Yum has nothing to do. Ignoring.')
returncode = 0
(output, _) = (stdout, returncode)
</DeepExtract>
return (list(get_kernel(data)) for data in output.split('Available Packages'))
|
def get_kernel_availability():
"""Return a tuple - a list of installed kernel versions and a list of
available kernel versions.
"""
if ['--showduplicates', 'kernel'] is None:
['--showduplicates', 'kernel'] = []
cmd = ['yum', 'list', '-y']
repos_to_disable = []
if isinstance(disable_repos, list):
repos_to_disable = disable_repos
else:
repos_to_disable = tool_opts.disablerepo
for repo in repos_to_disable:
cmd.append('--disablerepo=%s' % repo)
if set_releasever and system_info.releasever:
cmd.append('--releasever=%s' % system_info.releasever)
if system_info.version.major == 8:
cmd.append('--setopt=module_platform_id=platform:el8')
repos_to_enable = []
if isinstance(enable_repos, list):
repos_to_enable = enable_repos
else:
repos_to_enable = system_info.get_enabled_rhel_repos()
for repo in repos_to_enable:
cmd.append('--enablerepo=%s' % repo)
cmd.extend(['--showduplicates', 'kernel'])
(stdout, returncode) = utils.run_subprocess(cmd, print_output=False)
nothing_to_do_error_exists = stdout.endswith('Error: Nothing to do\n')
if returncode == 1 and nothing_to_do_error_exists:
loggerinst.debug('Yum has nothing to do. Ignoring.')
returncode = 0
(output, _) = (stdout, returncode)
return (list(get_kernel(data)) for data in output.split('Available Packages'))
|
convert2rhel
|
positive
|
def test_event_payload_error(self):
def func():
message = ['l' for i in range(8 * 1024)]
message = ''.join(message)
payload = {'title': 'title', 'message': message}
self.statsd.event(**payload)
with pytest.raises(ValueError):
<DeepExtract>
message = ['l' for i in range(8 * 1024)]
message = ''.join(message)
payload = {'title': 'title', 'message': message}
self.statsd.event(**payload)
</DeepExtract>
self.statsd.event('title', 'message')
|
def test_event_payload_error(self):
def func():
message = ['l' for i in range(8 * 1024)]
message = ''.join(message)
payload = {'title': 'title', 'message': message}
self.statsd.event(**payload)
with pytest.raises(ValueError):
message = ['l' for i in range(8 * 1024)]
message = ''.join(message)
payload = {'title': 'title', 'message': message}
self.statsd.event(**payload)
self.statsd.event('title', 'message')
|
datadogpy
|
positive
|
def __init__(self):
<DeepExtract>
f = open('input.txt', 'r')
data = []
for line in f:
data.append(line.strip())
self.seq1 = data[0]
self.seq2 = data[1]
self.n = len(self.seq1)
self.m = len(self.seq2)
</DeepExtract>
<DeepExtract>
sMatrixTxt = '\n A C D E F G H I K L M N P Q R S T V W Y\nA 4 0 -2 -1 -2 0 -2 -1 -1 -1 -1 -2 -1 -1 -1 1 0 0 -3 -2\nC 0 9 -3 -4 -2 -3 -3 -1 -3 -1 -1 -3 -3 -3 -3 -1 -1 -1 -2 -2\nD -2 -3 6 2 -3 -1 -1 -3 -1 -4 -3 1 -1 0 -2 0 -1 -3 -4 -3\nE -1 -4 2 5 -3 -2 0 -3 1 -3 -2 0 -1 2 0 0 -1 -2 -3 -2\nF -2 -2 -3 -3 6 -3 -1 0 -3 0 0 -3 -4 -3 -3 -2 -2 -1 1 3\nG 0 -3 -1 -2 -3 6 -2 -4 -2 -4 -3 0 -2 -2 -2 0 -2 -3 -2 -3\nH -2 -3 -1 0 -1 -2 8 -3 -1 -3 -2 1 -2 0 0 -1 -2 -3 -2 2\nI -1 -1 -3 -3 0 -4 -3 4 -3 2 1 -3 -3 -3 -3 -2 -1 3 -3 -1\nK -1 -3 -1 1 -3 -2 -1 -3 5 -2 -1 0 -1 1 2 0 -1 -2 -3 -2\nL -1 -1 -4 -3 0 -4 -3 2 -2 4 2 -3 -3 -2 -2 -2 -1 1 -2 -1\nM -1 -1 -3 -2 0 -3 -2 1 -1 2 5 -2 -2 0 -1 -1 -1 1 -1 -1\nN -2 -3 1 0 -3 0 1 -3 0 -3 -2 6 -2 0 0 1 0 -3 -4 -2\nP -1 -3 -1 -1 -4 -2 -2 -3 -1 -3 -2 -2 7 -1 -2 -1 -1 -2 -4 -3\nQ -1 -3 0 2 -3 -2 0 -3 1 -2 0 0 -1 5 1 0 -1 -2 -2 -1\nR -1 -3 -2 0 -3 -2 0 -3 2 -2 -1 0 -2 1 5 -1 -1 -3 -3 -2\nS 1 -1 0 0 -2 0 -1 -2 0 -2 -1 1 -1 0 -1 4 1 -2 -3 -2\nT 0 -1 -1 -1 -2 -2 -2 -1 -1 -1 -1 0 -1 -1 -1 1 5 0 -2 -2\nV 0 -1 -3 -2 -1 -3 -3 3 -2 1 1 -3 -2 -2 -3 -2 0 4 -3 -1\nW -3 -2 -4 -3 1 -2 -2 -3 -3 -2 -1 -4 -4 -2 -3 -3 -2 -3 11 2\nY -2 -2 -3 -2 3 -3 2 -1 -2 -1 -1 -2 -3 -1 -2 -2 -2 -1 2 7\n'
sMatrixList = sMatrixTxt.strip().split('\n')
aaList = sMatrixList[0].split()
sMatrix = dict()
for aa in aaList:
sMatrix[aa] = dict()
for i in range(1, len(aaList) + 1):
currRow = sMatrixList[i].split()
for j in range(len(aaList)):
sMatrix[currRow[0]][aaList[j]] = int(currRow[j + 1])
self.sMatrix = sMatrix
</DeepExtract>
self.sigma = 5
<DeepExtract>
middle = math.floor((self.m - 0) / 2)
n = self.n - 0
m = self.m - 0
fromSource = [[0] * (n + 1), [0] * (n + 1)]
currColumn = 0
for i in range(1, n + 1):
fromSource[currColumn][i] = fromSource[currColumn][i - 1] - self.sigma
currColumn = 1 - currColumn
for jChar in range(0, 0 + middle + 2):
if jChar > self.n - 1:
continue
fromSource[currColumn][0] = fromSource[1 - currColumn][0] - self.sigma
for i in range(1, n + 1):
iChar = i + 0 - 1
score1 = fromSource[currColumn][i - 1] - self.sigma
score2 = fromSource[1 - currColumn][i] - self.sigma
score3 = fromSource[1 - currColumn][i - 1] + self.sMatrix[self.seq1[iChar]][self.seq2[jChar]]
fromSource[currColumn][i] = max(score1, score2, score3)
currColumn = 1 - currColumn
leftColumn1 = currColumn
toSink = [[0] * (n + 1), [0] * (n + 1)]
currColumn = 0
for i in range(n - 1, -1, -1):
toSink[currColumn][i] = toSink[currColumn][i + 1] - self.sigma
currColumn = 1 - currColumn
for jChar in range(self.m - 1, 0 + middle - 1, -1):
toSink[currColumn][n] = toSink[1 - currColumn][n] - self.sigma
for i in range(n - 1, -1, -1):
iChar = i + 0 - 1
score1 = toSink[currColumn][i + 1] - self.sigma
score2 = toSink[1 - currColumn][i] - self.sigma
score3 = toSink[1 - currColumn][i + 1] + self.sMatrix[self.seq1[iChar]][self.seq2[jChar]]
toSink[currColumn][i] = max(score1, score2, score3)
currColumn = 1 - currColumn
leftColumn2 = 1 - currColumn
length = [0] * (n + 1)
for i in range(n + 1):
length[i] = fromSource[leftColumn1][i] + toSink[leftColumn2][i]
iMax = max(range(len(length)), key=lambda x: length[x])
i1 = 0 + iMax - 1
j1 = 0 + middle
if iMax == n:
i2 = i1
j2 = j1 + 1
else:
score1 = fromSource[1 - leftColumn1][iMax] + toSink[1 - leftColumn2][iMax]
score2 = fromSource[leftColumn1][iMax + 1] + toSink[leftColumn2][iMax + 1]
score3 = fromSource[1 - leftColumn1][iMax + 1] + toSink[1 - leftColumn2][iMax + 1]
sMax = max(score1, score2, score3)
if sMax == score3:
i2 = i1 + 1
j2 = j1 + 1
elif sMax == score1:
i2 = i1
j2 = j1 + 1
else:
i2 = i1 + 1
j2 = j1
(i1, j1, i2, j2) = (i1, j1, i2, j2)
</DeepExtract>
print('(' + str(i1) + ', ' + str(j1) + ') (' + str(i2) + ', ' + str(j2) + ')')
|
def __init__(self):
f = open('input.txt', 'r')
data = []
for line in f:
data.append(line.strip())
self.seq1 = data[0]
self.seq2 = data[1]
self.n = len(self.seq1)
self.m = len(self.seq2)
sMatrixTxt = '\n A C D E F G H I K L M N P Q R S T V W Y\nA 4 0 -2 -1 -2 0 -2 -1 -1 -1 -1 -2 -1 -1 -1 1 0 0 -3 -2\nC 0 9 -3 -4 -2 -3 -3 -1 -3 -1 -1 -3 -3 -3 -3 -1 -1 -1 -2 -2\nD -2 -3 6 2 -3 -1 -1 -3 -1 -4 -3 1 -1 0 -2 0 -1 -3 -4 -3\nE -1 -4 2 5 -3 -2 0 -3 1 -3 -2 0 -1 2 0 0 -1 -2 -3 -2\nF -2 -2 -3 -3 6 -3 -1 0 -3 0 0 -3 -4 -3 -3 -2 -2 -1 1 3\nG 0 -3 -1 -2 -3 6 -2 -4 -2 -4 -3 0 -2 -2 -2 0 -2 -3 -2 -3\nH -2 -3 -1 0 -1 -2 8 -3 -1 -3 -2 1 -2 0 0 -1 -2 -3 -2 2\nI -1 -1 -3 -3 0 -4 -3 4 -3 2 1 -3 -3 -3 -3 -2 -1 3 -3 -1\nK -1 -3 -1 1 -3 -2 -1 -3 5 -2 -1 0 -1 1 2 0 -1 -2 -3 -2\nL -1 -1 -4 -3 0 -4 -3 2 -2 4 2 -3 -3 -2 -2 -2 -1 1 -2 -1\nM -1 -1 -3 -2 0 -3 -2 1 -1 2 5 -2 -2 0 -1 -1 -1 1 -1 -1\nN -2 -3 1 0 -3 0 1 -3 0 -3 -2 6 -2 0 0 1 0 -3 -4 -2\nP -1 -3 -1 -1 -4 -2 -2 -3 -1 -3 -2 -2 7 -1 -2 -1 -1 -2 -4 -3\nQ -1 -3 0 2 -3 -2 0 -3 1 -2 0 0 -1 5 1 0 -1 -2 -2 -1\nR -1 -3 -2 0 -3 -2 0 -3 2 -2 -1 0 -2 1 5 -1 -1 -3 -3 -2\nS 1 -1 0 0 -2 0 -1 -2 0 -2 -1 1 -1 0 -1 4 1 -2 -3 -2\nT 0 -1 -1 -1 -2 -2 -2 -1 -1 -1 -1 0 -1 -1 -1 1 5 0 -2 -2\nV 0 -1 -3 -2 -1 -3 -3 3 -2 1 1 -3 -2 -2 -3 -2 0 4 -3 -1\nW -3 -2 -4 -3 1 -2 -2 -3 -3 -2 -1 -4 -4 -2 -3 -3 -2 -3 11 2\nY -2 -2 -3 -2 3 -3 2 -1 -2 -1 -1 -2 -3 -1 -2 -2 -2 -1 2 7\n'
sMatrixList = sMatrixTxt.strip().split('\n')
aaList = sMatrixList[0].split()
sMatrix = dict()
for aa in aaList:
sMatrix[aa] = dict()
for i in range(1, len(aaList) + 1):
currRow = sMatrixList[i].split()
for j in range(len(aaList)):
sMatrix[currRow[0]][aaList[j]] = int(currRow[j + 1])
self.sMatrix = sMatrix
self.sigma = 5
middle = math.floor((self.m - 0) / 2)
n = self.n - 0
m = self.m - 0
fromSource = [[0] * (n + 1), [0] * (n + 1)]
currColumn = 0
for i in range(1, n + 1):
fromSource[currColumn][i] = fromSource[currColumn][i - 1] - self.sigma
currColumn = 1 - currColumn
for jChar in range(0, 0 + middle + 2):
if jChar > self.n - 1:
continue
fromSource[currColumn][0] = fromSource[1 - currColumn][0] - self.sigma
for i in range(1, n + 1):
iChar = i + 0 - 1
score1 = fromSource[currColumn][i - 1] - self.sigma
score2 = fromSource[1 - currColumn][i] - self.sigma
score3 = fromSource[1 - currColumn][i - 1] + self.sMatrix[self.seq1[iChar]][self.seq2[jChar]]
fromSource[currColumn][i] = max(score1, score2, score3)
currColumn = 1 - currColumn
leftColumn1 = currColumn
toSink = [[0] * (n + 1), [0] * (n + 1)]
currColumn = 0
for i in range(n - 1, -1, -1):
toSink[currColumn][i] = toSink[currColumn][i + 1] - self.sigma
currColumn = 1 - currColumn
for jChar in range(self.m - 1, 0 + middle - 1, -1):
toSink[currColumn][n] = toSink[1 - currColumn][n] - self.sigma
for i in range(n - 1, -1, -1):
iChar = i + 0 - 1
score1 = toSink[currColumn][i + 1] - self.sigma
score2 = toSink[1 - currColumn][i] - self.sigma
score3 = toSink[1 - currColumn][i + 1] + self.sMatrix[self.seq1[iChar]][self.seq2[jChar]]
toSink[currColumn][i] = max(score1, score2, score3)
currColumn = 1 - currColumn
leftColumn2 = 1 - currColumn
length = [0] * (n + 1)
for i in range(n + 1):
length[i] = fromSource[leftColumn1][i] + toSink[leftColumn2][i]
iMax = max(range(len(length)), key=lambda x: length[x])
i1 = 0 + iMax - 1
j1 = 0 + middle
if iMax == n:
i2 = i1
j2 = j1 + 1
else:
score1 = fromSource[1 - leftColumn1][iMax] + toSink[1 - leftColumn2][iMax]
score2 = fromSource[leftColumn1][iMax + 1] + toSink[leftColumn2][iMax + 1]
score3 = fromSource[1 - leftColumn1][iMax + 1] + toSink[1 - leftColumn2][iMax + 1]
sMax = max(score1, score2, score3)
if sMax == score3:
i2 = i1 + 1
j2 = j1 + 1
elif sMax == score1:
i2 = i1
j2 = j1 + 1
else:
i2 = i1 + 1
j2 = j1
(i1, j1, i2, j2) = (i1, j1, i2, j2)
print('(' + str(i1) + ', ' + str(j1) + ') (' + str(i2) + ', ' + str(j2) + ')')
|
Coursera-Bioinformatics
|
positive
|
def get_fermi(self, concentration: float, temperature: float, tol: float=0.01, nstep: int=50, step: float=0.1, precision: int=10, return_electron_hole_conc=False):
"""
Finds the fermi level at which the doping concentration at the given
temperature (T) is equal to concentration. A greedy algorithm is used
where the relative error is minimized by calculating the doping at a
grid which continually becomes finer.
Args:
concentration: The doping concentration in 1/Bohr^3. Negative values
represent n-type doping and positive values represent p-type
doping.
temperature: The temperature in Kelvin.
return_electron_hole_conc: Whether to also return the separate
electron and hole concentrations at the doping level.
Returns:
If return_electron_hole_conc is False: The Fermi level in eV. Note
that this is different from the default dos.efermi.
If return_electron_hole_conc is True: the Fermi level, electron
concentration and hole concentration at the Fermi level as a tuple.
The electron and hole concentrations are in Bohr^-3.
"""
fermi = self.efermi
relative_error = float('inf')
for _ in range(precision):
frange = np.arange(-nstep, nstep + 1) * step + fermi
calc_doping = np.array([self.get_doping(f, temperature) for f in frange])
relative_error = abs(calc_doping / concentration - 1.0)
fermi = frange[np.argmin(relative_error)]
step /= 10.0
if min(relative_error) > tol:
raise ValueError('Could not find fermi within {}% of concentration={}'.format(tol * 100, concentration))
if return_electron_hole_conc:
<DeepExtract>
wdos = _get_weighted_dos(self.energies, self.tdos, fermi, temperature, atomic_units=self.atomic_units)
num_electrons = wdos.sum() * self.de
conc = (self.nelect - num_electrons) / self.structure.volume
if True:
cb_conc = wdos[self.energies > self.efermi].sum() * self.de
vb_conc = wdos[self.energies <= self.efermi].sum() * self.de
cb_conc = cb_conc / self.structure.volume
vb_conc = (self.nelect - vb_conc) / self.structure.volume
(_, n_elec, n_hole) = (conc, cb_conc, vb_conc)
else:
(_, n_elec, n_hole) = conc
</DeepExtract>
return (fermi, n_elec, n_hole)
else:
return fermi
|
def get_fermi(self, concentration: float, temperature: float, tol: float=0.01, nstep: int=50, step: float=0.1, precision: int=10, return_electron_hole_conc=False):
"""
Finds the fermi level at which the doping concentration at the given
temperature (T) is equal to concentration. A greedy algorithm is used
where the relative error is minimized by calculating the doping at a
grid which continually becomes finer.
Args:
concentration: The doping concentration in 1/Bohr^3. Negative values
represent n-type doping and positive values represent p-type
doping.
temperature: The temperature in Kelvin.
return_electron_hole_conc: Whether to also return the separate
electron and hole concentrations at the doping level.
Returns:
If return_electron_hole_conc is False: The Fermi level in eV. Note
that this is different from the default dos.efermi.
If return_electron_hole_conc is True: the Fermi level, electron
concentration and hole concentration at the Fermi level as a tuple.
The electron and hole concentrations are in Bohr^-3.
"""
fermi = self.efermi
relative_error = float('inf')
for _ in range(precision):
frange = np.arange(-nstep, nstep + 1) * step + fermi
calc_doping = np.array([self.get_doping(f, temperature) for f in frange])
relative_error = abs(calc_doping / concentration - 1.0)
fermi = frange[np.argmin(relative_error)]
step /= 10.0
if min(relative_error) > tol:
raise ValueError('Could not find fermi within {}% of concentration={}'.format(tol * 100, concentration))
if return_electron_hole_conc:
wdos = _get_weighted_dos(self.energies, self.tdos, fermi, temperature, atomic_units=self.atomic_units)
num_electrons = wdos.sum() * self.de
conc = (self.nelect - num_electrons) / self.structure.volume
if True:
cb_conc = wdos[self.energies > self.efermi].sum() * self.de
vb_conc = wdos[self.energies <= self.efermi].sum() * self.de
cb_conc = cb_conc / self.structure.volume
vb_conc = (self.nelect - vb_conc) / self.structure.volume
(_, n_elec, n_hole) = (conc, cb_conc, vb_conc)
else:
(_, n_elec, n_hole) = conc
return (fermi, n_elec, n_hole)
else:
return fermi
|
amset
|
positive
|
def json_repr(obj):
"""Represent instance of a class as JSON.
"""
def serialize(obj):
"""Recursively walk object's hierarchy.
"""
if obj is None:
return None
if isinstance(obj, Enum):
return str(obj)
if isinstance(obj, (bool, int, float, str)):
return obj
if isinstance(obj, dict):
obj = obj.copy()
for key in sorted(obj.keys()):
<DeepExtract>
if obj[key] is None:
obj[key][key] = None
if isinstance(obj[key], Enum):
obj[key][key] = str(obj[key])
if isinstance(obj[key], (bool, int, float, str)):
obj[key][key] = obj[key]
if isinstance(obj[key], dict):
obj[key] = obj[key].copy()
for key in sorted(obj[key].keys()):
obj[key][key] = serialize(obj[key][key])
obj[key][key] = obj[key]
if isinstance(obj[key], list):
obj[key][key] = [serialize(item) for item in obj[key]]
if isinstance(obj[key], tuple):
obj[key][key] = tuple(serialize([item for item in obj[key]]))
if hasattr(obj[key], '__dict__'):
obj[key][key] = serialize(obj[key].__dict__)
obj[key][key] = repr(obj[key])
</DeepExtract>
return obj
if isinstance(obj, list):
return [serialize(item) for item in obj]
if isinstance(obj, tuple):
return tuple(serialize([item for item in obj]))
if hasattr(obj, '__dict__'):
return serialize(obj.__dict__)
return repr(obj)
return json.dumps(serialize(obj))
|
def json_repr(obj):
"""Represent instance of a class as JSON.
"""
def serialize(obj):
"""Recursively walk object's hierarchy.
"""
if obj is None:
return None
if isinstance(obj, Enum):
return str(obj)
if isinstance(obj, (bool, int, float, str)):
return obj
if isinstance(obj, dict):
obj = obj.copy()
for key in sorted(obj.keys()):
if obj[key] is None:
obj[key][key] = None
if isinstance(obj[key], Enum):
obj[key][key] = str(obj[key])
if isinstance(obj[key], (bool, int, float, str)):
obj[key][key] = obj[key]
if isinstance(obj[key], dict):
obj[key] = obj[key].copy()
for key in sorted(obj[key].keys()):
obj[key][key] = serialize(obj[key][key])
obj[key][key] = obj[key]
if isinstance(obj[key], list):
obj[key][key] = [serialize(item) for item in obj[key]]
if isinstance(obj[key], tuple):
obj[key][key] = tuple(serialize([item for item in obj[key]]))
if hasattr(obj[key], '__dict__'):
obj[key][key] = serialize(obj[key].__dict__)
obj[key][key] = repr(obj[key])
return obj
if isinstance(obj, list):
return [serialize(item) for item in obj]
if isinstance(obj, tuple):
return tuple(serialize([item for item in obj]))
if hasattr(obj, '__dict__'):
return serialize(obj.__dict__)
return repr(obj)
return json.dumps(serialize(obj))
|
apidoc
|
positive
|
def get_page_toc_object():
self_toc = TocTree(self.env).get_toc_for(pagename, self)
try:
<DeepExtract>
if not self_toc.children[0].children:
nav = None
reference = self_toc.children[0].children[0].children[0]
title = reference.astext()
url = reference.attributes['refuri']
active = 'current' in self_toc.children[0].attributes['classes']
if only_pages and '#' in url:
nav = None
nav = {}
nav['title'] = title
nav['url'] = url
nav['children'] = []
nav['active'] = active
if len(self_toc.children[0].children) > 1:
for child_item in self_toc.children[0].children[1].children:
child_nav = convert_docutils_node(child_item, only_pages=only_pages)
if child_nav is not None:
nav['children'].append(child_nav)
nav = nav
</DeepExtract>
return nav
except Exception:
return {}
|
def get_page_toc_object():
self_toc = TocTree(self.env).get_toc_for(pagename, self)
try:
if not self_toc.children[0].children:
nav = None
reference = self_toc.children[0].children[0].children[0]
title = reference.astext()
url = reference.attributes['refuri']
active = 'current' in self_toc.children[0].attributes['classes']
if only_pages and '#' in url:
nav = None
nav = {}
nav['title'] = title
nav['url'] = url
nav['children'] = []
nav['active'] = active
if len(self_toc.children[0].children) > 1:
for child_item in self_toc.children[0].children[1].children:
child_nav = convert_docutils_node(child_item, only_pages=only_pages)
if child_nav is not None:
nav['children'].append(child_nav)
nav = nav
return nav
except Exception:
return {}
|
dataprep
|
positive
|
def __init__(self, url, key, recursive_sample_limit=10, reanalyze=True, verify=True, **optional_parameters):
self.url = url
self.key = key
self.reanalyze = reanalyze
self.recursive_sample_limit = recursive_sample_limit
<DeepExtract>
headers = {'Authorization': 'api_key {}'.format(self.key)}
self.headers = headers
</DeepExtract>
self.session = sessions.Session()
self.session.headers = self.headers
self.session.verify = verify
self.optional_parameters = optional_parameters
|
def __init__(self, url, key, recursive_sample_limit=10, reanalyze=True, verify=True, **optional_parameters):
self.url = url
self.key = key
self.reanalyze = reanalyze
self.recursive_sample_limit = recursive_sample_limit
headers = {'Authorization': 'api_key {}'.format(self.key)}
self.headers = headers
self.session = sessions.Session()
self.session.headers = self.headers
self.session.verify = verify
self.optional_parameters = optional_parameters
|
Cortex-Analyzers
|
positive
|
def enable_unit_from(self, conf):
<DeepExtract>
if not conf:
wanted = default
wanted = conf.get(Install, 'WantedBy', default, True)
</DeepExtract>
if not wanted and (not self._force):
logg.debug('%s has no target', conf.name())
return False
target = wanted or self.get_default_target()
<DeepExtract>
if self.user_mode():
user_folder = self.user_folder()
folder = self.default_enablefolder(target, user_folder)
else:
folder = self.default_enablefolder(target)
</DeepExtract>
if self._root:
<DeepExtract>
if not self._root:
folder = folder
if not folder:
folder = folder
if is_good_root(self._root) and folder.startswith(self._root):
folder = folder
while folder.startswith(os.path.sep):
folder = folder[1:]
folder = os.path.join(self._root, folder)
</DeepExtract>
if not os.path.isdir(folder):
os.makedirs(folder)
source = conf.filename()
if not source:
logg.debug('%s has no real file', conf.name())
return False
symlink = os.path.join(folder, conf.name())
if True:
_f = self._force and '-f' or ''
logg.info("ln -s {_f} '{source}' '{symlink}'".format(**locals()))
if self._force and os.path.islink(symlink):
os.remove(target)
if not os.path.islink(symlink):
os.symlink(source, symlink)
return True
|
def enable_unit_from(self, conf):
if not conf:
wanted = default
wanted = conf.get(Install, 'WantedBy', default, True)
if not wanted and (not self._force):
logg.debug('%s has no target', conf.name())
return False
target = wanted or self.get_default_target()
if self.user_mode():
user_folder = self.user_folder()
folder = self.default_enablefolder(target, user_folder)
else:
folder = self.default_enablefolder(target)
if self._root:
if not self._root:
folder = folder
if not folder:
folder = folder
if is_good_root(self._root) and folder.startswith(self._root):
folder = folder
while folder.startswith(os.path.sep):
folder = folder[1:]
folder = os.path.join(self._root, folder)
if not os.path.isdir(folder):
os.makedirs(folder)
source = conf.filename()
if not source:
logg.debug('%s has no real file', conf.name())
return False
symlink = os.path.join(folder, conf.name())
if True:
_f = self._force and '-f' or ''
logg.info("ln -s {_f} '{source}' '{symlink}'".format(**locals()))
if self._force and os.path.islink(symlink):
os.remove(target)
if not os.path.islink(symlink):
os.symlink(source, symlink)
return True
|
docker-systemctl-images
|
positive
|
def _default_view(self):
"""If this is the root item, return the parent
which must be a TreeView, otherwise return the
parent Item's view.
"""
<DeepExtract>
if not index.isValid():
parent = QModelIndex()
item = index.internalPointer()
if not isinstance(item, QtTreeViewItem) or item.is_destroyed:
parent = QModelIndex()
parent = item.parent()
if not isinstance(parent, QtTreeViewItem) or parent.is_destroyed:
parent = QModelIndex()
d = parent.declaration
parent = self.createIndex(d.row, 0, parent)
</DeepExtract>
if isinstance(parent, QtTreeView):
return parent
return parent.view
|
def _default_view(self):
"""If this is the root item, return the parent
which must be a TreeView, otherwise return the
parent Item's view.
"""
if not index.isValid():
parent = QModelIndex()
item = index.internalPointer()
if not isinstance(item, QtTreeViewItem) or item.is_destroyed:
parent = QModelIndex()
parent = item.parent()
if not isinstance(parent, QtTreeViewItem) or parent.is_destroyed:
parent = QModelIndex()
d = parent.declaration
parent = self.createIndex(d.row, 0, parent)
if isinstance(parent, QtTreeView):
return parent
return parent.view
|
enamlx
|
positive
|
@override_rest_registration_settings({'USER_LOGIN_FIELDS': ['username', 'email']})
def test_when_one_non_unique_login_field_in_many_then_check_fails():
<DeepExtract>
app_configs = apps.app_configs
errors = []
all_checks = registry.get_checks(False)
rest_registration_checks = [check for check in all_checks if check.__module__.startswith('rest_registration.')]
for check in rest_registration_checks:
errors.extend(check(app_configs))
errors = errors
</DeepExtract>
<DeepExtract>
error_ids = sorted((e.id for e in errors))
expected_error_ids = sorted((code.get_full_code_id() for code in [ErrorCode.LOGIN_FIELDS_NOT_UNIQUE]))
msg = '\n\nList of errors:\n'
for error in errors:
msg += '- {error}\n'.format(error=error)
msg += ' does not match the codes: '
if [ErrorCode.LOGIN_FIELDS_NOT_UNIQUE]:
msg += ', '.join((str(e) for e in [ErrorCode.LOGIN_FIELDS_NOT_UNIQUE]))
else:
msg += '(empty list)'
assert error_ids == expected_error_ids, msg
</DeepExtract>
|
@override_rest_registration_settings({'USER_LOGIN_FIELDS': ['username', 'email']})
def test_when_one_non_unique_login_field_in_many_then_check_fails():
app_configs = apps.app_configs
errors = []
all_checks = registry.get_checks(False)
rest_registration_checks = [check for check in all_checks if check.__module__.startswith('rest_registration.')]
for check in rest_registration_checks:
errors.extend(check(app_configs))
errors = errors
error_ids = sorted((e.id for e in errors))
expected_error_ids = sorted((code.get_full_code_id() for code in [ErrorCode.LOGIN_FIELDS_NOT_UNIQUE]))
msg = '\n\nList of errors:\n'
for error in errors:
msg += '- {error}\n'.format(error=error)
msg += ' does not match the codes: '
if [ErrorCode.LOGIN_FIELDS_NOT_UNIQUE]:
msg += ', '.join((str(e) for e in [ErrorCode.LOGIN_FIELDS_NOT_UNIQUE]))
else:
msg += '(empty list)'
assert error_ids == expected_error_ids, msg
|
django-rest-registration
|
positive
|
def mqtt_on_message(self, client, userdata, msg):
device_node = userdata
blobtopic = self.entity.get_property(device_node, 'Blob Topic')
if blobtopic is not None and blobtopic.value == msg.topic:
<DeepExtract>
device_node = userdata
fout = self.mqtt_clients[device_node.name]['blob']
if len(msg.payload) == 255:
msg_in = msg.payload.decode()
msg_in = msg_in.split(':')
if msg_in[0] == 'END':
in_hash_final = self.mqtt_clients[device_node.name]['blob_hash'].hexdigest()
if in_hash_final == msg_in[1]:
logger.debug('File Copied OK - Valid Hash')
else:
logger.error('Bad File Received - Invalid Hash')
fout.close()
return
elif msg_in[0] == 'START':
file_name = msg_in[1]
file_path = os.path.split(os.path.realpath(self.entity.file_name))[0] + '/' + file_name
self.mqtt_clients[device_node.name]['blob'] = open(file_path, 'wb')
self.mqtt_clients[device_node.name]['blob_hash'] = hashlib.md5()
return
else:
pass
self.mqtt_clients[device_node.name]['blob_hash'].update(msg.payload)
fout.write(msg.payload)
</DeepExtract>
else:
logger.debug(msg.topic + ':' + str(msg.payload.decode()))
logger.debug(msg.topic + ':' + msg.payload.decode())
message_str = msg.payload.decode()
node = self.entity.get_node_by_val(msg.topic, device_node)
pair_variable = node.get_pair_friend()
self.pub_data(device_node.name, pair_variable.name, message_str)
|
def mqtt_on_message(self, client, userdata, msg):
device_node = userdata
blobtopic = self.entity.get_property(device_node, 'Blob Topic')
if blobtopic is not None and blobtopic.value == msg.topic:
device_node = userdata
fout = self.mqtt_clients[device_node.name]['blob']
if len(msg.payload) == 255:
msg_in = msg.payload.decode()
msg_in = msg_in.split(':')
if msg_in[0] == 'END':
in_hash_final = self.mqtt_clients[device_node.name]['blob_hash'].hexdigest()
if in_hash_final == msg_in[1]:
logger.debug('File Copied OK - Valid Hash')
else:
logger.error('Bad File Received - Invalid Hash')
fout.close()
return
elif msg_in[0] == 'START':
file_name = msg_in[1]
file_path = os.path.split(os.path.realpath(self.entity.file_name))[0] + '/' + file_name
self.mqtt_clients[device_node.name]['blob'] = open(file_path, 'wb')
self.mqtt_clients[device_node.name]['blob_hash'] = hashlib.md5()
return
else:
pass
self.mqtt_clients[device_node.name]['blob_hash'].update(msg.payload)
fout.write(msg.payload)
else:
logger.debug(msg.topic + ':' + str(msg.payload.decode()))
logger.debug(msg.topic + ':' + msg.payload.decode())
message_str = msg.payload.decode()
node = self.entity.get_node_by_val(msg.topic, device_node)
pair_variable = node.get_pair_friend()
self.pub_data(device_node.name, pair_variable.name, message_str)
|
Converter-for-OPCUA
|
positive
|
def noise_per_object_v2_(gt_boxes, points=None, valid_mask=None, rotation_perturb=np.pi / 4, center_noise_std=1.0, global_random_rot_range=np.pi / 4, num_try=100):
"""random rotate or remove each groundtrutn independently.
use kitti viewer to test this function points_transform_
Args:
gt_boxes: [N, 7], gt box in lidar.points_transform_
points: [M, 4], point cloud in lidar.
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
rotation_perturb = [-rotation_perturb, rotation_perturb]
if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
global_random_rot_range = [-global_random_rot_range, global_random_rot_range]
if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
center_noise_std = [center_noise_std, center_noise_std, center_noise_std]
if valid_mask is None:
valid_mask = np.ones((num_boxes,), dtype=np.bool_)
center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
loc_noises = np.random.normal(scale=center_noise_std, size=[num_boxes, num_try, 3])
rot_noises = np.random.uniform(rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try])
gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
grot_lowers = global_random_rot_range[0] - gt_grots
grot_uppers = global_random_rot_range[1] - gt_grots
global_rot_noises = np.random.uniform(grot_lowers[..., np.newaxis], grot_uppers[..., np.newaxis], size=[num_boxes, num_try])
origin = [0.5, 0.5, 0]
gt_box_corners = box_np_ops.center_to_corner_box3d(gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=origin, axis=2)
if np.abs(global_random_rot_range[0] - global_random_rot_range[1]) < 0.001:
<DeepExtract>
num_boxes = gt_boxes[:, [0, 1, 3, 4, 6]].shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(gt_boxes[:, [0, 1, 3, 4, 6]])
current_corners = np.zeros((4, 2), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
rot_mat_T = np.zeros((2, 2), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
success_mask = -np.ones((num_boxes,), dtype=np.int64)
for i in range(num_boxes):
if valid_mask[i]:
for j in range(num_tests):
current_corners[:] = box_corners[i]
current_corners -= gt_boxes[:, [0, 1, 3, 4, 6]][i, :2]
_rotation_box2d_jit_(current_corners, rot_noises[i, j], rot_mat_T)
current_corners += gt_boxes[:, [0, 1, 3, 4, 6]][i, :2] + loc_noises[i, j, :2]
coll_mat = box_collision_test(current_corners.reshape(1, 4, 2), box_corners)
coll_mat[0, i] = False
if not coll_mat.any():
success_mask[i] = j
box_corners[i] = current_corners
break
selected_noise = success_mask
</DeepExtract>
else:
<DeepExtract>
num_boxes = gt_boxes[:, [0, 1, 3, 4, 6]].shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(gt_boxes[:, [0, 1, 3, 4, 6]])
current_corners = np.zeros((4, 2), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
current_box = np.zeros((1, 5), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
rot_mat_T = np.zeros((2, 2), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
dst_pos = np.zeros((2,), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
success_mask = -np.ones((num_boxes,), dtype=np.int64)
corners_norm = np.zeros((4, 2), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
corners_norm = corners_norm.reshape(4, 2)
for i in range(num_boxes):
if valid_mask[i]:
for j in range(num_tests):
current_box[0, :] = gt_boxes[:, [0, 1, 3, 4, 6]][i]
current_radius = np.sqrt(gt_boxes[:, [0, 1, 3, 4, 6]][i, 0] ** 2 + gt_boxes[:, [0, 1, 3, 4, 6]][i, 1] ** 2)
current_grot = np.arctan2(gt_boxes[:, [0, 1, 3, 4, 6]][i, 0], gt_boxes[:, [0, 1, 3, 4, 6]][i, 1])
dst_grot = current_grot + global_rot_noises[i, j]
dst_pos[0] = current_radius * np.sin(dst_grot)
dst_pos[1] = current_radius * np.cos(dst_grot)
current_box[0, :2] = dst_pos
current_box[0, -1] += dst_grot - current_grot
rot_sin = np.sin(current_box[0, -1])
rot_cos = np.cos(current_box[0, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
current_corners[:] = current_box[0, 2:4] * corners_norm @ rot_mat_T + current_box[0, :2]
current_corners -= current_box[0, :2]
_rotation_box2d_jit_(current_corners, rot_noises[i, j], rot_mat_T)
current_corners += current_box[0, :2] + loc_noises[i, j, :2]
coll_mat = box_collision_test(current_corners.reshape(1, 4, 2), box_corners)
coll_mat[0, i] = False
if not coll_mat.any():
success_mask[i] = j
box_corners[i] = current_corners
loc_noises[i, j, :2] += dst_pos - gt_boxes[:, [0, 1, 3, 4, 6]][i, :2]
rot_noises[i, j] += dst_grot - current_grot
break
selected_noise = success_mask
</DeepExtract>
<DeepExtract>
result = np.zeros((loc_noises.shape[0], *loc_noises.shape[2:]), dtype=loc_noises.dtype)
for i in range(loc_noises.shape[0]):
if selected_noise[i] != -1:
result[i] = loc_noises[i, selected_noise[i]]
loc_transforms = result
</DeepExtract>
<DeepExtract>
result = np.zeros((rot_noises.shape[0], *rot_noises.shape[2:]), dtype=rot_noises.dtype)
for i in range(rot_noises.shape[0]):
if selected_noise[i] != -1:
result[i] = rot_noises[i, selected_noise[i]]
rot_transforms = result
</DeepExtract>
if points is not None:
surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
point_masks = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
<DeepExtract>
num_box = gt_boxes[:, :3].shape[0]
num_points = points.shape[0]
rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype)
for i in range(num_box):
_rotation_matrix_3d_(rot_mat_T[i], rot_transforms[i], 2)
for i in range(num_points):
for j in range(num_box):
if valid_mask[j]:
if point_masks[i, j] == 1:
points[i, :3] -= gt_boxes[:, :3][j, :3]
points[i:i + 1, :3] = points[i:i + 1, :3] @ rot_mat_T[j]
points[i, :3] += gt_boxes[:, :3][j, :3]
points[i, :3] += loc_transforms[j]
break
</DeepExtract>
<DeepExtract>
num_box = gt_boxes.shape[0]
for i in range(num_box):
if valid_mask[i]:
gt_boxes[i, :3] += loc_transforms[i]
gt_boxes[i, 6] += rot_transforms[i]
</DeepExtract>
|
def noise_per_object_v2_(gt_boxes, points=None, valid_mask=None, rotation_perturb=np.pi / 4, center_noise_std=1.0, global_random_rot_range=np.pi / 4, num_try=100):
"""random rotate or remove each groundtrutn independently.
use kitti viewer to test this function points_transform_
Args:
gt_boxes: [N, 7], gt box in lidar.points_transform_
points: [M, 4], point cloud in lidar.
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
rotation_perturb = [-rotation_perturb, rotation_perturb]
if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
global_random_rot_range = [-global_random_rot_range, global_random_rot_range]
if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
center_noise_std = [center_noise_std, center_noise_std, center_noise_std]
if valid_mask is None:
valid_mask = np.ones((num_boxes,), dtype=np.bool_)
center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
loc_noises = np.random.normal(scale=center_noise_std, size=[num_boxes, num_try, 3])
rot_noises = np.random.uniform(rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try])
gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
grot_lowers = global_random_rot_range[0] - gt_grots
grot_uppers = global_random_rot_range[1] - gt_grots
global_rot_noises = np.random.uniform(grot_lowers[..., np.newaxis], grot_uppers[..., np.newaxis], size=[num_boxes, num_try])
origin = [0.5, 0.5, 0]
gt_box_corners = box_np_ops.center_to_corner_box3d(gt_boxes[:, :3], gt_boxes[:, 3:6], gt_boxes[:, 6], origin=origin, axis=2)
if np.abs(global_random_rot_range[0] - global_random_rot_range[1]) < 0.001:
num_boxes = gt_boxes[:, [0, 1, 3, 4, 6]].shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(gt_boxes[:, [0, 1, 3, 4, 6]])
current_corners = np.zeros((4, 2), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
rot_mat_T = np.zeros((2, 2), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
success_mask = -np.ones((num_boxes,), dtype=np.int64)
for i in range(num_boxes):
if valid_mask[i]:
for j in range(num_tests):
current_corners[:] = box_corners[i]
current_corners -= gt_boxes[:, [0, 1, 3, 4, 6]][i, :2]
_rotation_box2d_jit_(current_corners, rot_noises[i, j], rot_mat_T)
current_corners += gt_boxes[:, [0, 1, 3, 4, 6]][i, :2] + loc_noises[i, j, :2]
coll_mat = box_collision_test(current_corners.reshape(1, 4, 2), box_corners)
coll_mat[0, i] = False
if not coll_mat.any():
success_mask[i] = j
box_corners[i] = current_corners
break
selected_noise = success_mask
else:
num_boxes = gt_boxes[:, [0, 1, 3, 4, 6]].shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(gt_boxes[:, [0, 1, 3, 4, 6]])
current_corners = np.zeros((4, 2), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
current_box = np.zeros((1, 5), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
rot_mat_T = np.zeros((2, 2), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
dst_pos = np.zeros((2,), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
success_mask = -np.ones((num_boxes,), dtype=np.int64)
corners_norm = np.zeros((4, 2), dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=gt_boxes[:, [0, 1, 3, 4, 6]].dtype)
corners_norm = corners_norm.reshape(4, 2)
for i in range(num_boxes):
if valid_mask[i]:
for j in range(num_tests):
current_box[0, :] = gt_boxes[:, [0, 1, 3, 4, 6]][i]
current_radius = np.sqrt(gt_boxes[:, [0, 1, 3, 4, 6]][i, 0] ** 2 + gt_boxes[:, [0, 1, 3, 4, 6]][i, 1] ** 2)
current_grot = np.arctan2(gt_boxes[:, [0, 1, 3, 4, 6]][i, 0], gt_boxes[:, [0, 1, 3, 4, 6]][i, 1])
dst_grot = current_grot + global_rot_noises[i, j]
dst_pos[0] = current_radius * np.sin(dst_grot)
dst_pos[1] = current_radius * np.cos(dst_grot)
current_box[0, :2] = dst_pos
current_box[0, -1] += dst_grot - current_grot
rot_sin = np.sin(current_box[0, -1])
rot_cos = np.cos(current_box[0, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
current_corners[:] = current_box[0, 2:4] * corners_norm @ rot_mat_T + current_box[0, :2]
current_corners -= current_box[0, :2]
_rotation_box2d_jit_(current_corners, rot_noises[i, j], rot_mat_T)
current_corners += current_box[0, :2] + loc_noises[i, j, :2]
coll_mat = box_collision_test(current_corners.reshape(1, 4, 2), box_corners)
coll_mat[0, i] = False
if not coll_mat.any():
success_mask[i] = j
box_corners[i] = current_corners
loc_noises[i, j, :2] += dst_pos - gt_boxes[:, [0, 1, 3, 4, 6]][i, :2]
rot_noises[i, j] += dst_grot - current_grot
break
selected_noise = success_mask
result = np.zeros((loc_noises.shape[0], *loc_noises.shape[2:]), dtype=loc_noises.dtype)
for i in range(loc_noises.shape[0]):
if selected_noise[i] != -1:
result[i] = loc_noises[i, selected_noise[i]]
loc_transforms = result
result = np.zeros((rot_noises.shape[0], *rot_noises.shape[2:]), dtype=rot_noises.dtype)
for i in range(rot_noises.shape[0]):
if selected_noise[i] != -1:
result[i] = rot_noises[i, selected_noise[i]]
rot_transforms = result
if points is not None:
surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
point_masks = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)
num_box = gt_boxes[:, :3].shape[0]
num_points = points.shape[0]
rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype)
for i in range(num_box):
_rotation_matrix_3d_(rot_mat_T[i], rot_transforms[i], 2)
for i in range(num_points):
for j in range(num_box):
if valid_mask[j]:
if point_masks[i, j] == 1:
points[i, :3] -= gt_boxes[:, :3][j, :3]
points[i:i + 1, :3] = points[i:i + 1, :3] @ rot_mat_T[j]
points[i, :3] += gt_boxes[:, :3][j, :3]
points[i, :3] += loc_transforms[j]
break
num_box = gt_boxes.shape[0]
for i in range(num_box):
if valid_mask[i]:
gt_boxes[i, :3] += loc_transforms[i]
gt_boxes[i, 6] += rot_transforms[i]
|
3D-CVF
|
positive
|
def build(self):
"""
Save the rendered output to the output file.
"""
logger.debug('Building {0} --> {1}'.format(self.source_path, self.final_url))
<DeepExtract>
data = self.data()
context = self.context(data=data)
(page_context, data) = self.parse_context(data)
(context, data) = self.site.plugin_manager.preBuildPage(self.site, self, context, data)
data = Template(data).render(context)
</DeepExtract>
if not self.discarded:
try:
os.makedirs(os.path.dirname(self.full_build_path))
except OSError:
pass
with io.FileIO(self.full_build_path, 'w') as f:
f.write(data.encode('utf-8'))
self.site.plugin_manager.postBuildPage(self)
|
def build(self):
"""
Save the rendered output to the output file.
"""
logger.debug('Building {0} --> {1}'.format(self.source_path, self.final_url))
data = self.data()
context = self.context(data=data)
(page_context, data) = self.parse_context(data)
(context, data) = self.site.plugin_manager.preBuildPage(self.site, self, context, data)
data = Template(data).render(context)
if not self.discarded:
try:
os.makedirs(os.path.dirname(self.full_build_path))
except OSError:
pass
with io.FileIO(self.full_build_path, 'w') as f:
f.write(data.encode('utf-8'))
self.site.plugin_manager.postBuildPage(self)
|
Cactus
|
positive
|
def forward(self, x):
"""Evaluate
Parameters
----------
x : `torch.Tensor`
tensor of shape ``[..., beta, alpha]``
Returns
-------
`torch.Tensor`
tensor of shape ``(..., (l+1)^2)``
"""
size = x.shape[:-2]
(res_beta, res_alpha) = x.shape[-2:]
x = x.reshape(-1, res_beta, res_alpha)
(sa, sm) = self.sha.shape
if sm <= sa and sa % 2 == 1:
<DeepExtract>
(*size, res) = x.shape
x = x.reshape(-1, res)
x = torch.fft.rfft(x, dim=1)
x = torch.cat([x[:, 1:sm // 2 + 1].imag.flip(1).mul(-math.sqrt(2)), x[:, :1].real, x[:, 1:sm // 2 + 1].real.mul(math.sqrt(2))], dim=1)
x = x.reshape(*size, 2 * sm // 2 + 1)
</DeepExtract>
else:
x = torch.einsum('am,zba->zbm', self.sha, x)
x = torch.einsum('mbi,zbm->zi', self.shb, x)
return x.reshape(*size, x.shape[1])
|
def forward(self, x):
"""Evaluate
Parameters
----------
x : `torch.Tensor`
tensor of shape ``[..., beta, alpha]``
Returns
-------
`torch.Tensor`
tensor of shape ``(..., (l+1)^2)``
"""
size = x.shape[:-2]
(res_beta, res_alpha) = x.shape[-2:]
x = x.reshape(-1, res_beta, res_alpha)
(sa, sm) = self.sha.shape
if sm <= sa and sa % 2 == 1:
(*size, res) = x.shape
x = x.reshape(-1, res)
x = torch.fft.rfft(x, dim=1)
x = torch.cat([x[:, 1:sm // 2 + 1].imag.flip(1).mul(-math.sqrt(2)), x[:, :1].real, x[:, 1:sm // 2 + 1].real.mul(math.sqrt(2))], dim=1)
x = x.reshape(*size, 2 * sm // 2 + 1)
else:
x = torch.einsum('am,zba->zbm', self.sha, x)
x = torch.einsum('mbi,zbm->zi', self.shb, x)
return x.reshape(*size, x.shape[1])
|
e3nn
|
positive
|
def __enter__(self) -> 'environment':
if self.dt is not None:
<DeepExtract>
assert isinstance(self.dt, float), f'"dt" must a float, but we got {self.dt}'
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['dt'] = self.dt
</DeepExtract>
if self.mode is not None:
<DeepExtract>
if not isinstance(self.mode, modes.Mode):
raise TypeError(f'Must be instance of brainpy.math.Mode. But we got {type(self.mode)}: {self.mode}')
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['mode'] = self.mode
</DeepExtract>
if self.x64 is not None:
<DeepExtract>
assert isinstance(self.x64, bool)
if self.x64:
enable_x64()
else:
disable_x64()
</DeepExtract>
if self.float_ is not None:
<DeepExtract>
if self.float_ not in [jnp.float16, jnp.float32, jnp.float64]:
raise TypeError(f'Float data type {self.float_} is not supported.')
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['float_'] = self.float_
</DeepExtract>
if self.int_ is not None:
<DeepExtract>
if self.int_ not in [jnp.int8, jnp.int16, jnp.int32, jnp.int64, jnp.uint8, jnp.uint16, jnp.uint32, jnp.uint64]:
raise TypeError(f'Integer data type {self.int_} is not supported.')
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['int_'] = self.int_
</DeepExtract>
if self.complex_ is not None:
<DeepExtract>
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['complex_'] = self.complex_
</DeepExtract>
if self.bool_ is not None:
<DeepExtract>
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['bool_'] = self.bool_
</DeepExtract>
return self
|
def __enter__(self) -> 'environment':
if self.dt is not None:
assert isinstance(self.dt, float), f'"dt" must a float, but we got {self.dt}'
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['dt'] = self.dt
if self.mode is not None:
if not isinstance(self.mode, modes.Mode):
raise TypeError(f'Must be instance of brainpy.math.Mode. But we got {type(self.mode)}: {self.mode}')
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['mode'] = self.mode
if self.x64 is not None:
assert isinstance(self.x64, bool)
if self.x64:
enable_x64()
else:
disable_x64()
if self.float_ is not None:
if self.float_ not in [jnp.float16, jnp.float32, jnp.float64]:
raise TypeError(f'Float data type {self.float_} is not supported.')
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['float_'] = self.float_
if self.int_ is not None:
if self.int_ not in [jnp.int8, jnp.int16, jnp.int32, jnp.int64, jnp.uint8, jnp.uint16, jnp.uint32, jnp.uint64]:
raise TypeError(f'Integer data type {self.int_} is not supported.')
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['int_'] = self.int_
if self.complex_ is not None:
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['complex_'] = self.complex_
if self.bool_ is not None:
global bm
if bm is None:
from brainpy import math as bm
bm.__dict__['bool_'] = self.bool_
return self
|
BrainPy
|
positive
|
def __init__(self, generator=2, group=17, keyLength=540):
"""
Generate the public and private keys.
"""
min_keyLength = 180
default_generator = 2
valid_generators = [2, 3, 5, 7]
if generator not in valid_generators:
print('Error: Invalid generator. Using default.')
self.generator = default_generator
else:
self.generator = generator
if keyLength < min_keyLength:
print('Error: keyLength is too small. Setting to minimum.')
self.keyLength = min_keyLength
else:
self.keyLength = keyLength
<DeepExtract>
default_group = 17
primes = {5: 2410312426921032588552076022197566074856950548502459942654116941958108831682612228890093858261341614673227141477904012196503648957050582631942730706805009223062734745341073406696246014589361659774041027169249453200378729434170325843778659198143763193776859869524088940195577346119843545301547043747207749969763750084308926339295559968882457872412993810129130294592999947926365264059284647209730384947211681434464714438488520940127459844288859336526896320919633919, 14: 32317006071311007300338913926423828248817941241140239112842009751400741706634354222619689417363569347117901737909704191754605873209195028853758986185622153212175412514901774520270235796078236248884246189477587641105928646099411723245426622522193230540919037680524235519125679715870117001058055877651038861847280257976054903569732561526167081339361799541336476559160368317896729073178384589680639671900977202194168647225871031411336429319536193471636533209717077448227988588565369208645296636077250268955505928362751121174096972998068410554359584866583291642136218231078990999448652468262416972035911852507045361090559, 15: 5809605995369958062791915965639201402176612226902900533702900882779736177890990861472094774477339581147373410185646378328043729800750470098210924487866935059164371588168047540943981644516632755067501626434556398193186628990071248660819361205119793693985433297036118232914410171876807536457391277857011849897410207519105333355801121109356897459426271845471397952675959440793493071628394122780510124618488232602464649876850458861245784240929258426287699705312584509625419513463605155428017165714465363094021609290561084025893662561222573202082865797821865270991145082200656978177192827024538990239969175546190770645685893438011714430426409338676314743571154537142031573004276428701433036381801705308659830751190352946025482059931306571004727362479688415574702596946457770284148435989129632853918392117997472632693078113129886487399347796982772784615865232621289656944284216824611318709764535152507354116344703769998514148343807, 16: 1044388881413152506679602719846529545831269060992135009022588756444338172022322690710444046669809783930111585737890362691860127079270495454517218673016928427459146001866885779762982229321192368303346235204368051010309155674155697460347176946394076535157284994895284821633700921811716738972451834979455897010306333468590751358365138782250372269117968985194322444535687415522007151638638141456178420621277822674995027990278673458629544391736919766299005511505446177668154446234882665961680796576903199116089347634947187778906528008004756692571666922964122566174582776707332452371001272163776841229318324903125740713574141005124561965913888899753461735347970011693256316751660678950830027510255804846105583465055446615090444309583050775808509297040039680057435342253926566240898195863631588888936364129920059308455669454034010391478238784189888594672336242763795138176353222845524644040094258962433613354036104643881925238489224010194193088911666165584229424668165441688927790460608264864204237717002054744337988941974661214699689706521543006262604535890998125752275942608772174376107314217749233048217904944409836238235772306749874396760463376480215133461333478395682746608242585133953883882226786118030184028136755970045385534758453247, 17: 
33751521821438561184518523159967412330064897805741846548173890474429429901326672445203235101919165483964194359460994881062089387893762814044257438204432573941083014827006090258925875161018096327732335800595831915976014208822304007327848132734933297885803213675261564962603340457220776826322500058091310967253976619973988033663666385188155212656268079501726223369693427999804134467810120772356498596945532366527400517575471969335854905274504119509592366013711954148258884879224599915203456315881034776553083676995718335598586395591169999570824515035017543533352697525287753332500527176569576894926734950469293596134095086603716860086302051544539652689091299099784588919052383463057789440565460681441902442399956419060521629604697347879024654313800186078316526964529288062740879011035175920059192178561473199006205896719435014765345518490882366607110905303449152556221163232127426440691921134648766635695850239231304591744215610985029636895406718880766308249227315984267542266259489684372223916445411015900506239419267909716320331208988978180868987431623710347617992356201449023892203230133009421463914291201346063125219636964261683591541014344239275340735690997732222069758773963390876360546515755280517042160525487302898122311669799679447530453600399342697032714458549591285939453949034981248114322322367238645042515984447890788917823576330019151696568654314153058547592091366014550143819685170068343700104677609041166369760080933413605498962382077778845599834907475953430787446201384567328530675275792962354883770806900827183685718353469574731680520621944540947734619035177180057973022652571032196598229259194875709994709721793154158686515748507274224181316948797104601068212015232921691482496346854413698719750190601102705274481050543239815130686073601076304512284549218459846046082253596762433827419060089029417044871218316020923109988915707117567, 18: 
1090748135619415929450294929359784500348155124953172211774101106966150168922785639028532473848836817769712164169076432969224698752674677662739994265785437233596157045970922338040698100507861033047312331823982435279475700199860971612732540528796554502867919746776983759391475987142521315878719577519148811830879919426939958487087540965716419167467499326156226529675209172277001377591248147563782880558861083327174154014975134893125116015776318890295960698011614157721282527539468816519319333337503114777192360412281721018955834377615480468479252748867320362385355596601795122806756217713579819870634321561907813255153703950795271232652404894983869492174481652303803498881366210508647263668376514131031102336837488999775744046733651827239395353540348414872854639719294694323450186884189822544540647226987292160693184734654941906936646576130260972193280317171696418971553954161446191759093719524951116705577362073481319296041201283516154269044389257727700289684119460283480452306204130024913879981135908026983868205969318167819680850998649694416907952712904962404937775789698917207356355227455066183815847669135530549755439819480321732925869069136146085326382334628745456398071603058051634209386708703306545903199608523824513729625136659128221100967735450519952404248198262813831097374261650380017277916975324134846574681307337017380830353680623216336949471306191686438249305686413380231046096450953594089375540285037292470929395114028305547452584962074309438151825437902976012891749355198678420603722034900311364893046495761404333938686140037848030916292543273684533640032637639100774502371542479302473698388692892420946478947733800387782741417786484770190108867879778991633218628640533982619322466154883011452291890252336487236086654396093853898628805813177559162076363154436494477507871294119841637867701722166609831201845484078070518041336869808398454625586921201308185638888082699408686536045192649569198110353659943111802300636106509865023943661829436426563007917282050894429388841748885398290707743052973605359277515749619730823773215894755121761467887865327707115573804264519206349215850195195364813387526811742474131549802130246506341207020335797706780705406945275438806265978516209706795702579244075380490231741030862614968783306207869687868108423639971983209077624758080499988275591392787267627182442892809646874228263172435642368588260139161962836121481966092745325488641054238839295138992979335446110090325230955276870524611359124918392740353154294858383359}
if group in primes.keys():
self.prime = primes[group]
else:
print('Error: No prime with group %i. Using default.' % group)
self.prime = primes[default_group]
</DeepExtract>
<DeepExtract>
self.privateKey = self.genRandom(keyLength)
</DeepExtract>
<DeepExtract>
self.publicKey = pow(self.generator, self.privateKey, self.prime)
</DeepExtract>
|
def __init__(self, generator=2, group=17, keyLength=540):
"""
Generate the public and private keys.
"""
min_keyLength = 180
default_generator = 2
valid_generators = [2, 3, 5, 7]
if generator not in valid_generators:
print('Error: Invalid generator. Using default.')
self.generator = default_generator
else:
self.generator = generator
if keyLength < min_keyLength:
print('Error: keyLength is too small. Setting to minimum.')
self.keyLength = min_keyLength
else:
self.keyLength = keyLength
default_group = 17
primes = {5: 2410312426921032588552076022197566074856950548502459942654116941958108831682612228890093858261341614673227141477904012196503648957050582631942730706805009223062734745341073406696246014589361659774041027169249453200378729434170325843778659198143763193776859869524088940195577346119843545301547043747207749969763750084308926339295559968882457872412993810129130294592999947926365264059284647209730384947211681434464714438488520940127459844288859336526896320919633919, 14: 32317006071311007300338913926423828248817941241140239112842009751400741706634354222619689417363569347117901737909704191754605873209195028853758986185622153212175412514901774520270235796078236248884246189477587641105928646099411723245426622522193230540919037680524235519125679715870117001058055877651038861847280257976054903569732561526167081339361799541336476559160368317896729073178384589680639671900977202194168647225871031411336429319536193471636533209717077448227988588565369208645296636077250268955505928362751121174096972998068410554359584866583291642136218231078990999448652468262416972035911852507045361090559, 15: 5809605995369958062791915965639201402176612226902900533702900882779736177890990861472094774477339581147373410185646378328043729800750470098210924487866935059164371588168047540943981644516632755067501626434556398193186628990071248660819361205119793693985433297036118232914410171876807536457391277857011849897410207519105333355801121109356897459426271845471397952675959440793493071628394122780510124618488232602464649876850458861245784240929258426287699705312584509625419513463605155428017165714465363094021609290561084025893662561222573202082865797821865270991145082200656978177192827024538990239969175546190770645685893438011714430426409338676314743571154537142031573004276428701433036381801705308659830751190352946025482059931306571004727362479688415574702596946457770284148435989129632853918392117997472632693078113129886487399347796982772784615865232621289656944284216824611318709764535152507354116344703769998514148343807, 16: 1044388881413152506679602719846529545831269060992135009022588756444338172022322690710444046669809783930111585737890362691860127079270495454517218673016928427459146001866885779762982229321192368303346235204368051010309155674155697460347176946394076535157284994895284821633700921811716738972451834979455897010306333468590751358365138782250372269117968985194322444535687415522007151638638141456178420621277822674995027990278673458629544391736919766299005511505446177668154446234882665961680796576903199116089347634947187778906528008004756692571666922964122566174582776707332452371001272163776841229318324903125740713574141005124561965913888899753461735347970011693256316751660678950830027510255804846105583465055446615090444309583050775808509297040039680057435342253926566240898195863631588888936364129920059308455669454034010391478238784189888594672336242763795138176353222845524644040094258962433613354036104643881925238489224010194193088911666165584229424668165441688927790460608264864204237717002054744337988941974661214699689706521543006262604535890998125752275942608772174376107314217749233048217904944409836238235772306749874396760463376480215133461333478395682746608242585133953883882226786118030184028136755970045385534758453247, 17: 
33751521821438561184518523159967412330064897805741846548173890474429429901326672445203235101919165483964194359460994881062089387893762814044257438204432573941083014827006090258925875161018096327732335800595831915976014208822304007327848132734933297885803213675261564962603340457220776826322500058091310967253976619973988033663666385188155212656268079501726223369693427999804134467810120772356498596945532366527400517575471969335854905274504119509592366013711954148258884879224599915203456315881034776553083676995718335598586395591169999570824515035017543533352697525287753332500527176569576894926734950469293596134095086603716860086302051544539652689091299099784588919052383463057789440565460681441902442399956419060521629604697347879024654313800186078316526964529288062740879011035175920059192178561473199006205896719435014765345518490882366607110905303449152556221163232127426440691921134648766635695850239231304591744215610985029636895406718880766308249227315984267542266259489684372223916445411015900506239419267909716320331208988978180868987431623710347617992356201449023892203230133009421463914291201346063125219636964261683591541014344239275340735690997732222069758773963390876360546515755280517042160525487302898122311669799679447530453600399342697032714458549591285939453949034981248114322322367238645042515984447890788917823576330019151696568654314153058547592091366014550143819685170068343700104677609041166369760080933413605498962382077778845599834907475953430787446201384567328530675275792962354883770806900827183685718353469574731680520621944540947734619035177180057973022652571032196598229259194875709994709721793154158686515748507274224181316948797104601068212015232921691482496346854413698719750190601102705274481050543239815130686073601076304512284549218459846046082253596762433827419060089029417044871218316020923109988915707117567, 18: 
1090748135619415929450294929359784500348155124953172211774101106966150168922785639028532473848836817769712164169076432969224698752674677662739994265785437233596157045970922338040698100507861033047312331823982435279475700199860971612732540528796554502867919746776983759391475987142521315878719577519148811830879919426939958487087540965716419167467499326156226529675209172277001377591248147563782880558861083327174154014975134893125116015776318890295960698011614157721282527539468816519319333337503114777192360412281721018955834377615480468479252748867320362385355596601795122806756217713579819870634321561907813255153703950795271232652404894983869492174481652303803498881366210508647263668376514131031102336837488999775744046733651827239395353540348414872854639719294694323450186884189822544540647226987292160693184734654941906936646576130260972193280317171696418971553954161446191759093719524951116705577362073481319296041201283516154269044389257727700289684119460283480452306204130024913879981135908026983868205969318167819680850998649694416907952712904962404937775789698917207356355227455066183815847669135530549755439819480321732925869069136146085326382334628745456398071603058051634209386708703306545903199608523824513729625136659128221100967735450519952404248198262813831097374261650380017277916975324134846574681307337017380830353680623216336949471306191686438249305686413380231046096450953594089375540285037292470929395114028305547452584962074309438151825437902976012891749355198678420603722034900311364893046495761404333938686140037848030916292543273684533640032637639100774502371542479302473698388692892420946478947733800387782741417786484770190108867879778991633218628640533982619322466154883011452291890252336487236086654396093853898628805813177559162076363154436494477507871294119841637867701722166609831201845484078070518041336869808398454625586921201308185638888082699408686536045192649569198110353659943111802300636106509865023943661829436426563007917282050894429388841748885398290707743052973605359277515749619730823773215894755121761467887865327707115573804264519206349215850195195364813387526811742474131549802130246506341207020335797706780705406945275438806265978516209706795702579244075380490231741030862614968783306207869687868108423639971983209077624758080499988275591392787267627182442892809646874228263172435642368588260139161962836121481966092745325488641054238839295138992979335446110090325230955276870524611359124918392740353154294858383359}
if group in primes.keys():
self.prime = primes[group]
else:
print('Error: No prime with group %i. Using default.' % group)
self.prime = primes[default_group]
self.privateKey = self.genRandom(keyLength)
self.publicKey = pow(self.generator, self.privateKey, self.prime)
</DeepExtract>
|
EmPyre
|
positive
|
def day_change_wheel(action):
<DeepExtract>
if action == 'IN':
val = 5.0 if not Map.altPress else 1.0
else:
val = -5.0 if not Map.altPress else -1.0
wf = 3 * val if Map.ctrlPress else 1.0 * val
wf = wf
</DeepExtract>
<DeepExtract>
if Sun.SP.Day_of_year + wf > 366:
Sun.SP.Day_of_year = 1
Sun.SP.Year += 1
elif Sun.SP.Day_of_year + wf < 1:
Sun.SP.Day_of_year = 366
Sun.SP.Year -= 1
else:
Sun.SP.Day_of_year += wf
dt = datetime.date(Sun.SP.Year, 1, 1) + datetime.timedelta(Sun.SP.Day_of_year - 1)
Sun.SP.Day = dt.day
Sun.SP.Month = dt.month
Display.refresh()
</DeepExtract>
|
def day_change_wheel(action):
if action == 'IN':
val = 5.0 if not Map.altPress else 1.0
else:
val = -5.0 if not Map.altPress else -1.0
wf = 3 * val if Map.ctrlPress else 1.0 * val
wf = wf
if Sun.SP.Day_of_year + wf > 366:
Sun.SP.Day_of_year = 1
Sun.SP.Year += 1
elif Sun.SP.Day_of_year + wf < 1:
Sun.SP.Day_of_year = 366
Sun.SP.Year -= 1
else:
Sun.SP.Day_of_year += wf
dt = datetime.date(Sun.SP.Year, 1, 1) + datetime.timedelta(Sun.SP.Day_of_year - 1)
Sun.SP.Day = dt.day
Sun.SP.Month = dt.month
Display.refresh()
|
blender-architecture-scripts
|
positive
|
def send_command(self, cmd: str, read_response=True) -> str:
"""
Send the provided command (`cmd`) to the attached U-Boot console.
If `read_response` is `True`, the response is returned. Otherwise,
`None` is returned and no attempt to read the response data is made.
If one does not plan to use the response, keep `read_response`
set to `True` and simply ignore the return value; this will ensure
response data is removed from underlying buffers.
"""
self._ser.flush()
if not cmd.endswith('\n'):
cmd += '\n'
<DeepExtract>
self.write_raw(cmd.encode(self._encoding), update_monitor=update_monitor)
</DeepExtract>
self._ser.flush()
if read_response:
<DeepExtract>
raw_data = self.read_raw(readlen, update_monitor=update_monitor)
ret_str = raw_data.decode(self._encoding)
resp = ret_str.replace('\r\n', '\n')
</DeepExtract>
<DeepExtract>
cmd = cmd.rstrip()
if resp[:len(cmd)] == cmd:
resp = resp[len(cmd):].lstrip()
resp = resp
</DeepExtract>
if resp.endswith(self.prompt):
resp = resp[:-len(self.prompt)]
return resp
return None
|
def send_command(self, cmd: str, read_response=True) -> str:
"""
Send the provided command (`cmd`) to the attached U-Boot console.
If `read_response` is `True`, the response is returned. Otherwise,
`None` is returned and no attempt to read the response data is made.
If one does not plan to use the response, keep `read_response`
set to `True` and simply ignore the return value; this will ensure
response data is removed from underlying buffers.
"""
self._ser.flush()
if not cmd.endswith('\n'):
cmd += '\n'
self.write_raw(cmd.encode(self._encoding), update_monitor=update_monitor)
self._ser.flush()
if read_response:
raw_data = self.read_raw(readlen, update_monitor=update_monitor)
ret_str = raw_data.decode(self._encoding)
resp = ret_str.replace('\r\n', '\n')
cmd = cmd.rstrip()
if resp[:len(cmd)] == cmd:
resp = resp[len(cmd):].lstrip()
resp = resp
if resp.endswith(self.prompt):
resp = resp[:-len(self.prompt)]
return resp
return None
|
depthcharge
|
positive
|
def forward_train(self, img, **kwargs):
"""Forward computation during training.
Args:
img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).
Typically these should be mean centered and std scaled.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert img.dim() == 5, 'Input must have 5 dims, got: {}'.format(img.dim())
img = img.reshape(img.size(0) * 2, img.size(2), img.size(3), img.size(4))
<DeepExtract>
x = self.backbone(img)
x = x
</DeepExtract>
z = self.neck(x)[0]
z = z / (torch.norm(z, p=2, dim=1, keepdim=True) + 1e-10)
z = torch.cat(GatherLayer.apply(z), dim=0)
assert z.size(0) % 2 == 0
N = z.size(0) // 2
s = torch.matmul(z, z.permute(1, 0))
<DeepExtract>
mask = 1 - torch.eye(N * 2, dtype=torch.uint8).cuda()
pos_ind = (torch.arange(N * 2).cuda(), 2 * torch.arange(N, dtype=torch.long).unsqueeze(1).repeat(1, 2).view(-1, 1).squeeze().cuda())
neg_mask = torch.ones((N * 2, N * 2 - 1), dtype=torch.uint8).cuda()
neg_mask[pos_ind] = 0
(mask, pos_ind, neg_mask) = (mask, pos_ind, neg_mask)
</DeepExtract>
s = torch.masked_select(s, mask == 1).reshape(s.size(0), -1)
positive = s[pos_ind].unsqueeze(1)
negative = torch.masked_select(s, neg_mask == 1).reshape(s.size(0), -1)
losses = self.head(positive, negative)
return losses
|
def forward_train(self, img, **kwargs):
"""Forward computation during training.
Args:
img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).
Typically these should be mean centered and std scaled.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert img.dim() == 5, 'Input must have 5 dims, got: {}'.format(img.dim())
img = img.reshape(img.size(0) * 2, img.size(2), img.size(3), img.size(4))
x = self.backbone(img)
x = x
z = self.neck(x)[0]
z = z / (torch.norm(z, p=2, dim=1, keepdim=True) + 1e-10)
z = torch.cat(GatherLayer.apply(z), dim=0)
assert z.size(0) % 2 == 0
N = z.size(0) // 2
s = torch.matmul(z, z.permute(1, 0))
mask = 1 - torch.eye(N * 2, dtype=torch.uint8).cuda()
pos_ind = (torch.arange(N * 2).cuda(), 2 * torch.arange(N, dtype=torch.long).unsqueeze(1).repeat(1, 2).view(-1, 1).squeeze().cuda())
neg_mask = torch.ones((N * 2, N * 2 - 1), dtype=torch.uint8).cuda()
neg_mask[pos_ind] = 0
(mask, pos_ind, neg_mask) = (mask, pos_ind, neg_mask)
s = torch.masked_select(s, mask == 1).reshape(s.size(0), -1)
positive = s[pos_ind].unsqueeze(1)
negative = torch.masked_select(s, neg_mask == 1).reshape(s.size(0), -1)
losses = self.head(positive, negative)
return losses
|
DenseCL
|
positive
|
def forward(self, x):
(b, c, h, w) = x.shape
x = x.reshape(b * self.groups, -1, h, w)
(x_0, x_1) = x.chunk(2, dim=1)
xn = self.avg_pool(x_0)
xn = self.cweight * xn + self.cbias
xn = x_0 * self.sigmoid(xn)
xs = self.gn(x_1)
xs = self.sweight * xs + self.sbias
xs = x_1 * self.sigmoid(xs)
out = torch.cat([xn, xs], dim=1)
out = out.reshape(b, -1, h, w)
<DeepExtract>
(b, c, h, w) = out.shape
out = out.reshape(b, 2, -1, h, w)
out = out.permute(0, 2, 1, 3, 4)
out = out.reshape(b, -1, h, w)
out = out
</DeepExtract>
return out
|
def forward(self, x):
(b, c, h, w) = x.shape
x = x.reshape(b * self.groups, -1, h, w)
(x_0, x_1) = x.chunk(2, dim=1)
xn = self.avg_pool(x_0)
xn = self.cweight * xn + self.cbias
xn = x_0 * self.sigmoid(xn)
xs = self.gn(x_1)
xs = self.sweight * xs + self.sbias
xs = x_1 * self.sigmoid(xs)
out = torch.cat([xn, xs], dim=1)
out = out.reshape(b, -1, h, w)
(b, c, h, w) = out.shape
out = out.reshape(b, 2, -1, h, w)
out = out.permute(0, 2, 1, 3, 4)
out = out.reshape(b, -1, h, w)
out = out
return out
|
awesome-attention-mechanism-in-cv
|
positive
|
def config_training(self):
<DeepExtract>
self.pix_crit = define_criterion(self.opt['train'].get('pixel_crit'))
self.warp_crit = define_criterion(self.opt['train'].get('warping_crit'))
</DeepExtract>
self.optim_G = optim.Adam(self.net_G.parameters(), lr=self.opt['train']['generator']['lr'], weight_decay=self.opt['train']['generator'].get('weight_decay', 0), betas=(self.opt['train']['generator'].get('beta1', 0.9), self.opt['train']['generator'].get('beta2', 0.999)))
self.sched_G = define_lr_schedule(self.opt['train']['generator'].get('lr_schedule'), self.optim_G)
|
def config_training(self):
self.pix_crit = define_criterion(self.opt['train'].get('pixel_crit'))
self.warp_crit = define_criterion(self.opt['train'].get('warping_crit'))
self.optim_G = optim.Adam(self.net_G.parameters(), lr=self.opt['train']['generator']['lr'], weight_decay=self.opt['train']['generator'].get('weight_decay', 0), betas=(self.opt['train']['generator'].get('beta1', 0.9), self.opt['train']['generator'].get('beta2', 0.999)))
self.sched_G = define_lr_schedule(self.opt['train']['generator'].get('lr_schedule'), self.optim_G)
|
EGVSR
|
positive
|
def manage_state(module, lambda_client):
changed = False
current_state = 'absent'
state = module.params['state']
action_taken = 'none'
<DeepExtract>
sid = module.params['statement_id']
api_params = set_api_params(module, ('function_name',))
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
policy_results = None
try:
policy_results = lambda_client.get_policy(**api_params)
except is_boto3_error_code('ResourceNotFoundException'):
current_policy_statement = {}
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='retrieving function policy')
policy = json.loads(policy_results.get('Policy', '{}'))
current_policy_statement = extract_statement(policy, sid)
</DeepExtract>
if current_policy_statement:
current_state = 'present'
if state == 'present':
if current_state == 'present' and (not policy_equal(module, current_policy_statement)):
<DeepExtract>
changed = False
api_params = set_api_params(module, ('function_name', 'statement_id'))
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
try:
if not module.check_mode:
lambda_client.remove_permission(**api_params)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='removing permission from policy')
return changed
</DeepExtract>
<DeepExtract>
changed = False
params = ('function_name', 'statement_id', 'action', 'principal', 'source_arn', 'source_account', 'event_source_token')
api_params = set_api_params(module, params)
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
if not module.check_mode:
try:
lambda_client.add_permission(**api_params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='adding permission to policy')
changed = True
changed = changed
</DeepExtract>
action_taken = 'updated'
if not current_state == 'present':
<DeepExtract>
changed = False
params = ('function_name', 'statement_id', 'action', 'principal', 'source_arn', 'source_account', 'event_source_token')
api_params = set_api_params(module, params)
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
if not module.check_mode:
try:
lambda_client.add_permission(**api_params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='adding permission to policy')
changed = True
changed = changed
</DeepExtract>
action_taken = 'added'
elif current_state == 'present':
<DeepExtract>
changed = False
api_params = set_api_params(module, ('function_name', 'statement_id'))
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
try:
if not module.check_mode:
lambda_client.remove_permission(**api_params)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='removing permission from policy')
changed = changed
</DeepExtract>
action_taken = 'deleted'
return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken))
|
def manage_state(module, lambda_client):
changed = False
current_state = 'absent'
state = module.params['state']
action_taken = 'none'
sid = module.params['statement_id']
api_params = set_api_params(module, ('function_name',))
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
policy_results = None
try:
policy_results = lambda_client.get_policy(**api_params)
except is_boto3_error_code('ResourceNotFoundException'):
current_policy_statement = {}
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='retrieving function policy')
policy = json.loads(policy_results.get('Policy', '{}'))
current_policy_statement = extract_statement(policy, sid)
if current_policy_statement:
current_state = 'present'
if state == 'present':
if current_state == 'present' and (not policy_equal(module, current_policy_statement)):
changed = False
api_params = set_api_params(module, ('function_name', 'statement_id'))
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
try:
if not module.check_mode:
lambda_client.remove_permission(**api_params)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='removing permission from policy')
return changed
changed = False
params = ('function_name', 'statement_id', 'action', 'principal', 'source_arn', 'source_account', 'event_source_token')
api_params = set_api_params(module, params)
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
if not module.check_mode:
try:
lambda_client.add_permission(**api_params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='adding permission to policy')
changed = True
changed = changed
action_taken = 'updated'
if not current_state == 'present':
changed = False
params = ('function_name', 'statement_id', 'action', 'principal', 'source_arn', 'source_account', 'event_source_token')
api_params = set_api_params(module, params)
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
if not module.check_mode:
try:
lambda_client.add_permission(**api_params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='adding permission to policy')
changed = True
changed = changed
action_taken = 'added'
elif current_state == 'present':
changed = False
api_params = set_api_params(module, ('function_name', 'statement_id'))
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
try:
if not module.check_mode:
lambda_client.remove_permission(**api_params)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='removing permission from policy')
changed = changed
action_taken = 'deleted'
return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken))
|
amazon.aws
|
positive
|
def EncodeFile(encoder, inp_fname, out_fname, buffer_size=10000, verbose=False, over_write=False, inp_encoding='utf-8'):
if not os.path.isfile(out_fname):
if verbose:
print(' - Encoder: {} to {}'.format(os.path.basename(inp_fname) if len(inp_fname) > 0 else 'stdin', os.path.basename(out_fname)))
fin = open(inp_fname, 'r', encoding=inp_encoding, errors='surrogateescape') if len(inp_fname) > 0 else sys.stdin
fout = open(out_fname, mode='wb')
<DeepExtract>
n = 0
t = time.time()
for sentences in buffered_read(fin, buffer_size):
encoder.encode_sentences(sentences).tofile(fout)
n += len(sentences)
if verbose and n % 10000 == 0:
print('\r - Encoder: {:d} sentences'.format(n), end='')
if verbose:
print('\r - Encoder: {:d} sentences'.format(n), end='')
EncodeTime(t)
</DeepExtract>
fin.close()
fout.close()
elif not over_write and verbose:
print(' - Encoder: {} exists already'.format(os.path.basename(out_fname)))
|
def EncodeFile(encoder, inp_fname, out_fname, buffer_size=10000, verbose=False, over_write=False, inp_encoding='utf-8'):
if not os.path.isfile(out_fname):
if verbose:
print(' - Encoder: {} to {}'.format(os.path.basename(inp_fname) if len(inp_fname) > 0 else 'stdin', os.path.basename(out_fname)))
fin = open(inp_fname, 'r', encoding=inp_encoding, errors='surrogateescape') if len(inp_fname) > 0 else sys.stdin
fout = open(out_fname, mode='wb')
n = 0
t = time.time()
for sentences in buffered_read(fin, buffer_size):
encoder.encode_sentences(sentences).tofile(fout)
n += len(sentences)
if verbose and n % 10000 == 0:
print('\r - Encoder: {:d} sentences'.format(n), end='')
if verbose:
print('\r - Encoder: {:d} sentences'.format(n), end='')
EncodeTime(t)
fin.close()
fout.close()
elif not over_write and verbose:
print(' - Encoder: {} exists already'.format(os.path.basename(out_fname)))
|
banglanmt
|
positive
|
def clicked(self, button):
if button == QtGui.QDialogButtonBox.Apply:
<DeepExtract>
self.mesh_obj.CharacteristicLengthMax = self.clmax
self.mesh_obj.CharacteristicLengthMin = self.clmin
self.mesh_obj.ElementDimension = self.dimension
</DeepExtract>
<DeepExtract>
QApplication.setOverrideCursor(Qt.WaitCursor)
part = self.mesh_obj.Part
if self.mesh_obj.MeshRegionList:
if part.Shape.ShapeType == 'Compound' and hasattr(part, 'Proxy'):
if part.Proxy.Type == 'FeatureBooleanFragments' or part.Proxy.Type == 'FeatureSlice' or part.Proxy.Type == 'FeatureXOR':
error_message = 'The mesh to shape is a boolean split tools Compound and the mesh has mesh region list. GMSH could return unexpected meshes in such circumstances. It is strongly recommended to extract the shape to mesh from the Compound and use this one.'
QtGui.QMessageBox.critical(None, 'Shape to mesh is a BooleanFragmentsCompound and mesh regions are defined', error_message)
self.Start = time.time()
self.form.l_time.setText('Time: {0:4.1f}: '.format(time.time() - self.Start))
self.console_message_gmsh = ''
self.gmsh_runs = True
self.console_log('We are going to start ...')
self.get_active_analysis()
self.console_log('Start GMSH ...')
error = CfdTools.runGmsh(self.mesh_obj, self.analysis)
if error:
print(error)
self.console_log('GMSH had warnings ...')
self.console_log(error, '#FF0000')
else:
self.console_log('Clean run of GMSH')
self.console_log('GMSH done!')
self.form.l_time.setText('Time: {0:4.1f}: '.format(time.time() - self.Start))
self.Timer.stop()
self.update()
QApplication.restoreOverrideCursor()
</DeepExtract>
|
def clicked(self, button):
if button == QtGui.QDialogButtonBox.Apply:
self.mesh_obj.CharacteristicLengthMax = self.clmax
self.mesh_obj.CharacteristicLengthMin = self.clmin
self.mesh_obj.ElementDimension = self.dimension
QApplication.setOverrideCursor(Qt.WaitCursor)
part = self.mesh_obj.Part
if self.mesh_obj.MeshRegionList:
if part.Shape.ShapeType == 'Compound' and hasattr(part, 'Proxy'):
if part.Proxy.Type == 'FeatureBooleanFragments' or part.Proxy.Type == 'FeatureSlice' or part.Proxy.Type == 'FeatureXOR':
error_message = 'The mesh to shape is a boolean split tools Compound and the mesh has mesh region list. GMSH could return unexpected meshes in such circumstances. It is strongly recommended to extract the shape to mesh from the Compound and use this one.'
QtGui.QMessageBox.critical(None, 'Shape to mesh is a BooleanFragmentsCompound and mesh regions are defined', error_message)
self.Start = time.time()
self.form.l_time.setText('Time: {0:4.1f}: '.format(time.time() - self.Start))
self.console_message_gmsh = ''
self.gmsh_runs = True
self.console_log('We are going to start ...')
self.get_active_analysis()
self.console_log('Start GMSH ...')
error = CfdTools.runGmsh(self.mesh_obj, self.analysis)
if error:
print(error)
self.console_log('GMSH had warnings ...')
self.console_log(error, '#FF0000')
else:
self.console_log('Clean run of GMSH')
self.console_log('GMSH done!')
self.form.l_time.setText('Time: {0:4.1f}: '.format(time.time() - self.Start))
self.Timer.stop()
self.update()
QApplication.restoreOverrideCursor()
|
Cfd
|
positive
|
def __init__(self) -> None:
config: dict = self._init_config()
self.scheduler: BaseScheduler = self._init_scheduler()
self.helper = OpenCTIConnectorHelper(config)
self.in_queue = Queue()
self.out_queues: dict[str, Queue] = {}
update_existing_data = bool(get_config_variable('CONNECTOR_UPDATE_EXISTING_DATA', ['connector', 'update_existing_data'], config))
api_username = get_config_variable('INTEL471_API_USERNAME', ['intel471', 'api_username'], config)
api_key = get_config_variable('INTEL471_API_KEY', ['intel471', 'api_key'], config)
for stream_class in (Intel471IndicatorsStream, Intel471CVEsStream, Intel471YARAStream, Intel471IOCsStream):
if (interval := get_config_variable(f'INTEL471_INTERVAL_{stream_class.label}'.upper(), ['intel471', f'interval_{stream_class.label}'], config, isNumber=True, default=0)):
self.out_queues[stream_class.label] = Queue()
initial_history = get_config_variable(f'INTEL471_INITIAL_HISTORY_{stream_class.label}'.upper(), ['intel471', f'initial_history_{stream_class.label}'], config, isNumber=True, default=0)
<DeepExtract>
self.scheduler.add_job(stream_class(self.helper, api_username, api_key, self.out_queues[stream_class.label], self.in_queue, initial_history, update_existing_data).run, name=stream_class(self.helper, api_username, api_key, self.out_queues[stream_class.label], self.in_queue, initial_history, update_existing_data).__class__.__name__, trigger='interval', minutes=interval)
</DeepExtract>
signal.signal(signal.SIGTERM, self._signal_handler)
signal.signal(signal.SIGINT, self._signal_handler)
|
def __init__(self) -> None:
config: dict = self._init_config()
self.scheduler: BaseScheduler = self._init_scheduler()
self.helper = OpenCTIConnectorHelper(config)
self.in_queue = Queue()
self.out_queues: dict[str, Queue] = {}
update_existing_data = bool(get_config_variable('CONNECTOR_UPDATE_EXISTING_DATA', ['connector', 'update_existing_data'], config))
api_username = get_config_variable('INTEL471_API_USERNAME', ['intel471', 'api_username'], config)
api_key = get_config_variable('INTEL471_API_KEY', ['intel471', 'api_key'], config)
for stream_class in (Intel471IndicatorsStream, Intel471CVEsStream, Intel471YARAStream, Intel471IOCsStream):
if (interval := get_config_variable(f'INTEL471_INTERVAL_{stream_class.label}'.upper(), ['intel471', f'interval_{stream_class.label}'], config, isNumber=True, default=0)):
self.out_queues[stream_class.label] = Queue()
initial_history = get_config_variable(f'INTEL471_INITIAL_HISTORY_{stream_class.label}'.upper(), ['intel471', f'initial_history_{stream_class.label}'], config, isNumber=True, default=0)
self.scheduler.add_job(stream_class(self.helper, api_username, api_key, self.out_queues[stream_class.label], self.in_queue, initial_history, update_existing_data).run, name=stream_class(self.helper, api_username, api_key, self.out_queues[stream_class.label], self.in_queue, initial_history, update_existing_data).__class__.__name__, trigger='interval', minutes=interval)
signal.signal(signal.SIGTERM, self._signal_handler)
signal.signal(signal.SIGINT, self._signal_handler)
|
connectors
|
positive
|
def _check_ed25519_cb_auth(self, uuid, response, model):
"""Verify ED25519_CB authentication."""
authinfo = response.get_header('Authentication-Info', '')
try:
<DeepExtract>
options = {}
p1 = authinfo.find(sep1)
if p1 == -1:
(method, options) = (authinfo, options)
head = authinfo[:p1].strip()
optvals = authinfo[p1 + 1:].split(sep2)
for optval in optvals:
optval = optval.strip()
mobj = _re_optval.match(optval)
if mobj is None:
raise ValueError('Illegal option string')
key = mobj.group(1)
value = mobj.group(2)
if value.startswith('"'):
value = value[1:-1]
options[key] = value
(method, options) = (head, options)
</DeepExtract>
except ValueError:
self._log.error('illegal Authentication-Info header')
return False
if 'signature' not in options or 'node' not in options or (not base64.check(options['signature'])) or (not uuid4.check(options['node'])):
self._log.error('illegal Authentication-Info header')
return False
sslinfo = self.client.connection[0].get_extra_info('sslinfo')
cb = sslinfo.get_channel_binding('tls-unique')
signature = base64.decode(options['signature'])
cert = model.get_certificate(uuid, options['node'])
if cert is None:
self._log.error('unknown node {} in ED25519_CB authentication', node)
return False
pubkey = base64.decode(cert['keys']['auth']['public'])
status = crypto.sign_verify(cb, signature, pubkey, 'ed25519')
if not status:
self._log.error('ED25519_CB signature did not match')
return False
return status
|
def _check_ed25519_cb_auth(self, uuid, response, model):
"""Verify ED25519_CB authentication."""
authinfo = response.get_header('Authentication-Info', '')
try:
options = {}
p1 = authinfo.find(sep1)
if p1 == -1:
(method, options) = (authinfo, options)
head = authinfo[:p1].strip()
optvals = authinfo[p1 + 1:].split(sep2)
for optval in optvals:
optval = optval.strip()
mobj = _re_optval.match(optval)
if mobj is None:
raise ValueError('Illegal option string')
key = mobj.group(1)
value = mobj.group(2)
if value.startswith('"'):
value = value[1:-1]
options[key] = value
(method, options) = (head, options)
except ValueError:
self._log.error('illegal Authentication-Info header')
return False
if 'signature' not in options or 'node' not in options or (not base64.check(options['signature'])) or (not uuid4.check(options['node'])):
self._log.error('illegal Authentication-Info header')
return False
sslinfo = self.client.connection[0].get_extra_info('sslinfo')
cb = sslinfo.get_channel_binding('tls-unique')
signature = base64.decode(options['signature'])
cert = model.get_certificate(uuid, options['node'])
if cert is None:
self._log.error('unknown node {} in ED25519_CB authentication', node)
return False
pubkey = base64.decode(cert['keys']['auth']['public'])
status = crypto.sign_verify(cb, signature, pubkey, 'ed25519')
if not status:
self._log.error('ED25519_CB signature did not match')
return False
return status
|
bluepass
|
positive
|
def _get_list(self, filters: Dict[str, Any], ordering: Optional[str], limit: Optional[int], offset: Optional[int]) -> Tuple[List[T], int]:
<DeepExtract>
param_to_chunk = next(((name, val) for (name, val) in filters.items() if isinstance(val, (list, tuple, set)) and len(val) > FILTER_CHUNK_SIZE), None)
if param_to_chunk is None:
filter_chunks = [filters]
(name, param_list) = param_to_chunk
filter_chunks = [{**filters, name: chunk} for chunk in chunk_list(list(param_list), FILTER_CHUNK_SIZE)]
</DeepExtract>
full_count: int = 0
full_results: List[Dict[str, Any]] = []
for filter_chunk in filter_chunks:
<DeepExtract>
base_offset = 0 if offset is None else offset
page_size = MAX_PAGE_SIZE if limit is None else min(limit, MAX_PAGE_SIZE)
query_params = self._build_query_params(filter_chunk, ordering, limit=page_size, offset=base_offset)
response_data = self._client.get(self._api_path, **query_params)
(count, results) = self._unpack_list_response(response_data)
num_to_fetch = count if limit is None else min(limit, count)
num_pages = ceil(num_to_fetch / MAX_PAGE_SIZE)
for page_no in range(1, num_pages):
to_fetch = min(page_size, num_to_fetch - len(results))
query_params = self._build_query_params(filter_chunk, ordering, limit=to_fetch, offset=base_offset + page_no * page_size)
response_data = self._client.get(self._api_path, **query_params)
(_, page) = self._unpack_list_response(response_data)
results.extend(page)
(count, results) = (count, results)
</DeepExtract>
full_count += count
full_results.extend(results)
if ordering and len(filter_chunks) > 1:
(order_key, reverse) = (ordering.lstrip('-'), True) if ordering.startswith('-') else (ordering, False)
full_results = sorted(full_results, key=lambda r: r[order_key], reverse=reverse)
instances = [self._model_class._from_api(dat) for dat in full_results]
return (instances, full_count)
|
def _get_list(self, filters: Dict[str, Any], ordering: Optional[str], limit: Optional[int], offset: Optional[int]) -> Tuple[List[T], int]:
param_to_chunk = next(((name, val) for (name, val) in filters.items() if isinstance(val, (list, tuple, set)) and len(val) > FILTER_CHUNK_SIZE), None)
if param_to_chunk is None:
filter_chunks = [filters]
(name, param_list) = param_to_chunk
filter_chunks = [{**filters, name: chunk} for chunk in chunk_list(list(param_list), FILTER_CHUNK_SIZE)]
full_count: int = 0
full_results: List[Dict[str, Any]] = []
for filter_chunk in filter_chunks:
base_offset = 0 if offset is None else offset
page_size = MAX_PAGE_SIZE if limit is None else min(limit, MAX_PAGE_SIZE)
query_params = self._build_query_params(filter_chunk, ordering, limit=page_size, offset=base_offset)
response_data = self._client.get(self._api_path, **query_params)
(count, results) = self._unpack_list_response(response_data)
num_to_fetch = count if limit is None else min(limit, count)
num_pages = ceil(num_to_fetch / MAX_PAGE_SIZE)
for page_no in range(1, num_pages):
to_fetch = min(page_size, num_to_fetch - len(results))
query_params = self._build_query_params(filter_chunk, ordering, limit=to_fetch, offset=base_offset + page_no * page_size)
response_data = self._client.get(self._api_path, **query_params)
(_, page) = self._unpack_list_response(response_data)
results.extend(page)
(count, results) = (count, results)
full_count += count
full_results.extend(results)
if ordering and len(filter_chunks) > 1:
(order_key, reverse) = (ordering.lstrip('-'), True) if ordering.startswith('-') else (ordering, False)
full_results = sorted(full_results, key=lambda r: r[order_key], reverse=reverse)
instances = [self._model_class._from_api(dat) for dat in full_results]
return (instances, full_count)
|
balsam
|
positive
|
def _download_data(self, dataset_url, save_path):
if not isdir(save_path):
os.makedirs(save_path)
base = basename(dataset_url)
temp_save_path = join(save_path, base)
with open(temp_save_path, 'wb') as f:
r = requests.get(dataset_url)
f.write(r.content)
if base.endswith('.tar.gz'):
obj = tarfile.open(temp_save_path)
elif base.endswith('.zip'):
obj = ZipFile(temp_save_path, 'r')
else:
raise ValueError('Unknown File Type: {0}.'.format(base))
<DeepExtract>
if self._verbose:
print('Unpacking Data...')
</DeepExtract>
obj.extractall(save_path)
obj.close()
os.remove(temp_save_path)
|
def _download_data(self, dataset_url, save_path):
if not isdir(save_path):
os.makedirs(save_path)
base = basename(dataset_url)
temp_save_path = join(save_path, base)
with open(temp_save_path, 'wb') as f:
r = requests.get(dataset_url)
f.write(r.content)
if base.endswith('.tar.gz'):
obj = tarfile.open(temp_save_path)
elif base.endswith('.zip'):
obj = ZipFile(temp_save_path, 'r')
else:
raise ValueError('Unknown File Type: {0}.'.format(base))
if self._verbose:
print('Unpacking Data...')
obj.extractall(save_path)
obj.close()
os.remove(temp_save_path)
|
EnlightenGAN
|
positive
|
def test_filter_unapplyFilter(self):
<DeepExtract>
sys.argv[1:] = [test_filename]
(gsac, opts) = getDataOpts()
axs = getAxes(opts)
ppmm = PickPhaseMenuMore(gsac, opts, axs)
fake_event = matplotlib.backend_bases.MouseEvent('button_press_event', ppmm.axstk.figure.canvas, 56, 224)
ppmm.filtering(fake_event)
ppmm = ppmm
</DeepExtract>
event_apply = matplotlib.backend_bases.MouseEvent('button_press_event', ppmm.figfilter.canvas, 636, 823)
ppmm.applyFilter(event_apply)
event_unapply = matplotlib.backend_bases.MouseEvent('button_press_event', ppmm.figfilter.canvas, 538, 838)
ppmm.unapplyFilter(event_unapply)
self.assertFalse(ppmm.opts.filterParameters['apply'])
self.assertFalse(py.fignum_exists(ppmm.figfilter.number))
|
def test_filter_unapplyFilter(self):
sys.argv[1:] = [test_filename]
(gsac, opts) = getDataOpts()
axs = getAxes(opts)
ppmm = PickPhaseMenuMore(gsac, opts, axs)
fake_event = matplotlib.backend_bases.MouseEvent('button_press_event', ppmm.axstk.figure.canvas, 56, 224)
ppmm.filtering(fake_event)
ppmm = ppmm
event_apply = matplotlib.backend_bases.MouseEvent('button_press_event', ppmm.figfilter.canvas, 636, 823)
ppmm.applyFilter(event_apply)
event_unapply = matplotlib.backend_bases.MouseEvent('button_press_event', ppmm.figfilter.canvas, 538, 838)
ppmm.unapplyFilter(event_unapply)
self.assertFalse(ppmm.opts.filterParameters['apply'])
self.assertFalse(py.fignum_exists(ppmm.figfilter.number))
|
aimbat
|
positive
|
def register_plugins(self):
<DeepExtract>
try:
for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.types']['plugins']:
cls = entry_point.load()
self.types[ResourceType.get(cls.resource_type).resource_type_id] = cls
logger.debug('Registered resource type {}'.format(cls.__name__))
except SQLAlchemyError as ex:
logger.warning('Failed loading type information: {}'.format(ex))
</DeepExtract>
self.api.register_views(self)
|
def register_plugins(self):
try:
for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.types']['plugins']:
cls = entry_point.load()
self.types[ResourceType.get(cls.resource_type).resource_type_id] = cls
logger.debug('Registered resource type {}'.format(cls.__name__))
except SQLAlchemyError as ex:
logger.warning('Failed loading type information: {}'.format(ex))
self.api.register_views(self)
|
cloud-inquisitor
|
positive
|
def test_get_choices(self):
<DeepExtract>
self.assertEqual(self.field.get_choices(include_blank=False, ordering=('a',)), [(obj.pk, str(obj)) for obj in [self.foo1, self.foo2]])
</DeepExtract>
<DeepExtract>
self.assertEqual(self.field.get_choices(include_blank=False, ordering=('-a',)), [(obj.pk, str(obj)) for obj in [self.foo2, self.foo1]])
</DeepExtract>
|
def test_get_choices(self):
self.assertEqual(self.field.get_choices(include_blank=False, ordering=('a',)), [(obj.pk, str(obj)) for obj in [self.foo1, self.foo2]])
self.assertEqual(self.field.get_choices(include_blank=False, ordering=('-a',)), [(obj.pk, str(obj)) for obj in [self.foo2, self.foo1]])
|
django-firebird
|
positive
|
@cli.command()
@click.argument('config', type=click.Path(exists=True))
def run(config):
"""Runs the bot."""
def signal_handler(signum, frame):
manager.stop()
def attach_signals():
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
signal.signal(sig, signal_handler)
manager = Manager(config_path=config)
<DeepExtract>
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
signal.signal(sig, signal_handler)
</DeepExtract>
manager.run()
|
@cli.command()
@click.argument('config', type=click.Path(exists=True))
def run(config):
"""Runs the bot."""
def signal_handler(signum, frame):
manager.stop()
def attach_signals():
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
signal.signal(sig, signal_handler)
manager = Manager(config_path=config)
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
signal.signal(sig, signal_handler)
manager.run()
|
botnet
|
positive
|
def push(self, human_name=None, uuid=None, tags=None, data_context=None, delocalize=False):
"""
Push a particular hyperframe to our remote context. This only pushes the most recent (in time) version of
the hyperframe. It does not look for committed hyperframes (that's v2).
If current context is bound, copy bundle / files to s3, updating link frames to point to new paths.
Assumes s3 paths have already been sanitized (point to files in our context)
NOTE: we push the most recent hyperframe unless the UUID is specified. More complicated filters for future
work.
NOTE: Only push committed bundles. If no committed tag, then will not push.
TODO: Currently we copy S3 files even if they are already within a frame in a context.
Args:
human_name (str): The name of this bundle
uuid (str) : Uniquely identify the bundle to push.
tags (:dict): Set of tags bundle must have
data_context (`disdat.data_context.DataContext`): Optional data context in which to find / commit bundle.
delocalize (bool): Whether to clean the local context of the files after pushing
Returns:
(`hyperframe.HyperFrameRecord`): The, possibly new, pushed hyperframe.
"""
<DeepExtract>
if data_context is None:
data_context = self.curr_context
if data_context is None:
print('No current context. `dsdt switch <othercontext>`')
data_context = None
data_context = data_context
</DeepExtract>
if data_context is None:
return None
if data_context.remote_ctxt_url is None:
print('Push cannot execute. Local context {} on remote {} not bound.'.format(data_context.local_ctxt, data_context.remote_ctxt))
return None
if tags is None:
tags = {}
tags['committed'] = 'True'
if human_name is None and uuid is None:
<DeepExtract>
_logger.info('Fast Push synchronizing with remote context {}@{}'.format(data_context.remote_ctxt, data_context.remote_ctxt_url))
remote_s3_object_dir = data_context.get_remote_object_dir()
(bucket, _) = aws_s3.split_s3_url(remote_s3_object_dir)
all_keys = aws_s3.ls_s3_url_keys(remote_s3_object_dir, is_object_directory=data_context.bundle_count() > aws_s3.S3_LS_USE_MP_THRESH)
all_keys = {os.path.join('s3://', bucket, k): os.path.join('s3://', bucket, k) for k in all_keys}
all_local_hframes = data_context.get_hframes(tags={'committed': 'True'})
push_tuples = []
to_delete = []
for hfr in all_local_hframes:
src_dst_copies = self._copy_hfr_to_remote_branch(hfr, data_context, dry_run=True)
for (src, dst) in src_dst_copies:
if dst not in all_keys:
push_tuples.append((src, dst))
if not hyperframe.is_hyperframe_pb_file(src):
to_delete.append(urllib.parse.urlparse(src).path)
_logger.info('Fast push copying {} objects to S3 . . .'.format(len(push_tuples)))
results = aws_s3.put_s3_key_many(push_tuples)
_logger.info('Fast push completed {} transfers -- process pool closed and joined.'.format(len(results)))
assert len(results) == len(push_tuples), 'Fast push failed: transferred {} out of {} files'.format(len(results), len(push_tuples))
if delocalize:
for f in to_delete:
try:
os.remove(f)
except IOError as e:
print('fast_push: during delocalization, unable to remove {} due to {}'.format(f, e))
</DeepExtract>
return
if uuid is not None:
<DeepExtract>
data_context = self.ensure_data_context(data_context)
if data_context is None:
raise Exception('No current context')
found = data_context.get_hframes(uuid=uuid, tags=tags)
if len(found) == 1:
hfr = found[0]
elif len(found) == 0:
hfr = None
else:
raise Exception('Many records {} found with uuid {}'.format(len(found), uuid))
</DeepExtract>
elif human_name is not None:
<DeepExtract>
data_context = self.ensure_data_context(data_context)
if data_context is None:
raise Exception('No current context')
found = data_context.get_hframes(human_name=human_name, tags=tags)
if len(found) > 0:
if getall:
hfr = found
else:
hfr = found[0]
else:
hfr = None
</DeepExtract>
else:
print('Push requires either a human name or a uuid to identify the hyperframe.')
return None
if hfr is None:
print('Push unable to find committed bundle name [{}] uuid [{}]'.format(human_name, uuid))
return None
to_delete = []
try:
<DeepExtract>
assert data_context is not None
copies = []
for fr in hfr.get_frames(data_context):
if fr.is_hfr_frame():
for next_hfr in fr.get_hframes():
copies.extend(self._copy_hfr_to_remote_branch(next_hfr, data_context, dry_run=dry_run))
else:
obj_dir = data_context.get_remote_object_dir()
copies.extend(self._copy_fr_links_to_branch(fr, obj_dir, data_context, dry_run=dry_run))
copies.extend(data_context.write_hframe_remote(hfr, dry_run=dry_run))
src_dst_copies = copies
</DeepExtract>
for (src, dst) in src_dst_copies:
if not hyperframe.is_hyperframe_pb_file(src):
to_delete.append(urllib.parse.urlparse(src).path)
except Exception as e:
print('Push unable to copy bundle to branch: {}'.format(e))
return None
if delocalize:
for f in to_delete:
try:
os.remove(f)
except IOError as e:
print('fast_push: during delocalization, unable to remove {} due to {}'.format(f, e))
print('Pushed committed bundle {} uuid {} to remote {}'.format(human_name, hfr.pb.uuid, data_context.remote_ctxt_url))
return hfr
|
def push(self, human_name=None, uuid=None, tags=None, data_context=None, delocalize=False):
"""
Push a particular hyperframe to our remote context. This only pushes the most recent (in time) version of
the hyperframe. It does not look for committed hyperframes (that's v2).
If current context is bound, copy bundle / files to s3, updating link frames to point to new paths.
Assumes s3 paths have already been sanitized (point to files in our context)
NOTE: we push the most recent hyperframe unless the UUID is specified. More complicated filters for future
work.
NOTE: Only push committed bundles. If no committed tag, then will not push.
TODO: Currently we copy S3 files even if they are already within a frame in a context.
Args:
human_name (str): The name of this bundle
uuid (str) : Uniquely identify the bundle to push.
tags (:dict): Set of tags bundle must have
data_context (`disdat.data_context.DataContext`): Optional data context in which to find / commit bundle.
delocalize (bool): Whether to clean the local context of the files after pushing
Returns:
(`hyperframe.HyperFrameRecord`): The, possibly new, pushed hyperframe.
"""
if data_context is None:
data_context = self.curr_context
if data_context is None:
print('No current context. `dsdt switch <othercontext>`')
data_context = None
data_context = data_context
if data_context is None:
return None
if data_context.remote_ctxt_url is None:
print('Push cannot execute. Local context {} on remote {} not bound.'.format(data_context.local_ctxt, data_context.remote_ctxt))
return None
if tags is None:
tags = {}
tags['committed'] = 'True'
if human_name is None and uuid is None:
_logger.info('Fast Push synchronizing with remote context {}@{}'.format(data_context.remote_ctxt, data_context.remote_ctxt_url))
remote_s3_object_dir = data_context.get_remote_object_dir()
(bucket, _) = aws_s3.split_s3_url(remote_s3_object_dir)
all_keys = aws_s3.ls_s3_url_keys(remote_s3_object_dir, is_object_directory=data_context.bundle_count() > aws_s3.S3_LS_USE_MP_THRESH)
all_keys = {os.path.join('s3://', bucket, k): os.path.join('s3://', bucket, k) for k in all_keys}
all_local_hframes = data_context.get_hframes(tags={'committed': 'True'})
push_tuples = []
to_delete = []
for hfr in all_local_hframes:
src_dst_copies = self._copy_hfr_to_remote_branch(hfr, data_context, dry_run=True)
for (src, dst) in src_dst_copies:
if dst not in all_keys:
push_tuples.append((src, dst))
if not hyperframe.is_hyperframe_pb_file(src):
to_delete.append(urllib.parse.urlparse(src).path)
_logger.info('Fast push copying {} objects to S3 . . .'.format(len(push_tuples)))
results = aws_s3.put_s3_key_many(push_tuples)
_logger.info('Fast push completed {} transfers -- process pool closed and joined.'.format(len(results)))
assert len(results) == len(push_tuples), 'Fast push failed: transferred {} out of {} files'.format(len(results), len(push_tuples))
if delocalize:
for f in to_delete:
try:
os.remove(f)
except IOError as e:
print('fast_push: during delocalization, unable to remove {} due to {}'.format(f, e))
return
if uuid is not None:
data_context = self.ensure_data_context(data_context)
if data_context is None:
raise Exception('No current context')
found = data_context.get_hframes(uuid=uuid, tags=tags)
if len(found) == 1:
hfr = found[0]
elif len(found) == 0:
hfr = None
else:
raise Exception('Many records {} found with uuid {}'.format(len(found), uuid))
elif human_name is not None:
data_context = self.ensure_data_context(data_context)
if data_context is None:
raise Exception('No current context')
found = data_context.get_hframes(human_name=human_name, tags=tags)
if len(found) > 0:
if getall:
hfr = found
else:
hfr = found[0]
else:
hfr = None
else:
print('Push requires either a human name or a uuid to identify the hyperframe.')
return None
if hfr is None:
print('Push unable to find committed bundle name [{}] uuid [{}]'.format(human_name, uuid))
return None
to_delete = []
try:
assert data_context is not None
copies = []
for fr in hfr.get_frames(data_context):
if fr.is_hfr_frame():
for next_hfr in fr.get_hframes():
copies.extend(self._copy_hfr_to_remote_branch(next_hfr, data_context, dry_run=dry_run))
else:
obj_dir = data_context.get_remote_object_dir()
copies.extend(self._copy_fr_links_to_branch(fr, obj_dir, data_context, dry_run=dry_run))
copies.extend(data_context.write_hframe_remote(hfr, dry_run=dry_run))
src_dst_copies = copies
for (src, dst) in src_dst_copies:
if not hyperframe.is_hyperframe_pb_file(src):
to_delete.append(urllib.parse.urlparse(src).path)
except Exception as e:
print('Push unable to copy bundle to branch: {}'.format(e))
return None
if delocalize:
for f in to_delete:
try:
os.remove(f)
except IOError as e:
print('fast_push: during delocalization, unable to remove {} due to {}'.format(f, e))
print('Pushed committed bundle {} uuid {} to remote {}'.format(human_name, hfr.pb.uuid, data_context.remote_ctxt_url))
return hfr
|
disdat
|
positive
|
def test_area(self):
<DeepExtract>
riscv_machine.re_init()
riscv_machine.base = 'hex'
riscv_machine.flavor = 'riscv'
test_code = self.read_test_code(TEST_DIR_NAME + 'area.asm')
assemble(test_code, riscv_machine)
</DeepExtract>
self.assertEqual(riscv_machine.registers['X8'], 35)
self.assertEqual(riscv_machine.registers['X9'], 27)
self.assertEqual(riscv_machine.registers['X10'], 945)
|
def test_area(self):
riscv_machine.re_init()
riscv_machine.base = 'hex'
riscv_machine.flavor = 'riscv'
test_code = self.read_test_code(TEST_DIR_NAME + 'area.asm')
assemble(test_code, riscv_machine)
self.assertEqual(riscv_machine.registers['X8'], 35)
self.assertEqual(riscv_machine.registers['X9'], 27)
self.assertEqual(riscv_machine.registers['X10'], 945)
|
Emu86
|
positive
|
def exct_decode(t_heat, l_heat, b_heat, r_heat, ct_heat, t_regr=None, l_regr=None, b_regr=None, r_regr=None, K=40, scores_thresh=0.1, center_thresh=0.1, aggr_weight=0.0, num_dets=1000):
(batch, cat, height, width) = t_heat.size()
'\n t_heat = torch.sigmoid(t_heat)\n l_heat = torch.sigmoid(l_heat)\n b_heat = torch.sigmoid(b_heat)\n r_heat = torch.sigmoid(r_heat)\n ct_heat = torch.sigmoid(ct_heat)\n '
if aggr_weight > 0:
<DeepExtract>
t_heat = aggr_weight * _left_aggregate(t_heat) + aggr_weight * _right_aggregate(t_heat) + t_heat
</DeepExtract>
<DeepExtract>
l_heat = aggr_weight * _top_aggregate(l_heat) + aggr_weight * _bottom_aggregate(l_heat) + l_heat
</DeepExtract>
<DeepExtract>
b_heat = aggr_weight * _left_aggregate(b_heat) + aggr_weight * _right_aggregate(b_heat) + b_heat
</DeepExtract>
<DeepExtract>
r_heat = aggr_weight * _top_aggregate(r_heat) + aggr_weight * _bottom_aggregate(r_heat) + r_heat
</DeepExtract>
<DeepExtract>
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(t_heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == t_heat).float()
t_heat = t_heat * keep
</DeepExtract>
<DeepExtract>
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(l_heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == l_heat).float()
l_heat = l_heat * keep
</DeepExtract>
<DeepExtract>
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(b_heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == b_heat).float()
b_heat = b_heat * keep
</DeepExtract>
<DeepExtract>
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(r_heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == r_heat).float()
r_heat = r_heat * keep
</DeepExtract>
t_heat[t_heat > 1] = 1
l_heat[l_heat > 1] = 1
b_heat[b_heat > 1] = 1
r_heat[r_heat > 1] = 1
<DeepExtract>
(batch, cat, height, width) = t_heat.size()
(topk_scores, topk_inds) = torch.topk(t_heat.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
(topk_score, topk_ind) = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
(t_scores, t_inds, t_clses, t_ys, t_xs) = (topk_score, topk_inds, topk_clses, topk_ys, topk_xs)
</DeepExtract>
<DeepExtract>
(batch, cat, height, width) = l_heat.size()
(topk_scores, topk_inds) = torch.topk(l_heat.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
(topk_score, topk_ind) = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
(l_scores, l_inds, l_clses, l_ys, l_xs) = (topk_score, topk_inds, topk_clses, topk_ys, topk_xs)
</DeepExtract>
<DeepExtract>
(batch, cat, height, width) = b_heat.size()
(topk_scores, topk_inds) = torch.topk(b_heat.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
(topk_score, topk_ind) = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
(b_scores, b_inds, b_clses, b_ys, b_xs) = (topk_score, topk_inds, topk_clses, topk_ys, topk_xs)
</DeepExtract>
<DeepExtract>
(batch, cat, height, width) = r_heat.size()
(topk_scores, topk_inds) = torch.topk(r_heat.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
(topk_score, topk_ind) = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
(r_scores, r_inds, r_clses, r_ys, r_xs) = (topk_score, topk_inds, topk_clses, topk_ys, topk_xs)
</DeepExtract>
t_ys = t_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
t_xs = t_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
l_ys = l_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
l_xs = l_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
b_ys = b_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
b_xs = b_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
r_ys = r_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
r_xs = r_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
t_clses = t_clses.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
l_clses = l_clses.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
b_clses = b_clses.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
r_clses = r_clses.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
box_ct_xs = ((l_xs + r_xs + 0.5) / 2).long()
box_ct_ys = ((t_ys + b_ys + 0.5) / 2).long()
ct_inds = t_clses.long() * (height * width) + box_ct_ys * width + box_ct_xs
ct_inds = ct_inds.view(batch, -1)
ct_heat = ct_heat.view(batch, -1, 1)
ct_scores = _gather_feat(ct_heat, ct_inds)
t_scores = t_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
l_scores = l_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
b_scores = b_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
r_scores = r_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
ct_scores = ct_scores.view(batch, K, K, K, K)
scores = (t_scores + l_scores + b_scores + r_scores + 2 * ct_scores) / 6
cls_inds = (t_clses != l_clses) + (t_clses != b_clses) + (t_clses != r_clses)
cls_inds = cls_inds > 0
top_inds = (t_ys > l_ys) + (t_ys > b_ys) + (t_ys > r_ys)
top_inds = top_inds > 0
left_inds = (l_xs > t_xs) + (l_xs > b_xs) + (l_xs > r_xs)
left_inds = left_inds > 0
bottom_inds = (b_ys < t_ys) + (b_ys < l_ys) + (b_ys < r_ys)
bottom_inds = bottom_inds > 0
right_inds = (r_xs < t_xs) + (r_xs < l_xs) + (r_xs < b_xs)
right_inds = right_inds > 0
sc_inds = (t_scores < scores_thresh) + (l_scores < scores_thresh) + (b_scores < scores_thresh) + (r_scores < scores_thresh) + (ct_scores < center_thresh)
sc_inds = sc_inds > 0
scores = scores - sc_inds.float()
scores = scores - cls_inds.float()
scores = scores - top_inds.float()
scores = scores - left_inds.float()
scores = scores - bottom_inds.float()
scores = scores - right_inds.float()
scores = scores.view(batch, -1)
(scores, inds) = torch.topk(scores, num_dets)
scores = scores.unsqueeze(2)
if t_regr is not None and l_regr is not None and (b_regr is not None) and (r_regr is not None):
t_regr = _tranpose_and_gather_feat(t_regr, t_inds)
t_regr = t_regr.view(batch, K, 1, 1, 1, 2)
l_regr = _tranpose_and_gather_feat(l_regr, l_inds)
l_regr = l_regr.view(batch, 1, K, 1, 1, 2)
b_regr = _tranpose_and_gather_feat(b_regr, b_inds)
b_regr = b_regr.view(batch, 1, 1, K, 1, 2)
r_regr = _tranpose_and_gather_feat(r_regr, r_inds)
r_regr = r_regr.view(batch, 1, 1, 1, K, 2)
t_xs = t_xs + t_regr[..., 0]
t_ys = t_ys + t_regr[..., 1]
l_xs = l_xs + l_regr[..., 0]
l_ys = l_ys + l_regr[..., 1]
b_xs = b_xs + b_regr[..., 0]
b_ys = b_ys + b_regr[..., 1]
r_xs = r_xs + r_regr[..., 0]
r_ys = r_ys + r_regr[..., 1]
else:
t_xs = t_xs + 0.5
t_ys = t_ys + 0.5
l_xs = l_xs + 0.5
l_ys = l_ys + 0.5
b_xs = b_xs + 0.5
b_ys = b_ys + 0.5
r_xs = r_xs + 0.5
r_ys = r_ys + 0.5
bboxes = torch.stack((l_xs, t_ys, r_xs, b_ys), dim=5)
bboxes = bboxes.view(batch, -1, 4)
bboxes = _gather_feat(bboxes, inds)
clses = t_clses.contiguous().view(batch, -1, 1)
clses = _gather_feat(clses, inds).float()
t_xs = t_xs.contiguous().view(batch, -1, 1)
t_xs = _gather_feat(t_xs, inds).float()
t_ys = t_ys.contiguous().view(batch, -1, 1)
t_ys = _gather_feat(t_ys, inds).float()
l_xs = l_xs.contiguous().view(batch, -1, 1)
l_xs = _gather_feat(l_xs, inds).float()
l_ys = l_ys.contiguous().view(batch, -1, 1)
l_ys = _gather_feat(l_ys, inds).float()
b_xs = b_xs.contiguous().view(batch, -1, 1)
b_xs = _gather_feat(b_xs, inds).float()
b_ys = b_ys.contiguous().view(batch, -1, 1)
b_ys = _gather_feat(b_ys, inds).float()
r_xs = r_xs.contiguous().view(batch, -1, 1)
r_xs = _gather_feat(r_xs, inds).float()
r_ys = r_ys.contiguous().view(batch, -1, 1)
r_ys = _gather_feat(r_ys, inds).float()
detections = torch.cat([bboxes, scores, t_xs, t_ys, l_xs, l_ys, b_xs, b_ys, r_xs, r_ys, clses], dim=2)
return detections
|
def exct_decode(t_heat, l_heat, b_heat, r_heat, ct_heat, t_regr=None, l_regr=None, b_regr=None, r_regr=None, K=40, scores_thresh=0.1, center_thresh=0.1, aggr_weight=0.0, num_dets=1000):
(batch, cat, height, width) = t_heat.size()
'\n t_heat = torch.sigmoid(t_heat)\n l_heat = torch.sigmoid(l_heat)\n b_heat = torch.sigmoid(b_heat)\n r_heat = torch.sigmoid(r_heat)\n ct_heat = torch.sigmoid(ct_heat)\n '
if aggr_weight > 0:
t_heat = aggr_weight * _left_aggregate(t_heat) + aggr_weight * _right_aggregate(t_heat) + t_heat
l_heat = aggr_weight * _top_aggregate(l_heat) + aggr_weight * _bottom_aggregate(l_heat) + l_heat
b_heat = aggr_weight * _left_aggregate(b_heat) + aggr_weight * _right_aggregate(b_heat) + b_heat
r_heat = aggr_weight * _top_aggregate(r_heat) + aggr_weight * _bottom_aggregate(r_heat) + r_heat
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(t_heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == t_heat).float()
t_heat = t_heat * keep
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(l_heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == l_heat).float()
l_heat = l_heat * keep
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(b_heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == b_heat).float()
b_heat = b_heat * keep
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(r_heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == r_heat).float()
r_heat = r_heat * keep
t_heat[t_heat > 1] = 1
l_heat[l_heat > 1] = 1
b_heat[b_heat > 1] = 1
r_heat[r_heat > 1] = 1
(batch, cat, height, width) = t_heat.size()
(topk_scores, topk_inds) = torch.topk(t_heat.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
(topk_score, topk_ind) = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
(t_scores, t_inds, t_clses, t_ys, t_xs) = (topk_score, topk_inds, topk_clses, topk_ys, topk_xs)
(batch, cat, height, width) = l_heat.size()
(topk_scores, topk_inds) = torch.topk(l_heat.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
(topk_score, topk_ind) = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
(l_scores, l_inds, l_clses, l_ys, l_xs) = (topk_score, topk_inds, topk_clses, topk_ys, topk_xs)
(batch, cat, height, width) = b_heat.size()
(topk_scores, topk_inds) = torch.topk(b_heat.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
(topk_score, topk_ind) = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
(b_scores, b_inds, b_clses, b_ys, b_xs) = (topk_score, topk_inds, topk_clses, topk_ys, topk_xs)
(batch, cat, height, width) = r_heat.size()
(topk_scores, topk_inds) = torch.topk(r_heat.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
(topk_score, topk_ind) = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
(r_scores, r_inds, r_clses, r_ys, r_xs) = (topk_score, topk_inds, topk_clses, topk_ys, topk_xs)
t_ys = t_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
t_xs = t_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
l_ys = l_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
l_xs = l_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
b_ys = b_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
b_xs = b_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
r_ys = r_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
r_xs = r_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
t_clses = t_clses.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
l_clses = l_clses.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
b_clses = b_clses.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
r_clses = r_clses.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
box_ct_xs = ((l_xs + r_xs + 0.5) / 2).long()
box_ct_ys = ((t_ys + b_ys + 0.5) / 2).long()
ct_inds = t_clses.long() * (height * width) + box_ct_ys * width + box_ct_xs
ct_inds = ct_inds.view(batch, -1)
ct_heat = ct_heat.view(batch, -1, 1)
ct_scores = _gather_feat(ct_heat, ct_inds)
t_scores = t_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)
l_scores = l_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)
b_scores = b_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)
r_scores = r_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)
ct_scores = ct_scores.view(batch, K, K, K, K)
scores = (t_scores + l_scores + b_scores + r_scores + 2 * ct_scores) / 6
cls_inds = (t_clses != l_clses) + (t_clses != b_clses) + (t_clses != r_clses)
cls_inds = cls_inds > 0
top_inds = (t_ys > l_ys) + (t_ys > b_ys) + (t_ys > r_ys)
top_inds = top_inds > 0
left_inds = (l_xs > t_xs) + (l_xs > b_xs) + (l_xs > r_xs)
left_inds = left_inds > 0
bottom_inds = (b_ys < t_ys) + (b_ys < l_ys) + (b_ys < r_ys)
bottom_inds = bottom_inds > 0
right_inds = (r_xs < t_xs) + (r_xs < l_xs) + (r_xs < b_xs)
right_inds = right_inds > 0
sc_inds = (t_scores < scores_thresh) + (l_scores < scores_thresh) + (b_scores < scores_thresh) + (r_scores < scores_thresh) + (ct_scores < center_thresh)
sc_inds = sc_inds > 0
scores = scores - sc_inds.float()
scores = scores - cls_inds.float()
scores = scores - top_inds.float()
scores = scores - left_inds.float()
scores = scores - bottom_inds.float()
scores = scores - right_inds.float()
scores = scores.view(batch, -1)
(scores, inds) = torch.topk(scores, num_dets)
scores = scores.unsqueeze(2)
if t_regr is not None and l_regr is not None and (b_regr is not None) and (r_regr is not None):
t_regr = _tranpose_and_gather_feat(t_regr, t_inds)
t_regr = t_regr.view(batch, K, 1, 1, 1, 2)
l_regr = _tranpose_and_gather_feat(l_regr, l_inds)
l_regr = l_regr.view(batch, 1, K, 1, 1, 2)
b_regr = _tranpose_and_gather_feat(b_regr, b_inds)
b_regr = b_regr.view(batch, 1, 1, K, 1, 2)
r_regr = _tranpose_and_gather_feat(r_regr, r_inds)
r_regr = r_regr.view(batch, 1, 1, 1, K, 2)
t_xs = t_xs + t_regr[..., 0]
t_ys = t_ys + t_regr[..., 1]
l_xs = l_xs + l_regr[..., 0]
l_ys = l_ys + l_regr[..., 1]
b_xs = b_xs + b_regr[..., 0]
b_ys = b_ys + b_regr[..., 1]
r_xs = r_xs + r_regr[..., 0]
r_ys = r_ys + r_regr[..., 1]
else:
t_xs = t_xs + 0.5
t_ys = t_ys + 0.5
l_xs = l_xs + 0.5
l_ys = l_ys + 0.5
b_xs = b_xs + 0.5
b_ys = b_ys + 0.5
r_xs = r_xs + 0.5
r_ys = r_ys + 0.5
bboxes = torch.stack((l_xs, t_ys, r_xs, b_ys), dim=5)
bboxes = bboxes.view(batch, -1, 4)
bboxes = _gather_feat(bboxes, inds)
clses = t_clses.contiguous().view(batch, -1, 1)
clses = _gather_feat(clses, inds).float()
t_xs = t_xs.contiguous().view(batch, -1, 1)
t_xs = _gather_feat(t_xs, inds).float()
t_ys = t_ys.contiguous().view(batch, -1, 1)
t_ys = _gather_feat(t_ys, inds).float()
l_xs = l_xs.contiguous().view(batch, -1, 1)
l_xs = _gather_feat(l_xs, inds).float()
l_ys = l_ys.contiguous().view(batch, -1, 1)
l_ys = _gather_feat(l_ys, inds).float()
b_xs = b_xs.contiguous().view(batch, -1, 1)
b_xs = _gather_feat(b_xs, inds).float()
b_ys = b_ys.contiguous().view(batch, -1, 1)
b_ys = _gather_feat(b_ys, inds).float()
r_xs = r_xs.contiguous().view(batch, -1, 1)
r_xs = _gather_feat(r_xs, inds).float()
r_ys = r_ys.contiguous().view(batch, -1, 1)
r_ys = _gather_feat(r_ys, inds).float()
detections = torch.cat([bboxes, scores, t_xs, t_ys, l_xs, l_ys, b_xs, b_ys, r_xs, r_ys, clses], dim=2)
return detections
|
centerNet-deep-sort
|
positive
|
def forward_dummy(self, img):
<DeepExtract>
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
x = x
</DeepExtract>
outs = self.bbox_head(x)
return outs
|
def forward_dummy(self, img):
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
x = x
outs = self.bbox_head(x)
return outs
|
DNL-Object-Detection
|
positive
|
def verify_input(self, index, public_key):
tx_in = self.tx_ins[index]
<DeepExtract>
outpoint = self.tx_ins[index].outpoint
message = serialize(outpoint) + serialize(self.tx_outs)
</DeepExtract>
return public_key.verify(tx_in.signature, message)
|
def verify_input(self, index, public_key):
tx_in = self.tx_ins[index]
outpoint = self.tx_ins[index].outpoint
message = serialize(outpoint) + serialize(self.tx_outs)
return public_key.verify(tx_in.signature, message)
|
digital-cash
|
positive
|
def get_context(self, context):
if 'delete_url' in context:
<DeepExtract>
context['delete_url'] = context['delete_url'] + ('&' if context['delete_url'].find('?') > 0 else '?') + '%s=%s' % self._get_relate_params()
</DeepExtract>
return context
|
def get_context(self, context):
if 'delete_url' in context:
context['delete_url'] = context['delete_url'] + ('&' if context['delete_url'].find('?') > 0 else '?') + '%s=%s' % self._get_relate_params()
return context
|
devops
|
positive
|
def start(self, skip=0):
<DeepExtract>
model = CChessModel(self.config)
if self.config.opts.new or not load_sl_best_model_weight(model):
model.build()
save_as_sl_best_model(model)
self.model = model
</DeepExtract>
with open(self.config.resource.sl_onegreen, 'r') as f:
self.games = json.load(f)
<DeepExtract>
self.compile_model()
total_steps = self.config.trainer.start_total_steps
logger.info(f'Start training, game count = {len(self.games)}, step = {self.config.trainer.sl_game_step} games, skip = {skip}')
for i in range(skip, len(self.games), self.config.trainer.sl_game_step):
games = self.games[i:i + self.config.trainer.sl_game_step]
self.fill_queue(games)
if len(self.dataset[0]) > self.config.trainer.batch_size:
steps = self.train_epoch(self.config.trainer.epoch_to_checkpoint)
total_steps += steps
self.save_current_model()
(a, b, c) = self.dataset
a.clear()
b.clear()
c.clear()
logger.debug(f'total steps = {total_steps}')
</DeepExtract>
|
def start(self, skip=0):
model = CChessModel(self.config)
if self.config.opts.new or not load_sl_best_model_weight(model):
model.build()
save_as_sl_best_model(model)
self.model = model
with open(self.config.resource.sl_onegreen, 'r') as f:
self.games = json.load(f)
self.compile_model()
total_steps = self.config.trainer.start_total_steps
logger.info(f'Start training, game count = {len(self.games)}, step = {self.config.trainer.sl_game_step} games, skip = {skip}')
for i in range(skip, len(self.games), self.config.trainer.sl_game_step):
games = self.games[i:i + self.config.trainer.sl_game_step]
self.fill_queue(games)
if len(self.dataset[0]) > self.config.trainer.batch_size:
steps = self.train_epoch(self.config.trainer.epoch_to_checkpoint)
total_steps += steps
self.save_current_model()
(a, b, c) = self.dataset
a.clear()
b.clear()
c.clear()
logger.debug(f'total steps = {total_steps}')
|
ChineseChess-AlphaZero
|
positive
|
def tokenize(self, file_name):
<DeepExtract>
assert os.path.exists(file_name), 'file does not exists %s' % file_name
lines = []
with open(file_name, 'r') as f:
for line in f:
lines.append(line.strip())
lines = lines
</DeepExtract>
random.shuffle(lines)
def make_mask(choices, inv=False):
items = torch.Tensor([self.item_dict.w2i(c, inv=inv) for c in choices]).long()
mask = torch.Tensor(len(self.item_dict)).zero_()
mask.scatter_(0, items, torch.Tensor(items.size(0)).fill_(1))
return mask.unsqueeze(0)
def make_indexes(choices):
items = torch.Tensor([self.item_dict.w2i(c) for c in choices]).long()
return items
unk = self.word_dict.get_idx('<unk>')
(dataset, total, unks) = ([], 0, 0)
for line in lines:
tokens = line.split()
input_idxs = self.context_dict.w2i(get_tag(tokens, 'input'))
count_idx = self.count_dict.get_idx(get_tag(tokens, 'input'))
word_idxs = self.word_dict.w2i(get_tag(tokens, 'dialogue'))
item_idx = self.item_dict.w2i(get_tag(tokens, 'output'), inv=False)
item_idx_inv = self.item_dict.w2i(get_tag(tokens, 'output'), inv=True)
items = self.item_dict_old.w2i(get_tag(tokens, 'output'))
partner_input_idxs = self.context_dict.w2i(get_tag(tokens, 'partner_input'))
if self.sep_sel:
dataset.append((input_idxs, word_idxs, items, partner_input_idxs, count_idx))
else:
dataset.append((input_idxs, word_idxs, [item_idx, item_idx_inv], partner_input_idxs, count_idx))
total += len(input_idxs) + len(word_idxs) + len(partner_input_idxs)
unks += np.count_nonzero([idx == unk for idx in word_idxs])
if self.verbose:
print('dataset %s, total %d, unks %s, ratio %0.2f%%' % (file_name, total, unks, 100.0 * unks / total))
return dataset
|
def tokenize(self, file_name):
assert os.path.exists(file_name), 'file does not exists %s' % file_name
lines = []
with open(file_name, 'r') as f:
for line in f:
lines.append(line.strip())
lines = lines
random.shuffle(lines)
def make_mask(choices, inv=False):
items = torch.Tensor([self.item_dict.w2i(c, inv=inv) for c in choices]).long()
mask = torch.Tensor(len(self.item_dict)).zero_()
mask.scatter_(0, items, torch.Tensor(items.size(0)).fill_(1))
return mask.unsqueeze(0)
def make_indexes(choices):
items = torch.Tensor([self.item_dict.w2i(c) for c in choices]).long()
return items
unk = self.word_dict.get_idx('<unk>')
(dataset, total, unks) = ([], 0, 0)
for line in lines:
tokens = line.split()
input_idxs = self.context_dict.w2i(get_tag(tokens, 'input'))
count_idx = self.count_dict.get_idx(get_tag(tokens, 'input'))
word_idxs = self.word_dict.w2i(get_tag(tokens, 'dialogue'))
item_idx = self.item_dict.w2i(get_tag(tokens, 'output'), inv=False)
item_idx_inv = self.item_dict.w2i(get_tag(tokens, 'output'), inv=True)
items = self.item_dict_old.w2i(get_tag(tokens, 'output'))
partner_input_idxs = self.context_dict.w2i(get_tag(tokens, 'partner_input'))
if self.sep_sel:
dataset.append((input_idxs, word_idxs, items, partner_input_idxs, count_idx))
else:
dataset.append((input_idxs, word_idxs, [item_idx, item_idx_inv], partner_input_idxs, count_idx))
total += len(input_idxs) + len(word_idxs) + len(partner_input_idxs)
unks += np.count_nonzero([idx == unk for idx in word_idxs])
if self.verbose:
print('dataset %s, total %d, unks %s, ratio %0.2f%%' % (file_name, total, unks, 100.0 * unks / total))
return dataset
|
end-to-end-negotiator
|
positive
|
def construct(self):
<DeepExtract>
if self.svg_type == 'svg':
try:
pre_imagen = SVGMobject('%s' % self.file)
except:
pre_imagen = self.custom_object()
elif self.svg_type == 'text':
pre_imagen = self.import_text()
else:
pre_imagen = self.custom_object()
pre_imagen = pre_imagen
</DeepExtract>
if self.get_cero:
self.imagen = pre_imagen[0]
else:
self.imagen = pre_imagen
self.imagen.set_color(color=self.color).set_style(fill_opacity=self.fill_opacity, stroke_color=self.stroke_color, stroke_width=self.stroke_width, stroke_opacity=self.stroke_opacity, sheen_factor=self.sheen_factor, sheen_direction=self.sheen_direction)
if self.gradient_color:
self.imagen.set_color_by_gradient(*self.gradient_colors)
if self.cycle_color:
get_cycle_color = it.cycle(self.cycle_colors)
for obj in self.imagen:
obj.set_color(next(get_cycle_color))
if self.width != None:
self.imagen.set_width(self.width)
elif self.height != None:
self.imagen.set_height(self.height)
elif self.scale != None:
self.imagen.scale(self.scale)
else:
self.imagen.set_width(FRAME_WIDTH)
if self.imagen.get_height() > FRAME_HEIGHT:
self.imagen.set_height(FRAME_HEIGHT)
self.imagen.rotate(self.angle)
if self.flip == True:
self.imagen.flip(self.flip_edge)
for st in self.remove_stroke:
self.imagen[st].set_stroke(None, 0)
for st in self.show_stroke:
self.imagen[st].set_stroke(None, self.show_stroke_stroke)
<DeepExtract>
pass
</DeepExtract>
if self.show_numbers == True:
<DeepExtract>
self.imagen.copy().set_color(self.warning_color)
self.add(self.imagen.copy())
for j in range(len(self.imagen.copy())):
permission_print = True
for w in self.remove:
if j == w:
permission_print = False
if permission_print:
self.add(self.imagen[j])
if self.show_removers:
for obj in self.remove:
self.add_foreground_mobject(self.imagen.copy()[obj])
c = 0
for j in range(len(self.imagen.copy())):
permission_print = True
if self.number_type == 'TextMobject':
element = TexMobject('%d' % c, color=self.color_numbers, background_stroke_width=self.background_stroke_width)
else:
element = Text('%d' % c).set_color(self.color_numbers)
element.scale(self.numbers_scale)
element.next_to(self.imagen.copy()[j], self.direction_numbers, buff=self.space_between_numbers)
for w in self.remove:
if j == w:
permission_print = False
if permission_print:
self.add_foreground_mobjects(element)
c = c + 1
</DeepExtract>
if self.animation == True:
self.play(DrawBorderThenFill(self.imagen))
elif self.show_numbers == False:
self.add(self.imagen)
self.wait(self.wait_time)
<DeepExtract>
for i in self.show_elements:
self.add_foreground_mobjects(self.imagen[i].set_color(self.color_element), TexMobject('%d' % i, color=self.color_element, background_stroke_width=0).scale(self.numbers_scale).next_to(self.imagen[i], self.direction_numbers, buff=self.space_between_numbers))
</DeepExtract>
|
def construct(self):
if self.svg_type == 'svg':
try:
pre_imagen = SVGMobject('%s' % self.file)
except:
pre_imagen = self.custom_object()
elif self.svg_type == 'text':
pre_imagen = self.import_text()
else:
pre_imagen = self.custom_object()
pre_imagen = pre_imagen
if self.get_cero:
self.imagen = pre_imagen[0]
else:
self.imagen = pre_imagen
self.imagen.set_color(color=self.color).set_style(fill_opacity=self.fill_opacity, stroke_color=self.stroke_color, stroke_width=self.stroke_width, stroke_opacity=self.stroke_opacity, sheen_factor=self.sheen_factor, sheen_direction=self.sheen_direction)
if self.gradient_color:
self.imagen.set_color_by_gradient(*self.gradient_colors)
if self.cycle_color:
get_cycle_color = it.cycle(self.cycle_colors)
for obj in self.imagen:
obj.set_color(next(get_cycle_color))
if self.width != None:
self.imagen.set_width(self.width)
elif self.height != None:
self.imagen.set_height(self.height)
elif self.scale != None:
self.imagen.scale(self.scale)
else:
self.imagen.set_width(FRAME_WIDTH)
if self.imagen.get_height() > FRAME_HEIGHT:
self.imagen.set_height(FRAME_HEIGHT)
self.imagen.rotate(self.angle)
if self.flip == True:
self.imagen.flip(self.flip_edge)
for st in self.remove_stroke:
self.imagen[st].set_stroke(None, 0)
for st in self.show_stroke:
self.imagen[st].set_stroke(None, self.show_stroke_stroke)
pass
if self.show_numbers == True:
self.imagen.copy().set_color(self.warning_color)
self.add(self.imagen.copy())
for j in range(len(self.imagen.copy())):
permission_print = True
for w in self.remove:
if j == w:
permission_print = False
if permission_print:
self.add(self.imagen[j])
if self.show_removers:
for obj in self.remove:
self.add_foreground_mobject(self.imagen.copy()[obj])
c = 0
for j in range(len(self.imagen.copy())):
permission_print = True
if self.number_type == 'TextMobject':
element = TexMobject('%d' % c, color=self.color_numbers, background_stroke_width=self.background_stroke_width)
else:
element = Text('%d' % c).set_color(self.color_numbers)
element.scale(self.numbers_scale)
element.next_to(self.imagen.copy()[j], self.direction_numbers, buff=self.space_between_numbers)
for w in self.remove:
if j == w:
permission_print = False
if permission_print:
self.add_foreground_mobjects(element)
c = c + 1
if self.animation == True:
self.play(DrawBorderThenFill(self.imagen))
elif self.show_numbers == False:
self.add(self.imagen)
self.wait(self.wait_time)
for i in self.show_elements:
self.add_foreground_mobjects(self.imagen[i].set_color(self.color_element), TexMobject('%d' % i, color=self.color_element, background_stroke_width=0).scale(self.numbers_scale).next_to(self.imagen[i], self.direction_numbers, buff=self.space_between_numbers))
|
AnimationsWithManim
|
positive
|
def __init__(self, graph):
self.graph = nx.convert_node_labels_to_integers(graph, first_label=1)
self.children = defaultdict(list)
self.parents = defaultdict(list)
self.treelets_predicate = defaultdict(list)
self.treelets_left = defaultdict(list)
self.treelets_right = defaultdict(list)
<DeepExtract>
for (src, trg) in self.graph.edges:
self.children[src].append(trg)
self.parents[trg].append(src)
for nid in self.graph.nodes:
if get_label(self.graph, nid, 'type') == 'constant':
succs = list(self.graph.successors(nid))
succs.sort(key=lambda x: get_label(self.graph, x, 'arg', 0))
combs = itertools.combinations(succs, 2)
for (left, right) in combs:
self.treelets_predicate[nid].append((left, right))
self.treelets_left[left].append((nid, right))
self.treelets_right[right].append((left, nid))
return
</DeepExtract>
return
|
def __init__(self, graph):
self.graph = nx.convert_node_labels_to_integers(graph, first_label=1)
self.children = defaultdict(list)
self.parents = defaultdict(list)
self.treelets_predicate = defaultdict(list)
self.treelets_left = defaultdict(list)
self.treelets_right = defaultdict(list)
for (src, trg) in self.graph.edges:
self.children[src].append(trg)
self.parents[trg].append(src)
for nid in self.graph.nodes:
if get_label(self.graph, nid, 'type') == 'constant':
succs = list(self.graph.successors(nid))
succs.sort(key=lambda x: get_label(self.graph, x, 'arg', 0))
combs = itertools.combinations(succs, 2)
for (left, right) in combs:
self.treelets_predicate[nid].append((left, right))
self.treelets_left[left].append((nid, right))
self.treelets_right[right].append((left, nid))
return
return
|
ccg2lambda
|
positive
|
def manipulator(str1, str2):
if '#' not in str1 and '#' not in str2:
if str1 == str2:
return 'Yes'
else:
return 'No'
<DeepExtract>
alphabets = string.ascii_uppercase
res = ''
l_s = len(str1)
nothashtag_inds_s1 = [i for (i, j) in enumerate(str1) if j != '#']
if l_s - nothashtag_inds_s1[-1] == 1:
p = 0
for q in range(1, len(nothashtag_inds_s1)):
x = nothashtag_inds_s1[q] - nothashtag_inds_s1[p]
n = x - 1
char = str1[nothashtag_inds_s1[p]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, 26, char_index)
new_index = n
res += all_chars[new_index]
p += 1
res += str1[nothashtag_inds_s1[-1]]
elif l_s - nothashtag_inds_s1[-1] > 1:
p = 0
for q in range(1, len(nothashtag_inds_s1)):
x = nothashtag_inds_s1[q] - nothashtag_inds_s1[p]
n = x - 1
char = str1[nothashtag_inds_s1[p]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, 26, char_index)
new_index = n
res += all_chars[new_index]
p += 1
n = l_s - nothashtag_inds_s1[-1] - 1
char = str1[nothashtag_inds_s1[-1]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, l_s, char_index)
new_index = n
res += all_chars[new_index]
res1 = res
</DeepExtract>
<DeepExtract>
alphabets = string.ascii_uppercase
res = ''
l_s = len(str2)
nothashtag_inds_s1 = [i for (i, j) in enumerate(str2) if j != '#']
if l_s - nothashtag_inds_s1[-1] == 1:
p = 0
for q in range(1, len(nothashtag_inds_s1)):
x = nothashtag_inds_s1[q] - nothashtag_inds_s1[p]
n = x - 1
char = str2[nothashtag_inds_s1[p]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, 26, char_index)
new_index = n
res += all_chars[new_index]
p += 1
res += str2[nothashtag_inds_s1[-1]]
elif l_s - nothashtag_inds_s1[-1] > 1:
p = 0
for q in range(1, len(nothashtag_inds_s1)):
x = nothashtag_inds_s1[q] - nothashtag_inds_s1[p]
n = x - 1
char = str2[nothashtag_inds_s1[p]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, 26, char_index)
new_index = n
res += all_chars[new_index]
p += 1
n = l_s - nothashtag_inds_s1[-1] - 1
char = str2[nothashtag_inds_s1[-1]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, l_s, char_index)
new_index = n
res += all_chars[new_index]
res2 = res
</DeepExtract>
if res1 == res2:
return 'Yes'
else:
return 'No'
|
def manipulator(str1, str2):
if '#' not in str1 and '#' not in str2:
if str1 == str2:
return 'Yes'
else:
return 'No'
alphabets = string.ascii_uppercase
res = ''
l_s = len(str1)
nothashtag_inds_s1 = [i for (i, j) in enumerate(str1) if j != '#']
if l_s - nothashtag_inds_s1[-1] == 1:
p = 0
for q in range(1, len(nothashtag_inds_s1)):
x = nothashtag_inds_s1[q] - nothashtag_inds_s1[p]
n = x - 1
char = str1[nothashtag_inds_s1[p]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, 26, char_index)
new_index = n
res += all_chars[new_index]
p += 1
res += str1[nothashtag_inds_s1[-1]]
elif l_s - nothashtag_inds_s1[-1] > 1:
p = 0
for q in range(1, len(nothashtag_inds_s1)):
x = nothashtag_inds_s1[q] - nothashtag_inds_s1[p]
n = x - 1
char = str1[nothashtag_inds_s1[p]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, 26, char_index)
new_index = n
res += all_chars[new_index]
p += 1
n = l_s - nothashtag_inds_s1[-1] - 1
char = str1[nothashtag_inds_s1[-1]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, l_s, char_index)
new_index = n
res += all_chars[new_index]
res1 = res
alphabets = string.ascii_uppercase
res = ''
l_s = len(str2)
nothashtag_inds_s1 = [i for (i, j) in enumerate(str2) if j != '#']
if l_s - nothashtag_inds_s1[-1] == 1:
p = 0
for q in range(1, len(nothashtag_inds_s1)):
x = nothashtag_inds_s1[q] - nothashtag_inds_s1[p]
n = x - 1
char = str2[nothashtag_inds_s1[p]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, 26, char_index)
new_index = n
res += all_chars[new_index]
p += 1
res += str2[nothashtag_inds_s1[-1]]
elif l_s - nothashtag_inds_s1[-1] > 1:
p = 0
for q in range(1, len(nothashtag_inds_s1)):
x = nothashtag_inds_s1[q] - nothashtag_inds_s1[p]
n = x - 1
char = str2[nothashtag_inds_s1[p]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, 26, char_index)
new_index = n
res += all_chars[new_index]
p += 1
n = l_s - nothashtag_inds_s1[-1] - 1
char = str2[nothashtag_inds_s1[-1]]
char_index = alphabets.index(char)
all_chars = getCircular(alphabets, l_s, char_index)
new_index = n
res += all_chars[new_index]
res2 = res
if res1 == res2:
return 'Yes'
else:
return 'No'
|
Competitive-Coding-Platforms
|
positive
|
@property
def func(self):
if self._func is None:
def fn(i):
return self.source[i].to_numpy()
<DeepExtract>
self._func = fn
</DeepExtract>
return self._func
|
@property
def func(self):
if self._func is None:
def fn(i):
return self.source[i].to_numpy()
self._func = fn
return self._func
|
climetlab
|
positive
|
def _place_node(node_id, x, min_y):
"""Determine x, y position for a node.
node_id: id of the node to be positioned
x: x position (depth) of the node
min_y: minimal y position of the node
(can't be above parent nodes)
returns: y offset relative to min_y
"""
self.processed.append(node_id)
try:
y_occupied = self.occupied[x]
except IndexError:
self.occupied.append(-1)
y_occupied = -1
y = max(min_y, y_occupied + 1)
try:
first_child = self.children[node_id][0]
y += self._place_node(first_child, x + 1, y)
except IndexError:
pass
self.occupied[x] = y
self.positions[node_id] = (x, y)
for child in self.children[node_id][1:]:
<DeepExtract>
self.processed.append(child)
try:
y_occupied = self.occupied[x + 1]
except IndexError:
self.occupied.append(-1)
y_occupied = -1
y = max(y, y_occupied + 1)
try:
first_child = self.children[child][0]
y += self._place_node(first_child, x + 1 + 1, y)
except IndexError:
pass
self.occupied[x + 1] = y
self.positions[child] = (x + 1, y)
for child in self.children[child][1:]:
self._place_node(child, x + 1 + 1, y)
return y - y
</DeepExtract>
return y - min_y
|
def _place_node(node_id, x, min_y):
"""Determine x, y position for a node.
node_id: id of the node to be positioned
x: x position (depth) of the node
min_y: minimal y position of the node
(can't be above parent nodes)
returns: y offset relative to min_y
"""
self.processed.append(node_id)
try:
y_occupied = self.occupied[x]
except IndexError:
self.occupied.append(-1)
y_occupied = -1
y = max(min_y, y_occupied + 1)
try:
first_child = self.children[node_id][0]
y += self._place_node(first_child, x + 1, y)
except IndexError:
pass
self.occupied[x] = y
self.positions[node_id] = (x, y)
for child in self.children[node_id][1:]:
self.processed.append(child)
try:
y_occupied = self.occupied[x + 1]
except IndexError:
self.occupied.append(-1)
y_occupied = -1
y = max(y, y_occupied + 1)
try:
first_child = self.children[child][0]
y += self._place_node(first_child, x + 1 + 1, y)
except IndexError:
pass
self.occupied[x + 1] = y
self.positions[child] = (x + 1, y)
for child in self.children[child][1:]:
self._place_node(child, x + 1 + 1, y)
return y - y
return y - min_y
|
eve-wspace
|
positive
|
@parameterized.expand(all_locale_params)
def test_skip_tokens(self, locale):
<DeepExtract>
self.info = locale.info
self.shortname = locale.shortname
</DeepExtract>
<DeepExtract>
if 'skip' in self.info:
tokens_list = self.info['skip']
self.assertIsInstance(tokens_list, list, 'Invalid type for {}: {} for locale {}'.format('skip', type(tokens_list).__name__, self.shortname))
invalid_tokens = [token for token in tokens_list if not token or not isinstance(token, str)]
self.assertFalse(invalid_tokens, 'Invalid tokens for {}: {} for locale {}'.format('skip', ', '.join(map(repr, invalid_tokens)), self.shortname))
</DeepExtract>
|
@parameterized.expand(all_locale_params)
def test_skip_tokens(self, locale):
self.info = locale.info
self.shortname = locale.shortname
if 'skip' in self.info:
tokens_list = self.info['skip']
self.assertIsInstance(tokens_list, list, 'Invalid type for {}: {} for locale {}'.format('skip', type(tokens_list).__name__, self.shortname))
invalid_tokens = [token for token in tokens_list if not token or not isinstance(token, str)]
self.assertFalse(invalid_tokens, 'Invalid tokens for {}: {} for locale {}'.format('skip', ', '.join(map(repr, invalid_tokens)), self.shortname))
|
dateparser
|
positive
|
def get_synset_embedding(synset, word_vectors, get_vector):
class_name = wn.synset(synset).lemma_names()
class_name = ', '.join([_.replace('_', ' ') for _ in class_name])
class_name = class_name.lower()
feat = np.zeros(feat_len)
options = class_name.split(',')
cnt_word = 0
for j in range(len(options)):
<DeepExtract>
try:
feat = get_vector(word_vectors, options[j].strip())
now_feat = feat
except:
feat = np.zeros(feat_len)
str_set = list(filter(None, re.split('[ \\-_]+', options[j].strip())))
cnt_word = 0
for i in range(len(str_set)):
temp_str = str_set[i]
try:
now_feat = get_vector(word_vectors, temp_str)
feat = feat + now_feat
cnt_word = cnt_word + 1
except:
continue
if cnt_word > 0:
feat = feat / cnt_word
now_feat = feat
</DeepExtract>
if np.abs(now_feat.sum()) > 0:
cnt_word += 1
feat += now_feat
if cnt_word > 0:
feat = feat / cnt_word
if np.abs(feat.sum()) == 0:
return feat
else:
return feat
|
def get_synset_embedding(synset, word_vectors, get_vector):
class_name = wn.synset(synset).lemma_names()
class_name = ', '.join([_.replace('_', ' ') for _ in class_name])
class_name = class_name.lower()
feat = np.zeros(feat_len)
options = class_name.split(',')
cnt_word = 0
for j in range(len(options)):
try:
feat = get_vector(word_vectors, options[j].strip())
now_feat = feat
except:
feat = np.zeros(feat_len)
str_set = list(filter(None, re.split('[ \\-_]+', options[j].strip())))
cnt_word = 0
for i in range(len(str_set)):
temp_str = str_set[i]
try:
now_feat = get_vector(word_vectors, temp_str)
feat = feat + now_feat
cnt_word = cnt_word + 1
except:
continue
if cnt_word > 0:
feat = feat / cnt_word
now_feat = feat
if np.abs(now_feat.sum()) > 0:
cnt_word += 1
feat += now_feat
if cnt_word > 0:
feat = feat / cnt_word
if np.abs(feat.sum()) == 0:
return feat
else:
return feat
|
Context-aware-ZSR
|
positive
|
@parameterized.expand([['true', True], ['false', False], ['certificate/path', 'certificate/path']])
@mock.patch('bigflow.deploy.deploy_dags_folder')
@mock.patch('bigflow.deploy.deploy_docker_image')
def test_should_use_provided_vault_endpoint_verify_value_when_deploy(self, verify, expected_verify, deploy_docker_image_mock, deploy_dags_folder_mock):
shutil.rmtree(Path.cwd() / '.image', ignore_errors=True)
<DeepExtract>
if '.image':
workdir = Path(os.path.join(os.getcwd(), '.image'))
workdir.mkdir(exist_ok=True)
else:
workdir = Path(os.getcwd())
f = workdir / 'imageinfo-123.toml'
if f.exists():
orig = f.read_bytes()
self.addCleanup(f.write_bytes, orig)
else:
self.addCleanup(f.unlink)
f.touch()
f.write_text('')
return f
</DeepExtract>
cli(['deploy', '--docker-repository', 'my-docker-repository', '--vault-endpoint', 'my-vault-endpoint', '--auth-method', 'vault', '--vault-secret', 'secrett', '--dags-bucket', 'my-dags-bucket', '--dags-dir', '/tmp/my-dags-dir', '--gcp-project-id', 'my-gcp-project-id', '--clear-dags-folder', '--vault-endpoint-verify', verify])
deploy_dags_folder_mock.assert_called_with(auth_method=AuthorizationType.VAULT, clear_dags_folder=True, dags_bucket='my-dags-bucket', dags_dir='/tmp/my-dags-dir', project_id='my-gcp-project-id', vault_endpoint='my-vault-endpoint', vault_secret='secrett', vault_endpoint_verify=expected_verify)
deploy_docker_image_mock.assert_called_with(auth_method=AuthorizationType.VAULT, docker_repository='my-docker-repository', image_tar_path='.image/imageinfo-123.toml', vault_endpoint='my-vault-endpoint', vault_secret='secrett', vault_endpoint_verify=expected_verify)
|
@parameterized.expand([['true', True], ['false', False], ['certificate/path', 'certificate/path']])
@mock.patch('bigflow.deploy.deploy_dags_folder')
@mock.patch('bigflow.deploy.deploy_docker_image')
def test_should_use_provided_vault_endpoint_verify_value_when_deploy(self, verify, expected_verify, deploy_docker_image_mock, deploy_dags_folder_mock):
shutil.rmtree(Path.cwd() / '.image', ignore_errors=True)
if '.image':
workdir = Path(os.path.join(os.getcwd(), '.image'))
workdir.mkdir(exist_ok=True)
else:
workdir = Path(os.getcwd())
f = workdir / 'imageinfo-123.toml'
if f.exists():
orig = f.read_bytes()
self.addCleanup(f.write_bytes, orig)
else:
self.addCleanup(f.unlink)
f.touch()
f.write_text('')
return f
cli(['deploy', '--docker-repository', 'my-docker-repository', '--vault-endpoint', 'my-vault-endpoint', '--auth-method', 'vault', '--vault-secret', 'secrett', '--dags-bucket', 'my-dags-bucket', '--dags-dir', '/tmp/my-dags-dir', '--gcp-project-id', 'my-gcp-project-id', '--clear-dags-folder', '--vault-endpoint-verify', verify])
deploy_dags_folder_mock.assert_called_with(auth_method=AuthorizationType.VAULT, clear_dags_folder=True, dags_bucket='my-dags-bucket', dags_dir='/tmp/my-dags-dir', project_id='my-gcp-project-id', vault_endpoint='my-vault-endpoint', vault_secret='secrett', vault_endpoint_verify=expected_verify)
deploy_docker_image_mock.assert_called_with(auth_method=AuthorizationType.VAULT, docker_repository='my-docker-repository', image_tar_path='.image/imageinfo-123.toml', vault_endpoint='my-vault-endpoint', vault_secret='secrett', vault_endpoint_verify=expected_verify)
|
bigflow
|
positive
|
def adjust_output(tree, args):
if not isinstance(tree, VS3):
raise Exception('Output adjustment must be done on a vertex shader')
<DeepExtract>
if hasattr(tree, 'stereo_const'):
(stereo_const, _) = (tree.stereo_const, 0)
if isinstance(tree, VertexShader) and args.use_nv_stereo_reg_vs:
tree.stereo_sampler = None
tree.nv_stereo_reg = Register(args.use_nv_stereo_reg_vs)
elif isinstance(tree, VertexShader) and args.stereo_sampler_vs:
tree.stereo_sampler = args.stereo_sampler_vs
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif isinstance(tree, PixelShader) and args.stereo_sampler_ps:
tree.stereo_sampler = args.stereo_sampler_ps
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif 's' in tree.reg_types and tree.def_stereo_sampler in tree.reg_types['s']:
tree.stereo_sampler = tree._find_free_reg('s', None)
debug('WARNING: SHADER ALREADY USES %s! USING %s FOR STEREO SAMPLER INSTEAD!' % (tree.def_stereo_sampler, tree.stereo_sampler))
if isinstance(tree, VertexShader):
acronym = 'VS'
quirk = 257
elif isinstance(tree, PixelShader):
acronym = 'PS'
quirk = 0
else:
raise AssertionError()
if not hasattr(tree, 'ini'):
tree.ini = []
tree.ini.append(('Def%sSampler' % acronym, str(quirk + tree.stereo_sampler.num), 'Shader already uses %s, so use %s instead:' % (tree.def_stereo_sampler, tree.stereo_sampler)))
else:
tree.stereo_sampler = tree.def_stereo_sampler
if args.adjust_multiply and args.adjust_multiply != -1:
w = args.adjust_multiply
tree.stereo_const = tree._find_free_reg('c', None, desired=preferred_stereo_const)
offset = 0
offset += tree.insert_decl()
offset += tree.insert_decl('def', [tree.stereo_const, x, y, z, w])
if tree.stereo_sampler is not None:
offset += tree.insert_decl('dcl_2d', [tree.stereo_sampler])
offset += tree.insert_decl()
(stereo_const, _) = (tree.stereo_const, offset)
</DeepExtract>
tmp_reg = tree._find_free_reg('r', VS3, desired=31)
success = False
for reg in args.adjust:
try:
<DeepExtract>
pos_reg = tree._find_free_reg('r', VS3)
if reg.startswith('dcl_texcoord'):
dst_reg = find_declaration(tree, reg, 'o').reg
elif reg.startswith('texcoord') or reg == 'position':
dst_reg = find_declaration(tree, 'dcl_%s' % reg, 'o').reg
else:
dst_reg = reg
replace_regs = {dst_reg: pos_reg}
tree.do_replacements(replace_regs, False)
append_vanity_comment(args, tree, 'Output adjustment inserted with')
if args.condition:
tree.add_inst('mov', [tmp_reg.x, args.condition])
tree.add_inst('if_eq', [tmp_reg.x, stereo_const.x])
tree.add_inst('texldl', [tmp_reg, stereo_const.z, tree.stereo_sampler])
separation = tmp_reg.x
convergence = tmp_reg.y
tree.add_inst('add', [tmp_reg.w, pos_reg.w, -convergence])
if not args.adjust_multiply:
tree.add_inst('mad', [pos_reg.x, tmp_reg.w, separation, pos_reg.x])
else:
tree.add_inst('mul', [tmp_reg.w, tmp_reg.w, separation])
if args.adjust_multiply and args.adjust_multiply != -1:
tree.add_inst('mul', [tmp_reg.w, tmp_reg.w, stereo_const.w])
if args.adjust_multiply and args.adjust_multiply == -1:
tree.add_inst('add', [pos_reg.x, pos_reg.x, -tmp_reg.w])
else:
tree.add_inst('add', [pos_reg.x, pos_reg.x, tmp_reg.w])
if args.condition:
tree.add_inst('endif', [])
tree.add_inst('mov', [dst_reg, pos_reg])
</DeepExtract>
success = True
except Exception as e:
if args.ignore_other_errors:
collected_errors.append((tree.filename, e))
import traceback, time
traceback.print_exc()
last_exc = e
continue
raise
if not success and last_exc is not None:
raise ExceptionDontReport()
|
def adjust_output(tree, args):
if not isinstance(tree, VS3):
raise Exception('Output adjustment must be done on a vertex shader')
if hasattr(tree, 'stereo_const'):
(stereo_const, _) = (tree.stereo_const, 0)
if isinstance(tree, VertexShader) and args.use_nv_stereo_reg_vs:
tree.stereo_sampler = None
tree.nv_stereo_reg = Register(args.use_nv_stereo_reg_vs)
elif isinstance(tree, VertexShader) and args.stereo_sampler_vs:
tree.stereo_sampler = args.stereo_sampler_vs
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif isinstance(tree, PixelShader) and args.stereo_sampler_ps:
tree.stereo_sampler = args.stereo_sampler_ps
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif 's' in tree.reg_types and tree.def_stereo_sampler in tree.reg_types['s']:
tree.stereo_sampler = tree._find_free_reg('s', None)
debug('WARNING: SHADER ALREADY USES %s! USING %s FOR STEREO SAMPLER INSTEAD!' % (tree.def_stereo_sampler, tree.stereo_sampler))
if isinstance(tree, VertexShader):
acronym = 'VS'
quirk = 257
elif isinstance(tree, PixelShader):
acronym = 'PS'
quirk = 0
else:
raise AssertionError()
if not hasattr(tree, 'ini'):
tree.ini = []
tree.ini.append(('Def%sSampler' % acronym, str(quirk + tree.stereo_sampler.num), 'Shader already uses %s, so use %s instead:' % (tree.def_stereo_sampler, tree.stereo_sampler)))
else:
tree.stereo_sampler = tree.def_stereo_sampler
if args.adjust_multiply and args.adjust_multiply != -1:
w = args.adjust_multiply
tree.stereo_const = tree._find_free_reg('c', None, desired=preferred_stereo_const)
offset = 0
offset += tree.insert_decl()
offset += tree.insert_decl('def', [tree.stereo_const, x, y, z, w])
if tree.stereo_sampler is not None:
offset += tree.insert_decl('dcl_2d', [tree.stereo_sampler])
offset += tree.insert_decl()
(stereo_const, _) = (tree.stereo_const, offset)
tmp_reg = tree._find_free_reg('r', VS3, desired=31)
success = False
for reg in args.adjust:
try:
pos_reg = tree._find_free_reg('r', VS3)
if reg.startswith('dcl_texcoord'):
dst_reg = find_declaration(tree, reg, 'o').reg
elif reg.startswith('texcoord') or reg == 'position':
dst_reg = find_declaration(tree, 'dcl_%s' % reg, 'o').reg
else:
dst_reg = reg
replace_regs = {dst_reg: pos_reg}
tree.do_replacements(replace_regs, False)
append_vanity_comment(args, tree, 'Output adjustment inserted with')
if args.condition:
tree.add_inst('mov', [tmp_reg.x, args.condition])
tree.add_inst('if_eq', [tmp_reg.x, stereo_const.x])
tree.add_inst('texldl', [tmp_reg, stereo_const.z, tree.stereo_sampler])
separation = tmp_reg.x
convergence = tmp_reg.y
tree.add_inst('add', [tmp_reg.w, pos_reg.w, -convergence])
if not args.adjust_multiply:
tree.add_inst('mad', [pos_reg.x, tmp_reg.w, separation, pos_reg.x])
else:
tree.add_inst('mul', [tmp_reg.w, tmp_reg.w, separation])
if args.adjust_multiply and args.adjust_multiply != -1:
tree.add_inst('mul', [tmp_reg.w, tmp_reg.w, stereo_const.w])
if args.adjust_multiply and args.adjust_multiply == -1:
tree.add_inst('add', [pos_reg.x, pos_reg.x, -tmp_reg.w])
else:
tree.add_inst('add', [pos_reg.x, pos_reg.x, tmp_reg.w])
if args.condition:
tree.add_inst('endif', [])
tree.add_inst('mov', [dst_reg, pos_reg])
success = True
except Exception as e:
if args.ignore_other_errors:
collected_errors.append((tree.filename, e))
import traceback, time
traceback.print_exc()
last_exc = e
continue
raise
if not success and last_exc is not None:
raise ExceptionDontReport()
|
3d-fixes
|
positive
|