| before (string, lengths 0-955k) | after (string, lengths 0-877k) | repo (string, lengths 1-74) | type (string, 1 class) |
|---|---|---|---|
def get_activations(images, sess, batch_size=50, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 256.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the disposable hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- A numpy array of dimension (num images, 2048) that contains the
activations of the given tensor when feeding inception with the query tensor.
"""
<DeepExtract>
layername = 'FID_Inception_Net/pool_3:0'
pool3 = sess.graph.get_tensor_by_name(layername)
ops = pool3.graph.get_operations()
for (op_idx, op) in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
if shape._dims is not None:
shape = [s.value for s in shape]
new_shape = []
for (j, s) in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
try:
o._shape = tf.TensorShape(new_shape)
except ValueError:
o._shape_val = tf.TensorShape(new_shape)
inception_layer = pool3
</DeepExtract>
d0 = images.shape[0]
if batch_size > d0:
print('warning: batch size is bigger than the data size. setting batch size to data size')
batch_size = d0
n_batches = d0 // batch_size
n_used_imgs = n_batches * batch_size
pred_arr = np.empty((n_used_imgs, 2048))
for i in range(n_batches):
if verbose:
print('\rPropagating batch %d/%d' % (i + 1, n_batches), end='', flush=True)
start = i * batch_size
end = start + batch_size
batch = images[start:end]
pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch})
pred_arr[start:end] = pred.reshape(batch_size, -1)
if verbose:
print(' done')
return pred_arr
|
def get_activations(images, sess, batch_size=50, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 256.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the disposable hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- A numpy array of dimension (num images, 2048) that contains the
activations of the given tensor when feeding inception with the query tensor.
"""
layername = 'FID_Inception_Net/pool_3:0'
pool3 = sess.graph.get_tensor_by_name(layername)
ops = pool3.graph.get_operations()
for (op_idx, op) in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
if shape._dims is not None:
shape = [s.value for s in shape]
new_shape = []
for (j, s) in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
try:
o._shape = tf.TensorShape(new_shape)
except ValueError:
o._shape_val = tf.TensorShape(new_shape)
inception_layer = pool3
d0 = images.shape[0]
if batch_size > d0:
print('warning: batch size is bigger than the data size. setting batch size to data size')
batch_size = d0
n_batches = d0 // batch_size
n_used_imgs = n_batches * batch_size
pred_arr = np.empty((n_used_imgs, 2048))
for i in range(n_batches):
if verbose:
print('\rPropagating batch %d/%d' % (i + 1, n_batches), end='', flush=True)
start = i * batch_size
end = start + batch_size
batch = images[start:end]
pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch})
pred_arr[start:end] = pred.reshape(batch_size, -1)
if verbose:
print(' done')
return pred_arr
|
BeautifyBasedOnGAN
|
positive
|
def decode(fs) -> int:
"""Reads in attributes from a file stream.
This updates the attributes value of the object
Args:
fs (io.BytesIO): file stream to read from
!!! Examples
```python
with open (my_data_file, 'rb') as f:
attributes = EfiVariableAttributes()
attributes.decode(f)
```
Returns:
Attributes in integer form
"""
attributes = struct.unpack(EfiVariableAttributes._struct_format, fs.read(EfiVariableAttributes._struct_size))[0]
<DeepExtract>
if isinstance(attributes, int):
self.attributes = attributes
elif isinstance(attributes, str):
self.attributes = EfiVariableAttributes.parse_attributes_str(attributes)
else:
raise TypeError(f'Invalid type: {type(attributes)}')
</DeepExtract>
return attributes
|
def decode(fs) -> int:
"""Reads in attributes from a file stream.
This updates the attributes value of the object
Args:
fs (io.BytesIO): file stream to read from
!!! Examples
```python
with open (my_data_file, 'rb') as f:
attributes = EfiVariableAttributes()
attributes.decode(f)
```
Returns:
Attributes in integer form
"""
attributes = struct.unpack(EfiVariableAttributes._struct_format, fs.read(EfiVariableAttributes._struct_size))[0]
if isinstance(attributes, int):
self.attributes = attributes
elif isinstance(attributes, str):
self.attributes = EfiVariableAttributes.parse_attributes_str(attributes)
else:
raise TypeError(f'Invalid type: {type(attributes)}')
return attributes
|
edk2-pytool-library
|
positive
|
def get_all_gray_label(angle_range):
<DeepExtract>
if mode in [0, 1, 3]:
coding_len = math.ceil(math.log(angle_range, 2))
elif mode == 2:
coding_len = math.floor(math.log(angle_range, 2) + 1) * 2
else:
raise Exception('Only support binary, gray and dichotomy coded label')
</DeepExtract>
return np.array(get_grace(['0', '1'], 1, coding_len))
|
def get_all_gray_label(angle_range):
if mode in [0, 1, 3]:
coding_len = math.ceil(math.log(angle_range, 2))
elif mode == 2:
coding_len = math.floor(math.log(angle_range, 2) + 1) * 2
else:
raise Exception('Only support binary, gray and dichotomy coded label')
return np.array(get_grace(['0', '1'], 1, coding_len))
|
DCL_RetinaNet_Tensorflow
|
positive
|
def test_config_check_publish_root(self):
<DeepExtract>
self.config['confluence_publish'] = True
self.config['confluence_server_url'] = 'https://intranet-wiki.example.com/'
self.config['confluence_space_key'] = 'DUMMY'
</DeepExtract>
self.config['confluence_publish_root'] = 123456
<DeepExtract>
config = config if config else self.minimal_config
dataset = dataset if dataset else self.dataset
with prepare_sphinx(dataset, config=config, extra_config=edefs) as app:
env = BuildEnvironment(app)
builder = ConfluenceBuilder(app, env)
class MockedPublisher:
def init(self, config, cloud=None):
pass
def connect(self):
pass
def disconnect(self):
pass
builder.publisher = MockedPublisher()
for (k, v) in self.config.items():
setattr(builder.config, k, v)
builder.init()
</DeepExtract>
self.config['confluence_publish_root'] = '123456'
<DeepExtract>
config = config if config else self.minimal_config
dataset = dataset if dataset else self.dataset
with prepare_sphinx(dataset, config=config, extra_config=edefs) as app:
env = BuildEnvironment(app)
builder = ConfluenceBuilder(app, env)
class MockedPublisher:
def init(self, config, cloud=None):
pass
def connect(self):
pass
def disconnect(self):
pass
builder.publisher = MockedPublisher()
for (k, v) in self.config.items():
setattr(builder.config, k, v)
builder.init()
</DeepExtract>
self.config['confluence_publish_root'] = 0
with self.assertRaises(ConfluenceConfigurationError):
<DeepExtract>
config = config if config else self.minimal_config
dataset = dataset if dataset else self.dataset
with prepare_sphinx(dataset, config=config, extra_config=edefs) as app:
env = BuildEnvironment(app)
builder = ConfluenceBuilder(app, env)
class MockedPublisher:
def init(self, config, cloud=None):
pass
def connect(self):
pass
def disconnect(self):
pass
builder.publisher = MockedPublisher()
for (k, v) in self.config.items():
setattr(builder.config, k, v)
builder.init()
</DeepExtract>
self.config['confluence_publish_root'] = -123456
with self.assertRaises(ConfluenceConfigurationError):
<DeepExtract>
config = config if config else self.minimal_config
dataset = dataset if dataset else self.dataset
with prepare_sphinx(dataset, config=config, extra_config=edefs) as app:
env = BuildEnvironment(app)
builder = ConfluenceBuilder(app, env)
class MockedPublisher:
def init(self, config, cloud=None):
pass
def connect(self):
pass
def disconnect(self):
pass
builder.publisher = MockedPublisher()
for (k, v) in self.config.items():
setattr(builder.config, k, v)
builder.init()
</DeepExtract>
|
def test_config_check_publish_root(self):
self.config['confluence_publish'] = True
self.config['confluence_server_url'] = 'https://intranet-wiki.example.com/'
self.config['confluence_space_key'] = 'DUMMY'
self.config['confluence_publish_root'] = 123456
config = config if config else self.minimal_config
dataset = dataset if dataset else self.dataset
with prepare_sphinx(dataset, config=config, extra_config=edefs) as app:
env = BuildEnvironment(app)
builder = ConfluenceBuilder(app, env)
class MockedPublisher:
def init(self, config, cloud=None):
pass
def connect(self):
pass
def disconnect(self):
pass
builder.publisher = MockedPublisher()
for (k, v) in self.config.items():
setattr(builder.config, k, v)
builder.init()
self.config['confluence_publish_root'] = '123456'
config = config if config else self.minimal_config
dataset = dataset if dataset else self.dataset
with prepare_sphinx(dataset, config=config, extra_config=edefs) as app:
env = BuildEnvironment(app)
builder = ConfluenceBuilder(app, env)
class MockedPublisher:
def init(self, config, cloud=None):
pass
def connect(self):
pass
def disconnect(self):
pass
builder.publisher = MockedPublisher()
for (k, v) in self.config.items():
setattr(builder.config, k, v)
builder.init()
self.config['confluence_publish_root'] = 0
with self.assertRaises(ConfluenceConfigurationError):
config = config if config else self.minimal_config
dataset = dataset if dataset else self.dataset
with prepare_sphinx(dataset, config=config, extra_config=edefs) as app:
env = BuildEnvironment(app)
builder = ConfluenceBuilder(app, env)
class MockedPublisher:
def init(self, config, cloud=None):
pass
def connect(self):
pass
def disconnect(self):
pass
builder.publisher = MockedPublisher()
for (k, v) in self.config.items():
setattr(builder.config, k, v)
builder.init()
self.config['confluence_publish_root'] = -123456
with self.assertRaises(ConfluenceConfigurationError):
config = config if config else self.minimal_config
dataset = dataset if dataset else self.dataset
with prepare_sphinx(dataset, config=config, extra_config=edefs) as app:
env = BuildEnvironment(app)
builder = ConfluenceBuilder(app, env)
class MockedPublisher:
def init(self, config, cloud=None):
pass
def connect(self):
pass
def disconnect(self):
pass
builder.publisher = MockedPublisher()
for (k, v) in self.config.items():
setattr(builder.config, k, v)
builder.init()
|
confluencebuilder
|
positive
|
def testStatusWithNoAppScalefile(self):
appscale = AppScale()
<DeepExtract>
flexmock(os)
os.should_receive('getcwd').and_return('/boo')
mock = flexmock(sys.modules['__builtin__'])
mock.should_call('open')
mock.should_receive('open').with_args('/boo/' + appscale.APPSCALEFILE).and_raise(IOError)
</DeepExtract>
self.assertRaises(AppScalefileException, appscale.status)
|
def testStatusWithNoAppScalefile(self):
appscale = AppScale()
flexmock(os)
os.should_receive('getcwd').and_return('/boo')
mock = flexmock(sys.modules['__builtin__'])
mock.should_call('open')
mock.should_receive('open').with_args('/boo/' + appscale.APPSCALEFILE).and_raise(IOError)
self.assertRaises(AppScalefileException, appscale.status)
|
appscale-tools
|
positive
|
def maybe_download_weights_from_s3(weights_file: str, *, auto_expand_tars: bool=False) -> str:
"""
:param weights_file:
:param auto_expand_tars:
:return:
"""
saved_model_dir = paths.runtime_paths().saved_model_dir
filepath = os.path.join(saved_model_dir, weights_file)
if os.path.isfile(filepath):
log.info(f'Using available {weights_file} in Armory `saved_model_dir`')
else:
log.info(f'{weights_file} not found in Armory `saved_model_dir`. Attempting to pull weights from S3')
try:
<DeepExtract>
verify_ssl = get_verify_ssl()
if not os.path.isfile(f'{saved_model_dir}/{weights_file}'):
client = boto3.client('s3', config=Config(signature_version=UNSIGNED), verify=verify_ssl)
try:
log.info(f"downloading S3 data file {'armory-public-data'}/{f'model-weights/{weights_file}'}")
total = client.head_object(Bucket='armory-public-data', Key=f'model-weights/{weights_file}')['ContentLength']
if is_progress():
with ProgressPercentage(client, 'armory-public-data', f'model-weights/{weights_file}', total) as Callback:
client.download_file('armory-public-data', f'model-weights/{weights_file}', f'{saved_model_dir}/{weights_file}', Callback=Callback)
else:
client.download_file('armory-public-data', f'model-weights/{weights_file}', f'{saved_model_dir}/{weights_file}')
except ClientError:
raise KeyError(f"File {f'model-weights/{weights_file}'} not available in {'armory-public-data'} bucket.")
else:
log.info(f"Reusing cached file {f'{saved_model_dir}/{weights_file}'}...")
</DeepExtract>
except KeyError:
if 'ARMORY_INCLUDE_SUBMISSION_BUCKETS' in os.environ and os.getenv('ARMORY_INCLUDE_SUBMISSION_BUCKETS') != '':
try:
<DeepExtract>
verify_ssl = get_verify_ssl()
if not os.path.isfile(f'{saved_model_dir}/{weights_file}'):
client = boto3.client('s3', aws_access_key_id=os.getenv('ARMORY_PRIVATE_S3_ID'), aws_secret_access_key=os.getenv('ARMORY_PRIVATE_S3_KEY'), verify=verify_ssl)
try:
log.info(f"downloading S3 data file {'armory-submission-data'}/{f'model-weights/{weights_file}'}")
total = client.head_object(Bucket='armory-submission-data', Key=f'model-weights/{weights_file}')['ContentLength']
if is_progress():
with ProgressPercentage(client, 'armory-submission-data', f'model-weights/{weights_file}', total) as Callback:
client.download_file('armory-submission-data', f'model-weights/{weights_file}', f'{saved_model_dir}/{weights_file}', Callback=Callback)
else:
client.download_file('armory-submission-data', f'model-weights/{weights_file}', f'{saved_model_dir}/{weights_file}')
except ClientError:
raise KeyError(f"File {f'model-weights/{weights_file}'} not available in {'armory-submission-data'} bucket.")
else:
log.info('Reusing cached S3 data file...')
</DeepExtract>
except KeyError:
raise ValueError(f'{weights_file} was not found in the armory public & submission S3 buckets.')
else:
raise ValueError(f"{weights_file} was not found in the armory S3 bucket. If you're attempting to load a custom set of weights for your model be sure that they are available in the armory `saved_model_dir` directory on your host environment.")
if auto_expand_tars:
if tarfile.is_tarfile(filepath):
log.debug(f'Detected model weights file {weights_file} as a tar archive')
with tarfile.open(filepath) as tar:
dirs = [fi.name for fi in tar.getmembers() if fi.isdir()]
commonpath = os.path.commonpath(tar.getnames())
if not commonpath or commonpath not in dirs:
raise PermissionError(f'{weights_file} does not expand into a subdirectory. Weights files submitted as tarballs must expand into a subdirectory.')
full_path = os.path.join(saved_model_dir, commonpath)
if os.path.exists(full_path):
log.warning(f'Model weights folder {commonpath} from {weights_file} already exists')
log.warning(f'Skipping auto-unpacking of {weights_file}')
log.warning(f'Delete {commonpath} manually to force unpacking')
else:
log.info(f'Auto-unpacking model weights from {weights_file}')
tar.extractall(path=saved_model_dir)
filepath = commonpath
return filepath
|
def maybe_download_weights_from_s3(weights_file: str, *, auto_expand_tars: bool=False) -> str:
"""
:param weights_file:
:param auto_expand_tars:
:return:
"""
saved_model_dir = paths.runtime_paths().saved_model_dir
filepath = os.path.join(saved_model_dir, weights_file)
if os.path.isfile(filepath):
log.info(f'Using available {weights_file} in Armory `saved_model_dir`')
else:
log.info(f'{weights_file} not found in Armory `saved_model_dir`. Attempting to pull weights from S3')
try:
verify_ssl = get_verify_ssl()
if not os.path.isfile(f'{saved_model_dir}/{weights_file}'):
client = boto3.client('s3', config=Config(signature_version=UNSIGNED), verify=verify_ssl)
try:
log.info(f"downloading S3 data file {'armory-public-data'}/{f'model-weights/{weights_file}'}")
total = client.head_object(Bucket='armory-public-data', Key=f'model-weights/{weights_file}')['ContentLength']
if is_progress():
with ProgressPercentage(client, 'armory-public-data', f'model-weights/{weights_file}', total) as Callback:
client.download_file('armory-public-data', f'model-weights/{weights_file}', f'{saved_model_dir}/{weights_file}', Callback=Callback)
else:
client.download_file('armory-public-data', f'model-weights/{weights_file}', f'{saved_model_dir}/{weights_file}')
except ClientError:
raise KeyError(f"File {f'model-weights/{weights_file}'} not available in {'armory-public-data'} bucket.")
else:
log.info(f"Reusing cached file {f'{saved_model_dir}/{weights_file}'}...")
except KeyError:
if 'ARMORY_INCLUDE_SUBMISSION_BUCKETS' in os.environ and os.getenv('ARMORY_INCLUDE_SUBMISSION_BUCKETS') != '':
try:
verify_ssl = get_verify_ssl()
if not os.path.isfile(f'{saved_model_dir}/{weights_file}'):
client = boto3.client('s3', aws_access_key_id=os.getenv('ARMORY_PRIVATE_S3_ID'), aws_secret_access_key=os.getenv('ARMORY_PRIVATE_S3_KEY'), verify=verify_ssl)
try:
log.info(f"downloading S3 data file {'armory-submission-data'}/{f'model-weights/{weights_file}'}")
total = client.head_object(Bucket='armory-submission-data', Key=f'model-weights/{weights_file}')['ContentLength']
if is_progress():
with ProgressPercentage(client, 'armory-submission-data', f'model-weights/{weights_file}', total) as Callback:
client.download_file('armory-submission-data', f'model-weights/{weights_file}', f'{saved_model_dir}/{weights_file}', Callback=Callback)
else:
client.download_file('armory-submission-data', f'model-weights/{weights_file}', f'{saved_model_dir}/{weights_file}')
except ClientError:
raise KeyError(f"File {f'model-weights/{weights_file}'} not available in {'armory-submission-data'} bucket.")
else:
log.info('Reusing cached S3 data file...')
except KeyError:
raise ValueError(f'{weights_file} was not found in the armory public & submission S3 buckets.')
else:
raise ValueError(f"{weights_file} was not found in the armory S3 bucket. If you're attempting to load a custom set of weights for your model be sure that they are available in the armory `saved_model_dir` directory on your host environment.")
if auto_expand_tars:
if tarfile.is_tarfile(filepath):
log.debug(f'Detected model weights file {weights_file} as a tar archive')
with tarfile.open(filepath) as tar:
dirs = [fi.name for fi in tar.getmembers() if fi.isdir()]
commonpath = os.path.commonpath(tar.getnames())
if not commonpath or commonpath not in dirs:
raise PermissionError(f'{weights_file} does not expand into a subdirectory. Weights files submitted as tarballs must expand into a subdirectory.')
full_path = os.path.join(saved_model_dir, commonpath)
if os.path.exists(full_path):
log.warning(f'Model weights folder {commonpath} from {weights_file} already exists')
log.warning(f'Skipping auto-unpacking of {weights_file}')
log.warning(f'Delete {commonpath} manually to force unpacking')
else:
log.info(f'Auto-unpacking model weights from {weights_file}')
tar.extractall(path=saved_model_dir)
filepath = commonpath
return filepath
|
armory
|
positive
|
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
<DeepExtract>
output_tensor = tf.contrib.layers.layer_norm(inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
</DeepExtract>
<DeepExtract>
if dropout_prob is None or dropout_prob == 0.0:
output_tensor = output_tensor
output = tf.nn.dropout(output_tensor, 1.0 - dropout_prob)
output_tensor = output
</DeepExtract>
return output_tensor
|
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = tf.contrib.layers.layer_norm(inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
if dropout_prob is None or dropout_prob == 0.0:
output_tensor = output_tensor
output = tf.nn.dropout(output_tensor, 1.0 - dropout_prob)
output_tensor = output
return output_tensor
|
ChineseEHRBert
|
positive
|
def test_get_tokens_on_inactive_app(self):
anna = self._create_user(self.test_username, '123456')
capability_a = self._create_capability('token_management', [['GET', '/v1/o/tokens/']], default=False)
application = self._create_application('an app', grant_type=Application.GRANT_AUTHORIZATION_CODE, redirect_uris='http://example.it')
application.scope.add(capability_a)
application.active = False
application.save()
with self.assertRaises(Exception) as e:
<DeepExtract>
self.client.force_login(anna)
payload = {'client_id': application.client_id, 'response_type': 'code', 'redirect_uri': application.redirect_uris, 'scope': application.scopes().split(' '), 'expires_in': 86400, 'allow': True}
if application.authorization_grant_type == Application.GRANT_IMPLICIT:
payload['response_type'] = 'token'
response = self.client.post('/v1/o/authorize/', data=payload)
self.client.logout()
if response.status_code != 302:
raise Exception(response.context_data)
self.assertEqual(response.status_code, 302)
if application.authorization_grant_type == Application.GRANT_IMPLICIT:
fragment = parse_qs(urlparse(response['Location']).fragment)
tkn = fragment.pop('access_token')[0]
else:
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict.pop('code')
token_request_data = {'grant_type': 'authorization_code', 'code': authorization_code, 'redirect_uri': application.redirect_uris, 'client_id': application.client_id, 'client_secret': application.client_secret}
response = self.client.post('/v1/o/token/', data=token_request_data)
self.assertEqual(response.status_code, 200)
tkn = response.json()['access_token']
t = AccessToken.objects.get(token=tkn)
return t
</DeepExtract>
msg_expected = 'invalid_client'
err_msg = str(e.exception)
found = True
index = -1
try:
index = err_msg.index(msg_expected)
except ValueError:
found = False
self.assertTrue(index >= 0)
self.assertTrue(found)
application.active = True
application.save()
|
def test_get_tokens_on_inactive_app(self):
anna = self._create_user(self.test_username, '123456')
capability_a = self._create_capability('token_management', [['GET', '/v1/o/tokens/']], default=False)
application = self._create_application('an app', grant_type=Application.GRANT_AUTHORIZATION_CODE, redirect_uris='http://example.it')
application.scope.add(capability_a)
application.active = False
application.save()
with self.assertRaises(Exception) as e:
self.client.force_login(anna)
payload = {'client_id': application.client_id, 'response_type': 'code', 'redirect_uri': application.redirect_uris, 'scope': application.scopes().split(' '), 'expires_in': 86400, 'allow': True}
if application.authorization_grant_type == Application.GRANT_IMPLICIT:
payload['response_type'] = 'token'
response = self.client.post('/v1/o/authorize/', data=payload)
self.client.logout()
if response.status_code != 302:
raise Exception(response.context_data)
self.assertEqual(response.status_code, 302)
if application.authorization_grant_type == Application.GRANT_IMPLICIT:
fragment = parse_qs(urlparse(response['Location']).fragment)
tkn = fragment.pop('access_token')[0]
else:
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict.pop('code')
token_request_data = {'grant_type': 'authorization_code', 'code': authorization_code, 'redirect_uri': application.redirect_uris, 'client_id': application.client_id, 'client_secret': application.client_secret}
response = self.client.post('/v1/o/token/', data=token_request_data)
self.assertEqual(response.status_code, 200)
tkn = response.json()['access_token']
t = AccessToken.objects.get(token=tkn)
return t
msg_expected = 'invalid_client'
err_msg = str(e.exception)
found = True
index = -1
try:
index = err_msg.index(msg_expected)
except ValueError:
found = False
self.assertTrue(index >= 0)
self.assertTrue(found)
application.active = True
application.save()
|
bluebutton-web-server
|
positive
|
def __init__(self, total=None, connect=_Default, read=_Default):
<DeepExtract>
if 'connect' is _Default:
self._connect = connect.DEFAULT_TIMEOUT
if 'connect' is None or 'connect' is connect.DEFAULT_TIMEOUT:
self._connect = 'connect'
try:
float('connect')
except (TypeError, ValueError):
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'connect'))
try:
if 'connect' < 0:
raise ValueError('Attempted to set %s timeout to %s, but the timeout cannot be set to a value less than 0.' % (name, 'connect'))
except TypeError:
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'connect'))
self._connect = 'connect'
</DeepExtract>
<DeepExtract>
if 'read' is _Default:
self._read = read.DEFAULT_TIMEOUT
if 'read' is None or 'read' is read.DEFAULT_TIMEOUT:
self._read = 'read'
try:
float('read')
except (TypeError, ValueError):
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'read'))
try:
if 'read' < 0:
raise ValueError('Attempted to set %s timeout to %s, but the timeout cannot be set to a value less than 0.' % (name, 'read'))
except TypeError:
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'read'))
self._read = 'read'
</DeepExtract>
<DeepExtract>
if 'total' is _Default:
self.total = total.DEFAULT_TIMEOUT
if 'total' is None or 'total' is total.DEFAULT_TIMEOUT:
self.total = 'total'
try:
float('total')
except (TypeError, ValueError):
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'total'))
try:
if 'total' < 0:
raise ValueError('Attempted to set %s timeout to %s, but the timeout cannot be set to a value less than 0.' % (name, 'total'))
except TypeError:
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'total'))
self.total = 'total'
</DeepExtract>
self._start_connect = None
|
def __init__(self, total=None, connect=_Default, read=_Default):
if 'connect' is _Default:
self._connect = connect.DEFAULT_TIMEOUT
if 'connect' is None or 'connect' is connect.DEFAULT_TIMEOUT:
self._connect = 'connect'
try:
float('connect')
except (TypeError, ValueError):
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'connect'))
try:
if 'connect' < 0:
raise ValueError('Attempted to set %s timeout to %s, but the timeout cannot be set to a value less than 0.' % (name, 'connect'))
except TypeError:
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'connect'))
self._connect = 'connect'
if 'read' is _Default:
self._read = read.DEFAULT_TIMEOUT
if 'read' is None or 'read' is read.DEFAULT_TIMEOUT:
self._read = 'read'
try:
float('read')
except (TypeError, ValueError):
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'read'))
try:
if 'read' < 0:
raise ValueError('Attempted to set %s timeout to %s, but the timeout cannot be set to a value less than 0.' % (name, 'read'))
except TypeError:
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'read'))
self._read = 'read'
if 'total' is _Default:
self.total = total.DEFAULT_TIMEOUT
if 'total' is None or 'total' is total.DEFAULT_TIMEOUT:
self.total = 'total'
try:
float('total')
except (TypeError, ValueError):
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'total'))
try:
if 'total' < 0:
raise ValueError('Attempted to set %s timeout to %s, but the timeout cannot be set to a value less than 0.' % (name, 'total'))
except TypeError:
raise ValueError('Timeout value %s was %s, but it must be an int or float.' % (name, 'total'))
self.total = 'total'
self._start_connect = None
|
cachewarmer
|
positive
|
def __init__(self, w, b, noise=0.01, num_train=1000, num_val=1000, batch_size=32):
super().__init__()
<DeepExtract>
raise NotImplemented
</DeepExtract>
n = num_train + num_val
key = jax.random.PRNGKey(0)
(key1, key2) = jax.random.split(key)
self.X = jax.random.normal(key1, (n, w.shape[0]))
noise = jax.random.normal(key2, (n, 1)) * noise
self.y = d2l.matmul(self.X, d2l.reshape(w, (-1, 1))) + b + noise
|
def __init__(self, w, b, noise=0.01, num_train=1000, num_val=1000, batch_size=32):
super().__init__()
raise NotImplemented
n = num_train + num_val
key = jax.random.PRNGKey(0)
(key1, key2) = jax.random.split(key)
self.X = jax.random.normal(key1, (n, w.shape[0]))
noise = jax.random.normal(key2, (n, 1)) * noise
self.y = d2l.matmul(self.X, d2l.reshape(w, (-1, 1))) + b + noise
|
d2l-en
|
positive
|
def forward(self, input, target, batch_mean=True):
"""
Args:
input (batch_size, n_sources, *)
target (batch_size, n_sources, *)
Returns:
loss (batch_size,): minimum loss for each data
pattern (batch_size,): permutation indices
"""
<DeepExtract>
if self.patterns is None:
if n_sources is None:
n_sources = input.size(1)
self.patterns = list(itertools.permutations(range(n_sources)))
self.patterns = torch.Tensor(self.patterns).long()
P = len(self.patterns)
possible_loss = []
for idx in range(P):
pattern = self.patterns[idx]
loss = self.criterion(input, target[:, pattern], batch_mean=False)
possible_loss.append(loss)
possible_loss = torch.stack(possible_loss, dim=1)
if hasattr(self.criterion, 'maximize') and self.criterion.maximize:
(loss, indices) = torch.max(possible_loss, dim=1)
else:
(loss, indices) = torch.min(possible_loss, dim=1)
if batch_mean:
loss = loss.mean(dim=0)
(loss, pattern) = (loss, self.patterns[indices])
</DeepExtract>
return (loss, pattern)
|
def forward(self, input, target, batch_mean=True):
"""
Args:
input (batch_size, n_sources, *)
target (batch_size, n_sources, *)
Returns:
loss (batch_size,): minimum loss for each data
pattern (batch_size,): permutation indices
"""
if self.patterns is None:
if n_sources is None:
n_sources = input.size(1)
self.patterns = list(itertools.permutations(range(n_sources)))
self.patterns = torch.Tensor(self.patterns).long()
P = len(self.patterns)
possible_loss = []
for idx in range(P):
pattern = self.patterns[idx]
loss = self.criterion(input, target[:, pattern], batch_mean=False)
possible_loss.append(loss)
possible_loss = torch.stack(possible_loss, dim=1)
if hasattr(self.criterion, 'maximize') and self.criterion.maximize:
(loss, indices) = torch.max(possible_loss, dim=1)
else:
(loss, indices) = torch.min(possible_loss, dim=1)
if batch_mean:
loss = loss.mean(dim=0)
(loss, pattern) = (loss, self.patterns[indices])
return (loss, pattern)
|
DNN-based_source_separation
|
positive
|
def eval_step(self):
self.data.next()
B = self.inputs('B')
<DeepExtract>
net = self.nets.net
output = net(B)
self.losses.net = output.sum()
self.results.output = output.sum().item()
</DeepExtract>
self.submodel.eval_step()
|
def eval_step(self):
self.data.next()
B = self.inputs('B')
net = self.nets.net
output = net(B)
self.losses.net = output.sum()
self.results.output = output.sum().item()
self.submodel.eval_step()
|
cortex
|
positive
|
@patch('backend.lambdas.tasks.generate_queries.s3.Bucket')
@patch('backend.lambdas.tasks.generate_queries.get_table')
@patch('backend.lambdas.tasks.generate_queries.get_partitions')
def test_it_removes_queries_with_no_applicable_matches_for_partitioned_data(self, get_partitions_mock, get_table_mock, bucket_mock):
put_object_mock = MagicMock()
bucket_mock.return_value = put_object_mock
columns = [{'Name': 'customer_id'}]
partition_keys = ['product_category']
partitions = [['Books'], ['Beauty']]
<DeepExtract>
get_table_mock.return_value = {'Name': table_name, 'DatabaseName': 'test', 'Owner': 'test', 'CreateTime': 1572438253.0, 'UpdateTime': 1572438253.0, 'LastAccessTime': 0.0, 'Retention': 0, 'StorageDescriptor': {'Columns': [{'Name': col['Name'], 'Type': col.get('Type', 'string')} for col in columns], 'Location': 's3://bucket/location', 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', 'Compressed': False, 'NumberOfBuckets': -1, 'SerdeInfo': {'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe', 'Parameters': {'serialization.format': '1'}}, 'BucketColumns': [], 'SortColumns': [], 'Parameters': {}, 'SkewedInfo': {'SkewedColumnNames': [], 'SkewedColumnValues': [], 'SkewedColumnValueLocationMaps': {}}, 'StoredAsSubDirectories': False}, 'PartitionKeys': [{'Name': partition_key, 'Type': partition_keys_type} for partition_key in partition_keys], 'TableType': 'EXTERNAL_TABLE', 'Parameters': {'EXTERNAL': 'TRUE'}}
</DeepExtract>
get_partitions_mock.return_value = [partition_stub(p, columns) for p in partitions]
resp = generate_athena_queries({'DataMapperId': 'A', 'QueryExecutor': 'athena', 'Columns': [col['Name'] for col in columns], 'Format': 'parquet', 'QueryExecutorParameters': {'DataCatalogProvider': 'glue', 'Database': 'test_db', 'Table': 'test_table'}}, [{'MatchId': '123', 'DataMappers': ['C'], 'DeletionQueueItemId': 'id1234'}], 'job_1234567890')
assert resp == []
assert not put_object_mock.put_object.called
|
@patch('backend.lambdas.tasks.generate_queries.s3.Bucket')
@patch('backend.lambdas.tasks.generate_queries.get_table')
@patch('backend.lambdas.tasks.generate_queries.get_partitions')
def test_it_removes_queries_with_no_applicable_matches_for_partitioned_data(self, get_partitions_mock, get_table_mock, bucket_mock):
put_object_mock = MagicMock()
bucket_mock.return_value = put_object_mock
columns = [{'Name': 'customer_id'}]
partition_keys = ['product_category']
partitions = [['Books'], ['Beauty']]
get_table_mock.return_value = {'Name': table_name, 'DatabaseName': 'test', 'Owner': 'test', 'CreateTime': 1572438253.0, 'UpdateTime': 1572438253.0, 'LastAccessTime': 0.0, 'Retention': 0, 'StorageDescriptor': {'Columns': [{'Name': col['Name'], 'Type': col.get('Type', 'string')} for col in columns], 'Location': 's3://bucket/location', 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', 'Compressed': False, 'NumberOfBuckets': -1, 'SerdeInfo': {'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe', 'Parameters': {'serialization.format': '1'}}, 'BucketColumns': [], 'SortColumns': [], 'Parameters': {}, 'SkewedInfo': {'SkewedColumnNames': [], 'SkewedColumnValues': [], 'SkewedColumnValueLocationMaps': {}}, 'StoredAsSubDirectories': False}, 'PartitionKeys': [{'Name': partition_key, 'Type': partition_keys_type} for partition_key in partition_keys], 'TableType': 'EXTERNAL_TABLE', 'Parameters': {'EXTERNAL': 'TRUE'}}
get_partitions_mock.return_value = [partition_stub(p, columns) for p in partitions]
resp = generate_athena_queries({'DataMapperId': 'A', 'QueryExecutor': 'athena', 'Columns': [col['Name'] for col in columns], 'Format': 'parquet', 'QueryExecutorParameters': {'DataCatalogProvider': 'glue', 'Database': 'test_db', 'Table': 'test_table'}}, [{'MatchId': '123', 'DataMappers': ['C'], 'DeletionQueueItemId': 'id1234'}], 'job_1234567890')
assert resp == []
assert not put_object_mock.put_object.called
|
amazon-s3-find-and-forget
|
positive
|
def do_request(self, req):
if DEBUG:
print('requesting', req.get_method(), req.get_full_url())
<DeepExtract>
opener = urllib_build_opener()
</DeepExtract>
opener.add_handler(self._cookie_processor)
try:
self._response = opener.open(req)
except HTTPError as e:
self._response = e
self.url = self._response.geturl()
self.path = Request(self.url).selector
self.data = self._response.read()
self.status = self._response.code
self._forms = None
self.form = None
return self.get_response()
|
def do_request(self, req):
if DEBUG:
print('requesting', req.get_method(), req.get_full_url())
opener = urllib_build_opener()
opener.add_handler(self._cookie_processor)
try:
self._response = opener.open(req)
except HTTPError as e:
self._response = e
self.url = self._response.geturl()
self.path = Request(self.url).selector
self.data = self._response.read()
self.status = self._response.code
self._forms = None
self.form = None
return self.get_response()
|
cosa-nostra
|
positive
|
def reset(self):
obs = self.wrapped_env.reset()
goal = self.sample_goal()
<DeepExtract>
self.desired_goal = goal
if self._goal_sampling_mode in {'presampled', 'env'}:
self.wrapped_env.set_goal(goal)
</DeepExtract>
self._initial_obs = obs
return self._update_obs(obs)
|
def reset(self):
obs = self.wrapped_env.reset()
goal = self.sample_goal()
self.desired_goal = goal
if self._goal_sampling_mode in {'presampled', 'env'}:
self.wrapped_env.set_goal(goal)
self._initial_obs = obs
return self._update_obs(obs)
|
CQL
|
positive
|
def grid_patient(pid, grid_size_in_minutes, window_size_in_minutes, reduced, chunkfile=None):
if chunkfile is None:
idx = df_chunks.loc[df_chunks.PatientID == pid, 'ChunkfileIndex'].values[0]
pids = df_chunks.loc[df_chunks.ChunkfileIndex == idx, 'PatientID']
pids = pd.DataFrame(pids)
idx_start = min(pids['PatientID'])
idx_stop = max(pids['PatientID'])
if reduced:
chunkfile = in_dir + 'reduced_fmat_' + str(idx) + '_' + str(idx_start) + '--' + str(idx_stop) + '.h5'
else:
chunkfile = in_dir + 'fmat_' + str(idx) + '_' + str(idx_start) + '--' + str(idx_stop) + '.h5'
print('\tprocessing patient ' + str(int(pid)))
p_df = pd.read_hdf(chunkfile, where='PatientID == ' + str(pid), columns=lactate_IDs + drug_IDs + ['Datetime'] + HR_ID + MAP_ID + weight_ID)
if p_df.empty:
print('WARNING: patient has no data')
bad_patients.write(str(int(pid)) + ',no_data\n')
return None
elif p_df.drop(['Datetime'], axis=1).isnull().mean().mean() == 1:
print('WARNING: patient has no data in these variables')
bad_patients.write(str(int(pid)) + ',no_endpoint_variables\n')
return None
p_df.set_index('Datetime', inplace=True)
p_df.sort_index(inplace=True)
<DeepExtract>
p_df['weight'] = p_df[weight_ID].fillna(method='ffill').fillna(method='bfill')
if p_df['weight'].isnull().sum() > 0:
print('Weight is missing on patient', pid, ' - imputing from height if possible')
typical_weight_dict = np.load(paths.misc_dir + 'typical_weight_dict.npy').item()
bmi_dict = np.load(paths.misc_dir + 'median_bmi_dict.npy').item()
if mimic:
static_info = pd.read_hdf(paths.merged_dir + in_version + '/static.h5', where='PatientID == ' + str(pid), columns=['Sex', 'Height'])
else:
static_info = pd.read_hdf(paths.clean_dir + in_version + '/static.h5', where='PatientID == ' + str(pid), columns=['Sex', 'Height'])
height = static_info['Height'].iloc[0]
patient_sex = static_info['Sex'].iloc[0]
try:
if np.isnan(height):
print('Missing height, imputing weight from standard measurement')
p_df['weight'] = np.mean([x for x in typical_weight_dict.values()])
else:
print('Height is not missing, imputing weight from typical BMI')
if patient_sex == 'M':
BMI = bmi_dict['male']
elif patient_sex == 'F':
BMI = bmi_dict['female']
elif patient_sex == 'U':
BMI = bmi_dict['unknown']
weight = BMI * (height / 100) ** 2
p_df['weight'] = weight
except:
ipdb.set_trace()
try:
assert p_df['weight'].isnull().sum() == 0
except AssertionError:
ipdb.set_trace()
p_df = p_df
</DeepExtract>
<DeepExtract>
lactate = p_df.loc[:, lactate_IDs].mean(axis=1)
p_df['lactate'] = lactate
p_df.drop(lactate_IDs, inplace=True, axis=1)
p_df = p_df
</DeepExtract>
<DeepExtract>
p_df['dobutamine'] = p_df.loc[:, dobutamine_ID].fillna(method='ffill')
if mimic:
p_df['milrinone'] = 0
p_df['levosimendan'] = 0
p_df['theophyllin'] = 0
p_df['dopamine'] = p_df.loc[:, dopamine_ID].fillna(method='ffill')
p_df['phenylephrin'] = p_df.loc[:, phenylephrin_ID].fillna(method='ffill')
else:
p_df['milrinone'] = p_df.loc[:, milrinone_ID].fillna(method='ffill')
p_df['levosimendan'] = p_df.loc[:, levosimendan_ID].fillna(method='ffill')
p_df['theophyllin'] = p_df.loc[:, theophyllin_IDs].fillna(method='ffill').sum(axis=1)
p_df['dopamine'] = 0
p_df['phenylephrin'] = 0
p_df['epinephrine'] = p_df.loc[:, epinephrine_IDs].fillna(method='ffill').sum(axis=1)
p_df['norepinephrine'] = p_df.loc[:, norepinephrine_IDs].fillna(method='ffill').sum(axis=1)
p_df['vasopressin'] = p_df.loc[:, vasopressin_IDs].fillna(method='ffill').sum(axis=1)
p_df = p_df
</DeepExtract>
if p_df[HR_ID].dropna().empty:
print('WARNING: patient has no HR')
bad_patients.write(str(int(pid)) + ',no_HR\n')
return None
first_HR = p_df[HR_ID].dropna().index[0]
last_HR = p_df[HR_ID].dropna().index[-1]
p_df = p_df.loc[first_HR:last_HR, :]
p_df = p_df.resample(str(grid_size_in_minutes) + 'T').median()
<DeepExtract>
for drug in ['dobutamine', 'milrinone', 'levosimendan', 'theophyllin', 'dopamine', 'phenylephrin', 'epinephrine', 'norepinephrine', 'vasopressin']:
p_df[drug] = p_df[drug].fillna(method='ffill')
return True
</DeepExtract>
<DeepExtract>
p_df['lactate_above_threshold'] = p_df['lactate'] >= 2
p_df.loc[p_df['lactate'].isnull(), 'lactate_above_threshold'] = np.nan
p_df['MAP_below_threshold'] = p_df[MAP_ID] <= 65
p_df.loc[p_df[MAP_ID[0]].isnull(), 'MAP_below_threshold'] = np.nan
p_df['level1_drugs_present'] = (p_df.loc[:, ['dobutamine', 'milrinone', 'levosimendan', 'theophyllin', 'dopamine', 'phenylephrin']] > 0).any(axis=1)
p_df['level2_drugs_present'] = (p_df.loc[:, 'epinephrine'] > 0) & (p_df.loc[:, 'epinephrine'] < 0.1 * p_df['weight']) | (p_df['norepinephrine'] > 0) & (p_df['norepinephrine'] < 0.1 * p_df['weight'])
p_df['level3_drugs_present'] = (p_df['norepinephrine'] >= 0.1 * p_df['weight']) | (p_df['epinephrine'] >= 0.1 * p_df['weight']) | (p_df['vasopressin'] > 0)
return True
</DeepExtract>
lactate_test = p_df['lactate_above_threshold'].copy()
interpolate_patient_lactate(p_df, pid, grid_size_in_minutes)
find_endpoints(p_df, MAP_ID, window_size_in_minutes=window_size_in_minutes, grid_size_in_minutes=grid_size_in_minutes)
p_df['PatientID'] = pid
assert p_df['lactate_above_threshold'].equals(lactate_test)
return p_df
|
def grid_patient(pid, grid_size_in_minutes, window_size_in_minutes, reduced, chunkfile=None):
if chunkfile is None:
idx = df_chunks.loc[df_chunks.PatientID == pid, 'ChunkfileIndex'].values[0]
pids = df_chunks.loc[df_chunks.ChunkfileIndex == idx, 'PatientID']
pids = pd.DataFrame(pids)
idx_start = min(pids['PatientID'])
idx_stop = max(pids['PatientID'])
if reduced:
chunkfile = in_dir + 'reduced_fmat_' + str(idx) + '_' + str(idx_start) + '--' + str(idx_stop) + '.h5'
else:
chunkfile = in_dir + 'fmat_' + str(idx) + '_' + str(idx_start) + '--' + str(idx_stop) + '.h5'
print('\tprocessing patient ' + str(int(pid)))
p_df = pd.read_hdf(chunkfile, where='PatientID == ' + str(pid), columns=lactate_IDs + drug_IDs + ['Datetime'] + HR_ID + MAP_ID + weight_ID)
if p_df.empty:
print('WARNING: patient has no data')
bad_patients.write(str(int(pid)) + ',no_data\n')
return None
elif p_df.drop(['Datetime'], axis=1).isnull().mean().mean() == 1:
print('WARNING: patient has no data in these variables')
bad_patients.write(str(int(pid)) + ',no_endpoint_variables\n')
return None
p_df.set_index('Datetime', inplace=True)
p_df.sort_index(inplace=True)
p_df['weight'] = p_df[weight_ID].fillna(method='ffill').fillna(method='bfill')
if p_df['weight'].isnull().sum() > 0:
print('Weight is missing on patient', pid, ' - imputing from height if possible')
typical_weight_dict = np.load(paths.misc_dir + 'typical_weight_dict.npy').item()
bmi_dict = np.load(paths.misc_dir + 'median_bmi_dict.npy').item()
if mimic:
static_info = pd.read_hdf(paths.merged_dir + in_version + '/static.h5', where='PatientID == ' + str(pid), columns=['Sex', 'Height'])
else:
static_info = pd.read_hdf(paths.clean_dir + in_version + '/static.h5', where='PatientID == ' + str(pid), columns=['Sex', 'Height'])
height = static_info['Height'].iloc[0]
patient_sex = static_info['Sex'].iloc[0]
try:
if np.isnan(height):
print('Missing height, imputing weight from standard measurement')
p_df['weight'] = np.mean([x for x in typical_weight_dict.values()])
else:
print('Height is not missing, imputing weight from typical BMI')
if patient_sex == 'M':
BMI = bmi_dict['male']
elif patient_sex == 'F':
BMI = bmi_dict['female']
elif patient_sex == 'U':
BMI = bmi_dict['unknown']
weight = BMI * (height / 100) ** 2
p_df['weight'] = weight
except:
ipdb.set_trace()
try:
assert p_df['weight'].isnull().sum() == 0
except AssertionError:
ipdb.set_trace()
p_df = p_df
lactate = p_df.loc[:, lactate_IDs].mean(axis=1)
p_df['lactate'] = lactate
p_df.drop(lactate_IDs, inplace=True, axis=1)
p_df = p_df
p_df['dobutamine'] = p_df.loc[:, dobutamine_ID].fillna(method='ffill')
if mimic:
p_df['milrinone'] = 0
p_df['levosimendan'] = 0
p_df['theophyllin'] = 0
p_df['dopamine'] = p_df.loc[:, dopamine_ID].fillna(method='ffill')
p_df['phenylephrin'] = p_df.loc[:, phenylephrin_ID].fillna(method='ffill')
else:
p_df['milrinone'] = p_df.loc[:, milrinone_ID].fillna(method='ffill')
p_df['levosimendan'] = p_df.loc[:, levosimendan_ID].fillna(method='ffill')
p_df['theophyllin'] = p_df.loc[:, theophyllin_IDs].fillna(method='ffill').sum(axis=1)
p_df['dopamine'] = 0
p_df['phenylephrin'] = 0
p_df['epinephrine'] = p_df.loc[:, epinephrine_IDs].fillna(method='ffill').sum(axis=1)
p_df['norepinephrine'] = p_df.loc[:, norepinephrine_IDs].fillna(method='ffill').sum(axis=1)
p_df['vasopressin'] = p_df.loc[:, vasopressin_IDs].fillna(method='ffill').sum(axis=1)
p_df = p_df
if p_df[HR_ID].dropna().empty:
print('WARNING: patient has no HR')
bad_patients.write(str(int(pid)) + ',no_HR\n')
return None
first_HR = p_df[HR_ID].dropna().index[0]
last_HR = p_df[HR_ID].dropna().index[-1]
p_df = p_df.loc[first_HR:last_HR, :]
p_df = p_df.resample(str(grid_size_in_minutes) + 'T').median()
for drug in ['dobutamine', 'milrinone', 'levosimendan', 'theophyllin', 'dopamine', 'phenylephrin', 'epinephrine', 'norepinephrine', 'vasopressin']:
p_df[drug] = p_df[drug].fillna(method='ffill')
return True
p_df['lactate_above_threshold'] = p_df['lactate'] >= 2
p_df.loc[p_df['lactate'].isnull(), 'lactate_above_threshold'] = np.nan
p_df['MAP_below_threshold'] = p_df[MAP_ID] <= 65
p_df.loc[p_df[MAP_ID[0]].isnull(), 'MAP_below_threshold'] = np.nan
p_df['level1_drugs_present'] = (p_df.loc[:, ['dobutamine', 'milrinone', 'levosimendan', 'theophyllin', 'dopamine', 'phenylephrin']] > 0).any(axis=1)
p_df['level2_drugs_present'] = (p_df.loc[:, 'epinephrine'] > 0) & (p_df.loc[:, 'epinephrine'] < 0.1 * p_df['weight']) | (p_df['norepinephrine'] > 0) & (p_df['norepinephrine'] < 0.1 * p_df['weight'])
p_df['level3_drugs_present'] = (p_df['norepinephrine'] >= 0.1 * p_df['weight']) | (p_df['epinephrine'] >= 0.1 * p_df['weight']) | (p_df['vasopressin'] > 0)
return True
lactate_test = p_df['lactate_above_threshold'].copy()
interpolate_patient_lactate(p_df, pid, grid_size_in_minutes)
find_endpoints(p_df, MAP_ID, window_size_in_minutes=window_size_in_minutes, grid_size_in_minutes=grid_size_in_minutes)
p_df['PatientID'] = pid
assert p_df['lactate_above_threshold'].equals(lactate_test)
return p_df
|
circEWS
|
positive
|
def __init__(self, catchall=True, autojson=True):
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
self.resources = ResourceManager()
self.routes = []
self.router = Router()
self.error_handler = {}
self.plugins = []
if self.config['autojson']:
<DeepExtract>
if hasattr(JSONPlugin(), 'setup'):
JSONPlugin().setup(self)
if not callable(JSONPlugin()) and (not hasattr(JSONPlugin(), 'apply')):
raise TypeError('Plugins must be callable or implement .apply()')
self.plugins.append(JSONPlugin())
self.reset()
return JSONPlugin()
</DeepExtract>
<DeepExtract>
if hasattr(TemplatePlugin(), 'setup'):
TemplatePlugin().setup(self)
if not callable(TemplatePlugin()) and (not hasattr(TemplatePlugin(), 'apply')):
raise TypeError('Plugins must be callable or implement .apply()')
self.plugins.append(TemplatePlugin())
self.reset()
return TemplatePlugin()
</DeepExtract>
|
def __init__(self, catchall=True, autojson=True):
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
self.resources = ResourceManager()
self.routes = []
self.router = Router()
self.error_handler = {}
self.plugins = []
if self.config['autojson']:
if hasattr(JSONPlugin(), 'setup'):
JSONPlugin().setup(self)
if not callable(JSONPlugin()) and (not hasattr(JSONPlugin(), 'apply')):
raise TypeError('Plugins must be callable or implement .apply()')
self.plugins.append(JSONPlugin())
self.reset()
return JSONPlugin()
if hasattr(TemplatePlugin(), 'setup'):
TemplatePlugin().setup(self)
if not callable(TemplatePlugin()) and (not hasattr(TemplatePlugin(), 'apply')):
raise TypeError('Plugins must be callable or implement .apply()')
self.plugins.append(TemplatePlugin())
self.reset()
return TemplatePlugin()
|
cmus_app
|
positive
|
def insert_abs(self, r, c, ch):
"""This inserts a character at (r,c). Everything under
and to the right is shifted right one character.
The last character of the line is lost.
"""
<DeepExtract>
if r < 1:
r = 1
if r > self.rows:
r = self.rows
r = r
</DeepExtract>
<DeepExtract>
if c < 1:
c = 1
if c > self.cols:
c = self.cols
c = c
</DeepExtract>
for ci in range(self.cols, c, -1):
<DeepExtract>
r = constrain(r, 1, self.rows)
ci = constrain(ci, 1, self.cols)
self.get_abs(r, ci - 1) = str(self.get_abs(r, ci - 1))[0]
self.w[r - 1][ci - 1] = self.get_abs(r, ci - 1)
</DeepExtract>
<DeepExtract>
r = constrain(r, 1, self.rows)
c = constrain(c, 1, self.cols)
ch = str(ch)[0]
self.w[r - 1][c - 1] = ch
</DeepExtract>
|
def insert_abs(self, r, c, ch):
"""This inserts a character at (r,c). Everything under
and to the right is shifted right one character.
The last character of the line is lost.
"""
if r < 1:
r = 1
if r > self.rows:
r = self.rows
r = r
if c < 1:
c = 1
if c > self.cols:
c = self.cols
c = c
for ci in range(self.cols, c, -1):
r = constrain(r, 1, self.rows)
ci = constrain(ci, 1, self.cols)
self.get_abs(r, ci - 1) = str(self.get_abs(r, ci - 1))[0]
self.w[r - 1][ci - 1] = self.get_abs(r, ci - 1)
r = constrain(r, 1, self.rows)
c = constrain(c, 1, self.cols)
ch = str(ch)[0]
self.w[r - 1][c - 1] = ch
|
camr
|
positive
|
def deleter(self, **kw):
""" Requires the key of the row.
Returns a count of rows deleted (will be either 0 or 1).
Will only work on tables with primary keys.
Does not confirm that it has the key for the row - so can be
dangerous.
Note - it may not delete anything if an insufficient unique
key is provided.
Uses 'generative sql' to build up query.
"""
try:
gener_sql = self._table.delete()
<DeepExtract>
where = None
technique = 'pk'
for column in self._table.c:
if column.name in self._table.primary_key:
if column.name not in kw:
technique = 'uk'
if technique == 'pk':
for column in self._table.c:
if column.name in self._table.primary_key:
where = gener_sql.where(self._table.c[column.name] == kw[column.name])
elif technique == 'uk':
for constraint in self._get_unique_constraints():
print(' constraint: %s' % constraint)
print(' syscat: %s' % self._table.c[constraint])
print(' filter_col: %s' % ','.join(kw))
print(' filter_col[sub]: %s' % kw[constraint])
print('Constraint: %s' % constraint)
print('constraint: %s' % self._table.c[constraint])
print('filter_col: %s' % kw[constraint])
where = gener_sql.where(self._table.c[constraint] == kw[constraint])
if where is None:
if not self._get_unique_constraints():
raise KeyError('no pk provided but table lacks a uk')
else:
raise KeyError('no pk or uk provided')
if where is None:
raise KeyError
gener_sql = where
</DeepExtract>
result = gener_sql.execute()
except KeyError:
return 0
assert result.rowcount in [0, 1]
return result.rowcount
|
def deleter(self, **kw):
""" Requires the key of the row.
Returns a count of rows deleted (will be either 0 or 1).
Will only work on tables with primary keys.
Does not confirm that it has the key for the row - so can be
dangerous.
Note - it may not delete anything if an insufficient unique
key is provided.
Uses 'generative sql' to build up query.
"""
try:
gener_sql = self._table.delete()
where = None
technique = 'pk'
for column in self._table.c:
if column.name in self._table.primary_key:
if column.name not in kw:
technique = 'uk'
if technique == 'pk':
for column in self._table.c:
if column.name in self._table.primary_key:
where = gener_sql.where(self._table.c[column.name] == kw[column.name])
elif technique == 'uk':
for constraint in self._get_unique_constraints():
print(' constraint: %s' % constraint)
print(' syscat: %s' % self._table.c[constraint])
print(' filter_col: %s' % ','.join(kw))
print(' filter_col[sub]: %s' % kw[constraint])
print('Constraint: %s' % constraint)
print('constraint: %s' % self._table.c[constraint])
print('filter_col: %s' % kw[constraint])
where = gener_sql.where(self._table.c[constraint] == kw[constraint])
if where is None:
if not self._get_unique_constraints():
raise KeyError('no pk provided but table lacks a uk')
else:
raise KeyError('no pk or uk provided')
if where is None:
raise KeyError
gener_sql = where
result = gener_sql.execute()
except KeyError:
return 0
assert result.rowcount in [0, 1]
return result.rowcount
|
DataGristle
|
positive
|
def generate_switched_suite(opts, out):
root_opts = defaultdict(list)
opts = list(zip(count(1), opts))
if DO_STATS:
for (rule, opt) in opts:
name = opt[0]
src_root = get_root(opt[4]).getOpName()
out.write('STATISTIC(Rule{0}, "{1}.{0}. {2}");\n'.format(rule, src_root, name))
out.write('Instruction *InstCombiner::runOnInstruction(Instruction *I) {\n')
if SIMPLIFY:
out.write('\n if (Value *V = SimplifyInstruction(I, SQ)) {\n return replaceInstUsesWith(*I, V);\n }\n')
out.write(' switch (I->getOpcode()) {\n default: break;\n')
for opt in opts:
root_opts[get_root(opt[1][4]).getOpName()].append(opt)
for (root, opts) in root_opts.items():
if root not in llvm_opcode:
continue
out.write(' case {0}:\n'.format(llvm_opcode[root]))
for (rule, opt) in opts:
<DeepExtract>
(name, pre, src_bb, tgt_bb, src, tgt, src_used, tgt_used, tgt_skip) = opt
if len(src_bb) != 1 or len(tgt_bb) != 1:
raise AliveError("codegen can't handle multiple basic blocks: " + name)
root = get_root(src)
cg = CodeGenerator()
cg.value_names[root] = 'I'
cg.bind_value(root)
todo = [root]
clauses = []
while todo:
val = todo.pop()
if isinstance(val, Instr):
(exp, new_vals) = match_value(val, cg)
clauses.append(exp)
todo.extend(reversed(new_vals))
val.register_types(cg)
cg.phase = cg.Target
pre.register_types(cg)
for name in cg.named_types:
cg.unify(*cg.named_types[name])
tgt_vals = [v for (k, v) in tgt.items() if not (isinstance(v, Input) or k in tgt_skip)]
for value in tgt_vals:
value.register_types(cg)
root_name = root.getName()
new_root = tgt[root_name]
cg.unify(root, new_root)
clauses.extend(cg.clauses)
for (v, t) in cg.guaranteed.items():
if not cg.bound(v):
continue
clauses.extend(minimal_type_constraints(cg.get_llvm_type(v), cg.required[v], t))
if not isinstance(pre, TruePred):
clauses.append(pre.visit_pre(cg))
if DO_STATS and LIMITER:
clauses.append(CBinExpr('<', CVariable('Rule' + str(rule)), CVariable('10000')))
body = []
if DO_STATS:
body = [CUnaryExpr('++', CVariable('Rule' + str(rule)))]
for value in tgt_vals:
if isinstance(value, Instr) and value != new_root:
body.extend(value.visit_target(cg, True))
if isinstance(new_root, CopyOperand):
body.append(CDefinition.init(cg.PtrInstruction, cg.get_cexp(tgt[root_name]), CFunctionCall('replaceInstUsesWith', CVariable('*I'), cg.get_cexp(new_root.v))))
else:
body.extend(new_root.visit_target(cg, False))
body.append(CReturn(cg.get_cexp(new_root)))
cif = CIf(CBinExpr.reduce('&&', clauses), body).format()
decl_it = CDefinition.block(((t, CVariable(v)) for (v, t) in cg.name_type.items() if v != 'I'))
decl = iter_seq((line + d.format() for d in decl_it))
code = nest(2, seq(line, '{ // ', name, nest(2, seq(decl, line, line, cif)), line, '}'))
out.write(code.format())
</DeepExtract>
out.write('\n break;\n\n')
out.write('\n }\n\n return nullptr;\n}\n')
|
def generate_switched_suite(opts, out):
root_opts = defaultdict(list)
opts = list(zip(count(1), opts))
if DO_STATS:
for (rule, opt) in opts:
name = opt[0]
src_root = get_root(opt[4]).getOpName()
out.write('STATISTIC(Rule{0}, "{1}.{0}. {2}");\n'.format(rule, src_root, name))
out.write('Instruction *InstCombiner::runOnInstruction(Instruction *I) {\n')
if SIMPLIFY:
out.write('\n if (Value *V = SimplifyInstruction(I, SQ)) {\n return replaceInstUsesWith(*I, V);\n }\n')
out.write(' switch (I->getOpcode()) {\n default: break;\n')
for opt in opts:
root_opts[get_root(opt[1][4]).getOpName()].append(opt)
for (root, opts) in root_opts.items():
if root not in llvm_opcode:
continue
out.write(' case {0}:\n'.format(llvm_opcode[root]))
for (rule, opt) in opts:
(name, pre, src_bb, tgt_bb, src, tgt, src_used, tgt_used, tgt_skip) = opt
if len(src_bb) != 1 or len(tgt_bb) != 1:
raise AliveError("codegen can't handle multiple basic blocks: " + name)
root = get_root(src)
cg = CodeGenerator()
cg.value_names[root] = 'I'
cg.bind_value(root)
todo = [root]
clauses = []
while todo:
val = todo.pop()
if isinstance(val, Instr):
(exp, new_vals) = match_value(val, cg)
clauses.append(exp)
todo.extend(reversed(new_vals))
val.register_types(cg)
cg.phase = cg.Target
pre.register_types(cg)
for name in cg.named_types:
cg.unify(*cg.named_types[name])
tgt_vals = [v for (k, v) in tgt.items() if not (isinstance(v, Input) or k in tgt_skip)]
for value in tgt_vals:
value.register_types(cg)
root_name = root.getName()
new_root = tgt[root_name]
cg.unify(root, new_root)
clauses.extend(cg.clauses)
for (v, t) in cg.guaranteed.items():
if not cg.bound(v):
continue
clauses.extend(minimal_type_constraints(cg.get_llvm_type(v), cg.required[v], t))
if not isinstance(pre, TruePred):
clauses.append(pre.visit_pre(cg))
if DO_STATS and LIMITER:
clauses.append(CBinExpr('<', CVariable('Rule' + str(rule)), CVariable('10000')))
body = []
if DO_STATS:
body = [CUnaryExpr('++', CVariable('Rule' + str(rule)))]
for value in tgt_vals:
if isinstance(value, Instr) and value != new_root:
body.extend(value.visit_target(cg, True))
if isinstance(new_root, CopyOperand):
body.append(CDefinition.init(cg.PtrInstruction, cg.get_cexp(tgt[root_name]), CFunctionCall('replaceInstUsesWith', CVariable('*I'), cg.get_cexp(new_root.v))))
else:
body.extend(new_root.visit_target(cg, False))
body.append(CReturn(cg.get_cexp(new_root)))
cif = CIf(CBinExpr.reduce('&&', clauses), body).format()
decl_it = CDefinition.block(((t, CVariable(v)) for (v, t) in cg.name_type.items() if v != 'I'))
decl = iter_seq((line + d.format() for d in decl_it))
code = nest(2, seq(line, '{ // ', name, nest(2, seq(decl, line, line, cif)), line, '}'))
out.write(code.format())
out.write('\n break;\n\n')
out.write('\n }\n\n return nullptr;\n}\n')
|
alive
|
positive
|
@deprecate_kwarg('type', 'plot_type')
def hedgehog_plot(self, params: ArrayLike1D | None=None, horizon: int=10, step: int=10, start: int | DateLike | None=None, plot_type: Literal['volatility', 'mean']='volatility', method: ForecastingMethod='analytic', simulations: int=1000) -> Figure:
"""
Plot forecasts from estimated model
Parameters
----------
params : {ndarray, Series}
Alternative parameters to use. If not provided, the parameters
computed by fitting the model are used. Must be 1-d and identical
in shape to the parameters computed by fitting the model.
horizon : int, optional
Number of steps to forecast
step : int, optional
Non-negative number of forecasts to skip between spines
start : int, datetime or str, optional
An integer, datetime or str indicating the first observation to
produce the forecast for. Datetimes can only be used with pandas
inputs that have a datetime index. Strings must be convertible
to a date time, such as in '1945-01-01'. If not provided, the start
is set to the earliest forecastable date.
plot_type : {'volatility', 'mean'}
Quantity to plot, the forecast volatility or the forecast mean
method : {'analytic', 'simulation', 'bootstrap'}
Method to use when producing the forecast. The default is analytic.
The method only affects the variance forecast generation. Not all
volatility models support all methods. In particular, volatility
models that do not evolve in squares such as EGARCH or TARCH do not
support the 'analytic' method for horizons > 1.
simulations : int
Number of simulations to run when computing the forecast using
either simulation or bootstrap.
Returns
-------
fig : figure
Handle to the figure
Examples
--------
>>> import pandas as pd
>>> from arch import arch_model
>>> am = arch_model(None,mean='HAR',lags=[1,5,22],vol='Constant')
>>> sim_data = am.simulate([0.1,0.4,0.3,0.2,1.0], 250)
>>> sim_data.index = pd.date_range('2000-01-01',periods=250)
>>> am = arch_model(sim_data['data'],mean='HAR',lags=[1,5,22], vol='Constant')
>>> res = am.fit()
>>> fig = res.hedgehog_plot(plot_type='mean')
"""
import matplotlib.pyplot as plt
plot_mean = plot_type.lower() == 'mean'
if start is None:
invalid_start = True
start = 0
while invalid_start:
try:
<DeepExtract>
</DeepExtract>
invalid_start = False
except ValueError:
start += 1
else:
<DeepExtract>
</DeepExtract>
(fig, ax) = plt.subplots(1, 1)
use_date = isinstance(self._dep_var.index, pd.DatetimeIndex)
plot_fn = ax.plot_date if use_date else ax.plot
x_values = np.array(self._dep_var.index)
if plot_mean:
y_values = np.asarray(self._dep_var)
else:
y_values = np.asarray(self.conditional_volatility)
plot_fn(x_values, y_values, linestyle='-', marker='')
first_obs = np.min(np.where(np.logical_not(np.isnan(forecasts.mean)))[0])
spines = []
t = forecasts.mean.shape[0]
for i in range(first_obs, t, step):
if i + horizon + 1 > x_values.shape[0]:
continue
temp_x = x_values[i:i + horizon + 1]
if plot_mean:
spine_data = np.asarray(forecasts.mean.iloc[i], dtype=float)
else:
spine_data = np.asarray(np.sqrt(forecasts.variance.iloc[i]), dtype=float)
temp_y = np.hstack([y_values[i], spine_data])
line = plot_fn(temp_x, temp_y, linewidth=3, linestyle='-', marker='')
spines.append(line)
color = spines[0][0].get_color()
for spine in spines[1:]:
spine[0].set_color(color)
plot_title = 'Mean' if plot_mean else 'Volatility'
ax.set_title(self._dep_name + ' ' + plot_title + ' Forecast Hedgehog Plot')
return fig
|
@deprecate_kwarg('type', 'plot_type')
def hedgehog_plot(self, params: ArrayLike1D | None=None, horizon: int=10, step: int=10, start: int | DateLike | None=None, plot_type: Literal['volatility', 'mean']='volatility', method: ForecastingMethod='analytic', simulations: int=1000) -> Figure:
"""
Plot forecasts from estimated model
Parameters
----------
params : {ndarray, Series}
Alternative parameters to use. If not provided, the parameters
computed by fitting the model are used. Must be 1-d and identical
in shape to the parameters computed by fitting the model.
horizon : int, optional
Number of steps to forecast
step : int, optional
Non-negative number of forecasts to skip between spines
start : int, datetime or str, optional
An integer, datetime or str indicating the first observation to
produce the forecast for. Datetimes can only be used with pandas
inputs that have a datetime index. Strings must be convertible
to a date time, such as in '1945-01-01'. If not provided, the start
is set to the earliest forecastable date.
plot_type : {'volatility', 'mean'}
Quantity to plot, the forecast volatility or the forecast mean
method : {'analytic', 'simulation', 'bootstrap'}
Method to use when producing the forecast. The default is analytic.
The method only affects the variance forecast generation. Not all
volatility models support all methods. In particular, volatility
models that do not evolve in squares such as EGARCH or TARCH do not
support the 'analytic' method for horizons > 1.
simulations : int
Number of simulations to run when computing the forecast using
either simulation or bootstrap.
Returns
-------
fig : figure
Handle to the figure
Examples
--------
>>> import pandas as pd
>>> from arch import arch_model
>>> am = arch_model(None,mean='HAR',lags=[1,5,22],vol='Constant')
>>> sim_data = am.simulate([0.1,0.4,0.3,0.2,1.0], 250)
>>> sim_data.index = pd.date_range('2000-01-01',periods=250)
>>> am = arch_model(sim_data['data'],mean='HAR',lags=[1,5,22], vol='Constant')
>>> res = am.fit()
>>> fig = res.hedgehog_plot(plot_type='mean')
"""
import matplotlib.pyplot as plt
plot_mean = plot_type.lower() == 'mean'
if start is None:
invalid_start = True
start = 0
while invalid_start:
try:
invalid_start = False
except ValueError:
start += 1
else:
(fig, ax) = plt.subplots(1, 1)
use_date = isinstance(self._dep_var.index, pd.DatetimeIndex)
plot_fn = ax.plot_date if use_date else ax.plot
x_values = np.array(self._dep_var.index)
if plot_mean:
y_values = np.asarray(self._dep_var)
else:
y_values = np.asarray(self.conditional_volatility)
plot_fn(x_values, y_values, linestyle='-', marker='')
first_obs = np.min(np.where(np.logical_not(np.isnan(forecasts.mean)))[0])
spines = []
t = forecasts.mean.shape[0]
for i in range(first_obs, t, step):
if i + horizon + 1 > x_values.shape[0]:
continue
temp_x = x_values[i:i + horizon + 1]
if plot_mean:
spine_data = np.asarray(forecasts.mean.iloc[i], dtype=float)
else:
spine_data = np.asarray(np.sqrt(forecasts.variance.iloc[i]), dtype=float)
temp_y = np.hstack([y_values[i], spine_data])
line = plot_fn(temp_x, temp_y, linewidth=3, linestyle='-', marker='')
spines.append(line)
color = spines[0][0].get_color()
for spine in spines[1:]:
spine[0].set_color(color)
plot_title = 'Mean' if plot_mean else 'Volatility'
ax.set_title(self._dep_name + ' ' + plot_title + ' Forecast Hedgehog Plot')
return fig
|
arch
|
positive
|
def test_get_simple_slot_values_for_simple_slot(self):
self.test_intent_request.intent.slots[self.test_slot_name].slot_value = self.test_simple_slot
<DeepExtract>
self.test_request_envelope.request = self.test_intent_request
test_input = HandlerInput(request_envelope=self.test_request_envelope)
</DeepExtract>
slot_value = get_slot_value_v2(handler_input=test_input, slot_name=self.test_slot_name)
actual_slot_value_list = get_simple_slot_values(slot_value=slot_value)
expected_slot_value_list = [self.test_simple_slot]
self.assertListEqual(actual_slot_value_list, expected_slot_value_list, 'get_simple_slot_values method returned incorrect list of simple slot values, when the slotValue passed in had a simple slot value')
|
def test_get_simple_slot_values_for_simple_slot(self):
self.test_intent_request.intent.slots[self.test_slot_name].slot_value = self.test_simple_slot
self.test_request_envelope.request = self.test_intent_request
test_input = HandlerInput(request_envelope=self.test_request_envelope)
slot_value = get_slot_value_v2(handler_input=test_input, slot_name=self.test_slot_name)
actual_slot_value_list = get_simple_slot_values(slot_value=slot_value)
expected_slot_value_list = [self.test_simple_slot]
self.assertListEqual(actual_slot_value_list, expected_slot_value_list, 'get_simple_slot_values method returned incorrect list of simple slot values, when the slotValue passed in had a simple slot value')
|
alexa-skills-kit-sdk-for-python
|
positive
|
def _set_body(self, body):
if body is not None:
assert type(body) == str
<DeepExtract>
self.headers.append(('Content_length', len(body)))
</DeepExtract>
self._body = body
|
def _set_body(self, body):
if body is not None:
assert type(body) == str
self.headers.append(('Content_length', len(body)))
self._body = body
|
concurrence
|
positive
|
def add_polar_frac(frame, dt=20.0):
frame = frame[list(map(lambda x: x[0] != x[1], frame.baseline))]
<DeepExtract>
frame.loc[:, 'round_time'] = list(map(lambda x: np.round((x - datetime.datetime(2017, 4, 4)).total_seconds() / dt), frame['datetime']))
frame = frame
</DeepExtract>
frame.loc[:, 'polar_frac'] = [0.0] * np.shape(frame)[0]
frameG = frame.groupby(('baseline', 'round_time')).filter(lambda x: len(x) > 1)
frame = frame.groupby(('baseline', 'round_time')).filter(lambda x: len(x) < 5)
frame = frame.groupby(('baseline', 'round_time')).filter(lambda x: ('RL' in list(x.polarization)) | ('LR' in list(x.polarization)))
polar_fracL = []
for (index, row) in frame.iterrows():
dt_foo = row.round_time
base_foo = row.baseline
amp_RL = list(frame[(frame.round_time == dt_foo) & (frame.baseline == base_foo) & (frame.polarization == 'RL')].amp)
amp_LR = list(frame[(frame.round_time == dt_foo) & (frame.baseline == base_foo) & (frame.polarization == 'LR')].amp)
if len(amp_RL) == 0:
amp_RL = 0.0
else:
amp_RL = amp_RL[0]
if len(amp_LR) == 0:
amp_LR = 0.0
else:
amp_LR = amp_LR[0]
amp_cross = np.maximum(amp_RL, amp_LR)
polar_fracL.append(amp_cross / row.amp)
frame['polar_frac'] = polar_fracL
return frame
|
def add_polar_frac(frame, dt=20.0):
frame = frame[list(map(lambda x: x[0] != x[1], frame.baseline))]
frame.loc[:, 'round_time'] = list(map(lambda x: np.round((x - datetime.datetime(2017, 4, 4)).total_seconds() / dt), frame['datetime']))
frame = frame
frame.loc[:, 'polar_frac'] = [0.0] * np.shape(frame)[0]
frameG = frame.groupby(('baseline', 'round_time')).filter(lambda x: len(x) > 1)
frame = frame.groupby(('baseline', 'round_time')).filter(lambda x: len(x) < 5)
frame = frame.groupby(('baseline', 'round_time')).filter(lambda x: ('RL' in list(x.polarization)) | ('LR' in list(x.polarization)))
polar_fracL = []
for (index, row) in frame.iterrows():
dt_foo = row.round_time
base_foo = row.baseline
amp_RL = list(frame[(frame.round_time == dt_foo) & (frame.baseline == base_foo) & (frame.polarization == 'RL')].amp)
amp_LR = list(frame[(frame.round_time == dt_foo) & (frame.baseline == base_foo) & (frame.polarization == 'LR')].amp)
if len(amp_RL) == 0:
amp_RL = 0.0
else:
amp_RL = amp_RL[0]
if len(amp_LR) == 0:
amp_LR = 0.0
else:
amp_LR = amp_LR[0]
amp_cross = np.maximum(amp_RL, amp_LR)
polar_fracL.append(amp_cross / row.amp)
frame['polar_frac'] = polar_fracL
return frame
|
eat
|
positive
|
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
<DeepExtract>
if name is None:
name = output_tensor.name
if expected_rank is not None:
assert_rank(output_tensor, expected_rank, name)
shape = output_tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
output_shape = shape
dyn_shape = tf.shape(output_tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
output_shape = shape
</DeepExtract>
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
|
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
if name is None:
name = output_tensor.name
if expected_rank is not None:
assert_rank(output_tensor, expected_rank, name)
shape = output_tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
output_shape = shape
dyn_shape = tf.shape(output_tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
output_shape = shape
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
|
BERT-for-Sequence-Labeling-and-Text-Classification
|
positive
|
def core_createModels():
print('core_create_Models')
bpy.ops.object.empty_add(type='PLAIN_AXES')
bpy.context.view_layer.objects.active.name = copy.copy(str(bpy.context.scene.BBModelRemark) + '_' + str(chainPDB))
bpy.context.view_layer.objects.active.bb2_pdbID = copy.copy(str(pdbID))
parentEmpty = bpy.context.view_layer.objects.active
bpy.context.view_layer.objects.active.bb2_objectType = 'PDBEMPTY'
bpy.context.view_layer.objects.active.bb2_subID = copy.copy(str(chainPDB))
bpy.context.view_layer.objects.active.bb2_outputOptions = '1'
bpy.context.view_layer.objects.active.bb2_pdbPath = copy.copy(str(bpy.context.scene.BBImportPath))
bpy.context.view_layer.objects.active.location = (0.0, 0.0, 0.0)
bpy.context.view_layer.objects.active.hide_set(True)
FinalFrame = bpy.data.scenes['Scene'].frame_end
global chainCache
global curFrame
SetKeyFrame.append((len(tmpPDBmodelImportOrder) - 1) * 100)
id = bpy.context.scene.BBModelRemark
curFrame = 1
for m in tmpPDBmodelImportOrder:
model = pdbIDmodelsDictionary[int(pdbID)][m]
if not model:
continue
bpy.ops.object.select_all(action='DESELECT')
for o in bpy.data.objects:
o.select_set(False)
bpy.context.view_layer.objects.active = None
bpy.context.scene.frame_set(curFrame)
if curFrame == 1:
modelCopy = model.copy()
bpy.context.view_layer.objects.active = bpy.data.objects['atom']
bpy.data.objects['atom'].hide_viewport = False
bpy.data.objects['atom'].select_set(True)
bpy.data.objects['atom'].name = str(id)
<DeepExtract>
object = bpy.context.view_layer.objects.active
bpy.ops.object.select_all(action='DESELECT')
bpy.context.view_layer.objects.active = object
bpy.context.view_layer.objects.active.select_set(True)
if int(len(model) - 1) <= 50:
for num in range(len(model) - 1):
bpy.ops.object.duplicate(linked=True, mode='DUMMY')
else:
lista = []
lista.append(bpy.context.view_layer.objects.active)
for obj in bpy.data.objects:
if bpy.context.view_layer.objects.active != obj and obj.select_get():
lista.append(obj)
total = int(str(numpy.log2(len(model) - 1)).split('.')[0])
i = 0
while i < total:
create_duplicate(lista)
i += 1
difference = int(len(model) - 1 - pow(2, int(str(numpy.log2(len(model) - 1)).split('.')[0]))) + 1
duplicate(difference)
</DeepExtract>
try:
for (i, obj) in enumerate(bpy.data.objects):
if obj.name.split('.')[0] == id and obj.type == 'MESH':
entry = modelCopy.popitem()
obj.name = entry[0]
index = str(entry[1])[76:78].strip()
obj.material_slots[0].material = bpy.data.materials[index]
obj.scale = [scale_cov[index][0], scale_cov[index][0], scale_cov[index][0]]
obj.BBInfo = str(entry[1])
obj.bb2_pdbID = copy.copy(str(pdbID))
obj.bb2_subID = obj.BBInfo[21:22]
obj.bb2_objectType = 'ATOM'
obj.select_set(True)
obj.parent = parentEmpty
Frame[parentEmpty] = 0
except Exception as E:
raise Exception('Unable to generate 3D model from PDB File', E)
if bpy.context.scene.BBImportMakeBonds:
try:
mainChainCache = mainChainCacheDict[int(pdbID)]
mainChainCache_Nucleic = mainChainCache_NucleicDict[int(pdbID)]
mainChainCache_Nucleic_Filtered = mainChainCache_Nucleic_FilteredDict[int(pdbID)]
chainCache = chainCacheDict[int(pdbID)]
chainCache_Nucleic = chainCache_NucleicDict[int(pdbID)]
tmpModel = pdbIDmodelsDictionary[int(pdbID)][m]
cacheSize = len(mainChainCache) - 1
for (i, entry) in enumerate(mainChainCache):
if i < cacheSize:
obj = bpy.data.objects[entry]
nextEntry = bpy.data.objects[mainChainCache[i + 1]]
line = tmpModel[entry]
obj.location = line.get('loc')
line = tmpModel[mainChainCache[i + 1]]
nextEntry.location = line.get('loc')
<DeepExtract>
parentxaxis = Vector((1.0, 0.0, 0.0))
hingevector = Vector((nextEntry.location[0] - obj.location[0], nextEntry.location[1] - obj.location[1], nextEntry.location[2] - obj.location[2]))
rotvec2mapx2hingevector = parentxaxis.cross(hingevector)
rotvec2mapx2hingevector.normalize()
angle2mapx2hingevector = parentxaxis.angle(hingevector)
matrot = Matrix.Rotation(angle2mapx2hingevector, 3, rotvec2mapx2hingevector)
euler = matrot.to_euler()
</DeepExtract>
cacheSize = len(mainChainCache_Nucleic_Filtered) - 1
for (i, entry) in enumerate(mainChainCache_Nucleic_Filtered):
if i < cacheSize:
obj = bpy.data.objects[entry]
nextEntry = bpy.data.objects[mainChainCache_Nucleic_Filtered[i + 1]]
line = tmpModel[entry]
obj.location = line.get('loc')
line = tmpModel[mainChainCache_Nucleic_Filtered[i + 1]]
nextEntry.location = line.get('loc')
<DeepExtract>
parentxaxis = Vector((1.0, 0.0, 0.0))
hingevector = Vector((nextEntry.location[0] - obj.location[0], nextEntry.location[1] - obj.location[1], nextEntry.location[2] - obj.location[2]))
rotvec2mapx2hingevector = parentxaxis.cross(hingevector)
rotvec2mapx2hingevector.normalize()
angle2mapx2hingevector = parentxaxis.angle(hingevector)
matrot = Matrix.Rotation(angle2mapx2hingevector, 3, rotvec2mapx2hingevector)
euler = matrot.to_euler()
</DeepExtract>
chainCache = sorted(chainCache.items())
for entry in chainCache:
line = entry[1].split('#')
amac = line[0]
chainSeq = line[1]
atom = line[2]
chainID = line[3]
if atom != C and atom != CA and (atom != N) and (atom != H):
<DeepExtract>
if atom == 'O' or atom == 'OXT':
parent = ['C', C]
elif atom == 'CB' or atom == 'HA' or atom == 'HA2' or (atom == 'HA3'):
parent = ['CA', C]
elif atom == 'SG' or 'HB' in atom or 'OG' in atom or ('CG' in atom):
parent = ['CB', C]
elif atom == 'H' or atom == 'H1' or atom == 'H2' or (atom == 'H3'):
parent = ['N', N]
elif atom == 'HG1':
parent = ['OG1', O]
elif atom == 'HG23' or atom == 'HG22' or atom == 'HG21':
parent = ['CG2', C]
elif atom == 'SD' or 'CD' in atom or 'ND' in atom or (atom == 'HG2') or (atom == 'HG3') or (atom == 'OD1') or (atom == 'OD2') or (atom == 'HG12') or (atom == 'HG13') or (atom == 'HG13'):
if amac == 'ILE' or amac == 'VAL':
parent = ['CG1', C]
else:
parent = ['CG', C]
elif atom == 'CE2' or atom == 'CE3' or atom == 'NE2' or (atom == 'HD2'):
if amac == 'GLN':
parent = ['CD', C]
elif amac == 'ARG' or amac == 'LYS' or amac == 'PRO':
parent = ['CD', C]
elif amac == 'ASP':
parent = ['OD2', O]
else:
parent = ['CD2', C]
elif atom == 'CE1' or atom == 'HD11' or atom == 'HD12' or (atom == 'HD13') or (atom == 'HD1') or (atom == 'NE1'):
if amac == 'HIS':
parent = ['ND1', N]
else:
parent = ['CD1', C]
elif atom == 'NE' or atom == 'HD3' or atom == 'CE' or (atom == 'OE1') or (atom == 'OE2'):
if amac == 'MET':
parent = ['SD', S]
else:
parent = ['CD', C]
elif atom == 'CZ' or atom == 'HE' or atom == 'HE1':
if amac == 'ARG':
parent = ['NE', N]
elif amac == 'TRP':
parent = ['NE1', N]
elif amac == 'MET':
parent = ['CE', C]
elif amac == 'PHE' or amac == 'HIS' or amac == 'TYR':
parent = ['CE1', C]
elif atom == 'NH1' or atom == 'NH2' or atom == 'HZ' or (atom == 'OH'):
parent = ['CZ', C]
elif atom == 'HH11' or atom == 'HH12' or atom == '1HH1' or (atom == '1HH2'):
parent = ['NH1', N]
elif atom == 'HH21' or atom == 'HH22' or atom == '2HH2' or (atom == '1HH2'):
parent = ['NH2', N]
elif atom == 'HD21' or atom == 'HD22' or atom == 'HD23':
if amac == 'LEU':
parent = ['CD2', C]
else:
parent = ['ND2', N]
elif atom == 'HE3' or atom == 'NZ':
if amac == 'TRP':
parent = ['CE3', C]
else:
parent = ['CE', C]
elif atom == 'HZ1' or atom == 'HZ2' or atom == 'HZ3':
if amac == 'TRP' and atom == 'HZ2':
parent = ['CZ2', S]
elif amac == 'TRP' and atom == 'HZ3':
parent = ['CZ3', S]
else:
parent = ['NZ', N]
elif atom == 'HG':
if amac == 'LEU':
parent = ['CG', C]
if amac == 'CYS':
parent = {'SG', S}
else:
parent = ['OG', O]
elif atom == 'HE2' or atom == 'CZ2' or atom == 'HE21' or (atom == 'HE22'):
if amac == 'HIS' or amac == 'GLN':
parent = ['NE2', N]
elif amac == 'PHE' or amac == 'TYR' or amac == 'TRP':
parent = ['CE2', C]
elif amac == 'GLU':
parent = ['OE2', O]
elif amac == 'MET' or amac == 'LYS':
parent = ['CE', C]
elif atom == 'HH':
parent = ['OH', O]
elif atom == 'CZ3':
parent = ['CE3', C]
elif atom == 'CH2':
parent = ['CZ2', C]
elif atom == 'HH2':
parent = ['CH2', C]
parent = parent
</DeepExtract>
target = amac + '#' + chainSeq + '#' + parent[0] + '#' + chainID + '#' + parent[1]
targetKey = 'atom'
for item in chainCache:
if item[1] == target:
targetKey = item[0]
break
if targetKey == 'atom':
print('TargetKey not set, will skip Rigid Body Joint')
else:
obj = bpy.data.objects[entry[0]]
line = tmpModel[entry[0]]
obj.location = line.get('loc')
line = tmpModel[targetKey]
nextEntry.location = line.get('loc')
<DeepExtract>
parentxaxis = Vector((1.0, 0.0, 0.0))
hingevector = Vector((bpy.data.objects[targetKey].location[0] - obj.location[0], bpy.data.objects[targetKey].location[1] - obj.location[1], bpy.data.objects[targetKey].location[2] - obj.location[2]))
rotvec2mapx2hingevector = parentxaxis.cross(hingevector)
rotvec2mapx2hingevector.normalize()
angle2mapx2hingevector = parentxaxis.angle(hingevector)
matrot = Matrix.Rotation(angle2mapx2hingevector, 3, rotvec2mapx2hingevector)
euler = matrot.to_euler()
</DeepExtract>
chainCache = sorted(chainCache_Nucleic.items())
for entry in chainCache:
line = entry[1].split('#')
amac = line[0]
chainSeq = line[1]
atom = line[2]
chainID = line[3]
if atom not in NucleicAtoms:
<DeepExtract>
if atom == "O4'":
parent = ["C4'", C]
elif atom == "C2'":
parent = ["C3'", C]
elif atom == "O2'":
parent = ["C2'", C]
elif atom == "C1'":
parent = ["C2'", C]
elif atom == 'N9':
parent = ["C1'", C]
elif atom == 'C8':
parent = ['N9', N]
elif atom == 'N7':
parent = ['C8', C]
elif atom == 'C4':
if amac == 'A' or amac == 'DA' or amac == 'G' or (amac == 'DG'):
parent = ['N9', N]
elif (amac == 'C' or amac == 'DC') or (amac == 'U' or amac == 'DT'):
parent = ['N3', N]
elif atom == 'C5':
parent = ['C4', C]
elif atom == 'N3':
if amac == 'A' or amac == 'DA' or amac == 'G' or (amac == 'DG'):
parent = ['C4', C]
elif amac == 'C' or amac == 'DC' or amac == 'U' or (amac == 'DT'):
parent = ['C2', C]
elif atom == 'C2':
if amac == 'A' or amac == 'DA' or amac == 'G' or (amac == 'DG'):
parent = ['N3', N]
elif amac == 'C' or amac == 'DC' or amac == 'U' or (amac == 'DT'):
parent = ['N1', N]
elif atom == 'N1':
if amac == 'A' or amac == 'DA' or amac == 'G' or (amac == 'DG'):
parent = ['C2', C]
elif amac == 'C' or amac == 'DC' or amac == 'U' or (amac == 'DT'):
parent = ["C1'", C]
elif atom == 'C6':
if amac == 'A' or amac == 'DA' or amac == 'G' or (amac == 'DG'):
parent = ['N1', N]
elif amac == 'C' or amac == 'DC' or amac == 'U' or (amac == 'DT'):
parent = ['C5', C]
elif atom == 'N6' or atom == 'O6':
parent = ['C6', C]
elif atom == 'N2' or atom == 'O2':
parent = ['C2', C]
elif atom == 'N4' or atom == 'O4':
parent = ['C4', C]
elif atom == 'C7':
parent = ['C5', C]
parent = parent
</DeepExtract>
target = amac + '#' + chainSeq + '#' + parent[0] + '#' + chainID + '#' + parent[1]
targetKey = 'atom'
for item in chainCache:
if item[1] == target:
targetKey = item[0]
break
if targetKey == 'atom':
print('TargetKey not set, will skip Rigid Body Joint')
else:
obj = bpy.data.objects[entry[0]]
line = tmpModel[entry[0]]
obj.location = line.get('loc')
line = tmpModel[targetKey]
nextEntry.location = line.get('loc')
<DeepExtract>
parentxaxis = Vector((1.0, 0.0, 0.0))
hingevector = Vector((bpy.data.objects[targetKey].location[0] - obj.location[0], bpy.data.objects[targetKey].location[1] - obj.location[1], bpy.data.objects[targetKey].location[2] - obj.location[2]))
rotvec2mapx2hingevector = parentxaxis.cross(hingevector)
rotvec2mapx2hingevector.normalize()
angle2mapx2hingevector = parentxaxis.angle(hingevector)
matrot = Matrix.Rotation(angle2mapx2hingevector, 3, rotvec2mapx2hingevector)
euler = matrot.to_euler()
</DeepExtract>
except Exception as E:
raise Exception('Unable to generate all bonds and constraints:', E)
try:
for (key, line) in pdbIDmodelsDictionary[int(pdbID)][m].items():
OBJ = bpy.data.objects[key]
OBJ.select_set(True)
OBJ.location = line.get('loc')
except Exception as E:
raise Exception('Unable to place 3D atoms:', E)
if len(pdbIDmodelsDictionary[int(pdbID)]) != 1:
try:
bpy.ops.anim.keyframe_insert_menu(type='Location')
except Exception as E:
print('Exception: ' + str(E))
if FinalFrame == 250:
bpy.context.scene.frame_end = curFrame
elif bpy.data.scenes['Scene'].frame_end < curFrame:
bpy.context.scene.frame_end = curFrame
curFrame += bpy.context.scene.BBDeltaFrame
else:
if FinalFrame == 250:
bpy.context.scene.frame_end = curFrame
elif bpy.data.scenes['Scene'].frame_end < curFrame:
bpy.context.scene.frame_end = curFrame
curFrame += bpy.context.scene.BBDeltaFrame
for frame in Frame.keys():
if Frame[frame] == '0':
Frame[frame] = curFrame - 100
<DeepExtract>
print('Empty Chains creation')
global Address
Address = bpy.context.scene.BBImportPath
core_cleaningUp()
</DeepExtract>
|
def core_createModels():
print('core_create_Models')
bpy.ops.object.empty_add(type='PLAIN_AXES')
bpy.context.view_layer.objects.active.name = copy.copy(str(bpy.context.scene.BBModelRemark) + '_' + str(chainPDB))
bpy.context.view_layer.objects.active.bb2_pdbID = copy.copy(str(pdbID))
parentEmpty = bpy.context.view_layer.objects.active
bpy.context.view_layer.objects.active.bb2_objectType = 'PDBEMPTY'
bpy.context.view_layer.objects.active.bb2_subID = copy.copy(str(chainPDB))
bpy.context.view_layer.objects.active.bb2_outputOptions = '1'
bpy.context.view_layer.objects.active.bb2_pdbPath = copy.copy(str(bpy.context.scene.BBImportPath))
bpy.context.view_layer.objects.active.location = (0.0, 0.0, 0.0)
bpy.context.view_layer.objects.active.hide_set(True)
FinalFrame = bpy.data.scenes['Scene'].frame_end
global chainCache
global curFrame
SetKeyFrame.append((len(tmpPDBmodelImportOrder) - 1) * 100)
id = bpy.context.scene.BBModelRemark
curFrame = 1
for m in tmpPDBmodelImportOrder:
model = pdbIDmodelsDictionary[int(pdbID)][m]
if not model:
continue
bpy.ops.object.select_all(action='DESELECT')
for o in bpy.data.objects:
o.select_set(False)
bpy.context.view_layer.objects.active = None
bpy.context.scene.frame_set(curFrame)
if curFrame == 1:
modelCopy = model.copy()
bpy.context.view_layer.objects.active = bpy.data.objects['atom']
bpy.data.objects['atom'].hide_viewport = False
bpy.data.objects['atom'].select_set(True)
bpy.data.objects['atom'].name = str(id)
object = bpy.context.view_layer.objects.active
bpy.ops.object.select_all(action='DESELECT')
bpy.context.view_layer.objects.active = object
bpy.context.view_layer.objects.active.select_set(True)
if int(len(model) - 1) <= 50:
for num in range(len(model) - 1):
bpy.ops.object.duplicate(linked=True, mode='DUMMY')
else:
lista = []
lista.append(bpy.context.view_layer.objects.active)
for obj in bpy.data.objects:
if bpy.context.view_layer.objects.active != obj and obj.select_get():
lista.append(obj)
total = int(str(numpy.log2(len(model) - 1)).split('.')[0])
i = 0
while i < total:
create_duplicate(lista)
i += 1
difference = int(len(model) - 1 - pow(2, int(str(numpy.log2(len(model) - 1)).split('.')[0]))) + 1
duplicate(difference)
try:
for (i, obj) in enumerate(bpy.data.objects):
if obj.name.split('.')[0] == id and obj.type == 'MESH':
entry = modelCopy.popitem()
obj.name = entry[0]
index = str(entry[1])[76:78].strip()
obj.material_slots[0].material = bpy.data.materials[index]
obj.scale = [scale_cov[index][0], scale_cov[index][0], scale_cov[index][0]]
obj.BBInfo = str(entry[1])
obj.bb2_pdbID = copy.copy(str(pdbID))
obj.bb2_subID = obj.BBInfo[21:22]
obj.bb2_objectType = 'ATOM'
obj.select_set(True)
obj.parent = parentEmpty
Frame[parentEmpty] = 0
except Exception as E:
raise Exception('Unable to generate 3D model from PDB File', E)
if bpy.context.scene.BBImportMakeBonds:
try:
mainChainCache = mainChainCacheDict[int(pdbID)]
mainChainCache_Nucleic = mainChainCache_NucleicDict[int(pdbID)]
mainChainCache_Nucleic_Filtered = mainChainCache_Nucleic_FilteredDict[int(pdbID)]
chainCache = chainCacheDict[int(pdbID)]
chainCache_Nucleic = chainCache_NucleicDict[int(pdbID)]
tmpModel = pdbIDmodelsDictionary[int(pdbID)][m]
cacheSize = len(mainChainCache) - 1
for (i, entry) in enumerate(mainChainCache):
if i < cacheSize:
obj = bpy.data.objects[entry]
nextEntry = bpy.data.objects[mainChainCache[i + 1]]
line = tmpModel[entry]
obj.location = line.get('loc')
line = tmpModel[mainChainCache[i + 1]]
nextEntry.location = line.get('loc')
parentxaxis = Vector((1.0, 0.0, 0.0))
hingevector = Vector((nextEntry.location[0] - obj.location[0], nextEntry.location[1] - obj.location[1], nextEntry.location[2] - obj.location[2]))
rotvec2mapx2hingevector = parentxaxis.cross(hingevector)
rotvec2mapx2hingevector.normalize()
angle2mapx2hingevector = parentxaxis.angle(hingevector)
matrot = Matrix.Rotation(angle2mapx2hingevector, 3, rotvec2mapx2hingevector)
euler = matrot.to_euler()
cacheSize = len(mainChainCache_Nucleic_Filtered) - 1
for (i, entry) in enumerate(mainChainCache_Nucleic_Filtered):
if i < cacheSize:
obj = bpy.data.objects[entry]
nextEntry = bpy.data.objects[mainChainCache_Nucleic_Filtered[i + 1]]
line = tmpModel[entry]
obj.location = line.get('loc')
line = tmpModel[mainChainCache_Nucleic_Filtered[i + 1]]
nextEntry.location = line.get('loc')
parentxaxis = Vector((1.0, 0.0, 0.0))
hingevector = Vector((nextEntry.location[0] - obj.location[0], nextEntry.location[1] - obj.location[1], nextEntry.location[2] - obj.location[2]))
rotvec2mapx2hingevector = parentxaxis.cross(hingevector)
rotvec2mapx2hingevector.normalize()
angle2mapx2hingevector = parentxaxis.angle(hingevector)
matrot = Matrix.Rotation(angle2mapx2hingevector, 3, rotvec2mapx2hingevector)
euler = matrot.to_euler()
chainCache = sorted(chainCache.items())
for entry in chainCache:
line = entry[1].split('#')
amac = line[0]
chainSeq = line[1]
atom = line[2]
chainID = line[3]
if atom != C and atom != CA and (atom != N) and (atom != H):
if atom == 'O' or atom == 'OXT':
parent = ['C', C]
elif atom == 'CB' or atom == 'HA' or atom == 'HA2' or (atom == 'HA3'):
parent = ['CA', C]
elif atom == 'SG' or 'HB' in atom or 'OG' in atom or ('CG' in atom):
parent = ['CB', C]
elif atom == 'H' or atom == 'H1' or atom == 'H2' or (atom == 'H3'):
parent = ['N', N]
elif atom == 'HG1':
parent = ['OG1', O]
elif atom == 'HG23' or atom == 'HG22' or atom == 'HG21':
parent = ['CG2', C]
elif atom == 'SD' or 'CD' in atom or 'ND' in atom or (atom == 'HG2') or (atom == 'HG3') or (atom == 'OD1') or (atom == 'OD2') or (atom == 'HG12') or (atom == 'HG13') or (atom == 'HG13'):
if amac == 'ILE' or amac == 'VAL':
parent = ['CG1', C]
else:
parent = ['CG', C]
elif atom == 'CE2' or atom == 'CE3' or atom == 'NE2' or (atom == 'HD2'):
if amac == 'GLN':
parent = ['CD', C]
elif amac == 'ARG' or amac == 'LYS' or amac == 'PRO':
parent = ['CD', C]
elif amac == 'ASP':
parent = ['OD2', O]
else:
parent = ['CD2', C]
elif atom == 'CE1' or atom == 'HD11' or atom == 'HD12' or (atom == 'HD13') or (atom == 'HD1') or (atom == 'NE1'):
if amac == 'HIS':
parent = ['ND1', N]
else:
parent = ['CD1', C]
elif atom == 'NE' or atom == 'HD3' or atom == 'CE' or (atom == 'OE1') or (atom == 'OE2'):
if amac == 'MET':
parent = ['SD', S]
else:
parent = ['CD', C]
elif atom == 'CZ' or atom == 'HE' or atom == 'HE1':
if amac == 'ARG':
parent = ['NE', N]
elif amac == 'TRP':
parent = ['NE1', N]
elif amac == 'MET':
parent = ['CE', C]
elif amac == 'PHE' or amac == 'HIS' or amac == 'TYR':
parent = ['CE1', C]
elif atom == 'NH1' or atom == 'NH2' or atom == 'HZ' or (atom == 'OH'):
parent = ['CZ', C]
elif atom == 'HH11' or atom == 'HH12' or atom == '1HH1' or (atom == '1HH2'):
parent = ['NH1', N]
elif atom == 'HH21' or atom == 'HH22' or atom == '2HH2' or (atom == '1HH2'):
parent = ['NH2', N]
elif atom == 'HD21' or atom == 'HD22' or atom == 'HD23':
if amac == 'LEU':
parent = ['CD2', C]
else:
parent = ['ND2', N]
elif atom == 'HE3' or atom == 'NZ':
if amac == 'TRP':
parent = ['CE3', C]
else:
parent = ['CE', C]
elif atom == 'HZ1' or atom == 'HZ2' or atom == 'HZ3':
if amac == 'TRP' and atom == 'HZ2':
parent = ['CZ2', S]
elif amac == 'TRP' and atom == 'HZ3':
parent = ['CZ3', S]
else:
parent = ['NZ', N]
elif atom == 'HG':
if amac == 'LEU':
parent = ['CG', C]
if amac == 'CYS':
parent = {'SG', S}
else:
parent = ['OG', O]
elif atom == 'HE2' or atom == 'CZ2' or atom == 'HE21' or (atom == 'HE22'):
if amac == 'HIS' or amac == 'GLN':
parent = ['NE2', N]
elif amac == 'PHE' or amac == 'TYR' or amac == 'TRP':
parent = ['CE2', C]
elif amac == 'GLU':
parent = ['OE2', O]
elif amac == 'MET' or amac == 'LYS':
parent = ['CE', C]
elif atom == 'HH':
parent = ['OH', O]
elif atom == 'CZ3':
parent = ['CE3', C]
elif atom == 'CH2':
parent = ['CZ2', C]
elif atom == 'HH2':
parent = ['CH2', C]
parent = parent
target = amac + '#' + chainSeq + '#' + parent[0] + '#' + chainID + '#' + parent[1]
targetKey = 'atom'
for item in chainCache:
if item[1] == target:
targetKey = item[0]
break
if targetKey == 'atom':
print('TargetKey not set, will skip Rigid Body Joint')
else:
obj = bpy.data.objects[entry[0]]
line = tmpModel[entry[0]]
obj.location = line.get('loc')
line = tmpModel[targetKey]
nextEntry.location = line.get('loc')
parentxaxis = Vector((1.0, 0.0, 0.0))
hingevector = Vector((bpy.data.objects[targetKey].location[0] - obj.location[0], bpy.data.objects[targetKey].location[1] - obj.location[1], bpy.data.objects[targetKey].location[2] - obj.location[2]))
rotvec2mapx2hingevector = parentxaxis.cross(hingevector)
rotvec2mapx2hingevector.normalize()
angle2mapx2hingevector = parentxaxis.angle(hingevector)
matrot = Matrix.Rotation(angle2mapx2hingevector, 3, rotvec2mapx2hingevector)
euler = matrot.to_euler()
chainCache = sorted(chainCache_Nucleic.items())
for entry in chainCache:
line = entry[1].split('#')
amac = line[0]
chainSeq = line[1]
atom = line[2]
chainID = line[3]
if atom not in NucleicAtoms:
if atom == "O4'":
parent = ["C4'", C]
elif atom == "C2'":
parent = ["C3'", C]
elif atom == "O2'":
parent = ["C2'", C]
elif atom == "C1'":
parent = ["C2'", C]
elif atom == 'N9':
parent = ["C1'", C]
elif atom == 'C8':
parent = ['N9', N]
elif atom == 'N7':
parent = ['C8', C]
elif atom == 'C4':
if amac == 'A' or amac == 'DA' or amac == 'G' or (amac == 'DG'):
parent = ['N9', N]
elif (amac == 'C' or amac == 'DC') or (amac == 'U' or amac == 'DT'):
parent = ['N3', N]
elif atom == 'C5':
parent = ['C4', C]
elif atom == 'N3':
if amac == 'A' or amac == 'DA' or amac == 'G' or (amac == 'DG'):
parent = ['C4', C]
elif amac == 'C' or amac == 'DC' or amac == 'U' or (amac == 'DT'):
parent = ['C2', C]
elif atom == 'C2':
if amac == 'A' or amac == 'DA' or amac == 'G' or (amac == 'DG'):
parent = ['N3', N]
elif amac == 'C' or amac == 'DC' or amac == 'U' or (amac == 'DT'):
parent = ['N1', N]
elif atom == 'N1':
if amac == 'A' or amac == 'DA' or amac == 'G' or (amac == 'DG'):
parent = ['C2', C]
elif amac == 'C' or amac == 'DC' or amac == 'U' or (amac == 'DT'):
parent = ["C1'", C]
elif atom == 'C6':
if amac == 'A' or amac == 'DA' or amac == 'G' or (amac == 'DG'):
parent = ['N1', N]
elif amac == 'C' or amac == 'DC' or amac == 'U' or (amac == 'DT'):
parent = ['C5', C]
elif atom == 'N6' or atom == 'O6':
parent = ['C6', C]
elif atom == 'N2' or atom == 'O2':
parent = ['C2', C]
elif atom == 'N4' or atom == 'O4':
parent = ['C4', C]
elif atom == 'C7':
parent = ['C5', C]
parent = parent
target = amac + '#' + chainSeq + '#' + parent[0] + '#' + chainID + '#' + parent[1]
targetKey = 'atom'
for item in chainCache:
if item[1] == target:
targetKey = item[0]
break
if targetKey == 'atom':
print('TargetKey not set, will skip Rigid Body Joint')
else:
obj = bpy.data.objects[entry[0]]
line = tmpModel[entry[0]]
obj.location = line.get('loc')
line = tmpModel[targetKey]
nextEntry.location = line.get('loc')
parentxaxis = Vector((1.0, 0.0, 0.0))
hingevector = Vector((bpy.data.objects[targetKey].location[0] - obj.location[0], bpy.data.objects[targetKey].location[1] - obj.location[1], bpy.data.objects[targetKey].location[2] - obj.location[2]))
rotvec2mapx2hingevector = parentxaxis.cross(hingevector)
rotvec2mapx2hingevector.normalize()
angle2mapx2hingevector = parentxaxis.angle(hingevector)
matrot = Matrix.Rotation(angle2mapx2hingevector, 3, rotvec2mapx2hingevector)
euler = matrot.to_euler()
except Exception as E:
raise Exception('Unable to generate all bonds and constraints:', E)
try:
for (key, line) in pdbIDmodelsDictionary[int(pdbID)][m].items():
OBJ = bpy.data.objects[key]
OBJ.select_set(True)
OBJ.location = line.get('loc')
except Exception as E:
raise Exception('Unable to place 3D atoms:', E)
if len(pdbIDmodelsDictionary[int(pdbID)]) != 1:
try:
bpy.ops.anim.keyframe_insert_menu(type='Location')
except Exception as E:
print('Exception: ' + str(E))
if FinalFrame == 250:
bpy.context.scene.frame_end = curFrame
elif bpy.data.scenes['Scene'].frame_end < curFrame:
bpy.context.scene.frame_end = curFrame
curFrame += bpy.context.scene.BBDeltaFrame
else:
if FinalFrame == 250:
bpy.context.scene.frame_end = curFrame
elif bpy.data.scenes['Scene'].frame_end < curFrame:
bpy.context.scene.frame_end = curFrame
curFrame += bpy.context.scene.BBDeltaFrame
for frame in Frame.keys():
if Frame[frame] == '0':
Frame[frame] = curFrame - 100
print('Empty Chains creation')
global Address
Address = bpy.context.scene.BBImportPath
core_cleaningUp()
|
BioBlender21
|
positive
|
def validate(self):
if isinstance(self.data, list):
for st in self.data:
<DeepExtract>
self.check_if_dict(st, 'Statement')
self.check_allowed_fields(statement_allowed_fields, st, 'Statement')
self.check_required_fields(statement_required_fields, st, 'Statement')
if 'version' in st:
if isinstance(st['version'], str):
version_regex = re.compile('^1\\.0(\\.\\d+)?$')
if not version_regex.match(st['version']):
self.return_error('%s is not a supported version' % st['version'])
else:
self.return_error('Version must be a string')
if 'id' in st:
self.validate_uuid(st['id'], 'Statement id')
if 'timestamp' in st:
timestamp = st['timestamp']
try:
parse_datetime(timestamp)
if timestamp.endswith('-00') or timestamp.endswith('-0000') or timestamp.endswith('-00:00'):
self.return_error('Timestamp error - Statement Timestamp Illegal offset (-00, -0000, or -00:00) %s' % timestamp)
except Exception as e:
self.return_error('Timestamp error - There was an error while parsing the date from %s -- Error: %s' % (timestamp, str(e)))
if 'stored' in st:
stored = st['stored']
try:
parse_datetime(stored)
except Exception as e:
self.return_error('Stored error - There was an error while parsing the date from %s -- Error: %s' % (stored, str(e)))
self.validate_agent(st['actor'], 'actor')
self.validate_verb(st['verb'], st['object'])
stmt_object = st['object']
self.validate_object(stmt_object)
if 'objectType' not in stmt_object:
st['object']['objectType'] = 'Activity'
if 'result' in st:
self.validate_result(st['result'])
if 'context' in st:
self.validate_context(st['context'], stmt_object)
if 'authority' in st:
self.validate_agent(st['authority'], 'authority')
if 'objectType' in st['authority'] and st['authority']['objectType'] == 'Group':
self.validate_authority_group(st['authority'])
if 'attachments' in st:
self.validate_attachments(st['attachments'])
</DeepExtract>
return 'All Statements are valid'
elif isinstance(self.data, dict):
<DeepExtract>
self.check_if_dict(self.data, 'Statement')
self.check_allowed_fields(statement_allowed_fields, self.data, 'Statement')
self.check_required_fields(statement_required_fields, self.data, 'Statement')
if 'version' in self.data:
if isinstance(self.data['version'], str):
version_regex = re.compile('^1\\.0(\\.\\d+)?$')
if not version_regex.match(self.data['version']):
self.return_error('%s is not a supported version' % self.data['version'])
else:
self.return_error('Version must be a string')
if 'id' in self.data:
self.validate_uuid(self.data['id'], 'Statement id')
if 'timestamp' in self.data:
timestamp = self.data['timestamp']
try:
parse_datetime(timestamp)
if timestamp.endswith('-00') or timestamp.endswith('-0000') or timestamp.endswith('-00:00'):
self.return_error('Timestamp error - Statement Timestamp Illegal offset (-00, -0000, or -00:00) %s' % timestamp)
except Exception as e:
self.return_error('Timestamp error - There was an error while parsing the date from %s -- Error: %s' % (timestamp, str(e)))
if 'stored' in self.data:
stored = self.data['stored']
try:
parse_datetime(stored)
except Exception as e:
self.return_error('Stored error - There was an error while parsing the date from %s -- Error: %s' % (stored, str(e)))
self.validate_agent(self.data['actor'], 'actor')
self.validate_verb(self.data['verb'], self.data['object'])
stmt_object = self.data['object']
self.validate_object(stmt_object)
if 'objectType' not in stmt_object:
self.data['object']['objectType'] = 'Activity'
if 'result' in self.data:
self.validate_result(self.data['result'])
if 'context' in self.data:
self.validate_context(self.data['context'], stmt_object)
if 'authority' in self.data:
self.validate_agent(self.data['authority'], 'authority')
if 'objectType' in self.data['authority'] and self.data['authority']['objectType'] == 'Group':
self.validate_authority_group(self.data['authority'])
if 'attachments' in self.data:
self.validate_attachments(self.data['attachments'])
</DeepExtract>
return 'Statement is valid'
else:
<DeepExtract>
raise ParamError(f'There are no statements to validate, payload: {self.data}')
</DeepExtract>
|
def validate(self):
if isinstance(self.data, list):
for st in self.data:
self.check_if_dict(st, 'Statement')
self.check_allowed_fields(statement_allowed_fields, st, 'Statement')
self.check_required_fields(statement_required_fields, st, 'Statement')
if 'version' in st:
if isinstance(st['version'], str):
version_regex = re.compile('^1\\.0(\\.\\d+)?$')
if not version_regex.match(st['version']):
self.return_error('%s is not a supported version' % st['version'])
else:
self.return_error('Version must be a string')
if 'id' in st:
self.validate_uuid(st['id'], 'Statement id')
if 'timestamp' in st:
timestamp = st['timestamp']
try:
parse_datetime(timestamp)
if timestamp.endswith('-00') or timestamp.endswith('-0000') or timestamp.endswith('-00:00'):
self.return_error('Timestamp error - Statement Timestamp Illegal offset (-00, -0000, or -00:00) %s' % timestamp)
except Exception as e:
self.return_error('Timestamp error - There was an error while parsing the date from %s -- Error: %s' % (timestamp, str(e)))
if 'stored' in st:
stored = st['stored']
try:
parse_datetime(stored)
except Exception as e:
self.return_error('Stored error - There was an error while parsing the date from %s -- Error: %s' % (stored, str(e)))
self.validate_agent(st['actor'], 'actor')
self.validate_verb(st['verb'], st['object'])
stmt_object = st['object']
self.validate_object(stmt_object)
if 'objectType' not in stmt_object:
st['object']['objectType'] = 'Activity'
if 'result' in st:
self.validate_result(st['result'])
if 'context' in st:
self.validate_context(st['context'], stmt_object)
if 'authority' in st:
self.validate_agent(st['authority'], 'authority')
if 'objectType' in st['authority'] and st['authority']['objectType'] == 'Group':
self.validate_authority_group(st['authority'])
if 'attachments' in st:
self.validate_attachments(st['attachments'])
return 'All Statements are valid'
elif isinstance(self.data, dict):
self.check_if_dict(self.data, 'Statement')
self.check_allowed_fields(statement_allowed_fields, self.data, 'Statement')
self.check_required_fields(statement_required_fields, self.data, 'Statement')
if 'version' in self.data:
if isinstance(self.data['version'], str):
version_regex = re.compile('^1\\.0(\\.\\d+)?$')
if not version_regex.match(self.data['version']):
self.return_error('%s is not a supported version' % self.data['version'])
else:
self.return_error('Version must be a string')
if 'id' in self.data:
self.validate_uuid(self.data['id'], 'Statement id')
if 'timestamp' in self.data:
timestamp = self.data['timestamp']
try:
parse_datetime(timestamp)
if timestamp.endswith('-00') or timestamp.endswith('-0000') or timestamp.endswith('-00:00'):
self.return_error('Timestamp error - Statement Timestamp Illegal offset (-00, -0000, or -00:00) %s' % timestamp)
except Exception as e:
self.return_error('Timestamp error - There was an error while parsing the date from %s -- Error: %s' % (timestamp, str(e)))
if 'stored' in self.data:
stored = self.data['stored']
try:
parse_datetime(stored)
except Exception as e:
self.return_error('Stored error - There was an error while parsing the date from %s -- Error: %s' % (stored, str(e)))
self.validate_agent(self.data['actor'], 'actor')
self.validate_verb(self.data['verb'], self.data['object'])
stmt_object = self.data['object']
self.validate_object(stmt_object)
if 'objectType' not in stmt_object:
self.data['object']['objectType'] = 'Activity'
if 'result' in self.data:
self.validate_result(self.data['result'])
if 'context' in self.data:
self.validate_context(self.data['context'], stmt_object)
if 'authority' in self.data:
self.validate_agent(self.data['authority'], 'authority')
if 'objectType' in self.data['authority'] and self.data['authority']['objectType'] == 'Group':
self.validate_authority_group(self.data['authority'])
if 'attachments' in self.data:
self.validate_attachments(self.data['attachments'])
return 'Statement is valid'
else:
raise ParamError(f'There are no statements to validate, payload: {self.data}')
|
ADL_LRS
|
positive
|
def get_devices(hid_filter=None):
"""Filter a HID device list by current object parameters. Devices
must match the all of the filtering parameters
"""
if not hid_filter:
if type(hid_filter) == type(None):
<DeepExtract>
guid = winapi.GetHidGuid()
results = []
required_size = DWORD()
info_data = winapi.SP_DEVINFO_DATA()
info_data.cb_size = sizeof(winapi.SP_DEVINFO_DATA)
with winapi.DeviceInterfaceSetInfo(guid) as h_info:
for interface_data in winapi.enum_device_interfaces(h_info, guid):
device_path = winapi.get_device_path(h_info, interface_data, byref(info_data))
parent_device = c_ulong()
if setup_api.CM_Get_Parent(byref(parent_device), info_data.dev_inst, 0) != 0:
parent_device.value = 0
required_size.value = 0
winapi.SetupDiGetDeviceInstanceId(h_info, byref(info_data), None, 0, byref(required_size))
device_instance_id = create_unicode_buffer(required_size.value)
if required_size.value > 0:
winapi.SetupDiGetDeviceInstanceId(h_info, byref(info_data), device_instance_id, required_size, byref(required_size))
hid_device = HidDevice(device_path, parent_device.value, device_instance_id.value)
else:
hid_device = HidDevice(device_path, parent_device.value)
if hid_device.vendor_id:
results.append(hid_device)
hid_filter = results
</DeepExtract>
else:
return hid_filter
results = {}.fromkeys(hid_filter)
validating_attributes = list(self.filter_params.keys())
if not len(results):
return {}
for device in list(results.keys()):
if not device.is_active():
del results[device]
if not len(results):
return {}
for item in validating_attributes:
if item.endswith('_includes'):
item = item[:-len('_includes')]
elif item.endswith('_mask'):
item = item[:-len('_mask')]
elif item + '_mask' in self.filter_params or item + '_includes' in self.filter_params:
continue
elif item not in HidDevice.filter_attributes:
continue
for device in list(results.keys()):
if not hasattr(device, item):
del results[device]
elif item + '_mask' in validating_attributes:
if getattr(device, item) & self.filter_params[item + '_mask'] != self.filter_params[item] & self.filter_params[item + '_mask']:
del results[device]
elif item + '_includes' in validating_attributes:
if self.filter_params[item + '_includes'] not in getattr(device, item):
del results[device]
elif getattr(device, item) != self.filter_params[item]:
del results[device]
return list(results.keys())
|
def get_devices(hid_filter=None):
"""Filter a HID device list by current object parameters. Devices
must match the all of the filtering parameters
"""
if not hid_filter:
if type(hid_filter) == type(None):
guid = winapi.GetHidGuid()
results = []
required_size = DWORD()
info_data = winapi.SP_DEVINFO_DATA()
info_data.cb_size = sizeof(winapi.SP_DEVINFO_DATA)
with winapi.DeviceInterfaceSetInfo(guid) as h_info:
for interface_data in winapi.enum_device_interfaces(h_info, guid):
device_path = winapi.get_device_path(h_info, interface_data, byref(info_data))
parent_device = c_ulong()
if setup_api.CM_Get_Parent(byref(parent_device), info_data.dev_inst, 0) != 0:
parent_device.value = 0
required_size.value = 0
winapi.SetupDiGetDeviceInstanceId(h_info, byref(info_data), None, 0, byref(required_size))
device_instance_id = create_unicode_buffer(required_size.value)
if required_size.value > 0:
winapi.SetupDiGetDeviceInstanceId(h_info, byref(info_data), device_instance_id, required_size, byref(required_size))
hid_device = HidDevice(device_path, parent_device.value, device_instance_id.value)
else:
hid_device = HidDevice(device_path, parent_device.value)
if hid_device.vendor_id:
results.append(hid_device)
hid_filter = results
else:
return hid_filter
results = {}.fromkeys(hid_filter)
validating_attributes = list(self.filter_params.keys())
if not len(results):
return {}
for device in list(results.keys()):
if not device.is_active():
del results[device]
if not len(results):
return {}
for item in validating_attributes:
if item.endswith('_includes'):
item = item[:-len('_includes')]
elif item.endswith('_mask'):
item = item[:-len('_mask')]
elif item + '_mask' in self.filter_params or item + '_includes' in self.filter_params:
continue
elif item not in HidDevice.filter_attributes:
continue
for device in list(results.keys()):
if not hasattr(device, item):
del results[device]
elif item + '_mask' in validating_attributes:
if getattr(device, item) & self.filter_params[item + '_mask'] != self.filter_params[item] & self.filter_params[item + '_mask']:
del results[device]
elif item + '_includes' in validating_attributes:
if self.filter_params[item + '_includes'] not in getattr(device, item):
del results[device]
elif getattr(device, item) != self.filter_params[item]:
del results[device]
return list(results.keys())
|
CyKit
|
positive
|
def _create_model_old(self):
<DeepExtract>
self.upper_rescalings = np.empty(self.total_size)
self.lower_rescalings = np.empty(self.total_size)
for (var_p_index, var_p_name) in enumerate(self.var_p_names):
high = self.var_p_highs[var_p_index]
low = self.var_p_lows[var_p_index]
if self.var_p_types[var_p_index] == 'float':
self.upper_rescalings[var_p_index] = high + 0.1 * (high - low)
self.lower_rescalings[var_p_index] = low - 0.1 * (high - low)
elif self.var_p_types[var_p_index] == 'integer':
self.upper_rescalings[var_p_index] = high
self.lower_rescalings[var_p_index] = low
self.network_input = 2.0 * (self.observed_params - self.lower_rescalings) / (self.upper_rescalings - self.lower_rescalings) - 1.0
print('OBSERVED_PARAMS', self.observed_params)
print('NETWORK_INPUT', self.network_input)
quit()
</DeepExtract>
with pm.Model() as self.model:
for layer_index in range(self.num_layers):
setattr(self, 'w%d' % layer_index, self.__get_weights(layer_index, self.weight_shapes[layer_index]))
setattr(self, 'b%d' % layer_index, self.__get_biases(layer_index, self.bias_shapes[layer_index]))
if layer_index == 0:
fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(self.network_input, self.weight(layer_index)) + self.bias(layer_index)))
setattr(self, 'fc%d' % layer_index, fc)
elif 0 < layer_index < self.num_layers - 1:
fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)))
setattr(self, 'fc%d' % layer_index, fc)
else:
self.loc = pm.Deterministic('loc', (self.upper_rescalings - self.lower_rescalings) * pm.math.sigmoid(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)) + self.lower_rescalings)
self.tau_rescaling = np.zeros((self.num_obs, self.observed_params.shape[1]))
for obs_index in range(self.num_obs):
self.tau_rescaling[obs_index] += self.domain_ranges
self.tau_rescaling = self.tau_rescaling ** 2
self.tau = pm.Gamma('tau', self.num_obs ** 2, 1.0, shape=(self.num_obs, self.observed_params.shape[1]))
self.tau = self.tau / self.tau_rescaling
self.scale = pm.Deterministic('scale', 1.0 / pm.math.sqrt(self.tau))
print(self.observed_params.shape)
print(self._floats)
print(self._integers)
quit()
self.out = pm.Normal('out', self.loc, tau=self.tau, observed=self.observed_params)
alpha = ((n - mu) / sigma ** 2 - 1) / (n / mu - (n - mu) / sigma ** 2)
beta = (n / mu - 1) * alpha
self.alpha = pm.Deterministic('alpha', alpha)
self.beta = pm.Deterministic('beta', beta)
|
def _create_model_old(self):
self.upper_rescalings = np.empty(self.total_size)
self.lower_rescalings = np.empty(self.total_size)
for (var_p_index, var_p_name) in enumerate(self.var_p_names):
high = self.var_p_highs[var_p_index]
low = self.var_p_lows[var_p_index]
if self.var_p_types[var_p_index] == 'float':
self.upper_rescalings[var_p_index] = high + 0.1 * (high - low)
self.lower_rescalings[var_p_index] = low - 0.1 * (high - low)
elif self.var_p_types[var_p_index] == 'integer':
self.upper_rescalings[var_p_index] = high
self.lower_rescalings[var_p_index] = low
self.network_input = 2.0 * (self.observed_params - self.lower_rescalings) / (self.upper_rescalings - self.lower_rescalings) - 1.0
print('OBSERVED_PARAMS', self.observed_params)
print('NETWORK_INPUT', self.network_input)
quit()
with pm.Model() as self.model:
for layer_index in range(self.num_layers):
setattr(self, 'w%d' % layer_index, self.__get_weights(layer_index, self.weight_shapes[layer_index]))
setattr(self, 'b%d' % layer_index, self.__get_biases(layer_index, self.bias_shapes[layer_index]))
if layer_index == 0:
fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(self.network_input, self.weight(layer_index)) + self.bias(layer_index)))
setattr(self, 'fc%d' % layer_index, fc)
elif 0 < layer_index < self.num_layers - 1:
fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)))
setattr(self, 'fc%d' % layer_index, fc)
else:
self.loc = pm.Deterministic('loc', (self.upper_rescalings - self.lower_rescalings) * pm.math.sigmoid(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)) + self.lower_rescalings)
self.tau_rescaling = np.zeros((self.num_obs, self.observed_params.shape[1]))
for obs_index in range(self.num_obs):
self.tau_rescaling[obs_index] += self.domain_ranges
self.tau_rescaling = self.tau_rescaling ** 2
self.tau = pm.Gamma('tau', self.num_obs ** 2, 1.0, shape=(self.num_obs, self.observed_params.shape[1]))
self.tau = self.tau / self.tau_rescaling
self.scale = pm.Deterministic('scale', 1.0 / pm.math.sqrt(self.tau))
print(self.observed_params.shape)
print(self._floats)
print(self._integers)
quit()
self.out = pm.Normal('out', self.loc, tau=self.tau, observed=self.observed_params)
alpha = ((n - mu) / sigma ** 2 - 1) / (n / mu - (n - mu) / sigma ** 2)
beta = (n / mu - 1) * alpha
self.alpha = pm.Deterministic('alpha', alpha)
self.beta = pm.Deterministic('beta', beta)
|
ChemOS
|
positive
|
def eval_with_noise(env, net, noise):
old_params = net.state_dict()
for (p, p_n) in zip(net.parameters(), noise):
p.data += NOISE_STD * p_n
<DeepExtract>
obs = env.reset()
reward = 0.0
steps = 0
while True:
obs_v = torch.FloatTensor([obs])
act_prob = net(obs_v)
acts = act_prob.max(dim=1)[1]
(obs, r, done, _) = env.step(acts.data.numpy()[0])
reward += r
steps += 1
if done:
break
(r, s) = (reward, steps)
</DeepExtract>
net.load_state_dict(old_params)
return (r, s)
|
def eval_with_noise(env, net, noise):
old_params = net.state_dict()
for (p, p_n) in zip(net.parameters(), noise):
p.data += NOISE_STD * p_n
obs = env.reset()
reward = 0.0
steps = 0
while True:
obs_v = torch.FloatTensor([obs])
act_prob = net(obs_v)
acts = act_prob.max(dim=1)[1]
(obs, r, done, _) = env.step(acts.data.numpy()[0])
reward += r
steps += 1
if done:
break
(r, s) = (reward, steps)
net.load_state_dict(old_params)
return (r, s)
|
Deep-Reinforcement-Learning-Hands-On
|
positive
|
def createChunks(input_file, output_dir, suffix, chunk_size):
os.makedirs(output_dir, exist_ok=True)
<DeepExtract>
lines = []
with open(input_file) as f:
for line in f:
assert line.strip(), 'Empty line found'
lines.append(line.strip())
input_lines = lines
</DeepExtract>
no_chunks = math.ceil(len(input_lines) / chunk_size)
for i in range(no_chunks):
output_file = os.path.join(output_dir, f'{i}.{suffix}')
lines = input_lines[i * chunk_size:(i + 1) * chunk_size]
<DeepExtract>
with open(output_file, 'w') as outf:
for line in lines:
print(line.strip(), file=outf)
</DeepExtract>
|
def createChunks(input_file, output_dir, suffix, chunk_size):
os.makedirs(output_dir, exist_ok=True)
lines = []
with open(input_file) as f:
for line in f:
assert line.strip(), 'Empty line found'
lines.append(line.strip())
input_lines = lines
no_chunks = math.ceil(len(input_lines) / chunk_size)
for i in range(no_chunks):
output_file = os.path.join(output_dir, f'{i}.{suffix}')
lines = input_lines[i * chunk_size:(i + 1) * chunk_size]
with open(output_file, 'w') as outf:
for line in lines:
print(line.strip(), file=outf)
|
banglanmt
|
positive
|
def _report_training(self, step, num_steps, learning_rate, report_stats):
"""
See base class method `ReportMgrBase.report_training`.
"""
report_stats.output(step, num_steps, learning_rate, self.start_time)
<DeepExtract>
if self.tensorboard_writer is not None:
report_stats.log_tensorboard('progress', self.tensorboard_writer, learning_rate, self.progress_step)
</DeepExtract>
report_stats = onmt.utils.Statistics()
return report_stats
|
def _report_training(self, step, num_steps, learning_rate, report_stats):
"""
See base class method `ReportMgrBase.report_training`.
"""
report_stats.output(step, num_steps, learning_rate, self.start_time)
if self.tensorboard_writer is not None:
report_stats.log_tensorboard('progress', self.tensorboard_writer, learning_rate, self.progress_step)
report_stats = onmt.utils.Statistics()
return report_stats
|
conversational-QG
|
positive
|
@pytest.mark.django_db
@patch(f'{code_path}.rqi')
@patch('polaris.sep10.utils.check_auth', mock_check_auth_success)
def test_get_prices_failure_not_enough_prices_returned(mock_rqi, client):
<DeepExtract>
usd_stellar = Asset.objects.create(code='usd', issuer=Keypair.random().public_key, sep38_enabled=True)
brl_offchain = OffChainAsset.objects.create(scheme='iso4217', identifier='BRL', country_codes='BRA')
delivery_methods = [DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.buy, name='cash_pickup', description='cash pick-up'), DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.sell, name='cash_dropoff', description='cash drop-off')]
brl_offchain.delivery_methods.add(*delivery_methods)
pair = ExchangePair.objects.create(buy_asset=brl_offchain.asset_identification_format, sell_asset=usd_stellar.asset_identification_format)
data = {'stellar_assets': [usd_stellar], 'offchain_assets': [brl_offchain], 'exchange_pairs': [pair], 'delivery_methods': delivery_methods}
</DeepExtract>
mock_rqi.get_prices.return_value = []
response = client.get(PRICES_ENDPOINT, {'sell_asset': data['stellar_assets'][0].asset_identification_format, 'sell_amount': 100})
assert response.status_code == 500, response.content
mock_rqi.get_prices.assert_called_once()
kwargs = mock_rqi.get_prices.call_args[1]
del kwargs['token']
del kwargs['request']
assert kwargs == {'sell_asset': data['stellar_assets'][0], 'sell_amount': Decimal(100), 'buy_assets': data['offchain_assets'], 'buy_delivery_method': None, 'sell_delivery_method': None, 'country_code': None}
|
@pytest.mark.django_db
@patch(f'{code_path}.rqi')
@patch('polaris.sep10.utils.check_auth', mock_check_auth_success)
def test_get_prices_failure_not_enough_prices_returned(mock_rqi, client):
usd_stellar = Asset.objects.create(code='usd', issuer=Keypair.random().public_key, sep38_enabled=True)
brl_offchain = OffChainAsset.objects.create(scheme='iso4217', identifier='BRL', country_codes='BRA')
delivery_methods = [DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.buy, name='cash_pickup', description='cash pick-up'), DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.sell, name='cash_dropoff', description='cash drop-off')]
brl_offchain.delivery_methods.add(*delivery_methods)
pair = ExchangePair.objects.create(buy_asset=brl_offchain.asset_identification_format, sell_asset=usd_stellar.asset_identification_format)
data = {'stellar_assets': [usd_stellar], 'offchain_assets': [brl_offchain], 'exchange_pairs': [pair], 'delivery_methods': delivery_methods}
mock_rqi.get_prices.return_value = []
response = client.get(PRICES_ENDPOINT, {'sell_asset': data['stellar_assets'][0].asset_identification_format, 'sell_amount': 100})
assert response.status_code == 500, response.content
mock_rqi.get_prices.assert_called_once()
kwargs = mock_rqi.get_prices.call_args[1]
del kwargs['token']
del kwargs['request']
assert kwargs == {'sell_asset': data['stellar_assets'][0], 'sell_amount': Decimal(100), 'buy_assets': data['offchain_assets'], 'buy_delivery_method': None, 'sell_delivery_method': None, 'country_code': None}
|
django-polaris
|
positive
|
def __setitem__(self, key, value):
"""'Magic' value setter.
This function tries to guess at what kind of value you want to
store. If you pass in a valid UTF-8 or Unicode string, it
treats it as a text value. If you pass in a list, it treats it
as a list of string/Unicode values. If you pass in a string
that is not valid UTF-8, it assumes it is a binary value.
Python 3: all bytes will be assumed to be a byte value, even
if they are valid utf-8.
If you need to force a specific type of value (e.g. binary
data that also happens to be valid UTF-8, or an external
reference), use the APEValue factory and set the value to the
result of that::
from mutagen.apev2 import APEValue, EXTERNAL
tag['Website'] = APEValue('http://example.org', EXTERNAL)
"""
if not is_valid_apev2_key(key):
raise KeyError('%r is not a valid APEv2 key' % key)
if PY2:
key = key.encode('ascii')
if not isinstance(value, _APEValue):
if isinstance(value, text_type):
<DeepExtract>
if TEXT in (TEXT, EXTERNAL):
if not isinstance(value, text_type):
if PY3:
raise TypeError('str only for text/external values')
else:
value = value.encode('utf-8')
if TEXT == TEXT:
value = APETextValue(value, TEXT)
elif TEXT == BINARY:
value = APEBinaryValue(value, TEXT)
elif TEXT == EXTERNAL:
value = APEExtValue(value, TEXT)
else:
raise ValueError('kind must be TEXT, BINARY, or EXTERNAL')
</DeepExtract>
elif isinstance(value, list):
items = []
for v in value:
if not isinstance(v, text_type):
if PY3:
raise TypeError('item in list not str')
v = v.decode('utf-8')
items.append(v)
<DeepExtract>
            value = u'\x00'.join(items)
            if TEXT in (TEXT, EXTERNAL):
                if not isinstance(value, text_type):
                    if PY3:
                        raise TypeError('str only for text/external values')
                    else:
                        value = value.encode('utf-8')
                if TEXT == TEXT:
                    value = APETextValue(value, TEXT)
                elif TEXT == BINARY:
                    value = APEBinaryValue(value, TEXT)
                elif TEXT == EXTERNAL:
                    value = APEExtValue(value, TEXT)
            else:
                raise ValueError('kind must be TEXT, BINARY, or EXTERNAL')
</DeepExtract>
elif PY3:
<DeepExtract>
if BINARY in (TEXT, EXTERNAL):
if not isinstance(value, text_type):
if PY3:
raise TypeError('str only for text/external values')
else:
value = value.encode('utf-8')
if BINARY == TEXT:
value = APETextValue(value, BINARY)
elif BINARY == BINARY:
value = APEBinaryValue(value, BINARY)
elif BINARY == EXTERNAL:
value = APEExtValue(value, BINARY)
else:
raise ValueError('kind must be TEXT, BINARY, or EXTERNAL')
</DeepExtract>
else:
try:
value.decode('utf-8')
except UnicodeError:
<DeepExtract>
if BINARY in (TEXT, EXTERNAL):
if not isinstance(value, text_type):
if PY3:
raise TypeError('str only for text/external values')
else:
value = value.encode('utf-8')
if BINARY == TEXT:
value = APETextValue(value, BINARY)
elif BINARY == BINARY:
value = APEBinaryValue(value, BINARY)
elif BINARY == EXTERNAL:
value = APEExtValue(value, BINARY)
else:
raise ValueError('kind must be TEXT, BINARY, or EXTERNAL')
</DeepExtract>
else:
<DeepExtract>
if TEXT in (TEXT, EXTERNAL):
if not isinstance(value, text_type):
if PY3:
raise TypeError('str only for text/external values')
else:
value = value.encode('utf-8')
if TEXT == TEXT:
value = APETextValue(value, TEXT)
elif TEXT == BINARY:
value = APEBinaryValue(value, TEXT)
elif TEXT == EXTERNAL:
value = APEExtValue(value, TEXT)
else:
raise ValueError('kind must be TEXT, BINARY, or EXTERNAL')
</DeepExtract>
super(APEv2, self).__setitem__(key, value)
|
def __setitem__(self, key, value):
"""'Magic' value setter.
This function tries to guess at what kind of value you want to
store. If you pass in a valid UTF-8 or Unicode string, it
treats it as a text value. If you pass in a list, it treats it
as a list of string/Unicode values. If you pass in a string
that is not valid UTF-8, it assumes it is a binary value.
Python 3: all bytes will be assumed to be a byte value, even
if they are valid utf-8.
If you need to force a specific type of value (e.g. binary
data that also happens to be valid UTF-8, or an external
reference), use the APEValue factory and set the value to the
result of that::
from mutagen.apev2 import APEValue, EXTERNAL
tag['Website'] = APEValue('http://example.org', EXTERNAL)
"""
if not is_valid_apev2_key(key):
raise KeyError('%r is not a valid APEv2 key' % key)
if PY2:
key = key.encode('ascii')
if not isinstance(value, _APEValue):
if isinstance(value, text_type):
if TEXT in (TEXT, EXTERNAL):
if not isinstance(value, text_type):
if PY3:
raise TypeError('str only for text/external values')
else:
value = value.encode('utf-8')
if TEXT == TEXT:
value = APETextValue(value, TEXT)
elif TEXT == BINARY:
value = APEBinaryValue(value, TEXT)
elif TEXT == EXTERNAL:
value = APEExtValue(value, TEXT)
else:
raise ValueError('kind must be TEXT, BINARY, or EXTERNAL')
elif isinstance(value, list):
items = []
for v in value:
if not isinstance(v, text_type):
if PY3:
raise TypeError('item in list not str')
v = v.decode('utf-8')
items.append(v)
            value = u'\x00'.join(items)
            if TEXT in (TEXT, EXTERNAL):
                if not isinstance(value, text_type):
                    if PY3:
                        raise TypeError('str only for text/external values')
                    else:
                        value = value.encode('utf-8')
                if TEXT == TEXT:
                    value = APETextValue(value, TEXT)
                elif TEXT == BINARY:
                    value = APEBinaryValue(value, TEXT)
                elif TEXT == EXTERNAL:
                    value = APEExtValue(value, TEXT)
            else:
                raise ValueError('kind must be TEXT, BINARY, or EXTERNAL')
elif PY3:
if BINARY in (TEXT, EXTERNAL):
if not isinstance(value, text_type):
if PY3:
raise TypeError('str only for text/external values')
else:
value = value.encode('utf-8')
if BINARY == TEXT:
value = APETextValue(value, BINARY)
elif BINARY == BINARY:
value = APEBinaryValue(value, BINARY)
elif BINARY == EXTERNAL:
value = APEExtValue(value, BINARY)
else:
raise ValueError('kind must be TEXT, BINARY, or EXTERNAL')
else:
try:
value.decode('utf-8')
except UnicodeError:
if BINARY in (TEXT, EXTERNAL):
if not isinstance(value, text_type):
if PY3:
raise TypeError('str only for text/external values')
else:
value = value.encode('utf-8')
if BINARY == TEXT:
value = APETextValue(value, BINARY)
elif BINARY == BINARY:
value = APEBinaryValue(value, BINARY)
elif BINARY == EXTERNAL:
value = APEExtValue(value, BINARY)
else:
raise ValueError('kind must be TEXT, BINARY, or EXTERNAL')
else:
if TEXT in (TEXT, EXTERNAL):
if not isinstance(value, text_type):
if PY3:
raise TypeError('str only for text/external values')
else:
value = value.encode('utf-8')
if TEXT == TEXT:
value = APETextValue(value, TEXT)
elif TEXT == BINARY:
value = APEBinaryValue(value, TEXT)
elif TEXT == EXTERNAL:
value = APEExtValue(value, TEXT)
else:
raise ValueError('kind must be TEXT, BINARY, or EXTERNAL')
super(APEv2, self).__setitem__(key, value)
|
AvalonXmlAgent.bundle
|
positive
|
def decode(tree_vec, mol_vec, prob_decode):
(pred_root, pred_nodes) = self.decoder.decode(tree_vec, prob_decode)
for (i, node) in enumerate(pred_nodes):
node.nid = i + 1
node.is_leaf = len(node.neighbors) == 1
if len(node.neighbors) > 1:
set_atommap(node.mol, node.nid)
tree_mess = self.jtnn([pred_root])[0]
cur_mol = copy_edit_mol(pred_root.mol)
global_amap = [{}] + [{} for node in pred_nodes]
global_amap[1] = {atom.GetIdx(): atom.GetIdx() for atom in cur_mol.GetAtoms()}
<DeepExtract>
fa_nid = None.nid if None is not None else -1
prev_nodes = [None] if None is not None else []
children = [nei for nei in pred_root.neighbors if nei.nid != fa_nid]
neighbors = [nei for nei in children if nei.mol.GetNumAtoms() > 1]
neighbors = sorted(neighbors, key=lambda x: x.mol.GetNumAtoms(), reverse=True)
singletons = [nei for nei in children if nei.mol.GetNumAtoms() == 1]
neighbors = singletons + neighbors
cur_amap = [(fa_nid, a2, a1) for (nid, a1, a2) in [] if nid == pred_root.nid]
cands = enum_assemble(pred_root, neighbors, prev_nodes, cur_amap)
if len(cands) == 0:
        return None
(cand_smiles, cand_mols, cand_amap) = zip(*cands)
cands = [(candmol, pred_nodes, pred_root) for candmol in cand_mols]
cand_vecs = self.jtmpn(cands, tree_mess)
cand_vecs = self.G_mean(cand_vecs)
mol_vec = mol_vec.squeeze()
scores = torch.mv(cand_vecs, mol_vec) * 20
if prob_decode:
probs = nn.Softmax()(scores.view(1, -1)).squeeze() + 1e-05
cand_idx = torch.multinomial(probs, probs.numel())
else:
(_, cand_idx) = torch.sort(scores, descending=True)
backup_mol = Chem.RWMol(cur_mol)
for i in range(cand_idx.numel()):
cur_mol = Chem.RWMol(backup_mol)
pred_amap = cand_amap[cand_idx[i].data]
new_global_amap = copy.deepcopy(global_amap)
for (nei_id, ctr_atom, nei_atom) in pred_amap:
if nei_id == fa_nid:
continue
new_global_amap[nei_id][nei_atom] = new_global_amap[pred_root.nid][ctr_atom]
cur_mol = attach_mols(cur_mol, children, [], new_global_amap)
new_mol = cur_mol.GetMol()
new_mol = Chem.MolFromSmiles(Chem.MolToSmiles(new_mol))
if new_mol is None:
continue
result = True
for nei_node in children:
if nei_node.is_leaf:
continue
cur_mol = self.dfs_assemble(tree_mess, mol_vec, pred_nodes, cur_mol, new_global_amap, pred_amap, nei_node, pred_root, prob_decode)
if cur_mol is None:
result = False
break
        if result:
            break
    else:
        cur_mol = None
</DeepExtract>
if cur_mol is None:
return None
cur_mol = cur_mol.GetMol()
set_atommap(cur_mol)
cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))
if cur_mol is None:
return None
smiles2D = Chem.MolToSmiles(cur_mol)
stereo_cands = decode_stereo(smiles2D)
if len(stereo_cands) == 1:
return stereo_cands[0]
stereo_vecs = self.mpn(mol2graph(stereo_cands))
stereo_vecs = self.G_mean(stereo_vecs)
scores = nn.CosineSimilarity()(stereo_vecs, mol_vec)
(_, max_id) = scores.max(dim=0)
return stereo_cands[max_id.data]
|
def decode(tree_vec, mol_vec, prob_decode):
(pred_root, pred_nodes) = self.decoder.decode(tree_vec, prob_decode)
for (i, node) in enumerate(pred_nodes):
node.nid = i + 1
node.is_leaf = len(node.neighbors) == 1
if len(node.neighbors) > 1:
set_atommap(node.mol, node.nid)
tree_mess = self.jtnn([pred_root])[0]
cur_mol = copy_edit_mol(pred_root.mol)
global_amap = [{}] + [{} for node in pred_nodes]
global_amap[1] = {atom.GetIdx(): atom.GetIdx() for atom in cur_mol.GetAtoms()}
fa_nid = None.nid if None is not None else -1
prev_nodes = [None] if None is not None else []
children = [nei for nei in pred_root.neighbors if nei.nid != fa_nid]
neighbors = [nei for nei in children if nei.mol.GetNumAtoms() > 1]
neighbors = sorted(neighbors, key=lambda x: x.mol.GetNumAtoms(), reverse=True)
singletons = [nei for nei in children if nei.mol.GetNumAtoms() == 1]
neighbors = singletons + neighbors
cur_amap = [(fa_nid, a2, a1) for (nid, a1, a2) in [] if nid == pred_root.nid]
cands = enum_assemble(pred_root, neighbors, prev_nodes, cur_amap)
if len(cands) == 0:
        return None
(cand_smiles, cand_mols, cand_amap) = zip(*cands)
cands = [(candmol, pred_nodes, pred_root) for candmol in cand_mols]
cand_vecs = self.jtmpn(cands, tree_mess)
cand_vecs = self.G_mean(cand_vecs)
mol_vec = mol_vec.squeeze()
scores = torch.mv(cand_vecs, mol_vec) * 20
if prob_decode:
probs = nn.Softmax()(scores.view(1, -1)).squeeze() + 1e-05
cand_idx = torch.multinomial(probs, probs.numel())
else:
(_, cand_idx) = torch.sort(scores, descending=True)
backup_mol = Chem.RWMol(cur_mol)
for i in range(cand_idx.numel()):
cur_mol = Chem.RWMol(backup_mol)
pred_amap = cand_amap[cand_idx[i].data]
new_global_amap = copy.deepcopy(global_amap)
for (nei_id, ctr_atom, nei_atom) in pred_amap:
if nei_id == fa_nid:
continue
new_global_amap[nei_id][nei_atom] = new_global_amap[pred_root.nid][ctr_atom]
cur_mol = attach_mols(cur_mol, children, [], new_global_amap)
new_mol = cur_mol.GetMol()
new_mol = Chem.MolFromSmiles(Chem.MolToSmiles(new_mol))
if new_mol is None:
continue
result = True
for nei_node in children:
if nei_node.is_leaf:
continue
cur_mol = self.dfs_assemble(tree_mess, mol_vec, pred_nodes, cur_mol, new_global_amap, pred_amap, nei_node, pred_root, prob_decode)
if cur_mol is None:
result = False
break
        if result:
            break
    else:
        cur_mol = None
if cur_mol is None:
return None
cur_mol = cur_mol.GetMol()
set_atommap(cur_mol)
cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))
if cur_mol is None:
return None
smiles2D = Chem.MolToSmiles(cur_mol)
stereo_cands = decode_stereo(smiles2D)
if len(stereo_cands) == 1:
return stereo_cands[0]
stereo_vecs = self.mpn(mol2graph(stereo_cands))
stereo_vecs = self.G_mean(stereo_vecs)
scores = nn.CosineSimilarity()(stereo_vecs, mol_vec)
(_, max_id) = scores.max(dim=0)
return stereo_cands[max_id.data]
|
DIG
|
positive
|
def _srk2_pop_var_vector_wiener(sde_type, code_lines, variables, parameters, vdt):
all_f = [f'f_{var}' for var in variables]
all_g = [f'g_{var}' for var in variables]
noise_string = f"\n {', '.join(all_f)} = f({', '.join(variables + parameters)}) # shape = (..)\n {', '.join(all_g)} = g({', '.join(variables + parameters)}) # shape = (.., m)\n noise_shape = math.shape(g_x1)\n _D = noise_shape[:-1]\n _m = noise_shape[-1]\n "
code_lines.extend(noise_string.split('\n'))
<DeepExtract>
if sde_type == constants.ITO_SDE:
I2 = f"0.5*(_term3 - {vdt} * math.eye({'_m'})) + _a*0.5*{vdt}/math.pi"
elif sde_type == constants.STRA_SDE:
I2 = f'0.5*_term3 + _a*0.5*dt/math.pi'
else:
raise ValueError(f'Unknown SDE_INT type: {sde_type}. We only supports {constants.SUPPORTED_INTG_TYPE}.')
    _D_str = '_D'
    if _D_str:
        _D_str = _D_str + '+'
    noise_string = f"\n    # Noise Terms #\n    # ----------- #\n\n    # single Ito integrals\n    _I1 = math.normal(0., {vdt}_sqrt, {_D_str}({'_m'},))\n    # double Ito integrals\n    _h = (2.0 / {vdt}) ** 0.5\n    _a = math.zeros(shape={_D_str}({'_m'}, {'_m'}))\n    for _k in range(1, num_iter + 1):\n        _x = math.normal(loc=0., scale=1., size={_D_str}({'_m'}, 1))\n        _y = math.normal(loc=0., scale=1., size={_D_str}(1, {'_m'})) + _h * _I1\n        _term1 = math.matmul(_x, _y)\n        _term2 = math.matmul(math.reshape(_y, {_D_str}({'_m'}, 1)),\n                             math.reshape(_x, {_D_str}(1, {'_m'})))\n        _a += (_term1 - _term2) / _k\n    _I1_rs = math.reshape(_I1, {_D_str}({'_m'}, 1))\n    _term3 = math.matmul(_I1_rs, math.reshape(_I1, {_D_str}(1, {'_m'})))\n    _I2 = {I2}\n    "
noise_lines = noise_string.split('\n')
code_lines.extend(noise_lines)
</DeepExtract>
for var in variables:
code_lines.append(f' g_{var}_rs = math.reshape(g_{var}, _D+(1, _m))')
for var in variables:
code_lines.append(f' g_H1_{var} = math.reshape(math.matmul(g_{var}_rs, _I2) / {vdt}_sqrt, _D + (_m,))')
for var in variables:
code_lines.append(f' {var}_rs = math.reshape({var}, _D + (1,))')
for var in variables:
code_lines.append(f' H2_{var} = {var}_rs + g_H1_{var}')
code_lines.append(f' H3_{var} = {var}_rs - g_H1_{var}')
code_lines.append(' ')
for var in variables:
code_lines.append(f' _g_{var} = math.matmul(g_{var}_rs, _I1_rs)')
for var in variables:
code_lines.append(f' {var}_new = {var} + f_{var} + _g_{var}[..., 0, 0]')
code_lines.append('for _k in range(_m):')
all_H2 = [f'H2_{var}[..., _k]' for var in variables]
all_g_H2 = [f'g_{var}_H2' for var in variables]
code_lines.append(f" {', '.join(all_g_H2)} = g({', '.join(all_H2 + parameters)})")
all_H3 = [f'H3_{var}[..., _k]' for var in variables]
all_g_H3 = [f'g_{var}_H3' for var in variables]
code_lines.append(f" {', '.join(all_g_H3)} = g({', '.join(all_H3 + parameters)})")
for var in variables:
code_lines.append(f' {var}_new += 0.5 * {vdt}_sqrt * (g_{var}_H2[..., _k] - g_{var}_H3[..., _k])')
|
def _srk2_pop_var_vector_wiener(sde_type, code_lines, variables, parameters, vdt):
all_f = [f'f_{var}' for var in variables]
all_g = [f'g_{var}' for var in variables]
noise_string = f"\n {', '.join(all_f)} = f({', '.join(variables + parameters)}) # shape = (..)\n {', '.join(all_g)} = g({', '.join(variables + parameters)}) # shape = (.., m)\n noise_shape = math.shape(g_x1)\n _D = noise_shape[:-1]\n _m = noise_shape[-1]\n "
code_lines.extend(noise_string.split('\n'))
if sde_type == constants.ITO_SDE:
I2 = f"0.5*(_term3 - {vdt} * math.eye({'_m'})) + _a*0.5*{vdt}/math.pi"
elif sde_type == constants.STRA_SDE:
I2 = f'0.5*_term3 + _a*0.5*dt/math.pi'
else:
raise ValueError(f'Unknown SDE_INT type: {sde_type}. We only supports {constants.SUPPORTED_INTG_TYPE}.')
    _D_str = '_D'
    if _D_str:
        _D_str = _D_str + '+'
    noise_string = f"\n    # Noise Terms #\n    # ----------- #\n\n    # single Ito integrals\n    _I1 = math.normal(0., {vdt}_sqrt, {_D_str}({'_m'},))\n    # double Ito integrals\n    _h = (2.0 / {vdt}) ** 0.5\n    _a = math.zeros(shape={_D_str}({'_m'}, {'_m'}))\n    for _k in range(1, num_iter + 1):\n        _x = math.normal(loc=0., scale=1., size={_D_str}({'_m'}, 1))\n        _y = math.normal(loc=0., scale=1., size={_D_str}(1, {'_m'})) + _h * _I1\n        _term1 = math.matmul(_x, _y)\n        _term2 = math.matmul(math.reshape(_y, {_D_str}({'_m'}, 1)),\n                             math.reshape(_x, {_D_str}(1, {'_m'})))\n        _a += (_term1 - _term2) / _k\n    _I1_rs = math.reshape(_I1, {_D_str}({'_m'}, 1))\n    _term3 = math.matmul(_I1_rs, math.reshape(_I1, {_D_str}(1, {'_m'})))\n    _I2 = {I2}\n    "
noise_lines = noise_string.split('\n')
code_lines.extend(noise_lines)
for var in variables:
code_lines.append(f' g_{var}_rs = math.reshape(g_{var}, _D+(1, _m))')
for var in variables:
code_lines.append(f' g_H1_{var} = math.reshape(math.matmul(g_{var}_rs, _I2) / {vdt}_sqrt, _D + (_m,))')
for var in variables:
code_lines.append(f' {var}_rs = math.reshape({var}, _D + (1,))')
for var in variables:
code_lines.append(f' H2_{var} = {var}_rs + g_H1_{var}')
code_lines.append(f' H3_{var} = {var}_rs - g_H1_{var}')
code_lines.append(' ')
for var in variables:
code_lines.append(f' _g_{var} = math.matmul(g_{var}_rs, _I1_rs)')
for var in variables:
code_lines.append(f' {var}_new = {var} + f_{var} + _g_{var}[..., 0, 0]')
code_lines.append('for _k in range(_m):')
all_H2 = [f'H2_{var}[..., _k]' for var in variables]
all_g_H2 = [f'g_{var}_H2' for var in variables]
code_lines.append(f" {', '.join(all_g_H2)} = g({', '.join(all_H2 + parameters)})")
all_H3 = [f'H3_{var}[..., _k]' for var in variables]
all_g_H3 = [f'g_{var}_H3' for var in variables]
code_lines.append(f" {', '.join(all_g_H3)} = g({', '.join(all_H3 + parameters)})")
for var in variables:
code_lines.append(f' {var}_new += 0.5 * {vdt}_sqrt * (g_{var}_H2[..., _k] - g_{var}_H3[..., _k])')
|
BrainPy
|
positive
|
def initialize_postload(self):
self.covmat = self.fullcov[np.ix_(self.used_indices, self.used_indices)]
self.covinv = np.linalg.inv(self.covmat)
<DeepExtract>
nused = len(self.used_items)
data = np.empty(nused)
for (i, (type_ix, f1, f2, theta_ix)) in enumerate(self.used_items):
data[i] = self.data_arrays[type_ix][f1, f2][theta_ix]
self.data_vector = data
</DeepExtract>
self.errors = copy.deepcopy(self.data_arrays)
cov_ix = 0
for (i, (type_ix, f1, f2, ix)) in enumerate(self.indices):
self.errors[type_ix][f1, f2][ix] = np.sqrt(self.fullcov[cov_ix, cov_ix])
cov_ix += 1
self.theta_bins_radians = self.theta_bins / 60 * np.pi / 180
if self.use_hankel:
import hankel
maxx = self.theta_bins_radians[-1] * self.l_max
h = 3.2 * np.pi / maxx
N = int(3.2 / h)
self.hankel0 = hankel.HankelTransform(nu=0, N=N, h=h)
self.hankel2 = hankel.HankelTransform(nu=2, N=N, h=h)
self.hankel4 = hankel.HankelTransform(nu=4, N=N, h=h)
elif self.binned_bessels:
dls = np.diff(np.unique(np.exp(np.linspace(np.log(1.0), np.log(self.l_max), int(500 * self.acc))).astype(int)))
groups = []
ell = 2
self.ls_bessel = np.zeros(dls.size)
for (i, dlx) in enumerate(dls):
self.ls_bessel[i] = (2 * ell + dlx - 1) / 2.0
groups.append(np.arange(ell, ell + dlx))
ell += dlx
js = np.empty((3, self.ls_bessel.size, len(self.theta_bins_radians)))
bigell = np.arange(0, self.l_max + 1, dtype=np.float64)
for (i, theta) in enumerate(self.theta_bins_radians):
bigx = bigell * theta
for (ix, nu) in enumerate([0, 2, 4]):
bigj = special.jn(nu, bigx) * bigell / (2 * np.pi)
for (j, g) in enumerate(groups):
js[ix, j, i] = np.sum(bigj[g])
self.bessel_cache = (js[0, :, :], js[1, :, :], js[2, :, :])
else:
dl = 4
self.ls_bessel = np.arange(2 + dl / 2, self.l_max + 1, dl, dtype=np.float64)
j0s = np.empty((len(self.ls_bessel), len(self.theta_bins_radians)))
j2s = np.empty((len(self.ls_bessel), len(self.theta_bins_radians)))
j4s = np.empty((len(self.ls_bessel), len(self.theta_bins_radians)))
for (i, theta) in enumerate(self.theta_bins_radians):
x = self.ls_bessel * theta
j0s[:, i] = self.ls_bessel * special.jn(0, x)
j2s[:, i] = self.ls_bessel * special.jn(2, x)
j4s[:, i] = self.ls_bessel * special.jn(4, x)
j0s *= dl / (2 * np.pi)
j2s *= dl / (2 * np.pi)
j4s *= dl / (2 * np.pi)
self.bessel_cache = (j0s, j2s, j4s)
if self.acc > 1:
self.zs = np.linspace(0.005, self.zmax, int(350 * self.acc))
else:
self.zs = self.zmid[self.zmid <= self.zmax]
assert self.zmax <= 5, 'z max too large!'
self.zs_interp = np.linspace(0, self.zmax, 100)
|
def initialize_postload(self):
self.covmat = self.fullcov[np.ix_(self.used_indices, self.used_indices)]
self.covinv = np.linalg.inv(self.covmat)
nused = len(self.used_items)
data = np.empty(nused)
for (i, (type_ix, f1, f2, theta_ix)) in enumerate(self.used_items):
data[i] = self.data_arrays[type_ix][f1, f2][theta_ix]
self.data_vector = data
self.errors = copy.deepcopy(self.data_arrays)
cov_ix = 0
for (i, (type_ix, f1, f2, ix)) in enumerate(self.indices):
self.errors[type_ix][f1, f2][ix] = np.sqrt(self.fullcov[cov_ix, cov_ix])
cov_ix += 1
self.theta_bins_radians = self.theta_bins / 60 * np.pi / 180
if self.use_hankel:
import hankel
maxx = self.theta_bins_radians[-1] * self.l_max
h = 3.2 * np.pi / maxx
N = int(3.2 / h)
self.hankel0 = hankel.HankelTransform(nu=0, N=N, h=h)
self.hankel2 = hankel.HankelTransform(nu=2, N=N, h=h)
self.hankel4 = hankel.HankelTransform(nu=4, N=N, h=h)
elif self.binned_bessels:
dls = np.diff(np.unique(np.exp(np.linspace(np.log(1.0), np.log(self.l_max), int(500 * self.acc))).astype(int)))
groups = []
ell = 2
self.ls_bessel = np.zeros(dls.size)
for (i, dlx) in enumerate(dls):
self.ls_bessel[i] = (2 * ell + dlx - 1) / 2.0
groups.append(np.arange(ell, ell + dlx))
ell += dlx
js = np.empty((3, self.ls_bessel.size, len(self.theta_bins_radians)))
bigell = np.arange(0, self.l_max + 1, dtype=np.float64)
for (i, theta) in enumerate(self.theta_bins_radians):
bigx = bigell * theta
for (ix, nu) in enumerate([0, 2, 4]):
bigj = special.jn(nu, bigx) * bigell / (2 * np.pi)
for (j, g) in enumerate(groups):
js[ix, j, i] = np.sum(bigj[g])
self.bessel_cache = (js[0, :, :], js[1, :, :], js[2, :, :])
else:
dl = 4
self.ls_bessel = np.arange(2 + dl / 2, self.l_max + 1, dl, dtype=np.float64)
j0s = np.empty((len(self.ls_bessel), len(self.theta_bins_radians)))
j2s = np.empty((len(self.ls_bessel), len(self.theta_bins_radians)))
j4s = np.empty((len(self.ls_bessel), len(self.theta_bins_radians)))
for (i, theta) in enumerate(self.theta_bins_radians):
x = self.ls_bessel * theta
j0s[:, i] = self.ls_bessel * special.jn(0, x)
j2s[:, i] = self.ls_bessel * special.jn(2, x)
j4s[:, i] = self.ls_bessel * special.jn(4, x)
j0s *= dl / (2 * np.pi)
j2s *= dl / (2 * np.pi)
j4s *= dl / (2 * np.pi)
self.bessel_cache = (j0s, j2s, j4s)
if self.acc > 1:
self.zs = np.linspace(0.005, self.zmax, int(350 * self.acc))
else:
self.zs = self.zmid[self.zmid <= self.zmax]
assert self.zmax <= 5, 'z max too large!'
self.zs_interp = np.linspace(0, self.zmax, 100)
|
cobaya
|
positive
|
def describe_overlap_rules(self):
<DeepExtract>
if not self.is_fitted:
raise ValueError('Call .fit() before describing rules')
</DeepExtract>
if self._support_only:
return 'No Overlap Rules Fitted (support_only=True).'
else:
X = self.X_supp
o_est = self.RS_overlap_estimator
return self._describe_rules(o_est, X, estimator_name='OVERLAP')
|
def describe_overlap_rules(self):
if not self.is_fitted:
raise ValueError('Call .fit() before describing rules')
if self._support_only:
return 'No Overlap Rules Fitted (support_only=True).'
else:
X = self.X_supp
o_est = self.RS_overlap_estimator
return self._describe_rules(o_est, X, estimator_name='OVERLAP')
|
dowhy
|
positive
|
def load_rsa_private_key(*names):
"""Load RSA private key."""
<DeepExtract>
(_, ext) = os.path.splitext(names[-1])
if ext.lower() == '.pem':
loader = serialization.load_pem_private_key
elif ext.lower() == '.der':
loader = serialization.load_der_private_key
    else:
        raise ValueError('Loader could not be recognized based on extension')
</DeepExtract>
return jose.ComparableRSAKey(loader(load_vector(*names), password=None, backend=default_backend()))
|
def load_rsa_private_key(*names):
"""Load RSA private key."""
(_, ext) = os.path.splitext(names[-1])
if ext.lower() == '.pem':
loader = serialization.load_pem_private_key
elif ext.lower() == '.der':
loader = serialization.load_der_private_key
    else:
        raise ValueError('Loader could not be recognized based on extension')
return jose.ComparableRSAKey(loader(load_vector(*names), password=None, backend=default_backend()))
|
acme-debian
|
positive
|
def genst_all(cn):
<DeepExtract>
assert isinstance(cn, AbstractCItem)
assert isinstance(cn.ea, (int, long))
assert isinstance(cn._ctype, int)
assert cn._ctype >= 0 and cn._ctype < HxCType.CIT_END
assert isinstance(str(cn), str)
assert cn.is_expr == (cn._ctype >= HxCType.COT_EMPTY and cn._ctype <= HxCType.COT_LAST)
assert cn.is_statement == (cn._ctype >= HxCType.CIT_EMPTY and cn._ctype < HxCType.CIT_END)
assert isinstance(cn.has_label, bool)
assert isinstance(cn.label_num, int)
assert (cn.label_num != -1) == cn.has_label
</DeepExtract>
assert isinstance(cn, (CNode, HxCItem))
if isinstance(cn, CNode):
<DeepExtract>
assert isinstance(cn, CNode)
assert isinstance(cn.closest_ea, (int, long))
assert cn.closest_ea != idc.BADADDR
assert cn.closest_ea == cn.ea or cn.ea == idc.BADADDR
assert cn.closest_ea is not None
assert isinstance(cn.cstr, str)
if cn.has_parent:
assert isinstance(cn.parent, CNode)
else:
assert cn == cn.hxcfunc.root_node
assert isinstance(cn.hxcfunc, HxCFunc)
if isinstance(cn, CNodeExprCast):
assert cn.ignore_cast != cn
assert cn.ignore_cast_parent != cn
else:
assert cn.ignore_cast == cn
assert cn.ignore_cast_parent == cn
</DeepExtract>
if isinstance(cn, CNodeExpr):
<DeepExtract>
assert isinstance(cn, CNodeExpr)
assert isinstance(cn.ops, list)
for cno in cn.ops:
assert isinstance(cno, CNodeExpr)
assert isinstance(cn.type, BipType)
if isinstance(cn, CNodeExprFinal):
assert cn.find_final_left_node() == cn
assert cn.find_left_node_notmatching([CNodeExpr]) == cn
else:
assert isinstance(cn.find_final_left_node(), CNodeExprFinal)
assert isinstance(cn.find_left_node_notmatching([CNodeExpr]), CNodeExprFinal)
assert cn.find_left_node_notmatching([]) == cn
</DeepExtract>
if isinstance(cn, CNodeStmt):
<DeepExtract>
assert isinstance(cn, CNodeStmt)
assert isinstance(cn.stmt_children, list)
assert isinstance(cn.expr_children, list)
for cnc in cn.stmt_children:
assert isinstance(cnc, CNodeStmt)
for cnc in cn.expr_children:
assert isinstance(cnc, CNodeExpr)
</DeepExtract>
else:
if isinstance(cn, HxCExpr):
<DeepExtract>
assert isinstance(cn, HxCItem)
assert isinstance(cn, HxCExpr)
assert isinstance(cn.ops, list)
for hio in cn.ops:
assert isinstance(hio, HxCExpr)
assert isinstance(cn.type, BipType)
</DeepExtract>
if isinstance(cn, HxCStmt):
<DeepExtract>
assert isinstance(cn, HxCItem)
assert isinstance(cn, HxCStmt)
assert isinstance(cn.stmt_children, list)
assert isinstance(cn.expr_children, list)
for hic in cn.stmt_children:
assert isinstance(hic, HxCStmt)
for hic in cn.expr_children:
assert isinstance(hic, HxCExpr)
</DeepExtract>
if isinstance(cn, (HxCExprNum, CNodeExprNum)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprNum, CNodeExprNum))
assert cn.ops == []
assert isinstance(cn.value, (int, long))
assert isinstance(cn.size, (int, long))
assert cn.value < 1 << cn.size * 8
</DeepExtract>
if isinstance(cn, (HxCExprFNum, CNodeExprFNum)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprFNum, CNodeExprFNum))
assert cn.ops == []
assert isinstance(cn.value, (int, long))
assert isinstance(cn.size, (int, long))
assert cn.size in (4, 8)
</DeepExtract>
if isinstance(cn, (HxCExprStr, CNodeExprStr)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprStr, CNodeExprStr))
assert cn.ops == []
assert isinstance(cn.value, str)
</DeepExtract>
if isinstance(cn, (HxCExprObj, CNodeExprObj)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprObj, CNodeExprObj))
assert cn.ops == []
assert isinstance(cn.value, (int, long))
</DeepExtract>
if isinstance(cn, (HxCExprVar, CNodeExprVar)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprVar, CNodeExprVar))
assert cn.ops == []
assert isinstance(cn.value, (int, long))
assert isinstance(cn.index, (int, long))
assert cn.index == cn.value
if isinstance(cn, CNodeExprVar):
assert isinstance(cn.lvar, HxLvar)
assert isinstance(cn.lvar_name, str)
assert cn.lvar.name == cn.lvar_name
</DeepExtract>
if isinstance(cn, (HxCExprHelper, CNodeExprHelper)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprHelper, CNodeExprHelper))
assert cn.ops == []
assert isinstance(cn.value, str)
</DeepExtract>
if isinstance(cn, (HxCExprType, CNodeExprType)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprType, CNodeExprType))
assert cn.ops == []
assert isinstance(cn.value, BipType)
assert cn.value == cn.type
</DeepExtract>
if isinstance(cn, (HxCExprTernary, CNodeExprTernary)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprTernary, CNodeExprTernary))
assert len(cn.ops) == 3
assert isinstance(cn.cond, (HxCExpr, CNodeExpr))
assert cn.cond == cn.ops[0]
assert isinstance(cn.expr1, (HxCExpr, CNodeExpr))
assert cn.expr1 == cn.ops[1]
assert isinstance(cn.expr2, (HxCExpr, CNodeExpr))
assert cn.expr2 == cn.ops[2]
</DeepExtract>
if isinstance(cn, (HxCExprDoubleOperation, CNodeExprDoubleOperation)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprDoubleOperation, CNodeExprDoubleOperation))
assert len(cn.ops) == 2
assert isinstance(cn.first_op, (HxCExpr, CNodeExpr))
assert cn.first_op == cn.ops[0]
assert isinstance(cn.second_op, (HxCExpr, CNodeExpr))
assert cn.second_op == cn.ops[1]
</DeepExtract>
if isinstance(cn, (HxCExprAsg, CNodeExprAsg)):
<DeepExtract>
assert isinstance(cn, (HxCExprAssignment, CNodeExprAssignment))
assert isinstance(cn.src, (HxCExpr, CNodeExpr))
assert cn.src == cn.ops[1]
assert isinstance(cn.dst, (HxCExpr, CNodeExpr))
assert cn.dst == cn.ops[0]
</DeepExtract>
if isinstance(cn, (HxCExprUnaryOperation, CNodeExprUnaryOperation)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprUnaryOperation, CNodeExprUnaryOperation))
assert len(cn.ops) == 1
assert isinstance(cn.operand, (HxCExpr, CNodeExpr))
assert cn.operand == cn.ops[0]
</DeepExtract>
if isinstance(cn, (HxCExprPtr, CNodeExprPtr)):
<DeepExtract>
assert isinstance(cn, (HxCExprPtr, CNodeExprPtr))
assert isinstance(cn.access_size, (int, long))
</DeepExtract>
if isinstance(cn, (HxCExprCall, CNodeExprCall)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprCall, CNodeExprCall))
ops = cn.ops
assert len(ops) == cn.number_args + 1
assert isinstance(cn.type_call, BipType)
assert isinstance(cn.caller, (HxCExpr, CNodeExpr))
assert isinstance(cn.is_helper, bool)
assert isinstance(cn.number_args, (int, long))
assert cn.caller == ops[0]
if isinstance(cn, CNodeExpr):
assert cn.caller_addr is None or isinstance(cn.caller_addr, (int, long))
assert cn.caller_func is None or isinstance(cn.caller_func, BipFunction)
if cn.number_args >= 1:
iv = cn.get_arg_intval(0)
assert isinstance(iv, (int, long)) or iv is None
if isinstance(cn.caller, (HxCExprObj, CNodeExprObj)):
assert cn.is_helper == False
if isinstance(cn, CNodeExpr):
assert cn.caller_addr is not None
assert cn.caller_addr == cn.caller.value
assert cn.caller_func == BipFunction(cn.caller.value)
args = cn.args
assert isinstance(args, list)
i = 0
for ar in cn.args_iter:
assert isinstance(ar, (HxCExpr, CNodeExpr))
assert ar == cn.get_arg(i)
assert ar == args[i]
assert ar == ops[i + 1]
if isinstance(ar, CNodeExprObj):
assert cn.get_arg_intval(i) == ar.value
i += 1
assert i == cn.number_args
with pytest.raises(ValueError):
cn.get_arg(cn.number_args + 1)
</DeepExtract>
if isinstance(cn, (HxCExprIdx, CNodeExprIdx)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprMemAccess, CNodeExprMemAccess))
assert isinstance(cn, (HxCExprIdx, CNodeExprIdx))
assert len(cn.ops) == 2
assert isinstance(cn.array, (HxCExpr, CNodeExpr))
assert cn.array == cn.obj
assert cn.array == cn.ops[0]
assert isinstance(cn.index, (HxCExpr, CNodeExpr))
assert cn.index == cn.off
assert cn.index == cn.ops[1]
</DeepExtract>
if isinstance(cn, (HxCExprMemref, CNodeExprMemref)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprMemAccess, CNodeExprMemAccess))
assert isinstance(cn, (HxCExprMemref, CNodeExprMemref))
assert len(cn.ops) == 1
assert isinstance(cn.mem, (HxCExpr, CNodeExpr))
assert cn.mem == cn.obj
assert cn.mem == cn.ops[0]
assert isinstance(cn.off, (int, long))
</DeepExtract>
if isinstance(cn, (HxCExprMemptr, CNodeExprMemptr)):
<DeepExtract>
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprMemAccess, CNodeExprMemAccess))
assert isinstance(cn, (HxCExprMemptr, CNodeExprMemptr))
assert len(cn.ops) == 1
assert isinstance(cn.ptr, (HxCExpr, CNodeExpr))
assert cn.ptr == cn.obj
assert cn.ptr == cn.ops[0]
assert isinstance(cn.off, (int, long))
assert isinstance(cn.access_size, (int, long))
</DeepExtract>
if isinstance(cn, (HxCStmtExpr, CNodeStmtExpr)):
<DeepExtract>
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtFinal, CNodeStmtFinal))
assert isinstance(cn, (HxCStmtExpr, CNodeStmtExpr))
assert len(cn.stmt_children) == 0
assert len(cn.expr_children) == 1
assert isinstance(cn.expr, (HxCExpr, CNodeExpr))
assert cn.expr == cn.value
</DeepExtract>
if isinstance(cn, (HxCStmtGoto, CNodeStmtGoto)):
<DeepExtract>
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtFinal, CNodeStmtFinal))
assert isinstance(cn, (HxCStmtGoto, CNodeStmtGoto))
assert len(cn.stmt_children) == 0
assert len(cn.expr_children) == 0
assert isinstance(cn.label, (int, long))
assert cn.label == cn.value
if isinstance(cn, CNodeStmtGoto):
assert isinstance(cn.cnode_dst, CNode)
</DeepExtract>
if isinstance(cn, (HxCStmtAsm, CNodeStmtAsm)):
<DeepExtract>
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtFinal, CNodeStmtFinal))
assert isinstance(cn, (HxCStmtAsm, CNodeStmtAsm))
assert len(cn.stmt_children) == 0
assert len(cn.expr_children) == 0
assert isinstance(cn.addr_instr, list)
assert len(cn.addr_instr) > 0
assert isinstance(cn.addr_instr[0], (int, long))
assert isinstance(cn.length, (int, long))
assert len(cn.addr_instr) == cn.length
assert len(cn) == cn.length
li = cn.value
assert isinstance(li, list)
assert len(li) == cn.length
assert isinstance(li[0], BipInstr)
</DeepExtract>
if isinstance(cn, (HxCStmtReturn, CNodeStmtReturn)):
<DeepExtract>
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtFinal, CNodeStmtFinal))
assert isinstance(cn, (HxCStmtReturn, CNodeStmtReturn))
assert len(cn.stmt_children) == 0
assert len(cn.expr_children) == 1
assert cn.value == cn.ret_val
assert cn.value == cn.expr_children[0]
assert isinstance(cn.ret_val, (HxCExpr, CNodeExpr))
</DeepExtract>
if isinstance(cn, (HxCStmtIf, CNodeStmtIf)):
<DeepExtract>
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtIf, CNodeStmtIf))
assert len(cn.expr_children) == 1
assert isinstance(cn.cond, (HxCExpr, CNodeExpr))
assert cn.expr_children[0] == cn.cond
assert isinstance(cn.st_then, (HxCStmt, CNodeStmt))
assert cn.stmt_children[0] == cn.st_then
assert isinstance(cn.has_else, bool)
if cn.has_else:
assert isinstance(cn.st_else, (HxCStmt, CNodeStmt))
assert len(cn.stmt_children) == 2
assert cn.stmt_children[1] == cn.st_else
else:
assert cn.st_else is None
assert len(cn.stmt_children) == 1
</DeepExtract>
if isinstance(cn, (HxCStmtFor, CNodeStmtFor)):
<DeepExtract>
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtLoop, CNodeStmtLoop))
assert isinstance(cn, (HxCStmtFor, CNodeStmtFor))
assert len(cn.stmt_children) == 1
assert len(cn.expr_children) == 3
assert isinstance(cn.init, (HxCExpr, CNodeExpr))
assert isinstance(cn.cond, (HxCExpr, CNodeExpr))
assert isinstance(cn.step, (HxCExpr, CNodeExpr))
assert cn.init == cn.expr_children[0]
assert cn.cond == cn.expr_children[1]
assert cn.step == cn.expr_children[2]
assert isinstance(cn.st_body, (HxCStmt, CNodeStmt))
assert cn.st_body == cn.stmt_children[0]
</DeepExtract>
if isinstance(cn, (HxCStmtWhile, CNodeStmtWhile)):
<DeepExtract>
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtLoop, CNodeStmtLoop))
assert isinstance(cn, (HxCStmtWhile, CNodeStmtWhile))
assert len(cn.stmt_children) == 1
assert len(cn.expr_children) == 1
assert isinstance(cn.cond, (HxCExpr, CNodeExpr))
assert cn.cond == cn.expr_children[0]
assert isinstance(cn.st_body, (HxCStmt, CNodeStmt))
assert cn.st_body == cn.stmt_children[0]
</DeepExtract>
if isinstance(cn, (HxCStmtDoWhile, CNodeStmtDoWhile)):
<DeepExtract>
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtLoop, CNodeStmtLoop))
assert isinstance(cn, (HxCStmtDoWhile, CNodeStmtDoWhile))
assert len(cn.stmt_children) == 1
assert len(cn.expr_children) == 1
assert isinstance(cn.cond, (HxCExpr, CNodeExpr))
assert cn.cond == cn.expr_children[0]
assert isinstance(cn.st_body, (HxCStmt, CNodeStmt))
assert cn.st_body == cn.stmt_children[0]
</DeepExtract>
if isinstance(cn, (HxCStmtSwitch, CNodeStmtSwitch)):
<DeepExtract>
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtLoop, CNodeStmtLoop))
assert isinstance(cn, (HxCStmtSwitch, CNodeStmtSwitch))
assert len(cn.stmt_children) != 0
assert len(cn.expr_children) == 1
assert isinstance(cn.expr, (HxCExpr, CNodeExpr))
assert cn.expr == cn.expr_children[0]
assert isinstance(cn.max_val, (int, long))
cas = cn.st_cases
casv = cn.cases_val
assert isinstance(cas, list)
assert isinstance(casv, list)
assert len(cas) != 0
assert len(cas) == len(cn.stmt_children)
assert len(cas) == len(casv)
for i in range(len(cas)):
assert isinstance(cas[i], (HxCStmt, CNodeStmt))
assert isinstance(casv[i], list)
assert isinstance(casv[i][0], (int, long))
</DeepExtract>
if isinstance(cn, (HxCStmtBlock, CNodeStmtBlock)):
<DeepExtract>
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtBlock, CNodeStmtBlock))
assert len(cn.stmt_children) != 0
assert len(cn.expr_children) == 0
assert len(cn.elts) == len(cn.stmt_children)
</DeepExtract>
|
def genst_all(cn):
assert isinstance(cn, AbstractCItem)
assert isinstance(cn.ea, (int, long))
assert isinstance(cn._ctype, int)
assert cn._ctype >= 0 and cn._ctype < HxCType.CIT_END
assert isinstance(str(cn), str)
assert cn.is_expr == (cn._ctype >= HxCType.COT_EMPTY and cn._ctype <= HxCType.COT_LAST)
assert cn.is_statement == (cn._ctype >= HxCType.CIT_EMPTY and cn._ctype < HxCType.CIT_END)
assert isinstance(cn.has_label, bool)
assert isinstance(cn.label_num, int)
assert (cn.label_num != -1) == cn.has_label
assert isinstance(cn, (CNode, HxCItem))
if isinstance(cn, CNode):
assert isinstance(cn, CNode)
assert isinstance(cn.closest_ea, (int, long))
assert cn.closest_ea != idc.BADADDR
assert cn.closest_ea == cn.ea or cn.ea == idc.BADADDR
assert cn.closest_ea is not None
assert isinstance(cn.cstr, str)
if cn.has_parent:
assert isinstance(cn.parent, CNode)
else:
assert cn == cn.hxcfunc.root_node
assert isinstance(cn.hxcfunc, HxCFunc)
if isinstance(cn, CNodeExprCast):
assert cn.ignore_cast != cn
assert cn.ignore_cast_parent != cn
else:
assert cn.ignore_cast == cn
assert cn.ignore_cast_parent == cn
if isinstance(cn, CNodeExpr):
assert isinstance(cn, CNodeExpr)
assert isinstance(cn.ops, list)
for cno in cn.ops:
assert isinstance(cno, CNodeExpr)
assert isinstance(cn.type, BipType)
if isinstance(cn, CNodeExprFinal):
assert cn.find_final_left_node() == cn
assert cn.find_left_node_notmatching([CNodeExpr]) == cn
else:
assert isinstance(cn.find_final_left_node(), CNodeExprFinal)
assert isinstance(cn.find_left_node_notmatching([CNodeExpr]), CNodeExprFinal)
assert cn.find_left_node_notmatching([]) == cn
if isinstance(cn, CNodeStmt):
assert isinstance(cn, CNodeStmt)
assert isinstance(cn.stmt_children, list)
assert isinstance(cn.expr_children, list)
for cnc in cn.stmt_children:
assert isinstance(cnc, CNodeStmt)
for cnc in cn.expr_children:
assert isinstance(cnc, CNodeExpr)
else:
if isinstance(cn, HxCExpr):
assert isinstance(cn, HxCItem)
assert isinstance(cn, HxCExpr)
assert isinstance(cn.ops, list)
for hio in cn.ops:
assert isinstance(hio, HxCExpr)
assert isinstance(cn.type, BipType)
if isinstance(cn, HxCStmt):
assert isinstance(cn, HxCItem)
assert isinstance(cn, HxCStmt)
assert isinstance(cn.stmt_children, list)
assert isinstance(cn.expr_children, list)
for hic in cn.stmt_children:
assert isinstance(hic, HxCStmt)
for hic in cn.expr_children:
assert isinstance(hic, HxCExpr)
if isinstance(cn, (HxCExprNum, CNodeExprNum)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprNum, CNodeExprNum))
assert cn.ops == []
assert isinstance(cn.value, (int, long))
assert isinstance(cn.size, (int, long))
assert cn.value < 1 << cn.size * 8
if isinstance(cn, (HxCExprFNum, CNodeExprFNum)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprFNum, CNodeExprFNum))
assert cn.ops == []
assert isinstance(cn.value, (int, long))
assert isinstance(cn.size, (int, long))
assert cn.size in (4, 8)
if isinstance(cn, (HxCExprStr, CNodeExprStr)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprStr, CNodeExprStr))
assert cn.ops == []
assert isinstance(cn.value, str)
if isinstance(cn, (HxCExprObj, CNodeExprObj)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprObj, CNodeExprObj))
assert cn.ops == []
assert isinstance(cn.value, (int, long))
if isinstance(cn, (HxCExprVar, CNodeExprVar)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprVar, CNodeExprVar))
assert cn.ops == []
assert isinstance(cn.value, (int, long))
assert isinstance(cn.index, (int, long))
assert cn.index == cn.value
if isinstance(cn, CNodeExprVar):
assert isinstance(cn.lvar, HxLvar)
assert isinstance(cn.lvar_name, str)
assert cn.lvar.name == cn.lvar_name
if isinstance(cn, (HxCExprHelper, CNodeExprHelper)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprHelper, CNodeExprHelper))
assert cn.ops == []
assert isinstance(cn.value, str)
if isinstance(cn, (HxCExprType, CNodeExprType)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprFinal, CNodeExprFinal))
assert isinstance(cn, (HxCExprType, CNodeExprType))
assert cn.ops == []
assert isinstance(cn.value, BipType)
assert cn.value == cn.type
if isinstance(cn, (HxCExprTernary, CNodeExprTernary)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprTernary, CNodeExprTernary))
assert len(cn.ops) == 3
assert isinstance(cn.cond, (HxCExpr, CNodeExpr))
assert cn.cond == cn.ops[0]
assert isinstance(cn.expr1, (HxCExpr, CNodeExpr))
assert cn.expr1 == cn.ops[1]
assert isinstance(cn.expr2, (HxCExpr, CNodeExpr))
assert cn.expr2 == cn.ops[2]
if isinstance(cn, (HxCExprDoubleOperation, CNodeExprDoubleOperation)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprDoubleOperation, CNodeExprDoubleOperation))
assert len(cn.ops) == 2
assert isinstance(cn.first_op, (HxCExpr, CNodeExpr))
assert cn.first_op == cn.ops[0]
assert isinstance(cn.second_op, (HxCExpr, CNodeExpr))
assert cn.second_op == cn.ops[1]
if isinstance(cn, (HxCExprAsg, CNodeExprAsg)):
assert isinstance(cn, (HxCExprAssignment, CNodeExprAssignment))
assert isinstance(cn.src, (HxCExpr, CNodeExpr))
assert cn.src == cn.ops[1]
assert isinstance(cn.dst, (HxCExpr, CNodeExpr))
assert cn.dst == cn.ops[0]
if isinstance(cn, (HxCExprUnaryOperation, CNodeExprUnaryOperation)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprUnaryOperation, CNodeExprUnaryOperation))
assert len(cn.ops) == 1
assert isinstance(cn.operand, (HxCExpr, CNodeExpr))
assert cn.operand == cn.ops[0]
if isinstance(cn, (HxCExprPtr, CNodeExprPtr)):
assert isinstance(cn, (HxCExprPtr, CNodeExprPtr))
assert isinstance(cn.access_size, (int, long))
if isinstance(cn, (HxCExprCall, CNodeExprCall)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprCall, CNodeExprCall))
ops = cn.ops
assert len(ops) == cn.number_args + 1
assert isinstance(cn.type_call, BipType)
assert isinstance(cn.caller, (HxCExpr, CNodeExpr))
assert isinstance(cn.is_helper, bool)
assert isinstance(cn.number_args, (int, long))
assert cn.caller == ops[0]
if isinstance(cn, CNodeExpr):
assert cn.caller_addr is None or isinstance(cn.caller_addr, (int, long))
assert cn.caller_func is None or isinstance(cn.caller_func, BipFunction)
if cn.number_args >= 1:
iv = cn.get_arg_intval(0)
assert isinstance(iv, (int, long)) or iv is None
if isinstance(cn.caller, (HxCExprObj, CNodeExprObj)):
assert cn.is_helper == False
if isinstance(cn, CNodeExpr):
assert cn.caller_addr is not None
assert cn.caller_addr == cn.caller.value
assert cn.caller_func == BipFunction(cn.caller.value)
args = cn.args
assert isinstance(args, list)
i = 0
for ar in cn.args_iter:
assert isinstance(ar, (HxCExpr, CNodeExpr))
assert ar == cn.get_arg(i)
assert ar == args[i]
assert ar == ops[i + 1]
if isinstance(ar, CNodeExprObj):
assert cn.get_arg_intval(i) == ar.value
i += 1
assert i == cn.number_args
with pytest.raises(ValueError):
cn.get_arg(cn.number_args + 1)
if isinstance(cn, (HxCExprIdx, CNodeExprIdx)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprMemAccess, CNodeExprMemAccess))
assert isinstance(cn, (HxCExprIdx, CNodeExprIdx))
assert len(cn.ops) == 2
assert isinstance(cn.array, (HxCExpr, CNodeExpr))
assert cn.array == cn.obj
assert cn.array == cn.ops[0]
assert isinstance(cn.index, (HxCExpr, CNodeExpr))
assert cn.index == cn.off
assert cn.index == cn.ops[1]
if isinstance(cn, (HxCExprMemref, CNodeExprMemref)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprMemAccess, CNodeExprMemAccess))
assert isinstance(cn, (HxCExprMemref, CNodeExprMemref))
assert len(cn.ops) == 1
assert isinstance(cn.mem, (HxCExpr, CNodeExpr))
assert cn.mem == cn.obj
assert cn.mem == cn.ops[0]
assert isinstance(cn.off, (int, long))
if isinstance(cn, (HxCExprMemptr, CNodeExprMemptr)):
assert isinstance(cn, (HxCExpr, CNodeExpr))
assert isinstance(cn, (HxCExprMemAccess, CNodeExprMemAccess))
assert isinstance(cn, (HxCExprMemptr, CNodeExprMemptr))
assert len(cn.ops) == 1
assert isinstance(cn.ptr, (HxCExpr, CNodeExpr))
assert cn.ptr == cn.obj
assert cn.ptr == cn.ops[0]
assert isinstance(cn.off, (int, long))
assert isinstance(cn.access_size, (int, long))
if isinstance(cn, (HxCStmtExpr, CNodeStmtExpr)):
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtFinal, CNodeStmtFinal))
assert isinstance(cn, (HxCStmtExpr, CNodeStmtExpr))
assert len(cn.stmt_children) == 0
assert len(cn.expr_children) == 1
assert isinstance(cn.expr, (HxCExpr, CNodeExpr))
assert cn.expr == cn.value
if isinstance(cn, (HxCStmtGoto, CNodeStmtGoto)):
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtFinal, CNodeStmtFinal))
assert isinstance(cn, (HxCStmtGoto, CNodeStmtGoto))
assert len(cn.stmt_children) == 0
assert len(cn.expr_children) == 0
assert isinstance(cn.label, (int, long))
assert cn.label == cn.value
if isinstance(cn, CNodeStmtGoto):
assert isinstance(cn.cnode_dst, CNode)
if isinstance(cn, (HxCStmtAsm, CNodeStmtAsm)):
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtFinal, CNodeStmtFinal))
assert isinstance(cn, (HxCStmtAsm, CNodeStmtAsm))
assert len(cn.stmt_children) == 0
assert len(cn.expr_children) == 0
assert isinstance(cn.addr_instr, list)
assert len(cn.addr_instr) > 0
assert isinstance(cn.addr_instr[0], (int, long))
assert isinstance(cn.length, (int, long))
assert len(cn.addr_instr) == cn.length
assert len(cn) == cn.length
li = cn.value
assert isinstance(li, list)
assert len(li) == cn.length
assert isinstance(li[0], BipInstr)
if isinstance(cn, (HxCStmtReturn, CNodeStmtReturn)):
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtFinal, CNodeStmtFinal))
assert isinstance(cn, (HxCStmtReturn, CNodeStmtReturn))
assert len(cn.stmt_children) == 0
assert len(cn.expr_children) == 1
assert cn.value == cn.ret_val
assert cn.value == cn.expr_children[0]
assert isinstance(cn.ret_val, (HxCExpr, CNodeExpr))
if isinstance(cn, (HxCStmtIf, CNodeStmtIf)):
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtIf, CNodeStmtIf))
assert len(cn.expr_children) == 1
assert isinstance(cn.cond, (HxCExpr, CNodeExpr))
assert cn.expr_children[0] == cn.cond
assert isinstance(cn.st_then, (HxCStmt, CNodeStmt))
assert cn.stmt_children[0] == cn.st_then
assert isinstance(cn.has_else, bool)
if cn.has_else:
assert isinstance(cn.st_else, (HxCStmt, CNodeStmt))
assert len(cn.stmt_children) == 2
assert cn.stmt_children[1] == cn.st_else
else:
assert cn.st_else is None
assert len(cn.stmt_children) == 1
if isinstance(cn, (HxCStmtFor, CNodeStmtFor)):
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtLoop, CNodeStmtLoop))
assert isinstance(cn, (HxCStmtFor, CNodeStmtFor))
assert len(cn.stmt_children) == 1
assert len(cn.expr_children) == 3
assert isinstance(cn.init, (HxCExpr, CNodeExpr))
assert isinstance(cn.cond, (HxCExpr, CNodeExpr))
assert isinstance(cn.step, (HxCExpr, CNodeExpr))
assert cn.init == cn.expr_children[0]
assert cn.cond == cn.expr_children[1]
assert cn.step == cn.expr_children[2]
assert isinstance(cn.st_body, (HxCStmt, CNodeStmt))
assert cn.st_body == cn.stmt_children[0]
if isinstance(cn, (HxCStmtWhile, CNodeStmtWhile)):
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtLoop, CNodeStmtLoop))
assert isinstance(cn, (HxCStmtWhile, CNodeStmtWhile))
assert len(cn.stmt_children) == 1
assert len(cn.expr_children) == 1
assert isinstance(cn.cond, (HxCExpr, CNodeExpr))
assert cn.cond == cn.expr_children[0]
assert isinstance(cn.st_body, (HxCStmt, CNodeStmt))
assert cn.st_body == cn.stmt_children[0]
if isinstance(cn, (HxCStmtDoWhile, CNodeStmtDoWhile)):
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtLoop, CNodeStmtLoop))
assert isinstance(cn, (HxCStmtDoWhile, CNodeStmtDoWhile))
assert len(cn.stmt_children) == 1
assert len(cn.expr_children) == 1
assert isinstance(cn.cond, (HxCExpr, CNodeExpr))
assert cn.cond == cn.expr_children[0]
assert isinstance(cn.st_body, (HxCStmt, CNodeStmt))
assert cn.st_body == cn.stmt_children[0]
if isinstance(cn, (HxCStmtSwitch, CNodeStmtSwitch)):
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtLoop, CNodeStmtLoop))
assert isinstance(cn, (HxCStmtSwitch, CNodeStmtSwitch))
assert len(cn.stmt_children) != 0
assert len(cn.expr_children) == 1
assert isinstance(cn.expr, (HxCExpr, CNodeExpr))
assert cn.expr == cn.expr_children[0]
assert isinstance(cn.max_val, (int, long))
cas = cn.st_cases
casv = cn.cases_val
assert isinstance(cas, list)
assert isinstance(casv, list)
assert len(cas) != 0
assert len(cas) == len(cn.stmt_children)
assert len(cas) == len(casv)
for i in range(len(cas)):
assert isinstance(cas[i], (HxCStmt, CNodeStmt))
assert isinstance(casv[i], list)
assert isinstance(casv[i][0], (int, long))
if isinstance(cn, (HxCStmtBlock, CNodeStmtBlock)):
assert isinstance(cn, (HxCStmt, CNodeStmt))
assert isinstance(cn, (HxCStmtBlock, CNodeStmtBlock))
assert len(cn.stmt_children) != 0
assert len(cn.expr_children) == 0
assert len(cn.elts) == len(cn.stmt_children)
|
bip
|
positive
|
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} ss roidb loaded from {}'.format(self.name, cache_file))
return roidb
if int(self._year) == 2007 or self._image_set != 'test':
<DeepExtract>
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
gt_roidb = roidb
gt_roidb = [self._load_pascal_annotation(index) for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
gt_roidb = gt_roidb
</DeepExtract>
<DeepExtract>
filename = os.path.abspath(os.path.join(cfg.DATA_DIR, 'selective_search_data', self.name + '.mat'))
assert os.path.exists(filename), 'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in xrange(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
ss_roidb = self.create_roidb_from_box_list(box_list, gt_roidb)
</DeepExtract>
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
<DeepExtract>
filename = os.path.abspath(os.path.join(cfg.DATA_DIR, 'selective_search_data', self.name + '.mat'))
assert os.path.exists(filename), 'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in xrange(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
roidb = self.create_roidb_from_box_list(box_list, None)
</DeepExtract>
with open(cache_file, 'wb') as fid:
pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote ss roidb to {}'.format(cache_file))
return roidb
|
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} ss roidb loaded from {}'.format(self.name, cache_file))
return roidb
if int(self._year) == 2007 or self._image_set != 'test':
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
gt_roidb = roidb
gt_roidb = [self._load_pascal_annotation(index) for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
gt_roidb = gt_roidb
filename = os.path.abspath(os.path.join(cfg.DATA_DIR, 'selective_search_data', self.name + '.mat'))
assert os.path.exists(filename), 'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in xrange(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
ss_roidb = self.create_roidb_from_box_list(box_list, gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
filename = os.path.abspath(os.path.join(cfg.DATA_DIR, 'selective_search_data', self.name + '.mat'))
assert os.path.exists(filename), 'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in xrange(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
roidb = self.create_roidb_from_box_list(box_list, None)
with open(cache_file, 'wb') as fid:
pickle.dump(roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote ss roidb to {}'.format(cache_file))
return roidb
|
DA_Detection
|
positive
|
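Both selective_search_roidb variants above follow the same load-or-build cache pattern: unpickle a cached result if the file exists, otherwise compute it and write it back. A minimal standalone sketch of that pattern, with hypothetical names (cached, build_fn) that are not part of the DA_Detection code:

import os
import pickle

def cached(cache_path, build_fn):
    # Return the pickled object at cache_path, rebuilding it with build_fn when the file is missing.
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as fid:
            return pickle.load(fid)
    obj = build_fn()
    with open(cache_path, 'wb') as fid:
        pickle.dump(obj, fid, pickle.HIGHEST_PROTOCOL)
    return obj

# usage: roidb = cached('/tmp/demo_roidb.pkl', lambda: [{'boxes': []}])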
def _internal_tranquilo(evaluate_criterion, x, noisy, conv_options, stop_options, radius_options, batch_size, target_sample_size, stagnation_options, search_radius_factor, n_evals_per_point, n_evals_at_start, trustregion, sampling_rng, history, sample_points, solve_subproblem, filter_points, fit_model, aggregate_model, estimate_variance, accept_candidate):
eval_info = {0: n_evals_at_start}
evaluate_criterion(eval_info)
_init_fvec = history.get_fvecs(0).mean(axis=0)
_init_vector_model = VectorModel(intercepts=_init_fvec, linear_terms=np.zeros((len(_init_fvec), len(x))), square_terms=np.zeros((len(_init_fvec), len(x), len(x))), shift=trustregion.center, scale=trustregion.radius)
_init_model = aggregate_model(_init_vector_model)
state = State(trustregion=trustregion, model_indices=[0], model=_init_model, vector_model=_init_vector_model, index=0, x=x, fval=np.mean(history.get_fvals(0)), rho=np.nan, accepted=True, new_indices=[0], old_indices_discarded=[], old_indices_used=[], candidate_index=0, candidate_x=x)
states = [state]
(converged, msg) = (False, None)
for _ in range(stop_options.max_iter):
search_region = state.trustregion._replace(radius=search_radius_factor * state.trustregion.radius)
old_indices = history.get_x_indices_in_region(search_region)
old_xs = history.get_xs(old_indices)
(model_xs, model_indices) = filter_points(xs=old_xs, indices=old_indices, state=state, target_size=target_sample_size)
new_xs = sample_points(trustregion=state.trustregion, n_points=max(0, target_sample_size - len(model_xs)), existing_xs=model_xs, rng=sampling_rng)
new_indices = history.add_xs(new_xs)
eval_info = {i: n_evals_per_point for i in new_indices}
evaluate_criterion(eval_info)
<DeepExtract>
model_indices = np.atleast_1d(model_indices).astype(int)
new_indices = np.atleast_1d(new_indices).astype(int)
model_indices = np.hstack((model_indices, new_indices))
</DeepExtract>
model_xs = history.get_xs(model_indices)
model_data = history.get_model_data(x_indices=model_indices, average=True)
vector_model = fit_model(*model_data, region=state.trustregion, old_model=state.vector_model, weights=None)
scalar_model = aggregate_model(vector_model=vector_model)
sub_sol = solve_subproblem(model=scalar_model, trustregion=state.trustregion)
_relative_step_length = np.linalg.norm(sub_sol.x - state.x) / state.trustregion.radius
if len(model_xs) > target_sample_size:
while _relative_step_length < stagnation_options.min_relative_step_keep and len(model_xs) > target_sample_size:
(model_xs, model_indices) = drop_worst_points(xs=model_xs, indices=model_indices, state=state, n_to_drop=1)
model_data = history.get_model_data(x_indices=model_indices, average=True)
vector_model = fit_model(*model_data, region=state.trustregion, old_model=state.vector_model, weights=None)
scalar_model = aggregate_model(vector_model=vector_model)
sub_sol = solve_subproblem(model=scalar_model, trustregion=state.trustregion)
_relative_step_length = np.linalg.norm(sub_sol.x - state.x) / state.trustregion.radius
sample_counter = 0
while _relative_step_length < stagnation_options.min_relative_step:
if stagnation_options.drop:
(model_xs, model_indices) = drop_worst_points(xs=model_xs, indices=model_indices, state=state, n_to_drop=stagnation_options.sample_increment)
new_xs = sample_points(trustregion=state.trustregion, n_points=stagnation_options.sample_increment, existing_xs=model_xs, rng=sampling_rng)
new_indices = history.add_xs(new_xs)
eval_info = {i: n_evals_per_point for i in new_indices}
evaluate_criterion(eval_info)
<DeepExtract>
model_indices = np.atleast_1d(model_indices).astype(int)
new_indices = np.atleast_1d(new_indices).astype(int)
model_indices = np.hstack((model_indices, new_indices))
</DeepExtract>
model_xs = history.get_xs(model_indices)
model_data = history.get_model_data(x_indices=model_indices, average=True)
vector_model = fit_model(*model_data, region=state.trustregion, old_model=state.vector_model, weights=None)
scalar_model = aggregate_model(vector_model=vector_model)
sub_sol = solve_subproblem(model=scalar_model, trustregion=state.trustregion)
_relative_step_length = np.linalg.norm(sub_sol.x - state.x) / state.trustregion.radius
sample_counter += 1
if sample_counter >= stagnation_options.max_trials:
break
if noisy:
scalar_noise_variance = estimate_variance(trustregion=state.trustregion, history=history, model_type='scalar')
else:
scalar_noise_variance = None
acceptance_result = accept_candidate(subproblem_solution=sub_sol, state=state, wrapped_criterion=evaluate_criterion, noise_variance=scalar_noise_variance, history=history)
state = state._replace(model_indices=model_indices, model=scalar_model, new_indices=np.setdiff1d(model_indices, old_indices), old_indices_used=np.intersect1d(model_indices, old_indices), old_indices_discarded=np.setdiff1d(old_indices, model_indices), **acceptance_result._asdict())
states.append(state)
new_radius = adjust_radius(radius=state.trustregion.radius, rho=acceptance_result.rho, step_length=acceptance_result.step_length, options=radius_options)
new_trustregion = state.trustregion._replace(center=acceptance_result.x, radius=new_radius)
state = state._replace(trustregion=new_trustregion)
if acceptance_result.accepted and (not conv_options.disable):
<DeepExtract>
(old, new) = states[-2:]
f_change_abs = np.abs(old.fval - new.fval)
f_change_rel = f_change_abs / max(np.abs(old.fval), 1)
x_change_abs = np.linalg.norm(old.x - new.x)
x_change_rel = np.linalg.norm((old.x - new.x) / np.clip(np.abs(old.x), 1, np.inf))
g_norm_abs = np.linalg.norm(new.model.linear_terms)
g_norm_rel = g_norm_abs / max(g_norm_abs, 1)
converged = True
if g_norm_rel <= conv_options.gtol_rel:
msg = 'Relative gradient norm smaller than tolerance.'
elif g_norm_abs <= conv_options.gtol_abs:
msg = 'Absolute gradient norm smaller than tolerance.'
elif f_change_rel <= conv_options.ftol_rel:
msg = 'Relative criterion change smaller than tolerance.'
elif f_change_abs <= conv_options.ftol_abs:
msg = 'Absolute criterion change smaller than tolerance.'
elif x_change_rel <= conv_options.xtol_rel:
msg = 'Relative params change smaller than tolerance.'
elif x_change_abs <= conv_options.xtol_abs:
msg = 'Absolute params change smaller than tolerance.'
else:
converged = False
msg = None
(converged, msg) = (converged, msg)
</DeepExtract>
if converged:
break
if history.get_n_fun() >= stop_options.max_eval:
converged = False
msg = 'Maximum number of criterion evaluations reached.'
break
res = {'solution_x': state.x, 'solution_criterion': state.fval, 'states': states, 'message': msg, 'tranquilo_history': history}
return res
|
def _internal_tranquilo(evaluate_criterion, x, noisy, conv_options, stop_options, radius_options, batch_size, target_sample_size, stagnation_options, search_radius_factor, n_evals_per_point, n_evals_at_start, trustregion, sampling_rng, history, sample_points, solve_subproblem, filter_points, fit_model, aggregate_model, estimate_variance, accept_candidate):
eval_info = {0: n_evals_at_start}
evaluate_criterion(eval_info)
_init_fvec = history.get_fvecs(0).mean(axis=0)
_init_vector_model = VectorModel(intercepts=_init_fvec, linear_terms=np.zeros((len(_init_fvec), len(x))), square_terms=np.zeros((len(_init_fvec), len(x), len(x))), shift=trustregion.center, scale=trustregion.radius)
_init_model = aggregate_model(_init_vector_model)
state = State(trustregion=trustregion, model_indices=[0], model=_init_model, vector_model=_init_vector_model, index=0, x=x, fval=np.mean(history.get_fvals(0)), rho=np.nan, accepted=True, new_indices=[0], old_indices_discarded=[], old_indices_used=[], candidate_index=0, candidate_x=x)
states = [state]
(converged, msg) = (False, None)
for _ in range(stop_options.max_iter):
search_region = state.trustregion._replace(radius=search_radius_factor * state.trustregion.radius)
old_indices = history.get_x_indices_in_region(search_region)
old_xs = history.get_xs(old_indices)
(model_xs, model_indices) = filter_points(xs=old_xs, indices=old_indices, state=state, target_size=target_sample_size)
new_xs = sample_points(trustregion=state.trustregion, n_points=max(0, target_sample_size - len(model_xs)), existing_xs=model_xs, rng=sampling_rng)
new_indices = history.add_xs(new_xs)
eval_info = {i: n_evals_per_point for i in new_indices}
evaluate_criterion(eval_info)
model_indices = np.atleast_1d(model_indices).astype(int)
new_indices = np.atleast_1d(new_indices).astype(int)
model_indices = np.hstack((model_indices, new_indices))
model_xs = history.get_xs(model_indices)
model_data = history.get_model_data(x_indices=model_indices, average=True)
vector_model = fit_model(*model_data, region=state.trustregion, old_model=state.vector_model, weights=None)
scalar_model = aggregate_model(vector_model=vector_model)
sub_sol = solve_subproblem(model=scalar_model, trustregion=state.trustregion)
_relative_step_length = np.linalg.norm(sub_sol.x - state.x) / state.trustregion.radius
if len(model_xs) > target_sample_size:
while _relative_step_length < stagnation_options.min_relative_step_keep and len(model_xs) > target_sample_size:
(model_xs, model_indices) = drop_worst_points(xs=model_xs, indices=model_indices, state=state, n_to_drop=1)
model_data = history.get_model_data(x_indices=model_indices, average=True)
vector_model = fit_model(*model_data, region=state.trustregion, old_model=state.vector_model, weights=None)
scalar_model = aggregate_model(vector_model=vector_model)
sub_sol = solve_subproblem(model=scalar_model, trustregion=state.trustregion)
_relative_step_length = np.linalg.norm(sub_sol.x - state.x) / state.trustregion.radius
sample_counter = 0
while _relative_step_length < stagnation_options.min_relative_step:
if stagnation_options.drop:
(model_xs, model_indices) = drop_worst_points(xs=model_xs, indices=model_indices, state=state, n_to_drop=stagnation_options.sample_increment)
new_xs = sample_points(trustregion=state.trustregion, n_points=stagnation_options.sample_increment, existing_xs=model_xs, rng=sampling_rng)
new_indices = history.add_xs(new_xs)
eval_info = {i: n_evals_per_point for i in new_indices}
evaluate_criterion(eval_info)
model_indices = np.atleast_1d(model_indices).astype(int)
new_indices = np.atleast_1d(new_indices).astype(int)
model_indices = np.hstack((model_indices, new_indices))
model_xs = history.get_xs(model_indices)
model_data = history.get_model_data(x_indices=model_indices, average=True)
vector_model = fit_model(*model_data, region=state.trustregion, old_model=state.vector_model, weights=None)
scalar_model = aggregate_model(vector_model=vector_model)
sub_sol = solve_subproblem(model=scalar_model, trustregion=state.trustregion)
_relative_step_length = np.linalg.norm(sub_sol.x - state.x) / state.trustregion.radius
sample_counter += 1
if sample_counter >= stagnation_options.max_trials:
break
if noisy:
scalar_noise_variance = estimate_variance(trustregion=state.trustregion, history=history, model_type='scalar')
else:
scalar_noise_variance = None
acceptance_result = accept_candidate(subproblem_solution=sub_sol, state=state, wrapped_criterion=evaluate_criterion, noise_variance=scalar_noise_variance, history=history)
state = state._replace(model_indices=model_indices, model=scalar_model, new_indices=np.setdiff1d(model_indices, old_indices), old_indices_used=np.intersect1d(model_indices, old_indices), old_indices_discarded=np.setdiff1d(old_indices, model_indices), **acceptance_result._asdict())
states.append(state)
new_radius = adjust_radius(radius=state.trustregion.radius, rho=acceptance_result.rho, step_length=acceptance_result.step_length, options=radius_options)
new_trustregion = state.trustregion._replace(center=acceptance_result.x, radius=new_radius)
state = state._replace(trustregion=new_trustregion)
if acceptance_result.accepted and (not conv_options.disable):
(old, new) = states[-2:]
f_change_abs = np.abs(old.fval - new.fval)
f_change_rel = f_change_abs / max(np.abs(old.fval), 1)
x_change_abs = np.linalg.norm(old.x - new.x)
x_change_rel = np.linalg.norm((old.x - new.x) / np.clip(np.abs(old.x), 1, np.inf))
g_norm_abs = np.linalg.norm(new.model.linear_terms)
g_norm_rel = g_norm_abs / max(g_norm_abs, 1)
converged = True
if g_norm_rel <= conv_options.gtol_rel:
msg = 'Relative gradient norm smaller than tolerance.'
elif g_norm_abs <= conv_options.gtol_abs:
msg = 'Absolute gradient norm smaller than tolerance.'
elif f_change_rel <= conv_options.ftol_rel:
msg = 'Relative criterion change smaller than tolerance.'
elif f_change_abs <= conv_options.ftol_abs:
msg = 'Absolute criterion change smaller than tolerance.'
elif x_change_rel <= conv_options.xtol_rel:
msg = 'Relative params change smaller than tolerance.'
elif x_change_abs <= conv_options.xtol_abs:
msg = 'Absolute params change smaller than tolerance.'
else:
converged = False
msg = None
(converged, msg) = (converged, msg)
if converged:
break
if history.get_n_fun() >= stop_options.max_eval:
converged = False
msg = 'Maximum number of criterion evaluations reached.'
break
res = {'solution_x': state.x, 'solution_criterion': state.fval, 'states': states, 'message': msg, 'tranquilo_history': history}
return res
|
estimagic
|
positive
|
def step(self, data):
"""
Run convolution over a single position. The data must be exactly as wide as the convolution filters.
:param data: Shape: (batch_size, kernel_width, num_hidden).
:return: Single result of a convolution. Shape: (batch_size, 1, num_hidden).
"""
<DeepExtract>
if self.config.act_type == C.GLU:
num_hidden = 2 * self.config.num_hidden
else:
num_hidden = self.config.num_hidden
</DeepExtract>
data = mx.sym.swapaxes(data, dim1=1, dim2=2)
data = mx.sym.reshape(data, shape=(0, -3))
weight = mx.sym.reshape(self.conv_weight, shape=(0, -3))
data_conv = mx.sym.FullyConnected(data=data, weight=weight, bias=self.conv_bias, num_hidden=num_hidden)
data_conv = mx.sym.expand_dims(data_conv, axis=2)
return self._post_convolution(data_conv)
|
def step(self, data):
"""
Run convolution over a single position. The data must be exactly as wide as the convolution filters.
:param data: Shape: (batch_size, kernel_width, num_hidden).
:return: Single result of a convolution. Shape: (batch_size, 1, num_hidden).
"""
if self.config.act_type == C.GLU:
num_hidden = 2 * self.config.num_hidden
else:
num_hidden = self.config.num_hidden
data = mx.sym.swapaxes(data, dim1=1, dim2=2)
data = mx.sym.reshape(data, shape=(0, -3))
weight = mx.sym.reshape(self.conv_weight, shape=(0, -3))
data_conv = mx.sym.FullyConnected(data=data, weight=weight, bias=self.conv_bias, num_hidden=num_hidden)
data_conv = mx.sym.expand_dims(data_conv, axis=2)
return self._post_convolution(data_conv)
|
DCGCN
|
positive
|
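The step() pair above evaluates a convolution at a single position by flattening the (kernel_width, num_hidden) window and running it through a fully connected layer. A NumPy sketch of that reduction with made-up shapes, not the MXNet symbol graph used in the code:

import numpy as np

batch_size, kernel_width, num_hidden, num_out = 2, 3, 4, 8
data = np.random.rand(batch_size, kernel_width, num_hidden)
weight = np.random.rand(num_out, kernel_width, num_hidden)  # one filter per output unit
bias = np.zeros(num_out)

# a convolution applied to exactly one window is just a matrix multiply over the flattened window
flat_data = data.reshape(batch_size, -1)        # (batch, kernel_width * num_hidden)
flat_weight = weight.reshape(num_out, -1)
out = flat_data @ flat_weight.T + bias          # (batch, num_out)
out = out[:, np.newaxis, :]                     # (batch, 1, num_out), matching the single-position result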
def tokenize(self, sents):
"""returns a list of lists of tuples (word, tag) """
'List is in the same order as the sents'
givenList = isinstance(sents, list)
if not givenList:
sents = [sents]
<DeepExtract>
if not self.tokenizerP:
command = self.tagger_dir + '/' + self.tokenizer_command
self.tokenizerP = Popen([command] + self.tokenizer_args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
tokenizerP = self.tokenizerP
</DeepExtract>
tokenizedLists = []
for s in sents:
s = s.strip().replace('\n', ' ') + '\n'
tokenizerP.stdin.write(s.encode('utf8'))
tokenizerP.stdin.flush()
tagLine = tokenizerP.stdout.readline().strip()
if tagLine:
info = tagLine.decode().split('\t')
tokens = info[0].split()
tokenizedLists.append(tokens)
if givenList:
return tokenizedLists
else:
return tokenizedLists[0]
|
def tokenize(self, sents):
"""returns a list of lists of tuples (word, tag) """
'List is in the same order as the sents'
givenList = isinstance(sents, list)
if not givenList:
sents = [sents]
if not self.tokenizerP:
command = self.tagger_dir + '/' + self.tokenizer_command
self.tokenizerP = Popen([command] + self.tokenizer_args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
tokenizerP = self.tokenizerP
tokenizedLists = []
for s in sents:
s = s.strip().replace('\n', ' ') + '\n'
tokenizerP.stdin.write(s.encode('utf8'))
tokenizerP.stdin.flush()
tagLine = tokenizerP.stdout.readline().strip()
if tagLine:
info = tagLine.decode().split('\t')
tokens = info[0].split()
tokenizedLists.append(tokens)
if givenList:
return tokenizedLists
else:
return tokenizedLists[0]
|
dlatk
|
positive
|
def get_payload(self, folders, delete_type, delete_sub_folders):
from .folders import Folder, FolderId, DistinguishedFolderId
emptyfolder = create_element('m:%s' % self.SERVICE_NAME, DeleteType=delete_type, DeleteSubFolders='true' if delete_sub_folders else 'false')
folder_ids = create_element('m:FolderIds')
for folder in folders:
log.debug('Emptying folder %s', folder)
if not isinstance(folder, (Folder, FolderId, DistinguishedFolderId)):
<DeepExtract>
if isinstance(folder, FolderId):
folder = folder
if isinstance(folder, (tuple, list)):
folder = FolderId(*folder)
if isinstance(folder, dict):
folder = FolderId(**folder)
folder = FolderId(folder.id, folder.changekey)
</DeepExtract>
set_xml_value(folder_ids, folder, version=self.account.version)
if not len(folder_ids):
raise ValueError('"folders" must not be empty')
emptyfolder.append(folder_ids)
return emptyfolder
|
def get_payload(self, folders, delete_type, delete_sub_folders):
from .folders import Folder, FolderId, DistinguishedFolderId
emptyfolder = create_element('m:%s' % self.SERVICE_NAME, DeleteType=delete_type, DeleteSubFolders='true' if delete_sub_folders else 'false')
folder_ids = create_element('m:FolderIds')
for folder in folders:
log.debug('Emptying folder %s', folder)
if not isinstance(folder, (Folder, FolderId, DistinguishedFolderId)):
if isinstance(folder, FolderId):
folder = folder
if isinstance(folder, (tuple, list)):
folder = FolderId(*folder)
if isinstance(folder, dict):
folder = FolderId(**folder)
folder = FolderId(folder.id, folder.changekey)
set_xml_value(folder_ids, folder, version=self.account.version)
if not len(folder_ids):
raise ValueError('"folders" must not be empty')
emptyfolder.append(folder_ids)
return emptyfolder
|
exchangelib
|
positive
|
def get_rows(self, train_ids):
tmp_name = 'tmp_pairs'
<DeepExtract>
self.db.execute('CREATE TEMP TABLE {table_name} (id INT);'.format(table_name=tmp_name))
if len(map(lambda x: (x,), train_ids)) != 0:
self.db.executemany('INSERT INTO {table_name} VALUES (?);'.format(table_name=tmp_name), map(lambda x: (x,), train_ids))
return tmp_name
</DeepExtract>
pairs = self.db.execute('SELECT * FROM {table_pairs} WHERE rowid IN (SELECT id FROM {tmp_table});'.format(table_pairs=DataAccess.TABLE_PAIRS, tmp_table=tmp_name)).fetchall()
<DeepExtract>
self.db.execute('drop table {tmp_table_name};'.format(tmp_table_name=tmp_name))
</DeepExtract>
input_data = np.array(pairs)
(ids, pair_data) = (input_data[:, 0], input_data[:, 7:])
return (input_data, ids, pair_data)
|
def get_rows(self, train_ids):
tmp_name = 'tmp_pairs'
self.db.execute('CREATE TEMP TABLE {table_name} (id INT);'.format(table_name=tmp_name))
if len(map(lambda x: (x,), train_ids)) != 0:
self.db.executemany('INSERT INTO {table_name} VALUES (?);'.format(table_name=tmp_name), map(lambda x: (x,), train_ids))
return tmp_name
pairs = self.db.execute('SELECT * FROM {table_pairs} WHERE rowid IN (SELECT id FROM {tmp_table});'.format(table_pairs=DataAccess.TABLE_PAIRS, tmp_table=tmp_name)).fetchall()
self.db.execute('drop table {tmp_table_name};'.format(tmp_table_name=tmp_name))
input_data = np.array(pairs)
(ids, pair_data) = (input_data[:, 0], input_data[:, 7:])
return (input_data, ids, pair_data)
|
drnet
|
positive
|
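get_rows above filters a large table by a Python id list through a temporary table instead of a long IN (...) literal; note that len(map(...)) would raise TypeError on Python 3, where the map would first need to be materialised as a list. A self-contained sqlite3 sketch of the temp-table idiom with illustrative table and column names:

import sqlite3

db = sqlite3.connect(':memory:')
db.execute('CREATE TABLE pairs (id INTEGER PRIMARY KEY, value TEXT);')
db.executemany('INSERT INTO pairs VALUES (?, ?);', [(i, 'row%d' % i) for i in range(10)])

wanted = [2, 5, 7]
db.execute('CREATE TEMP TABLE tmp_ids (id INT);')
db.executemany('INSERT INTO tmp_ids VALUES (?);', [(i,) for i in wanted])
rows = db.execute('SELECT * FROM pairs WHERE id IN (SELECT id FROM tmp_ids);').fetchall()
db.execute('DROP TABLE tmp_ids;')
print(rows)  # [(2, 'row2'), (5, 'row5'), (7, 'row7')]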
def test_get_max_min_increasing(self):
<DeepExtract>
perf_list = []
MAX_VAL = 20
for i in range(0, MAX_VAL):
t = i / MAX_VAL
tmp_score = stats_datastruct.Stats_datastruct()
tmp_score.TPR = i / MAX_VAL
perf_list.append(perf_datastruct.Perf(score=tmp_score, threshold=t))
perf_list = perf_list
</DeepExtract>
print([p.score.TPR for p in perf_list])
(thre, val) = self.quality_evaluator.get_optimal_for_optimized_attribute(perfs_list=perf_list, attribute='TPR', higher=False, rightmost=True, is_increasing=True, tolerance=0.9)
self.logger.info(f'Found value {thre}')
tmp_conf = calibrator_conf.Default_calibrator_conf()
tmp_conf.thre_upper_at_least_xpercent_TPR = thre
self.plotmaker.print_graph_with_thresholds(perf_list, thresholds_handler=tmp_conf, output_path=self.output_folder, file_name='max_min_inc.png')
self.assertAlmostEqual(thre, 0.8, delta=0.1)
|
def test_get_max_min_increasing(self):
perf_list = []
MAX_VAL = 20
for i in range(0, MAX_VAL):
t = i / MAX_VAL
tmp_score = stats_datastruct.Stats_datastruct()
tmp_score.TPR = i / MAX_VAL
perf_list.append(perf_datastruct.Perf(score=tmp_score, threshold=t))
perf_list = perf_list
print([p.score.TPR for p in perf_list])
(thre, val) = self.quality_evaluator.get_optimal_for_optimized_attribute(perfs_list=perf_list, attribute='TPR', higher=False, rightmost=True, is_increasing=True, tolerance=0.9)
self.logger.info(f'Found value {thre}')
tmp_conf = calibrator_conf.Default_calibrator_conf()
tmp_conf.thre_upper_at_least_xpercent_TPR = thre
self.plotmaker.print_graph_with_thresholds(perf_list, thresholds_handler=tmp_conf, output_path=self.output_folder, file_name='max_min_inc.png')
self.assertAlmostEqual(thre, 0.8, delta=0.1)
|
douglas-quaid
|
positive
|
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
text = self.tokenizer.decode(token_ids, skip_special_tokens)
if clean_up_tokenization_spaces:
<DeepExtract>
text = text.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',').replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(' do not', " don't").replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
clean_text = text
</DeepExtract>
return clean_text
else:
return text
|
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
text = self.tokenizer.decode(token_ids, skip_special_tokens)
if clean_up_tokenization_spaces:
text = text.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',').replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(' do not', " don't").replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
clean_text = text
return clean_text
else:
return text
|
BERT-EMD
|
positive
|
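decode() above undoes word-piece tokenization artefacts with a chain of str.replace calls. The same cleanup as a standalone helper, usable without the surrounding tokenizer class:

def clean_up_tokenization(text):
    # Collapse the spaces detokenization leaves around punctuation and contractions.
    return (text.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',')
                .replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m")
                .replace(' do not', " don't").replace(" 's", "'s")
                .replace(" 've", "'ve").replace(" 're", "'re"))

# clean_up_tokenization("he 's here , is n't he ?") returns "he's here, isn't he?"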
def convert_http_session_to_pattern(session):
if session.http_request_response:
(requests, responses) = split_into_requests_and_responses(session.http_request_response)
if len(responses) != 0:
warn('HTTPServerResponse type is not supported in STIX 2.x', 429)
if len(requests) >= 1:
<DeepExtract>
expressions = []
if requests[0].http_request_line is not None:
if requests[0].http_request_line.http_method is not None:
term = add_comparison_expression(requests[0].http_request_line.http_method, "network-traffic:extensions.'http-request-ext'.request_method")
if term:
expressions.append(term)
if requests[0].http_request_line.value is not None:
term = add_comparison_expression(requests[0].http_request_line.value, "network-traffic:extensions.'http-request-ext'.request_value")
if term:
expressions.append(term)
if requests[0].http_request_line.version is not None:
term = add_comparison_expression(requests[0].http_request_line.version, "network-traffic:extensions.'http-request-ext'.request_version")
if term:
expressions.append(term)
if requests[0].http_request_header is not None:
if requests[0].http_request_header.parsed_header is not None:
header = requests[0].http_request_header.parsed_header
for prop_spec in _NETWORK_CONNECTION_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(header, prop_1x) and getattr(header, prop_1x):
value = getattr(header, prop_1x)
if isinstance(value, Address):
value = getattr(value, 'address_value')
elif isinstance(value, HostField):
value = getattr(value, 'domain_name').value
elif isinstance(value, URI):
value = value.value
term = add_comparison_expression(value, object_path)
if term:
expressions.append(term)
if requests[0].http_message_body is not None:
mb = requests[0].http_message_body
if mb.length:
term = add_comparison_expression(mb.length, "network-traffic:extensions.'http-request-ext'.message_body_length")
if term:
expressions.append(term)
if mb.message_body:
expressions.append(create_term("network-traffic:extensions.'http-request-ext'.message_body_data_ref.payload_bin", 'Equals', encode_in_base64(str(mb.message_body))))
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
if len(requests) > 1:
warn('Only HTTP_Request_Response used for http-request-ext, using first value', 512)
return expression
|
def convert_http_session_to_pattern(session):
if session.http_request_response:
(requests, responses) = split_into_requests_and_responses(session.http_request_response)
if len(responses) != 0:
warn('HTTPServerResponse type is not supported in STIX 2.x', 429)
if len(requests) >= 1:
expressions = []
if requests[0].http_request_line is not None:
if requests[0].http_request_line.http_method is not None:
term = add_comparison_expression(requests[0].http_request_line.http_method, "network-traffic:extensions.'http-request-ext'.request_method")
if term:
expressions.append(term)
if requests[0].http_request_line.value is not None:
term = add_comparison_expression(requests[0].http_request_line.value, "network-traffic:extensions.'http-request-ext'.request_value")
if term:
expressions.append(term)
if requests[0].http_request_line.version is not None:
term = add_comparison_expression(requests[0].http_request_line.version, "network-traffic:extensions.'http-request-ext'.request_version")
if term:
expressions.append(term)
if requests[0].http_request_header is not None:
if requests[0].http_request_header.parsed_header is not None:
header = requests[0].http_request_header.parsed_header
for prop_spec in _NETWORK_CONNECTION_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(header, prop_1x) and getattr(header, prop_1x):
value = getattr(header, prop_1x)
if isinstance(value, Address):
value = getattr(value, 'address_value')
elif isinstance(value, HostField):
value = getattr(value, 'domain_name').value
elif isinstance(value, URI):
value = value.value
term = add_comparison_expression(value, object_path)
if term:
expressions.append(term)
if requests[0].http_message_body is not None:
mb = requests[0].http_message_body
if mb.length:
term = add_comparison_expression(mb.length, "network-traffic:extensions.'http-request-ext'.message_body_length")
if term:
expressions.append(term)
if mb.message_body:
expressions.append(create_term("network-traffic:extensions.'http-request-ext'.message_body_data_ref.payload_bin", 'Equals', encode_in_base64(str(mb.message_body))))
expression = create_boolean_expression('AND', expressions)
if len(requests) > 1:
warn('Only HTTP_Request_Response used for http-request-ext, using first value', 512)
return expression
|
cti-stix-elevator
|
positive
|
def add_device(self, _widget=None):
""" Show dialog for adding new device and create the device based on
user selection """
selected_device = self.list_partitions.selected_partition[0]
<DeepExtract>
msg = None
if selected_device.type == 'free space':
parent_device = selected_device.parents[0]
else:
parent_device = selected_device
if parent_device.type == 'lvmvg' and (not parent_device.complete):
msg = _('{name} is not complete. It is not possible to add new LVs to VG with missing PVs.').format(name=parent_device.name)
if parent_device.format.type == 'lvmpv' and parent_device.size < Size('4 MiB'):
msg = _('Not enough free space for a new LVM Volume Group.')
if parent_device.is_disk and parent_device.format.type == 'disklabel':
disk = parent_device.format.parted_disk
selected_device = self.list_partitions.selected_partition[0]
if disk.primaryPartitionCount >= disk.maxPrimaryPartitionCount and selected_device.is_primary:
msg = _('Disk {name} already reached maximum allowed number of primary partitions for {label} disklabel.').format(name=parent_device.name, label=parent_device.format.label_type)
(allow, msg) = (False, msg) if msg else (True, None)
</DeepExtract>
if not allow:
<DeepExtract>
message_dialogs.ErrorDialog(self.main_window, msg)
</DeepExtract>
return
if selected_device.type == 'free space':
if selected_device.is_uninitialized_disk:
<DeepExtract>
dialog = other_dialogs.AddLabelDialog(self.main_window)
selection = self.run_dialog(dialog)
message = _('Failed to add disklabel:')
if selection:
result = self.client.remote_call('create_disk_label', selected_device.disk, selection)
if not result.success:
if not result.exception:
self.show_error_dialog(result.message)
else:
self._reraise_exception(result.exception, result.traceback, message, dialog_window=dialog.dialog)
elif result.actions:
action_str = _('create new disklabel on {name}').format(name=selected_device.disk.name)
self.list_actions.append('add', action_str, result.actions)
self._handle_user_change()
self.update_partitions_view()
</DeepExtract>
return
elif selected_device.parents[0].type == 'mdarray' and (not selected_device.parents[0].format.type):
<DeepExtract>
dialog = other_dialogs.AddLabelDialog(self.main_window)
selection = self.run_dialog(dialog)
message = _('Failed to add disklabel:')
if selection:
result = self.client.remote_call('create_disk_label', selected_device.parents[0], selection)
if not result.success:
if not result.exception:
self.show_error_dialog(result.message)
else:
self._reraise_exception(result.exception, result.traceback, message, dialog_window=dialog.dialog)
elif result.actions:
action_str = _('create new disklabel on {name}').format(name=selected_device.parents[0].name)
self.list_actions.append('add', action_str, result.actions)
self._handle_user_change()
self.update_partitions_view()
</DeepExtract>
return
if selected_device.type == 'free space':
selected_parent = selected_device.parents[0]
selected_free = selected_device
else:
selected_parent = selected_device
selected_free = self.client.remote_call('get_free_device', selected_device)
if self.installer_mode:
mountpoints = self.client.remote_call('get_mountpoints')
else:
mountpoints = []
dialog = add_dialog.AddDialog(parent_window=self.main_window, selected_parent=selected_parent, selected_free=selected_free, available_free=self.client.remote_call('get_free_info'), supported_filesystems=self.supported_filesystems, mountpoints=mountpoints, installer_mode=self.installer_mode)
<DeepExtract>
response = dialog.run()
response = response
</DeepExtract>
message = _('Failed to add the device:')
if response == Gtk.ResponseType.OK:
user_input = dialog.get_selection()
result = self.client.remote_call('add_device', user_input)
if not result.success:
if not result.exception:
<DeepExtract>
message_dialogs.ErrorDialog(self.main_window, result.message)
</DeepExtract>
else:
<DeepExtract>
raise type(result.exception)(message + '\n' + str(result.exception) + '\n' + result.traceback)
</DeepExtract>
elif result.actions:
action_str = _('add {size} {type} device').format(size=str(user_input.size_selection.total_size), type=user_input.device_type)
self.list_actions.append('add', action_str, result.actions)
<DeepExtract>
pass
</DeepExtract>
self.list_devices.update_devices_view()
<DeepExtract>
self.list_partitions.update_partitions_list(self.list_devices.selected_device)
self.logical_view.visualize_devices(self.list_partitions.partitions_list)
</DeepExtract>
dialog.destroy()
|
def add_device(self, _widget=None):
""" Show dialog for adding new device and create the device based on
user selection """
selected_device = self.list_partitions.selected_partition[0]
msg = None
if selected_device.type == 'free space':
parent_device = selected_device.parents[0]
else:
parent_device = selected_device
if parent_device.type == 'lvmvg' and (not parent_device.complete):
msg = _('{name} is not complete. It is not possible to add new LVs to VG with missing PVs.').format(name=parent_device.name)
if parent_device.format.type == 'lvmpv' and parent_device.size < Size('4 MiB'):
msg = _('Not enough free space for a new LVM Volume Group.')
if parent_device.is_disk and parent_device.format.type == 'disklabel':
disk = parent_device.format.parted_disk
selected_device = self.list_partitions.selected_partition[0]
if disk.primaryPartitionCount >= disk.maxPrimaryPartitionCount and selected_device.is_primary:
msg = _('Disk {name} already reached maximum allowed number of primary partitions for {label} disklabel.').format(name=parent_device.name, label=parent_device.format.label_type)
(allow, msg) = (False, msg) if msg else (True, None)
if not allow:
message_dialogs.ErrorDialog(self.main_window, msg)
return
if selected_device.type == 'free space':
if selected_device.is_uninitialized_disk:
dialog = other_dialogs.AddLabelDialog(self.main_window)
selection = self.run_dialog(dialog)
message = _('Failed to add disklabel:')
if selection:
result = self.client.remote_call('create_disk_label', selected_device.disk, selection)
if not result.success:
if not result.exception:
self.show_error_dialog(result.message)
else:
self._reraise_exception(result.exception, result.traceback, message, dialog_window=dialog.dialog)
elif result.actions:
action_str = _('create new disklabel on {name}').format(name=selected_device.disk.name)
self.list_actions.append('add', action_str, result.actions)
self._handle_user_change()
self.update_partitions_view()
return
elif selected_device.parents[0].type == 'mdarray' and (not selected_device.parents[0].format.type):
dialog = other_dialogs.AddLabelDialog(self.main_window)
selection = self.run_dialog(dialog)
message = _('Failed to add disklabel:')
if selection:
result = self.client.remote_call('create_disk_label', selected_device.parents[0], selection)
if not result.success:
if not result.exception:
self.show_error_dialog(result.message)
else:
self._reraise_exception(result.exception, result.traceback, message, dialog_window=dialog.dialog)
elif result.actions:
action_str = _('create new disklabel on {name}').format(name=selected_device.parents[0].name)
self.list_actions.append('add', action_str, result.actions)
self._handle_user_change()
self.update_partitions_view()
return
if selected_device.type == 'free space':
selected_parent = selected_device.parents[0]
selected_free = selected_device
else:
selected_parent = selected_device
selected_free = self.client.remote_call('get_free_device', selected_device)
if self.installer_mode:
mountpoints = self.client.remote_call('get_mountpoints')
else:
mountpoints = []
dialog = add_dialog.AddDialog(parent_window=self.main_window, selected_parent=selected_parent, selected_free=selected_free, available_free=self.client.remote_call('get_free_info'), supported_filesystems=self.supported_filesystems, mountpoints=mountpoints, installer_mode=self.installer_mode)
response = dialog.run()
response = response
message = _('Failed to add the device:')
if response == Gtk.ResponseType.OK:
user_input = dialog.get_selection()
result = self.client.remote_call('add_device', user_input)
if not result.success:
if not result.exception:
message_dialogs.ErrorDialog(self.main_window, result.message)
else:
raise type(result.exception)(message + '\n' + str(result.exception) + '\n' + result.traceback)
elif result.actions:
action_str = _('add {size} {type} device').format(size=str(user_input.size_selection.total_size), type=user_input.device_type)
self.list_actions.append('add', action_str, result.actions)
pass
self.list_devices.update_devices_view()
self.list_partitions.update_partitions_list(self.list_devices.selected_device)
self.logical_view.visualize_devices(self.list_partitions.partitions_list)
dialog.destroy()
|
blivet-gui
|
positive
|
def CheckEnd(self, filename, clean_lines, linenum, error):
<DeepExtract>
if '^( *)\\}' not in _regexp_compile_cache:
_regexp_compile_cache['^( *)\\}'] = sre_compile.compile('^( *)\\}')
indent = _regexp_compile_cache['^( *)\\}'].match(clean_lines.elided[linenum])
</DeepExtract>
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3, 'Closing brace should be aligned with beginning of %s' % parent)
|
def CheckEnd(self, filename, clean_lines, linenum, error):
if '^( *)\\}' not in _regexp_compile_cache:
_regexp_compile_cache['^( *)\\}'] = sre_compile.compile('^( *)\\}')
indent = _regexp_compile_cache['^( *)\\}'].match(clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3, 'Closing brace should be aligned with beginning of %s' % parent)
|
DeRPN
|
positive
|
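CheckEnd above looks regexes up in a module-level _regexp_compile_cache so each pattern is compiled only once, a pattern familiar from cpplint-style checkers. A minimal sketch of that memoisation (CPython's re module keeps its own internal cache as well, so this mostly documents the idiom):

import re

_regexp_compile_cache = {}

def cached_match(pattern, s):
    # Compile the pattern on first use, then reuse the compiled object.
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = re.compile(pattern)
    return _regexp_compile_cache[pattern].match(s)

# m = cached_match(r'^( *)\}', '    }')
# if m: print(len(m.group(1)))  # 4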
def step_cell(y, x, get, set):
<DeepExtract>
state = self.rows[y % self.height][x % self.width]
</DeepExtract>
<DeepExtract>
n_ = get(y - 1, x + 0)
ne = get(y - 1, x + 1)
e_ = get(y + 0, x + 1)
se = get(y + 1, x + 1)
s_ = get(y + 1, x + 0)
sw = get(y + 1, x - 1)
w_ = get(y + 0, x - 1)
nw = get(y - 1, x - 1)
neighbor_states = [n_, ne, e_, se, s_, sw, w_, nw]
count = 0
for state in neighbor_states:
if state == ALIVE:
count += 1
neighbors = count
</DeepExtract>
<DeepExtract>
if state == ALIVE:
if neighbors < 2:
next_state = EMPTY
elif neighbors > 3:
next_state = EMPTY
elif neighbors == 3:
next_state = ALIVE
next_state = state
</DeepExtract>
<DeepExtract>
self.rows[y % self.height][x % self.width] = next_state
</DeepExtract>
|
def step_cell(y, x, get, set):
state = self.rows[y % self.height][x % self.width]
n_ = get(y - 1, x + 0)
ne = get(y - 1, x + 1)
e_ = get(y + 0, x + 1)
se = get(y + 1, x + 1)
s_ = get(y + 1, x + 0)
sw = get(y + 1, x - 1)
w_ = get(y + 0, x - 1)
nw = get(y - 1, x - 1)
neighbor_states = [n_, ne, e_, se, s_, sw, w_, nw]
count = 0
for state in neighbor_states:
if state == ALIVE:
count += 1
neighbors = count
if state == ALIVE:
if neighbors < 2:
next_state = EMPTY
elif neighbors > 3:
next_state = EMPTY
elif neighbors == 3:
next_state = ALIVE
next_state = state
self.rows[y % self.height][x % self.width] = next_state
|
effectivepython
|
positive
|
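step_cell above is the mechanically inlined form of a Game of Life step; because the inlining drops the original early returns, the trailing next_state = state overwrites the branch results unconditionally. A compact standalone version of the usual Conway rules the code appears to be based on, with ALIVE and EMPTY given illustrative values:

ALIVE = '*'
EMPTY = '-'

def game_logic(state, neighbors):
    # Live cells survive with 2 or 3 neighbors; dead cells come alive with exactly 3.
    if state == ALIVE:
        if neighbors < 2 or neighbors > 3:
            return EMPTY
    elif neighbors == 3:
        return ALIVE
    return state

# game_logic(ALIVE, 1) -> '-', game_logic(EMPTY, 3) -> '*', game_logic(ALIVE, 2) -> '*'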
def check_oauth(request_handler, *args):
if self._in_error:
<DeepExtract>
request_handler.response.out.write('<html><body>')
request_handler.response.out.write(self._message)
request_handler.response.out.write('</body></html>')
</DeepExtract>
return
user = users.get_current_user()
if not user:
request_handler.redirect(users.create_login_url(request_handler.request.uri))
return
self.flow.params['state'] = request_handler.request.url
self._request_handler = request_handler
self.credentials = StorageByKeyName(CredentialsModel, user.user_id(), 'credentials').get()
if not self.has_credentials():
return request_handler.redirect(self.authorize_url())
try:
method(request_handler, *args)
except AccessTokenRefreshError:
return request_handler.redirect(self.authorize_url())
|
def check_oauth(request_handler, *args):
if self._in_error:
request_handler.response.out.write('<html><body>')
request_handler.response.out.write(self._message)
request_handler.response.out.write('</body></html>')
return
user = users.get_current_user()
if not user:
request_handler.redirect(users.create_login_url(request_handler.request.uri))
return
self.flow.params['state'] = request_handler.request.url
self._request_handler = request_handler
self.credentials = StorageByKeyName(CredentialsModel, user.user_id(), 'credentials').get()
if not self.has_credentials():
return request_handler.redirect(self.authorize_url())
try:
method(request_handler, *args)
except AccessTokenRefreshError:
return request_handler.redirect(self.authorize_url())
|
engineauth
|
positive
|
def draw_umich_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
<DeepExtract>
(m, n) = [(ss - 1.0) / 2.0 for ss in (diameter, diameter)]
(y, x) = np.ogrid[-m:m + 1, -n:n + 1]
h = np.exp(-(x * x + y * y) / (2 * diameter / 6 * diameter / 6))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
gaussian = h
</DeepExtract>
(x, y) = (int(center[0]), int(center[1]))
(height, width) = heatmap.shape[0:2]
(left, right) = (min(x, radius), min(width - x, radius + 1))
(top, bottom) = (min(y, radius), min(height - y, radius + 1))
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
|
def draw_umich_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
(m, n) = [(ss - 1.0) / 2.0 for ss in (diameter, diameter)]
(y, x) = np.ogrid[-m:m + 1, -n:n + 1]
h = np.exp(-(x * x + y * y) / (2 * diameter / 6 * diameter / 6))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
gaussian = h
(x, y) = (int(center[0]), int(center[1]))
(height, width) = heatmap.shape[0:2]
(left, right) = (min(x, radius), min(width - x, radius + 1))
(top, bottom) = (min(y, radius), min(height - y, radius + 1))
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
|
CenterFusion
|
positive
|
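draw_umich_gaussian above splats a 2D Gaussian onto a heatmap with an elementwise maximum and clips the window at the image borders. A small NumPy sketch of the core operation on a toy heatmap, using the same sigma = diameter / 6 choice but skipping the border clipping:

import numpy as np

def gaussian2d(radius):
    # (2r+1, 2r+1) Gaussian kernel with sigma = (2r+1)/6 and peak value 1.
    d = 2 * radius + 1
    sigma = d / 6.0
    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    return np.exp(-(x * x + y * y) / (2 * sigma * sigma))

heatmap = np.zeros((9, 9), dtype=np.float32)
radius, cx, cy = 2, 4, 4
window = heatmap[cy - radius:cy + radius + 1, cx - radius:cx + radius + 1]
np.maximum(window, gaussian2d(radius), out=window)  # writes through the view into heatmap
# heatmap[4, 4] is now 1.0 and values fall off with distance from (4, 4)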
def synonym_score(self, word_a, word_b):
"""
Returns a synonym score for the provided words.
If the two words are not considered a synonym
0.0 is returned.
:param word_a:
:param word_b:
:return:
"""
<DeepExtract>
try:
a_vector = self.embeddings[word_a]
b_vector = self.embeddings[word_b]
diff = dot(matutils.unitvec(a_vector), matutils.unitvec(b_vector))
similarity = diff
except KeyError:
logger.debug("'%s' or '%s' don't have a word vector" % (word_a, word_b))
similarity = 0.0
</DeepExtract>
if similarity > MIN_WORD_SIMILARITY:
return similarity
else:
return 0.0
|
def synonym_score(self, word_a, word_b):
"""
Returns a synonym score for the provided words.
If the two words are not considered a synonym
0.0 is returned.
:param word_a:
:param word_b:
:return:
"""
try:
a_vector = self.embeddings[word_a]
b_vector = self.embeddings[word_b]
diff = dot(matutils.unitvec(a_vector), matutils.unitvec(b_vector))
similarity = diff
except KeyError:
logger.debug("'%s' or '%s' don't have a word vector" % (word_a, word_b))
similarity = 0.0
if similarity > MIN_WORD_SIMILARITY:
return similarity
else:
return 0.0
|
aqqu
|
positive
|
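synonym_score above is cosine similarity between two word vectors with an acceptance threshold. A dependency-free NumPy sketch; MIN_WORD_SIMILARITY is given an illustrative value, since the one used by aqqu is not shown here:

import numpy as np

MIN_WORD_SIMILARITY = 0.4  # illustrative threshold, not taken from aqqu

def cosine(a, b):
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    return float(np.dot(a / np.linalg.norm(a), b / np.linalg.norm(b)))

def synonym_score(vec_a, vec_b):
    # Return the similarity when it clears the threshold, otherwise 0.0.
    sim = cosine(vec_a, vec_b)
    return sim if sim > MIN_WORD_SIMILARITY else 0.0

# synonym_score([1.0, 0.0], [0.9, 0.1]) -> ~0.99; synonym_score([1.0, 0.0], [0.0, 1.0]) -> 0.0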
def __run(root=None):
visited = set()
if root is not None:
<DeepExtract>
cur_node = None
if isinstance(root, Edge):
cur_node = root.dest if not self._backwards else root.source
self.visitor.visit(root)
else:
cur_node = root
list_edges = self.graph.out_edges(cur_node) if not self._backwards else self.graph.in_edges(cur_node)
for edge in list_edges:
self.worklist.insert(0, edge)
</DeepExtract>
while self.worklist:
current = self.worklist.pop(0)
if current in visited:
continue
<DeepExtract>
cur_node = None
if isinstance(current, Edge):
cur_node = current.dest if not self._backwards else current.source
self.visitor.visit(current)
else:
cur_node = current
list_edges = self.graph.out_edges(cur_node) if not self._backwards else self.graph.in_edges(cur_node)
for edge in list_edges:
self.worklist.insert(0, edge)
</DeepExtract>
visited.add(current)
|
def __run(root=None):
visited = set()
if root is not None:
cur_node = None
if isinstance(root, Edge):
cur_node = root.dest if not self._backwards else root.source
self.visitor.visit(root)
else:
cur_node = root
list_edges = self.graph.out_edges(cur_node) if not self._backwards else self.graph.in_edges(cur_node)
for edge in list_edges:
self.worklist.insert(0, edge)
while self.worklist:
current = self.worklist.pop(0)
if current in visited:
continue
cur_node = None
if isinstance(current, Edge):
cur_node = current.dest if not self._backwards else current.source
self.visitor.visit(current)
else:
cur_node = current
list_edges = self.graph.out_edges(cur_node) if not self._backwards else self.graph.in_edges(cur_node)
for edge in list_edges:
self.worklist.insert(0, edge)
visited.add(current)
|
equip
|
positive
|
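__run above is a worklist traversal over graph edges with a visited set; reaching a node pushes its out-edges (or in-edges when walking backwards) onto the front of the list, which yields a depth-first order. A minimal edge-worklist sketch over a plain adjacency dict, breadth-first for simplicity; the names are illustrative and not the equip API:

from collections import deque

def walk_edges(graph, root, visit):
    # graph: {node: [successor, ...]}; calls visit(src, dst) once per reachable edge.
    worklist = deque((root, dst) for dst in graph.get(root, []))
    visited = set()
    while worklist:
        edge = worklist.popleft()
        if edge in visited:
            continue
        visited.add(edge)
        src, dst = edge
        visit(src, dst)
        worklist.extend((dst, nxt) for nxt in graph.get(dst, []))

# walk_edges({'a': ['b', 'c'], 'b': ['c'], 'c': ['a']}, 'a', lambda s, d: print(s, '->', d))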
def eval_para(model, iterator, sent_ids, output_path):
model.eval()
(Words, Is_heads, Tags, Y, Y_hat) = ([], [], [], [], [])
with torch.no_grad():
for (i, batch) in enumerate(tqdm(iterator)):
(words, x, is_heads, tags, y, seqlens) = batch
(_, _, y_hat) = model(x, y)
Words.extend(words)
Is_heads.extend(is_heads)
Tags.extend(tags)
Y.extend(y.numpy().tolist())
Y_hat.extend(y_hat.cpu().numpy().tolist())
entities = {k: dict() for (k, sid) in sent_ids}
for (i, (words, is_heads, tags, y_hat)) in enumerate(zip(Words, Is_heads, Tags, Y_hat)):
y_hat = [hat for (head, hat) in zip(is_heads, y_hat) if head == 1]
preds = [idx2tag[hat] for hat in y_hat]
assert len(preds) == len(words), f'len(preds)={len(preds)}, len(words)={len(words)}'
(words, preds) = (words[1:-1], preds[1:-1])
<DeepExtract>
MONTH = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
rule = re.compile('^[0-9]+$')
def date_pattern_1(ptr):
if ptr + 2 < len(words):
preds = words[ptr + 1] in MONTH and re.match(rule, words[ptr]) and re.match(rule, words[ptr + 2])
else:
preds = False
def date_pattern_2(ptr):
if ptr + 3 < len(words):
preds = words[ptr] in MONTH and re.match(rule, words[ptr + 1]) and (words[ptr + 2] == ',') and re.match(rule, words[ptr + 3])
else:
preds = False
ptr = 0
while ptr < len(words):
if preds[ptr] != 'O':
ptr += 1
elif date_pattern_1(ptr):
preds[ptr:ptr + 3] = ['J-DATE'] * 3
ptr += 3
elif date_pattern_2(ptr):
preds[ptr:ptr + 4] = ['J-DATE'] * 4
ptr += 4
elif re.match(rule, words[ptr]):
preds[ptr] = 'J-NUM'
ptr += 1
else:
ptr += 1
preds = preds
</DeepExtract>
<DeepExtract>
entities = []
ptr = 0
while ptr < len(words):
FLAG = False
for prefix in ['I-', 'J-']:
sub_words = []
while ptr < len(words) and preds[ptr].startswith(prefix):
sub_words.append(words[ptr])
ptr += 1
if len(sub_words) > 0:
entity = ' '.join(sub_words).replace(' .', '.').replace(' ,', ',')
entities.append([entity, preds[ptr - 1]])
FLAG = True
if not FLAG:
ptr += 1
entity = entities
</DeepExtract>
(key, sid) = (sent_ids[i][0], sent_ids[i][1])
entities[key][sid] = entity
json.dump(entities, open(output_path, 'w'))
return
|
def eval_para(model, iterator, sent_ids, output_path):
model.eval()
(Words, Is_heads, Tags, Y, Y_hat) = ([], [], [], [], [])
with torch.no_grad():
for (i, batch) in enumerate(tqdm(iterator)):
(words, x, is_heads, tags, y, seqlens) = batch
(_, _, y_hat) = model(x, y)
Words.extend(words)
Is_heads.extend(is_heads)
Tags.extend(tags)
Y.extend(y.numpy().tolist())
Y_hat.extend(y_hat.cpu().numpy().tolist())
entities = {k: dict() for (k, sid) in sent_ids}
for (i, (words, is_heads, tags, y_hat)) in enumerate(zip(Words, Is_heads, Tags, Y_hat)):
y_hat = [hat for (head, hat) in zip(is_heads, y_hat) if head == 1]
preds = [idx2tag[hat] for hat in y_hat]
assert len(preds) == len(words), f'len(preds)={len(preds)}, len(words)={len(words)}'
(words, preds) = (words[1:-1], preds[1:-1])
MONTH = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
rule = re.compile('^[0-9]+$')
def date_pattern_1(ptr):
if ptr + 2 < len(words):
preds = words[ptr + 1] in MONTH and re.match(rule, words[ptr]) and re.match(rule, words[ptr + 2])
else:
preds = False
def date_pattern_2(ptr):
if ptr + 3 < len(words):
preds = words[ptr] in MONTH and re.match(rule, words[ptr + 1]) and (words[ptr + 2] == ',') and re.match(rule, words[ptr + 3])
else:
preds = False
ptr = 0
while ptr < len(words):
if preds[ptr] != 'O':
ptr += 1
elif date_pattern_1(ptr):
preds[ptr:ptr + 3] = ['J-DATE'] * 3
ptr += 3
elif date_pattern_2(ptr):
preds[ptr:ptr + 4] = ['J-DATE'] * 4
ptr += 4
elif re.match(rule, words[ptr]):
preds[ptr] = 'J-NUM'
ptr += 1
else:
ptr += 1
preds = preds
entities = []
ptr = 0
while ptr < len(words):
FLAG = False
for prefix in ['I-', 'J-']:
sub_words = []
while ptr < len(words) and preds[ptr].startswith(prefix):
sub_words.append(words[ptr])
ptr += 1
if len(sub_words) > 0:
entity = ' '.join(sub_words).replace(' .', '.').replace(' ,', ',')
entities.append([entity, preds[ptr - 1]])
FLAG = True
if not FLAG:
ptr += 1
entity = entities
(key, sid) = (sent_ids[i][0], sent_ids[i][1])
entities[key][sid] = entity
json.dump(entities, open(output_path, 'w'))
return
|
DFGN-pytorch
|
positive
|
@pytest.fixture(scope='function')
def peewee_database():
from cozy.db.track import Track
from cozy.db.book import Book
from cozy.db.settings import Settings
from cozy.db.storage_blacklist import StorageBlackList
from cozy.db.storage import Storage
from cozy.db.file import File
from cozy.db.track_to_file import TrackToFile
<DeepExtract>
from playhouse.pool import PooledSqliteDatabase
from cozy.db.artwork_cache import ArtworkCache
from cozy.db.book import Book
from cozy.db.offline_cache import OfflineCache
from cozy.db.settings import Settings
from cozy.db.storage import Storage
from cozy.db.storage_blacklist import StorageBlackList
from cozy.db.track import Track
from cozy.db.file import File
from cozy.db.track_to_file import TrackToFile
from cozy.db.collation import collate_natural
models = [Track, Book, File, TrackToFile, Settings, ArtworkCache, Storage, StorageBlackList, OfflineCache]
print('Setup database...')
db_path = ':memory:'
test_db = PooledSqliteDatabase(db_path, pragmas=[('journal_mode', 'wal')])
test_db.bind(models, bind_refs=False, bind_backrefs=False)
test_db.connect()
test_db.create_tables(models)
test_db.register_collation(collate_natural)
(db_path, models, test_db) = (db_path, models, test_db)
</DeepExtract>
path_of_test_folder = os.path.dirname(os.path.realpath(__file__)) + '/'
with open(path_of_test_folder + 'books.json') as json_file:
book_data = json.load(json_file)
with open(path_of_test_folder + 'tracks.json') as json_file:
track_data = json.load(json_file)
with open(path_of_test_folder + 'files.json') as json_file:
file_data = json.load(json_file)
with open(path_of_test_folder + 'track_to_file.json') as json_file:
track_to_file_data = json.load(json_file)
Book.insert_many(book_data).execute()
for chunk in chunks(track_data, 25):
Track.insert_many(chunk).execute()
for chunk in chunks(file_data, 25):
File.insert_many(chunk).execute()
for chunk in chunks(track_to_file_data, 25):
TrackToFile.insert_many(chunk).execute()
with open(path_of_test_folder + 'storages.json') as json_file:
storage_data = json.load(json_file)
Storage.insert_many(storage_data).execute()
Settings.create(path='', last_played_book=Book.get())
StorageBlackList.create(path='/path/to/replace/test1.mp3')
StorageBlackList.create(path='/path/to/not/replace/test2.mp3')
print('Provide database...')
yield test_db
<DeepExtract>
print('Teardown database...')
test_db.drop_tables(models)
test_db.close()
</DeepExtract>
|
@pytest.fixture(scope='function')
def peewee_database():
from cozy.db.track import Track
from cozy.db.book import Book
from cozy.db.settings import Settings
from cozy.db.storage_blacklist import StorageBlackList
from cozy.db.storage import Storage
from cozy.db.file import File
from cozy.db.track_to_file import TrackToFile
from playhouse.pool import PooledSqliteDatabase
from cozy.db.artwork_cache import ArtworkCache
from cozy.db.book import Book
from cozy.db.offline_cache import OfflineCache
from cozy.db.settings import Settings
from cozy.db.storage import Storage
from cozy.db.storage_blacklist import StorageBlackList
from cozy.db.track import Track
from cozy.db.file import File
from cozy.db.track_to_file import TrackToFile
from cozy.db.collation import collate_natural
models = [Track, Book, File, TrackToFile, Settings, ArtworkCache, Storage, StorageBlackList, OfflineCache]
print('Setup database...')
db_path = ':memory:'
test_db = PooledSqliteDatabase(db_path, pragmas=[('journal_mode', 'wal')])
test_db.bind(models, bind_refs=False, bind_backrefs=False)
test_db.connect()
test_db.create_tables(models)
test_db.register_collation(collate_natural)
(db_path, models, test_db) = (db_path, models, test_db)
path_of_test_folder = os.path.dirname(os.path.realpath(__file__)) + '/'
with open(path_of_test_folder + 'books.json') as json_file:
book_data = json.load(json_file)
with open(path_of_test_folder + 'tracks.json') as json_file:
track_data = json.load(json_file)
with open(path_of_test_folder + 'files.json') as json_file:
file_data = json.load(json_file)
with open(path_of_test_folder + 'track_to_file.json') as json_file:
track_to_file_data = json.load(json_file)
Book.insert_many(book_data).execute()
for chunk in chunks(track_data, 25):
Track.insert_many(chunk).execute()
for chunk in chunks(file_data, 25):
File.insert_many(chunk).execute()
for chunk in chunks(track_to_file_data, 25):
TrackToFile.insert_many(chunk).execute()
with open(path_of_test_folder + 'storages.json') as json_file:
storage_data = json.load(json_file)
Storage.insert_many(storage_data).execute()
Settings.create(path='', last_played_book=Book.get())
StorageBlackList.create(path='/path/to/replace/test1.mp3')
StorageBlackList.create(path='/path/to/not/replace/test2.mp3')
print('Provide database...')
yield test_db
print('Teardown database...')
test_db.drop_tables(models)
test_db.close()
|
cozy
|
positive
|
def jump_handler(self, brk, source, instr_line):
if isinstance(instr_line[self.instr], Jal_MIPS):
<DeepExtract>
pass
</DeepExtract>
if isinstance(instr_line[self.instr], Jr_MIPS):
return self.jump_to_label(brk.label, source, brk.line_num)
return self.jump_to_label(brk.label, source, brk.line_num, True)
|
def jump_handler(self, brk, source, instr_line):
if isinstance(instr_line[self.instr], Jal_MIPS):
pass
if isinstance(instr_line[self.instr], Jr_MIPS):
return self.jump_to_label(brk.label, source, brk.line_num)
return self.jump_to_label(brk.label, source, brk.line_num, True)
|
Emu86
|
positive
|
def readUTF(buffer, maxlen):
if maxlen >= 2:
<DeepExtract>
length = struct.unpack('!H', buffer[:2])[0]
</DeepExtract>
else:
raise MalformedPacket('Not enough data to read string length')
maxlen -= 2
if length > maxlen:
raise MalformedPacket('Length delimited string too long')
buf = buffer[2:2 + length].decode('utf-8')
for c in buf:
ord_c = ord(c)
if ord_c >= 55296 and ord_c <= 57343:
raise MalformedPacket('[MQTT-1.5.4-1] D800-DFFF found in UTF-8 data')
if ord_c == 0:
raise MalformedPacket('[MQTT-1.5.4-2] Null found in UTF-8 data')
if ord_c == 65279:
raise MalformedPacket('[MQTT-1.5.4-3] U+FEFF in UTF-8 data')
return (buf, length + 2)
|
def readUTF(buffer, maxlen):
if maxlen >= 2:
length = struct.unpack('!H', buffer[:2])[0]
else:
raise MalformedPacket('Not enough data to read string length')
maxlen -= 2
if length > maxlen:
raise MalformedPacket('Length delimited string too long')
buf = buffer[2:2 + length].decode('utf-8')
for c in buf:
ord_c = ord(c)
if ord_c >= 55296 and ord_c <= 57343:
raise MalformedPacket('[MQTT-1.5.4-1] D800-DFFF found in UTF-8 data')
if ord_c == 0:
raise MalformedPacket('[MQTT-1.5.4-2] Null found in UTF-8 data')
if ord_c == 65279:
raise MalformedPacket('[MQTT-1.5.4-3] U+FEFF in UTF-8 data')
return (buf, length + 2)
|
depthai-experiments
|
positive
|
def __init__(self, radius=3, ang1=30, ang2=130, ang3=260, small_radius=0.4, **kwargs):
digest_config(self, kwargs)
super().__init__(**kwargs)
circle = Circle(radius=radius)
vt_1 = ValueTracker(ang1)
vt_2 = ValueTracker(ang2)
vt_3 = ValueTracker(ang3)
p1 = Dot(circle.point_at_angle(ang1 * DEGREES))
p2 = Dot(circle.point_at_angle(ang2 * DEGREES))
p3 = Dot(circle.point_at_angle(ang3 * DEGREES))
in_lines = VMobject(**self.inner_line_config)
out_lines = VMobject(**self.outer_line_config)
<DeepExtract>
line1 = Line(p3.get_center(), p1.get_center())
line2 = Line(p3.get_center(), p2.get_center())
h = Line(p3.get_center(), p3.get_center() + RIGHT)
angle = angle_between_vectors(line1.get_unit_vector(), line2.get_unit_vector())
h1 = angle_between_vectors(h.get_unit_vector(), line1.get_unit_vector())
h2 = angle_between_vectors(h.get_unit_vector(), line2.get_unit_vector())
if line1.get_angle() <= line2.get_angle():
start_angle = h1
else:
start_angle = h2
arc = Arc(start_angle, angle, radius=small_radius, arc_center=p3.get_center(), **self.outer_arc_config)
if mob:
out_arc = arc
else:
out_arc = angle
</DeepExtract>
<DeepExtract>
line1 = Line(p3.get_center(), p1.get_center())
line2 = Line(p3.get_center(), p2.get_center())
h = Line(p3.get_center(), p3.get_center() + RIGHT)
angle = angle_between_vectors(line1.get_unit_vector(), line2.get_unit_vector())
v1 = Line(circle.get_center(), p1.get_center())
start_angle = angle_between_vectors(h.get_unit_vector(), v1.get_unit_vector())
arc = Arc(start_angle, angle * 2, radius=small_radius, arc_center=circle.get_center(), **self.inner_arc_config)
if mob:
in_arc = arc
else:
in_arc = angle * 2
</DeepExtract>
theta_2 = TexMobject('2\\theta', **self.tex_2_config)
theta_1 = TexMobject('\\theta', **self.tex_1_config)
theta_1_val = DecimalTextNumber(0, unit='deg', num_decimal_places=3, **self.tex_2_config)
theta_2_val = DecimalTextNumber(0, unit='deg', num_decimal_places=3, **self.tex_1_config)
equal = Text('= 2 * ', font='Digital-7')
theta_eq = VGroup(theta_1_val, equal, theta_2_val)
theta_eq_temp = VGroup(theta_1_val, equal, theta_2_val)
theta_eq.arrange(RIGHT, buff=0.6, aligned_edge=DOWN)
theta_2_val.shift(LEFT * max(*[f.get_width() for f in theta_2_val]) * 1)
rectangle = Rectangle(width=theta_eq.get_width() + 0.2, height=theta_eq.get_height() + 0.2)
rectangle.move_to(theta_eq)
theta_eq.add(rectangle)
p1.add_updater(lambda mob: mob.move_to(circle.point_at_angle(vt_1.get_value() * DEGREES)))
p2.add_updater(lambda mob: mob.move_to(circle.point_at_angle(vt_2.get_value() * DEGREES)))
p3.add_updater(lambda mob: mob.move_to(circle.point_at_angle(vt_3.get_value() * DEGREES)))
in_lines.add_updater(lambda mob: mob.set_points_as_corners([p1.get_center(), circle.get_center(), p2.get_center()]))
out_lines.add_updater(lambda mob: mob.set_points_as_corners([p1.get_center(), p3.get_center(), p2.get_center()]))
out_arc.add_updater(lambda mob: mob.become(self.get_arc_between_lines(small_radius, p1, p2, p3)))
in_arc.add_updater(lambda mob: mob.become(self.get_inner_angle(small_radius, p1, p2, p3, circle)))
theta_1.add_updater(lambda mob: mob.move_to(p3.get_center() + Line(p3.get_center(), out_arc.point_from_proportion(0.5)).get_vector() * 1.7))
theta_2.add_updater(lambda mob: mob.move_to(circle.get_center() + Line(circle.get_center(), in_arc.point_from_proportion(0.5)).get_vector() * 1.7))
theta_1_val.add_updater(lambda mob: mob.set_value(self.get_inner_angle(1, p1, p2, p3, circle, False) * 180 / PI))
theta_2_val.add_updater(lambda mob: mob.set_value(self.get_arc_between_lines(1, p1, p2, p3, False) * 180 / PI))
rectangle.max_width = rectangle.get_width()
def rect_up(mob):
line = Line(theta_eq_temp.get_left() + LEFT * 0.2, theta_eq_temp.get_right() + RIGHT * 0.2)
if line.get_width() > mob.max_width:
mob.max_width = line.get_width()
mob.set_width(mob.max_width)
mob.align_to(theta_1_val, LEFT)
mob.shift(LEFT * 0.1)
rectangle.add_updater(rect_up)
dots = VGroup(p1, p2, p3)
vts = Group(vt_1, vt_2, vt_3)
self.vts = vts
self.add(circle, dots, in_lines, out_lines, in_arc, out_arc, theta_1, theta_2, theta_eq)
|
def __init__(self, radius=3, ang1=30, ang2=130, ang3=260, small_radius=0.4, **kwargs):
digest_config(self, kwargs)
super().__init__(**kwargs)
circle = Circle(radius=radius)
vt_1 = ValueTracker(ang1)
vt_2 = ValueTracker(ang2)
vt_3 = ValueTracker(ang3)
p1 = Dot(circle.point_at_angle(ang1 * DEGREES))
p2 = Dot(circle.point_at_angle(ang2 * DEGREES))
p3 = Dot(circle.point_at_angle(ang3 * DEGREES))
in_lines = VMobject(**self.inner_line_config)
out_lines = VMobject(**self.outer_line_config)
line1 = Line(p3.get_center(), p1.get_center())
line2 = Line(p3.get_center(), p2.get_center())
h = Line(p3.get_center(), p3.get_center() + RIGHT)
angle = angle_between_vectors(line1.get_unit_vector(), line2.get_unit_vector())
h1 = angle_between_vectors(h.get_unit_vector(), line1.get_unit_vector())
h2 = angle_between_vectors(h.get_unit_vector(), line2.get_unit_vector())
if line1.get_angle() <= line2.get_angle():
start_angle = h1
else:
start_angle = h2
arc = Arc(start_angle, angle, radius=small_radius, arc_center=p3.get_center(), **self.outer_arc_config)
if mob:
out_arc = arc
else:
out_arc = angle
line1 = Line(p3.get_center(), p1.get_center())
line2 = Line(p3.get_center(), p2.get_center())
h = Line(p3.get_center(), p3.get_center() + RIGHT)
angle = angle_between_vectors(line1.get_unit_vector(), line2.get_unit_vector())
v1 = Line(circle.get_center(), p1.get_center())
start_angle = angle_between_vectors(h.get_unit_vector(), v1.get_unit_vector())
arc = Arc(start_angle, angle * 2, radius=small_radius, arc_center=circle.get_center(), **self.inner_arc_config)
if mob:
in_arc = arc
else:
in_arc = angle * 2
theta_2 = TexMobject('2\\theta', **self.tex_2_config)
theta_1 = TexMobject('\\theta', **self.tex_1_config)
theta_1_val = DecimalTextNumber(0, unit='deg', num_decimal_places=3, **self.tex_2_config)
theta_2_val = DecimalTextNumber(0, unit='deg', num_decimal_places=3, **self.tex_1_config)
equal = Text('= 2 * ', font='Digital-7')
theta_eq = VGroup(theta_1_val, equal, theta_2_val)
theta_eq_temp = VGroup(theta_1_val, equal, theta_2_val)
theta_eq.arrange(RIGHT, buff=0.6, aligned_edge=DOWN)
theta_2_val.shift(LEFT * max(*[f.get_width() for f in theta_2_val]) * 1)
rectangle = Rectangle(width=theta_eq.get_width() + 0.2, height=theta_eq.get_height() + 0.2)
rectangle.move_to(theta_eq)
theta_eq.add(rectangle)
p1.add_updater(lambda mob: mob.move_to(circle.point_at_angle(vt_1.get_value() * DEGREES)))
p2.add_updater(lambda mob: mob.move_to(circle.point_at_angle(vt_2.get_value() * DEGREES)))
p3.add_updater(lambda mob: mob.move_to(circle.point_at_angle(vt_3.get_value() * DEGREES)))
in_lines.add_updater(lambda mob: mob.set_points_as_corners([p1.get_center(), circle.get_center(), p2.get_center()]))
out_lines.add_updater(lambda mob: mob.set_points_as_corners([p1.get_center(), p3.get_center(), p2.get_center()]))
out_arc.add_updater(lambda mob: mob.become(self.get_arc_between_lines(small_radius, p1, p2, p3)))
in_arc.add_updater(lambda mob: mob.become(self.get_inner_angle(small_radius, p1, p2, p3, circle)))
theta_1.add_updater(lambda mob: mob.move_to(p3.get_center() + Line(p3.get_center(), out_arc.point_from_proportion(0.5)).get_vector() * 1.7))
theta_2.add_updater(lambda mob: mob.move_to(circle.get_center() + Line(circle.get_center(), in_arc.point_from_proportion(0.5)).get_vector() * 1.7))
theta_1_val.add_updater(lambda mob: mob.set_value(self.get_inner_angle(1, p1, p2, p3, circle, False) * 180 / PI))
theta_2_val.add_updater(lambda mob: mob.set_value(self.get_arc_between_lines(1, p1, p2, p3, False) * 180 / PI))
rectangle.max_width = rectangle.get_width()
def rect_up(mob):
line = Line(theta_eq_temp.get_left() + LEFT * 0.2, theta_eq_temp.get_right() + RIGHT * 0.2)
if line.get_width() > mob.max_width:
mob.max_width = line.get_width()
mob.set_width(mob.max_width)
mob.align_to(theta_1_val, LEFT)
mob.shift(LEFT * 0.1)
rectangle.add_updater(rect_up)
dots = VGroup(p1, p2, p3)
vts = Group(vt_1, vt_2, vt_3)
self.vts = vts
self.add(circle, dots, in_lines, out_lines, in_arc, out_arc, theta_1, theta_2, theta_eq)
|
AnimationsWithManim
|
positive
|
def get_reward(self):
self.xyerrs.append(self.get_manhattan(self.belief.cpu().detach().numpy(), ignore_hd=True))
<DeepExtract>
guess = (self.bel_grid.head, self.bel_grid.row, self.bel_grid.col)
e_dir = abs(guess[0] - self.true_grid.head)
e_dir = min(self.grid_dirs - e_dir, e_dir)
if False:
e_dir = 0
self.manhattan = float(e_dir + abs(guess[1] - self.true_grid.row) + abs(guess[2] - self.true_grid.col))
</DeepExtract>
self.manhattans.append(self.manhattan)
if self.args.verbose > 2:
print('manhattans', len(self.manhattans))
self.reward = 0.0
self.reward_vector = np.zeros(5)
if self.args.penalty_for_block != 0:
self.reward_vector[0] -= self.args.penalty_for_block * self.collision_attempt
self.reward += -self.args.penalty_for_block * self.collision_attempt
if self.args.rew_explore and self.new_pose:
self.reward_vector[1] += 1.0
self.reward += 1.0
if self.args.rew_bel_new and self.new_bel:
self.reward_vector[1] += 1.0
self.reward += 1.0
if self.args.rew_bel_gt:
N = self.grid_dirs * self.grid_rows * self.grid_cols
self.reward_vector[2] += torch.log(N * self.belief[self.true_grid.head, self.true_grid.row, self.true_grid.col]).item()
self.reward += torch.log(N * self.belief[self.true_grid.head, self.true_grid.row, self.true_grid.col]).item()
if self.args.rew_bel_gt_nonlog:
self.reward_vector[2] += self.belief[self.true_grid.head, self.true_grid.row, self.true_grid.col].item()
self.reward += self.belief[self.true_grid.head, self.true_grid.row, self.true_grid.col].item()
if self.args.rew_KL_bel_gt:
bel_gt = self.belief[self.true_grid.head, self.true_grid.row, self.true_grid.col].item()
N = self.grid_dirs * self.grid_rows * self.grid_cols
new_bel_gt = 1.0 / N * np.log(N * np.clip(bel_gt, 1e-09, 1.0))
self.reward_vector[2] += new_bel_gt
self.reward += new_bel_gt
if self.args.rew_infogain:
bel = torch.clamp(self.belief, 1e-09, 1.0)
new_bel_ent = float((bel * torch.log(bel)).sum())
info_gain = new_bel_ent - self.bel_ent
self.bel_ent = new_bel_ent
self.reward += info_gain
self.reward_vector[3] += info_gain
if self.args.rew_bel_ent:
bel = self.belief
self.reward += (bel * torch.log(bel)).sum().item()
self.reward_vector[3] += (bel * torch.log(bel)).sum().item()
if self.args.rew_hit:
self.reward += 1 if self.manhattan == 0 else 0
self.reward_vector[4] += 1 if self.manhattan == 0 else 0
if self.args.rew_dist:
self.reward += (self.longest - self.manhattan) / self.longest
self.reward_vector[4] = (self.longest - self.manhattan) / self.longest
if self.args.rew_inv_dist:
self.reward += 1.0 / (self.manhattan + 1.0)
self.reward_vector[4] = 1.0 / (self.manhattan + 1.0)
self.reward = float(self.reward)
self.rewards.append(self.reward)
if self.args.verbose > 2:
print('rewards', len(self.rewards))
if np.isnan(self.reward):
raise Exception('reward=nan')
if self.args.verbose > 1:
print('reward=%f' % self.reward)
|
def get_reward(self):
self.xyerrs.append(self.get_manhattan(self.belief.cpu().detach().numpy(), ignore_hd=True))
guess = (self.bel_grid.head, self.bel_grid.row, self.bel_grid.col)
e_dir = abs(guess[0] - self.true_grid.head)
e_dir = min(self.grid_dirs - e_dir, e_dir)
if False:
e_dir = 0
self.manhattan = float(e_dir + abs(guess[1] - self.true_grid.row) + abs(guess[2] - self.true_grid.col))
self.manhattans.append(self.manhattan)
if self.args.verbose > 2:
print('manhattans', len(self.manhattans))
self.reward = 0.0
self.reward_vector = np.zeros(5)
if self.args.penalty_for_block != 0:
self.reward_vector[0] -= self.args.penalty_for_block * self.collision_attempt
self.reward += -self.args.penalty_for_block * self.collision_attempt
if self.args.rew_explore and self.new_pose:
self.reward_vector[1] += 1.0
self.reward += 1.0
if self.args.rew_bel_new and self.new_bel:
self.reward_vector[1] += 1.0
self.reward += 1.0
if self.args.rew_bel_gt:
N = self.grid_dirs * self.grid_rows * self.grid_cols
self.reward_vector[2] += torch.log(N * self.belief[self.true_grid.head, self.true_grid.row, self.true_grid.col]).item()
self.reward += torch.log(N * self.belief[self.true_grid.head, self.true_grid.row, self.true_grid.col]).item()
if self.args.rew_bel_gt_nonlog:
self.reward_vector[2] += self.belief[self.true_grid.head, self.true_grid.row, self.true_grid.col].item()
self.reward += self.belief[self.true_grid.head, self.true_grid.row, self.true_grid.col].item()
if self.args.rew_KL_bel_gt:
bel_gt = self.belief[self.true_grid.head, self.true_grid.row, self.true_grid.col].item()
N = self.grid_dirs * self.grid_rows * self.grid_cols
new_bel_gt = 1.0 / N * np.log(N * np.clip(bel_gt, 1e-09, 1.0))
self.reward_vector[2] += new_bel_gt
self.reward += new_bel_gt
if self.args.rew_infogain:
bel = torch.clamp(self.belief, 1e-09, 1.0)
new_bel_ent = float((bel * torch.log(bel)).sum())
info_gain = new_bel_ent - self.bel_ent
self.bel_ent = new_bel_ent
self.reward += info_gain
self.reward_vector[3] += info_gain
if self.args.rew_bel_ent:
bel = self.belief
self.reward += (bel * torch.log(bel)).sum().item()
self.reward_vector[3] += (bel * torch.log(bel)).sum().item()
if self.args.rew_hit:
self.reward += 1 if self.manhattan == 0 else 0
self.reward_vector[4] += 1 if self.manhattan == 0 else 0
if self.args.rew_dist:
self.reward += (self.longest - self.manhattan) / self.longest
self.reward_vector[4] = (self.longest - self.manhattan) / self.longest
if self.args.rew_inv_dist:
self.reward += 1.0 / (self.manhattan + 1.0)
self.reward_vector[4] = 1.0 / (self.manhattan + 1.0)
self.reward = float(self.reward)
self.rewards.append(self.reward)
if self.args.verbose > 2:
print('rewards', len(self.rewards))
if np.isnan(self.reward):
raise Exception('reward=nan')
if self.args.verbose > 1:
print('reward=%f' % self.reward)
|
dal
|
positive
|
def pspnet(n_classes, input_height=384, input_width=576):
<DeepExtract>
assert input_height % 192 == 0
assert input_width % 192 == 0
(img_input, levels) = vanilla_encoder(input_height=input_height, input_width=input_width)
[f1, f2, f3, f4, f5] = levels
o = f5
pool_factors = [1, 2, 3, 6]
pool_outs = [o]
for p in pool_factors:
pooled = pool_block(o, p)
pool_outs.append(pooled)
o = Concatenate(axis=MERGE_AXIS)(pool_outs)
o = Conv2D(512, (1, 1), data_format=IMAGE_ORDERING, use_bias=False)(o)
o = BatchNormalization()(o)
o = Activation('relu')(o)
o = Conv2D(n_classes, (3, 3), data_format=IMAGE_ORDERING, padding='same')(o)
o = resize_image(o, (8, 8), data_format=IMAGE_ORDERING)
model = get_segmentation_model(img_input, o)
model = model
</DeepExtract>
model.model_name = 'pspnet'
return model
|
def pspnet(n_classes, input_height=384, input_width=576):
assert input_height % 192 == 0
assert input_width % 192 == 0
(img_input, levels) = vanilla_encoder(input_height=input_height, input_width=input_width)
[f1, f2, f3, f4, f5] = levels
o = f5
pool_factors = [1, 2, 3, 6]
pool_outs = [o]
for p in pool_factors:
pooled = pool_block(o, p)
pool_outs.append(pooled)
o = Concatenate(axis=MERGE_AXIS)(pool_outs)
o = Conv2D(512, (1, 1), data_format=IMAGE_ORDERING, use_bias=False)(o)
o = BatchNormalization()(o)
o = Activation('relu')(o)
o = Conv2D(n_classes, (3, 3), data_format=IMAGE_ORDERING, padding='same')(o)
o = resize_image(o, (8, 8), data_format=IMAGE_ORDERING)
model = get_segmentation_model(img_input, o)
model = model
model.model_name = 'pspnet'
return model
|
aXeleRate
|
positive
|
def idx2name_cls(m, idx):
if _meta_idx2name_cls == None:
<DeepExtract>
global _meta_name2idx_cls
global _meta_idx2name_cls
global _meta_name2idx_pre
global _meta_idx2name_pre
_meta_name2idx_cls = {}
_meta_idx2name_cls = {}
_meta_name2idx_pre = {}
_meta_idx2name_pre = {}
for k in m['meta/cls/name2idx'].keys():
idx = int(str(m['meta/cls/name2idx/' + k][...]))
_meta_name2idx_cls[k] = idx
_meta_idx2name_cls[idx] = k
for k in m['meta/pre/name2idx'].keys():
idx = int(str(m['meta/pre/name2idx/' + k][...]))
_meta_name2idx_pre[k] = idx
_meta_idx2name_pre[idx] = k
</DeepExtract>
return _meta_idx2name_cls[idx]
|
def idx2name_cls(m, idx):
if _meta_idx2name_cls == None:
global _meta_name2idx_cls
global _meta_idx2name_cls
global _meta_name2idx_pre
global _meta_idx2name_pre
_meta_name2idx_cls = {}
_meta_idx2name_cls = {}
_meta_name2idx_pre = {}
_meta_idx2name_pre = {}
for k in m['meta/cls/name2idx'].keys():
idx = int(str(m['meta/cls/name2idx/' + k][...]))
_meta_name2idx_cls[k] = idx
_meta_idx2name_cls[idx] = k
for k in m['meta/pre/name2idx'].keys():
idx = int(str(m['meta/pre/name2idx/' + k][...]))
_meta_name2idx_pre[k] = idx
_meta_idx2name_pre[idx] = k
return _meta_idx2name_cls[idx]
|
cvpr17_vtranse
|
positive
|
def test_basic_03_with_some_admin(self):
<DeepExtract>
n = Namespace()
n.add_option('alpha', default=3, doc='the first parameter', is_argument=True)
n.add_option('beta', default='the second', doc='the first parameter', short_form='b')
n.add_option('gamma', default='1 2 3', from_string_converter=quote_stripping_list_of_ints, to_string_converter=partial(list_to_str, delimiter=' '), secret=True)
n.add_option('delta', default=False, from_string_converter=boolean_converter)
option_definitions = n
</DeepExtract>
cm = ConfigurationManager(definition_source=option_definitions, values_source_list=[command_line], argv_source=['0', '--admin.expose_secrets', '--gamma="-1 -2 -3 -4 -5 -6"', '--delta', '--admin.strict'], use_auto_help=False)
config = cm.get_config()
expected = {'alpha': 0, 'beta': 'the second', 'gamma': [-1, -2, -3, -4, -5, -6], 'delta': True, 'admin.print_conf': None, 'admin.dump_conf': '', 'admin.strict': True, 'admin.expose_secrets': True}
for k in config.keys_breadth_first():
self.assertEqual(config[k], expected[k])
|
def test_basic_03_with_some_admin(self):
n = Namespace()
n.add_option('alpha', default=3, doc='the first parameter', is_argument=True)
n.add_option('beta', default='the second', doc='the first parameter', short_form='b')
n.add_option('gamma', default='1 2 3', from_string_converter=quote_stripping_list_of_ints, to_string_converter=partial(list_to_str, delimiter=' '), secret=True)
n.add_option('delta', default=False, from_string_converter=boolean_converter)
option_definitions = n
cm = ConfigurationManager(definition_source=option_definitions, values_source_list=[command_line], argv_source=['0', '--admin.expose_secrets', '--gamma="-1 -2 -3 -4 -5 -6"', '--delta', '--admin.strict'], use_auto_help=False)
config = cm.get_config()
expected = {'alpha': 0, 'beta': 'the second', 'gamma': [-1, -2, -3, -4, -5, -6], 'delta': True, 'admin.print_conf': None, 'admin.dump_conf': '', 'admin.strict': True, 'admin.expose_secrets': True}
for k in config.keys_breadth_first():
self.assertEqual(config[k], expected[k])
|
configman
|
positive
|
def print_columns(self):
mode = self.mode
<DeepExtract>
self._mode = DRSMode.FIELDS
</DeepExtract>
seen_nid = dict()
for x in self:
if x not in seen_nid:
print(x)
seen_nid[x] = 0
self._mode = mode
|
def print_columns(self):
mode = self.mode
self._mode = DRSMode.FIELDS
seen_nid = dict()
for x in self:
if x not in seen_nid:
print(x)
seen_nid[x] = 0
self._mode = mode
|
aurum-datadiscovery
|
positive
|
def _load_scale_limits(compu_scale):
<DeepExtract>
tmp = self._get_arxml_children(compu_scale, 'LOWER-LIMIT')
if len(tmp) == 0:
lower_limit = None
elif len(tmp) == 1:
lower_limit = tmp[0]
else:
raise ValueError(f"{'LOWER-LIMIT'} does not resolve into a unique node")
</DeepExtract>
<DeepExtract>
tmp = self._get_arxml_children(compu_scale, 'UPPER-LIMIT')
if len(tmp) == 0:
upper_limit = None
elif len(tmp) == 1:
upper_limit = tmp[0]
else:
raise ValueError(f"{'UPPER-LIMIT'} does not resolve into a unique node")
</DeepExtract>
if lower_limit is not None:
lower_limit = parse_number_string(lower_limit.text)
if upper_limit is not None:
upper_limit = parse_number_string(upper_limit.text)
return (lower_limit, upper_limit)
|
def _load_scale_limits(compu_scale):
tmp = self._get_arxml_children(compu_scale, 'LOWER-LIMIT')
if len(tmp) == 0:
lower_limit = None
elif len(tmp) == 1:
lower_limit = tmp[0]
else:
raise ValueError(f"{'LOWER-LIMIT'} does not resolve into a unique node")
tmp = self._get_arxml_children(compu_scale, 'UPPER-LIMIT')
if len(tmp) == 0:
upper_limit = None
elif len(tmp) == 1:
upper_limit = tmp[0]
else:
raise ValueError(f"{'UPPER-LIMIT'} does not resolve into a unique node")
if lower_limit is not None:
lower_limit = parse_number_string(lower_limit.text)
if upper_limit is not None:
upper_limit = parse_number_string(upper_limit.text)
return (lower_limit, upper_limit)
|
cantools
|
positive
|
def _get_text_insert_point(section):
""" get the dimension value text insert point """
<DeepExtract>
(point1, point2) = (self._get_dimline_point(section), self._get_dimline_point(section + 1))
</DeepExtract>
dist = self.prop('height') / 2.0 + self.prop('textabove')
return vadd(midpoint(point1, point2), vmul_scalar(self.normal_vector, dist))
|
def _get_text_insert_point(section):
""" get the dimension value text insert point """
(point1, point2) = (self._get_dimline_point(section), self._get_dimline_point(section + 1))
dist = self.prop('height') / 2.0 + self.prop('textabove')
return vadd(midpoint(point1, point2), vmul_scalar(self.normal_vector, dist))
|
dxfwrite
|
positive
|
def hash_vector(self, v, querying=False):
"""
Hashes the vector and returns the bucket key as string.
"""
bucket_keys = []
if querying:
for lshash in self.child_hashes:
for bucket_key in lshash.hash_vector(v, querying):
prefixed_key = lshash.hash_name + '_' + bucket_key
if prefixed_key in self.bucket_key_map:
bucket_keys.extend(self.bucket_key_map[prefixed_key].keys())
else:
for lshash in self.child_hashes:
for bucket_key in lshash.hash_vector(v, querying):
<DeepExtract>
result = []
for j in range(len(bucket_key)):
bits = list(bucket_key)
bits[j] = '1' if bucket_key[j] == '0' else '0'
result.append(''.join(bits))
perm_keys = result
</DeepExtract>
perm_keys.append(bucket_key)
bucket_keys.append(lshash.hash_name + '_' + bucket_key)
for perm_key in perm_keys:
prefixed_key = lshash.hash_name + '_' + perm_key
if not prefixed_key in self.bucket_key_map:
self.bucket_key_map[prefixed_key] = {}
for variant in perm_keys:
prefixed_variant = lshash.hash_name + '_' + variant
self.bucket_key_map[prefixed_key][prefixed_variant] = 1
return bucket_keys
|
def hash_vector(self, v, querying=False):
"""
Hashes the vector and returns the bucket key as string.
"""
bucket_keys = []
if querying:
for lshash in self.child_hashes:
for bucket_key in lshash.hash_vector(v, querying):
prefixed_key = lshash.hash_name + '_' + bucket_key
if prefixed_key in self.bucket_key_map:
bucket_keys.extend(self.bucket_key_map[prefixed_key].keys())
else:
for lshash in self.child_hashes:
for bucket_key in lshash.hash_vector(v, querying):
result = []
for j in range(len(bucket_key)):
bits = list(bucket_key)
bits[j] = '1' if bucket_key[j] == '0' else '0'
result.append(''.join(bits))
perm_keys = result
perm_keys.append(bucket_key)
bucket_keys.append(lshash.hash_name + '_' + bucket_key)
for perm_key in perm_keys:
prefixed_key = lshash.hash_name + '_' + perm_key
if not prefixed_key in self.bucket_key_map:
self.bucket_key_map[prefixed_key] = {}
for variant in perm_keys:
prefixed_variant = lshash.hash_name + '_' + variant
self.bucket_key_map[prefixed_key][prefixed_variant] = 1
return bucket_keys
|
aurum-datadiscovery
|
positive
|
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces['error']:
return {'version': 'unknown', 'full-revisionid': pieces.get('long'), 'dirty': None, 'error': pieces['error'], 'date': None}
if not style or style == 'default':
style = 'pep440'
if style == 'pep440':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += plus_or_dot(pieces)
rendered += '%d.g%s' % (pieces['distance'], pieces['short'])
if pieces['dirty']:
rendered += '.dirty'
else:
rendered = '0+untagged.%d.g%s' % (pieces['distance'], pieces['short'])
if pieces['dirty']:
rendered += '.dirty'
rendered = rendered
</DeepExtract>
elif style == 'pep440-pre':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance']:
rendered += '.post.dev%d' % pieces['distance']
else:
rendered = '0.post.dev%d' % pieces['distance']
rendered = rendered
</DeepExtract>
elif style == 'pep440-post':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += '.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
rendered += plus_or_dot(pieces)
rendered += 'g%s' % pieces['short']
else:
rendered = '0.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
rendered += '+g%s' % pieces['short']
rendered = rendered
</DeepExtract>
elif style == 'pep440-old':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += '.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
else:
rendered = '0.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
rendered = rendered
</DeepExtract>
elif style == 'git-describe':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance']:
rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
else:
rendered = pieces['short']
if pieces['dirty']:
rendered += '-dirty'
rendered = rendered
</DeepExtract>
elif style == 'git-describe-long':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
else:
rendered = pieces['short']
if pieces['dirty']:
rendered += '-dirty'
rendered = rendered
</DeepExtract>
else:
raise ValueError("unknown style '%s'" % style)
return {'version': rendered, 'full-revisionid': pieces['long'], 'dirty': pieces['dirty'], 'error': None, 'date': pieces.get('date')}
|
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces['error']:
return {'version': 'unknown', 'full-revisionid': pieces.get('long'), 'dirty': None, 'error': pieces['error'], 'date': None}
if not style or style == 'default':
style = 'pep440'
if style == 'pep440':
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += plus_or_dot(pieces)
rendered += '%d.g%s' % (pieces['distance'], pieces['short'])
if pieces['dirty']:
rendered += '.dirty'
else:
rendered = '0+untagged.%d.g%s' % (pieces['distance'], pieces['short'])
if pieces['dirty']:
rendered += '.dirty'
rendered = rendered
elif style == 'pep440-pre':
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance']:
rendered += '.post.dev%d' % pieces['distance']
else:
rendered = '0.post.dev%d' % pieces['distance']
rendered = rendered
elif style == 'pep440-post':
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += '.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
rendered += plus_or_dot(pieces)
rendered += 'g%s' % pieces['short']
else:
rendered = '0.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
rendered += '+g%s' % pieces['short']
rendered = rendered
elif style == 'pep440-old':
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += '.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
else:
rendered = '0.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
rendered = rendered
elif style == 'git-describe':
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance']:
rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
else:
rendered = pieces['short']
if pieces['dirty']:
rendered += '-dirty'
rendered = rendered
elif style == 'git-describe-long':
if pieces['closest-tag']:
rendered = pieces['closest-tag']
rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
else:
rendered = pieces['short']
if pieces['dirty']:
rendered += '-dirty'
rendered = rendered
else:
raise ValueError("unknown style '%s'" % style)
return {'version': rendered, 'full-revisionid': pieces['long'], 'dirty': pieces['dirty'], 'error': None, 'date': pieces.get('date')}
|
espaloma
|
positive
|
def update(self, ob_no, ac_na, re_n, next_ob_no, terminal_n):
self.optimizer.zero_grad()
<DeepExtract>
(ob, ac, rew, next_ob, done) = map(lambda x: torch.from_numpy(x).to(self.device), [ob_no, ac_na, re_n, next_ob_no, terminal_n])
with torch.no_grad():
if self.double_q:
max_ac = self.Q_func(next_ob).argmax(-1, True)
else:
max_ac = self.target_Q_func(next_ob).argmax(-1, True)
curr_Q = self.Q_func(ob).gather(-1, ac.long().view(-1, 1)).squeeze()
best_next_Q = self.target_Q_func(next_ob).gather(-1, max_ac).squeeze()
calc_Q = rew + self.gamma * best_next_Q * (1 - done)
loss = nn.functional.smooth_l1_loss(curr_Q, calc_Q)
</DeepExtract>
loss.backward()
nn.utils.clip_grad_norm_(self.Q_func.parameters(), max_norm=self.grad_norm_clipping)
self.optimizer.step()
self.lr_scheduler.step()
return loss
|
def update(self, ob_no, ac_na, re_n, next_ob_no, terminal_n):
self.optimizer.zero_grad()
(ob, ac, rew, next_ob, done) = map(lambda x: torch.from_numpy(x).to(self.device), [ob_no, ac_na, re_n, next_ob_no, terminal_n])
with torch.no_grad():
if self.double_q:
max_ac = self.Q_func(next_ob).argmax(-1, True)
else:
max_ac = self.target_Q_func(next_ob).argmax(-1, True)
curr_Q = self.Q_func(ob).gather(-1, ac.long().view(-1, 1)).squeeze()
best_next_Q = self.target_Q_func(next_ob).gather(-1, max_ac).squeeze()
calc_Q = rew + self.gamma * best_next_Q * (1 - done)
loss = nn.functional.smooth_l1_loss(curr_Q, calc_Q)
loss.backward()
nn.utils.clip_grad_norm_(self.Q_func.parameters(), max_norm=self.grad_norm_clipping)
self.optimizer.step()
self.lr_scheduler.step()
return loss
|
berkeley-deep-RL-pytorch-solutions
|
positive
|
def test_form_root_node(self, model):
nodes = list(model.get_tree())
node = nodes.pop(0)
<DeepExtract>
safe_parent_nodes = [(node.pk, str(node), node.get_depth()) for node in nodes]
</DeepExtract>
<DeepExtract>
form_class = movenodeform_factory(type(node))
form = form_class(instance=node)
assert ['desc', '_position', '_ref_node_id'] == list(form.base_fields.keys())
got = [choice[0] for choice in form.fields['_position'].choices]
assert ['first-child', 'left', 'right'] == got
nodes = self._get_nodes_list(safe_parent_nodes)
self._assert_nodes_in_choices(form, nodes)
</DeepExtract>
|
def test_form_root_node(self, model):
nodes = list(model.get_tree())
node = nodes.pop(0)
safe_parent_nodes = [(node.pk, str(node), node.get_depth()) for node in nodes]
form_class = movenodeform_factory(type(node))
form = form_class(instance=node)
assert ['desc', '_position', '_ref_node_id'] == list(form.base_fields.keys())
got = [choice[0] for choice in form.fields['_position'].choices]
assert ['first-child', 'left', 'right'] == got
nodes = self._get_nodes_list(safe_parent_nodes)
self._assert_nodes_in_choices(form, nodes)
|
django-treebeard
|
positive
|
def test_run_operations(self):
gui = self.app._window
<DeepExtract>
def _create_cleaner_file_in_directory(dirname):
cleaner_content = '<?xml version="1.0" encoding="UTF-8"?><cleaner id="{}"><label>Test run_operations</label><option id="{}"><label>Test1</label><description>Delete files in a test directory</description><action command="delete" search="walk.all" path="{}"/></option></cleaner>'.format(self._NEW_CLEANER_ID, self._NEW_OPTION_ID, dirname)
cleaner_filename = os.path.join(dirname, 'test_run_operations_cleaner.xml')
self.write_file(cleaner_filename, cleaner_content, 'w')
file_to_clean = cleaner_filename
def _set_mocks_return_values(cleaner_filename, mock_cleaner_change_dialog, mock_list_cleanerml_files):
mock_list_cleanerml_files.return_value = [cleaner_filename]
mock_cleaner_change_dialog.return_value = None
def _load_new_cleaner_in_gui(gui):
gui.cb_refresh_operations()
self.refresh_gui()
dirname = self.mkdtemp(prefix='bleachbit-test-run_operations')
cleaner_filename = _create_cleaner_file_in_directory(dirname)
self.assertExists(cleaner_filename)
_set_mocks_return_values(cleaner_filename, mock_cleaner_change_dialog, mock_list_cleanerml_files)
_load_new_cleaner_in_gui(gui)
file_to_clean = self.mkstemp(prefix='somefile', dir=dirname)
self.assertExists(file_to_clean)
file_to_clean = file_to_clean
</DeepExtract>
<DeepExtract>
model = gui.view.get_model()
tree = self.find_widget(gui, Gtk.TreeView)
self.assertIsNotNone(tree)
it = self.find_option(model, self._NEW_CLEANER_ID, self._NEW_OPTION_ID)
self.assertIsNotNone(it)
tree.scroll_to_cell(model.get_path(it), None, False, 0, 0)
model[model.iter_parent(it)][1] = True
model[it][1] = True
self.refresh_gui()
</DeepExtract>
with mock.patch('bleachbit.GUI.GUI._confirm_delete', return_value=True):
self.assertTrue(gui._confirm_delete(False, False))
gui.run_operations(None)
<DeepExtract>
while Gtk.events_pending():
Gtk.main_iteration_do(blocking=False)
time.sleep(delay)
</DeepExtract>
self.assertNotExists(file_to_clean)
|
def test_run_operations(self):
gui = self.app._window
def _create_cleaner_file_in_directory(dirname):
cleaner_content = '<?xml version="1.0" encoding="UTF-8"?><cleaner id="{}"><label>Test run_operations</label><option id="{}"><label>Test1</label><description>Delete files in a test directory</description><action command="delete" search="walk.all" path="{}"/></option></cleaner>'.format(self._NEW_CLEANER_ID, self._NEW_OPTION_ID, dirname)
cleaner_filename = os.path.join(dirname, 'test_run_operations_cleaner.xml')
self.write_file(cleaner_filename, cleaner_content, 'w')
file_to_clean = cleaner_filename
def _set_mocks_return_values(cleaner_filename, mock_cleaner_change_dialog, mock_list_cleanerml_files):
mock_list_cleanerml_files.return_value = [cleaner_filename]
mock_cleaner_change_dialog.return_value = None
def _load_new_cleaner_in_gui(gui):
gui.cb_refresh_operations()
self.refresh_gui()
dirname = self.mkdtemp(prefix='bleachbit-test-run_operations')
cleaner_filename = _create_cleaner_file_in_directory(dirname)
self.assertExists(cleaner_filename)
_set_mocks_return_values(cleaner_filename, mock_cleaner_change_dialog, mock_list_cleanerml_files)
_load_new_cleaner_in_gui(gui)
file_to_clean = self.mkstemp(prefix='somefile', dir=dirname)
self.assertExists(file_to_clean)
file_to_clean = file_to_clean
model = gui.view.get_model()
tree = self.find_widget(gui, Gtk.TreeView)
self.assertIsNotNone(tree)
it = self.find_option(model, self._NEW_CLEANER_ID, self._NEW_OPTION_ID)
self.assertIsNotNone(it)
tree.scroll_to_cell(model.get_path(it), None, False, 0, 0)
model[model.iter_parent(it)][1] = True
model[it][1] = True
self.refresh_gui()
with mock.patch('bleachbit.GUI.GUI._confirm_delete', return_value=True):
self.assertTrue(gui._confirm_delete(False, False))
gui.run_operations(None)
while Gtk.events_pending():
Gtk.main_iteration_do(blocking=False)
time.sleep(delay)
self.assertNotExists(file_to_clean)
|
bleachbit
|
positive
|
def _validate_operator(self, field, uid, **kwargs):
<DeepExtract>
allowed_operators = self.operators or OPERATORS[field.simple_type]
</DeepExtract>
if field.field.null:
allowed_operators += ('isnull', '-isnull')
uid = uid or allowed_operators[0]
operator = operators.get(uid)
if operator is None:
raise ValidationError(u'"{0}" is not a valid operator'.format(uid))
if operator.uid not in allowed_operators:
raise ValidationError(u'Operator "{0}" cannot be used for this translator'.format(operator))
return operator
|
def _validate_operator(self, field, uid, **kwargs):
allowed_operators = self.operators or OPERATORS[field.simple_type]
if field.field.null:
allowed_operators += ('isnull', '-isnull')
uid = uid or allowed_operators[0]
operator = operators.get(uid)
if operator is None:
raise ValidationError(u'"{0}" is not a valid operator'.format(uid))
if operator.uid not in allowed_operators:
raise ValidationError(u'Operator "{0}" cannot be used for this translator'.format(operator))
return operator
|
avocado
|
positive
|
def cached_data(self, name, data_func=None, max_age=60, session=False):
"""Cache API with session-scoped expiry.
.. versionadded:: 1.25
Args:
name (str): Cache key
data_func (callable): Callable that returns fresh data. It
is called if the cache has expired or doesn't exist.
max_age (int): Maximum allowable age of cache in seconds.
session (bool, optional): Whether to scope the cache
to the current session.
``name``, ``data_func`` and ``max_age`` are the same as for the
:meth:`~workflow.Workflow.cached_data` method on
:class:`~workflow.Workflow`.
If ``session`` is ``True``, then ``name`` is prefixed
with :attr:`session_id`.
"""
if session:
<DeepExtract>
name = '{0}{1}'.format(self._session_prefix, name)
</DeepExtract>
return super(Workflow3, self).cached_data(name, data_func, max_age)
|
def cached_data(self, name, data_func=None, max_age=60, session=False):
"""Cache API with session-scoped expiry.
.. versionadded:: 1.25
Args:
name (str): Cache key
data_func (callable): Callable that returns fresh data. It
is called if the cache has expired or doesn't exist.
max_age (int): Maximum allowable age of cache in seconds.
session (bool, optional): Whether to scope the cache
to the current session.
``name``, ``data_func`` and ``max_age`` are the same as for the
:meth:`~workflow.Workflow.cached_data` method on
:class:`~workflow.Workflow`.
If ``session`` is ``True``, then ``name`` is prefixed
with :attr:`session_id`.
"""
if session:
name = '{0}{1}'.format(self._session_prefix, name)
return super(Workflow3, self).cached_data(name, data_func, max_age)
|
alfred-dropbox
|
positive
|
def insert_facebook_lrs(fb_feed, course_code):
"""
1. Parses facebook feed
2. Uses construct_tincan_statement to format data ready to send for the LRS
3. Sends to the LRS and Saves to postgres json field
:param fb_feed: Facebook Feed as dict
:param course_code: The unit offering code
:return:
"""
platform = 'Facebook'
platform_url = 'http://www.facebook.com/'
for pst in fb_feed:
if 'message' in pst:
post_type = pst['type']
created_time = dateutil.parser.parse(pst['created_time'])
from_uid = pst['from']['id']
from_name = pst['from']['name']
post_id = pst['actions'][0]['link']
message = pst['message']
if fbid_exists(from_uid, course_code):
<DeepExtract>
usr_dict = {'fb_id': from_uid}
try:
usr = UserProfile.objects.filter(fb_id__iexact=from_uid).get()
except UserProfile.DoesNotExist:
usr = None
if usr is not None:
usr_dict['email'] = usr.user.email
usr_dict = usr_dict
</DeepExtract>
insert_post(usr_dict, post_id, message, from_name, from_uid, created_time, course_code, platform, platform_url)
if 'likes' in pst:
for like in pst['likes']['data']:
like_uid = like['id']
like_name = like['name']
if fbid_exists(like_uid, course_code):
<DeepExtract>
usr_dict = {'fb_id': like_uid}
try:
usr = UserProfile.objects.filter(fb_id__iexact=like_uid).get()
except UserProfile.DoesNotExist:
usr = None
if usr is not None:
usr_dict['email'] = usr.user.email
usr_dict = usr_dict
</DeepExtract>
insert_like(usr_dict, post_id, like_uid, like_name, message, course_code, platform, platform_url, liked_username=from_uid)
if 'comments' in pst:
for comment in pst['comments']['data']:
comment_created_time = comment['created_time']
comment_from_uid = comment['from']['id']
comment_from_name = comment['from']['name']
comment_message = comment['message']
comment_id = comment['id']
if fbid_exists(comment_from_uid, course_code):
<DeepExtract>
usr_dict = {'fb_id': comment_from_uid}
try:
usr = UserProfile.objects.filter(fb_id__iexact=comment_from_uid).get()
except UserProfile.DoesNotExist:
usr = None
if usr is not None:
usr_dict['email'] = usr.user.email
usr_dict = usr_dict
</DeepExtract>
insert_comment(usr_dict, post_id, comment_id, comment_message, comment_from_uid, comment_from_name, comment_created_time, course_code, platform, platform_url, parentusername=from_uid)
|
def insert_facebook_lrs(fb_feed, course_code):
"""
1. Parses facebook feed
2. Uses construct_tincan_statement to format data ready to send for the LRS
3. Sends to the LRS and Saves to postgres json field
:param fb_feed: Facebook Feed as dict
:param course_code: The unit offering code
:return:
"""
platform = 'Facebook'
platform_url = 'http://www.facebook.com/'
for pst in fb_feed:
if 'message' in pst:
post_type = pst['type']
created_time = dateutil.parser.parse(pst['created_time'])
from_uid = pst['from']['id']
from_name = pst['from']['name']
post_id = pst['actions'][0]['link']
message = pst['message']
if fbid_exists(from_uid, course_code):
usr_dict = {'fb_id': from_uid}
try:
usr = UserProfile.objects.filter(fb_id__iexact=from_uid).get()
except UserProfile.DoesNotExist:
usr = None
if usr is not None:
usr_dict['email'] = usr.user.email
usr_dict = usr_dict
insert_post(usr_dict, post_id, message, from_name, from_uid, created_time, course_code, platform, platform_url)
if 'likes' in pst:
for like in pst['likes']['data']:
like_uid = like['id']
like_name = like['name']
if fbid_exists(like_uid, course_code):
usr_dict = {'fb_id': like_uid}
try:
usr = UserProfile.objects.filter(fb_id__iexact=like_uid).get()
except UserProfile.DoesNotExist:
usr = None
if usr is not None:
usr_dict['email'] = usr.user.email
usr_dict = usr_dict
insert_like(usr_dict, post_id, like_uid, like_name, message, course_code, platform, platform_url, liked_username=from_uid)
if 'comments' in pst:
for comment in pst['comments']['data']:
comment_created_time = comment['created_time']
comment_from_uid = comment['from']['id']
comment_from_name = comment['from']['name']
comment_message = comment['message']
comment_id = comment['id']
if fbid_exists(comment_from_uid, course_code):
usr_dict = {'fb_id': comment_from_uid}
try:
usr = UserProfile.objects.filter(fb_id__iexact=comment_from_uid).get()
except UserProfile.DoesNotExist:
usr = None
if usr is not None:
usr_dict['email'] = usr.user.email
usr_dict = usr_dict
insert_comment(usr_dict, post_id, comment_id, comment_message, comment_from_uid, comment_from_name, comment_created_time, course_code, platform, platform_url, parentusername=from_uid)
|
CLAtoolkit
|
positive
|
def _compute_plot_limits(ax, xlim, ylim, center=False):
<DeepExtract>
if type(xlim) is list:
pass
elif xlim is None:
xlim = [None, None]
elif xlim == 'tight':
xlim = ['tight', 'tight']
elif xlim:
xlim = [True, True]
else:
xlim = [False, False]
xlim = xlim
</DeepExtract>
<DeepExtract>
if type(ylim) is list:
pass
elif ylim is None:
ylim = [None, None]
elif ylim == 'tight':
ylim = ['tight', 'tight']
elif ylim:
ylim = [True, True]
else:
ylim = [False, False]
ylim = ylim
</DeepExtract>
<DeepExtract>
if type(center) is list:
pass
elif center is None:
center = [None, None]
elif center == 'tight':
center = ['tight', 'tight']
elif center:
center = [True, True]
else:
center = [False, False]
center = center
</DeepExtract>
lims = [xlim, ylim]
auto_lims = [ax.get_xlim(), ax.get_ylim()]
ax.autoscale(enable=True, tight=True)
tight_limts = [ax.get_xlim(), ax.get_ylim()]
for lim in range(len(lims)):
for direction in range(len(lims[lim])):
if lims[lim][direction] is None:
lims[lim][direction] = auto_lims[lim][direction]
elif lims[lim][direction] == 'tight':
lims[lim][direction] = tight_limts[lim][direction]
elif type(lims[lim][direction]) == float or type(lims[lim][direction]) == int:
pass
else:
warnings.warn('Unknown input for limits, it is neither None, nor tight,nor a float ', stacklevel=1)
if center[0]:
lims[0][1] = abs(max(lims[0], key=abs))
lims[0][0] = -lims[0][1]
if center[1]:
lims[1][1] = abs(max(lims[1], key=abs))
lims[1][0] = -lims[1][1]
return (lims[0], lims[1])
|
def _compute_plot_limits(ax, xlim, ylim, center=False):
if type(xlim) is list:
pass
elif xlim is None:
xlim = [None, None]
elif xlim == 'tight':
xlim = ['tight', 'tight']
elif xlim:
xlim = [True, True]
else:
xlim = [False, False]
xlim = xlim
if type(ylim) is list:
pass
elif ylim is None:
ylim = [None, None]
elif ylim == 'tight':
ylim = ['tight', 'tight']
elif ylim:
ylim = [True, True]
else:
ylim = [False, False]
ylim = ylim
if type(center) is list:
pass
elif center is None:
center = [None, None]
elif center == 'tight':
center = ['tight', 'tight']
elif center:
center = [True, True]
else:
center = [False, False]
center = center
lims = [xlim, ylim]
auto_lims = [ax.get_xlim(), ax.get_ylim()]
ax.autoscale(enable=True, tight=True)
tight_limts = [ax.get_xlim(), ax.get_ylim()]
for lim in range(len(lims)):
for direction in range(len(lims[lim])):
if lims[lim][direction] is None:
lims[lim][direction] = auto_lims[lim][direction]
elif lims[lim][direction] == 'tight':
lims[lim][direction] = tight_limts[lim][direction]
elif type(lims[lim][direction]) == float or type(lims[lim][direction]) == int:
pass
else:
warnings.warn('Unknown input for limits, it is neither None, nor tight,nor a float ', stacklevel=1)
if center[0]:
lims[0][1] = abs(max(lims[0], key=abs))
lims[0][0] = -lims[0][1]
if center[1]:
lims[1][1] = abs(max(lims[1], key=abs))
lims[1][0] = -lims[1][1]
return (lims[0], lims[1])
|
cockpit
|
positive
|
def get_week_start(day=None):
"""Returns the Monday of the given week."""
<DeepExtract>
tz = tz or timezone.get_current_timezone()
try:
if timezone.is_naive(day or datetime.date.today()):
day = timezone.make_aware(day or datetime.date.today(), tz)
except AttributeError:
dt = datetime.datetime.combine(day or datetime.date.today(), datetime.time())
day = timezone.make_aware(dt, tz)
day = day or datetime.date.today()
</DeepExtract>
days_since_monday = day.weekday()
if days_since_monday != 0:
day = day - relativedelta(days=days_since_monday)
return day
|
def get_week_start(day=None):
"""Returns the Monday of the given week."""
tz = tz or timezone.get_current_timezone()
try:
if timezone.is_naive(day or datetime.date.today()):
day = timezone.make_aware(day or datetime.date.today(), tz)
except AttributeError:
dt = datetime.datetime.combine(day or datetime.date.today(), datetime.time())
day = timezone.make_aware(dt, tz)
day = day or datetime.date.today()
days_since_monday = day.weekday()
if days_since_monday != 0:
day = day - relativedelta(days=days_since_monday)
return day
|
django-timepiece
|
positive
|
def get_update_state(self, entity_id):
"""
Get the state of a specific entity.
Args:
self: (todo): write your description
entity_id: (str): write your description
"""
<DeepExtract>
c = self._cursor()
try:
row = c.execute('select pts, qts, date, seq from update_state where id = ?', values).fetchone()
finally:
c.close()
</DeepExtract>
if row:
(pts, qts, date, seq) = row
date = datetime.datetime.fromtimestamp(date, tz=datetime.timezone.utc)
return types.updates.State(pts, qts, date, seq, unread_count=0)
|
def get_update_state(self, entity_id):
"""
Get the state of a specific entity.
Args:
self: (todo): write your description
entity_id: (str): write your description
"""
c = self._cursor()
try:
row = c.execute('select pts, qts, date, seq from update_state where id = ?', values).fetchone()
finally:
c.close()
if row:
(pts, qts, date, seq) = row
date = datetime.datetime.fromtimestamp(date, tz=datetime.timezone.utc)
return types.updates.State(pts, qts, date, seq, unread_count=0)
|
Awesome-Scripts
|
positive
|
def shopping_dataset() -> Dataset:
"""Shopping ranking dataset."""
dataset_directory = os.path.join(internal_test_data_path(), 'dataset')
dataset_path = os.path.join(dataset_directory, 'shopping_relevance_small1.csv')
dataset = pd.read_csv(dataset_path)
<DeepExtract>
assert 0.3 >= 0.0
assert 0.3 <= 1.0
index_second = np.random.rand(len(dataset)) < 0.3
(train, test) = (dataset[~index_second], dataset[index_second])
</DeepExtract>
return prepare_dataset(train, test, label='relevance', num_classes=1)
|
def shopping_dataset() -> Dataset:
"""Shopping ranking dataset."""
dataset_directory = os.path.join(internal_test_data_path(), 'dataset')
dataset_path = os.path.join(dataset_directory, 'shopping_relevance_small1.csv')
dataset = pd.read_csv(dataset_path)
assert 0.3 >= 0.0
assert 0.3 <= 1.0
index_second = np.random.rand(len(dataset)) < 0.3
(train, test) = (dataset[~index_second], dataset[index_second])
return prepare_dataset(train, test, label='relevance', num_classes=1)
|
decision-forests
|
positive
|
def main():
<DeepExtract>
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work_dir', help='the dir to save logs and models')
parser.add_argument('--resume_from', help='the checkpoint file to resume from')
parser.add_argument('--validate', action='store_true', help='whether to evaluate the checkpoint during training')
parser.add_argument('--gpus', type=int, default=1, help='number of gpus to use (only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--autoscale-lr', action='store_true', help='automatically scale lr with the number of gpus')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
args = args
</DeepExtract>
cfg = Config.fromfile(args.config)
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if args.work_dir is not None:
cfg.work_dir = args.work_dir
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.gpus = args.gpus
if args.autoscale_lr:
cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
meta = dict()
env_info_dict = collect_env()
env_info = '\n'.join(['{}: {}'.format(k, v) for (k, v) in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
meta['env_info'] = env_info
logger.info('Distributed training: {}'.format(distributed))
logger.info('Config:\n{}'.format(cfg.text))
if args.seed is not None:
logger.info('Set random seed to {}, deterministic: {}'.format(args.seed, args.deterministic))
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
cfg.checkpoint_config.meta = dict(mmdet_version=__version__, config=cfg.text, CLASSES=datasets[0].CLASSES)
model.CLASSES = datasets[0].CLASSES
train_detector(model, datasets, cfg, distributed=distributed, validate=args.validate, timestamp=timestamp, meta=meta)
|
def main():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work_dir', help='the dir to save logs and models')
parser.add_argument('--resume_from', help='the checkpoint file to resume from')
parser.add_argument('--validate', action='store_true', help='whether to evaluate the checkpoint during training')
parser.add_argument('--gpus', type=int, default=1, help='number of gpus to use (only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--autoscale-lr', action='store_true', help='automatically scale lr with the number of gpus')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
args = args
cfg = Config.fromfile(args.config)
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if args.work_dir is not None:
cfg.work_dir = args.work_dir
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.gpus = args.gpus
if args.autoscale_lr:
cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
meta = dict()
env_info_dict = collect_env()
env_info = '\n'.join(['{}: {}'.format(k, v) for (k, v) in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
meta['env_info'] = env_info
logger.info('Distributed training: {}'.format(distributed))
logger.info('Config:\n{}'.format(cfg.text))
if args.seed is not None:
logger.info('Set random seed to {}, deterministic: {}'.format(args.seed, args.deterministic))
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
cfg.checkpoint_config.meta = dict(mmdet_version=__version__, config=cfg.text, CLASSES=datasets[0].CLASSES)
model.CLASSES = datasets[0].CLASSES
train_detector(model, datasets, cfg, distributed=distributed, validate=args.validate, timestamp=timestamp, meta=meta)
|
DetectoRS
|
positive
|
def valid_L2I(self):
<DeepExtract>
all_testnll = []
for (images, labels) in test_data_iterator:
feed_dict = {}
x = np.cast[np.float32]((images - 127.5) / 127.5)
x = np.split(x, args.nr_gpu)
feed_dict.update({self.Worker_L2I.xs[i]: x[i] for i in range(args.nr_gpu)})
if args.useSoftLabel == 1:
soft_labels_ = self.sess.run(self.soft_labels, feed_dict={self.Worker_I2L.model.input_image: images.astype('float32'), self.Worker_I2L.model.needImgAug: False})
one_hot_labels_ = np.zeros((args.batch_size, 10), dtype=np.float32)
one_hot_labels_[np.arange(args.batch_size), labels] = 1.0
feed_dict.update({self.Worker_L2I.hs[i]: (1.0 - alpha_) * soft_labels_ + alpha_ * one_hot_labels_ for i in range(self.Worker_L2I.args.nr_gpu)})
else:
y = np.split(labels, args.nr_gpu)
feed_dict.update({self.Worker_L2I.ys[i]: y[i] for i in range(args.nr_gpu)})
all_testnll.append(self.sess.run([self.nlls_L2I_test_bpd], feed_dict))
avg_testnll = np.mean(all_testnll)
print('[L2I], testnll={0:.6f}'.format(avg_testnll))
</DeepExtract>
"\n for alpha_ in range(11):\n print('alpha=%f' % (alpha_ * 0.1))\n self.L2I_TestNll(alpha_ * 0.1)\n "
|
def valid_L2I(self):
all_testnll = []
for (images, labels) in test_data_iterator:
feed_dict = {}
x = np.cast[np.float32]((images - 127.5) / 127.5)
x = np.split(x, args.nr_gpu)
feed_dict.update({self.Worker_L2I.xs[i]: x[i] for i in range(args.nr_gpu)})
if args.useSoftLabel == 1:
soft_labels_ = self.sess.run(self.soft_labels, feed_dict={self.Worker_I2L.model.input_image: images.astype('float32'), self.Worker_I2L.model.needImgAug: False})
one_hot_labels_ = np.zeros((args.batch_size, 10), dtype=np.float32)
one_hot_labels_[np.arange(args.batch_size), labels] = 1.0
feed_dict.update({self.Worker_L2I.hs[i]: (1.0 - alpha_) * soft_labels_ + alpha_ * one_hot_labels_ for i in range(self.Worker_L2I.args.nr_gpu)})
else:
y = np.split(labels, args.nr_gpu)
feed_dict.update({self.Worker_L2I.ys[i]: y[i] for i in range(args.nr_gpu)})
all_testnll.append(self.sess.run([self.nlls_L2I_test_bpd], feed_dict))
avg_testnll = np.mean(all_testnll)
print('[L2I], testnll={0:.6f}'.format(avg_testnll))
"\n for alpha_ in range(11):\n print('alpha=%f' % (alpha_ * 0.1))\n self.L2I_TestNll(alpha_ * 0.1)\n "
|
DualLearning
|
positive
|
def test_atan(self):
<DeepExtract>
self = Mpfr_t()
mpfr_init2(self, 53)
x = self
</DeepExtract>
<DeepExtract>
self = Mpfr_t()
mpfr_init2(self, 53)
y = self
</DeepExtract>
mpfr_set_d(x, 0.625, MPFR_RNDN)
mpfr_atan(y, x, MPFR_RNDN)
self.assertEqual(mpfr_get_d(y, MPFR_RNDN), 0.5585993153435624)
|
def test_atan(self):
self = Mpfr_t()
mpfr_init2(self, 53)
x = self
self = Mpfr_t()
mpfr_init2(self, 53)
y = self
mpfr_set_d(x, 0.625, MPFR_RNDN)
mpfr_atan(y, x, MPFR_RNDN)
self.assertEqual(mpfr_get_d(y, MPFR_RNDN), 0.5585993153435624)
|
bigfloat
|
positive
|
@utf8_decode
def receive_ws_callback(self, team_hash, fd):
"""
This is called by the global method of the same name.
It is triggered when we have incoming data on a websocket,
which needs to be read. Once it is read, we will ensure
the data is valid JSON, add metadata, and place it back
on the queue for processing as JSON.
"""
team = self.teams[team_hash]
while True:
try:
(opcode, data) = team.ws.recv_data(control_frame=True)
except ssl.SSLWantReadError:
return w.WEECHAT_RC_OK
except (WebSocketConnectionClosedException, socket.error) as e:
<DeepExtract>
if not (isinstance(e, WebSocketConnectionClosedException) or e.errno in (errno.EPIPE, errno.ECONNRESET, errno.ETIMEDOUT)):
raise
w.prnt(team.channel_buffer, 'Lost connection to slack team {} (on {}), reconnecting.'.format(team.domain, 'receive'))
dbg('Socket failed on {} with exception:\n{}'.format('receive', format_exc_tb()), level=5)
team.set_disconnected()
</DeepExtract>
return w.WEECHAT_RC_OK
if opcode == ABNF.OPCODE_PONG:
team.last_pong_time = time.time()
return w.WEECHAT_RC_OK
elif opcode != ABNF.OPCODE_TEXT:
return w.WEECHAT_RC_OK
message_json = json.loads(data.decode('utf-8'))
if self.recording:
<DeepExtract>
now = time.time()
if team:
team_subdomain = team.subdomain
else:
team_json = message_json.get('team')
if team_json:
team_subdomain = team_json.get('domain')
else:
team_subdomain = 'unknown_team'
directory = '{}/{}'.format(RECORD_DIR, team_subdomain)
if 'websocket':
directory = '{}/{}'.format(directory, 'websocket')
if not os.path.exists(directory):
os.makedirs(directory)
mtype = message_json.get('type', 'unknown')
f = open('{}/{}-{}.json'.format(directory, now, mtype), 'w')
f.write('{}'.format(json.dumps(message_json)))
f.close()
</DeepExtract>
message_json['wee_slack_metadata_team'] = team
<DeepExtract>
dbg('RECEIVED FROM QUEUE')
if slow:
self.slow_queue.append(message_json)
else:
self.queue.append(message_json)
</DeepExtract>
|
@utf8_decode
def receive_ws_callback(self, team_hash, fd):
"""
This is called by the global method of the same name.
It is triggered when we have incoming data on a websocket,
which needs to be read. Once it is read, we will ensure
the data is valid JSON, add metadata, and place it back
on the queue for processing as JSON.
"""
team = self.teams[team_hash]
while True:
try:
(opcode, data) = team.ws.recv_data(control_frame=True)
except ssl.SSLWantReadError:
return w.WEECHAT_RC_OK
except (WebSocketConnectionClosedException, socket.error) as e:
if not (isinstance(e, WebSocketConnectionClosedException) or e.errno in (errno.EPIPE, errno.ECONNRESET, errno.ETIMEDOUT)):
raise
w.prnt(team.channel_buffer, 'Lost connection to slack team {} (on {}), reconnecting.'.format(team.domain, 'receive'))
dbg('Socket failed on {} with exception:\n{}'.format('receive', format_exc_tb()), level=5)
team.set_disconnected()
return w.WEECHAT_RC_OK
if opcode == ABNF.OPCODE_PONG:
team.last_pong_time = time.time()
return w.WEECHAT_RC_OK
elif opcode != ABNF.OPCODE_TEXT:
return w.WEECHAT_RC_OK
message_json = json.loads(data.decode('utf-8'))
if self.recording:
now = time.time()
if team:
team_subdomain = team.subdomain
else:
team_json = message_json.get('team')
if team_json:
team_subdomain = team_json.get('domain')
else:
team_subdomain = 'unknown_team'
directory = '{}/{}'.format(RECORD_DIR, team_subdomain)
if 'websocket':
directory = '{}/{}'.format(directory, 'websocket')
if not os.path.exists(directory):
os.makedirs(directory)
mtype = message_json.get('type', 'unknown')
f = open('{}/{}-{}.json'.format(directory, now, mtype), 'w')
f.write('{}'.format(json.dumps(message_json)))
f.close()
message_json['wee_slack_metadata_team'] = team
dbg('RECEIVED FROM QUEUE')
if slow:
self.slow_queue.append(message_json)
else:
self.queue.append(message_json)
|
dotfiles
|
positive
|
def save_params(self, path):
<DeepExtract>
for (param, target) in zip(self.network.parameters, *self.best_params):
param.set_value(target)
if free_params:
for (param, param_value) in zip(self.network.free_parameters, free_params):
param.set_value(param_value)
</DeepExtract>
self.network.save_params(path)
|
def save_params(self, path):
for (param, target) in zip(self.network.parameters, *self.best_params):
param.set_value(target)
if free_params:
for (param, param_value) in zip(self.network.free_parameters, free_params):
param.set_value(param_value)
self.network.save_params(path)
|
deepy
|
positive
|
def add_custom_splits(self, string: str, custom_splits: Iterable[CustomSplit]) -> None:
"""Custom Split Map Setter Method
Side Effects:
Adds a mapping from @string to the custom splits @custom_splits.
"""
<DeepExtract>
key = (id(string), string)
</DeepExtract>
self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)
|
def add_custom_splits(self, string: str, custom_splits: Iterable[CustomSplit]) -> None:
"""Custom Split Map Setter Method
Side Effects:
Adds a mapping from @string to the custom splits @custom_splits.
"""
key = (id(string), string)
self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)
|
black
|
positive
|
def wrap(fn):
@functools.wraps(fn)
def _inner(*args, **kwargs):
func_args = inspect.getcallargs(fn, *args, **kwargs)
self = func_args.get('self')
self._depth += 1
def trigger(event):
for (f, event_flag) in self._listeners:
if event_flag & event_type:
event.args = args[1:]
event.kwargs = kwargs
event.flag = event_type
event.depth = self._depth
f(event)
_traceback = None
_retval = None
try:
<DeepExtract>
for (f, event_flag) in self._listeners:
if event_flag & event_type:
HookEvent(is_before=True).args = args[1:]
HookEvent(is_before=True).kwargs = kwargs
HookEvent(is_before=True).flag = event_type
HookEvent(is_before=True).depth = self._depth
f(HookEvent(is_before=True))
</DeepExtract>
_retval = fn(*args, **kwargs)
return _retval
except Exception as e:
_traceback = Traceback(traceback.format_exc(), e)
raise
finally:
<DeepExtract>
for (f, event_flag) in self._listeners:
if event_flag & event_type:
HookEvent(is_before=False, retval=_retval, traceback=_traceback).args = args[1:]
HookEvent(is_before=False, retval=_retval, traceback=_traceback).kwargs = kwargs
HookEvent(is_before=False, retval=_retval, traceback=_traceback).flag = event_type
HookEvent(is_before=False, retval=_retval, traceback=_traceback).depth = self._depth
f(HookEvent(is_before=False, retval=_retval, traceback=_traceback))
</DeepExtract>
self._depth -= 1
return _inner
|
def wrap(fn):
@functools.wraps(fn)
def _inner(*args, **kwargs):
func_args = inspect.getcallargs(fn, *args, **kwargs)
self = func_args.get('self')
self._depth += 1
def trigger(event):
for (f, event_flag) in self._listeners:
if event_flag & event_type:
event.args = args[1:]
event.kwargs = kwargs
event.flag = event_type
event.depth = self._depth
f(event)
_traceback = None
_retval = None
try:
for (f, event_flag) in self._listeners:
if event_flag & event_type:
HookEvent(is_before=True).args = args[1:]
HookEvent(is_before=True).kwargs = kwargs
HookEvent(is_before=True).flag = event_type
HookEvent(is_before=True).depth = self._depth
f(HookEvent(is_before=True))
_retval = fn(*args, **kwargs)
return _retval
except Exception as e:
_traceback = Traceback(traceback.format_exc(), e)
raise
finally:
for (f, event_flag) in self._listeners:
if event_flag & event_type:
HookEvent(is_before=False, retval=_retval, traceback=_traceback).args = args[1:]
HookEvent(is_before=False, retval=_retval, traceback=_traceback).kwargs = kwargs
HookEvent(is_before=False, retval=_retval, traceback=_traceback).flag = event_type
HookEvent(is_before=False, retval=_retval, traceback=_traceback).depth = self._depth
f(HookEvent(is_before=False, retval=_retval, traceback=_traceback))
self._depth -= 1
return _inner
|
ATX
|
positive
|
@pytest.mark.django_db
def test_plan_list_update_200_if_authorized(client, django_user_model):
"""Tests for 200 response for PlanListUpdate with adequate permissions."""
<DeepExtract>
plan_list = models.PlanList.objects.create(title=title)
</DeepExtract>
content = ContentType.objects.get_for_model(models.SubscriptionPlan)
permission = Permission.objects.get(content_type=content, codename='subscriptions')
user = django_user_model.objects.create_user(username='user', password='password')
user.user_permissions.add(permission)
client.login(username='user', password='password')
response = client.get(reverse('dfs_plan_list_update', kwargs={'plan_list_id': plan_list.id}))
assert response.status_code == 200
|
@pytest.mark.django_db
def test_plan_list_update_200_if_authorized(client, django_user_model):
"""Tests for 200 response for PlanListUpdate with adequate permissions."""
plan_list = models.PlanList.objects.create(title=title)
content = ContentType.objects.get_for_model(models.SubscriptionPlan)
permission = Permission.objects.get(content_type=content, codename='subscriptions')
user = django_user_model.objects.create_user(username='user', password='password')
user.user_permissions.add(permission)
client.login(username='user', password='password')
response = client.get(reverse('dfs_plan_list_update', kwargs={'plan_list_id': plan_list.id}))
assert response.status_code == 200
|
django-flexible-subscriptions
|
positive
|
def execute_jobs(job_specs: Iterable[JobSpec], dry_run: bool=False, caliban_config: Optional[Dict[str, Any]]=None):
"""executes a sequence of jobs based on job specs
Arg:
job_specs: specifications for jobs to be executed
dry_run: if True, only print what would be done
caliban_config: caliban configuration data
"""
caliban_config = caliban_config or {}
with ut.tqdm_logging() as orig_stream:
pbar = tqdm.tqdm(logged_job_specs(job_specs), file=orig_stream, total=len(job_specs), ascii=True, unit='experiment', desc='Executing')
for (idx, job_spec) in enumerate(pbar, 1):
command = job_spec.spec['command']
logging.info(f"Running command: {' '.join(command)}")
if not dry_run:
(_, ret_code) = ufs.capture_stdout(command, '', ut.TqdmFile(sys.stderr))
else:
ret_code = 0
j = Job(spec=job_spec, container=job_spec.spec['container'], details={'ret_code': ret_code}, status=JobStatus.SUCCEEDED if ret_code == 0 else JobStatus.FAILED)
<DeepExtract>
if j.status == JobStatus.SUCCEEDED:
logging.info(t.green(f'Job {idx} succeeded!'))
else:
logging.error(t.red(f"Job {idx} failed with return code {j.details['ret_code']}."))
args = ce.experiment_to_args(j.spec.experiment.kwargs, j.spec.experiment.args)
logging.error(t.red(f'Failing args for job {idx}: {args}'))
</DeepExtract>
if dry_run:
logging.info(t.yellow(f'\nTo build your image and execute these jobs, run your command again without {c.DRY_RUN_FLAG}\n'))
return None
|
def execute_jobs(job_specs: Iterable[JobSpec], dry_run: bool=False, caliban_config: Optional[Dict[str, Any]]=None):
"""executes a sequence of jobs based on job specs
Arg:
job_specs: specifications for jobs to be executed
dry_run: if True, only print what would be done
caliban_config: caliban configuration data
"""
caliban_config = caliban_config or {}
with ut.tqdm_logging() as orig_stream:
pbar = tqdm.tqdm(logged_job_specs(job_specs), file=orig_stream, total=len(job_specs), ascii=True, unit='experiment', desc='Executing')
for (idx, job_spec) in enumerate(pbar, 1):
command = job_spec.spec['command']
logging.info(f"Running command: {' '.join(command)}")
if not dry_run:
(_, ret_code) = ufs.capture_stdout(command, '', ut.TqdmFile(sys.stderr))
else:
ret_code = 0
j = Job(spec=job_spec, container=job_spec.spec['container'], details={'ret_code': ret_code}, status=JobStatus.SUCCEEDED if ret_code == 0 else JobStatus.FAILED)
if j.status == JobStatus.SUCCEEDED:
logging.info(t.green(f'Job {idx} succeeded!'))
else:
logging.error(t.red(f"Job {idx} failed with return code {j.details['ret_code']}."))
args = ce.experiment_to_args(j.spec.experiment.kwargs, j.spec.experiment.args)
logging.error(t.red(f'Failing args for job {idx}: {args}'))
if dry_run:
logging.info(t.yellow(f'\nTo build your image and execute these jobs, run your command again without {c.DRY_RUN_FLAG}\n'))
return None
|
caliban
|
positive
|
def check_metrics(self, example_script, expect):
"""A helper method to test the metrics being returned.
:param example_script: Filename of an example script to test
:param expect: dict with expected values of metrics
"""
self.b_mgr.metrics = metrics.Metrics()
self.b_mgr.scores = []
<DeepExtract>
path = os.path.join(os.getcwd(), 'examples', example_script)
self.b_mgr.ignore_nosec = ignore_nosec
self.b_mgr.discover_files([path], True)
self.b_mgr.run_tests()
</DeepExtract>
m = self.b_mgr.metrics.data
for k in expect:
if k != 'issues':
self.assertEqual(expect[k], m['_totals'][k])
if 'issues' in expect:
for (criteria, default) in C.CRITERIA:
for rank in C.RANKING:
label = f'{criteria}.{rank}'
expected = 0
if expect['issues'].get(criteria).get(rank):
expected = expect['issues'][criteria][rank]
self.assertEqual(expected, m['_totals'][label])
|
def check_metrics(self, example_script, expect):
"""A helper method to test the metrics being returned.
:param example_script: Filename of an example script to test
:param expect: dict with expected values of metrics
"""
self.b_mgr.metrics = metrics.Metrics()
self.b_mgr.scores = []
path = os.path.join(os.getcwd(), 'examples', example_script)
self.b_mgr.ignore_nosec = ignore_nosec
self.b_mgr.discover_files([path], True)
self.b_mgr.run_tests()
m = self.b_mgr.metrics.data
for k in expect:
if k != 'issues':
self.assertEqual(expect[k], m['_totals'][k])
if 'issues' in expect:
for (criteria, default) in C.CRITERIA:
for rank in C.RANKING:
label = f'{criteria}.{rank}'
expected = 0
if expect['issues'].get(criteria).get(rank):
expected = expect['issues'][criteria][rank]
self.assertEqual(expected, m['_totals'][label])
|
bandit
|
positive
|
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator. Default: ``True``
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching. Default:
``False``
"""
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
self.epoch += 1
<DeepExtract>
def shuffle_batches(batches, seed):
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
self._cur_epoch_itr = batches
if self._supports_prefetch:
batches = self.frozen_batches
if shuffle and (not fix_batches_to_gpus):
batches = shuffle_batches(list(batches), self.seed + self.epoch)
batches = list(ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]))
self.dataset.prefetch([i for s in batches for i in s])
if shuffle and fix_batches_to_gpus:
batches = shuffle_batches(batches, self.seed + self.epoch + self.shard_id)
else:
if shuffle:
batches = shuffle_batches(list(self.frozen_batches), self.seed + self.epoch)
else:
batches = self.frozen_batches
batches = ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
self._cur_epoch_itr = CountingIterator(torch.utils.data.DataLoader(self.dataset, collate_fn=self.collate_fn, batch_sampler=batches))
</DeepExtract>
return self._cur_epoch_itr
|
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator. Default: ``True``
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching. Default:
``False``
"""
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
self.epoch += 1
def shuffle_batches(batches, seed):
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
self._cur_epoch_itr = batches
if self._supports_prefetch:
batches = self.frozen_batches
if shuffle and (not fix_batches_to_gpus):
batches = shuffle_batches(list(batches), self.seed + self.epoch)
batches = list(ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]))
self.dataset.prefetch([i for s in batches for i in s])
if shuffle and fix_batches_to_gpus:
batches = shuffle_batches(batches, self.seed + self.epoch + self.shard_id)
else:
if shuffle:
batches = shuffle_batches(list(self.frozen_batches), self.seed + self.epoch)
else:
batches = self.frozen_batches
batches = ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
self._cur_epoch_itr = CountingIterator(torch.utils.data.DataLoader(self.dataset, collate_fn=self.collate_fn, batch_sampler=batches))
return self._cur_epoch_itr
|
control-length
|
positive
|
@read_lock(config_lock)
def get_tag_opt(self, tag, option):
<DeepExtract>
if tag in self.tag_config:
tc = eval(repr(self.tag_config[tag]), {}, {})
tc = eval(repr(self.tag_template_config), {}, {})
</DeepExtract>
(valid, value) = access_dict(tc, option)
if not valid:
return None
return value
|
@read_lock(config_lock)
def get_tag_opt(self, tag, option):
if tag in self.tag_config:
tc = eval(repr(self.tag_config[tag]), {}, {})
tc = eval(repr(self.tag_template_config), {}, {})
(valid, value) = access_dict(tc, option)
if not valid:
return None
return value
|
canto-curses
|
positive
|
def test_response_forms_identical_when_form_changed(self):
form_before = self.initial_response.context['form'].serialized
<DeepExtract>
question = models.FormQuestion.objects.filter(pk=form_before[0]['id'])
question.update(text='this text is not persistent')
</DeepExtract>
response = self.client.get(self.wizard_url)
form_after = response.context['form'].serialized
self.assertNotEqual('this text is not persistent', form_after[0]['question_text'])
self.assertEqual(form_before, form_after)
|
def test_response_forms_identical_when_form_changed(self):
form_before = self.initial_response.context['form'].serialized
question = models.FormQuestion.objects.filter(pk=form_before[0]['id'])
question.update(text='this text is not persistent')
response = self.client.get(self.wizard_url)
form_after = response.context['form'].serialized
self.assertNotEqual('this text is not persistent', form_after[0]['question_text'])
self.assertEqual(form_before, form_after)
|
callisto-core
|
positive
|
def get_ips(network, alg, batch_size):
<DeepExtract>
os.environ['DEBUG_MEM'] = str(debug_mem)
os.environ['DEBUG_SPEED'] = str(True)
cmd = network_to_command[network]
cmd = cmd.replace('BS', f'{batch_size}').replace('CONFIG', alg_to_config[alg])
return run_cmd(cmd)
</DeepExtract>
line = list(open('speed_results.tsv').readlines())[-1]
return json.loads(line)['ips']
|
def get_ips(network, alg, batch_size):
os.environ['DEBUG_MEM'] = str(debug_mem)
os.environ['DEBUG_SPEED'] = str(True)
cmd = network_to_command[network]
cmd = cmd.replace('BS', f'{batch_size}').replace('CONFIG', alg_to_config[alg])
return run_cmd(cmd)
line = list(open('speed_results.tsv').readlines())[-1]
return json.loads(line)['ips']
|
actnn
|
positive
|
def __init__(self, train_name, test_name, label_map):
self.label_map = label_map
self.train_df = pd.read_csv(config.datasets_structure / train_name)
self.test_df = pd.read_csv(config.datasets_structure / test_name)
<DeepExtract>
self.train_df = self.normalize(self.train_df)
self.test_df = self.normalize(self.test_df)
</DeepExtract>
<DeepExtract>
self.train_df = self.label(self.train_df)
self.test_df = self.label(self.test_df)
</DeepExtract>
|
def __init__(self, train_name, test_name, label_map):
self.label_map = label_map
self.train_df = pd.read_csv(config.datasets_structure / train_name)
self.test_df = pd.read_csv(config.datasets_structure / test_name)
self.train_df = self.normalize(self.train_df)
self.test_df = self.normalize(self.test_df)
self.train_df = self.label(self.train_df)
self.test_df = self.label(self.test_df)
|
axcell
|
positive
|
def parse_file(self, filepath: Path, config: NamedTuple, flags: str=None) -> Tuple[HTS, List[FNode], List[FNode]]:
"""
Reads an initial state file and produces (HTS, invariants, ltl_invariants)
"""
hts = HTS(filepath.name)
ts = TS('TS %s' % filepath.name)
init = []
with filepath.open('r') as f:
<DeepExtract>
hts = HTS('INIT')
ts = TS('TS INIT')
init = []
for line in f.read().split('\n'):
line = line.strip()
if not line:
continue
else:
res = self.parse_line(line)
if res is not None:
init.append(res)
Logger.msg('Initial state file set concrete values for {} state variables'.format(len(init)), 1)
ts.init = And(init)
ts.invar = TRUE()
ts.trans = TRUE()
hts.add_ts(ts)
hts = hts
</DeepExtract>
return (hts, None, None)
|
def parse_file(self, filepath: Path, config: NamedTuple, flags: str=None) -> Tuple[HTS, List[FNode], List[FNode]]:
"""
Reads an initial state file and produces (HTS, invariants, ltl_invariants)
"""
hts = HTS(filepath.name)
ts = TS('TS %s' % filepath.name)
init = []
with filepath.open('r') as f:
hts = HTS('INIT')
ts = TS('TS INIT')
init = []
for line in f.read().split('\n'):
line = line.strip()
if not line:
continue
else:
res = self.parse_line(line)
if res is not None:
init.append(res)
Logger.msg('Initial state file set concrete values for {} state variables'.format(len(init)), 1)
ts.init = And(init)
ts.invar = TRUE()
ts.trans = TRUE()
hts.add_ts(ts)
hts = hts
return (hts, None, None)
|
CoSA
|
positive
|
def _decode(self, inputs, context, attn_ctx=None, attn_mask=None):
batch_size = context.size(0)
<DeepExtract>
batch_size = context.size(0)
hiddens = self.enc2dec_hidden_fc(context)
if self.rnn_type == 'gru':
hiddens = hiddens.view(batch_size, self.n_decoder_layers, self.decoder_hidden_dim).transpose(0, 1).contiguous()
elif self.rnn_type == 'lstm':
hiddens = hiddens.view(batch_size, self.n_decoder_layers, self.decoder_hidden_dim, 2)
h = hiddens[:, :, :, 0].transpose(0, 1).contiguous()
c = hiddens[:, :, :, 1].transpose(0, 1).contiguous()
hiddens = (h, c)
hiddens = hiddens
</DeepExtract>
feats = None
feats = context.unsqueeze(1).repeat(1, inputs.size(1), 1)
ret_dict = self.decoder.forward(batch_size=batch_size, inputs=inputs, hiddens=hiddens, feats=feats, attn_ctx=attn_ctx, attn_mask=attn_mask, mode=DecoderRNN.MODE_TEACHER_FORCE)
return ret_dict
|
def _decode(self, inputs, context, attn_ctx=None, attn_mask=None):
batch_size = context.size(0)
batch_size = context.size(0)
hiddens = self.enc2dec_hidden_fc(context)
if self.rnn_type == 'gru':
hiddens = hiddens.view(batch_size, self.n_decoder_layers, self.decoder_hidden_dim).transpose(0, 1).contiguous()
elif self.rnn_type == 'lstm':
hiddens = hiddens.view(batch_size, self.n_decoder_layers, self.decoder_hidden_dim, 2)
h = hiddens[:, :, :, 0].transpose(0, 1).contiguous()
c = hiddens[:, :, :, 1].transpose(0, 1).contiguous()
hiddens = (h, c)
hiddens = hiddens
feats = None
feats = context.unsqueeze(1).repeat(1, inputs.size(1), 1)
ret_dict = self.decoder.forward(batch_size=batch_size, inputs=inputs, hiddens=hiddens, feats=feats, attn_ctx=attn_ctx, attn_mask=attn_mask, mode=DecoderRNN.MODE_TEACHER_FORCE)
return ret_dict
|
dialog-processing
|
positive
|
def on_middle_click(self):
is_fullscreened = self.lookup_action('fullscreen').get_state()
if is_fullscreened:
hc_action = self.lookup_action('hide_controls')
<DeepExtract>
hc_action = self.lookup_action('hide_controls')
hc_action.change_state(GLib.Variant.new_boolean(not hc_action.get_state()))
</DeepExtract>
else:
self.options_manager.get_active_pane().middle_click_action()
|
def on_middle_click(self):
is_fullscreened = self.lookup_action('fullscreen').get_state()
if is_fullscreened:
hc_action = self.lookup_action('hide_controls')
hc_action = self.lookup_action('hide_controls')
hc_action.change_state(GLib.Variant.new_boolean(not hc_action.get_state()))
else:
self.options_manager.get_active_pane().middle_click_action()
|
drawing
|
positive
|
@patch.object(AppReleaseProvider, 'get_release_info')
def test_create_co_maintainer(self, get_release_info):
owner = get_user_model().objects.create_user(username='owner', password='owner', email='owner@owner.com')
<DeepExtract>
app = App.objects.create(id='news', owner=owner)
app.co_maintainers.set([self.user])
app.save()
return AppRelease.objects.create(version=version, app=app)
</DeepExtract>
self._login()
get_release_info.return_value = (self.app_args, 'checksum')
with self.settings(VALIDATE_CERTIFICATES=False):
response = self.api_client.post(self.create_url, data={'download': 'https://download.com', 'signature': 'sign'}, format='json')
self.assertEqual(200, response.status_code)
AppRelease.objects.get(version='9.0.0', app__id='news')
|
@patch.object(AppReleaseProvider, 'get_release_info')
def test_create_co_maintainer(self, get_release_info):
owner = get_user_model().objects.create_user(username='owner', password='owner', email='owner@owner.com')
app = App.objects.create(id='news', owner=owner)
app.co_maintainers.set([self.user])
app.save()
return AppRelease.objects.create(version=version, app=app)
self._login()
get_release_info.return_value = (self.app_args, 'checksum')
with self.settings(VALIDATE_CERTIFICATES=False):
response = self.api_client.post(self.create_url, data={'download': 'https://download.com', 'signature': 'sign'}, format='json')
self.assertEqual(200, response.status_code)
AppRelease.objects.get(version='9.0.0', app__id='news')
|
appstore
|
positive
|
def _invert_signal_tree(tree: List, cur_mpx: Optional[Dict]=None, ret: Optional[Dict]=None) -> Dict:
"""The tree is laid out with two kinds of dicts. Single-element dict
keyed by string -> multiplexer, which is own dict keyed by
integers.
"""
if ret is None:
ret = {}
if cur_mpx is None:
cur_mpx = {}
for sigs in tree:
if type(sigs) == dict:
((mpx_name, mpx_vals),) = sigs.items()
for (mpx_val, sig_tree) in mpx_vals.items():
next_mpx = cur_mpx.copy()
next_mpx[mpx_name] = mpx_val
<DeepExtract>
if ret is None:
ret = {}
if next_mpx is None:
next_mpx = {}
for sigs in sig_tree:
if type(sigs) == dict:
((mpx_name, mpx_vals),) = sigs.items()
for (mpx_val, sig_tree) in mpx_vals.items():
next_mpx = next_mpx.copy()
next_mpx[mpx_name] = mpx_val
_invert_signal_tree(sig_tree, next_mpx, ret)
elif type(sigs) == str:
ret.setdefault(sigs, []).append(set(tuple(next_mpx.items())))
else:
raise TypeError(repr(sigs))
return ret
</DeepExtract>
elif type(sigs) == str:
ret.setdefault(sigs, []).append(set(tuple(cur_mpx.items())))
else:
raise TypeError(repr(sigs))
return ret
|
def _invert_signal_tree(tree: List, cur_mpx: Optional[Dict]=None, ret: Optional[Dict]=None) -> Dict:
"""The tree is laid out with two kinds of dicts. Single-element dict
keyed by string -> multiplexer, which is own dict keyed by
integers.
"""
if ret is None:
ret = {}
if cur_mpx is None:
cur_mpx = {}
for sigs in tree:
if type(sigs) == dict:
((mpx_name, mpx_vals),) = sigs.items()
for (mpx_val, sig_tree) in mpx_vals.items():
next_mpx = cur_mpx.copy()
next_mpx[mpx_name] = mpx_val
if ret is None:
ret = {}
if next_mpx is None:
next_mpx = {}
for sigs in sig_tree:
if type(sigs) == dict:
((mpx_name, mpx_vals),) = sigs.items()
for (mpx_val, sig_tree) in mpx_vals.items():
next_mpx = next_mpx.copy()
next_mpx[mpx_name] = mpx_val
_invert_signal_tree(sig_tree, next_mpx, ret)
elif type(sigs) == str:
ret.setdefault(sigs, []).append(set(tuple(next_mpx.items())))
else:
raise TypeError(repr(sigs))
return ret
elif type(sigs) == str:
ret.setdefault(sigs, []).append(set(tuple(cur_mpx.items())))
else:
raise TypeError(repr(sigs))
return ret
|
cantools
|
positive
|