Dataset columns:
before: string (lengths 0 to 955k)
after: string (lengths 0 to 877k)
repo: string (lengths 1 to 74)
type: string (1 distinct value)
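Each record pairs a flattened "before" snippet, in which helper code inlined from another function is wrapped in <DeepExtract>...</DeepExtract> markers, with an "after" snippet that keeps the inlined body but drops the markers, plus the source repository name and a type label (every row shown here is "positive"). A minimal sketch of that before/after relationship, using a shortened hypothetical record rather than a row copied from the data:

import re

# Hypothetical record mirroring the four columns above; the code strings are
# shortened stand-ins, not actual dataset rows.
record = {
    'before': "def f(x): <DeepExtract> y = x + 1 </DeepExtract> return y",
    'after': "def f(x): y = x + 1 return y",
    'repo': 'example-repo',
    'type': 'positive',
}

def strip_deepextract_markers(code: str) -> str:
    """Drop the <DeepExtract> / </DeepExtract> markers but keep the inlined body."""
    cleaned = re.sub(r'</?DeepExtract>', '', code)
    # Collapse the whitespace left behind where the markers were removed.
    return re.sub(r'\s+', ' ', cleaned).strip()

assert strip_deepextract_markers(record['before']) == record['after']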
@property def connection_manager(self): if self._connection_manager is None: <DeepExtract> @contextmanager @wraps(client_generator) def context_manager(*args, **kwargs): nonlocal conn_str, gen_func try: ret = client_generator(self._connection_string, *args, **kwargs) yield ret finally: del ret self._connection_manager = context_manager </DeepExtract> return self._connection_manager
@property def connection_manager(self): if self._connection_manager is None: @contextmanager @wraps(client_generator) def context_manager(*args, **kwargs): nonlocal conn_str, gen_func try: ret = client_generator(self._connection_string, *args, **kwargs) yield ret finally: del ret self._connection_manager = context_manager return self._connection_manager
backup
positive
def sweep_hyperparameters(self, function, num_configs): returned_value_and_params = [] for _ in range(num_configs): <DeepExtract> hyperparameters = {} for hp in self._hyperparameters: hyperparameters[hp.name] = hp.generate() hyperparameters = ppp.dot_map_dict_to_nested_dict(hyperparameters) kwargs = ppp.merge_recursive_dicts(hyperparameters, copy.deepcopy(self._default_kwargs), ignore_duplicate_keys_in_second_dict=True) </DeepExtract> score = function(**kwargs) returned_value_and_params.append((score, kwargs)) return returned_value_and_params
def sweep_hyperparameters(self, function, num_configs): returned_value_and_params = [] for _ in range(num_configs): hyperparameters = {} for hp in self._hyperparameters: hyperparameters[hp.name] = hp.generate() hyperparameters = ppp.dot_map_dict_to_nested_dict(hyperparameters) kwargs = ppp.merge_recursive_dicts(hyperparameters, copy.deepcopy(self._default_kwargs), ignore_duplicate_keys_in_second_dict=True) score = function(**kwargs) returned_value_and_params.append((score, kwargs)) return returned_value_and_params
DoorGym
positive
def _test_one_item(self, item_id): <DeepExtract> if ids is None: ids = self.default_ids for item_id in ids: self.create_model(item_id, **kwargs) </DeepExtract> <DeepExtract> params = [self.ids_param, 'fields', 'exclude'] args = [[item_id], fields, self.always_exclude] data = {param: arg for (param, arg) in zip(params, args) if arg} data.update(extra_args) get_response = self.authenticated_get(self.path(data)) if self.test_post_method: post_response = self.authenticated_post(self.path(), data=data) self.assertEqual(get_response.status_code, post_response.status_code) if 200 <= get_response.status_code < 300: self.assertEqual(get_response.data, post_response.data) response = get_response </DeepExtract> self.assertEqual(response.status_code, 200) self.assertCountEqual(response.data, [self.expected_result(item_id)])
def _test_one_item(self, item_id): if ids is None: ids = self.default_ids for item_id in ids: self.create_model(item_id, **kwargs) params = [self.ids_param, 'fields', 'exclude'] args = [[item_id], fields, self.always_exclude] data = {param: arg for (param, arg) in zip(params, args) if arg} data.update(extra_args) get_response = self.authenticated_get(self.path(data)) if self.test_post_method: post_response = self.authenticated_post(self.path(), data=data) self.assertEqual(get_response.status_code, post_response.status_code) if 200 <= get_response.status_code < 300: self.assertEqual(get_response.data, post_response.data) response = get_response self.assertEqual(response.status_code, 200) self.assertCountEqual(response.data, [self.expected_result(item_id)])
edx-analytics-data-api
positive
def capture_start(self): <DeepExtract> if self.fn_get_print_value: val = self.fn_get_print_value() else: val = self.fn_get_value() if self.fn_formatter: val = self.fn_formatter(val) self.val_orig = val </DeepExtract> self.val_edit = str(self.val_orig) self.val_pos = len(self.val_edit) self.captured = True self.keys = {'ZERO': '0', 'NUMPAD_0': '0', 'ONE': '1', 'NUMPAD_1': '1', 'TWO': '2', 'NUMPAD_2': '2', 'THREE': '3', 'NUMPAD_3': '3', 'FOUR': '4', 'NUMPAD_4': '4', 'FIVE': '5', 'NUMPAD_5': '5', 'SIX': '6', 'NUMPAD_6': '6', 'SEVEN': '7', 'NUMPAD_7': '7', 'EIGHT': '8', 'NUMPAD_8': '8', 'NINE': '9', 'NUMPAD_9': '9', 'PERIOD': '.', 'NUMPAD_PERIOD': '.', 'MINUS': '-', 'NUMPAD_MINUS': '-'} self.drawing.set_cursor('TEXT') return True
def capture_start(self): if self.fn_get_print_value: val = self.fn_get_print_value() else: val = self.fn_get_value() if self.fn_formatter: val = self.fn_formatter(val) self.val_orig = val self.val_edit = str(self.val_orig) self.val_pos = len(self.val_edit) self.captured = True self.keys = {'ZERO': '0', 'NUMPAD_0': '0', 'ONE': '1', 'NUMPAD_1': '1', 'TWO': '2', 'NUMPAD_2': '2', 'THREE': '3', 'NUMPAD_3': '3', 'FOUR': '4', 'NUMPAD_4': '4', 'FIVE': '5', 'NUMPAD_5': '5', 'SIX': '6', 'NUMPAD_6': '6', 'SEVEN': '7', 'NUMPAD_7': '7', 'EIGHT': '8', 'NUMPAD_8': '8', 'NINE': '9', 'NUMPAD_9': '9', 'PERIOD': '.', 'NUMPAD_PERIOD': '.', 'MINUS': '-', 'NUMPAD_MINUS': '-'} self.drawing.set_cursor('TEXT') return True
addon_common
positive
def __init__(self, name, ob_space, ac_space, kind='large'): with tf.variable_scope(name): <DeepExtract> assert isinstance(ob_space, gym.spaces.Box) self.pdtype = pdtype = make_pdtype(ac_space) sequence_length = None ob = U.get_placeholder(name='ob', dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape)) x = ob / 255.0 if kind == 'small': x = tf.nn.relu(U.conv2d(x, 16, 'l1', [8, 8], [4, 4], pad='VALID')) x = tf.nn.relu(U.conv2d(x, 32, 'l2', [4, 4], [2, 2], pad='VALID')) x = U.flattenallbut0(x) x = tf.nn.relu(tf.layers.dense(x, 256, name='lin', kernel_initializer=U.normc_initializer(1.0))) elif kind == 'large': x = tf.nn.relu(U.conv2d(x, 32, 'l1', [8, 8], [4, 4], pad='VALID')) x = tf.nn.relu(U.conv2d(x, 64, 'l2', [4, 4], [2, 2], pad='VALID')) x = tf.nn.relu(U.conv2d(x, 64, 'l3', [3, 3], [1, 1], pad='VALID')) x = U.flattenallbut0(x) x = tf.nn.relu(tf.layers.dense(x, 512, name='lin', kernel_initializer=U.normc_initializer(1.0))) else: raise NotImplementedError logits = tf.layers.dense(x, pdtype.param_shape()[0], name='logits', kernel_initializer=U.normc_initializer(0.01)) self.pd = pdtype.pdfromflat(logits) self.vpred = tf.layers.dense(x, 1, name='value', kernel_initializer=U.normc_initializer(1.0))[:, 0] self.state_in = [] self.state_out = [] stochastic = tf.placeholder(dtype=tf.bool, shape=()) ac = self.pd.sample() self._act = U.function([stochastic, ob], [ac, self.vpred]) </DeepExtract> self.scope = tf.get_variable_scope().name
def __init__(self, name, ob_space, ac_space, kind='large'): with tf.variable_scope(name): assert isinstance(ob_space, gym.spaces.Box) self.pdtype = pdtype = make_pdtype(ac_space) sequence_length = None ob = U.get_placeholder(name='ob', dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape)) x = ob / 255.0 if kind == 'small': x = tf.nn.relu(U.conv2d(x, 16, 'l1', [8, 8], [4, 4], pad='VALID')) x = tf.nn.relu(U.conv2d(x, 32, 'l2', [4, 4], [2, 2], pad='VALID')) x = U.flattenallbut0(x) x = tf.nn.relu(tf.layers.dense(x, 256, name='lin', kernel_initializer=U.normc_initializer(1.0))) elif kind == 'large': x = tf.nn.relu(U.conv2d(x, 32, 'l1', [8, 8], [4, 4], pad='VALID')) x = tf.nn.relu(U.conv2d(x, 64, 'l2', [4, 4], [2, 2], pad='VALID')) x = tf.nn.relu(U.conv2d(x, 64, 'l3', [3, 3], [1, 1], pad='VALID')) x = U.flattenallbut0(x) x = tf.nn.relu(tf.layers.dense(x, 512, name='lin', kernel_initializer=U.normc_initializer(1.0))) else: raise NotImplementedError logits = tf.layers.dense(x, pdtype.param_shape()[0], name='logits', kernel_initializer=U.normc_initializer(0.01)) self.pd = pdtype.pdfromflat(logits) self.vpred = tf.layers.dense(x, 1, name='value', kernel_initializer=U.normc_initializer(1.0))[:, 0] self.state_in = [] self.state_out = [] stochastic = tf.placeholder(dtype=tf.bool, shape=()) ac = self.pd.sample() self._act = U.function([stochastic, ob], [ac, self.vpred]) self.scope = tf.get_variable_scope().name
baselines
positive
@micropython.viper def flash_open(): file = 'jtagspi%08x.bit.gz' % idcode() prog_stream(open_file(file, True)) if not prog_close(): print('%s failed' % file) <DeepExtract> spi_jtag_on() jtag.hwspi.init(sck=Pin(gpio_tcknc)) bitbang_jtag_on() reset_tap() runtest_idle(1, 0) </DeepExtract> reset_tap() runtest_idle(1, 0)
@micropython.viper def flash_open(): file = 'jtagspi%08x.bit.gz' % idcode() prog_stream(open_file(file, True)) if not prog_close(): print('%s failed' % file) spi_jtag_on() jtag.hwspi.init(sck=Pin(gpio_tcknc)) bitbang_jtag_on() reset_tap() runtest_idle(1, 0) reset_tap() runtest_idle(1, 0)
esp32ecp5
positive
def parseForPreview(stream, format): """ Parses the CSV file and formats it for previewing via Slickgrid """ <DeepExtract> def getFormatArg(argName, defVal): (rows, header) = format[argName] if argName in format else defVal delimiter = getFormatArg('delimiter', ',') hasHeader = getFormatArg('hasHeader', True) rowsToKeep = getFormatArg('rowsToKeep', None) columnNames = getFormatArg('columnNames', None) or [] raw_reader = csv.reader(stream, delimiter=str(delimiter)) reader = unicodeCsvReader(raw_reader) if not hasHeader: reader = chain([[]], reader) if rowsToKeep: rows = islice(reader, int(rowsToKeep) + 1) else: rows = reader cols = izip_longest(*rows, fillvalue='') rows = izip(*cols) header = [] for (i, name) in enumerate(rows.next()): if i < len(columnNames): name = columnNames[i] or name if name: header.append(name) else: header.append('Column_%s' % (i + 1)) rows = ifilter(any, rows) (rows, header) = (list(rows), header) </DeepExtract> rows = [{x: y for (x, y) in zip(header, row)} for row in rows[0:100]] return {'status': 'success', 'rows': rows, 'header': header}
def parseForPreview(stream, format): """ Parses the CSV file and formats it for previewing via Slickgrid """ def getFormatArg(argName, defVal): (rows, header) = format[argName] if argName in format else defVal delimiter = getFormatArg('delimiter', ',') hasHeader = getFormatArg('hasHeader', True) rowsToKeep = getFormatArg('rowsToKeep', None) columnNames = getFormatArg('columnNames', None) or [] raw_reader = csv.reader(stream, delimiter=str(delimiter)) reader = unicodeCsvReader(raw_reader) if not hasHeader: reader = chain([[]], reader) if rowsToKeep: rows = islice(reader, int(rowsToKeep) + 1) else: rows = reader cols = izip_longest(*rows, fillvalue='') rows = izip(*cols) header = [] for (i, name) in enumerate(rows.next()): if i < len(columnNames): name = columnNames[i] or name if name: header.append(name) else: header.append('Column_%s' % (i + 1)) rows = ifilter(any, rows) (rows, header) = (list(rows), header) rows = [{x: y for (x, y) in zip(header, row)} for row in rows[0:100]] return {'status': 'success', 'rows': rows, 'header': header}
builder
positive
def handle_missing_properties_of_threat_actor(threat_actor_instance, threat_actor): (container, extension_definition_id) = determine_container_for_missing_properties('threat-actor', threat_actor_instance) if container is not None: handle_multiple_missing_statement_properties(container, threat_actor.planning_and_operational_supports, 'planning_and_operational_support', threat_actor_instance['id'], is_literal=True) if get_option_value('spec_version') == '2.0': handle_missing_confidence_property(container, threat_actor.confidence, threat_actor_instance['id']) else: <DeepExtract> if threat_actor.confidence is not None and threat_actor.confidence.value is not None: threat_actor_instance['confidence'] = convert_confidence(threat_actor.confidence, threat_actor_instance['id']) </DeepExtract> fill_in_extension_properties(threat_actor_instance, container, extension_definition_id)
def handle_missing_properties_of_threat_actor(threat_actor_instance, threat_actor): (container, extension_definition_id) = determine_container_for_missing_properties('threat-actor', threat_actor_instance) if container is not None: handle_multiple_missing_statement_properties(container, threat_actor.planning_and_operational_supports, 'planning_and_operational_support', threat_actor_instance['id'], is_literal=True) if get_option_value('spec_version') == '2.0': handle_missing_confidence_property(container, threat_actor.confidence, threat_actor_instance['id']) else: if threat_actor.confidence is not None and threat_actor.confidence.value is not None: threat_actor_instance['confidence'] = convert_confidence(threat_actor.confidence, threat_actor_instance['id']) fill_in_extension_properties(threat_actor_instance, container, extension_definition_id)
cti-stix-elevator
positive
def sleep(self): """ Sleep between retry attempts using an exponential backoff. By default, the backoff factor is 0 and this method will return immediately. """ <DeepExtract> if self._observed_errors <= 1: backoff = 0 backoff_value = self.backoff_factor * 2 ** (self._observed_errors - 1) backoff = min(self.BACKOFF_MAX, backoff_value) </DeepExtract> if backoff <= 0: return time.sleep(backoff)
def sleep(self): """ Sleep between retry attempts using an exponential backoff. By default, the backoff factor is 0 and this method will return immediately. """ if self._observed_errors <= 1: backoff = 0 backoff_value = self.backoff_factor * 2 ** (self._observed_errors - 1) backoff = min(self.BACKOFF_MAX, backoff_value) if backoff <= 0: return time.sleep(backoff)
alfred-rates
positive
@override_rest_registration_settings({'USER_VERIFICATION_ID_FIELD': 'username'}) def test_register_with_username_as_verification_id_ok(self): <DeepExtract> username = 'testusername' email = 'testusername@example.com' if password_confirm is None: password_confirm = 'testpassword' data = {'username': username, 'password': 'testpassword', 'password_confirm': password_confirm, 'email': email} data.update(options) data = data </DeepExtract> request = self.create_post_request(data) with self.assert_one_mail_sent() as sent_emails, self.timer() as timer: response = self.view_func(request) self.assert_valid_response(response, status.HTTP_201_CREATED) user_id = response.data['id'] user = self.user_class.objects.get(id=user_id) self.assertEqual(user.username, data['username']) self.assertTrue(user.check_password(data['password'])) self.assertFalse(user.is_active) sent_email = sent_emails[0] self.assertEqual(sent_email.from_email, VERIFICATION_FROM_EMAIL) self.assertListEqual(sent_email.to, [data['email']]) url = self.assert_one_url_line_in_text(sent_email.body) verification_data = self.assert_valid_verification_url(url, expected_path=REGISTER_VERIFICATION_URL, expected_fields={'signature', 'user_id', 'timestamp'}) user_verification_id = verification_data['user_id'] self.assertEqual(user_verification_id, user.username) url_sig_timestamp = int(verification_data['timestamp']) self.assertGreaterEqual(url_sig_timestamp, timer.start_time) self.assertLessEqual(url_sig_timestamp, timer.end_time) signer = RegisterSigner(verification_data) signer.verify()
@override_rest_registration_settings({'USER_VERIFICATION_ID_FIELD': 'username'}) def test_register_with_username_as_verification_id_ok(self): username = 'testusername' email = 'testusername@example.com' if password_confirm is None: password_confirm = 'testpassword' data = {'username': username, 'password': 'testpassword', 'password_confirm': password_confirm, 'email': email} data.update(options) data = data request = self.create_post_request(data) with self.assert_one_mail_sent() as sent_emails, self.timer() as timer: response = self.view_func(request) self.assert_valid_response(response, status.HTTP_201_CREATED) user_id = response.data['id'] user = self.user_class.objects.get(id=user_id) self.assertEqual(user.username, data['username']) self.assertTrue(user.check_password(data['password'])) self.assertFalse(user.is_active) sent_email = sent_emails[0] self.assertEqual(sent_email.from_email, VERIFICATION_FROM_EMAIL) self.assertListEqual(sent_email.to, [data['email']]) url = self.assert_one_url_line_in_text(sent_email.body) verification_data = self.assert_valid_verification_url(url, expected_path=REGISTER_VERIFICATION_URL, expected_fields={'signature', 'user_id', 'timestamp'}) user_verification_id = verification_data['user_id'] self.assertEqual(user_verification_id, user.username) url_sig_timestamp = int(verification_data['timestamp']) self.assertGreaterEqual(url_sig_timestamp, timer.start_time) self.assertLessEqual(url_sig_timestamp, timer.end_time) signer = RegisterSigner(verification_data) signer.verify()
django-rest-registration
positive
def add_column(self, fieldname, column, align='c', valign='t'): """Add a column to the Table. Arguments: fieldname - name of the field to contain the new column of data column - column of data, should be a list with as many elements as the Table has rows align - desired alignment for this column - "l" for left, "c" for centre and "r" for right valign - desired vertical alignment for new columns - "t" for top, "m" for middle and "b" for bottom""" if len(self._rows) in (0, len(column)): <DeepExtract> try: assert align in ['l', 'c', 'r'] except AssertionError: raise Exception('Alignment %s is invalid, use l, c or r!' % align) </DeepExtract> <DeepExtract> try: assert valign in ['t', 'm', 'b', None] except AssertionError: raise Exception('Alignment %s is invalid, use t, m, b or None!' % valign) </DeepExtract> self._field_names.append(fieldname) self._align[fieldname] = align self._valign[fieldname] = valign for i in range(0, len(column)): if len(self._rows) < i + 1: self._rows.append([]) self._rows[i].append(column[i]) else: raise Exception('Column length %d does not match number of rows %d!' % (len(column), len(self._rows)))
def add_column(self, fieldname, column, align='c', valign='t'): """Add a column to the Table. Arguments: fieldname - name of the field to contain the new column of data column - column of data, should be a list with as many elements as the Table has rows align - desired alignment for this column - "l" for left, "c" for centre and "r" for right valign - desired vertical alignment for new columns - "t" for top, "m" for middle and "b" for bottom""" if len(self._rows) in (0, len(column)): try: assert align in ['l', 'c', 'r'] except AssertionError: raise Exception('Alignment %s is invalid, use l, c or r!' % align) try: assert valign in ['t', 'm', 'b', None] except AssertionError: raise Exception('Alignment %s is invalid, use t, m, b or None!' % valign) self._field_names.append(fieldname) self._align[fieldname] = align self._valign[fieldname] = valign for i in range(0, len(column)): if len(self._rows) < i + 1: self._rows.append([]) self._rows[i].append(column[i]) else: raise Exception('Column length %d does not match number of rows %d!' % (len(column), len(self._rows)))
C--Compiler
positive
def test_assignment_page(self): <DeepExtract> client = Client() client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD) client = client </DeepExtract> response = client.post('/course/1/assignment/1') self.assertEqual(response.status_code, 200) self.assertIn(b'Assignment #1', response.content)
def test_assignment_page(self): client = Client() client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD) client = client response = client.post('/course/1/assignment/1') self.assertEqual(response.status_code, 200) self.assertIn(b'Assignment #1', response.content)
academicstoday-django
positive
def get_absolute_path_for_rc(file_path): WINE_DEFAULT_DRIVE_LETTER = 'z:' <DeepExtract> [is_relative, file_path] = strip_blender_path_prefix(file_path) if is_relative: blend_file_path = os.path.dirname(bpy.data.filepath) file_path = '{}/{}'.format(blend_file_path, file_path) file_path = os.path.abspath(file_path) </DeepExtract> if sys.platform != 'win32': file_path = '{}{}'.format(WINE_DEFAULT_DRIVE_LETTER, file_path) return file_path
def get_absolute_path_for_rc(file_path): WINE_DEFAULT_DRIVE_LETTER = 'z:' [is_relative, file_path] = strip_blender_path_prefix(file_path) if is_relative: blend_file_path = os.path.dirname(bpy.data.filepath) file_path = '{}/{}'.format(blend_file_path, file_path) file_path = os.path.abspath(file_path) if sys.platform != 'win32': file_path = '{}{}'.format(WINE_DEFAULT_DRIVE_LETTER, file_path) return file_path
BCRYExporter
positive
def describe_vpcs(connection, module): """ Describe VPCs. connection : boto3 client connection object module : AnsibleAWSModule object """ filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) vpc_ids = module.params.get('vpc_ids') vpc_info = list() try: response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Unable to describe VPCs {0}'.format(vpc_ids)) dns_support = {} dns_hostnames = {} for vpc in response['Vpcs']: error_message = 'Unable to describe VPC attribute {0} on VPC {1}' <DeepExtract> result = None try: dns_support = connection.describe_vpc_attribute(VpcId=vpc['VpcId'], Attribute='enableDnsSupport', aws_retry=True) except is_boto3_error_code('InvalidVpcID.NotFound'): module.warn(error_message.format('enableDnsSupport', vpc['VpcId'])) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg=error_message.format('enableDnsSupport', vpc['VpcId'])) dns_support = result </DeepExtract> <DeepExtract> result = None try: dns_hostnames = connection.describe_vpc_attribute(VpcId=vpc['VpcId'], Attribute='enableDnsHostnames', aws_retry=True) except is_boto3_error_code('InvalidVpcID.NotFound'): module.warn(error_message.format('enableDnsHostnames', vpc['VpcId'])) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg=error_message.format('enableDnsHostnames', vpc['VpcId'])) dns_hostnames = result </DeepExtract> if dns_support: vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value') if dns_hostnames: vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value') vpc['id'] = vpc['VpcId'] vpc_info.append(camel_dict_to_snake_dict(vpc)) vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', [])) module.exit_json(vpcs=vpc_info)
def describe_vpcs(connection, module): """ Describe VPCs. connection : boto3 client connection object module : AnsibleAWSModule object """ filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) vpc_ids = module.params.get('vpc_ids') vpc_info = list() try: response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Unable to describe VPCs {0}'.format(vpc_ids)) dns_support = {} dns_hostnames = {} for vpc in response['Vpcs']: error_message = 'Unable to describe VPC attribute {0} on VPC {1}' result = None try: dns_support = connection.describe_vpc_attribute(VpcId=vpc['VpcId'], Attribute='enableDnsSupport', aws_retry=True) except is_boto3_error_code('InvalidVpcID.NotFound'): module.warn(error_message.format('enableDnsSupport', vpc['VpcId'])) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg=error_message.format('enableDnsSupport', vpc['VpcId'])) dns_support = result result = None try: dns_hostnames = connection.describe_vpc_attribute(VpcId=vpc['VpcId'], Attribute='enableDnsHostnames', aws_retry=True) except is_boto3_error_code('InvalidVpcID.NotFound'): module.warn(error_message.format('enableDnsHostnames', vpc['VpcId'])) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg=error_message.format('enableDnsHostnames', vpc['VpcId'])) dns_hostnames = result if dns_support: vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value') if dns_hostnames: vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value') vpc['id'] = vpc['VpcId'] vpc_info.append(camel_dict_to_snake_dict(vpc)) vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', [])) module.exit_json(vpcs=vpc_info)
amazon.aws
positive
@classmethod def from_tuples(cls, fieldname, value): """ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. Supports constructing :class:`~urllib3.fields.RequestField` from parameter of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type) tuple where the MIME type is optional. For example:: 'foo': 'bar', 'fakefile': ('foofile.txt', 'contents of foofile'), 'realfile': ('barfile.txt', open('realfile').read()), 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), 'nonamefile': 'contents of nonamefile field', Field names and filenames must be unicode. """ if isinstance(value, tuple): if len(value) == 3: (filename, data, content_type) = value else: (filename, data) = value <DeepExtract> if filename: content_type = mimetypes.guess_type(filename)[0] or default content_type = default </DeepExtract> else: filename = None content_type = None data = value request_param = cls(fieldname, data, filename=filename) request_param.make_multipart(content_type=content_type) return request_param
@classmethod def from_tuples(cls, fieldname, value): """ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters. Supports constructing :class:`~urllib3.fields.RequestField` from parameter of key/value strings AND key/filetuple. A filetuple is a (filename, data, MIME type) tuple where the MIME type is optional. For example:: 'foo': 'bar', 'fakefile': ('foofile.txt', 'contents of foofile'), 'realfile': ('barfile.txt', open('realfile').read()), 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'), 'nonamefile': 'contents of nonamefile field', Field names and filenames must be unicode. """ if isinstance(value, tuple): if len(value) == 3: (filename, data, content_type) = value else: (filename, data) = value if filename: content_type = mimetypes.guess_type(filename)[0] or default content_type = default else: filename = None content_type = None data = value request_param = cls(fieldname, data, filename=filename) request_param.make_multipart(content_type=content_type) return request_param
Alexa-MyQGarage
positive
def update_face_annotation(request: FaceAnnotationUpdateRequest, http_request: HttpRequest) -> bool: annotation = FaceRecognitionRectangle.objects.get(pk=request.annotation_id) is_annotation_editable = object_annotation_utils.is_face_annotation_editable(request.user_id, annotation) if not is_annotation_editable: return create_user_feeback(annotation, request) else: if request.new_subject_id > 0: new_subject = Album.objects.get(pk=request.new_subject_id) annotation.subject_consensus = new_subject <DeepExtract> try: user_suggestion = FaceRecognitionUserSuggestion.objects.get(rectangle=annotation, user_id=request.user_id) except FaceRecognitionUserSuggestion.DoesNotExist: user_suggestion = None </DeepExtract> if user_suggestion is not None: user_suggestion.subject_album = new_subject user_suggestion.save() if request.user_id == annotation.user_id: <DeepExtract> user_suggestion = get_existing_user_additional_data_suggestion(proposer=AddSubjectData.get_profile(http_request), annotation_id=annotation.id) has_age_suggestion = request.new_age_suggestion and request.new_age_suggestion != AGE_NOT_SURE has_gender_suggestion = request.new_gender_suggestion is not None and request.new_gender_suggestion != GENDER_NOT_SURE if user_suggestion is None and (has_age_suggestion or has_gender_suggestion): add_additional_subject_data = AddAdditionalSubjectData(subject_rectangle_id=annotation.id, age=request.new_age_suggestion, gender=request.new_gender_suggestion) AddSubjectData.add_subject_data(add_additional_subject_data, http_request) elif user_suggestion.gender != request.new_gender_suggestion or user_suggestion.age != request.new_age_suggestion: user_suggestion.age = request.new_age_suggestion user_suggestion.gender = request.new_gender_suggestion user_suggestion.save() </DeepExtract> annotation.save() if annotation.photo.first_annotation is None: annotation.photo.first_annotation = annotation.modified annotation.photo.latest_annotation = annotation.modified annotation.photo.light_save() return True
def update_face_annotation(request: FaceAnnotationUpdateRequest, http_request: HttpRequest) -> bool: annotation = FaceRecognitionRectangle.objects.get(pk=request.annotation_id) is_annotation_editable = object_annotation_utils.is_face_annotation_editable(request.user_id, annotation) if not is_annotation_editable: return create_user_feeback(annotation, request) else: if request.new_subject_id > 0: new_subject = Album.objects.get(pk=request.new_subject_id) annotation.subject_consensus = new_subject try: user_suggestion = FaceRecognitionUserSuggestion.objects.get(rectangle=annotation, user_id=request.user_id) except FaceRecognitionUserSuggestion.DoesNotExist: user_suggestion = None if user_suggestion is not None: user_suggestion.subject_album = new_subject user_suggestion.save() if request.user_id == annotation.user_id: user_suggestion = get_existing_user_additional_data_suggestion(proposer=AddSubjectData.get_profile(http_request), annotation_id=annotation.id) has_age_suggestion = request.new_age_suggestion and request.new_age_suggestion != AGE_NOT_SURE has_gender_suggestion = request.new_gender_suggestion is not None and request.new_gender_suggestion != GENDER_NOT_SURE if user_suggestion is None and (has_age_suggestion or has_gender_suggestion): add_additional_subject_data = AddAdditionalSubjectData(subject_rectangle_id=annotation.id, age=request.new_age_suggestion, gender=request.new_gender_suggestion) AddSubjectData.add_subject_data(add_additional_subject_data, http_request) elif user_suggestion.gender != request.new_gender_suggestion or user_suggestion.age != request.new_age_suggestion: user_suggestion.age = request.new_age_suggestion user_suggestion.gender = request.new_gender_suggestion user_suggestion.save() annotation.save() if annotation.photo.first_annotation is None: annotation.photo.first_annotation = annotation.modified annotation.photo.latest_annotation = annotation.modified annotation.photo.light_save() return True
ajapaik-web
positive
def test_email_partner_no_invoice_config(self): sender_email = self.partner_with_email.email <DeepExtract> file_name = 'unknown_invoice.pdf' file_path = 'account_invoice_import/tests/pdf/%s' % file_name with file_open(file_path, 'rb') as f: pdf_file = f.read() msg_dict = {'email_from': '"My supplier" <%s>' % sender_email, 'to': self.company.invoice_import_email, 'subject': 'Invoice n°1242', 'body': 'Please find enclosed your PDF invoice', 'message_id': '<v0214040cad98743824@foo.com>', 'attachments': [self.env['mail.thread']._Attachment(file_name, pdf_file, {})]} msg_dict = msg_dict </DeepExtract> self.env['account.invoice.import'].message_new(msg_dict) move = self.env['account.move'].search([('company_id', '=', self.company.id), ('move_type', '=', 'in_invoice'), ('partner_id', '=', self.partner_with_email.id), ('state', '=', 'draft')]) self.assertEqual(len(move), 1) self.assertTrue(self.company.currency_id.is_zero(move.amount_total)) self.assertFalse(move.invoice_date) self.assertFalse(move.invoice_line_ids)
def test_email_partner_no_invoice_config(self): sender_email = self.partner_with_email.email file_name = 'unknown_invoice.pdf' file_path = 'account_invoice_import/tests/pdf/%s' % file_name with file_open(file_path, 'rb') as f: pdf_file = f.read() msg_dict = {'email_from': '"My supplier" <%s>' % sender_email, 'to': self.company.invoice_import_email, 'subject': 'Invoice n°1242', 'body': 'Please find enclosed your PDF invoice', 'message_id': '<v0214040cad98743824@foo.com>', 'attachments': [self.env['mail.thread']._Attachment(file_name, pdf_file, {})]} msg_dict = msg_dict self.env['account.invoice.import'].message_new(msg_dict) move = self.env['account.move'].search([('company_id', '=', self.company.id), ('move_type', '=', 'in_invoice'), ('partner_id', '=', self.partner_with_email.id), ('state', '=', 'draft')]) self.assertEqual(len(move), 1) self.assertTrue(self.company.currency_id.is_zero(move.amount_total)) self.assertFalse(move.invoice_date) self.assertFalse(move.invoice_line_ids)
edi
positive
def test_get_builtin_by_name(self): """Test get_func_by_name on builtin functions.""" for f in [bool, int, float, str, tuple, list, set, dict, all]: <DeepExtract> self.assertTrue(hasattr(f, '__name__'), f) res = self.get_func_by_name(f.__name__) self.check_get_func_by_name_res(f, res, exact_match) if sys.version_info >= (3, 3): res = self.get_func_by_name(f.__qualname__) self.check_get_func_by_name_res(f, res, exact_match) res = self.get_func_by_name(self.pyobject_function_str(f)) self.check_get_func_by_name_res(f, res, exact_match) </DeepExtract> for f in [object]: <DeepExtract> self.assertTrue(hasattr(f, '__name__'), f) res = self.get_func_by_name(f.__name__) self.check_get_func_by_name_res(f, res, False) if sys.version_info >= (3, 3): res = self.get_func_by_name(f.__qualname__) self.check_get_func_by_name_res(f, res, False) res = self.get_func_by_name(self.pyobject_function_str(f)) self.check_get_func_by_name_res(f, res, False) </DeepExtract>
def test_get_builtin_by_name(self): """Test get_func_by_name on builtin functions.""" for f in [bool, int, float, str, tuple, list, set, dict, all]: self.assertTrue(hasattr(f, '__name__'), f) res = self.get_func_by_name(f.__name__) self.check_get_func_by_name_res(f, res, exact_match) if sys.version_info >= (3, 3): res = self.get_func_by_name(f.__qualname__) self.check_get_func_by_name_res(f, res, exact_match) res = self.get_func_by_name(self.pyobject_function_str(f)) self.check_get_func_by_name_res(f, res, exact_match) for f in [object]: self.assertTrue(hasattr(f, '__name__'), f) res = self.get_func_by_name(f.__name__) self.check_get_func_by_name_res(f, res, False) if sys.version_info >= (3, 3): res = self.get_func_by_name(f.__qualname__) self.check_get_func_by_name_res(f, res, False) res = self.get_func_by_name(self.pyobject_function_str(f)) self.check_get_func_by_name_res(f, res, False) </DeepExtract>
DidYouMean-Python
positive
def updateAImodelSettings(self, project, settings): """ Updates the project's AI model settings. Verifies whether the specified AI and ranking model libraries exist on this setup of AIDE. Raises an exception otherwise. Also tries to verify any model options provided with the model's built-in function (if present and implemented). Returns warnings, errors, etc. about that. """ availableModels = self.getAvailableAImodels()['models'] projImmutables = self.dbConn.execute('\n SELECT annotationtype, predictiontype\n FROM aide_admin.project\n WHERE shortname = %s;\n ', (project,), 1) projImmutables = projImmutables[0] annoType = projImmutables['annotationtype'] predType = projImmutables['predictiontype'] fieldNames = [('ai_model_enabled', bool), ('ai_model_library', str), ('ai_alcriterion_library', str), ('numimages_autotrain', int), ('minnumannoperimage', int), ('maxnumimages_train', int), ('maxnumimages_inference', int), ('inference_chunk_size', int), ('max_num_concurrent_tasks', int), ('segmentation_ignore_unlabeled', bool)] (settings_new, settingsKeys_new) = parse_parameters(settings, fieldNames, absent_ok=True, escape=True, none_ok=True) addBackgroundClass = False forceDisableAImodel = False for (idx, key) in enumerate(settingsKeys_new): if key == 'ai_model_library': modelLib = settings_new[idx] if modelLib is None or len(modelLib.strip()) == 0: forceDisableAImodel = True else: if not modelLib in availableModels['prediction']: raise Exception(f'Model library "{modelLib}" is not installed in this instance of AIDE.') selectedModel = availableModels['prediction'][modelLib] validAnnoTypes = [selectedModel['annotationType']] if isinstance(selectedModel['annotationType'], str) else selectedModel['annotationType'] validPredTypes = [selectedModel['predictionType']] if isinstance(selectedModel['predictionType'], str) else selectedModel['predictionType'] if not annoType in validAnnoTypes: raise Exception(f'Model "{modelLib}" does not support annotations of type "{annoType}".') if not predType in validPredTypes: raise Exception(f'Model "{modelLib}" does not support predictions of type "{predType}".') elif key == 'ai_model_settings': continue elif key == 'ai_alcriterion_library': modelLib = settings_new[idx] if modelLib is None or len(modelLib.strip()) == 0: forceDisableAImodel = True elif not modelLib in availableModels['ranking']: raise Exception(f'Ranking library "{modelLib}" is not installed in this instance of AIDE.') elif key == 'ai_alcriterion_settings': pass elif key == 'segmentation_ignore_unlabeled': if annoType == 'segmentationMasks' and settings_new[idx] is False: addBackgroundClass = True if forceDisableAImodel: flagFound = False for (idx, key) in enumerate(settingsKeys_new): if key == 'ai_model_enabled': settings_new[idx] = False flagFound = True break if not flagFound: settings_new.append(False) settingsKeys_new.append('ai_model_enabled') settings_new.append(project) queryStr = sql.SQL('UPDATE aide_admin.project\n SET\n {}\n WHERE shortname = %s;\n ').format(sql.SQL(',').join([sql.SQL('{} = %s'.format(item)) for item in settingsKeys_new])) self.dbConn.execute(queryStr, tuple(settings_new), None) if addBackgroundClass: labelClasses = self.dbConn.execute(sql.SQL('\n SELECT * FROM {id_lc}\n ').format(id_lc=sql.Identifier(project, 'labelclass')), None, 'all') hasBackground = False for lc in labelClasses: if lc['idx'] == 0: hasBackground = True break if not hasBackground: lcNames = set([lc['name'] for lc in labelClasses]) bgName = 'background' counter = 0 while bgName in lcNames: bgName = f'background ({counter})' counter += 1 self.dbConn.execute(sql.SQL('\n INSERT INTO {id_lc} (name, idx, hidden)\n VALUES (%s, 0, true)\n ').format(id_lc=sql.Identifier(project, 'labelclass')), (bgName,), None) response = {'status': 0} if 'ai_model_settings' in settings: <DeepExtract> optionsVerification = self.verifyAImodelOptions(project, settings['ai_model_settings']) if optionsVerification['valid']: if isinstance(settings['ai_model_settings'], dict): settings['ai_model_settings'] = json.dumps(settings['ai_model_settings']) self.dbConn.execute('\n UPDATE aide_admin.project\n SET ai_model_settings = %s\n WHERE shortname = %s;\n ', (settings['ai_model_settings'], project), None) else: optionsVerification['errors'].append('Model options have not passed verification and where therefore not saved.') aiModelOptionsStatus = optionsVerification </DeepExtract> response['ai_model_settings_status'] = aiModelOptionsStatus return response
def updateAImodelSettings(self, project, settings): """ Updates the project's AI model settings. Verifies whether the specified AI and ranking model libraries exist on this setup of AIDE. Raises an exception otherwise. Also tries to verify any model options provided with the model's built-in function (if present and implemented). Returns warnings, errors, etc. about that. """ availableModels = self.getAvailableAImodels()['models'] projImmutables = self.dbConn.execute('\n SELECT annotationtype, predictiontype\n FROM aide_admin.project\n WHERE shortname = %s;\n ', (project,), 1) projImmutables = projImmutables[0] annoType = projImmutables['annotationtype'] predType = projImmutables['predictiontype'] fieldNames = [('ai_model_enabled', bool), ('ai_model_library', str), ('ai_alcriterion_library', str), ('numimages_autotrain', int), ('minnumannoperimage', int), ('maxnumimages_train', int), ('maxnumimages_inference', int), ('inference_chunk_size', int), ('max_num_concurrent_tasks', int), ('segmentation_ignore_unlabeled', bool)] (settings_new, settingsKeys_new) = parse_parameters(settings, fieldNames, absent_ok=True, escape=True, none_ok=True) addBackgroundClass = False forceDisableAImodel = False for (idx, key) in enumerate(settingsKeys_new): if key == 'ai_model_library': modelLib = settings_new[idx] if modelLib is None or len(modelLib.strip()) == 0: forceDisableAImodel = True else: if not modelLib in availableModels['prediction']: raise Exception(f'Model library "{modelLib}" is not installed in this instance of AIDE.') selectedModel = availableModels['prediction'][modelLib] validAnnoTypes = [selectedModel['annotationType']] if isinstance(selectedModel['annotationType'], str) else selectedModel['annotationType'] validPredTypes = [selectedModel['predictionType']] if isinstance(selectedModel['predictionType'], str) else selectedModel['predictionType'] if not annoType in validAnnoTypes: raise Exception(f'Model "{modelLib}" does not support annotations of type "{annoType}".') if not predType in validPredTypes: raise Exception(f'Model "{modelLib}" does not support predictions of type "{predType}".') elif key == 'ai_model_settings': continue elif key == 'ai_alcriterion_library': modelLib = settings_new[idx] if modelLib is None or len(modelLib.strip()) == 0: forceDisableAImodel = True elif not modelLib in availableModels['ranking']: raise Exception(f'Ranking library "{modelLib}" is not installed in this instance of AIDE.') elif key == 'ai_alcriterion_settings': pass elif key == 'segmentation_ignore_unlabeled': if annoType == 'segmentationMasks' and settings_new[idx] is False: addBackgroundClass = True if forceDisableAImodel: flagFound = False for (idx, key) in enumerate(settingsKeys_new): if key == 'ai_model_enabled': settings_new[idx] = False flagFound = True break if not flagFound: settings_new.append(False) settingsKeys_new.append('ai_model_enabled') settings_new.append(project) queryStr = sql.SQL('UPDATE aide_admin.project\n SET\n {}\n WHERE shortname = %s;\n ').format(sql.SQL(',').join([sql.SQL('{} = %s'.format(item)) for item in settingsKeys_new])) self.dbConn.execute(queryStr, tuple(settings_new), None) if addBackgroundClass: labelClasses = self.dbConn.execute(sql.SQL('\n SELECT * FROM {id_lc}\n ').format(id_lc=sql.Identifier(project, 'labelclass')), None, 'all') hasBackground = False for lc in labelClasses: if lc['idx'] == 0: hasBackground = True break if not hasBackground: lcNames = set([lc['name'] for lc in labelClasses]) bgName = 'background' counter = 0 while bgName in lcNames: bgName = f'background ({counter})' counter += 1 self.dbConn.execute(sql.SQL('\n INSERT INTO {id_lc} (name, idx, hidden)\n VALUES (%s, 0, true)\n ').format(id_lc=sql.Identifier(project, 'labelclass')), (bgName,), None) response = {'status': 0} if 'ai_model_settings' in settings: optionsVerification = self.verifyAImodelOptions(project, settings['ai_model_settings']) if optionsVerification['valid']: if isinstance(settings['ai_model_settings'], dict): settings['ai_model_settings'] = json.dumps(settings['ai_model_settings']) self.dbConn.execute('\n UPDATE aide_admin.project\n SET ai_model_settings = %s\n WHERE shortname = %s;\n ', (settings['ai_model_settings'], project), None) else: optionsVerification['errors'].append('Model options have not passed verification and where therefore not saved.') aiModelOptionsStatus = optionsVerification response['ai_model_settings_status'] = aiModelOptionsStatus return response
aerial_wildlife_detection
positive
def find_drop_in_files(self, unit): """ search for some.service.d/extra.conf files """ result = {} basename_d = unit + '.d' for folder in self.sysd_folders(): if not folder: continue <DeepExtract> if not self._root: folder = folder if not folder: folder = folder if is_good_root(self._root) and folder.startswith(self._root): folder = folder while folder.startswith(os.path.sep): folder = folder[1:] folder = os.path.join(self._root, folder) </DeepExtract> <DeepExtract> if not folder: override_d = basename_d if not basename_d: override_d = basename_d if is_good_root(folder) and basename_d.startswith(folder): override_d = basename_d while basename_d.startswith(os.path.sep): basename_d = basename_d[1:] override_d = os.path.join(folder, basename_d) </DeepExtract> if not os.path.isdir(override_d): continue for name in os.listdir(override_d): path = os.path.join(override_d, name) if os.path.isdir(path): continue if not path.endswith('.conf'): continue if name not in result: result[name] = path return result
def find_drop_in_files(self, unit): """ search for some.service.d/extra.conf files """ result = {} basename_d = unit + '.d' for folder in self.sysd_folders(): if not folder: continue if not self._root: folder = folder if not folder: folder = folder if is_good_root(self._root) and folder.startswith(self._root): folder = folder while folder.startswith(os.path.sep): folder = folder[1:] folder = os.path.join(self._root, folder) if not folder: override_d = basename_d if not basename_d: override_d = basename_d if is_good_root(folder) and basename_d.startswith(folder): override_d = basename_d while basename_d.startswith(os.path.sep): basename_d = basename_d[1:] override_d = os.path.join(folder, basename_d) if not os.path.isdir(override_d): continue for name in os.listdir(override_d): path = os.path.join(override_d, name) if os.path.isdir(path): continue if not path.endswith('.conf'): continue if name not in result: result[name] = path return result
docker-systemctl-images
positive
def run(self, argv, dependencies): self.parse_args(argv) cocos.Logging.info(MultiLanguage.get_string('DEPLOY_INFO_MODE_FMT', self._mode)) <DeepExtract> if not self._platforms.is_ios_active(): return compile_dep = dependencies['compile'] self._iosapp_path = compile_dep._iosapp_path self._use_sdk = compile_dep.use_sdk </DeepExtract> <DeepExtract> if not self._platforms.is_mac_active(): return compile_dep = dependencies['compile'] self._macapp_path = compile_dep._macapp_path self.target_name = compile_dep.target_name </DeepExtract> <DeepExtract> if not self._platforms.is_android_active(): return cocos.Logging.info(MultiLanguage.get_string('DEPLOY_INFO_INSTALLING_APK')) compile_dep = dependencies['compile'] self.package = compile_dep.android_package self.activity = compile_dep.android_activity apk_path = compile_dep.apk_path sdk_root = cocos.check_environment_variable('ANDROID_SDK_ROOT') adb_path = cocos.CMDRunner.convert_path_to_cmd(os.path.join(sdk_root, 'platform-tools', 'adb')) if not self._no_uninstall: if cocos.app_is_installed(adb_path, self.package): adb_uninstall = '%s uninstall %s' % (adb_path, self.package) self._run_cmd(adb_uninstall) adb_install = '%s install -r "%s"' % (adb_path, apk_path) self._run_cmd(adb_install) </DeepExtract> <DeepExtract> if not self._platforms.is_web_active(): return compile_dep = dependencies['compile'] self.sub_url = compile_dep.sub_url self.run_root = compile_dep.run_root </DeepExtract> <DeepExtract> if not self._platforms.is_win32_active(): return compile_dep = dependencies['compile'] self.run_root = compile_dep.run_root self.project_name = compile_dep.project_name </DeepExtract> <DeepExtract> if not self._platforms.is_linux_active(): return compile_dep = dependencies['compile'] self.run_root = compile_dep.run_root self.project_name = compile_dep.project_name </DeepExtract> <DeepExtract> if not self._platforms.is_tizen_active(): return tizen_proj_path = self._platforms.project_path() from xml.dom import minidom doc = minidom.parse(os.path.join(tizen_proj_path, 'tizen-manifest.xml')) self.tizen_packageid = doc.getElementsByTagName('manifest')[0].getAttribute('package') tizen_studio_path = cocos.check_environment_variable('TIZEN_STUDIO_HOME') tizen_cmd_path = cocos.CMDRunner.convert_path_to_cmd(os.path.join(tizen_studio_path, 'tools', 'ide', 'bin', 'tizen')) if not self._no_uninstall: try: uninstall_cmd = '%s uninstall -p %s' % (tizen_cmd_path, self.tizen_packageid) self._run_cmd(uninstall_cmd) except Exception: pass compile_dep = dependencies['compile'] pkg_path = compile_dep.tizen_pkg_path (pkg_dir, pkg_file_name) = os.path.split(pkg_path) install_cmd = '%s install -- "%s" -n "%s"' % (tizen_cmd_path, pkg_dir, pkg_file_name) self._run_cmd(install_cmd) </DeepExtract>
def run(self, argv, dependencies): self.parse_args(argv) cocos.Logging.info(MultiLanguage.get_string('DEPLOY_INFO_MODE_FMT', self._mode)) if not self._platforms.is_ios_active(): return compile_dep = dependencies['compile'] self._iosapp_path = compile_dep._iosapp_path self._use_sdk = compile_dep.use_sdk if not self._platforms.is_mac_active(): return compile_dep = dependencies['compile'] self._macapp_path = compile_dep._macapp_path self.target_name = compile_dep.target_name if not self._platforms.is_android_active(): return cocos.Logging.info(MultiLanguage.get_string('DEPLOY_INFO_INSTALLING_APK')) compile_dep = dependencies['compile'] self.package = compile_dep.android_package self.activity = compile_dep.android_activity apk_path = compile_dep.apk_path sdk_root = cocos.check_environment_variable('ANDROID_SDK_ROOT') adb_path = cocos.CMDRunner.convert_path_to_cmd(os.path.join(sdk_root, 'platform-tools', 'adb')) if not self._no_uninstall: if cocos.app_is_installed(adb_path, self.package): adb_uninstall = '%s uninstall %s' % (adb_path, self.package) self._run_cmd(adb_uninstall) adb_install = '%s install -r "%s"' % (adb_path, apk_path) self._run_cmd(adb_install) if not self._platforms.is_web_active(): return compile_dep = dependencies['compile'] self.sub_url = compile_dep.sub_url self.run_root = compile_dep.run_root if not self._platforms.is_win32_active(): return compile_dep = dependencies['compile'] self.run_root = compile_dep.run_root self.project_name = compile_dep.project_name if not self._platforms.is_linux_active(): return compile_dep = dependencies['compile'] self.run_root = compile_dep.run_root self.project_name = compile_dep.project_name if not self._platforms.is_tizen_active(): return tizen_proj_path = self._platforms.project_path() from xml.dom import minidom doc = minidom.parse(os.path.join(tizen_proj_path, 'tizen-manifest.xml')) self.tizen_packageid = doc.getElementsByTagName('manifest')[0].getAttribute('package') tizen_studio_path = cocos.check_environment_variable('TIZEN_STUDIO_HOME') tizen_cmd_path = cocos.CMDRunner.convert_path_to_cmd(os.path.join(tizen_studio_path, 'tools', 'ide', 'bin', 'tizen')) if not self._no_uninstall: try: uninstall_cmd = '%s uninstall -p %s' % (tizen_cmd_path, self.tizen_packageid) self._run_cmd(uninstall_cmd) except Exception: pass compile_dep = dependencies['compile'] pkg_path = compile_dep.tizen_pkg_path (pkg_dir, pkg_file_name) = os.path.split(pkg_path) install_cmd = '%s install -- "%s" -n "%s"' % (tizen_cmd_path, pkg_dir, pkg_file_name) self._run_cmd(install_cmd) </DeepExtract>
cocos2d-console
positive
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): """Loads a data file into a list of `InputBatch`s.""" label_map = {label: i for (i, label) in enumerate(label_list)} premise_2_tokenzed = {} hypothesis_2_tokenzed = {} list_2_tokenizedID = {} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info('Writing example %d of %d' % (ex_index, len(examples))) tokens_a = premise_2_tokenzed.get(example.text_a) if tokens_a is None: tokens_a = tokenizer.tokenize(example.text_a) premise_2_tokenzed[example.text_a] = tokens_a tokens_b = premise_2_tokenzed.get(example.text_b) if tokens_b is None: tokens_b = tokenizer.tokenize(example.text_b) hypothesis_2_tokenzed[example.text_b] = tokens_b <DeepExtract> while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_seq_length - 3: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() </DeepExtract> tokens_A = ['[CLS]'] + tokens_a + ['[SEP]'] segment_ids_A = [0] * len(tokens_A) tokens_B = tokens_b + ['[SEP]'] segment_ids_B = [1] * (len(tokens_b) + 1) tokens = tokens_A + tokens_B segment_ids = segment_ids_A + segment_ids_B input_ids_A = list_2_tokenizedID.get(' '.join(tokens_A)) if input_ids_A is None: input_ids_A = tokenizer.convert_tokens_to_ids(tokens_A) list_2_tokenizedID[' '.join(tokens_A)] = input_ids_A input_ids_B = list_2_tokenizedID.get(' '.join(tokens_B)) if input_ids_B is None: input_ids_B = tokenizer.convert_tokens_to_ids(tokens_B) list_2_tokenizedID[' '.join(tokens_B)] = input_ids_B input_ids = input_ids_A + input_ids_B input_mask = [1] * len(input_ids) padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if output_mode == 'classification': label_id = label_map[example.label] elif output_mode == 'regression': label_id = float(example.label) else: raise KeyError(output_mode) if ex_index < 5: logger.info('*** Example ***') logger.info('guid: %s' % example.guid) logger.info('tokens: %s' % ' '.join([str(x) for x in tokens])) logger.info('input_ids: %s' % ' '.join([str(x) for x in input_ids])) logger.info('input_mask: %s' % ' '.join([str(x) for x in input_mask])) logger.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])) logger.info('label: %s (id = %d)' % (example.label, label_id)) features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)) return features
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): """Loads a data file into a list of `InputBatch`s.""" label_map = {label: i for (i, label) in enumerate(label_list)} premise_2_tokenzed = {} hypothesis_2_tokenzed = {} list_2_tokenizedID = {} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info('Writing example %d of %d' % (ex_index, len(examples))) tokens_a = premise_2_tokenzed.get(example.text_a) if tokens_a is None: tokens_a = tokenizer.tokenize(example.text_a) premise_2_tokenzed[example.text_a] = tokens_a tokens_b = premise_2_tokenzed.get(example.text_b) if tokens_b is None: tokens_b = tokenizer.tokenize(example.text_b) hypothesis_2_tokenzed[example.text_b] = tokens_b while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_seq_length - 3: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() tokens_A = ['[CLS]'] + tokens_a + ['[SEP]'] segment_ids_A = [0] * len(tokens_A) tokens_B = tokens_b + ['[SEP]'] segment_ids_B = [1] * (len(tokens_b) + 1) tokens = tokens_A + tokens_B segment_ids = segment_ids_A + segment_ids_B input_ids_A = list_2_tokenizedID.get(' '.join(tokens_A)) if input_ids_A is None: input_ids_A = tokenizer.convert_tokens_to_ids(tokens_A) list_2_tokenizedID[' '.join(tokens_A)] = input_ids_A input_ids_B = list_2_tokenizedID.get(' '.join(tokens_B)) if input_ids_B is None: input_ids_B = tokenizer.convert_tokens_to_ids(tokens_B) list_2_tokenizedID[' '.join(tokens_B)] = input_ids_B input_ids = input_ids_A + input_ids_B input_mask = [1] * len(input_ids) padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if output_mode == 'classification': label_id = label_map[example.label] elif output_mode == 'regression': label_id = float(example.label) else: raise KeyError(output_mode) if ex_index < 5: logger.info('*** Example ***') logger.info('guid: %s' % example.guid) logger.info('tokens: %s' % ' '.join([str(x) for x in tokens])) logger.info('input_ids: %s' % ' '.join([str(x) for x in input_ids])) logger.info('input_mask: %s' % ' '.join([str(x) for x in input_mask])) logger.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])) logger.info('label: %s (id = %d)' % (example.label, label_id)) features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)) return features
BenchmarkingZeroShot
positive
def RepeatedHotkeyTest(windows): """Return the repeated hotkey errors""" <DeepExtract> hotkeyControls = {} allChars = '' for win in windows: if not ImplementsHotkey(win): continue (pos, char) = GetHotkey(win.WindowText()) if not char: continue hotkeyControls.setdefault(char.lower(), []).append(win) allChars += win.WindowText().lower() allChars = set(allChars) hotkeys = set(hotkeyControls.keys()) (hotkeyControls, allChars, hotkeys) = (hotkeyControls, allChars, hotkeys) </DeepExtract> dlgAvailable = allChars.difference(hotkeys) dlgAvailable.difference_update(set('-& _')) bugs = [] for (char, controls) in list(hotkeyControls.items()): if len(controls) > 1: ctrlsAvailableChars = '' for ctrl in controls: controlChars = '' controlChars = set(ctrl.WindowText().lower()) controlAvailableChars = controlChars.intersection(dlgAvailable) controlAvailableChars = '<%s>' % _SetAsString(controlAvailableChars) ctrlsAvailableChars += controlAvailableChars refCtrls = [ctrl.ref for ctrl in controls if ctrl.ref] <DeepExtract> hotkeyControls = {} allChars = '' for win in refCtrls: if not ImplementsHotkey(win): continue (pos, char) = GetHotkey(win.WindowText()) if not char: continue hotkeyControls.setdefault(char.lower(), []).append(win) allChars += win.WindowText().lower() allChars = set(allChars) hotkeys = set(hotkeyControls.keys()) (refHotkeyControls, refAllChars, refHotkeys) = (hotkeyControls, allChars, hotkeys) </DeepExtract> isInRef = -1 if len(refHotkeys) > 1: isInRef = 1 else: isInRef = 0 bugs.append((controls, {'RepeatedHotkey': char, 'CharsUsedInDialog': _SetAsString(hotkeys), 'AllCharsInDialog': _SetAsString(allChars), 'AvailableInControls': ctrlsAvailableChars}, testname, isInRef)) return bugs
def RepeatedHotkeyTest(windows): """Return the repeated hotkey errors""" hotkeyControls = {} allChars = '' for win in windows: if not ImplementsHotkey(win): continue (pos, char) = GetHotkey(win.WindowText()) if not char: continue hotkeyControls.setdefault(char.lower(), []).append(win) allChars += win.WindowText().lower() allChars = set(allChars) hotkeys = set(hotkeyControls.keys()) (hotkeyControls, allChars, hotkeys) = (hotkeyControls, allChars, hotkeys) dlgAvailable = allChars.difference(hotkeys) dlgAvailable.difference_update(set('-& _')) bugs = [] for (char, controls) in list(hotkeyControls.items()): if len(controls) > 1: ctrlsAvailableChars = '' for ctrl in controls: controlChars = '' controlChars = set(ctrl.WindowText().lower()) controlAvailableChars = controlChars.intersection(dlgAvailable) controlAvailableChars = '<%s>' % _SetAsString(controlAvailableChars) ctrlsAvailableChars += controlAvailableChars refCtrls = [ctrl.ref for ctrl in controls if ctrl.ref] hotkeyControls = {} allChars = '' for win in refCtrls: if not ImplementsHotkey(win): continue (pos, char) = GetHotkey(win.WindowText()) if not char: continue hotkeyControls.setdefault(char.lower(), []).append(win) allChars += win.WindowText().lower() allChars = set(allChars) hotkeys = set(hotkeyControls.keys()) (refHotkeyControls, refAllChars, refHotkeys) = (hotkeyControls, allChars, hotkeys) isInRef = -1 if len(refHotkeys) > 1: isInRef = 1 else: isInRef = 0 bugs.append((controls, {'RepeatedHotkey': char, 'CharsUsedInDialog': _SetAsString(hotkeys), 'AllCharsInDialog': _SetAsString(allChars), 'AvailableInControls': ctrlsAvailableChars}, testname, isInRef)) return bugs
BrowserRefresh-Sublime
positive
def cylindricalFaceSelected(selection): if len(selection.SubElementNames) == 1: subElement = selection.SubElementNames[0] if subElement.startswith('Face'): <DeepExtract> assert subElement.startswith('Face') ind = int(subElement[4:]) - 1 face = selection.Object.Shape.Faces[ind] </DeepExtract> if hasattr(face.Surface, 'Radius'): return True elif str(face.Surface).startswith('<SurfaceOfRevolution'): return True else: <DeepExtract> uv = sum([[(u, v) for u in numpy.linspace(0, 1, n_u)] for v in numpy.linspace(0, 1, n_v)], []) P = [numpy.array(face.Surface.value(u, v)) for (u, v) in uv] N = [numpy.cross(*face.Surface.tangent(u, v)) for (u, v) in uv] intersections = [] for i in range(len(N) - 1): for j in range(i + 1, len(N)): if 1 - abs(numpy.dot(N[i], N[j])) < 10 ** (-6): continue (p1_x, p1_y, p1_z) = P[i] (u1_x, u1_y, u1_z) = N[i] (p2_x, p2_y, p2_z) = P[j] (u2_x, u2_y, u2_z) = N[j] t1_t1_coef = u1_x ** 2 + u1_y ** 2 + u1_z ** 2 t1_t2_coef = -2 * u1_x * u2_x - 2 * u1_y * u2_y - 2 * u1_z * u2_z t2_t2_coef = u2_x ** 2 + u2_y ** 2 + u2_z ** 2 t1_coef = 2 * p1_x * u1_x + 2 * p1_y * u1_y + 2 * p1_z * u1_z - 2 * p2_x * u1_x - 2 * p2_y * u1_y - 2 * p2_z * u1_z t2_coef = -2 * p1_x * u2_x - 2 * p1_y * u2_y - 2 * p1_z * u2_z + 2 * p2_x * u2_x + 2 * p2_y * u2_y + 2 * p2_z * u2_z A = numpy.array([[2 * t1_t1_coef, t1_t2_coef], [t1_t2_coef, 2 * t2_t2_coef]]) b = numpy.array([t1_coef, t2_coef]) try: (t1, t2) = numpy.linalg.solve(A, -b) except numpy.linalg.LinAlgError: continue pos_t1 = P[i] + numpy.array(N[i]) * t1 pos_t2 = P[j] + N[j] * t2 intersections.append(pos_t1) intersections.append(pos_t2) if len(intersections) < 2: error = numpy.inf (axis, center, error) = (None, None, error) else: X = numpy.array(intersections) centroid = numpy.mean(X, axis=0) M = numpy.array([i - centroid for i in intersections]) A = numpy.dot(M.transpose(), M) (U, s, V) = numpy.linalg.svd(A) axis_pos = centroid axis_dir = V[0] error = s[1] (axis, center, error) = (numpyVecToFC(axis_dir), numpyVecToFC(axis_pos), error) </DeepExtract> error_normalized = error / face.BoundBox.DiagonalLength if error_normalized < 10 ** (-6): return True return False
def cylindricalFaceSelected(selection): if len(selection.SubElementNames) == 1: subElement = selection.SubElementNames[0] if subElement.startswith('Face'): assert subElement.startswith('Face') ind = int(subElement[4:]) - 1 face = selection.Object.Shape.Faces[ind] if hasattr(face.Surface, 'Radius'): return True elif str(face.Surface).startswith('<SurfaceOfRevolution'): return True else: uv = sum([[(u, v) for u in numpy.linspace(0, 1, n_u)] for v in numpy.linspace(0, 1, n_v)], []) P = [numpy.array(face.Surface.value(u, v)) for (u, v) in uv] N = [numpy.cross(*face.Surface.tangent(u, v)) for (u, v) in uv] intersections = [] for i in range(len(N) - 1): for j in range(i + 1, len(N)): if 1 - abs(numpy.dot(N[i], N[j])) < 10 ** (-6): continue (p1_x, p1_y, p1_z) = P[i] (u1_x, u1_y, u1_z) = N[i] (p2_x, p2_y, p2_z) = P[j] (u2_x, u2_y, u2_z) = N[j] t1_t1_coef = u1_x ** 2 + u1_y ** 2 + u1_z ** 2 t1_t2_coef = -2 * u1_x * u2_x - 2 * u1_y * u2_y - 2 * u1_z * u2_z t2_t2_coef = u2_x ** 2 + u2_y ** 2 + u2_z ** 2 t1_coef = 2 * p1_x * u1_x + 2 * p1_y * u1_y + 2 * p1_z * u1_z - 2 * p2_x * u1_x - 2 * p2_y * u1_y - 2 * p2_z * u1_z t2_coef = -2 * p1_x * u2_x - 2 * p1_y * u2_y - 2 * p1_z * u2_z + 2 * p2_x * u2_x + 2 * p2_y * u2_y + 2 * p2_z * u2_z A = numpy.array([[2 * t1_t1_coef, t1_t2_coef], [t1_t2_coef, 2 * t2_t2_coef]]) b = numpy.array([t1_coef, t2_coef]) try: (t1, t2) = numpy.linalg.solve(A, -b) except numpy.linalg.LinAlgError: continue pos_t1 = P[i] + numpy.array(N[i]) * t1 pos_t2 = P[j] + N[j] * t2 intersections.append(pos_t1) intersections.append(pos_t2) if len(intersections) < 2: error = numpy.inf (axis, center, error) = (None, None, error) else: X = numpy.array(intersections) centroid = numpy.mean(X, axis=0) M = numpy.array([i - centroid for i in intersections]) A = numpy.dot(M.transpose(), M) (U, s, V) = numpy.linalg.svd(A) axis_pos = centroid axis_dir = V[0] error = s[1] (axis, center, error) = (numpyVecToFC(axis_dir), numpyVecToFC(axis_pos), error) error_normalized = error / face.BoundBox.DiagonalLength if error_normalized < 10 ** (-6): return True return False
A2plus
positive
def test_execute_add_control_id_smt(tmp_path: pathlib.Path) -> None: """Test execute add control mapping smt.""" <DeepExtract> _test_init(tmp_path) (config, section) = _get_config_section(tmp_path, 'test-csv-to-oscal-cd-bp.config') </DeepExtract> section['component-definition'] = 'tests/data/csv/component-definitions/bp/component-definition.json' <DeepExtract> rows = [] csv_path = pathlib.Path('tests/data/csv/bp.sample.v2.csv') with open(csv_path, 'r', newline='') as f: csv_reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_MINIMAL) for row in csv_reader: rows.append(row) rows = rows </DeepExtract> assert rows[2][10] == 'sc-7_smt.a sc-7_smt.b sc-7.3 sc-7.4_smt.a sc-7.5 ia-3' rows[2][10] = rows[2][10] + ' ld-0_smt.a' with mock.patch('trestle.tasks.csv_to_oscal_cd.csv.reader') as mock_csv_reader: mock_csv_reader.return_value = rows tgt = csv_to_oscal_cd.CsvToOscalComponentDefinition(section) retval = tgt.execute() assert retval == TaskOutcome.SUCCESS fp = pathlib.Path(tmp_path) / 'component-definition.json' cd = ComponentDefinition.oscal_read(fp) component = cd.components[0] control_implementation = component.control_implementations[0] implemented_requirements = control_implementation.implemented_requirements assert len(implemented_requirements) == 22 assert implemented_requirements[21].control_id == 'ld-0' assert len(implemented_requirements[21].statements) == 1 statement = implemented_requirements[21].statements[0] assert statement.statement_id == 'ld-0_smt.a' assert len(statement.props) == 1 assert statement.props[0].name == 'Rule_Id' assert statement.props[0].value == 'account_owner_authorized_ip_range_configured'
def test_execute_add_control_id_smt(tmp_path: pathlib.Path) -> None: """Test execute add control mapping smt.""" _test_init(tmp_path) (config, section) = _get_config_section(tmp_path, 'test-csv-to-oscal-cd-bp.config') section['component-definition'] = 'tests/data/csv/component-definitions/bp/component-definition.json' rows = [] csv_path = pathlib.Path('tests/data/csv/bp.sample.v2.csv') with open(csv_path, 'r', newline='') as f: csv_reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_MINIMAL) for row in csv_reader: rows.append(row) rows = rows assert rows[2][10] == 'sc-7_smt.a sc-7_smt.b sc-7.3 sc-7.4_smt.a sc-7.5 ia-3' rows[2][10] = rows[2][10] + ' ld-0_smt.a' with mock.patch('trestle.tasks.csv_to_oscal_cd.csv.reader') as mock_csv_reader: mock_csv_reader.return_value = rows tgt = csv_to_oscal_cd.CsvToOscalComponentDefinition(section) retval = tgt.execute() assert retval == TaskOutcome.SUCCESS fp = pathlib.Path(tmp_path) / 'component-definition.json' cd = ComponentDefinition.oscal_read(fp) component = cd.components[0] control_implementation = component.control_implementations[0] implemented_requirements = control_implementation.implemented_requirements assert len(implemented_requirements) == 22 assert implemented_requirements[21].control_id == 'ld-0' assert len(implemented_requirements[21].statements) == 1 statement = implemented_requirements[21].statements[0] assert statement.statement_id == 'ld-0_smt.a' assert len(statement.props) == 1 assert statement.props[0].name == 'Rule_Id' assert statement.props[0].value == 'account_owner_authorized_ip_range_configured'
compliance-trestle
positive
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Puts a secret in a scope. "write" is an alias for "put".') @click.option('--scope', required=True, type=SecretScopeClickType(), help=SecretScopeClickType.help) @click.option('--key', required=True, type=SecretKeyClickType(), help=SecretKeyClickType.help) @click.option('--string-value', default=None, help='Read value from string and stored in UTF-8 (MB4) form') @click.option('--binary-file', default=None, type=click.Path(exists=True, readable=True), help='Read value from binary-file and stored as bytes.') @debug_option @profile_option @eat_exceptions @provide_api_client def put_secret(api_client, scope, key, string_value, binary_file): """ Puts a secret in the provided scope with the given name. Overwrites any existing value if the name exists. You should specify at most one option in "string-value" and "binary-file". If "string-value", the argument will be stored in UTF-8 (MB4) form. If "binary-file", the argument should be a path to file. File content will be read as secret value and stored as bytes. If none of "string-value" and "binary-file" specified, an editor will be opened for inputting secret value. The value will be stored in UTF-8 (MB4) form. "databricks secrets write" is an alias for "databricks secrets put", and will be deprecated in a future release. """ <DeepExtract> if string_value and binary_file: error_and_quit('At most one of {} should be provided.'.format(['string-value', 'binary-file'])) elif string_value is None and binary_file is None: prompt = '# Do not edit the above line. Everything below it will be ignored.\n' + '# Please input your secret value above the line. Text will be stored in\n' + '# UTF-8 (MB4) form and any trailing new line will be stripped.\n' + '# Exit without saving will abort writing secret.' content = click.edit('\n\n' + DASH_MARKER + prompt) if content is None: error_and_quit('No changes made, write secret aborted. Please follow the instruction to input secret value.') elif DASH_MARKER not in content: error_and_quit('Please DO NOT edit the line with dashes. Write secret aborted.') (string_param, bytes_param) = (content.split(DASH_MARKER, 1)[0].rstrip('\n'), None) elif string_value is not None: (string_param, bytes_param) = (string_value, None) elif binary_file is not None: with open(binary_file, 'rb') as f: binary_content = f.read() base64_bytes = base64.b64encode(binary_content) base64_str = base64_bytes.decode('utf-8') (string_param, bytes_param) = (None, base64_str) </DeepExtract> SecretApi(api_client).put_secret(scope, key, string_param, bytes_param)
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Puts a secret in a scope. "write" is an alias for "put".') @click.option('--scope', required=True, type=SecretScopeClickType(), help=SecretScopeClickType.help) @click.option('--key', required=True, type=SecretKeyClickType(), help=SecretKeyClickType.help) @click.option('--string-value', default=None, help='Read value from string and stored in UTF-8 (MB4) form') @click.option('--binary-file', default=None, type=click.Path(exists=True, readable=True), help='Read value from binary-file and stored as bytes.') @debug_option @profile_option @eat_exceptions @provide_api_client def put_secret(api_client, scope, key, string_value, binary_file): """ Puts a secret in the provided scope with the given name. Overwrites any existing value if the name exists. You should specify at most one option in "string-value" and "binary-file". If "string-value", the argument will be stored in UTF-8 (MB4) form. If "binary-file", the argument should be a path to file. File content will be read as secret value and stored as bytes. If none of "string-value" and "binary-file" specified, an editor will be opened for inputting secret value. The value will be stored in UTF-8 (MB4) form. "databricks secrets write" is an alias for "databricks secrets put", and will be deprecated in a future release. """ if string_value and binary_file: error_and_quit('At most one of {} should be provided.'.format(['string-value', 'binary-file'])) elif string_value is None and binary_file is None: prompt = '# Do not edit the above line. Everything below it will be ignored.\n' + '# Please input your secret value above the line. Text will be stored in\n' + '# UTF-8 (MB4) form and any trailing new line will be stripped.\n' + '# Exit without saving will abort writing secret.' content = click.edit('\n\n' + DASH_MARKER + prompt) if content is None: error_and_quit('No changes made, write secret aborted. Please follow the instruction to input secret value.') elif DASH_MARKER not in content: error_and_quit('Please DO NOT edit the line with dashes. Write secret aborted.') (string_param, bytes_param) = (content.split(DASH_MARKER, 1)[0].rstrip('\n'), None) elif string_value is not None: (string_param, bytes_param) = (string_value, None) elif binary_file is not None: with open(binary_file, 'rb') as f: binary_content = f.read() base64_bytes = base64.b64encode(binary_content) base64_str = base64_bytes.decode('utf-8') (string_param, bytes_param) = (None, base64_str) SecretApi(api_client).put_secret(scope, key, string_param, bytes_param)
databricks-cli
positive
def __delete(node: Node, key: CT) -> Node: if not node: return None if key < node.key: <DeepExtract> if not node.left: node.left.left = None if key < node.left.key: node.left.left = self.__delete(node.left.left, key) elif key > node.left.key: node.left.right = self.__delete(node.left.right, key) else: if not node.left.left or not node.left.right: node.left.left = node.left.left or node.left.right tmp = node.left node.left = self.__min_val(tmp.right) node.left.right = self.__delete_min(tmp.right) node.left.left = tmp.left node.left.size = self.node_size(node.left.left) + self.node_size(node.left.right) + 1 node.left.left = node.left </DeepExtract> elif key > node.key: <DeepExtract> if not node.right: node.right.right = None if key < node.right.key: node.right.left = self.__delete(node.right.left, key) elif key > node.right.key: node.right.right = self.__delete(node.right.right, key) else: if not node.right.left or not node.right.right: node.right.right = node.right.left or node.right.right tmp = node.right node.right = self.__min_val(tmp.right) node.right.right = self.__delete_min(tmp.right) node.right.left = tmp.left node.right.size = self.node_size(node.right.left) + self.node_size(node.right.right) + 1 node.right.right = node.right </DeepExtract> else: if not node.left or not node.right: return node.left or node.right tmp = node node = self.__min_val(tmp.right) <DeepExtract> if not tmp.right.left: tmp.right.right = tmp.right.right tmp.right.left = self.__delete_min(tmp.right.left) tmp.right.size = self.node_size(tmp.right.left) + self.node_size(tmp.right.right) + 1 tmp.right.right = tmp.right </DeepExtract> node.left = tmp.left node.size = self.node_size(node.left) + self.node_size(node.right) + 1 return node
def __delete(node: Node, key: CT) -> Node: if not node: return None if key < node.key: if not node.left: node.left.left = None if key < node.left.key: node.left.left = self.__delete(node.left.left, key) elif key > node.left.key: node.left.right = self.__delete(node.left.right, key) else: if not node.left.left or not node.left.right: node.left.left = node.left.left or node.left.right tmp = node.left node.left = self.__min_val(tmp.right) node.left.right = self.__delete_min(tmp.right) node.left.left = tmp.left node.left.size = self.node_size(node.left.left) + self.node_size(node.left.right) + 1 node.left.left = node.left elif key > node.key: if not node.right: node.right.right = None if key < node.right.key: node.right.left = self.__delete(node.right.left, key) elif key > node.right.key: node.right.right = self.__delete(node.right.right, key) else: if not node.right.left or not node.right.right: node.right.right = node.right.left or node.right.right tmp = node.right node.right = self.__min_val(tmp.right) node.right.right = self.__delete_min(tmp.right) node.right.left = tmp.left node.right.size = self.node_size(node.right.left) + self.node_size(node.right.right) + 1 node.right.right = node.right else: if not node.left or not node.right: return node.left or node.right tmp = node node = self.__min_val(tmp.right) if not tmp.right.left: tmp.right.right = tmp.right.right tmp.right.left = self.__delete_min(tmp.right.left) tmp.right.size = self.node_size(tmp.right.left) + self.node_size(tmp.right.right) + 1 tmp.right.right = tmp.right node.left = tmp.left node.size = self.node_size(node.left) + self.node_size(node.right) + 1 return node
algorithms-sedgewick-python
positive
def point_estimate(df: Series, **kwargs) -> float: <DeepExtract> if kwargs[FEATURE] not in df: df = df def col_sum(x): df = reduce(lambda x, y: x + y, x) def dimension(x): df = x.shape[0] if isinstance(x, np.ndarray) and x.size > 1 else 1 k = df[kwargs[FEATURE_SUMSQ]].apply(dimension).iloc[0] XX0 = np.zeros((k + 1, k + 1)) XX0[1:k + 1, 1:k + 1] = col_sum(df[kwargs[FEATURE_SUMSQ]]) XX0[0, 0] = col_sum(df[kwargs[DENOMINATOR]]) XX0[0, 1:k + 1] = col_sum(df[kwargs[FEATURE]]) XX0[1:k + 1, 0] = col_sum(df[kwargs[FEATURE]]) Xy0 = np.zeros((k + 1, 1)) Xy0[0,] = col_sum(df[kwargs[NUMERATOR]]) Xy0[1:k + 1,] = np.atleast_2d(col_sum(df[kwargs[FEATURE_CROSS]])).reshape(-1, 1) b = np.matmul(np.linalg.inv(XX0), Xy0) out = b[1:k + 1] if out.size == 1: out = out.item() outseries = Series(index=df.index, dtype=df[kwargs[FEATURE]].dtype) df[REGRESSION_PARAM] = outseries.apply(lambda x: out) df = df </DeepExtract> point_estimate = df[kwargs[NUMERATOR]] / df[kwargs[DENOMINATOR]] if REGRESSION_PARAM in df: feature_mean = df[kwargs[FEATURE]].sum() / df[kwargs[DENOMINATOR]].sum() def lin_reg_point_estimate_delta(row: Series, feature_mean: float, **kwargs: Dict) -> Series: return dfmatmul(row[REGRESSION_PARAM], row[kwargs[FEATURE]] - feature_mean * row[kwargs[DENOMINATOR]], outer=False) return point_estimate - df.apply(lin_reg_point_estimate_delta, feature_mean=feature_mean, axis=1, **kwargs) / df[kwargs[DENOMINATOR]] return point_estimate
def point_estimate(df: Series, **kwargs) -> float: if kwargs[FEATURE] not in df: df = df def col_sum(x): df = reduce(lambda x, y: x + y, x) def dimension(x): df = x.shape[0] if isinstance(x, np.ndarray) and x.size > 1 else 1 k = df[kwargs[FEATURE_SUMSQ]].apply(dimension).iloc[0] XX0 = np.zeros((k + 1, k + 1)) XX0[1:k + 1, 1:k + 1] = col_sum(df[kwargs[FEATURE_SUMSQ]]) XX0[0, 0] = col_sum(df[kwargs[DENOMINATOR]]) XX0[0, 1:k + 1] = col_sum(df[kwargs[FEATURE]]) XX0[1:k + 1, 0] = col_sum(df[kwargs[FEATURE]]) Xy0 = np.zeros((k + 1, 1)) Xy0[0,] = col_sum(df[kwargs[NUMERATOR]]) Xy0[1:k + 1,] = np.atleast_2d(col_sum(df[kwargs[FEATURE_CROSS]])).reshape(-1, 1) b = np.matmul(np.linalg.inv(XX0), Xy0) out = b[1:k + 1] if out.size == 1: out = out.item() outseries = Series(index=df.index, dtype=df[kwargs[FEATURE]].dtype) df[REGRESSION_PARAM] = outseries.apply(lambda x: out) df = df point_estimate = df[kwargs[NUMERATOR]] / df[kwargs[DENOMINATOR]] if REGRESSION_PARAM in df: feature_mean = df[kwargs[FEATURE]].sum() / df[kwargs[DENOMINATOR]].sum() def lin_reg_point_estimate_delta(row: Series, feature_mean: float, **kwargs: Dict) -> Series: return dfmatmul(row[REGRESSION_PARAM], row[kwargs[FEATURE]] - feature_mean * row[kwargs[DENOMINATOR]], outer=False) return point_estimate - df.apply(lin_reg_point_estimate_delta, feature_mean=feature_mean, axis=1, **kwargs) / df[kwargs[DENOMINATOR]] return point_estimate
confidence
positive
def ge(self, other): """ Greater than or overlaps. Returns True if no part of this Interval extends lower than other. :raises ValueError: if either self or other is a null Interval :param other: Interval or point :return: True or False :rtype: bool """ <DeepExtract> if self.is_null(): raise ValueError('Cannot compare null Intervals!') if hasattr(other, 'is_null') and other.is_null(): raise ValueError('Cannot compare null Intervals!') </DeepExtract> return self.begin >= getattr(other, 'begin', other)
def ge(self, other): """ Greater than or overlaps. Returns True if no part of this Interval extends lower than other. :raises ValueError: if either self or other is a null Interval :param other: Interval or point :return: True or False :rtype: bool """ if self.is_null(): raise ValueError('Cannot compare null Intervals!') if hasattr(other, 'is_null') and other.is_null(): raise ValueError('Cannot compare null Intervals!') return self.begin >= getattr(other, 'begin', other)
Clair3
positive
def forward(self, input, target, batch_mean=True): """ Args: input (batch_size, *) target (batch_size, *) Returns: loss () or (batch_size, ) """ reduction = self.reduction n_dims = input.dim() <DeepExtract> ratio = (target + self.eps) / (input + self.eps) loss = ratio - torch.log(ratio) - 1 loss = loss </DeepExtract> dims = tuple(range(1, n_dims)) if reduction == 'sum': loss = loss.sum(dim=dims) elif reduction == 'mean': loss = loss.mean(dim=dims) else: raise NotImplementedError('Not support {} for reduction'.format(reduction)) if batch_mean: loss = loss.mean(dim=0) return loss
def forward(self, input, target, batch_mean=True): """ Args: input (batch_size, *) target (batch_size, *) Returns: loss () or (batch_size, ) """ reduction = self.reduction n_dims = input.dim() ratio = (target + self.eps) / (input + self.eps) loss = ratio - torch.log(ratio) - 1 loss = loss dims = tuple(range(1, n_dims)) if reduction == 'sum': loss = loss.sum(dim=dims) elif reduction == 'mean': loss = loss.mean(dim=dims) else: raise NotImplementedError('Not support {} for reduction'.format(reduction)) if batch_mean: loss = loss.mean(dim=0) return loss
DNN-based_source_separation
positive
def set_up_launch_environment(deps_dir, profile_dir): <DeepExtract> files = [f for f in os.listdir(deps_dir) if os.path.isdir(os.path.join(deps_dir, f))] existing_dirs = [] for file in files: filesystem_dir = os.path.join(deps_dir, file, 'profile.d') dir_to_join = os.path.join(deps_dir, file, 'profile.d') if os.path.exists(filesystem_dir): existing_dirs.append(dir_to_join) profile_dirs = existing_dirs </DeepExtract> for dir in profile_dirs: sections = dir.split(os.sep) if len(sections) < 2: raise Exception('Invalid dependencies directory') deps_idx = sections[len(sections) - 2] files = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))] for file in files: src = os.path.join(dir, file) dest = os.path.join(profile_dir, deps_idx + '_' + file) shutil.copyfile(src, dest)
def set_up_launch_environment(deps_dir, profile_dir): files = [f for f in os.listdir(deps_dir) if os.path.isdir(os.path.join(deps_dir, f))] existing_dirs = [] for file in files: filesystem_dir = os.path.join(deps_dir, file, 'profile.d') dir_to_join = os.path.join(deps_dir, file, 'profile.d') if os.path.exists(filesystem_dir): existing_dirs.append(dir_to_join) profile_dirs = existing_dirs for dir in profile_dirs: sections = dir.split(os.sep) if len(sections) < 2: raise Exception('Invalid dependencies directory') deps_idx = sections[len(sections) - 2] files = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))] for file in files: src = os.path.join(dir, file) dest = os.path.join(profile_dir, deps_idx + '_' + file) shutil.copyfile(src, dest)
cf-mendix-buildpack
positive
def __init__(self, csv_data_file, csv_class_file, base_dir=None, **kwargs): """ Initialize a CSV data generator. Args csv_data_file: Path to the CSV annotations file. csv_class_file: Path to the CSV classes file. base_dir: Directory w.r.t. where the files are to be searched (defaults to the directory containing the csv_data_file). """ self.image_names = [] self.image_data = {} self.base_dir = base_dir if self.base_dir is None: self.base_dir = os.path.dirname(csv_data_file) try: with _open_for_csv(csv_class_file) as file: <DeepExtract> result = OrderedDict() for (line, row) in enumerate(csv.reader(file, delimiter=',')): line += 1 try: (class_name, class_id) = row except ValueError: raise_from(ValueError("line {}: format should be 'class_name,class_id'".format(line)), None) class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line)) if class_name in result: raise ValueError("line {}: duplicate class name: '{}'".format(line, class_name)) result[class_name] = class_id self.classes = result </DeepExtract> except ValueError as e: raise_from(ValueError('invalid CSV class file: {}: {}'.format(csv_class_file, e)), None) self.labels = {} for (key, value) in self.classes.items(): self.labels[value] = key try: with _open_for_csv(csv_data_file) as file: <DeepExtract> result = OrderedDict() for (line, row) in enumerate(csv.reader(file, delimiter=',')): line += 1 try: (img_file, x1, y1, x2, y2, class_name) = row[:6] except ValueError: raise_from(ValueError("line {}: format should be 'img_file,x1,y1,x2,y2,class_name' or 'img_file,,,,,'".format(line)), None) if img_file not in result: result[img_file] = [] if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''): continue x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line)) y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line)) x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line)) y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line)) if x2 <= x1: raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1)) if y2 <= y1: raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1)) if class_name not in self.classes: raise ValueError("line {}: unknown class name: '{}' (classes: {})".format(line, class_name, self.classes)) result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name}) self.image_data = result </DeepExtract> except ValueError as e: raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(csv_data_file, e)), None) self.image_names = list(self.image_data.keys()) super(CSVGenerator, self).__init__(**kwargs)
def __init__(self, csv_data_file, csv_class_file, base_dir=None, **kwargs): """ Initialize a CSV data generator. Args csv_data_file: Path to the CSV annotations file. csv_class_file: Path to the CSV classes file. base_dir: Directory w.r.t. where the files are to be searched (defaults to the directory containing the csv_data_file). """ self.image_names = [] self.image_data = {} self.base_dir = base_dir if self.base_dir is None: self.base_dir = os.path.dirname(csv_data_file) try: with _open_for_csv(csv_class_file) as file: result = OrderedDict() for (line, row) in enumerate(csv.reader(file, delimiter=',')): line += 1 try: (class_name, class_id) = row except ValueError: raise_from(ValueError("line {}: format should be 'class_name,class_id'".format(line)), None) class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line)) if class_name in result: raise ValueError("line {}: duplicate class name: '{}'".format(line, class_name)) result[class_name] = class_id self.classes = result except ValueError as e: raise_from(ValueError('invalid CSV class file: {}: {}'.format(csv_class_file, e)), None) self.labels = {} for (key, value) in self.classes.items(): self.labels[value] = key try: with _open_for_csv(csv_data_file) as file: result = OrderedDict() for (line, row) in enumerate(csv.reader(file, delimiter=',')): line += 1 try: (img_file, x1, y1, x2, y2, class_name) = row[:6] except ValueError: raise_from(ValueError("line {}: format should be 'img_file,x1,y1,x2,y2,class_name' or 'img_file,,,,,'".format(line)), None) if img_file not in result: result[img_file] = [] if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''): continue x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line)) y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line)) x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line)) y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line)) if x2 <= x1: raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1)) if y2 <= y1: raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1)) if class_name not in self.classes: raise ValueError("line {}: unknown class name: '{}' (classes: {})".format(line, class_name, self.classes)) result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name}) self.image_data = result except ValueError as e: raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(csv_data_file, e)), None) self.image_names = list(self.image_data.keys()) super(CSVGenerator, self).__init__(**kwargs)
ensembleObjectDetection
positive
def _tb_log_sample_images_all_img(trainer: 'Trainer', images: Dict[str, np.ndarray], z_plane: Optional[int]=None, group: str='sample') -> None: """Tensorboard plotting handler that plots all arrays in the ``images`` dict as 2D grayscale images. Multi-channel images are split along the C dimension and plotted separately. """ name = images.pop('fname', [None])[0] for (key, img) in images.items(): img = img[:1] <DeepExtract> if img.ndim == 5: if z_plane is None: z_plane = img.shape[2] // 2 assert z_plane in range(img.shape[2]) batch2img = lambda x: x[0, :, z_plane] elif img.ndim == 4: batch2img = lambda x: x[0, :] elif img.ndim == 2: batch2img = lambda x: x[None,] else: raise ValueError('Only 4D and 5D tensors are supported.') </DeepExtract> img = batch2img(img) if img.shape[0] == 1: trainer.tb.add_figure(f'{group}/{key}', plot_image(img[0], cmap='gray', filename=name), global_step=trainer.step) else: for c in range(img.shape[0]): trainer.tb.add_figure(f'{group}/{key}{c}', plot_image(img[c], cmap='gray', filename=name), global_step=trainer.step)
def _tb_log_sample_images_all_img(trainer: 'Trainer', images: Dict[str, np.ndarray], z_plane: Optional[int]=None, group: str='sample') -> None: """Tensorboard plotting handler that plots all arrays in the ``images`` dict as 2D grayscale images. Multi-channel images are split along the C dimension and plotted separately. """ name = images.pop('fname', [None])[0] for (key, img) in images.items(): img = img[:1] if img.ndim == 5: if z_plane is None: z_plane = img.shape[2] // 2 assert z_plane in range(img.shape[2]) batch2img = lambda x: x[0, :, z_plane] elif img.ndim == 4: batch2img = lambda x: x[0, :] elif img.ndim == 2: batch2img = lambda x: x[None,] else: raise ValueError('Only 4D and 5D tensors are supported.') img = batch2img(img) if img.shape[0] == 1: trainer.tb.add_figure(f'{group}/{key}', plot_image(img[0], cmap='gray', filename=name), global_step=trainer.step) else: for c in range(img.shape[0]): trainer.tb.add_figure(f'{group}/{key}{c}', plot_image(img[c], cmap='gray', filename=name), global_step=trainer.step)
elektronn3
positive
def load_keras_application(config, mf_path, goal, input_shape, pooling, reg, dropout, n_classes, verbose): architecture = config['architecture'] trained_on = config['trained_on'] retrained = config.get(constants.RETRAINED, False) top_params = config.get(constants.TOP_PARAMS, None) model_params = {} if trained_on != constants.IMAGENET: raise IOError("The architecture '{}', trained on '{}' cannot be found".format(architecture, trained_on)) if retrained and top_params is None: raise IOError("Your config file is missing some parameters : '{}'".format(constants.TOP_PARAMS)) if goal == constants.SCORING: if not retrained: if top_params is None: model = keras_applications[architecture]['model_func'](weights=None, include_top=True) <DeepExtract> weights_filename = get_weights_filename(mf_path, config, suffix) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path </DeepExtract> model.load_weights(model_weights_path) else: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=top_params['input_shape']) <DeepExtract> top_params = {} if top_params is None else top_params top_params['pooling'] = select_param('pooling', pooling, top_params) top_params['n_classes'] = select_param('n_classes', n_classes, top_params) x = model.layers[-1].output if top_params['pooling'] == 'None': x = Flatten()(x) elif top_params['pooling'] == 'avg': x = GlobalAveragePooling2D()(x) elif top_params['pooling'] == 'max': x = GlobalMaxPooling2D()(x) if dropout is not None and dropout != 0.0: x = Dropout(dropout)(x) if verbose: print('Adding dropout to model with rate: {}'.format(dropout)) regularizer = None if reg is not None: reg_l2 = reg['l2'] reg_l1 = reg['l1'] if reg_l1 != 0.0 and reg_l2 != 0.0: regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2) if reg_l1 == 0.0 and reg_l2 != 0.0: regularizer = regularizers.l2(reg_l2) if reg_l1 != 0.0 and reg_l2 == 0.0: regularizer = regularizers.l1(reg_l1) if verbose: print('Using regularizer for model: {}'.format(reg)) predictions = Dense(top_params['n_classes'], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x) model = Model(input=model.input, output=predictions) (model, model_params) = (model, top_params) </DeepExtract> <DeepExtract> weights_filename = get_weights_filename(mf_path, config, constants.CUSTOM_TOP_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path </DeepExtract> model.load_weights(model_weights_path) else: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=top_params['input_shape']) <DeepExtract> top_params = {} if top_params is None else top_params top_params['pooling'] = select_param('pooling', pooling, top_params) top_params['n_classes'] = select_param('n_classes', n_classes, top_params) x = model.layers[-1].output if top_params['pooling'] == 'None': x = Flatten()(x) elif top_params['pooling'] == 'avg': x = GlobalAveragePooling2D()(x) elif top_params['pooling'] == 'max': x = GlobalMaxPooling2D()(x) if dropout is not None and dropout != 0.0: x = Dropout(dropout)(x) if verbose: print('Adding dropout to model with rate: {}'.format(dropout)) regularizer = None if reg is not None: reg_l2 = reg['l2'] reg_l1 = reg['l1'] if reg_l1 != 0.0 and reg_l2 != 0.0: 
regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2) if reg_l1 == 0.0 and reg_l2 != 0.0: regularizer = regularizers.l2(reg_l2) if reg_l1 != 0.0 and reg_l2 == 0.0: regularizer = regularizers.l1(reg_l1) if verbose: print('Using regularizer for model: {}'.format(reg)) predictions = Dense(top_params['n_classes'], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x) model = Model(input=model.input, output=predictions) (model, model_params) = (model, top_params) </DeepExtract> <DeepExtract> weights_filename = get_weights_filename(mf_path, config, constants.RETRAINED_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path </DeepExtract> model.load_weights(model_weights_path) elif goal == constants.RETRAINING: if not retrained: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=input_shape) <DeepExtract> weights_filename = get_weights_filename(mf_path, config, constants.NOTOP_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path </DeepExtract> model.load_weights(model_weights_path) <DeepExtract> top_params = {} if top_params is None else top_params top_params['pooling'] = select_param('pooling', pooling, top_params) top_params['n_classes'] = select_param('n_classes', n_classes, top_params) x = model.layers[-1].output if top_params['pooling'] == 'None': x = Flatten()(x) elif top_params['pooling'] == 'avg': x = GlobalAveragePooling2D()(x) elif top_params['pooling'] == 'max': x = GlobalMaxPooling2D()(x) if dropout is not None and dropout != 0.0: x = Dropout(dropout)(x) if verbose: print('Adding dropout to model with rate: {}'.format(dropout)) regularizer = None if reg is not None: reg_l2 = reg['l2'] reg_l1 = reg['l1'] if reg_l1 != 0.0 and reg_l2 != 0.0: regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2) if reg_l1 == 0.0 and reg_l2 != 0.0: regularizer = regularizers.l2(reg_l2) if reg_l1 != 0.0 and reg_l2 == 0.0: regularizer = regularizers.l1(reg_l1) if verbose: print('Using regularizer for model: {}'.format(reg)) predictions = Dense(top_params['n_classes'], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x) model = Model(input=model.input, output=predictions) (model, model_params) = (model, top_params) </DeepExtract> model_params['input_shape'] = input_shape else: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=top_params['input_shape']) <DeepExtract> top_params = {} if top_params is None else top_params top_params['pooling'] = select_param('pooling', pooling, top_params) top_params['n_classes'] = select_param('n_classes', n_classes, top_params) x = model.layers[-1].output if top_params['pooling'] == 'None': x = Flatten()(x) elif top_params['pooling'] == 'avg': x = GlobalAveragePooling2D()(x) elif top_params['pooling'] == 'max': x = GlobalMaxPooling2D()(x) if dropout is not None and dropout != 0.0: x = Dropout(dropout)(x) if verbose: print('Adding dropout to model with rate: {}'.format(dropout)) regularizer = None if reg is not None: reg_l2 = reg['l2'] reg_l1 = reg['l1'] if reg_l1 != 0.0 and reg_l2 != 0.0: regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2) if reg_l1 == 0.0 and reg_l2 != 0.0: regularizer = regularizers.l2(reg_l2) if reg_l1 != 
0.0 and reg_l2 == 0.0: regularizer = regularizers.l1(reg_l1) if verbose: print('Using regularizer for model: {}'.format(reg)) predictions = Dense(top_params['n_classes'], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x) model = Model(input=model.input, output=predictions) (model, model_params) = (model, top_params) </DeepExtract> <DeepExtract> weights_filename = get_weights_filename(mf_path, config, constants.RETRAINED_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path </DeepExtract> model.load_weights(model_weights_path) elif goal == constants.BEFORE_TRAIN: if not retrained: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=input_shape) <DeepExtract> weights_filename = get_weights_filename(mf_path, config, constants.NOTOP_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path </DeepExtract> model.load_weights(model_weights_path) else: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=top_params['input_shape']) <DeepExtract> top_params = {} if top_params is None else top_params top_params['pooling'] = select_param('pooling', pooling, top_params) top_params['n_classes'] = select_param('n_classes', n_classes, top_params) x = model.layers[-1].output if top_params['pooling'] == 'None': x = Flatten()(x) elif top_params['pooling'] == 'avg': x = GlobalAveragePooling2D()(x) elif top_params['pooling'] == 'max': x = GlobalMaxPooling2D()(x) if dropout is not None and dropout != 0.0: x = Dropout(dropout)(x) if verbose: print('Adding dropout to model with rate: {}'.format(dropout)) regularizer = None if reg is not None: reg_l2 = reg['l2'] reg_l1 = reg['l1'] if reg_l1 != 0.0 and reg_l2 != 0.0: regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2) if reg_l1 == 0.0 and reg_l2 != 0.0: regularizer = regularizers.l2(reg_l2) if reg_l1 != 0.0 and reg_l2 == 0.0: regularizer = regularizers.l1(reg_l1) if verbose: print('Using regularizer for model: {}'.format(reg)) predictions = Dense(top_params['n_classes'], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x) model = Model(input=model.input, output=predictions) (model, model_params) = (model, top_params) </DeepExtract> <DeepExtract> weights_filename = get_weights_filename(mf_path, config, constants.RETRAINED_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path </DeepExtract> model.load_weights(model_weights_path) return {'model': model, 'preprocessing': keras_applications[architecture]['preprocessing'], 'model_params': model_params}
def load_keras_application(config, mf_path, goal, input_shape, pooling, reg, dropout, n_classes, verbose): architecture = config['architecture'] trained_on = config['trained_on'] retrained = config.get(constants.RETRAINED, False) top_params = config.get(constants.TOP_PARAMS, None) model_params = {} if trained_on != constants.IMAGENET: raise IOError("The architecture '{}', trained on '{}' cannot be found".format(architecture, trained_on)) if retrained and top_params is None: raise IOError("Your config file is missing some parameters : '{}'".format(constants.TOP_PARAMS)) if goal == constants.SCORING: if not retrained: if top_params is None: model = keras_applications[architecture]['model_func'](weights=None, include_top=True) weights_filename = get_weights_filename(mf_path, config, suffix) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path model.load_weights(model_weights_path) else: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=top_params['input_shape']) top_params = {} if top_params is None else top_params top_params['pooling'] = select_param('pooling', pooling, top_params) top_params['n_classes'] = select_param('n_classes', n_classes, top_params) x = model.layers[-1].output if top_params['pooling'] == 'None': x = Flatten()(x) elif top_params['pooling'] == 'avg': x = GlobalAveragePooling2D()(x) elif top_params['pooling'] == 'max': x = GlobalMaxPooling2D()(x) if dropout is not None and dropout != 0.0: x = Dropout(dropout)(x) if verbose: print('Adding dropout to model with rate: {}'.format(dropout)) regularizer = None if reg is not None: reg_l2 = reg['l2'] reg_l1 = reg['l1'] if reg_l1 != 0.0 and reg_l2 != 0.0: regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2) if reg_l1 == 0.0 and reg_l2 != 0.0: regularizer = regularizers.l2(reg_l2) if reg_l1 != 0.0 and reg_l2 == 0.0: regularizer = regularizers.l1(reg_l1) if verbose: print('Using regularizer for model: {}'.format(reg)) predictions = Dense(top_params['n_classes'], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x) model = Model(input=model.input, output=predictions) (model, model_params) = (model, top_params) weights_filename = get_weights_filename(mf_path, config, constants.CUSTOM_TOP_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path model.load_weights(model_weights_path) else: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=top_params['input_shape']) top_params = {} if top_params is None else top_params top_params['pooling'] = select_param('pooling', pooling, top_params) top_params['n_classes'] = select_param('n_classes', n_classes, top_params) x = model.layers[-1].output if top_params['pooling'] == 'None': x = Flatten()(x) elif top_params['pooling'] == 'avg': x = GlobalAveragePooling2D()(x) elif top_params['pooling'] == 'max': x = GlobalMaxPooling2D()(x) if dropout is not None and dropout != 0.0: x = Dropout(dropout)(x) if verbose: print('Adding dropout to model with rate: {}'.format(dropout)) regularizer = None if reg is not None: reg_l2 = reg['l2'] reg_l1 = reg['l1'] if reg_l1 != 0.0 and reg_l2 != 0.0: regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2) if reg_l1 == 0.0 and reg_l2 != 0.0: regularizer 
= regularizers.l2(reg_l2) if reg_l1 != 0.0 and reg_l2 == 0.0: regularizer = regularizers.l1(reg_l1) if verbose: print('Using regularizer for model: {}'.format(reg)) predictions = Dense(top_params['n_classes'], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x) model = Model(input=model.input, output=predictions) (model, model_params) = (model, top_params) weights_filename = get_weights_filename(mf_path, config, constants.RETRAINED_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path model.load_weights(model_weights_path) elif goal == constants.RETRAINING: if not retrained: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=input_shape) weights_filename = get_weights_filename(mf_path, config, constants.NOTOP_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path model.load_weights(model_weights_path) top_params = {} if top_params is None else top_params top_params['pooling'] = select_param('pooling', pooling, top_params) top_params['n_classes'] = select_param('n_classes', n_classes, top_params) x = model.layers[-1].output if top_params['pooling'] == 'None': x = Flatten()(x) elif top_params['pooling'] == 'avg': x = GlobalAveragePooling2D()(x) elif top_params['pooling'] == 'max': x = GlobalMaxPooling2D()(x) if dropout is not None and dropout != 0.0: x = Dropout(dropout)(x) if verbose: print('Adding dropout to model with rate: {}'.format(dropout)) regularizer = None if reg is not None: reg_l2 = reg['l2'] reg_l1 = reg['l1'] if reg_l1 != 0.0 and reg_l2 != 0.0: regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2) if reg_l1 == 0.0 and reg_l2 != 0.0: regularizer = regularizers.l2(reg_l2) if reg_l1 != 0.0 and reg_l2 == 0.0: regularizer = regularizers.l1(reg_l1) if verbose: print('Using regularizer for model: {}'.format(reg)) predictions = Dense(top_params['n_classes'], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x) model = Model(input=model.input, output=predictions) (model, model_params) = (model, top_params) model_params['input_shape'] = input_shape else: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=top_params['input_shape']) top_params = {} if top_params is None else top_params top_params['pooling'] = select_param('pooling', pooling, top_params) top_params['n_classes'] = select_param('n_classes', n_classes, top_params) x = model.layers[-1].output if top_params['pooling'] == 'None': x = Flatten()(x) elif top_params['pooling'] == 'avg': x = GlobalAveragePooling2D()(x) elif top_params['pooling'] == 'max': x = GlobalMaxPooling2D()(x) if dropout is not None and dropout != 0.0: x = Dropout(dropout)(x) if verbose: print('Adding dropout to model with rate: {}'.format(dropout)) regularizer = None if reg is not None: reg_l2 = reg['l2'] reg_l1 = reg['l1'] if reg_l1 != 0.0 and reg_l2 != 0.0: regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2) if reg_l1 == 0.0 and reg_l2 != 0.0: regularizer = regularizers.l2(reg_l2) if reg_l1 != 0.0 and reg_l2 == 0.0: regularizer = regularizers.l1(reg_l1) if verbose: print('Using regularizer for model: {}'.format(reg)) predictions = Dense(top_params['n_classes'], activation='softmax', name='predictions', 
kernel_regularizer=regularizer)(x) model = Model(input=model.input, output=predictions) (model, model_params) = (model, top_params) weights_filename = get_weights_filename(mf_path, config, constants.RETRAINED_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path model.load_weights(model_weights_path) elif goal == constants.BEFORE_TRAIN: if not retrained: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=input_shape) weights_filename = get_weights_filename(mf_path, config, constants.NOTOP_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path model.load_weights(model_weights_path) else: model = keras_applications[architecture]['model_func'](weights=None, include_top=False, input_shape=top_params['input_shape']) top_params = {} if top_params is None else top_params top_params['pooling'] = select_param('pooling', pooling, top_params) top_params['n_classes'] = select_param('n_classes', n_classes, top_params) x = model.layers[-1].output if top_params['pooling'] == 'None': x = Flatten()(x) elif top_params['pooling'] == 'avg': x = GlobalAveragePooling2D()(x) elif top_params['pooling'] == 'max': x = GlobalMaxPooling2D()(x) if dropout is not None and dropout != 0.0: x = Dropout(dropout)(x) if verbose: print('Adding dropout to model with rate: {}'.format(dropout)) regularizer = None if reg is not None: reg_l2 = reg['l2'] reg_l1 = reg['l1'] if reg_l1 != 0.0 and reg_l2 != 0.0: regularizer = regularizers.l1_l2(l1=reg_l1, l2=reg_l2) if reg_l1 == 0.0 and reg_l2 != 0.0: regularizer = regularizers.l2(reg_l2) if reg_l1 != 0.0 and reg_l2 == 0.0: regularizer = regularizers.l1(reg_l1) if verbose: print('Using regularizer for model: {}'.format(reg)) predictions = Dense(top_params['n_classes'], activation='softmax', name='predictions', kernel_regularizer=regularizer)(x) model = Model(input=model.input, output=predictions) (model, model_params) = (model, top_params) weights_filename = get_weights_filename(mf_path, config, constants.RETRAINED_SUFFIX) model_weights_path = get_file_path(mf_path, weights_filename) if not os.path.isfile(model_weights_path) and should_exist: raise IOError('No weigth file found') model_weights_path = model_weights_path model.load_weights(model_weights_path) return {'model': model, 'preprocessing': keras_applications[architecture]['preprocessing'], 'model_params': model_params}
dataiku-contrib
positive
def content_type_choices(self): <DeepExtract> if self.discovered: return for app in settings.INSTALLED_APPS: __import__(app, {}, {}, ['link_schemas']) self.discovered = True </DeepExtract> choices = [('', '----')] for (model_class, content_type) in sorted(self.content_types.items()): choices.append((content_type.pk, u'%s: %s' % (content_type.app_label.replace('_', ' '), content_type.name))) return sorted(choices)
def content_type_choices(self): if self.discovered: return for app in settings.INSTALLED_APPS: __import__(app, {}, {}, ['link_schemas']) self.discovered = True choices = [('', '----')] for (model_class, content_type) in sorted(self.content_types.items()): choices.append((content_type.pk, u'%s: %s' % (content_type.app_label.replace('_', ' '), content_type.name))) return sorted(choices)
Arkestra
positive
@_project_and_creds @_with_cluster def _job_submit(args: dict, cluster: Cluster) -> None: """submits job(s) to cluster Args: args: argument dictionary cluster: cluster instance """ script_args = conf.extract_script_args(args) job_mode = cli.resolve_job_mode(args) docker_args = cli.generate_docker_args(job_mode, args) docker_run_args = args.get('docker_run_args', []) or [] dry_run = args['dry_run'] package = args['module'] <DeepExtract> if args.get('name') is None: dt = datetime.now().astimezone() args.get('name') = f"caliban-{u.current_user()}-{dt.strftime('%Y%m%d-%H%M%S')}" job_name = args.get('name') </DeepExtract> gpu_spec = args.get('gpu_spec') preemptible = not args['nonpreemptible'] min_cpu = args.get('min_cpu') min_mem = args.get('min_mem') experiment_config = args.get('experiment_config') or [{}] xgroup = args.get('xgroup') image_tag = args.get('image_tag') export = args.get('export', None) caliban_config = docker_args.get('caliban_config', {}) labels = args.get('label') if labels is not None: labels = dict(cu.sanitize_labels(labels)) labels = labels or {} docker_m = {'job_mode': job_mode, 'package': package, **docker_args} if job_mode == conf.JobMode.GPU and gpu_spec is None: gpu_spec = k.DEFAULT_GPU_SPEC if not cluster.validate_gpu_spec(gpu_spec): return tpu_spec = args.get('tpu_spec') preemptible_tpu = not args.get('nonpreemptible_tpu') tpu_driver = args.get('tpu_driver') if tpu_spec is not None: available_tpu = cluster.get_tpu_types() if available_tpu is None: logging.error('error getting valid tpu types for cluster') return if tpu_spec not in available_tpu: logging.error('invalid tpu spec, cluster supports:') for t in available_tpu: logging.info('{}x{}'.format(t.count, t.tpu.name)) return if not cluster.validate_tpu_driver(tpu_driver): logging.error('error: unsupported tpu driver {}'.format(tpu_driver)) logging.info('supported tpu drivers for this cluster:') for d in cluster.get_tpu_drivers(): logging.info(' {}'.format(d)) return if tpu_spec is None and gpu_spec is None: min_cpu = min_cpu or k.DEFAULT_MIN_CPU_CPU min_mem = min_mem or k.DEFAULT_MIN_MEM_CPU else: min_cpu = min_cpu or k.DEFAULT_MIN_CPU_ACCEL min_mem = min_mem or k.DEFAULT_MIN_MEM_ACCEL accel_spec = Cluster.convert_accel_spec(gpu_spec, tpu_spec) if accel_spec is None: return (accel, accel_count) = accel_spec engine = get_mem_engine() if dry_run else get_sql_engine() with session_scope(engine) as session: container_spec = generate_container_spec(session, docker_m, image_tag) if image_tag is None: image_tag = generate_image_tag(cluster.project_id, docker_m, dry_run) labels[um.GPU_ENABLED_TAG] = str(job_mode == conf.JobMode.GPU).lower() labels[um.TPU_ENABLED_TAG] = str(tpu_spec is not None) labels[um.DOCKER_IMAGE_TAG] = image_tag experiments = create_experiments(session=session, container_spec=container_spec, script_args=script_args, experiment_config=experiment_config, xgroup=xgroup) specs = list(cluster.create_simple_experiment_job_specs(name=util.sanitize_job_name(job_name), image=image_tag, min_cpu=min_cpu, min_mem=min_mem, experiments=experiments, args=script_args, accelerator=accel, accelerator_count=accel_count, preemptible=preemptible, preemptible_tpu=preemptible_tpu, tpu_driver=tpu_driver, labels=labels, caliban_config=caliban_config)) if dry_run: logging.info('jobs that would be submitted:') for s in specs: logging.info(f'\n{json.dumps(s.spec, indent=2)}') return if export is not None: if not _export_jobs(export, cluster.create_v1jobs(specs, job_name, labels)): print('error exporting jobs to 
{}'.format(export)) return for s in specs: try: cluster.submit_job(job_spec=s, name=job_name, labels=labels) except Exception as e: logging.error(f'exception: {e}') session.commit() return logging.info(f'jobs submitted, visit {cluster.dashboard_url()} to monitor') return
@_project_and_creds @_with_cluster def _job_submit(args: dict, cluster: Cluster) -> None: """submits job(s) to cluster Args: args: argument dictionary cluster: cluster instance """ script_args = conf.extract_script_args(args) job_mode = cli.resolve_job_mode(args) docker_args = cli.generate_docker_args(job_mode, args) docker_run_args = args.get('docker_run_args', []) or [] dry_run = args['dry_run'] package = args['module'] if args.get('name') is None: dt = datetime.now().astimezone() args.get('name') = f"caliban-{u.current_user()}-{dt.strftime('%Y%m%d-%H%M%S')}" job_name = args.get('name') gpu_spec = args.get('gpu_spec') preemptible = not args['nonpreemptible'] min_cpu = args.get('min_cpu') min_mem = args.get('min_mem') experiment_config = args.get('experiment_config') or [{}] xgroup = args.get('xgroup') image_tag = args.get('image_tag') export = args.get('export', None) caliban_config = docker_args.get('caliban_config', {}) labels = args.get('label') if labels is not None: labels = dict(cu.sanitize_labels(labels)) labels = labels or {} docker_m = {'job_mode': job_mode, 'package': package, **docker_args} if job_mode == conf.JobMode.GPU and gpu_spec is None: gpu_spec = k.DEFAULT_GPU_SPEC if not cluster.validate_gpu_spec(gpu_spec): return tpu_spec = args.get('tpu_spec') preemptible_tpu = not args.get('nonpreemptible_tpu') tpu_driver = args.get('tpu_driver') if tpu_spec is not None: available_tpu = cluster.get_tpu_types() if available_tpu is None: logging.error('error getting valid tpu types for cluster') return if tpu_spec not in available_tpu: logging.error('invalid tpu spec, cluster supports:') for t in available_tpu: logging.info('{}x{}'.format(t.count, t.tpu.name)) return if not cluster.validate_tpu_driver(tpu_driver): logging.error('error: unsupported tpu driver {}'.format(tpu_driver)) logging.info('supported tpu drivers for this cluster:') for d in cluster.get_tpu_drivers(): logging.info(' {}'.format(d)) return if tpu_spec is None and gpu_spec is None: min_cpu = min_cpu or k.DEFAULT_MIN_CPU_CPU min_mem = min_mem or k.DEFAULT_MIN_MEM_CPU else: min_cpu = min_cpu or k.DEFAULT_MIN_CPU_ACCEL min_mem = min_mem or k.DEFAULT_MIN_MEM_ACCEL accel_spec = Cluster.convert_accel_spec(gpu_spec, tpu_spec) if accel_spec is None: return (accel, accel_count) = accel_spec engine = get_mem_engine() if dry_run else get_sql_engine() with session_scope(engine) as session: container_spec = generate_container_spec(session, docker_m, image_tag) if image_tag is None: image_tag = generate_image_tag(cluster.project_id, docker_m, dry_run) labels[um.GPU_ENABLED_TAG] = str(job_mode == conf.JobMode.GPU).lower() labels[um.TPU_ENABLED_TAG] = str(tpu_spec is not None) labels[um.DOCKER_IMAGE_TAG] = image_tag experiments = create_experiments(session=session, container_spec=container_spec, script_args=script_args, experiment_config=experiment_config, xgroup=xgroup) specs = list(cluster.create_simple_experiment_job_specs(name=util.sanitize_job_name(job_name), image=image_tag, min_cpu=min_cpu, min_mem=min_mem, experiments=experiments, args=script_args, accelerator=accel, accelerator_count=accel_count, preemptible=preemptible, preemptible_tpu=preemptible_tpu, tpu_driver=tpu_driver, labels=labels, caliban_config=caliban_config)) if dry_run: logging.info('jobs that would be submitted:') for s in specs: logging.info(f'\n{json.dumps(s.spec, indent=2)}') return if export is not None: if not _export_jobs(export, cluster.create_v1jobs(specs, job_name, labels)): print('error exporting jobs to {}'.format(export)) return for s in specs: 
try: cluster.submit_job(job_spec=s, name=job_name, labels=labels) except Exception as e: logging.error(f'exception: {e}') session.commit() return logging.info(f'jobs submitted, visit {cluster.dashboard_url()} to monitor') return
caliban
positive
def parameterized_branch(self, parameterization): """Create a pseudo-child-group parameterized by `for <parameterization>:`.""" <DeepExtract> parameterized_child = Matcher(self.comp, self.original, self.loc, self.check_var, self.style, self.name_list, self.names) </DeepExtract> self.child_groups.append((parameterization, parameterized_child)) return parameterized_child
def parameterized_branch(self, parameterization): """Create a pseudo-child-group parameterized by `for <parameterization>:`.""" parameterized_child = Matcher(self.comp, self.original, self.loc, self.check_var, self.style, self.name_list, self.names) self.child_groups.append((parameterization, parameterized_child)) return parameterized_child
coconut
positive
def __init__(self, net_size): super(ShuffleNetV2, self).__init__() out_channels = configs[net_size]['out_channels'] num_blocks = configs[net_size]['num_blocks'] self.conv1 = nn.Conv2d(3, 24, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(24) self.in_channels = 24 <DeepExtract> layers = [DownBlock(self.in_channels, out_channels[0])] for i in range(num_blocks[0]): layers.append(BasicBlock(out_channels[0])) self.in_channels = out_channels[0] self.layer1 = nn.Sequential(*layers) </DeepExtract> <DeepExtract> layers = [DownBlock(self.in_channels, out_channels[1])] for i in range(num_blocks[1]): layers.append(BasicBlock(out_channels[1])) self.in_channels = out_channels[1] self.layer2 = nn.Sequential(*layers) </DeepExtract> <DeepExtract> layers = [DownBlock(self.in_channels, out_channels[2])] for i in range(num_blocks[2]): layers.append(BasicBlock(out_channels[2])) self.in_channels = out_channels[2] self.layer3 = nn.Sequential(*layers) </DeepExtract> self.conv2 = nn.Conv2d(out_channels[2], out_channels[3], kernel_size=1, stride=1, padding=0, bias=False) self.bn2 = nn.BatchNorm2d(out_channels[3]) self.linear = nn.Linear(out_channels[3], 10)
def __init__(self, net_size): super(ShuffleNetV2, self).__init__() out_channels = configs[net_size]['out_channels'] num_blocks = configs[net_size]['num_blocks'] self.conv1 = nn.Conv2d(3, 24, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(24) self.in_channels = 24 layers = [DownBlock(self.in_channels, out_channels[0])] for i in range(num_blocks[0]): layers.append(BasicBlock(out_channels[0])) self.in_channels = out_channels[0] self.layer1 = nn.Sequential(*layers) layers = [DownBlock(self.in_channels, out_channels[1])] for i in range(num_blocks[1]): layers.append(BasicBlock(out_channels[1])) self.in_channels = out_channels[1] self.layer2 = nn.Sequential(*layers) layers = [DownBlock(self.in_channels, out_channels[2])] for i in range(num_blocks[2]): layers.append(BasicBlock(out_channels[2])) self.in_channels = out_channels[2] self.layer3 = nn.Sequential(*layers) self.conv2 = nn.Conv2d(out_channels[2], out_channels[3], kernel_size=1, stride=1, padding=0, bias=False) self.bn2 = nn.BatchNorm2d(out_channels[3]) self.linear = nn.Linear(out_channels[3], 10)
adaptdl
positive
def api_response(client, address): """ Respond to API requests Gets the request, parses it as JSON, and if it is indeed JSON and of the proper format, a response is generated and returned as JSON via the client object :param client: Client object representing the connection :param tuple address: (IP, port) of the request :return: Response """ client.settimeout(2) try: buffer = '' while True: try: data = client.recv(1024).decode('ascii') except UnicodeDecodeError: raise InternalAPIException buffer += data if not data or data.strip() == '' or len(data) > 2048: break try: test = json.loads(buffer) break except json.JSONDecodeError: pass if not buffer: raise InternalAPIException except (socket.timeout, TimeoutError, ConnectionError, InternalAPIException): self.manager.log.info('No input on API call from %s:%s - closing' % address) return False self.manager.log.debug('Received API request from %s:%s' % address) try: payload = json.loads(buffer) if 'request' not in payload: raise InternalAPIException <DeepExtract> if payload['request'] == 'cancel-job': payload = payload.get('payload', {}) remote_id = payload.get('remote_id') jobtype = payload.get('jobtype') level = payload.get('level', BasicWorker.INTERRUPT_RETRY) self.manager.request_interrupt(remote_id=remote_id, jobtype=jobtype, interrupt_level=level) response = 'OK' elif payload['request'] == 'workers': workers = {} for jobtype in self.manager.worker_pool: workers[jobtype] = len(self.manager.worker_pool[jobtype]) workers['total'] = sum([workers[workertype] for workertype in workers]) response = workers if payload['request'] == 'jobs': jobs = self.db.fetchall('SELECT * FROM jobs') if jobs is None: response = {'error': 'Database unavailable'} response = {} for job in jobs: if job['jobtype'] not in response: response[job['jobtype']] = 0 response[job['jobtype']] += 1 response['total'] = sum([response[jobtype] for jobtype in response]) response = response if payload['request'] == 'datasets': week = 86400 * 7 now = int(time.time()) items = self.db.fetchall('SELECT * FROM datasets WHERE timestamp > %s ORDER BY timestamp ASC', (now - week,)) response = {'1h': 0, '1d': 0, '1w': 0} for item in items: response['1w'] += 1 if item['timestamp'] > now - 3600: response['1h'] += 1 if item['timestamp'] > now - 86400: response['1d'] += 1 response = response if payload['request'] == 'worker-status': open_jobs = self.db.fetchall('SELECT jobtype, timestamp, timestamp_claimed, timestamp_lastclaimed, interval, remote_id FROM jobs ORDER BY jobtype ASC, timestamp ASC, remote_id ASC') running = [] queue = {} for job in open_jobs: try: worker = list(filter(lambda worker: worker.job.data['jobtype'] == job['jobtype'] and worker.job.data['remote_id'] == job['remote_id'], self.manager.worker_pool.get(job['jobtype'], [])))[0] except IndexError: worker = None if not bool(worker): if job['jobtype'] not in queue: queue[job['jobtype']] = 0 queue[job['jobtype']] += 1 else: if hasattr(worker, 'dataset') and worker.dataset: running_key = worker.dataset.key running_user = worker.dataset.owner running_parent = worker.dataset.top_parent().key else: running_key = None running_user = None running_parent = None running.append({'type': job['jobtype'], 'is_claimed': job['timestamp_claimed'] > 0, 'is_running': bool(worker), 'is_processor': hasattr(worker, 'dataset'), 'is_recurring': int(job['interval']) > 0, 'is_maybe_crashed': job['timestamp_claimed'] > 0 and (not worker), 'dataset_key': running_key, 'dataset_user': running_user, 'dataset_parent_key': running_parent, 'timestamp_queued': 
job['timestamp'], 'timestamp_claimed': job['timestamp_lastclaimed']}) response = {'running': running, 'queued': queue} response = False </DeepExtract> if not response: raise InternalAPIException response = json.dumps({'error': False, 'response': response}) except (json.JSONDecodeError, InternalAPIException): response = json.dumps({'error': 'Invalid JSON'}) try: response = client.sendall(response.encode('ascii')) except (BrokenPipeError, ConnectionError, socket.timeout): response = None return response
def api_response(client, address): """ Respond to API requests Gets the request, parses it as JSON, and if it is indeed JSON and of the proper format, a response is generated and returned as JSON via the client object :param client: Client object representing the connection :param tuple address: (IP, port) of the request :return: Response """ client.settimeout(2) try: buffer = '' while True: try: data = client.recv(1024).decode('ascii') except UnicodeDecodeError: raise InternalAPIException buffer += data if not data or data.strip() == '' or len(data) > 2048: break try: test = json.loads(buffer) break except json.JSONDecodeError: pass if not buffer: raise InternalAPIException except (socket.timeout, TimeoutError, ConnectionError, InternalAPIException): self.manager.log.info('No input on API call from %s:%s - closing' % address) return False self.manager.log.debug('Received API request from %s:%s' % address) try: payload = json.loads(buffer) if 'request' not in payload: raise InternalAPIException if payload['request'] == 'cancel-job': payload = payload.get('payload', {}) remote_id = payload.get('remote_id') jobtype = payload.get('jobtype') level = payload.get('level', BasicWorker.INTERRUPT_RETRY) self.manager.request_interrupt(remote_id=remote_id, jobtype=jobtype, interrupt_level=level) response = 'OK' elif payload['request'] == 'workers': workers = {} for jobtype in self.manager.worker_pool: workers[jobtype] = len(self.manager.worker_pool[jobtype]) workers['total'] = sum([workers[workertype] for workertype in workers]) response = workers if payload['request'] == 'jobs': jobs = self.db.fetchall('SELECT * FROM jobs') if jobs is None: response = {'error': 'Database unavailable'} response = {} for job in jobs: if job['jobtype'] not in response: response[job['jobtype']] = 0 response[job['jobtype']] += 1 response['total'] = sum([response[jobtype] for jobtype in response]) response = response if payload['request'] == 'datasets': week = 86400 * 7 now = int(time.time()) items = self.db.fetchall('SELECT * FROM datasets WHERE timestamp > %s ORDER BY timestamp ASC', (now - week,)) response = {'1h': 0, '1d': 0, '1w': 0} for item in items: response['1w'] += 1 if item['timestamp'] > now - 3600: response['1h'] += 1 if item['timestamp'] > now - 86400: response['1d'] += 1 response = response if payload['request'] == 'worker-status': open_jobs = self.db.fetchall('SELECT jobtype, timestamp, timestamp_claimed, timestamp_lastclaimed, interval, remote_id FROM jobs ORDER BY jobtype ASC, timestamp ASC, remote_id ASC') running = [] queue = {} for job in open_jobs: try: worker = list(filter(lambda worker: worker.job.data['jobtype'] == job['jobtype'] and worker.job.data['remote_id'] == job['remote_id'], self.manager.worker_pool.get(job['jobtype'], [])))[0] except IndexError: worker = None if not bool(worker): if job['jobtype'] not in queue: queue[job['jobtype']] = 0 queue[job['jobtype']] += 1 else: if hasattr(worker, 'dataset') and worker.dataset: running_key = worker.dataset.key running_user = worker.dataset.owner running_parent = worker.dataset.top_parent().key else: running_key = None running_user = None running_parent = None running.append({'type': job['jobtype'], 'is_claimed': job['timestamp_claimed'] > 0, 'is_running': bool(worker), 'is_processor': hasattr(worker, 'dataset'), 'is_recurring': int(job['interval']) > 0, 'is_maybe_crashed': job['timestamp_claimed'] > 0 and (not worker), 'dataset_key': running_key, 'dataset_user': running_user, 'dataset_parent_key': running_parent, 'timestamp_queued': 
job['timestamp'], 'timestamp_claimed': job['timestamp_lastclaimed']}) response = {'running': running, 'queued': queue} response = False if not response: raise InternalAPIException response = json.dumps({'error': False, 'response': response}) except (json.JSONDecodeError, InternalAPIException): response = json.dumps({'error': 'Invalid JSON'}) try: response = client.sendall(response.encode('ascii')) except (BrokenPipeError, ConnectionError, socket.timeout): response = None return response
4cat
positive
def decode_chain_sampling(self, hid, begin_emb, seq_len, stop_at_token=None): """ Decode sequence by feeding predicted token to the net again. Act according to probabilities """ res_logits = [] res_actions = [] cur_emb = begin_emb for _ in range(seq_len): <DeepExtract> (out, new_hid) = self.decoder(cur_emb.unsqueeze(0), hid) out = self.output(out) (out_logits, hid) = (out.squeeze(dim=0), new_hid) </DeepExtract> out_probs_v = F.softmax(out_logits, dim=1) out_probs = out_probs_v.data.cpu().numpy()[0] action = int(np.random.choice(out_probs.shape[0], p=out_probs)) action_v = torch.LongTensor([action]).to(begin_emb.device) cur_emb = self.emb(action_v) res_logits.append(out_logits) res_actions.append(action) if stop_at_token is not None and action == stop_at_token: break return (torch.cat(res_logits), res_actions)
def decode_chain_sampling(self, hid, begin_emb, seq_len, stop_at_token=None): """ Decode sequence by feeding predicted token to the net again. Act according to probabilities """ res_logits = [] res_actions = [] cur_emb = begin_emb for _ in range(seq_len): (out, new_hid) = self.decoder(cur_emb.unsqueeze(0), hid) out = self.output(out) (out_logits, hid) = (out.squeeze(dim=0), new_hid) out_probs_v = F.softmax(out_logits, dim=1) out_probs = out_probs_v.data.cpu().numpy()[0] action = int(np.random.choice(out_probs.shape[0], p=out_probs)) action_v = torch.LongTensor([action]).to(begin_emb.device) cur_emb = self.emb(action_v) res_logits.append(out_logits) res_actions.append(action) if stop_at_token is not None and action == stop_at_token: break return (torch.cat(res_logits), res_actions)
Deep-Reinforcement-Learning-Hands-On
positive
def bind(self, data_shapes, label_shapes=None, for_training=True, inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'): if self.params_initialized: <DeepExtract> assert self.binded and self.params_initialized if self._params_dirty: self._sync_params_from_devices() (arg_params, aux_params) = (self._arg_params, self._aux_params) </DeepExtract> if force_rebind: <DeepExtract> self.binded = False self._exec_group = None self._data_shapes = None self._label_shapes = None </DeepExtract> if self.binded: self.logger.warning('Already binded, ignoring bind()') return assert shared_module is None, 'shared_module for MutableModule is not supported' self.for_training = for_training self.inputs_need_grad = inputs_need_grad self.binded = True max_shapes_dict = dict() if self._max_data_shapes is not None: max_shapes_dict.update(dict(self._max_data_shapes[0])) if self._max_label_shapes is not None: max_shapes_dict.update(dict(self._max_label_shapes[0])) max_data_shapes = list() for (name, shape) in data_shapes[0]: if name in max_shapes_dict: max_data_shapes.append((name, max_shapes_dict[name])) else: max_data_shapes.append((name, shape)) max_label_shapes = list() if not label_shapes.count(None) == len(label_shapes): for (name, shape) in label_shapes[0]: if name in max_shapes_dict: max_label_shapes.append((name, max_shapes_dict[name])) else: max_label_shapes.append((name, shape)) if len(max_label_shapes) == 0: max_label_shapes = None module = Module(self._symbol, self._data_names, self._label_names, logger=self.logger, context=self._context, work_load_list=self._work_load_list, fixed_param_names=self._fixed_param_names) module.bind([max_data_shapes for _ in range(len(self._context))], [max_label_shapes for _ in range(len(self._context))], for_training, inputs_need_grad, force_rebind=False, shared_module=None) self._curr_module = module if self.params_initialized: <DeepExtract> if not allow_missing: self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params, allow_missing=allow_missing, force_init=force_init) return if self.params_initialized and (not force_init): warnings.warn('Parameters already initialized and force_init=False. set_params call ignored.', stacklevel=2) return self._exec_group.set_params(arg_params, aux_params) self._params_dirty = True self.params_initialized = True </DeepExtract>
def bind(self, data_shapes, label_shapes=None, for_training=True, inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'): if self.params_initialized: assert self.binded and self.params_initialized if self._params_dirty: self._sync_params_from_devices() (arg_params, aux_params) = (self._arg_params, self._aux_params) if force_rebind: self.binded = False self._exec_group = None self._data_shapes = None self._label_shapes = None if self.binded: self.logger.warning('Already binded, ignoring bind()') return assert shared_module is None, 'shared_module for MutableModule is not supported' self.for_training = for_training self.inputs_need_grad = inputs_need_grad self.binded = True max_shapes_dict = dict() if self._max_data_shapes is not None: max_shapes_dict.update(dict(self._max_data_shapes[0])) if self._max_label_shapes is not None: max_shapes_dict.update(dict(self._max_label_shapes[0])) max_data_shapes = list() for (name, shape) in data_shapes[0]: if name in max_shapes_dict: max_data_shapes.append((name, max_shapes_dict[name])) else: max_data_shapes.append((name, shape)) max_label_shapes = list() if not label_shapes.count(None) == len(label_shapes): for (name, shape) in label_shapes[0]: if name in max_shapes_dict: max_label_shapes.append((name, max_shapes_dict[name])) else: max_label_shapes.append((name, shape)) if len(max_label_shapes) == 0: max_label_shapes = None module = Module(self._symbol, self._data_names, self._label_names, logger=self.logger, context=self._context, work_load_list=self._work_load_list, fixed_param_names=self._fixed_param_names) module.bind([max_data_shapes for _ in range(len(self._context))], [max_label_shapes for _ in range(len(self._context))], for_training, inputs_need_grad, force_rebind=False, shared_module=None) self._curr_module = module if self.params_initialized: if not allow_missing: self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params, allow_missing=allow_missing, force_init=force_init) return if self.params_initialized and (not force_init): warnings.warn('Parameters already initialized and force_init=False. set_params call ignored.', stacklevel=2) return self._exec_group.set_params(arg_params, aux_params) self._params_dirty = True self.params_initialized = True
Deep-Feature-Flow-Segmentation
positive
def _imputation(X): start = time.time() logger.info('Data imputation...') <DeepExtract> raise NotImplementedError </DeepExtract> <DeepExtract> raise NotImplementedError </DeepExtract> <DeepExtract> if self.var_len_categorical_columns is not None: var_len_categorical_vars = [c.name for c in self.var_len_categorical_columns] else: var_len_categorical_vars = [] </DeepExtract> transformers = [('continuous', self.transformers.SimpleImputer(missing_values=np.nan, strategy='mean'), continuous_vars)] obj_cats = [] num_cats = [] for c in categorical_vars + var_len_categorical_vars: dtype = str(X[c].dtype) if dtype.startswith('obj') or dtype.startswith('str'): obj_cats.append(c) else: num_cats.append(c) if len(obj_cats) > 0: transformers.append(('categorical_obj', self.transformers.SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=''), obj_cats)) if len(num_cats) > 0: transformers.append(('categorical_num', self.transformers.SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0), num_cats)) if hasattr(self.transformers, 'ColumnTransformer'): ct = self.transformers.ColumnTransformer(transformers) else: ct = ColumnTransformer(transformers) columns = continuous_vars + obj_cats + num_cats dfwrapper = self.transformers.DataFrameWrapper(ct, columns=columns) X = dfwrapper.fit_transform(X) self.X_transformers['imputation'] = dfwrapper logger.info(f'Imputation taken {time.time() - start}s') return X
def _imputation(X): start = time.time() logger.info('Data imputation...') raise NotImplementedError raise NotImplementedError if self.var_len_categorical_columns is not None: var_len_categorical_vars = [c.name for c in self.var_len_categorical_columns] else: var_len_categorical_vars = [] transformers = [('continuous', self.transformers.SimpleImputer(missing_values=np.nan, strategy='mean'), continuous_vars)] obj_cats = [] num_cats = [] for c in categorical_vars + var_len_categorical_vars: dtype = str(X[c].dtype) if dtype.startswith('obj') or dtype.startswith('str'): obj_cats.append(c) else: num_cats.append(c) if len(obj_cats) > 0: transformers.append(('categorical_obj', self.transformers.SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=''), obj_cats)) if len(num_cats) > 0: transformers.append(('categorical_num', self.transformers.SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0), num_cats)) if hasattr(self.transformers, 'ColumnTransformer'): ct = self.transformers.ColumnTransformer(transformers) else: ct = ColumnTransformer(transformers) columns = continuous_vars + obj_cats + num_cats dfwrapper = self.transformers.DataFrameWrapper(ct, columns=columns) X = dfwrapper.fit_transform(X) self.X_transformers['imputation'] = dfwrapper logger.info(f'Imputation taken {time.time() - start}s') return X
DeepTables
positive
@builtin('set-cdr!') def scheme_cdr(x, y): <DeepExtract> if not scheme_pairp(x): msg = 'argument {0} of {1} has wrong type ({2})' raise SchemeError(msg.format(0, 'set-cdr!', type(x).__name__)) return x </DeepExtract> x.second = y
@builtin('set-cdr!') def scheme_cdr(x, y): if not scheme_pairp(x): msg = 'argument {0} of {1} has wrong type ({2})' raise SchemeError(msg.format(0, 'set-cdr!', type(x).__name__)) return x x.second = y
cs61a
positive
def check_health(runner, client): if client.ping(): <DeepExtract> health_response = client.check_health() if not health_response.has_error(): feedback = health_response.get_feedback() if feedback['health'] == 'healthy': pass elif feedback['health'] == 'sick': message = 'MxRuntime WARNING: Health: %s' % feedback['diagnosis'] (health_status, health_message) = (STATE_WARNING, message) elif feedback['health'] == 'unknown': pass else: message = 'MxRuntime WARNING: Unexpected health check status: %s' % feedback['health'] (health_status, health_message) = (STATE_WARNING, message) elif health_response.get_result() == 3 and health_response.get_cause() == 'java.lang.IllegalArgumentException: Action should not be null': pass elif health_response.get_result() == health_response.ERR_ACTION_NOT_FOUND: pass else: message = 'MxRuntime WARNING: Health check failed unexpectedly: %s' % health_response.get_error() (health_status, health_message) = (STATE_WARNING, message) (health_status, health_message) = (STATE_OK, 'Health check OK') </DeepExtract> print(health_message) return health_status print('Runtime not running. Health could not be determined') return STATE_UNKNOWN
def check_health(runner, client): if client.ping(): health_response = client.check_health() if not health_response.has_error(): feedback = health_response.get_feedback() if feedback['health'] == 'healthy': pass elif feedback['health'] == 'sick': message = 'MxRuntime WARNING: Health: %s' % feedback['diagnosis'] (health_status, health_message) = (STATE_WARNING, message) elif feedback['health'] == 'unknown': pass else: message = 'MxRuntime WARNING: Unexpected health check status: %s' % feedback['health'] (health_status, health_message) = (STATE_WARNING, message) elif health_response.get_result() == 3 and health_response.get_cause() == 'java.lang.IllegalArgumentException: Action should not be null': pass elif health_response.get_result() == health_response.ERR_ACTION_NOT_FOUND: pass else: message = 'MxRuntime WARNING: Health check failed unexpectedly: %s' % health_response.get_error() (health_status, health_message) = (STATE_WARNING, message) (health_status, health_message) = (STATE_OK, 'Health check OK') print(health_message) return health_status print('Runtime not running. Health could not be determined') return STATE_UNKNOWN
cf-mendix-buildpack
positive
def register(self, cls, executable=None): """Register the COM server class.""" mth = getattr(cls, '_register', None) if mth is not None: mth(self) else: <DeepExtract> table = [t[:2] for t in self._registry_entries(cls)] table = list(set(table)) table.sort() table.reverse() _debug('Unregister %s', cls) for (hkey, subkey) in table: try: if True: _debug('SHDeleteKey %s\\%s', _explain(hkey), subkey) SHDeleteKey(hkey, subkey) else: _debug('DeleteKey %s\\%s', _explain(hkey), subkey) winreg.DeleteKey(hkey, subkey) except WindowsError as detail: if get_winerror(detail) != 2: raise tlib = getattr(cls, '_reg_typelib_', None) if tlib is not None: try: _debug('UnRegisterTypeLib(%s, %s, %s)', *tlib) UnRegisterTypeLib(*tlib) except WindowsError as detail: if not get_winerror(detail) in (TYPE_E_REGISTRYACCESS, TYPE_E_CANTLOADLIBRARY): raise _debug('Done') </DeepExtract> <DeepExtract> table = self._registry_entries(cls) table.sort() _debug('Registering %s', cls) for (hkey, subkey, valuename, value) in table: _debug('[%s\\%s]', _explain(hkey), subkey) _debug('%s="%s"', valuename or '@', value) k = winreg.CreateKey(hkey, subkey) winreg.SetValueEx(k, valuename, None, winreg.REG_SZ, str(value)) tlib = getattr(cls, '_reg_typelib_', None) if tlib is not None: if hasattr(sys, 'frozendllhandle'): dll = self._get_serverdll() _debug('LoadTypeLibEx(%s, REGKIND_REGISTER)', dll) LoadTypeLibEx(dll, REGKIND_REGISTER) else: if executable: path = executable elif hasattr(sys, 'frozen'): path = sys.executable else: path = cls._typelib_path_ _debug('LoadTypeLibEx(%s, REGKIND_REGISTER)', path) LoadTypeLibEx(path, REGKIND_REGISTER) _debug('Done') </DeepExtract>
def register(self, cls, executable=None): """Register the COM server class.""" mth = getattr(cls, '_register', None) if mth is not None: mth(self) else: table = [t[:2] for t in self._registry_entries(cls)] table = list(set(table)) table.sort() table.reverse() _debug('Unregister %s', cls) for (hkey, subkey) in table: try: if True: _debug('SHDeleteKey %s\\%s', _explain(hkey), subkey) SHDeleteKey(hkey, subkey) else: _debug('DeleteKey %s\\%s', _explain(hkey), subkey) winreg.DeleteKey(hkey, subkey) except WindowsError as detail: if get_winerror(detail) != 2: raise tlib = getattr(cls, '_reg_typelib_', None) if tlib is not None: try: _debug('UnRegisterTypeLib(%s, %s, %s)', *tlib) UnRegisterTypeLib(*tlib) except WindowsError as detail: if not get_winerror(detail) in (TYPE_E_REGISTRYACCESS, TYPE_E_CANTLOADLIBRARY): raise _debug('Done') table = self._registry_entries(cls) table.sort() _debug('Registering %s', cls) for (hkey, subkey, valuename, value) in table: _debug('[%s\\%s]', _explain(hkey), subkey) _debug('%s="%s"', valuename or '@', value) k = winreg.CreateKey(hkey, subkey) winreg.SetValueEx(k, valuename, None, winreg.REG_SZ, str(value)) tlib = getattr(cls, '_reg_typelib_', None) if tlib is not None: if hasattr(sys, 'frozendllhandle'): dll = self._get_serverdll() _debug('LoadTypeLibEx(%s, REGKIND_REGISTER)', dll) LoadTypeLibEx(dll, REGKIND_REGISTER) else: if executable: path = executable elif hasattr(sys, 'frozen'): path = sys.executable else: path = cls._typelib_path_ _debug('LoadTypeLibEx(%s, REGKIND_REGISTER)', path) LoadTypeLibEx(path, REGKIND_REGISTER) _debug('Done')
comtypes
positive
def create_test_bot(name, language): botpath = os.path.join(server_info['repo_path'], 'ants', 'dist', 'sample_bots', language) bot_filename = os.path.join(botpath, name + extension[language]) if not os.path.exists(bot_filename): if not create_starter_bot(name): print('No {0} bot named {1}'.format(language, name)) print(bot_filename) return False else: return True connection = MySQLdb.connect(host=server_info['db_host'], user=server_info['db_username'], passwd=server_info['db_password'], db=server_info['db_name']) cursor = connection.cursor(MySQLdb.cursors.DictCursor) cursor.execute("\n select username\n from user\n where username like '%s%%'\n " % name) bot_id = max([int(row['username'][len(name):]) for row in cursor.fetchall()] or [0]) + 1 cursor.execute("\n insert into user\n values (null,'%s%s','$6$rounds=54321$hQd}`.j1e#X&PuN*$D8.wbEp6vwwLoC27GpiGVOFediuAWaGTQ2MPHD64i/bVGxtj0XNeRJeJRKVgDC/uTh.W2m5YoaoA6To1cJ7ZF/',\n '%s%s@ai-contest.com',1,'7b3f9842775fa9c9d489a3714e857580',0,'Test Account',11,current_timestamp(),0,0);\n " % (name, bot_id, name, bot_id)) user_id = cursor.lastrowid print('user_id: %s' % user_id) cursor.execute('\n insert into submission (user_id, version, status, timestamp, language_id) \n values (%s, 1, 20, current_timestamp(), 0)\n ' % user_id) submission_id = cursor.lastrowid print('submission_id: %s' % submission_id) connection.commit() connection.close() <DeepExtract> bot_dir = os.path.join(server_info['uploads_path'], str(submission_id // 1000), str(submission_id)) </DeepExtract> print(bot_dir) if os.path.exists(bot_dir): os.rmdir(bot_dir) os.makedirs(bot_dir) bot_zip_filename = os.path.join(bot_dir, 'entry.zip') with zipfile.ZipFile(bot_zip_filename, 'w') as bot_zip: bot_zip.write(bot_filename, 'MyBot' + extension[language]) for filename in support[language]: support_filename = os.path.join(botpath, filename) if os.path.exists(support_filename): bot_zip.write(support_filename, filename) else: print('No support file {0}'.format(filename)) return True
def create_test_bot(name, language): botpath = os.path.join(server_info['repo_path'], 'ants', 'dist', 'sample_bots', language) bot_filename = os.path.join(botpath, name + extension[language]) if not os.path.exists(bot_filename): if not create_starter_bot(name): print('No {0} bot named {1}'.format(language, name)) print(bot_filename) return False else: return True connection = MySQLdb.connect(host=server_info['db_host'], user=server_info['db_username'], passwd=server_info['db_password'], db=server_info['db_name']) cursor = connection.cursor(MySQLdb.cursors.DictCursor) cursor.execute("\n select username\n from user\n where username like '%s%%'\n " % name) bot_id = max([int(row['username'][len(name):]) for row in cursor.fetchall()] or [0]) + 1 cursor.execute("\n insert into user\n values (null,'%s%s','$6$rounds=54321$hQd}`.j1e#X&PuN*$D8.wbEp6vwwLoC27GpiGVOFediuAWaGTQ2MPHD64i/bVGxtj0XNeRJeJRKVgDC/uTh.W2m5YoaoA6To1cJ7ZF/',\n '%s%s@ai-contest.com',1,'7b3f9842775fa9c9d489a3714e857580',0,'Test Account',11,current_timestamp(),0,0);\n " % (name, bot_id, name, bot_id)) user_id = cursor.lastrowid print('user_id: %s' % user_id) cursor.execute('\n insert into submission (user_id, version, status, timestamp, language_id) \n values (%s, 1, 20, current_timestamp(), 0)\n ' % user_id) submission_id = cursor.lastrowid print('submission_id: %s' % submission_id) connection.commit() connection.close() bot_dir = os.path.join(server_info['uploads_path'], str(submission_id // 1000), str(submission_id)) print(bot_dir) if os.path.exists(bot_dir): os.rmdir(bot_dir) os.makedirs(bot_dir) bot_zip_filename = os.path.join(bot_dir, 'entry.zip') with zipfile.ZipFile(bot_zip_filename, 'w') as bot_zip: bot_zip.write(bot_filename, 'MyBot' + extension[language]) for filename in support[language]: support_filename = os.path.join(botpath, filename) if os.path.exists(support_filename): bot_zip.write(support_filename, filename) else: print('No support file {0}'.format(filename)) return True
aichallenge
positive
def load_pretrained_imagenet_weights(model): """Load pretrained weights Args: num_layers: 50 for res50 and so on. model: the generalized rcnnn module """ (_, ext) = os.path.splitext(cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS) if ext == '.pkl': with open(cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, 'rb') as fp: src_blobs = pickle.load(fp, encoding='latin1') if 'blobs' in src_blobs: src_blobs = src_blobs['blobs'] pretrianed_state_dict = src_blobs else: weights_file = os.path.join(cfg.ROOT_DIR, cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS) <DeepExtract> dst_dict = {} for (k, v) in torch.load(weights_file).items(): toks = k.split('.') if k.startswith('layer'): assert len(toks[0]) == 6 res_id = int(toks[0][5]) + 1 name = '.'.join(['res%d' % res_id] + toks[1:]) dst_dict[name] = v elif k.startswith('fc'): continue else: name = '.'.join(['res1'] + toks) dst_dict[name] = v pretrianed_state_dict = dst_dict </DeepExtract> for (name, mod) in model.named_modules(): if isinstance(mod, mynn.AffineChannel2d): if cfg.FPN.FPN_ON: pretrianed_name = name.split('.', 2)[-1] else: pretrianed_name = name.split('.', 1)[-1] bn_mean = pretrianed_state_dict[pretrianed_name + '.running_mean'] bn_var = pretrianed_state_dict[pretrianed_name + '.running_var'] scale = pretrianed_state_dict[pretrianed_name + '.weight'] bias = pretrianed_state_dict[pretrianed_name + '.bias'] std = torch.sqrt(bn_var + 1e-05) new_scale = scale / std new_bias = bias - bn_mean * scale / std pretrianed_state_dict[pretrianed_name + '.weight'] = new_scale pretrianed_state_dict[pretrianed_name + '.bias'] = new_bias model_state_dict = model.state_dict() pattern = dwh.resnet_weights_name_pattern() (name_mapping, _) = model.detectron_weight_mapping for (k, v) in name_mapping.items(): if isinstance(v, str): if pattern.match(v): if cfg.FPN.FPN_ON: pretrianed_key = k.split('.', 2)[-1] else: pretrianed_key = k.split('.', 1)[-1] if ext == '.pkl': model_state_dict[k].copy_(torch.Tensor(pretrianed_state_dict[v])) else: model_state_dict[k].copy_(pretrianed_state_dict[pretrianed_key])
def load_pretrained_imagenet_weights(model): """Load pretrained weights Args: num_layers: 50 for res50 and so on. model: the generalized rcnnn module """ (_, ext) = os.path.splitext(cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS) if ext == '.pkl': with open(cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, 'rb') as fp: src_blobs = pickle.load(fp, encoding='latin1') if 'blobs' in src_blobs: src_blobs = src_blobs['blobs'] pretrianed_state_dict = src_blobs else: weights_file = os.path.join(cfg.ROOT_DIR, cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS) dst_dict = {} for (k, v) in torch.load(weights_file).items(): toks = k.split('.') if k.startswith('layer'): assert len(toks[0]) == 6 res_id = int(toks[0][5]) + 1 name = '.'.join(['res%d' % res_id] + toks[1:]) dst_dict[name] = v elif k.startswith('fc'): continue else: name = '.'.join(['res1'] + toks) dst_dict[name] = v pretrianed_state_dict = dst_dict for (name, mod) in model.named_modules(): if isinstance(mod, mynn.AffineChannel2d): if cfg.FPN.FPN_ON: pretrianed_name = name.split('.', 2)[-1] else: pretrianed_name = name.split('.', 1)[-1] bn_mean = pretrianed_state_dict[pretrianed_name + '.running_mean'] bn_var = pretrianed_state_dict[pretrianed_name + '.running_var'] scale = pretrianed_state_dict[pretrianed_name + '.weight'] bias = pretrianed_state_dict[pretrianed_name + '.bias'] std = torch.sqrt(bn_var + 1e-05) new_scale = scale / std new_bias = bias - bn_mean * scale / std pretrianed_state_dict[pretrianed_name + '.weight'] = new_scale pretrianed_state_dict[pretrianed_name + '.bias'] = new_bias model_state_dict = model.state_dict() pattern = dwh.resnet_weights_name_pattern() (name_mapping, _) = model.detectron_weight_mapping for (k, v) in name_mapping.items(): if isinstance(v, str): if pattern.match(v): if cfg.FPN.FPN_ON: pretrianed_key = k.split('.', 2)[-1] else: pretrianed_key = k.split('.', 1)[-1] if ext == '.pkl': model_state_dict[k].copy_(torch.Tensor(pretrianed_state_dict[v])) else: model_state_dict[k].copy_(pretrianed_state_dict[pretrianed_key])
DIoU-pytorch-detectron
positive
def test_ssd300_forward(): <DeepExtract> import mmcv config = _get_config_module('ssd300_coco.py') model = copy.deepcopy(config.model) train_cfg = mmcv.Config(copy.deepcopy(config.train_cfg)) test_cfg = mmcv.Config(copy.deepcopy(config.test_cfg)) (model, train_cfg, test_cfg) = (model, train_cfg, test_cfg) </DeepExtract> model['pretrained'] = None from mmdet.models import build_detector detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg) input_shape = (1, 3, 300, 300) <DeepExtract> (N, C, H, W) = input_shape rng = np.random.RandomState(0) imgs = rng.rand(*input_shape) img_metas = [{'img_shape': (H, W, C), 'ori_shape': (H, W, C), 'pad_shape': (H, W, C), 'filename': '<demo>.png', 'scale_factor': 1.0, 'flip': False} for _ in range(N)] gt_bboxes = [] gt_labels = [] for batch_idx in range(N): if num_items is None: num_boxes = rng.randint(1, 10) else: num_boxes = num_items[batch_idx] (cx, cy, bw, bh) = rng.rand(num_boxes, 4).T tl_x = (cx * W - W * bw / 2).clip(0, W) tl_y = (cy * H - H * bh / 2).clip(0, H) br_x = (cx * W + W * bw / 2).clip(0, W) br_y = (cy * H + H * bh / 2).clip(0, H) boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T class_idxs = rng.randint(1, num_classes, size=num_boxes) gt_bboxes.append(torch.FloatTensor(boxes)) gt_labels.append(torch.LongTensor(class_idxs)) mm_inputs = {'imgs': torch.FloatTensor(imgs), 'img_metas': img_metas, 'gt_bboxes': gt_bboxes, 'gt_labels': gt_labels, 'gt_bboxes_ignore': None} mm_inputs = mm_inputs </DeepExtract> imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') gt_bboxes = mm_inputs['gt_bboxes'] gt_labels = mm_inputs['gt_labels'] losses = detector.forward(imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, return_loss=True) assert isinstance(losses, dict) with torch.no_grad(): img_list = [g[None, :] for g in imgs] batch_results = [] for (one_img, one_meta) in zip(img_list, img_metas): result = detector.forward([one_img], [[one_meta]], return_loss=False) batch_results.append(result)
def test_ssd300_forward(): import mmcv config = _get_config_module('ssd300_coco.py') model = copy.deepcopy(config.model) train_cfg = mmcv.Config(copy.deepcopy(config.train_cfg)) test_cfg = mmcv.Config(copy.deepcopy(config.test_cfg)) (model, train_cfg, test_cfg) = (model, train_cfg, test_cfg) model['pretrained'] = None from mmdet.models import build_detector detector = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg) input_shape = (1, 3, 300, 300) (N, C, H, W) = input_shape rng = np.random.RandomState(0) imgs = rng.rand(*input_shape) img_metas = [{'img_shape': (H, W, C), 'ori_shape': (H, W, C), 'pad_shape': (H, W, C), 'filename': '<demo>.png', 'scale_factor': 1.0, 'flip': False} for _ in range(N)] gt_bboxes = [] gt_labels = [] for batch_idx in range(N): if num_items is None: num_boxes = rng.randint(1, 10) else: num_boxes = num_items[batch_idx] (cx, cy, bw, bh) = rng.rand(num_boxes, 4).T tl_x = (cx * W - W * bw / 2).clip(0, W) tl_y = (cy * H - H * bh / 2).clip(0, H) br_x = (cx * W + W * bw / 2).clip(0, W) br_y = (cy * H + H * bh / 2).clip(0, H) boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T class_idxs = rng.randint(1, num_classes, size=num_boxes) gt_bboxes.append(torch.FloatTensor(boxes)) gt_labels.append(torch.LongTensor(class_idxs)) mm_inputs = {'imgs': torch.FloatTensor(imgs), 'img_metas': img_metas, 'gt_bboxes': gt_bboxes, 'gt_labels': gt_labels, 'gt_bboxes_ignore': None} mm_inputs = mm_inputs imgs = mm_inputs.pop('imgs') img_metas = mm_inputs.pop('img_metas') gt_bboxes = mm_inputs['gt_bboxes'] gt_labels = mm_inputs['gt_labels'] losses = detector.forward(imgs, img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, return_loss=True) assert isinstance(losses, dict) with torch.no_grad(): img_list = [g[None, :] for g in imgs] batch_results = [] for (one_img, one_meta) in zip(img_list, img_metas): result = detector.forward([one_img], [[one_meta]], return_loss=False) batch_results.append(result)
ATSS-EfficientDet-PyTorch
positive
def _do_dump(args): dbase = database.load_file(args.database, encoding=args.encoding, prune_choices=args.prune, strict=not args.no_strict) if isinstance(dbase, CanDatabase): <DeepExtract> WIDTH = 80 try: (WIDTH, _) = os.get_terminal_size() except: pass print('================================= Messages =================================') print() print(' ' + 72 * '-') for message in dbase.messages: _dump_can_message(message, with_comments=args.with_comments, WIDTH=WIDTH) </DeepExtract> elif isinstance(dbase, DiagnosticsDatabase): <DeepExtract> print('=================================== Dids ===================================') print() print(' ' + 72 * '-') for did in dbase.dids: print() print(f' Name: {did.name}') print(f' Length: {did.length} bytes') print(' Layout:') print() for data in did.datas: print(f' Name: {data.name}') print(f' Start bit: {data.start}') print(f' Length: {data.length}') print() print() print(' ' + 72 * '-') </DeepExtract> else: sys.exit('Unsupported database type.')
def _do_dump(args): dbase = database.load_file(args.database, encoding=args.encoding, prune_choices=args.prune, strict=not args.no_strict) if isinstance(dbase, CanDatabase): WIDTH = 80 try: (WIDTH, _) = os.get_terminal_size() except: pass print('================================= Messages =================================') print() print(' ' + 72 * '-') for message in dbase.messages: _dump_can_message(message, with_comments=args.with_comments, WIDTH=WIDTH) elif isinstance(dbase, DiagnosticsDatabase): print('=================================== Dids ===================================') print() print(' ' + 72 * '-') for did in dbase.dids: print() print(f' Name: {did.name}') print(f' Length: {did.length} bytes') print(' Layout:') print() for data in did.datas: print(f' Name: {data.name}') print(f' Start bit: {data.start}') print(f' Length: {data.length}') print() print() print(' ' + 72 * '-') else: sys.exit('Unsupported database type.')
cantools
positive
def train_data(self, max_seq_len=512, dataset_size=None, epochs=1, mask_gen=None, debug=False, **kwargs): if debug: max_examples = 1000 else: max_examples = None <DeepExtract> with open(os.path.join(self.data_dir, 'train.jsonl')) as fs: data = [json.loads(l) for l in fs] examples = [] for d in data: passage = self.tokenizer.tokenize(d['passage'].strip()) question = self.tokenizer.tokenize(d['question'].strip()) label = None if 'label' not in d else self.label2id(str(d['label']).lower()) examples.append(ExampleInstance(segments=[passage, question], label=label)) def get_stats(l): train = f'Max={max(l)}, min={min(l)}, avg={np.mean(l)}' ctx_token_size = [len(e.segments[0]) for e in examples] q_token_size = [len(e.segments[1]) for e in examples] total_size = [len(e.segments[0]) + len(e.segments[1]) for e in examples] logger.info(f'Context statistics: {get_stats(ctx_token_size)}, long={len([t for t in ctx_token_size if t > 500])}/{len(ctx_token_size)}') logger.info(f'question statistics: {get_stats(q_token_size)}') logger.info(f'Total statistics: {get_stats(total_size)}, long={len([t for t in total_size if t > 500])}') train = examples </DeepExtract> examples = ExampleSet(train) if dataset_size is None: dataset_size = len(examples) * epochs return DynamicDataset(examples, feature_fn=self.get_feature_fn(max_seq_len=max_seq_len, mask_gen=mask_gen), dataset_size=dataset_size, shuffle=True, **kwargs)
def train_data(self, max_seq_len=512, dataset_size=None, epochs=1, mask_gen=None, debug=False, **kwargs): if debug: max_examples = 1000 else: max_examples = None with open(os.path.join(self.data_dir, 'train.jsonl')) as fs: data = [json.loads(l) for l in fs] examples = [] for d in data: passage = self.tokenizer.tokenize(d['passage'].strip()) question = self.tokenizer.tokenize(d['question'].strip()) label = None if 'label' not in d else self.label2id(str(d['label']).lower()) examples.append(ExampleInstance(segments=[passage, question], label=label)) def get_stats(l): train = f'Max={max(l)}, min={min(l)}, avg={np.mean(l)}' ctx_token_size = [len(e.segments[0]) for e in examples] q_token_size = [len(e.segments[1]) for e in examples] total_size = [len(e.segments[0]) + len(e.segments[1]) for e in examples] logger.info(f'Context statistics: {get_stats(ctx_token_size)}, long={len([t for t in ctx_token_size if t > 500])}/{len(ctx_token_size)}') logger.info(f'question statistics: {get_stats(q_token_size)}') logger.info(f'Total statistics: {get_stats(total_size)}, long={len([t for t in total_size if t > 500])}') train = examples examples = ExampleSet(train) if dataset_size is None: dataset_size = len(examples) * epochs return DynamicDataset(examples, feature_fn=self.get_feature_fn(max_seq_len=max_seq_len, mask_gen=mask_gen), dataset_size=dataset_size, shuffle=True, **kwargs)
DeBERTa
positive
def test_worker_forwards_pcm_data_to_vad(self): messages = [{'frontend': self.make_frontend_request('message 1', 'ONLINE', id=1, has_next=True)}] <DeepExtract> self.poller.add_messages(messages) self.worker.run() </DeepExtract> <DeepExtract> self.assertEquals([('message 1', 'resampled message 1')], self.vad.data) </DeepExtract>
def test_worker_forwards_pcm_data_to_vad(self): messages = [{'frontend': self.make_frontend_request('message 1', 'ONLINE', id=1, has_next=True)}] self.poller.add_messages(messages) self.worker.run() self.assertEquals([('message 1', 'resampled message 1')], self.vad.data)
cloud-asr
positive
def getJavaScript(self, main, ranges, labels, width, height, ext): """Create the chart javascript""" value = '' 'Trying to return something along the lines of:\n \n function chart(obj) {\n var par = d3.select(".d3-tip #chart")\n var labels = ["x", 2004, 2007, 2010];\n var data0 = ["data0", obj["OVRK2004"], obj["OVRK2007"], obj["OVRK2010"]];\n var data1 = ["data1", obj["INCRK2004"], obj["INCRK2007"], obj["INCRK2010"]];\n \n var chart = c3.generate({\n bindto: par,\n data: {\n x: "x",\n type: "line",\n columns: [\n labels,\n data0,\n data1\n ],\n groups: [\n [\'data0\', \'data1\']\n ]\n names: {\n data0: "Overall",\n data1: "Income"\n }\n },\n size: {\n width: 240,\n height: 240\n },\n axis: {\n y: {\n min: 0,\n max: 32000\n }\n }\n });\n }\n ' template = u' function chart(obj){{\n var par = d3.select("{par}")\n {labels}\n {vars}\n \n var chart = c3.generate({{\n bindto: par,\n data: {{\n {xaxis}\n type: "{chartName}",\n columns: [\n {labelvar}{data}\n ],\n {groups}\n names: {{ {names} }}\n }},\n size: {{\n width: {width},\n height: {height}\n }},\n axis: {{\n y: {{\n min: {min},\n max: {max}\n }}\n }}\n }});\n }}' parentTemplate = u'.d3-tip #chart' if ext == True: parentTemplate = '#extTip #chart' <DeepExtract> lmin = [] lmax = [] rmin = 0 rmax = 0 for r in ranges: for f in r.getFields(): index = main.layer.fieldNameIndex(f) lmin.append(main.layer.minimumValue(index)) lmax.append(main.layer.maximumValue(index)) if len(lmin) > 0: min(lmin) if len(lmax) > 0: if self.stacked == True: rmax = sum(lmax) else: rmax = max(lmax) (min, max) = (rmin, rmax) </DeepExtract> labelPart = u'var labels = ["x", {0}];' labelTemplate = labelPart.format(u','.join(labels)) labelVarTemplate = u'labels,' varPart = u'var data{0} = ["data{0}", {1}];' fieldPart = u'obj["{0}"]' varTemplate = u'' varList = [] xaxisTemplate = u'x: "x",' for l in labels: if len(l) == 0: xaxisTemplate = u'' labelTemplate = u'' labelVarTemplate = u'' break dataPart = u'data{0}' dataTemplate = u'' dataList = [] namesPart = u'data{0}: "{1}" ' namesTemplate = u'' nameList = [] i = 1 for r in ranges: varList.append(varPart.format(str(i), r.getCsvFormattedFields(fieldPart))) dataList.append(dataPart.format(str(i))) nameList.append(namesPart.format(str(i), r.getName())) i += 1 varTemplate = ' '.join(varList) dataTemplate = ', '.join(dataList) namesTemplate = ','.join(nameList) value = template.format(par=parentTemplate, labels=labelTemplate, vars=varTemplate, xaxis=xaxisTemplate, chartName=self.c3Name, groups=self.getStackingScript(ranges), labelvar=labelVarTemplate, data=dataTemplate, names=namesTemplate, width=width, height=height, min=min, max=max) return value
def getJavaScript(self, main, ranges, labels, width, height, ext): """Create the chart javascript""" value = '' 'Trying to return something along the lines of:\n \n function chart(obj) {\n var par = d3.select(".d3-tip #chart")\n var labels = ["x", 2004, 2007, 2010];\n var data0 = ["data0", obj["OVRK2004"], obj["OVRK2007"], obj["OVRK2010"]];\n var data1 = ["data1", obj["INCRK2004"], obj["INCRK2007"], obj["INCRK2010"]];\n \n var chart = c3.generate({\n bindto: par,\n data: {\n x: "x",\n type: "line",\n columns: [\n labels,\n data0,\n data1\n ],\n groups: [\n [\'data0\', \'data1\']\n ]\n names: {\n data0: "Overall",\n data1: "Income"\n }\n },\n size: {\n width: 240,\n height: 240\n },\n axis: {\n y: {\n min: 0,\n max: 32000\n }\n }\n });\n }\n ' template = u' function chart(obj){{\n var par = d3.select("{par}")\n {labels}\n {vars}\n \n var chart = c3.generate({{\n bindto: par,\n data: {{\n {xaxis}\n type: "{chartName}",\n columns: [\n {labelvar}{data}\n ],\n {groups}\n names: {{ {names} }}\n }},\n size: {{\n width: {width},\n height: {height}\n }},\n axis: {{\n y: {{\n min: {min},\n max: {max}\n }}\n }}\n }});\n }}' parentTemplate = u'.d3-tip #chart' if ext == True: parentTemplate = '#extTip #chart' lmin = [] lmax = [] rmin = 0 rmax = 0 for r in ranges: for f in r.getFields(): index = main.layer.fieldNameIndex(f) lmin.append(main.layer.minimumValue(index)) lmax.append(main.layer.maximumValue(index)) if len(lmin) > 0: min(lmin) if len(lmax) > 0: if self.stacked == True: rmax = sum(lmax) else: rmax = max(lmax) (min, max) = (rmin, rmax) labelPart = u'var labels = ["x", {0}];' labelTemplate = labelPart.format(u','.join(labels)) labelVarTemplate = u'labels,' varPart = u'var data{0} = ["data{0}", {1}];' fieldPart = u'obj["{0}"]' varTemplate = u'' varList = [] xaxisTemplate = u'x: "x",' for l in labels: if len(l) == 0: xaxisTemplate = u'' labelTemplate = u'' labelVarTemplate = u'' break dataPart = u'data{0}' dataTemplate = u'' dataList = [] namesPart = u'data{0}: "{1}" ' namesTemplate = u'' nameList = [] i = 1 for r in ranges: varList.append(varPart.format(str(i), r.getCsvFormattedFields(fieldPart))) dataList.append(dataPart.format(str(i))) nameList.append(namesPart.format(str(i), r.getName())) i += 1 varTemplate = ' '.join(varList) dataTemplate = ', '.join(dataList) namesTemplate = ','.join(nameList) value = template.format(par=parentTemplate, labels=labelTemplate, vars=varTemplate, xaxis=xaxisTemplate, chartName=self.c3Name, groups=self.getStackingScript(ranges), labelvar=labelVarTemplate, data=dataTemplate, names=namesTemplate, width=width, height=height, min=min, max=max) return value
d3MapRenderer
positive
def plot_eval_field(self, field, datasets='auto', new_row=False, semilogy=False, legend=False, title=None, y_range=None, dash_patterns=('solid', 'dashed', 'dotted')): y_axis_type = 'auto' if not semilogy else 'log' f = figure(y_axis_type=y_axis_type, background_fill_color='#EAEAF2', background_fill_alpha=0.6, y_range=y_range) assert datasets == 'auto' or isinstance(datasets, list) if datasets == 'auto': datasets = [] for ds_eval in self.eval_dicts.values(): datasets += list(ds_eval.keys()) datasets = set(datasets) for (dataset, dash_pattern) in zip(datasets, dash_patterns): for run_id in self.run_ids: color = self.colors[run_id] eval_df = self.eval_dicts[run_id] if dataset in eval_df: df = eval_df[dataset] if field in eval_df[dataset]: x = df['epoch'].values y = df[field].values run_num = run_id.split('-')[-1] name = f'{run_num}/{dataset}' name = '\n '.join(textwrap.wrap(name, width=20)) if len(x) == 1: f.circle(x, y, color=color, line_dash=dash_pattern, name=name) x = np.concatenate(([0], x)) y = np.concatenate((y, y)) f.line(x, y, line_width=1.0, color=color, line_dash=dash_pattern, legend_label=str(run_num), name=name) if title is not None: f.title.text = title if legend: f.legend.location = 'top_right' f.legend.click_policy = 'hide' f.legend.label_text_font_size = '6pt' else: f.legend.visible = False tool = HoverTool(tooltips=[('x,y', '@x, @y'), ('name', '$name')], line_policy='nearest', point_policy='snap_to_data') f.add_tools(tool) <DeepExtract> if new_row: row = [] self.figures.append(row) else: row = self.figures[-1] row.append(f) </DeepExtract> return f
def plot_eval_field(self, field, datasets='auto', new_row=False, semilogy=False, legend=False, title=None, y_range=None, dash_patterns=('solid', 'dashed', 'dotted')): y_axis_type = 'auto' if not semilogy else 'log' f = figure(y_axis_type=y_axis_type, background_fill_color='#EAEAF2', background_fill_alpha=0.6, y_range=y_range) assert datasets == 'auto' or isinstance(datasets, list) if datasets == 'auto': datasets = [] for ds_eval in self.eval_dicts.values(): datasets += list(ds_eval.keys()) datasets = set(datasets) for (dataset, dash_pattern) in zip(datasets, dash_patterns): for run_id in self.run_ids: color = self.colors[run_id] eval_df = self.eval_dicts[run_id] if dataset in eval_df: df = eval_df[dataset] if field in eval_df[dataset]: x = df['epoch'].values y = df[field].values run_num = run_id.split('-')[-1] name = f'{run_num}/{dataset}' name = '\n '.join(textwrap.wrap(name, width=20)) if len(x) == 1: f.circle(x, y, color=color, line_dash=dash_pattern, name=name) x = np.concatenate(([0], x)) y = np.concatenate((y, y)) f.line(x, y, line_width=1.0, color=color, line_dash=dash_pattern, legend_label=str(run_num), name=name) if title is not None: f.title.text = title if legend: f.legend.location = 'top_right' f.legend.click_policy = 'hide' f.legend.label_text_font_size = '6pt' else: f.legend.visible = False tool = HoverTool(tooltips=[('x,y', '@x, @y'), ('name', '$name')], line_policy='nearest', point_policy='snap_to_data') f.add_tools(tool) if new_row: row = [] self.figures.append(row) else: row = self.figures[-1] row.append(f) return f
cosypose
positive
def test_enable_ttl_wraps_exception(session, dynamodb): class Model(BaseModel): class Meta: ttl = {'column': 'expiry'} id = Column(String, hash_key=True) expiry = Column(Timestamp, dynamo_name='e!!') <DeepExtract> error_response = {'Error': {'Code': 'FooError', 'Message': 'FooMessage'}} operation_name = 'OperationName' dynamodb.update_time_to_live.side_effect = expected = botocore.exceptions.ClientError(error_response, operation_name) </DeepExtract> with pytest.raises(BloopException) as excinfo: session.enable_ttl('LocalTableName', Model) assert excinfo.value.__cause__ is expected
def test_enable_ttl_wraps_exception(session, dynamodb): class Model(BaseModel): class Meta: ttl = {'column': 'expiry'} id = Column(String, hash_key=True) expiry = Column(Timestamp, dynamo_name='e!!') error_response = {'Error': {'Code': 'FooError', 'Message': 'FooMessage'}} operation_name = 'OperationName' dynamodb.update_time_to_live.side_effect = expected = botocore.exceptions.ClientError(error_response, operation_name) with pytest.raises(BloopException) as excinfo: session.enable_ttl('LocalTableName', Model) assert excinfo.value.__cause__ is expected
bloop
positive
def test_get_device_id_returns_none_for_missing_device_info(self): self.test_request_envelope.context.system.device = None <DeepExtract> self.test_request_envelope.request = self.test_launch_request test_input = HandlerInput(request_envelope=self.test_request_envelope) </DeepExtract> self.assertEqual(get_device_id(handler_input=test_input), None, "get_device_id method didn't return None when input request doesn't have device information")
def test_get_device_id_returns_none_for_missing_device_info(self): self.test_request_envelope.context.system.device = None self.test_request_envelope.request = self.test_launch_request test_input = HandlerInput(request_envelope=self.test_request_envelope) self.assertEqual(get_device_id(handler_input=test_input), None, "get_device_id method didn't return None when input request doesn't have device information")
alexa-skills-kit-sdk-for-python
positive
def main(train_loader, test_loader, model): print('\nparsed options:\n{}\n'.format(vars(args))) if args.cuda: model.cuda() <DeepExtract> optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, dampening=0.9, weight_decay=args.wd) optimizer1 = optimizer </DeepExtract> if args.resume: if os.path.isfile(args.resume): print('=> loading checkpoint {}'.format(args.resume)) checkpoint = torch.load(args.resume) args.start_epoch = checkpoint['epoch'] checkpoint = torch.load(args.resume) model.load_state_dict(checkpoint['state_dict']) else: print('=> no checkpoint found at {}'.format(args.resume)) start = args.start_epoch end = start + args.epochs with torch.no_grad(): <DeepExtract> torch.cuda.empty_cache() model.eval() detector = ScaleSpaceAffinePatchExtractor(mrSize=5.192, num_features=3000, border=5, num_Baum_iters=1, AffNet=model) descriptor = HardNet() model_weights = 'HardNet++.pth' hncheckpoint = torch.load(model_weights) descriptor.load_state_dict(hncheckpoint['state_dict']) descriptor.eval() if args.cuda: detector = detector.cuda() descriptor = descriptor.cuda() input_img_fname1 = 'test-graf/img1.png' input_img_fname2 = 'test-graf/img6.png' H_fname = 'test-graf/H1to6p' output_img_fname = 'graf_match.png' img1 = load_grayscale_var(input_img_fname1) img2 = load_grayscale_var(input_img_fname2) H = np.loadtxt(H_fname) H1to2 = Variable(torch.from_numpy(H).float()) SNN_threshold = 0.8 with torch.no_grad(): (LAFs1, descriptors1) = get_geometry_and_descriptors(img1, detector, descriptor) torch.cuda.empty_cache() (LAFs2, descriptors2) = get_geometry_and_descriptors(img2, detector, descriptor) visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAFs1.detach().cpu().numpy().squeeze(), 'b', show=False, save_to=LOG_DIR + '/detections1_' + str(-1) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAFs2.detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/detection2_' + str(-1) + '.png') dist_matrix = distance_matrix_vector(descriptors1, descriptors2) (min_dist, idxs_in_2) = torch.min(dist_matrix, 1) dist_matrix[:, idxs_in_2] = 100000 (min_2nd_dist, idxs_2nd_in_2) = torch.min(dist_matrix, 1) mask = min_dist / (min_2nd_dist + 1e-08) <= SNN_threshold tent_matches_in_1 = indxs_in1 = torch.autograd.Variable(torch.arange(0, idxs_in_2.size(0)), requires_grad=False).cuda()[mask] tent_matches_in_2 = idxs_in_2[mask] tent_matches_in_1 = tent_matches_in_1.long() tent_matches_in_2 = tent_matches_in_2.long() LAF1s_tent = LAFs1[tent_matches_in_1, :, :] LAF2s_tent = LAFs2[tent_matches_in_2, :, :] (min_dist, plain_indxs_in1, idxs_in_2) = get_GT_correspondence_indexes(LAF1s_tent, LAF2s_tent, H1to2.cuda(), dist_threshold=6) plain_indxs_in1 = plain_indxs_in1.long() inl_ratio = float(plain_indxs_in1.size(0)) / float(tent_matches_in_1.size(0)) print('Test epoch', str(-1)) print('Test on graf1-6,', tent_matches_in_1.size(0), 'tentatives', plain_indxs_in1.size(0), 'true matches', str(inl_ratio)[:5], ' inl.ratio') visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAF1s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/inliers1_' + str(-1) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAF2s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/inliers2_' + str(-1) + '.png') print('Now native ori') del LAFs1, descriptors1, LAFs2, descriptors2, dist_matrix, tent_matches_in_2, plain_indxs_in1, tent_matches_in_1, idxs_in_2, mask, min_2nd_dist, idxs_2nd_in_2, 
min_dist, LAF1s_tent, LAF2s_tent torch.cuda.empty_cache() gc.collect() (LAFs1, descriptors1) = get_geometry_and_descriptors(img1, detector, descriptor, False) (LAFs2, descriptors2) = get_geometry_and_descriptors(img2, detector, descriptor, False) visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAFs1.detach().cpu().numpy().squeeze(), 'b', show=False, save_to=LOG_DIR + '/ori_detections1_' + str(-1) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAFs2.detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_detection2_' + str(-1) + '.png') dist_matrix = distance_matrix_vector(descriptors1, descriptors2) (min_dist, idxs_in_2) = torch.min(dist_matrix, 1) dist_matrix[:, idxs_in_2] = 100000 (min_2nd_dist, idxs_2nd_in_2) = torch.min(dist_matrix, 1) mask = min_dist / (min_2nd_dist + 1e-08) <= SNN_threshold tent_matches_in_1 = indxs_in1 = torch.autograd.Variable(torch.arange(0, idxs_in_2.size(0)), requires_grad=False).cuda()[mask] tent_matches_in_2 = idxs_in_2[mask] tent_matches_in_1 = tent_matches_in_1.long() tent_matches_in_2 = tent_matches_in_2.long() LAF1s_tent = LAFs1[tent_matches_in_1, :, :] LAF2s_tent = LAFs2[tent_matches_in_2, :, :] (min_dist, plain_indxs_in1, idxs_in_2) = get_GT_correspondence_indexes(LAF1s_tent, LAF2s_tent, H1to2.cuda(), dist_threshold=6) plain_indxs_in1 = plain_indxs_in1.long() inl_ratio = float(plain_indxs_in1.size(0)) / float(tent_matches_in_1.size(0)) print('Test epoch', str(-1)) print('Test on ori graf1-6,', tent_matches_in_1.size(0), 'tentatives', plain_indxs_in1.size(0), 'true matches', str(inl_ratio)[:5], ' inl.ratio') visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAF1s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_inliers1_' + str(-1) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAF2s_tent[idxs_in_2.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_inliers2_' + str(-1) + '.png') return </DeepExtract> for epoch in range(start, end): <DeepExtract> model.train() pbar = tqdm(enumerate(train_loader)) for (batch_idx, data) in pbar: (data_a, data_p) = data if args.cuda: (data_a, data_p) = (data_a.float().cuda(), data_p.float().cuda()) (data_a, data_p) = (Variable(data_a), Variable(data_p)) st = int((data_p.size(2) - model.PS) / 2) fin = st + model.PS ep1 = epoch while str(ep1) not in tilt_schedule.keys(): ep1 -= 1 if ep1 < 0: break max_tilt = tilt_schedule[str(ep1)] (data_a_aff_crop, data_a_aff, rot_LAFs_a, inv_rotmat_a, inv_TA_a) = extract_random_LAF(data_a, math.pi, max_tilt, model.PS) if 'Rot' not in args.arch: (data_p_aff_crop, data_p_aff, rot_LAFs_p, inv_rotmat_p, inv_TA_p) = extract_random_LAF(data_p, rot_LAFs_a, max_tilt, model.PS) else: (data_p_aff_crop, data_p_aff, rot_LAFs_p, inv_rotmat_p, inv_TA_p) = extract_random_LAF(data_p, math.pi, max_tilt, model.PS) if inv_rotmat_p is None: inv_rotmat_p = inv_rotmat_a (out_a_aff, out_p_aff) = (model(data_a_aff_crop, True), model(data_p_aff_crop, True)) out_patches_a_crop = extract_and_crop_patches_by_predicted_transform(data_a_aff, out_a_aff, crop_size=model.PS) out_patches_p_crop = extract_and_crop_patches_by_predicted_transform(data_p_aff, out_p_aff, crop_size=model.PS) desc_a = descriptor(out_patches_a_crop) desc_p = descriptor(out_patches_p_crop) descr_dist = torch.sqrt(((desc_a - desc_p) ** 2).view(data_a.size(0), -1).sum(dim=1) + 1e-06).mean() if args.loss == 'HardNet': loss = loss_HardNet(desc_a, desc_p) elif args.loss == 'HardNegC': loss = 
loss_HardNegC(desc_a, desc_p) elif args.loss == 'PosDist': loss = descr_dist else: print('Unknown loss function') sys.exit(1) optimizer1.zero_grad() loss.backward() optimizer1.step() adjust_learning_rate(optimizer1) if batch_idx % args.log_interval == 0: pbar.set_description('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.4f},{:.4f}'.format(epoch, batch_idx * len(data_a), len(train_loader.dataset), 100.0 * batch_idx / len(train_loader), float(loss.detach().cpu().numpy()), float(descr_dist.detach().cpu().numpy()))) torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict()}, '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch)) </DeepExtract> with torch.no_grad(): <DeepExtract> torch.cuda.empty_cache() model.eval() detector = ScaleSpaceAffinePatchExtractor(mrSize=5.192, num_features=3000, border=5, num_Baum_iters=1, AffNet=model) descriptor = HardNet() model_weights = 'HardNet++.pth' hncheckpoint = torch.load(model_weights) descriptor.load_state_dict(hncheckpoint['state_dict']) descriptor.eval() if args.cuda: detector = detector.cuda() descriptor = descriptor.cuda() input_img_fname1 = 'test-graf/img1.png' input_img_fname2 = 'test-graf/img6.png' H_fname = 'test-graf/H1to6p' output_img_fname = 'graf_match.png' img1 = load_grayscale_var(input_img_fname1) img2 = load_grayscale_var(input_img_fname2) H = np.loadtxt(H_fname) H1to2 = Variable(torch.from_numpy(H).float()) SNN_threshold = 0.8 with torch.no_grad(): (LAFs1, descriptors1) = get_geometry_and_descriptors(img1, detector, descriptor) torch.cuda.empty_cache() (LAFs2, descriptors2) = get_geometry_and_descriptors(img2, detector, descriptor) visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAFs1.detach().cpu().numpy().squeeze(), 'b', show=False, save_to=LOG_DIR + '/detections1_' + str(epoch) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAFs2.detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/detection2_' + str(epoch) + '.png') dist_matrix = distance_matrix_vector(descriptors1, descriptors2) (min_dist, idxs_in_2) = torch.min(dist_matrix, 1) dist_matrix[:, idxs_in_2] = 100000 (min_2nd_dist, idxs_2nd_in_2) = torch.min(dist_matrix, 1) mask = min_dist / (min_2nd_dist + 1e-08) <= SNN_threshold tent_matches_in_1 = indxs_in1 = torch.autograd.Variable(torch.arange(0, idxs_in_2.size(0)), requires_grad=False).cuda()[mask] tent_matches_in_2 = idxs_in_2[mask] tent_matches_in_1 = tent_matches_in_1.long() tent_matches_in_2 = tent_matches_in_2.long() LAF1s_tent = LAFs1[tent_matches_in_1, :, :] LAF2s_tent = LAFs2[tent_matches_in_2, :, :] (min_dist, plain_indxs_in1, idxs_in_2) = get_GT_correspondence_indexes(LAF1s_tent, LAF2s_tent, H1to2.cuda(), dist_threshold=6) plain_indxs_in1 = plain_indxs_in1.long() inl_ratio = float(plain_indxs_in1.size(0)) / float(tent_matches_in_1.size(0)) print('Test epoch', str(epoch)) print('Test on graf1-6,', tent_matches_in_1.size(0), 'tentatives', plain_indxs_in1.size(0), 'true matches', str(inl_ratio)[:5], ' inl.ratio') visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAF1s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/inliers1_' + str(epoch) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAF2s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/inliers2_' + str(epoch) + '.png') print('Now native ori') del LAFs1, descriptors1, LAFs2, descriptors2, dist_matrix, tent_matches_in_2, plain_indxs_in1, tent_matches_in_1, idxs_in_2, mask, min_2nd_dist, idxs_2nd_in_2, 
min_dist, LAF1s_tent, LAF2s_tent torch.cuda.empty_cache() gc.collect() (LAFs1, descriptors1) = get_geometry_and_descriptors(img1, detector, descriptor, False) (LAFs2, descriptors2) = get_geometry_and_descriptors(img2, detector, descriptor, False) visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAFs1.detach().cpu().numpy().squeeze(), 'b', show=False, save_to=LOG_DIR + '/ori_detections1_' + str(epoch) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAFs2.detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_detection2_' + str(epoch) + '.png') dist_matrix = distance_matrix_vector(descriptors1, descriptors2) (min_dist, idxs_in_2) = torch.min(dist_matrix, 1) dist_matrix[:, idxs_in_2] = 100000 (min_2nd_dist, idxs_2nd_in_2) = torch.min(dist_matrix, 1) mask = min_dist / (min_2nd_dist + 1e-08) <= SNN_threshold tent_matches_in_1 = indxs_in1 = torch.autograd.Variable(torch.arange(0, idxs_in_2.size(0)), requires_grad=False).cuda()[mask] tent_matches_in_2 = idxs_in_2[mask] tent_matches_in_1 = tent_matches_in_1.long() tent_matches_in_2 = tent_matches_in_2.long() LAF1s_tent = LAFs1[tent_matches_in_1, :, :] LAF2s_tent = LAFs2[tent_matches_in_2, :, :] (min_dist, plain_indxs_in1, idxs_in_2) = get_GT_correspondence_indexes(LAF1s_tent, LAF2s_tent, H1to2.cuda(), dist_threshold=6) plain_indxs_in1 = plain_indxs_in1.long() inl_ratio = float(plain_indxs_in1.size(0)) / float(tent_matches_in_1.size(0)) print('Test epoch', str(epoch)) print('Test on ori graf1-6,', tent_matches_in_1.size(0), 'tentatives', plain_indxs_in1.size(0), 'true matches', str(inl_ratio)[:5], ' inl.ratio') visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAF1s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_inliers1_' + str(epoch) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAF2s_tent[idxs_in_2.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_inliers2_' + str(epoch) + '.png') return </DeepExtract> return 0
def main(train_loader, test_loader, model): print('\nparsed options:\n{}\n'.format(vars(args))) if args.cuda: model.cuda() optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, dampening=0.9, weight_decay=args.wd) optimizer1 = optimizer if args.resume: if os.path.isfile(args.resume): print('=> loading checkpoint {}'.format(args.resume)) checkpoint = torch.load(args.resume) args.start_epoch = checkpoint['epoch'] checkpoint = torch.load(args.resume) model.load_state_dict(checkpoint['state_dict']) else: print('=> no checkpoint found at {}'.format(args.resume)) start = args.start_epoch end = start + args.epochs with torch.no_grad(): torch.cuda.empty_cache() model.eval() detector = ScaleSpaceAffinePatchExtractor(mrSize=5.192, num_features=3000, border=5, num_Baum_iters=1, AffNet=model) descriptor = HardNet() model_weights = 'HardNet++.pth' hncheckpoint = torch.load(model_weights) descriptor.load_state_dict(hncheckpoint['state_dict']) descriptor.eval() if args.cuda: detector = detector.cuda() descriptor = descriptor.cuda() input_img_fname1 = 'test-graf/img1.png' input_img_fname2 = 'test-graf/img6.png' H_fname = 'test-graf/H1to6p' output_img_fname = 'graf_match.png' img1 = load_grayscale_var(input_img_fname1) img2 = load_grayscale_var(input_img_fname2) H = np.loadtxt(H_fname) H1to2 = Variable(torch.from_numpy(H).float()) SNN_threshold = 0.8 with torch.no_grad(): (LAFs1, descriptors1) = get_geometry_and_descriptors(img1, detector, descriptor) torch.cuda.empty_cache() (LAFs2, descriptors2) = get_geometry_and_descriptors(img2, detector, descriptor) visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAFs1.detach().cpu().numpy().squeeze(), 'b', show=False, save_to=LOG_DIR + '/detections1_' + str(-1) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAFs2.detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/detection2_' + str(-1) + '.png') dist_matrix = distance_matrix_vector(descriptors1, descriptors2) (min_dist, idxs_in_2) = torch.min(dist_matrix, 1) dist_matrix[:, idxs_in_2] = 100000 (min_2nd_dist, idxs_2nd_in_2) = torch.min(dist_matrix, 1) mask = min_dist / (min_2nd_dist + 1e-08) <= SNN_threshold tent_matches_in_1 = indxs_in1 = torch.autograd.Variable(torch.arange(0, idxs_in_2.size(0)), requires_grad=False).cuda()[mask] tent_matches_in_2 = idxs_in_2[mask] tent_matches_in_1 = tent_matches_in_1.long() tent_matches_in_2 = tent_matches_in_2.long() LAF1s_tent = LAFs1[tent_matches_in_1, :, :] LAF2s_tent = LAFs2[tent_matches_in_2, :, :] (min_dist, plain_indxs_in1, idxs_in_2) = get_GT_correspondence_indexes(LAF1s_tent, LAF2s_tent, H1to2.cuda(), dist_threshold=6) plain_indxs_in1 = plain_indxs_in1.long() inl_ratio = float(plain_indxs_in1.size(0)) / float(tent_matches_in_1.size(0)) print('Test epoch', str(-1)) print('Test on graf1-6,', tent_matches_in_1.size(0), 'tentatives', plain_indxs_in1.size(0), 'true matches', str(inl_ratio)[:5], ' inl.ratio') visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAF1s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/inliers1_' + str(-1) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAF2s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/inliers2_' + str(-1) + '.png') print('Now native ori') del LAFs1, descriptors1, LAFs2, descriptors2, dist_matrix, tent_matches_in_2, plain_indxs_in1, tent_matches_in_1, idxs_in_2, mask, min_2nd_dist, idxs_2nd_in_2, min_dist, LAF1s_tent, LAF2s_tent 
torch.cuda.empty_cache() gc.collect() (LAFs1, descriptors1) = get_geometry_and_descriptors(img1, detector, descriptor, False) (LAFs2, descriptors2) = get_geometry_and_descriptors(img2, detector, descriptor, False) visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAFs1.detach().cpu().numpy().squeeze(), 'b', show=False, save_to=LOG_DIR + '/ori_detections1_' + str(-1) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAFs2.detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_detection2_' + str(-1) + '.png') dist_matrix = distance_matrix_vector(descriptors1, descriptors2) (min_dist, idxs_in_2) = torch.min(dist_matrix, 1) dist_matrix[:, idxs_in_2] = 100000 (min_2nd_dist, idxs_2nd_in_2) = torch.min(dist_matrix, 1) mask = min_dist / (min_2nd_dist + 1e-08) <= SNN_threshold tent_matches_in_1 = indxs_in1 = torch.autograd.Variable(torch.arange(0, idxs_in_2.size(0)), requires_grad=False).cuda()[mask] tent_matches_in_2 = idxs_in_2[mask] tent_matches_in_1 = tent_matches_in_1.long() tent_matches_in_2 = tent_matches_in_2.long() LAF1s_tent = LAFs1[tent_matches_in_1, :, :] LAF2s_tent = LAFs2[tent_matches_in_2, :, :] (min_dist, plain_indxs_in1, idxs_in_2) = get_GT_correspondence_indexes(LAF1s_tent, LAF2s_tent, H1to2.cuda(), dist_threshold=6) plain_indxs_in1 = plain_indxs_in1.long() inl_ratio = float(plain_indxs_in1.size(0)) / float(tent_matches_in_1.size(0)) print('Test epoch', str(-1)) print('Test on ori graf1-6,', tent_matches_in_1.size(0), 'tentatives', plain_indxs_in1.size(0), 'true matches', str(inl_ratio)[:5], ' inl.ratio') visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAF1s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_inliers1_' + str(-1) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAF2s_tent[idxs_in_2.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_inliers2_' + str(-1) + '.png') return for epoch in range(start, end): model.train() pbar = tqdm(enumerate(train_loader)) for (batch_idx, data) in pbar: (data_a, data_p) = data if args.cuda: (data_a, data_p) = (data_a.float().cuda(), data_p.float().cuda()) (data_a, data_p) = (Variable(data_a), Variable(data_p)) st = int((data_p.size(2) - model.PS) / 2) fin = st + model.PS ep1 = epoch while str(ep1) not in tilt_schedule.keys(): ep1 -= 1 if ep1 < 0: break max_tilt = tilt_schedule[str(ep1)] (data_a_aff_crop, data_a_aff, rot_LAFs_a, inv_rotmat_a, inv_TA_a) = extract_random_LAF(data_a, math.pi, max_tilt, model.PS) if 'Rot' not in args.arch: (data_p_aff_crop, data_p_aff, rot_LAFs_p, inv_rotmat_p, inv_TA_p) = extract_random_LAF(data_p, rot_LAFs_a, max_tilt, model.PS) else: (data_p_aff_crop, data_p_aff, rot_LAFs_p, inv_rotmat_p, inv_TA_p) = extract_random_LAF(data_p, math.pi, max_tilt, model.PS) if inv_rotmat_p is None: inv_rotmat_p = inv_rotmat_a (out_a_aff, out_p_aff) = (model(data_a_aff_crop, True), model(data_p_aff_crop, True)) out_patches_a_crop = extract_and_crop_patches_by_predicted_transform(data_a_aff, out_a_aff, crop_size=model.PS) out_patches_p_crop = extract_and_crop_patches_by_predicted_transform(data_p_aff, out_p_aff, crop_size=model.PS) desc_a = descriptor(out_patches_a_crop) desc_p = descriptor(out_patches_p_crop) descr_dist = torch.sqrt(((desc_a - desc_p) ** 2).view(data_a.size(0), -1).sum(dim=1) + 1e-06).mean() if args.loss == 'HardNet': loss = loss_HardNet(desc_a, desc_p) elif args.loss == 'HardNegC': loss = loss_HardNegC(desc_a, desc_p) elif args.loss == 'PosDist': loss = 
descr_dist else: print('Unknown loss function') sys.exit(1) optimizer1.zero_grad() loss.backward() optimizer1.step() adjust_learning_rate(optimizer1) if batch_idx % args.log_interval == 0: pbar.set_description('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.4f},{:.4f}'.format(epoch, batch_idx * len(data_a), len(train_loader.dataset), 100.0 * batch_idx / len(train_loader), float(loss.detach().cpu().numpy()), float(descr_dist.detach().cpu().numpy()))) torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict()}, '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch)) with torch.no_grad(): torch.cuda.empty_cache() model.eval() detector = ScaleSpaceAffinePatchExtractor(mrSize=5.192, num_features=3000, border=5, num_Baum_iters=1, AffNet=model) descriptor = HardNet() model_weights = 'HardNet++.pth' hncheckpoint = torch.load(model_weights) descriptor.load_state_dict(hncheckpoint['state_dict']) descriptor.eval() if args.cuda: detector = detector.cuda() descriptor = descriptor.cuda() input_img_fname1 = 'test-graf/img1.png' input_img_fname2 = 'test-graf/img6.png' H_fname = 'test-graf/H1to6p' output_img_fname = 'graf_match.png' img1 = load_grayscale_var(input_img_fname1) img2 = load_grayscale_var(input_img_fname2) H = np.loadtxt(H_fname) H1to2 = Variable(torch.from_numpy(H).float()) SNN_threshold = 0.8 with torch.no_grad(): (LAFs1, descriptors1) = get_geometry_and_descriptors(img1, detector, descriptor) torch.cuda.empty_cache() (LAFs2, descriptors2) = get_geometry_and_descriptors(img2, detector, descriptor) visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAFs1.detach().cpu().numpy().squeeze(), 'b', show=False, save_to=LOG_DIR + '/detections1_' + str(epoch) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAFs2.detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/detection2_' + str(epoch) + '.png') dist_matrix = distance_matrix_vector(descriptors1, descriptors2) (min_dist, idxs_in_2) = torch.min(dist_matrix, 1) dist_matrix[:, idxs_in_2] = 100000 (min_2nd_dist, idxs_2nd_in_2) = torch.min(dist_matrix, 1) mask = min_dist / (min_2nd_dist + 1e-08) <= SNN_threshold tent_matches_in_1 = indxs_in1 = torch.autograd.Variable(torch.arange(0, idxs_in_2.size(0)), requires_grad=False).cuda()[mask] tent_matches_in_2 = idxs_in_2[mask] tent_matches_in_1 = tent_matches_in_1.long() tent_matches_in_2 = tent_matches_in_2.long() LAF1s_tent = LAFs1[tent_matches_in_1, :, :] LAF2s_tent = LAFs2[tent_matches_in_2, :, :] (min_dist, plain_indxs_in1, idxs_in_2) = get_GT_correspondence_indexes(LAF1s_tent, LAF2s_tent, H1to2.cuda(), dist_threshold=6) plain_indxs_in1 = plain_indxs_in1.long() inl_ratio = float(plain_indxs_in1.size(0)) / float(tent_matches_in_1.size(0)) print('Test epoch', str(epoch)) print('Test on graf1-6,', tent_matches_in_1.size(0), 'tentatives', plain_indxs_in1.size(0), 'true matches', str(inl_ratio)[:5], ' inl.ratio') visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAF1s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/inliers1_' + str(epoch) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAF2s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/inliers2_' + str(epoch) + '.png') print('Now native ori') del LAFs1, descriptors1, LAFs2, descriptors2, dist_matrix, tent_matches_in_2, plain_indxs_in1, tent_matches_in_1, idxs_in_2, mask, min_2nd_dist, idxs_2nd_in_2, min_dist, LAF1s_tent, LAF2s_tent torch.cuda.empty_cache() gc.collect() (LAFs1, descriptors1) = 
get_geometry_and_descriptors(img1, detector, descriptor, False) (LAFs2, descriptors2) = get_geometry_and_descriptors(img2, detector, descriptor, False) visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAFs1.detach().cpu().numpy().squeeze(), 'b', show=False, save_to=LOG_DIR + '/ori_detections1_' + str(epoch) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAFs2.detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_detection2_' + str(epoch) + '.png') dist_matrix = distance_matrix_vector(descriptors1, descriptors2) (min_dist, idxs_in_2) = torch.min(dist_matrix, 1) dist_matrix[:, idxs_in_2] = 100000 (min_2nd_dist, idxs_2nd_in_2) = torch.min(dist_matrix, 1) mask = min_dist / (min_2nd_dist + 1e-08) <= SNN_threshold tent_matches_in_1 = indxs_in1 = torch.autograd.Variable(torch.arange(0, idxs_in_2.size(0)), requires_grad=False).cuda()[mask] tent_matches_in_2 = idxs_in_2[mask] tent_matches_in_1 = tent_matches_in_1.long() tent_matches_in_2 = tent_matches_in_2.long() LAF1s_tent = LAFs1[tent_matches_in_1, :, :] LAF2s_tent = LAFs2[tent_matches_in_2, :, :] (min_dist, plain_indxs_in1, idxs_in_2) = get_GT_correspondence_indexes(LAF1s_tent, LAF2s_tent, H1to2.cuda(), dist_threshold=6) plain_indxs_in1 = plain_indxs_in1.long() inl_ratio = float(plain_indxs_in1.size(0)) / float(tent_matches_in_1.size(0)) print('Test epoch', str(epoch)) print('Test on ori graf1-6,', tent_matches_in_1.size(0), 'tentatives', plain_indxs_in1.size(0), 'true matches', str(inl_ratio)[:5], ' inl.ratio') visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAF1s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_inliers1_' + str(epoch) + '.png') visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAF2s_tent[idxs_in_2.long(), :, :].detach().cpu().numpy().squeeze(), 'g', show=False, save_to=LOG_DIR + '/ori_inliers2_' + str(epoch) + '.png') return return 0
affnet
positive
@refetch_frequency(timedelta(minutes=5))
def fetch_production(zone_key: VALID_ZONE_KEYS='FO', session: Optional[Session]=None, target_datetime: Optional[datetime]=None, logger: Logger=getLogger('FO')) -> dict:
    if target_datetime:
        raise NotImplementedError('This parser is not yet able to parse past dates')
    ses = session or Session()
    url = 'https://www.sev.fo/api/realtimemap/now'
    response: Response = ses.get(url)
    obj = response.json()
    data = {'zoneKey': zone_key, 'capacity': {}, 'production': {'biomass': 0, 'coal': 0, 'gas': 0, 'geothermal': 0, 'nuclear': 0, 'solar': 0, 'unknown': 0}, 'storage': {}, 'source': 'sev.fo'}
    for (key, value) in obj.items():
        if key == 'tiden':
            data['datetime'] = arrow.get(arrow.get(value).datetime, 'Atlantic/Faroe').datetime
        elif 'Sum' in key:
            continue
        elif 'Test' in key:
            continue
        elif 'VnVand' in key:
            continue
        elif key.endswith(ZONE_MAP[zone_key]['data_key']):
            raw_generation_type: str = key.replace(ZONE_MAP[zone_key]['data_key'], '')
            <DeepExtract>
            generation_type = MAP_GENERATION.get(raw_generation_type, None)
            </DeepExtract>
            if not generation_type:
                raise ParserException('FO.py', f'Unknown generation type: {raw_generation_type}', zone_key)
            value = float(value.replace(',', '.'))
            data['production'][generation_type] = data['production'].get(generation_type, 0) + value
        else:
            pass
    data = validate(data, logger, required=ZONE_MAP[zone_key]['validation']['required'], floor=ZONE_MAP[zone_key]['validation']['floor'])
    if isinstance(data, dict):
        return data
    else:
        raise ParserException('FO.py', f'No valid data was returned for {zone_key}', zone_key)
@refetch_frequency(timedelta(minutes=5))
def fetch_production(zone_key: VALID_ZONE_KEYS='FO', session: Optional[Session]=None, target_datetime: Optional[datetime]=None, logger: Logger=getLogger('FO')) -> dict:
    if target_datetime:
        raise NotImplementedError('This parser is not yet able to parse past dates')
    ses = session or Session()
    url = 'https://www.sev.fo/api/realtimemap/now'
    response: Response = ses.get(url)
    obj = response.json()
    data = {'zoneKey': zone_key, 'capacity': {}, 'production': {'biomass': 0, 'coal': 0, 'gas': 0, 'geothermal': 0, 'nuclear': 0, 'solar': 0, 'unknown': 0}, 'storage': {}, 'source': 'sev.fo'}
    for (key, value) in obj.items():
        if key == 'tiden':
            data['datetime'] = arrow.get(arrow.get(value).datetime, 'Atlantic/Faroe').datetime
        elif 'Sum' in key:
            continue
        elif 'Test' in key:
            continue
        elif 'VnVand' in key:
            continue
        elif key.endswith(ZONE_MAP[zone_key]['data_key']):
            raw_generation_type: str = key.replace(ZONE_MAP[zone_key]['data_key'], '')
            generation_type = MAP_GENERATION.get(raw_generation_type, None)
            if not generation_type:
                raise ParserException('FO.py', f'Unknown generation type: {raw_generation_type}', zone_key)
            value = float(value.replace(',', '.'))
            data['production'][generation_type] = data['production'].get(generation_type, 0) + value
        else:
            pass
    data = validate(data, logger, required=ZONE_MAP[zone_key]['validation']['required'], floor=ZONE_MAP[zone_key]['validation']['floor'])
    if isinstance(data, dict):
        return data
    else:
        raise ParserException('FO.py', f'No valid data was returned for {zone_key}', zone_key)
electricitymap-contrib
positive
def delete_backup(config, backup_names, all_nodes):
    monitoring = Monitoring(config=config.monitoring)
    try:
        storage = Storage(config=config.storage)
        cluster_backups = storage.list_cluster_backups()
        <DeepExtract>
        backups_to_purge = list()
        cluster_backups_by_name = {bk.name: bk for bk in cluster_backups}
        for backup_name in backup_names:
            if backup_name in cluster_backups_by_name:
                backups_to_purge.extend(cluster_backups_by_name[backup_name].node_backups.values())
            else:
                raise KeyError('The backup {} does not exist'.format(backup_name))
        if not all_nodes:
            backups_to_purge = [nb for nb in backups_to_purge if storage.config.fqdn == nb.fqdn]
        backups_to_purge = backups_to_purge
        </DeepExtract>
        logging.info('Deleting Backup(s) {}...'.format(','.join(backup_names)))
        <DeepExtract>
        logging.info('{} backups are candidate to be purged'.format(len(backups_to_purge)))
        fqdns = set()
        nb_objects_purged = 0
        total_purged_size = 0
        total_objects_within_grace = 0
        for backup in backups_to_purge:
            (purged_objects, purged_size) = purge_backup(storage, backup)
            nb_objects_purged += purged_objects
            total_purged_size += purged_size
            fqdns.add(backup.fqdn)
        if len(fqdns) == 0:
            fqdns.add(storage.config.fqdn)
        for fqdn in fqdns:
            (cleaned_objects_count, cleaned_objects_size, nb_objects_within_grace) = cleanup_obsolete_files(storage, fqdn, config.storage.backup_grace_period_in_days)
            nb_objects_purged += cleaned_objects_count
            total_purged_size += cleaned_objects_size
            total_objects_within_grace += nb_objects_within_grace
        logging.info('Purged {} objects with a total size of {}'.format(nb_objects_purged, format_bytes_str(total_purged_size)))
        if total_objects_within_grace > 0:
            logging.info('{} objects within {} days grace period were not deleted'.format(total_objects_within_grace, config.storage.backup_grace_period_in_days))
        return (nb_objects_purged, total_purged_size, total_objects_within_grace)
        </DeepExtract>
        logging.debug('Emitting metrics')
        tags = ['medusa-node-backup', 'delete-error', 'DELETE-ERROR']
        monitoring.send(tags, 0)
    except Exception as e:
        tags = ['medusa-node-backup', 'delete-error', 'DELETE-ERROR']
        monitoring.send(tags, 1)
        medusa.utils.handle_exception(e, 'This error happened during the delete of backup(s) "{}": {}'.format(','.join(backup_names), str(e)), config)
def delete_backup(config, backup_names, all_nodes):
    monitoring = Monitoring(config=config.monitoring)
    try:
        storage = Storage(config=config.storage)
        cluster_backups = storage.list_cluster_backups()
        backups_to_purge = list()
        cluster_backups_by_name = {bk.name: bk for bk in cluster_backups}
        for backup_name in backup_names:
            if backup_name in cluster_backups_by_name:
                backups_to_purge.extend(cluster_backups_by_name[backup_name].node_backups.values())
            else:
                raise KeyError('The backup {} does not exist'.format(backup_name))
        if not all_nodes:
            backups_to_purge = [nb for nb in backups_to_purge if storage.config.fqdn == nb.fqdn]
        backups_to_purge = backups_to_purge
        logging.info('Deleting Backup(s) {}...'.format(','.join(backup_names)))
        logging.info('{} backups are candidate to be purged'.format(len(backups_to_purge)))
        fqdns = set()
        nb_objects_purged = 0
        total_purged_size = 0
        total_objects_within_grace = 0
        for backup in backups_to_purge:
            (purged_objects, purged_size) = purge_backup(storage, backup)
            nb_objects_purged += purged_objects
            total_purged_size += purged_size
            fqdns.add(backup.fqdn)
        if len(fqdns) == 0:
            fqdns.add(storage.config.fqdn)
        for fqdn in fqdns:
            (cleaned_objects_count, cleaned_objects_size, nb_objects_within_grace) = cleanup_obsolete_files(storage, fqdn, config.storage.backup_grace_period_in_days)
            nb_objects_purged += cleaned_objects_count
            total_purged_size += cleaned_objects_size
            total_objects_within_grace += nb_objects_within_grace
        logging.info('Purged {} objects with a total size of {}'.format(nb_objects_purged, format_bytes_str(total_purged_size)))
        if total_objects_within_grace > 0:
            logging.info('{} objects within {} days grace period were not deleted'.format(total_objects_within_grace, config.storage.backup_grace_period_in_days))
        return (nb_objects_purged, total_purged_size, total_objects_within_grace)
        logging.debug('Emitting metrics')
        tags = ['medusa-node-backup', 'delete-error', 'DELETE-ERROR']
        monitoring.send(tags, 0)
    except Exception as e:
        tags = ['medusa-node-backup', 'delete-error', 'DELETE-ERROR']
        monitoring.send(tags, 1)
        medusa.utils.handle_exception(e, 'This error happened during the delete of backup(s) "{}": {}'.format(','.join(backup_names), str(e)), config)
cassandra-medusa
positive
def test_assignment(self):
    entry_point = variable_assign_value()
    node = entry_point.get_children()[1]
    <DeepExtract>
    xml_str = self.set_namespace(BlocklyXmlBuilder().build(node))
    </DeepExtract>
    parsed = BlocklyXmlParser().parse(xml_str)
    self.assertIsInstance(parsed, list)
    self.assertEqual(1, len(parsed))
    root = parsed[0]
    assignment = root['data']
    self.assertIsInstance(assignment, dict)
    self.assertEqual(get_content_type_id(Assignment), assignment['content_type'])
    children = root['children']
    self.assertIsInstance(children, list)
    self.assertEqual(2, len(children))
    (variable, constant) = children
    self.assertIsInstance(variable, dict)
    variable_data = variable['data']
    self.assertEqual(get_content_type_id(Variable), variable_data['content_type'])
    self.assertEqual('A', variable_data['name'])
    self.assertIsInstance(constant, dict)
    constant_data = constant['data']
    self.assertEqual(get_content_type_id(NumberConstant), constant_data['content_type'])
    self.assertEqual(1, constant_data['value'])
def test_assignment(self):
    entry_point = variable_assign_value()
    node = entry_point.get_children()[1]
    xml_str = self.set_namespace(BlocklyXmlBuilder().build(node))
    parsed = BlocklyXmlParser().parse(xml_str)
    self.assertIsInstance(parsed, list)
    self.assertEqual(1, len(parsed))
    root = parsed[0]
    assignment = root['data']
    self.assertIsInstance(assignment, dict)
    self.assertEqual(get_content_type_id(Assignment), assignment['content_type'])
    children = root['children']
    self.assertIsInstance(children, list)
    self.assertEqual(2, len(children))
    (variable, constant) = children
    self.assertIsInstance(variable, dict)
    variable_data = variable['data']
    self.assertEqual(get_content_type_id(Variable), variable_data['content_type'])
    self.assertEqual('A', variable_data['name'])
    self.assertIsInstance(constant, dict)
    constant_data = constant['data']
    self.assertEqual(get_content_type_id(NumberConstant), constant_data['content_type'])
    self.assertEqual(1, constant_data['value'])
django-business-logic
positive
def get_value(self, player):
    if player.id not in self.values:
        <DeepExtract>
        v = player
        for prop in self.properies:
            if isinstance(prop, str):
                a = getattr(v, prop)
                if callable(a):
                    v = a()
                else:
                    v = a
            if callable(prop):
                v = prop(v)
        self.values[player.id] = v
        </DeepExtract>
    return self.values[player.id]
def get_value(self, player):
    if player.id not in self.values:
        v = player
        for prop in self.properies:
            if isinstance(prop, str):
                a = getattr(v, prop)
                if callable(a):
                    v = a()
                else:
                    v = a
            if callable(prop):
                v = prop(v)
        self.values[player.id] = v
    return self.values[player.id]
aligulac
positive
def _connect_with_pooling(keywords):

    def get_pooled_db():
        from DBUtils import PooledDB
        if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
            return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
        else:
            return PooledDB.PooledDB(creator=self.db_module, **keywords)
    if getattr(self, '_pooleddb', None) is None:
        <DeepExtract>
        from DBUtils import PooledDB
        if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
            self._pooleddb = PooledDB.PooledDB(dbapi=self.db_module, **keywords)
        else:
            self._pooleddb = PooledDB.PooledDB(creator=self.db_module, **keywords)
        </DeepExtract>
    return self._pooleddb.connection()
def _connect_with_pooling(keywords):

    def get_pooled_db():
        from DBUtils import PooledDB
        if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
            return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
        else:
            return PooledDB.PooledDB(creator=self.db_module, **keywords)
    if getattr(self, '_pooleddb', None) is None:
        from DBUtils import PooledDB
        if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
            self._pooleddb = PooledDB.PooledDB(dbapi=self.db_module, **keywords)
        else:
            self._pooleddb = PooledDB.PooledDB(creator=self.db_module, **keywords)
    return self._pooleddb.connection()
cosa-nostra
positive
def set_plugin_path(self, path: str) -> None:
    """Use this method to set the path for Chepy plugins.

    Args:
        path (str): Path to plugins directory

    Returns:
        None
    """
    <DeepExtract>
    expand_path = Path(path).expanduser().absolute()
    </DeepExtract>
    if expand_path.exists():
        conf_path = Path().home() / '.chepy' / 'chepy.conf'
        c = ConfigParser()
        c.read(conf_path)
        c.set('Plugins', 'pluginpath', str(expand_path))
        with open(conf_path, 'w') as f:
            c.write(f)
        <DeepExtract>
        logging.info(blue(green('Plugin path has been set. Restart for changes.')))
        return None
        </DeepExtract>
        sys.exit()
        return None
    else:
        raise AttributeError('The path does not exist')
def set_plugin_path(self, path: str) -> None:
    """Use this method to set the path for Chepy plugins.

    Args:
        path (str): Path to plugins directory

    Returns:
        None
    """
    expand_path = Path(path).expanduser().absolute()
    if expand_path.exists():
        conf_path = Path().home() / '.chepy' / 'chepy.conf'
        c = ConfigParser()
        c.read(conf_path)
        c.set('Plugins', 'pluginpath', str(expand_path))
        with open(conf_path, 'w') as f:
            c.write(f)
        logging.info(blue(green('Plugin path has been set. Restart for changes.')))
        return None
        sys.exit()
        return None
    else:
        raise AttributeError('The path does not exist')
chepy
positive
@classmethod
def from_pretrained(cls, model_name, num_classes=1000, in_channels=3):
    model = cls.from_name(model_name, override_params={'num_classes': num_classes})
    <DeepExtract>
    state_dict = model_zoo.load_url(url_map[model_name])
    if num_classes == 1000:
        model.load_state_dict(state_dict)
    else:
        state_dict.pop('_fc.weight')
        state_dict.pop('_fc.bias')
        res = model.load_state_dict(state_dict, strict=False)
        assert set(res.missing_keys) == set(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
    print('Loaded pretrained weights for {}'.format(model_name))
    </DeepExtract>
    if in_channels != 3:
        <DeepExtract>
        if model._global_params.image_size is None:
            Conv2d = Conv2dDynamicSamePadding
        else:
            Conv2d = partial(Conv2dStaticSamePadding, image_size=model._global_params.image_size)
        </DeepExtract>
        <DeepExtract>
        multiplier = model._global_params.width_coefficient
        if not multiplier:
            out_channels = 32
        divisor = model._global_params.depth_divisor
        min_depth = model._global_params.min_depth
        32 *= multiplier
        min_depth = min_depth or divisor
        new_filters = max(min_depth, int(32 + divisor / 2) // divisor * divisor)
        if new_filters < 0.9 * 32:
            new_filters += divisor
        out_channels = int(new_filters)
        </DeepExtract>
        model._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
    return model
@classmethod
def from_pretrained(cls, model_name, num_classes=1000, in_channels=3):
    model = cls.from_name(model_name, override_params={'num_classes': num_classes})
    state_dict = model_zoo.load_url(url_map[model_name])
    if num_classes == 1000:
        model.load_state_dict(state_dict)
    else:
        state_dict.pop('_fc.weight')
        state_dict.pop('_fc.bias')
        res = model.load_state_dict(state_dict, strict=False)
        assert set(res.missing_keys) == set(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
    print('Loaded pretrained weights for {}'.format(model_name))
    if in_channels != 3:
        if model._global_params.image_size is None:
            Conv2d = Conv2dDynamicSamePadding
        else:
            Conv2d = partial(Conv2dStaticSamePadding, image_size=model._global_params.image_size)
        multiplier = model._global_params.width_coefficient
        if not multiplier:
            out_channels = 32
        divisor = model._global_params.depth_divisor
        min_depth = model._global_params.min_depth
        32 *= multiplier
        min_depth = min_depth or divisor
        new_filters = max(min_depth, int(32 + divisor / 2) // divisor * divisor)
        if new_filters < 0.9 * 32:
            new_filters += divisor
        out_channels = int(new_filters)
        model._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
    return model
EfficientDet-bifpn
positive
def count_and_scan_documents(self, index, bool_clause=None, sort_clause=None, query_fields=None, search_query=None, model_settings=None):
    """
    Count the number of document and fetch them from Elasticsearch
    :param index: on which index the request must be done
    :param bool_clause: boolean condition
    :param sort_clause: request to sort result
    :param query_fields: the query field
    :param search_query: the search query
    :param model_settings: part of the configuration linked to the model
    :return: the number of document and a generator/list of all documents
    """
    <DeepExtract>
    if model_settings is None:
        timestamp_field = self.settings.config.get('general', 'timestamp_field', fallback=DEFAULT_TIMESTAMP_FIELD)
        history_window_days = self.settings.config.getint('general', 'history_window_days')
        history_window_hours = self.settings.config.getint('general', 'history_window_hours')
    else:
        timestamp_field = model_settings['timestamp_field']
        history_window_days = model_settings['history_window_days']
        history_window_hours = model_settings['history_window_hours']
    (timestamp_field, history_window_days, history_window_hours) = (timestamp_field, history_window_days, history_window_hours)
    </DeepExtract>
    <DeepExtract>
    if start_time is None or end_time is None:
        start_time = dt.datetime.now() - dt.timedelta(days=history_window_days, hours=history_window_hours)
        end_time = dt.datetime.now()
    start_time_iso = start_time.isoformat()
    end_time_iso = end_time.isoformat()
    time_filter = {'range': {str(timestamp_field): {'gte': start_time_iso, 'lte': end_time_iso}}}
    search_range = time_filter
    </DeepExtract>
    <DeepExtract>
    res = self.conn.count(index=index, body=build_search_query(bool_clause=bool_clause, search_range=search_range, query_fields=query_fields, search_query=search_query))
    total_events = res['count']
    </DeepExtract>
    if total_events > 0:
        return (total_events, self._scan(index, search_range, bool_clause, sort_clause, query_fields, search_query, model_settings))
    return (total_events, [])
def count_and_scan_documents(self, index, bool_clause=None, sort_clause=None, query_fields=None, search_query=None, model_settings=None):
    """
    Count the number of document and fetch them from Elasticsearch
    :param index: on which index the request must be done
    :param bool_clause: boolean condition
    :param sort_clause: request to sort result
    :param query_fields: the query field
    :param search_query: the search query
    :param model_settings: part of the configuration linked to the model
    :return: the number of document and a generator/list of all documents
    """
    if model_settings is None:
        timestamp_field = self.settings.config.get('general', 'timestamp_field', fallback=DEFAULT_TIMESTAMP_FIELD)
        history_window_days = self.settings.config.getint('general', 'history_window_days')
        history_window_hours = self.settings.config.getint('general', 'history_window_hours')
    else:
        timestamp_field = model_settings['timestamp_field']
        history_window_days = model_settings['history_window_days']
        history_window_hours = model_settings['history_window_hours']
    (timestamp_field, history_window_days, history_window_hours) = (timestamp_field, history_window_days, history_window_hours)
    if start_time is None or end_time is None:
        start_time = dt.datetime.now() - dt.timedelta(days=history_window_days, hours=history_window_hours)
        end_time = dt.datetime.now()
    start_time_iso = start_time.isoformat()
    end_time_iso = end_time.isoformat()
    time_filter = {'range': {str(timestamp_field): {'gte': start_time_iso, 'lte': end_time_iso}}}
    search_range = time_filter
    res = self.conn.count(index=index, body=build_search_query(bool_clause=bool_clause, search_range=search_range, query_fields=query_fields, search_query=search_query))
    total_events = res['count']
    if total_events > 0:
        return (total_events, self._scan(index, search_range, bool_clause, sort_clause, query_fields, search_query, model_settings))
    return (total_events, [])
ee-outliers
positive
def vis_imputed():
    """
    Visualise variables post-imputation
    """
    out_dir = paths.root + '/visualisation/variables/5_imputed/'
    fname = paths.root + '/5_imputed/fmat_endpoints_imputed_170308.h5'
    df = pd.read_hdf(fname, mode='r')
    variable_ids = df.columns
    for varid in variable_ids:
        if varid in {'PatientID', 'RelDatetime', 'AbsDatetime', 'event1', 'event2', 'event3', 'maybe_event1', 'maybe_event2', 'maybe_event3', 'probably_not_event1', 'probably_not_event2', 'probably_not_event3'}:
            continue
        var_df = df[['PatientID', 'RelDatetime', varid]]
        <DeepExtract>
        assert 'PatientID' in var_df.columns
        assert varid in var_df.columns
        try:
            varname = id2string[varid]
            print('Plotting', varname, '(' + varid + ')')
        except KeyError:
            print('WARNING: no string recorded for id', varid)
            varname = varid
        if np.sum(np.isfinite(var_df[[varid]].values)) < 2:
            print('Variable', varname, '(' + varid + ') is (almost) entirely nan. Skipping.')
            return False
        var_df = var_df.dropna()
        identifier = out_dir + varid + '_' + re.sub('/', '', re.sub(' ', '_', varname))
        var_hist(var_df[[varid]].values, varname, varid, identifier)
        var_hist_bypatient(var_df[['PatientID', varid]], varname, varid, identifier)
        return True
        </DeepExtract>
    return True
def vis_imputed():
    """
    Visualise variables post-imputation
    """
    out_dir = paths.root + '/visualisation/variables/5_imputed/'
    fname = paths.root + '/5_imputed/fmat_endpoints_imputed_170308.h5'
    df = pd.read_hdf(fname, mode='r')
    variable_ids = df.columns
    for varid in variable_ids:
        if varid in {'PatientID', 'RelDatetime', 'AbsDatetime', 'event1', 'event2', 'event3', 'maybe_event1', 'maybe_event2', 'maybe_event3', 'probably_not_event1', 'probably_not_event2', 'probably_not_event3'}:
            continue
        var_df = df[['PatientID', 'RelDatetime', varid]]
        assert 'PatientID' in var_df.columns
        assert varid in var_df.columns
        try:
            varname = id2string[varid]
            print('Plotting', varname, '(' + varid + ')')
        except KeyError:
            print('WARNING: no string recorded for id', varid)
            varname = varid
        if np.sum(np.isfinite(var_df[[varid]].values)) < 2:
            print('Variable', varname, '(' + varid + ') is (almost) entirely nan. Skipping.')
            return False
        var_df = var_df.dropna()
        identifier = out_dir + varid + '_' + re.sub('/', '', re.sub(' ', '_', varname))
        var_hist(var_df[[varid]].values, varname, varid, identifier)
        var_hist_bypatient(var_df[['PatientID', varid]], varname, varid, identifier)
        return True
    return True
circEWS
positive
def min_index_util(self, r, vset):
    vset.add(r)
    tmp = r
    for c in self.nodes[r].children:
        if c not in vset:
            <DeepExtract>
            vset.add(c)
            tmp = c
            for c in self.nodes[c].children:
                if c not in vset:
                    mc = self.min_index_util(c, vset)
                    if mc < tmp:
                        tmp = mc
            mc = tmp
            </DeepExtract>
            if mc < tmp:
                tmp = mc
    return tmp
def min_index_util(self, r, vset):
    vset.add(r)
    tmp = r
    for c in self.nodes[r].children:
        if c not in vset:
            vset.add(c)
            tmp = c
            for c in self.nodes[c].children:
                if c not in vset:
                    mc = self.min_index_util(c, vset)
                    if mc < tmp:
                        tmp = mc
            mc = tmp
            if mc < tmp:
                tmp = mc
    return tmp
camr
positive
def __init__(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None):
    data = [] if data is None else data
    files = [] if files is None else files
    headers = {} if headers is None else headers
    params = {} if params is None else params
    hooks = {} if hooks is None else hooks
    self.hooks = default_hooks()
    for (k, v) in list(hooks.items()):
        <DeepExtract>
        if k not in self.hooks:
            raise ValueError('Unsupported event specified, with event name "%s"' % k)
        if isinstance(v, collections.Callable):
            self.hooks[k].append(v)
        elif hasattr(v, '__iter__'):
            self.hooks[k].extend((h for h in v if isinstance(h, collections.Callable)))
        </DeepExtract>
    self.method = method
    self.url = url
    self.headers = headers
    self.files = files
    self.data = data
    self.params = params
    self.auth = auth
    self.cookies = cookies
def __init__(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None):
    data = [] if data is None else data
    files = [] if files is None else files
    headers = {} if headers is None else headers
    params = {} if params is None else params
    hooks = {} if hooks is None else hooks
    self.hooks = default_hooks()
    for (k, v) in list(hooks.items()):
        if k not in self.hooks:
            raise ValueError('Unsupported event specified, with event name "%s"' % k)
        if isinstance(v, collections.Callable):
            self.hooks[k].append(v)
        elif hasattr(v, '__iter__'):
            self.hooks[k].extend((h for h in v if isinstance(h, collections.Callable)))
    self.method = method
    self.url = url
    self.headers = headers
    self.files = files
    self.data = data
    self.params = params
    self.auth = auth
    self.cookies = cookies
crunchy-xml-decoder
positive
def execute(self, context): ed = bpy.context.scene.mbdyn.elems nd = bpy.context.scene.mbdyn.nodes try: elem = ed['total_joint_' + str(self.int_label)] <DeepExtract> mbs = context.scene.mbdyn nd = mbs.nodes if any((obj == elem.blender_object for obj in context.scene.objects.keys())): retval = {'OBJECT_EXISTS'} try: n1 = nd['node_' + str(elem.nodes[0].int_label)].blender_object except KeyError: retval = {'NODE1_NOTFOUND'} try: n2 = nd['node_' + str(elem.nodes[1].int_label)].blender_object except KeyError: retval = {'NODE2_NOTFOUND'} n1OBJ = bpy.data.objects[n1] n2OBJ = bpy.data.objects[n2] q1 = n1OBJ.rotation_quaternion q2 = n2OBJ.rotation_quaternion R1 = n1OBJ.rotation_quaternion.to_matrix() R2 = n2OBJ.rotation_quaternion.to_matrix() fP1 = elem.offsets[0].value fP2 = elem.offsets[1].value try: set_active_collection('joints') elcol = bpy.data.collections.new(name=elem.name) bpy.data.collections['joints'].children.link(elcol) set_active_collection(elcol.name) lib_path = os.path.join(mbs.addon_path, 'library', 'joints.blend', 'Object') bpy.ops.wm.append(directory=lib_path, filename='total') totjOBJ = bpy.context.selected_objects[0] totjOBJ.name = elem.name totjOBJ.location = n1OBJ.location + R1 @ Vector((fP1[0], fP1[1], fP1[2])) totjOBJ.rotation_mode = 'QUATERNION' totjOBJ.rotation_quaternion = Quaternion(elem.rotoffsets[0].value[0:]) @ q1 OBJs = list() OBJs.append(totjOBJ) pos = ['total.disp.x', 'total.disp.y', 'total.disp.z'] for kk in range(3): if not elem.offsets[2].value[kk]: app_retval = bpy.ops.wm.append(directory=lib_path, filename=pos[kk]) if app_retval != {'FINISHED'}: retval = {'LIBRARY_ERROR'} obj = bpy.context.selected_objects[0] OBJs.append(obj) obj.location = n1OBJ.location + R1 @ Vector((fP1[0], fP1[1], fP1[2])) obj.rotation_mode = 'QUATERNION' obj.rotation_quaternion = Quaternion(elem.rotoffsets[0].value[0:]) @ q1 rot = ['total.rot.x', 'total.rot.y', 'total.rot.z'] for kk in range(3): if not elem.offsets[4].value[kk]: app_retval = bpy.ops.wm.append(directory=lib_path, filename=rot[kk]) if app_retval != {'FINISHED'}: retval = {'LIBRARY_ERROR'} obj = bpy.context.selected_objects[0] OBJs.append(obj) obj.location = n1OBJ.location + R1 @ Vector(elem.offsets[0].value[0:]) obj.rotation_mode = 'QUATERNION' obj.rotation_quaternion = Quaternion(elem.rotoffsets[1].value[0:]) @ q1 ctx = context.copy() ctx['active_object'] = OBJs[0] ctx['selected_editable_objects'] = OBJs bpy.ops.object.join(ctx) s = 0.5 / sqrt(3.0) * (n1OBJ.scale.magnitude + n2OBJ.scale.magnitude) totjOBJ.scale = Vector((s, s, s)) RF1p = bpy.data.objects.new(totjOBJ.name + '_RF1_pos', None) RF1p.location = totjOBJ.location RF1p.empty_display_type = 'ARROWS' RF1p.rotation_mode = 'QUATERNION' RF1p.rotation_quaternion = Quaternion(elem.rotoffsets[0].value[0:]) @ q1 RF1r = bpy.data.objects.new(totjOBJ.name + '_RF1_rot', None) RF1r.location = totjOBJ.location RF1r.empty_display_type = 'ARROWS' RF1r.rotation_mode = 'QUATERNION' RF1r.rotation_quaternion = Quaternion(elem.rotoffsets[1].value[0:]) @ q1 RF2p = bpy.data.objects.new(totjOBJ.name + '_RF2_pos', None) RF2p.location = totjOBJ.location RF2p.empty_display_type = 'ARROWS' RF2p.rotation_mode = 'QUATERNION' RF2p.rotation_quaternion = Quaternion(elem.rotoffsets[2].value[0:]) @ q2 RF2r = bpy.data.objects.new(totjOBJ.name + '_RF2_rot', None) RF2r.location = totjOBJ.location RF2r.empty_display_type = 'ARROWS' RF2r.rotation_mode = 'QUATERNION' RF2r.rotation_quaternion = Quaternion(elem.rotoffsets[3].value[0:]) @ q2 elem.blender_object = totjOBJ.name totjOBJ.mbdyn.dkey = elem.name 
totjOBJ.mbdyn.type = 'element' parenting(totjOBJ, n1OBJ) parenting(RF1p, n1OBJ) parenting(RF1r, n1OBJ) parenting(RF2p, n2OBJ) parenting(RF2r, n2OBJ) elcol.objects.link(RF1p) elcol.objects.link(RF1r) elcol.objects.link(RF2p) elcol.objects.link(RF2r) RF1p.hide_set(state=True) RF1r.hide_set(state=True) RF2p.hide_set(state=True) RF2r.hide_set(state=True) elcol.objects.link(n1OBJ) elcol.objects.link(n2OBJ) set_active_collection('Master Collection') retval = {'FINISHED'} except FileNotFoundError: retval = {'LIBRARY_ERROR'} except KeyError: retval = {'COLLECTION_ERROR'} </DeepExtract> if retval == {'OBJECT_EXISTS'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'NODE1_NOTFOUND'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'NODE2_NOTFOUND'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'COLLECTION_ERROR'}: eldbmsf(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'LIBRARY_ERROR'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'FINISHED'}: eldbmsg({'IMPORT_SUCCESS'}, type(self).__name__ + '::execute()', elem) return retval else: return retval except KeyError: eldbmsg({'DICT_ERROR'}, type(self).__name__ + '::execute()', elem) return {'CANCELLED'}
def execute(self, context): ed = bpy.context.scene.mbdyn.elems nd = bpy.context.scene.mbdyn.nodes try: elem = ed['total_joint_' + str(self.int_label)] mbs = context.scene.mbdyn nd = mbs.nodes if any((obj == elem.blender_object for obj in context.scene.objects.keys())): retval = {'OBJECT_EXISTS'} try: n1 = nd['node_' + str(elem.nodes[0].int_label)].blender_object except KeyError: retval = {'NODE1_NOTFOUND'} try: n2 = nd['node_' + str(elem.nodes[1].int_label)].blender_object except KeyError: retval = {'NODE2_NOTFOUND'} n1OBJ = bpy.data.objects[n1] n2OBJ = bpy.data.objects[n2] q1 = n1OBJ.rotation_quaternion q2 = n2OBJ.rotation_quaternion R1 = n1OBJ.rotation_quaternion.to_matrix() R2 = n2OBJ.rotation_quaternion.to_matrix() fP1 = elem.offsets[0].value fP2 = elem.offsets[1].value try: set_active_collection('joints') elcol = bpy.data.collections.new(name=elem.name) bpy.data.collections['joints'].children.link(elcol) set_active_collection(elcol.name) lib_path = os.path.join(mbs.addon_path, 'library', 'joints.blend', 'Object') bpy.ops.wm.append(directory=lib_path, filename='total') totjOBJ = bpy.context.selected_objects[0] totjOBJ.name = elem.name totjOBJ.location = n1OBJ.location + R1 @ Vector((fP1[0], fP1[1], fP1[2])) totjOBJ.rotation_mode = 'QUATERNION' totjOBJ.rotation_quaternion = Quaternion(elem.rotoffsets[0].value[0:]) @ q1 OBJs = list() OBJs.append(totjOBJ) pos = ['total.disp.x', 'total.disp.y', 'total.disp.z'] for kk in range(3): if not elem.offsets[2].value[kk]: app_retval = bpy.ops.wm.append(directory=lib_path, filename=pos[kk]) if app_retval != {'FINISHED'}: retval = {'LIBRARY_ERROR'} obj = bpy.context.selected_objects[0] OBJs.append(obj) obj.location = n1OBJ.location + R1 @ Vector((fP1[0], fP1[1], fP1[2])) obj.rotation_mode = 'QUATERNION' obj.rotation_quaternion = Quaternion(elem.rotoffsets[0].value[0:]) @ q1 rot = ['total.rot.x', 'total.rot.y', 'total.rot.z'] for kk in range(3): if not elem.offsets[4].value[kk]: app_retval = bpy.ops.wm.append(directory=lib_path, filename=rot[kk]) if app_retval != {'FINISHED'}: retval = {'LIBRARY_ERROR'} obj = bpy.context.selected_objects[0] OBJs.append(obj) obj.location = n1OBJ.location + R1 @ Vector(elem.offsets[0].value[0:]) obj.rotation_mode = 'QUATERNION' obj.rotation_quaternion = Quaternion(elem.rotoffsets[1].value[0:]) @ q1 ctx = context.copy() ctx['active_object'] = OBJs[0] ctx['selected_editable_objects'] = OBJs bpy.ops.object.join(ctx) s = 0.5 / sqrt(3.0) * (n1OBJ.scale.magnitude + n2OBJ.scale.magnitude) totjOBJ.scale = Vector((s, s, s)) RF1p = bpy.data.objects.new(totjOBJ.name + '_RF1_pos', None) RF1p.location = totjOBJ.location RF1p.empty_display_type = 'ARROWS' RF1p.rotation_mode = 'QUATERNION' RF1p.rotation_quaternion = Quaternion(elem.rotoffsets[0].value[0:]) @ q1 RF1r = bpy.data.objects.new(totjOBJ.name + '_RF1_rot', None) RF1r.location = totjOBJ.location RF1r.empty_display_type = 'ARROWS' RF1r.rotation_mode = 'QUATERNION' RF1r.rotation_quaternion = Quaternion(elem.rotoffsets[1].value[0:]) @ q1 RF2p = bpy.data.objects.new(totjOBJ.name + '_RF2_pos', None) RF2p.location = totjOBJ.location RF2p.empty_display_type = 'ARROWS' RF2p.rotation_mode = 'QUATERNION' RF2p.rotation_quaternion = Quaternion(elem.rotoffsets[2].value[0:]) @ q2 RF2r = bpy.data.objects.new(totjOBJ.name + '_RF2_rot', None) RF2r.location = totjOBJ.location RF2r.empty_display_type = 'ARROWS' RF2r.rotation_mode = 'QUATERNION' RF2r.rotation_quaternion = Quaternion(elem.rotoffsets[3].value[0:]) @ q2 elem.blender_object = totjOBJ.name totjOBJ.mbdyn.dkey = elem.name 
totjOBJ.mbdyn.type = 'element' parenting(totjOBJ, n1OBJ) parenting(RF1p, n1OBJ) parenting(RF1r, n1OBJ) parenting(RF2p, n2OBJ) parenting(RF2r, n2OBJ) elcol.objects.link(RF1p) elcol.objects.link(RF1r) elcol.objects.link(RF2p) elcol.objects.link(RF2r) RF1p.hide_set(state=True) RF1r.hide_set(state=True) RF2p.hide_set(state=True) RF2r.hide_set(state=True) elcol.objects.link(n1OBJ) elcol.objects.link(n2OBJ) set_active_collection('Master Collection') retval = {'FINISHED'} except FileNotFoundError: retval = {'LIBRARY_ERROR'} except KeyError: retval = {'COLLECTION_ERROR'} if retval == {'OBJECT_EXISTS'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'NODE1_NOTFOUND'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'NODE2_NOTFOUND'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'COLLECTION_ERROR'}: eldbmsf(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'LIBRARY_ERROR'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'FINISHED'}: eldbmsg({'IMPORT_SUCCESS'}, type(self).__name__ + '::execute()', elem) return retval else: return retval except KeyError: eldbmsg({'DICT_ERROR'}, type(self).__name__ + '::execute()', elem) return {'CANCELLED'}
blendyn
positive
def add_to_malware_repo(file_path: Union[str, pathlib.Path]) -> pathlib.Path:
    """
    Adds the given file path to the malware repo.
    Returns resulting destination path.
    """
    file_path = pathlib.Path(file_path)
    <DeepExtract>
    malware_repo = mwcp.config.get('MALWARE_REPO')
    if not malware_repo:
        raise ValueError(f"MALWARE_REPO field not set in '{mwcp.config.user_path}'. Try running `mwcp config` to set this.")
    if file_path:
        with open(file_path, 'rb') as fo:
            md5 = hashlib.md5(fo.read()).hexdigest()
    if not md5:
        raise ValueError(f'Missing file_path or md5 parameter.')
    if len(md5) < 4:
        raise ValueError(f"Unable to determine md5 from '{md5}'. Must be at least 4 characters.")
    if len(md5) < 32:
        sub_dir = pathlib.Path(malware_repo, md5[:4])
        if not sub_dir.exists():
            raise ValueError(f"Failed to find sample starting with the md5 '{md5}'.")
        file_paths = []
        for file_path in sub_dir.iterdir():
            if file_path.name.startswith(md5):
                file_paths.append(file_path)
        if not file_paths:
            raise ValueError(f"Failed to find sample starting with the md5 '{md5}'.")
        if len(file_paths) > 1:
            md5s = '\t\n'.join((file_path.name for file_path in file_paths))
            raise ValueError(f"Found multiple samples starting with the md5 '{md5}': \n\t{md5s}")
        dest_file_path = file_paths[0]
    dest_file_path = pathlib.Path(malware_repo, md5[:4], md5)
    </DeepExtract>
    if dest_file_path.exists():
        logger.info(f'File already exists in malware repo: {dest_file_path}')
        return dest_file_path
    dest_file_path.parent.mkdir(parents=True, exist_ok=True)
    logger.info(f'Copying {file_path} to {dest_file_path}')
    shutil.copy(file_path, dest_file_path)
    return dest_file_path
def add_to_malware_repo(file_path: Union[str, pathlib.Path]) -> pathlib.Path: """ Adds the given file path to the malware repo. Returns resulting destination path. """ file_path = pathlib.Path(file_path) malware_repo = mwcp.config.get('MALWARE_REPO') if not malware_repo: raise ValueError(f"MALWARE_REPO field not set in '{mwcp.config.user_path}'. Try running `mwcp config` to set this.") if file_path: with open(file_path, 'rb') as fo: md5 = hashlib.md5(fo.read()).hexdigest() if not md5: raise ValueError(f'Missing file_path or md5 parameter.') if len(md5) < 4: raise ValueError(f"Unable to determine md5 from '{md5}'. Must be at least 4 characters.") if len(md5) < 32: sub_dir = pathlib.Path(malware_repo, md5[:4]) if not sub_dir.exists(): raise ValueError(f"Failed to find sample starting with the md5 '{md5}'.") file_paths = [] for file_path in sub_dir.iterdir(): if file_path.name.startswith(md5): file_paths.append(file_path) if not file_paths: raise ValueError(f"Failed to find sample starting with the md5 '{md5}'.") if len(file_paths) > 1: md5s = '\t\n'.join((file_path.name for file_path in file_paths)) raise ValueError(f"Found multiple samples starting with the md5 '{md5}': \n\t{md5s}") dest_file_path = file_paths[0] dest_file_path = pathlib.Path(malware_repo, md5[:4], md5) if dest_file_path.exists(): logger.info(f'File already exists in malware repo: {dest_file_path}') return dest_file_path dest_file_path.parent.mkdir(parents=True, exist_ok=True) logger.info(f'Copying {file_path} to {dest_file_path}') shutil.copy(file_path, dest_file_path) return dest_file_path
DC3-MWCP
positive
def __call__(self, anchors, objectness, box_regression, targets): """ Arguments: anchors (list[BoxList]) objectness (list[Tensor]) box_regression (list[Tensor]) targets (list[BoxList]) Returns: objectness_loss (Tensor) box_loss (Tensor """ anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors] <DeepExtract> labels = [] regression_targets = [] for (anchors_per_image, targets_per_image) in zip(anchors, targets): matched_targets = self.match_targets_to_anchors(anchors_per_image, targets_per_image) matched_idxs = matched_targets.get_field('matched_idxs') labels_per_image = matched_idxs >= 0 labels_per_image = labels_per_image.to(dtype=torch.float32) labels_per_image[~anchors_per_image.get_field('visibility')] = -1 inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS labels_per_image[inds_to_discard] = -1 regression_targets_per_image = self.box_coder.encode(matched_targets.bbox, anchors_per_image.bbox) labels.append(labels_per_image) regression_targets.append(regression_targets_per_image) (labels, regression_targets) = (labels, regression_targets) </DeepExtract> (sampled_pos_inds, sampled_neg_inds) = self.fg_bg_sampler(labels) sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1) sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1) sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0) objectness_flattened = [] box_regression_flattened = [] for (objectness_per_level, box_regression_per_level) in zip(objectness, box_regression): (N, A, H, W) = objectness_per_level.shape objectness_per_level = objectness_per_level.permute(0, 2, 3, 1).reshape(N, -1) box_regression_per_level = box_regression_per_level.view(N, -1, 4, H, W) box_regression_per_level = box_regression_per_level.permute(0, 3, 4, 1, 2) box_regression_per_level = box_regression_per_level.reshape(N, -1, 4) objectness_flattened.append(objectness_per_level) box_regression_flattened.append(box_regression_per_level) objectness = cat(objectness_flattened, dim=1).reshape(-1) box_regression = cat(box_regression_flattened, dim=1).reshape(-1, 4) labels = torch.cat(labels, dim=0) regression_targets = torch.cat(regression_targets, dim=0) box_loss = smooth_l1_loss(box_regression[sampled_pos_inds], regression_targets[sampled_pos_inds], beta=1.0 / 9, size_average=False) / sampled_inds.numel() objectness_loss = F.binary_cross_entropy_with_logits(objectness[sampled_inds], labels[sampled_inds]) return (objectness_loss, box_loss)
def __call__(self, anchors, objectness, box_regression, targets): """ Arguments: anchors (list[BoxList]) objectness (list[Tensor]) box_regression (list[Tensor]) targets (list[BoxList]) Returns: objectness_loss (Tensor) box_loss (Tensor """ anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors] labels = [] regression_targets = [] for (anchors_per_image, targets_per_image) in zip(anchors, targets): matched_targets = self.match_targets_to_anchors(anchors_per_image, targets_per_image) matched_idxs = matched_targets.get_field('matched_idxs') labels_per_image = matched_idxs >= 0 labels_per_image = labels_per_image.to(dtype=torch.float32) labels_per_image[~anchors_per_image.get_field('visibility')] = -1 inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS labels_per_image[inds_to_discard] = -1 regression_targets_per_image = self.box_coder.encode(matched_targets.bbox, anchors_per_image.bbox) labels.append(labels_per_image) regression_targets.append(regression_targets_per_image) (labels, regression_targets) = (labels, regression_targets) (sampled_pos_inds, sampled_neg_inds) = self.fg_bg_sampler(labels) sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1) sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1) sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0) objectness_flattened = [] box_regression_flattened = [] for (objectness_per_level, box_regression_per_level) in zip(objectness, box_regression): (N, A, H, W) = objectness_per_level.shape objectness_per_level = objectness_per_level.permute(0, 2, 3, 1).reshape(N, -1) box_regression_per_level = box_regression_per_level.view(N, -1, 4, H, W) box_regression_per_level = box_regression_per_level.permute(0, 3, 4, 1, 2) box_regression_per_level = box_regression_per_level.reshape(N, -1, 4) objectness_flattened.append(objectness_per_level) box_regression_flattened.append(box_regression_per_level) objectness = cat(objectness_flattened, dim=1).reshape(-1) box_regression = cat(box_regression_flattened, dim=1).reshape(-1, 4) labels = torch.cat(labels, dim=0) regression_targets = torch.cat(regression_targets, dim=0) box_loss = smooth_l1_loss(box_regression[sampled_pos_inds], regression_targets[sampled_pos_inds], beta=1.0 / 9, size_average=False) / sampled_inds.numel() objectness_loss = F.binary_cross_entropy_with_logits(objectness[sampled_inds], labels[sampled_inds]) return (objectness_loss, box_loss)
AE-WTN
positive
def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group): """Internal utility function to bind the i-th executor. """ shared_exec = None if shared_group is None else shared_group.execs[i] context = self.contexts[i] shared_data_arrays = self.shared_data_arrays[i] input_shapes = dict(data_shapes) if label_shapes is not None: input_shapes.update(dict(label_shapes)) (arg_shapes, _, aux_shapes) = self.symbol.infer_shape(**input_shapes) assert arg_shapes is not None, 'shape inference failed' input_types = {x.name: x.dtype for x in data_shapes} if label_shapes is not None: input_types.update({x.name: x.dtype for x in label_shapes}) (arg_types, _, aux_types) = self.symbol.infer_type(**input_types) assert arg_types is not None, 'type inference failed' arg_arrays = [] grad_arrays = {} if self.for_training else None def _get_or_reshape(name, shared_data_arrays, arg_shape, arg_type, context, logger): """Internal helper to get a memory block or re-use by re-shaping""" if name in shared_data_arrays: arg_arr = shared_data_arrays[name] if np.prod(arg_arr.shape) >= np.prod(arg_shape): assert arg_arr.dtype == arg_type arg_arr = arg_arr.reshape(arg_shape) else: logger.warning('bucketing: data "%s" has a shape %s' % (name, arg_shape) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.') arg_arr = nd.zeros(arg_shape, context, dtype=arg_type) shared_data_arrays[name] = arg_arr else: arg_arr = nd.zeros(arg_shape, context, dtype=arg_type) shared_data_arrays[name] = arg_arr return arg_arr for j in range(len(self.arg_names)): name = self.arg_names[j] if name in self.param_names: if shared_exec is None: arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) if self.grad_req[name] != 'null': grad_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) grad_arrays[name] = grad_arr else: arg_arr = shared_exec.arg_dict[name] assert arg_arr.shape == arg_shapes[j] assert arg_arr.dtype == arg_types[j] if self.grad_req[name] != 'null': grad_arrays[name] = shared_exec.grad_dict[name] else: <DeepExtract> if name in shared_data_arrays: arg_arr = shared_data_arrays[name] if np.prod(arg_arr.shape) >= np.prod(arg_shapes[j]): assert arg_arr.dtype == arg_types[j] arg_arr = arg_arr.reshape(arg_shapes[j]) else: self.logger.warning('bucketing: data "%s" has a shape %s' % (name, arg_shapes[j]) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.') arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) shared_data_arrays[name] = arg_arr else: arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) shared_data_arrays[name] = arg_arr arg_arr = arg_arr </DeepExtract> if self.grad_req[name] != 'null': <DeepExtract> if 'grad of ' + name in shared_data_arrays: arg_arr = shared_data_arrays['grad of ' + name] if np.prod(arg_arr.shape) >= np.prod(arg_shapes[j]): assert arg_arr.dtype == arg_types[j] arg_arr = arg_arr.reshape(arg_shapes[j]) else: self.logger.warning('bucketing: data "%s" has a shape %s' % ('grad of ' + name, arg_shapes[j]) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. 
Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.') arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) shared_data_arrays['grad of ' + name] = arg_arr else: arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) shared_data_arrays['grad of ' + name] = arg_arr grad_arrays['grad of ' + name] = arg_arr </DeepExtract> arg_arrays.append(arg_arr) if shared_exec is None: aux_arrays = [nd.zeros(s, context, dtype=t) for (s, t) in zip(aux_shapes, aux_types)] else: for (j, arr) in enumerate(shared_exec.aux_arrays): assert aux_shapes[j] == arr.shape assert aux_types[j] == arr.dtype aux_arrays = shared_exec.aux_arrays[:] executor = self.symbol.bind(ctx=context, args=arg_arrays, args_grad=grad_arrays, aux_states=aux_arrays, grad_req=self.grad_req, shared_exec=shared_exec) return executor
def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group): """Internal utility function to bind the i-th executor. """ shared_exec = None if shared_group is None else shared_group.execs[i] context = self.contexts[i] shared_data_arrays = self.shared_data_arrays[i] input_shapes = dict(data_shapes) if label_shapes is not None: input_shapes.update(dict(label_shapes)) (arg_shapes, _, aux_shapes) = self.symbol.infer_shape(**input_shapes) assert arg_shapes is not None, 'shape inference failed' input_types = {x.name: x.dtype for x in data_shapes} if label_shapes is not None: input_types.update({x.name: x.dtype for x in label_shapes}) (arg_types, _, aux_types) = self.symbol.infer_type(**input_types) assert arg_types is not None, 'type inference failed' arg_arrays = [] grad_arrays = {} if self.for_training else None def _get_or_reshape(name, shared_data_arrays, arg_shape, arg_type, context, logger): """Internal helper to get a memory block or re-use by re-shaping""" if name in shared_data_arrays: arg_arr = shared_data_arrays[name] if np.prod(arg_arr.shape) >= np.prod(arg_shape): assert arg_arr.dtype == arg_type arg_arr = arg_arr.reshape(arg_shape) else: logger.warning('bucketing: data "%s" has a shape %s' % (name, arg_shape) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.') arg_arr = nd.zeros(arg_shape, context, dtype=arg_type) shared_data_arrays[name] = arg_arr else: arg_arr = nd.zeros(arg_shape, context, dtype=arg_type) shared_data_arrays[name] = arg_arr return arg_arr for j in range(len(self.arg_names)): name = self.arg_names[j] if name in self.param_names: if shared_exec is None: arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) if self.grad_req[name] != 'null': grad_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) grad_arrays[name] = grad_arr else: arg_arr = shared_exec.arg_dict[name] assert arg_arr.shape == arg_shapes[j] assert arg_arr.dtype == arg_types[j] if self.grad_req[name] != 'null': grad_arrays[name] = shared_exec.grad_dict[name] else: if name in shared_data_arrays: arg_arr = shared_data_arrays[name] if np.prod(arg_arr.shape) >= np.prod(arg_shapes[j]): assert arg_arr.dtype == arg_types[j] arg_arr = arg_arr.reshape(arg_shapes[j]) else: self.logger.warning('bucketing: data "%s" has a shape %s' % (name, arg_shapes[j]) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.') arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) shared_data_arrays[name] = arg_arr else: arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) shared_data_arrays[name] = arg_arr arg_arr = arg_arr if self.grad_req[name] != 'null': if 'grad of ' + name in shared_data_arrays: arg_arr = shared_data_arrays['grad of ' + name] if np.prod(arg_arr.shape) >= np.prod(arg_shapes[j]): assert arg_arr.dtype == arg_types[j] arg_arr = arg_arr.reshape(arg_shapes[j]) else: self.logger.warning('bucketing: data "%s" has a shape %s' % ('grad of ' + name, arg_shapes[j]) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. 
Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.') arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) shared_data_arrays['grad of ' + name] = arg_arr else: arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j]) shared_data_arrays['grad of ' + name] = arg_arr grad_arrays['grad of ' + name] = arg_arr arg_arrays.append(arg_arr) if shared_exec is None: aux_arrays = [nd.zeros(s, context, dtype=t) for (s, t) in zip(aux_shapes, aux_types)] else: for (j, arr) in enumerate(shared_exec.aux_arrays): assert aux_shapes[j] == arr.shape assert aux_types[j] == arr.dtype aux_arrays = shared_exec.aux_arrays[:] executor = self.symbol.bind(ctx=context, args=arg_arrays, args_grad=grad_arrays, aux_states=aux_arrays, grad_req=self.grad_req, shared_exec=shared_exec) return executor
Deep-Feature-video
positive
def command(self) -> EditResult | None: <DeepExtract> default = default or '' self.status.clear() if 'command' is not None: history_data = [*self.history.data['command'], default] if default_prev and 'command' in self.history.prev: '' = f"{''} [{self.history.prev['command']}]" else: history_data = [default] ret = Prompt(self, '', history_data).run() if ret is not PromptResult.CANCELLED and 'command' is not None: if ret: history_lst = self.history.data['command'] if not history_lst or history_lst[-1] != ret: history_lst.append(ret) self.history.prev['command'] = ret elif default_prev and 'command' in self.history.prev: response = self.history.prev['command'] if not allow_empty and (not ret): response = self.status.cancelled() else: response = ret </DeepExtract> if response is PromptResult.CANCELLED: return None (cmd, *args) = response.split() if cmd not in self.COMMANDS: self.status.update(f'invalid command: {response}') return None command = self.COMMANDS[cmd] if isinstance(command.nargs, int): if len(args) != command.nargs: self.status.update(f'`{cmd}`: expected {command.nargs} args but got {len(args)}') return None elif command.nargs == '?': if len(args) not in {0, 1}: self.status.update(f'`{cmd}`: expected 0 or 1 args but got {len(args)}') return None else: raise NotImplementedError(command) return command.callback(self, args)
def command(self) -> EditResult | None: default = default or '' self.status.clear() if 'command' is not None: history_data = [*self.history.data['command'], default] if default_prev and 'command' in self.history.prev: '' = f"{''} [{self.history.prev['command']}]" else: history_data = [default] ret = Prompt(self, '', history_data).run() if ret is not PromptResult.CANCELLED and 'command' is not None: if ret: history_lst = self.history.data['command'] if not history_lst or history_lst[-1] != ret: history_lst.append(ret) self.history.prev['command'] = ret elif default_prev and 'command' in self.history.prev: response = self.history.prev['command'] if not allow_empty and (not ret): response = self.status.cancelled() else: response = ret if response is PromptResult.CANCELLED: return None (cmd, *args) = response.split() if cmd not in self.COMMANDS: self.status.update(f'invalid command: {response}') return None command = self.COMMANDS[cmd] if isinstance(command.nargs, int): if len(args) != command.nargs: self.status.update(f'`{cmd}`: expected {command.nargs} args but got {len(args)}') return None elif command.nargs == '?': if len(args) not in {0, 1}: self.status.update(f'`{cmd}`: expected 0 or 1 args but got {len(args)}') return None else: raise NotImplementedError(command) return command.callback(self, args)
babi
positive
def bacnet_property(property_name, value, *, force_mutable=None): """ Given a property, add it to the object """ def decorate(func): @wraps(func) def wrapper(*args, **kwargs): if callable(func): obj = func(*args, **kwargs) else: obj = func <DeepExtract> allowed_prop = {} for each in type(obj).properties: allowed_prop[each.identifier] = each.datatype for base in type(obj).__bases__: try: for each in base.properties: allowed_prop[each.identifier] = each.datatype except AttributeError: pass allowed_prop = allowed_prop </DeepExtract> <DeepExtract> if property_name in _SHOULD_BE_COMMANDABLE and (not force_mutable): mutable = True elif force_mutable: mutable = force_mutable else: mutable = False mutable = mutable </DeepExtract> if property_name == 'units': new_prop = EngineeringUnits.enumerations[value] obj.units = new_prop else: try: new_prop = Property(property_name, allowed_prop[property_name], default=value, mutable=mutable) except KeyError: raise ValueError('Invalid property ({}) for object'.format(property_name)) obj.add_property(new_prop) return obj return wrapper return decorate
def bacnet_property(property_name, value, *, force_mutable=None): """ Given a property, add it to the object """ def decorate(func): @wraps(func) def wrapper(*args, **kwargs): if callable(func): obj = func(*args, **kwargs) else: obj = func allowed_prop = {} for each in type(obj).properties: allowed_prop[each.identifier] = each.datatype for base in type(obj).__bases__: try: for each in base.properties: allowed_prop[each.identifier] = each.datatype except AttributeError: pass allowed_prop = allowed_prop if property_name in _SHOULD_BE_COMMANDABLE and (not force_mutable): mutable = True elif force_mutable: mutable = force_mutable else: mutable = False mutable = mutable if property_name == 'units': new_prop = EngineeringUnits.enumerations[value] obj.units = new_prop else: try: new_prop = Property(property_name, allowed_prop[property_name], default=value, mutable=mutable) except KeyError: raise ValueError('Invalid property ({}) for object'.format(property_name)) obj.add_property(new_prop) return obj return wrapper return decorate
BAC0
positive
def restart(self): if self.is_alive(): <DeepExtract> logging.debug('Killing process {}'.format(self.name)) self.__process_handle.kill() </DeepExtract> <DeepExtract> logging.debug('Starting process {}'.format(self.name)) self.__process_handle = subprocess.Popen(self.cmd, env=self.env) logging.debug('Started process {} via command {}. Process Id: {}'.format(self.name, self.cmd, self.__process_handle.pid)) </DeepExtract>
def restart(self): if self.is_alive(): logging.debug('Killing process {}'.format(self.name)) self.__process_handle.kill() logging.debug('Starting process {}'.format(self.name)) self.__process_handle = subprocess.Popen(self.cmd, env=self.env) logging.debug('Started process {} via command {}. Process Id: {}'.format(self.name, self.cmd, self.__process_handle.pid))
cf-mendix-buildpack
positive
@salience.setter def salience(self, salience: int): """Activation salience value.""" <DeepExtract> activations = [] activation = lib.GetNextActivation(self._env, ffi.NULL) while activation != ffi.NULL: activations.append(activation) activation = lib.GetNextActivation(self._env, activation) if self._act not in activations: raise CLIPSError(self._env, 'Activation %s not in the agenda' % self.name) </DeepExtract> lib.ActivationSetSalience(self._act, salience)
@salience.setter def salience(self, salience: int): """Activation salience value.""" activations = [] activation = lib.GetNextActivation(self._env, ffi.NULL) while activation != ffi.NULL: activations.append(activation) activation = lib.GetNextActivation(self._env, activation) if self._act not in activations: raise CLIPSError(self._env, 'Activation %s not in the agenda' % self.name) lib.ActivationSetSalience(self._act, salience)
clipspy
positive
def get_qid_count_cands(self, alias, max_cand_pad=False): """Get the [QID, sort_value] candidates for an alias. Args: alias: alias max_cand_pad: whether to pad with ['-1',-1] or not if fewer than max_candidates candidates Returns: List of [QID, sort_value] """ <DeepExtract> if isinstance(self._alias2qids, dict): qid_pairs = self._alias2qids[alias] else: qid_pairs = self._alias2qids.get_value(alias) qid_pairs = qid_pairs </DeepExtract> res = qid_pairs if max_cand_pad: res = res + ['-1', -1] * (self.max_candidates - len(res)) return res
def get_qid_count_cands(self, alias, max_cand_pad=False): """Get the [QID, sort_value] candidates for an alias. Args: alias: alias max_cand_pad: whether to pad with ['-1',-1] or not if fewer than max_candidates candidates Returns: List of [QID, sort_value] """ if isinstance(self._alias2qids, dict): qid_pairs = self._alias2qids[alias] else: qid_pairs = self._alias2qids.get_value(alias) qid_pairs = qid_pairs res = qid_pairs if max_cand_pad: res = res + ['-1', -1] * (self.max_candidates - len(res)) return res
bootleg
positive
@pytest.mark.usefixtures('aiida_profile') def test_valid_init(generate_hubbard_utils, generate_hubbard_structure): """Test the constructor.""" <DeepExtract> def _generate_hubbard_utils(): hubbard_utils = HubbardUtils(hubbard_structure=generate_hubbard_structure()) hubbard_utils = _generate_hubbard_utils </DeepExtract> assert hubbard_utils.hubbard_structure.hubbard == generate_hubbard_structure().hubbard
@pytest.mark.usefixtures('aiida_profile') def test_valid_init(generate_hubbard_utils, generate_hubbard_structure): """Test the constructor.""" def _generate_hubbard_utils(): hubbard_utils = HubbardUtils(hubbard_structure=generate_hubbard_structure()) hubbard_utils = _generate_hubbard_utils assert hubbard_utils.hubbard_structure.hubbard == generate_hubbard_structure().hubbard
aiida-quantumespresso
positive
def fit(self, X, y=None, sample_weight=None): """Applies the Benktander technique to triangle **X** Parameters ---------- X: Triangle Loss data to which the model will be applied. y: None Ignored sample_weight: Triangle Required exposure to be used in the calculation. Returns ------- self: object Returns the instance itself. """ if sample_weight is None: raise ValueError('sample_weight is required.') super().fit(X, y, sample_weight) <DeepExtract> xp = X.get_array_module() if self.apriori_sigma != 0: random_state = xp.random.RandomState(self.random_state) apriori = random_state.normal(self.apriori, self.apriori_sigma, X.shape[0]) apriori = apriori.reshape(X.shape[0], -1)[..., None, None] apriori = sample_weight * apriori apriori.kdims = X.kdims apriori.key_labels = X.key_labels else: apriori = sample_weight * self.apriori apriori.columns = sample_weight.columns self.expectation_ = apriori </DeepExtract> <DeepExtract> from chainladder.utils.utility_functions import num_to_nan if self.X_.is_cumulative == False: ld = self.X_.sum('development') ultimate = ld.val_to_dev() else: ld = self.X_.latest_diagonal ultimate = self.X_.copy() cdf = self._align_cdf(ultimate.val_to_dev(), self.expectation_) backend = cdf.array_backend xp = cdf.get_array_module() cdf = cdf.sort_index() ld = ld.sort_index() self.expectation_ = self.expectation_.sort_index() ultimate = ultimate.sort_index() cdf = (1 - 1 / num_to_nan(cdf.values))[None] exponents = xp.arange(self.n_iters + 1) exponents = xp.reshape(exponents, tuple([len(exponents)] + [1] * 4)) cdf = cdf ** ((cdf + 1e-16) / (cdf + 1e-16) * exponents) cdf = xp.nan_to_num(cdf) a = xp.sum(cdf[:-1, ...], 0) * xp.nan_to_num(ld.set_backend(backend).values) b = cdf[-1, ...] * xp.nan_to_num(self.expectation_.set_backend(backend).values) ultimate.values = num_to_nan(a + b) ultimate.array_backend = backend ultimate.ddims = self.cdf_.ddims[:ultimate.shape[-1]] self.ultimate_ = self._set_ult_attr(ultimate) </DeepExtract> self.process_variance_ = self._include_process_variance() return self
def fit(self, X, y=None, sample_weight=None): """Applies the Benktander technique to triangle **X** Parameters ---------- X: Triangle Loss data to which the model will be applied. y: None Ignored sample_weight: Triangle Required exposure to be used in the calculation. Returns ------- self: object Returns the instance itself. """ if sample_weight is None: raise ValueError('sample_weight is required.') super().fit(X, y, sample_weight) xp = X.get_array_module() if self.apriori_sigma != 0: random_state = xp.random.RandomState(self.random_state) apriori = random_state.normal(self.apriori, self.apriori_sigma, X.shape[0]) apriori = apriori.reshape(X.shape[0], -1)[..., None, None] apriori = sample_weight * apriori apriori.kdims = X.kdims apriori.key_labels = X.key_labels else: apriori = sample_weight * self.apriori apriori.columns = sample_weight.columns self.expectation_ = apriori from chainladder.utils.utility_functions import num_to_nan if self.X_.is_cumulative == False: ld = self.X_.sum('development') ultimate = ld.val_to_dev() else: ld = self.X_.latest_diagonal ultimate = self.X_.copy() cdf = self._align_cdf(ultimate.val_to_dev(), self.expectation_) backend = cdf.array_backend xp = cdf.get_array_module() cdf = cdf.sort_index() ld = ld.sort_index() self.expectation_ = self.expectation_.sort_index() ultimate = ultimate.sort_index() cdf = (1 - 1 / num_to_nan(cdf.values))[None] exponents = xp.arange(self.n_iters + 1) exponents = xp.reshape(exponents, tuple([len(exponents)] + [1] * 4)) cdf = cdf ** ((cdf + 1e-16) / (cdf + 1e-16) * exponents) cdf = xp.nan_to_num(cdf) a = xp.sum(cdf[:-1, ...], 0) * xp.nan_to_num(ld.set_backend(backend).values) b = cdf[-1, ...] * xp.nan_to_num(self.expectation_.set_backend(backend).values) ultimate.values = num_to_nan(a + b) ultimate.array_backend = backend ultimate.ddims = self.cdf_.ddims[:ultimate.shape[-1]] self.ultimate_ = self._set_ult_attr(ultimate) self.process_variance_ = self._include_process_variance() return self
chainladder-python
positive
@crash_safe def build(context, props): verify_facemaps_for_object(context.object) me = get_edit_mesh() bm = bmesh.from_edit_mesh(me) <DeepExtract> [face for face in bm.faces if face.select] = list(filter(lambda f: abs(round(f.normal.z, 3)) == 0.0, [face for face in bm.faces if face.select])) [face for face in bm.faces if face.select] = list(filter(lambda f: is_rectangle(f), [face for face in bm.faces if face.select])) [face for face in bm.faces if face.select] = [face for face in bm.faces if face.select] </DeepExtract> if faces: <DeepExtract> groups = (FaceMap.DOOR, FaceMap.FRAME) add_facemap_for_groups(groups) </DeepExtract> if create_door(bm, faces, props): bmesh.update_edit_mesh(me, loop_triangles=True) return {'FINISHED'} bmesh.update_edit_mesh(me, loop_triangles=True) return {'CANCELLED'}
@crash_safe def build(context, props): verify_facemaps_for_object(context.object) me = get_edit_mesh() bm = bmesh.from_edit_mesh(me) [face for face in bm.faces if face.select] = list(filter(lambda f: abs(round(f.normal.z, 3)) == 0.0, [face for face in bm.faces if face.select])) [face for face in bm.faces if face.select] = list(filter(lambda f: is_rectangle(f), [face for face in bm.faces if face.select])) [face for face in bm.faces if face.select] = [face for face in bm.faces if face.select] if faces: groups = (FaceMap.DOOR, FaceMap.FRAME) add_facemap_for_groups(groups) if create_door(bm, faces, props): bmesh.update_edit_mesh(me, loop_triangles=True) return {'FINISHED'} bmesh.update_edit_mesh(me, loop_triangles=True) return {'CANCELLED'}
building_tools
positive
def test_step(self, data, sample_from='dist'): """One test step Arguments: data {dict of data} -- required keys and values: 'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of context sentences 'X_floor' {LongTensor [batch_size, history_len]} -- floors of context sentences 'Y_floor' {LongTensor [batch_size]} -- floor of response sentence sample_from {str} -- "dist": sample mechanism from computed probabilities "random": sample mechanisms uniformly Returns: dict of data -- returned keys and values 'symbols' {LongTensor [batch_size, max_decode_len]} -- token ids of response hypothesis dict of statistics -- returned keys and values """ (X, Y) = (data['X'], data['Y']) (X_floor, Y_floor) = (data['X_floor'], data['Y_floor']) batch_size = X.size(0) with torch.no_grad(): <DeepExtract> (batch_size, history_len, max_x_sent_len) = X.size() flat_inputs = X.view(batch_size * history_len, max_x_sent_len) input_lens = (X != self.pad_token_id).sum(-1) flat_input_lens = input_lens.view(batch_size * history_len) (word_encodings, _, sent_encodings) = self.sent_encoder(flat_inputs, flat_input_lens) word_encodings = word_encodings.view(batch_size, history_len, max_x_sent_len, -1) sent_encodings = sent_encodings.view(batch_size, history_len, -1) if self.floor_encoder is not None: src_floors = X_floor.view(-1) tgt_floors = Y_floor.unsqueeze(1).repeat(1, history_len).view(-1) sent_encodings = sent_encodings.view(batch_size * history_len, -1) sent_encodings = self.floor_encoder(sent_encodings, src_floors=src_floors, tgt_floors=tgt_floors) sent_encodings = sent_encodings.view(batch_size, history_len, -1) dial_lens = (input_lens > 0).long().sum(1) (_, _, dial_encodings) = self.dial_encoder(sent_encodings, dial_lens) (word_encodings, sent_encodings, dial_encodings) = (word_encodings, sent_encodings, dial_encodings) </DeepExtract> attn_ctx = word_encodings.view(batch_size, -1, word_encodings.size(-1)) <DeepExtract> attn_mask = X.view(batch_size, -1) != self.pad_token_id attn_mask = attn_mask </DeepExtract> if sample_from == 'dist': <DeepExtract> ctx_mech = self.ctx2mech_fc(dial_encodings) mech_scores = torch.matmul(torch.matmul(ctx_mech, self.score_bilinear), self.mechanism_embeddings.weight.T) mech_probs = F.softmax(mech_scores, dim=1) mech_probs = mech_probs </DeepExtract> mech_dist = torch.distributions.Categorical(mech_probs) mech_embed_inputs = mech_dist.sample() elif sample_from == 'random': mech_embed_inputs = [random.randint(0, self.n_mechanisms - 1) for _ in range(batch_size)] mech_embed_inputs = torch.LongTensor(mech_embed_inputs).to(DEVICE) mech_embeds = self.mechanism_embeddings(mech_embed_inputs) dec_ctx = self.ctx_mech_combine_fc(torch.cat([dial_encodings, mech_embeds], dim=1)) <DeepExtract> batch_size = dec_ctx.size(0) hiddens = self._init_dec_hiddens(dec_ctx) feats = None feats = dec_ctx.unsqueeze(1).repeat(1, self.decode_max_len, 1) ret_dict = self.decoder.forward(batch_size=batch_size, hiddens=hiddens, feats=feats, attn_ctx=attn_ctx, attn_mask=attn_mask, mode=DecoderRNN.MODE_FREE_RUN, gen_type=self.gen_type, temp=self.temp, top_p=self.top_p, top_k=self.top_k) decoder_ret_dict = ret_dict </DeepExtract> ret_data = {'symbols': decoder_ret_dict['symbols']} ret_stat = {} return (ret_data, ret_stat)
def test_step(self, data, sample_from='dist'): """One test step Arguments: data {dict of data} -- required keys and values: 'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of context sentences 'X_floor' {LongTensor [batch_size, history_len]} -- floors of context sentences 'Y_floor' {LongTensor [batch_size]} -- floor of response sentence sample_from {str} -- "dist": sample mechanism from computed probabilities "random": sample mechanisms uniformly Returns: dict of data -- returned keys and values 'symbols' {LongTensor [batch_size, max_decode_len]} -- token ids of response hypothesis dict of statistics -- returned keys and values """ (X, Y) = (data['X'], data['Y']) (X_floor, Y_floor) = (data['X_floor'], data['Y_floor']) batch_size = X.size(0) with torch.no_grad(): (batch_size, history_len, max_x_sent_len) = X.size() flat_inputs = X.view(batch_size * history_len, max_x_sent_len) input_lens = (X != self.pad_token_id).sum(-1) flat_input_lens = input_lens.view(batch_size * history_len) (word_encodings, _, sent_encodings) = self.sent_encoder(flat_inputs, flat_input_lens) word_encodings = word_encodings.view(batch_size, history_len, max_x_sent_len, -1) sent_encodings = sent_encodings.view(batch_size, history_len, -1) if self.floor_encoder is not None: src_floors = X_floor.view(-1) tgt_floors = Y_floor.unsqueeze(1).repeat(1, history_len).view(-1) sent_encodings = sent_encodings.view(batch_size * history_len, -1) sent_encodings = self.floor_encoder(sent_encodings, src_floors=src_floors, tgt_floors=tgt_floors) sent_encodings = sent_encodings.view(batch_size, history_len, -1) dial_lens = (input_lens > 0).long().sum(1) (_, _, dial_encodings) = self.dial_encoder(sent_encodings, dial_lens) (word_encodings, sent_encodings, dial_encodings) = (word_encodings, sent_encodings, dial_encodings) attn_ctx = word_encodings.view(batch_size, -1, word_encodings.size(-1)) attn_mask = X.view(batch_size, -1) != self.pad_token_id attn_mask = attn_mask if sample_from == 'dist': ctx_mech = self.ctx2mech_fc(dial_encodings) mech_scores = torch.matmul(torch.matmul(ctx_mech, self.score_bilinear), self.mechanism_embeddings.weight.T) mech_probs = F.softmax(mech_scores, dim=1) mech_probs = mech_probs mech_dist = torch.distributions.Categorical(mech_probs) mech_embed_inputs = mech_dist.sample() elif sample_from == 'random': mech_embed_inputs = [random.randint(0, self.n_mechanisms - 1) for _ in range(batch_size)] mech_embed_inputs = torch.LongTensor(mech_embed_inputs).to(DEVICE) mech_embeds = self.mechanism_embeddings(mech_embed_inputs) dec_ctx = self.ctx_mech_combine_fc(torch.cat([dial_encodings, mech_embeds], dim=1)) batch_size = dec_ctx.size(0) hiddens = self._init_dec_hiddens(dec_ctx) feats = None feats = dec_ctx.unsqueeze(1).repeat(1, self.decode_max_len, 1) ret_dict = self.decoder.forward(batch_size=batch_size, hiddens=hiddens, feats=feats, attn_ctx=attn_ctx, attn_mask=attn_mask, mode=DecoderRNN.MODE_FREE_RUN, gen_type=self.gen_type, temp=self.temp, top_p=self.top_p, top_k=self.top_k) decoder_ret_dict = ret_dict ret_data = {'symbols': decoder_ret_dict['symbols']} ret_stat = {} return (ret_data, ret_stat)
dialog-processing
positive
def has_only_vertex(surf): """Check if `surf` has only vertex cells. Parameters ---------- surf : BSDataSet Input data. Returns ------- bool True if `surf` has only vertex cells. False, otherwise. """ <DeepExtract> lid = vtkCellTypes() surf.GetCellTypes(lid) types = [lid.GetCellType(i) for i in range(lid.GetNumberOfTypes())] ct = np.asarray(types) </DeepExtract> if ct.size != 1: return False return ct[0] == VTK_VERTEX
def has_only_vertex(surf): """Check if `surf` has only vertex cells. Parameters ---------- surf : BSDataSet Input data. Returns ------- bool True if `surf` has only vertex cells. False, otherwise. """ lid = vtkCellTypes() surf.GetCellTypes(lid) types = [lid.GetCellType(i) for i in range(lid.GetNumberOfTypes())] ct = np.asarray(types) if ct.size != 1: return False return ct[0] == VTK_VERTEX
BrainSpace
positive
def __remove_dir(ftp, remote_path): """ Helper function to perform delete operation on the remote server :param ftp: SFTP handle to perform delete operation(s) :param remote_path: Remote path to remove """ files = ftp.listdir(remote_path) for filename in files: path = remote_path + self.separator + filename try: ftp.remove(path) except IOError: <DeepExtract> files = ftp.listdir(path) for filename in files: path = path + self.separator + filename try: ftp.remove(path) except IOError: self.__remove_dir(ftp, path) ftp.rmdir(path) </DeepExtract> ftp.rmdir(remote_path)
def __remove_dir(ftp, remote_path): """ Helper function to perform delete operation on the remote server :param ftp: SFTP handle to perform delete operation(s) :param remote_path: Remote path to remove """ files = ftp.listdir(remote_path) for filename in files: path = remote_path + self.separator + filename try: ftp.remove(path) except IOError: files = ftp.listdir(path) for filename in files: path = path + self.separator + filename try: ftp.remove(path) except IOError: self.__remove_dir(ftp, path) ftp.rmdir(path) ftp.rmdir(remote_path)
ccm
positive
def _set_bindings(self) -> None: """Sets the bindings for the different events triggered by the user.""" self._settings_canvas.bind('<Configure>', self._configure_canvas) self._settings_frame.bind('<Enter>', self._bind_mouse) self._settings_frame.bind('<Leave>', self._unbind_mouse) if system() == 'Linux': self._img_canvas.bind('<4>', self._on_wheel_img) self._img_canvas.bind('<5>', self._on_wheel_img) else: self._img_canvas.bind('<MouseWheel>', self._on_wheel_img) self._img_canvas.bind('<Motion>', self._update_coord) self._img_canvas.bind('<ButtonPress-3>', self._start_move) self._img_canvas.bind('<B3-Motion>', self._move) <DeepExtract> pass </DeepExtract> self._graphical_frame.bind('<Configure>', self._on_img_resize) self._graphical_frame.bind('<Configure>', self._on_hist_resize)
def _set_bindings(self) -> None: """Sets the bindings for the different events triggered by the user.""" self._settings_canvas.bind('<Configure>', self._configure_canvas) self._settings_frame.bind('<Enter>', self._bind_mouse) self._settings_frame.bind('<Leave>', self._unbind_mouse) if system() == 'Linux': self._img_canvas.bind('<4>', self._on_wheel_img) self._img_canvas.bind('<5>', self._on_wheel_img) else: self._img_canvas.bind('<MouseWheel>', self._on_wheel_img) self._img_canvas.bind('<Motion>', self._update_coord) self._img_canvas.bind('<ButtonPress-3>', self._start_move) self._img_canvas.bind('<B3-Motion>', self._move) pass self._graphical_frame.bind('<Configure>', self._on_img_resize) self._graphical_frame.bind('<Configure>', self._on_hist_resize)
crappy
positive
def testUnknownBatchSize(self): batch = 2 (height, width) = (65, 65) global_pool = True num_classes = 10 <DeepExtract> if None in [None, height, width, 3]: inputs = tf.placeholder(tf.float32, (None, height, width, 3)) else: inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [None, 1, 1, 3])) </DeepExtract> with slim.arg_scope(resnet_utils.resnet_arg_scope()): <DeepExtract> block = resnet_v1.resnet_v1_block blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)] (logits, _) = resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=False, reuse=reuse, scope='resnet') </DeepExtract> self.assertTrue(logits.op.name.startswith('resnet/logits')) self.assertListEqual(logits.get_shape().as_list(), [None, 1, 1, num_classes]) <DeepExtract> if None in [batch, height, width, 3]: images = tf.placeholder(tf.float32, (batch, height, width, 3)) else: images = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [batch, 1, 1, 3])) </DeepExtract> with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testUnknownBatchSize(self): batch = 2 (height, width) = (65, 65) global_pool = True num_classes = 10 if None in [None, height, width, 3]: inputs = tf.placeholder(tf.float32, (None, height, width, 3)) else: inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [None, 1, 1, 3])) with slim.arg_scope(resnet_utils.resnet_arg_scope()): block = resnet_v1.resnet_v1_block blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)] (logits, _) = resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=False, reuse=reuse, scope='resnet') self.assertTrue(logits.op.name.startswith('resnet/logits')) self.assertListEqual(logits.get_shape().as_list(), [None, 1, 1, num_classes]) if None in [batch, height, width, 3]: images = tf.placeholder(tf.float32, (batch, height, width, 3)) else: images = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [batch, 1, 1, 3])) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEqual(output.shape, (batch, 1, 1, num_classes))
ctw-baseline
positive
def retrieve(self, request, pk=None): <DeepExtract> try: zone = ThemeManager.Templates.get(pk) except TemplateNotFound: raise NotFound("The template with id '%s' does not exist" % pk) </DeepExtract> serializer = ZoneSerializer(zone) return Response(serializer.data)
def retrieve(self, request, pk=None): try: zone = ThemeManager.Templates.get(pk) except TemplateNotFound: raise NotFound("The template with id '%s' does not exist" % pk) serializer = ZoneSerializer(zone) return Response(serializer.data)
dispatch
positive
def insertBefore(*args): if len(args) == 2: programName = self.DEFAULT_PROGRAM_NAME index = args[0] text = args[1] elif len(args) == 3: programName = args[0] index = args[1] text = args[2] else: raise TypeError('Invalid arguments') if isinstance(index, Token): index = index.index op = InsertBeforeOp(self, index, text) <DeepExtract> p = self.programs.get(programName, None) if p is None: p = self.initializeProgram(programName) rewrites = p </DeepExtract> rewrites.append(op)
def insertBefore(*args): if len(args) == 2: programName = self.DEFAULT_PROGRAM_NAME index = args[0] text = args[1] elif len(args) == 3: programName = args[0] index = args[1] text = args[2] else: raise TypeError('Invalid arguments') if isinstance(index, Token): index = index.index op = InsertBeforeOp(self, index, text) p = self.programs.get(programName, None) if p is None: p = self.initializeProgram(programName) rewrites = p rewrites.append(op)
cpy
positive
def run_pyprover(**kwargs): """Runs pyprover.""" <DeepExtract> if isinstance(['pip', 'install', '-e', pyprover], str): cmd = ['pip', 'install', '-e', pyprover].split() else: cmd = ['pip', 'install', '-e', pyprover] print() logger.log_cmd(cmd) if assert_output is False: assert_output = ('',) elif assert_output is True: assert_output = ('<success>',) elif isinstance(assert_output, str): if '\n' not in assert_output: assert_output = (assert_output,) else: assert_output = tuple((x if x is not True else '<success>' for x in assert_output)) if convert_to_import is None: convert_to_import = cmd[0] == sys.executable and cmd[1] != '-c' and (cmd[1:3] != ['-m', 'coconut']) if convert_to_import: assert cmd[0] == sys.executable if cmd[1] == '-m': module_name = cmd[2] extra_argv = cmd[3:] (stdout, stderr, retcode) = call_with_import(module_name, extra_argv) else: module_path = cmd[1] extra_argv = cmd[2:] module_dir = os.path.dirname(module_path) module_name = os.path.splitext(os.path.basename(module_path))[0] if os.path.isdir(module_path): module_name += '.__main__' with using_sys_path(module_dir): (stdout, stderr, retcode) = call_with_import(module_name, extra_argv) else: (stdout, stderr, retcode) = call_output(['pip', 'install', '-e', pyprover], **kwargs) if expect_retcode is not None: assert retcode == expect_retcode, 'Return code not as expected ({retcode} != {expect_retcode}) in: {cmd!r}'.format(retcode=retcode, expect_retcode=expect_retcode, cmd=['pip', 'install', '-e', pyprover]) if stderr_first: out = stderr + stdout else: out = stdout + stderr out = ''.join(out) raw_lines = out.splitlines() lines = [] i = 0 while True: if i >= len(raw_lines): break line = raw_lines[i] if sys.version_info < (3, 9) and line == 'Error in atexit._run_exitfuncs:': while True: i += 1 if i >= len(raw_lines): break new_line = raw_lines[i] if not new_line.startswith(' ') and (not any((test in new_line for test in ignore_atexit_errors_with))): i -= 1 break continue if any((infix in line for infix in mypy_err_infixes)): line += '\n' + raw_lines[i + 1] i += 1 for j in range(i + 2, len(raw_lines)): next_line = raw_lines[j] if next_line.lstrip() == next_line: break line += '\n' + next_line i += 1 lines.append(line) i += 1 for line in lines: for errstr in always_err_strs: assert errstr not in line, '{errstr!r} in {line!r}'.format(errstr=errstr, line=line) if check_errors: assert 'Traceback (most recent call last):' not in line, 'Traceback in ' + repr(line) assert 'Exception' not in line, 'Exception in ' + repr(line) assert 'Error' not in line, 'Error in ' + repr(line) if check_mypy and all((test not in line for test in ignore_mypy_errs_with)): assert 'error:' not in line, 'MyPy error in ' + repr(line) if isinstance(assert_output, str): got_output = '\n'.join(raw_lines) + '\n' assert assert_output in got_output, 'Expected ' + repr(assert_output) + '; got ' + repr(got_output) else: last_line = '' for line in reversed(lines): if not any((ignore in line for ignore in ignore_last_lines_with)): last_line = line break if assert_output is None: assert not last_line, 'Expected nothing; got:\n' + '\n'.join((repr(li) for li in raw_lines)) else: assert any((x in last_line for x in assert_output)), 'Expected ' + ', '.join((repr(s) for s in assert_output)) + ' in ' + repr(last_line) + '; got:\n' + '\n'.join((repr(li) for li in raw_lines)) </DeepExtract> <DeepExtract> call([sys.executable] + [os.path.join(pyprover, 'pyprover', 'tests.py')], **kwargs) </DeepExtract>
def run_pyprover(**kwargs): """Runs pyprover.""" if isinstance(['pip', 'install', '-e', pyprover], str): cmd = ['pip', 'install', '-e', pyprover].split() else: cmd = ['pip', 'install', '-e', pyprover] print() logger.log_cmd(cmd) if assert_output is False: assert_output = ('',) elif assert_output is True: assert_output = ('<success>',) elif isinstance(assert_output, str): if '\n' not in assert_output: assert_output = (assert_output,) else: assert_output = tuple((x if x is not True else '<success>' for x in assert_output)) if convert_to_import is None: convert_to_import = cmd[0] == sys.executable and cmd[1] != '-c' and (cmd[1:3] != ['-m', 'coconut']) if convert_to_import: assert cmd[0] == sys.executable if cmd[1] == '-m': module_name = cmd[2] extra_argv = cmd[3:] (stdout, stderr, retcode) = call_with_import(module_name, extra_argv) else: module_path = cmd[1] extra_argv = cmd[2:] module_dir = os.path.dirname(module_path) module_name = os.path.splitext(os.path.basename(module_path))[0] if os.path.isdir(module_path): module_name += '.__main__' with using_sys_path(module_dir): (stdout, stderr, retcode) = call_with_import(module_name, extra_argv) else: (stdout, stderr, retcode) = call_output(['pip', 'install', '-e', pyprover], **kwargs) if expect_retcode is not None: assert retcode == expect_retcode, 'Return code not as expected ({retcode} != {expect_retcode}) in: {cmd!r}'.format(retcode=retcode, expect_retcode=expect_retcode, cmd=['pip', 'install', '-e', pyprover]) if stderr_first: out = stderr + stdout else: out = stdout + stderr out = ''.join(out) raw_lines = out.splitlines() lines = [] i = 0 while True: if i >= len(raw_lines): break line = raw_lines[i] if sys.version_info < (3, 9) and line == 'Error in atexit._run_exitfuncs:': while True: i += 1 if i >= len(raw_lines): break new_line = raw_lines[i] if not new_line.startswith(' ') and (not any((test in new_line for test in ignore_atexit_errors_with))): i -= 1 break continue if any((infix in line for infix in mypy_err_infixes)): line += '\n' + raw_lines[i + 1] i += 1 for j in range(i + 2, len(raw_lines)): next_line = raw_lines[j] if next_line.lstrip() == next_line: break line += '\n' + next_line i += 1 lines.append(line) i += 1 for line in lines: for errstr in always_err_strs: assert errstr not in line, '{errstr!r} in {line!r}'.format(errstr=errstr, line=line) if check_errors: assert 'Traceback (most recent call last):' not in line, 'Traceback in ' + repr(line) assert 'Exception' not in line, 'Exception in ' + repr(line) assert 'Error' not in line, 'Error in ' + repr(line) if check_mypy and all((test not in line for test in ignore_mypy_errs_with)): assert 'error:' not in line, 'MyPy error in ' + repr(line) if isinstance(assert_output, str): got_output = '\n'.join(raw_lines) + '\n' assert assert_output in got_output, 'Expected ' + repr(assert_output) + '; got ' + repr(got_output) else: last_line = '' for line in reversed(lines): if not any((ignore in line for ignore in ignore_last_lines_with)): last_line = line break if assert_output is None: assert not last_line, 'Expected nothing; got:\n' + '\n'.join((repr(li) for li in raw_lines)) else: assert any((x in last_line for x in assert_output)), 'Expected ' + ', '.join((repr(s) for s in assert_output)) + ' in ' + repr(last_line) + '; got:\n' + '\n'.join((repr(li) for li in raw_lines)) call([sys.executable] + [os.path.join(pyprover, 'pyprover', 'tests.py')], **kwargs)
coconut
positive
def test_clone(fake_hg_repo): (hg, local, remote) = fake_hg_repo <DeepExtract> path = os.path.join(local, 'file1') with open(path, 'w') as f: f.write('1\n2\n3\n4\n5\n6\n7\n') hg.add(files=[bytes(path, 'ascii')]) </DeepExtract> <DeepExtract> commit_message = commit_message if commit_message is not None else 'Commit {}'.format(' '.join([elem.decode('ascii') for status in hg.status() for elem in status])) (i, revision) = hg.commit(message=commit_message, user='Moz Illa <milla@mozilla.org>', date=date, amend=amend) return str(revision, 'ascii') </DeepExtract> hg.push(dest=bytes(remote, 'ascii')) tmp_repo_dir = 'ciao' repository.clone(tmp_repo_dir, url=remote) assert not os.path.exists(os.path.join(tmp_repo_dir, 'file1')) remote_revs = repository.get_revs(hg) with hglib.open(tmp_repo_dir) as tmp_hg: assert repository.get_revs(tmp_hg) == remote_revs with hglib.open(tmp_repo_dir) as tmp_hg: <DeepExtract> path = os.path.join(tmp_repo_dir, 'file1') with open(path, 'w') as f: f.write('1\n2\n3\n') tmp_hg.add(files=[bytes(path, 'ascii')]) </DeepExtract> <DeepExtract> commit_message = commit_message if commit_message is not None else 'Commit {}'.format(' '.join([elem.decode('ascii') for status in tmp_hg.status() for elem in status])) (i, revision) = tmp_hg.commit(message=commit_message, user='Moz Illa <milla@mozilla.org>', date=date, amend=amend) return str(revision, 'ascii') </DeepExtract> <DeepExtract> path = os.path.join(local, 'file1') with open(path, 'w') as f: f.write('1\n2\n') hg.add(files=[bytes(path, 'ascii')]) </DeepExtract> <DeepExtract> commit_message = commit_message if commit_message is not None else 'Commit {}'.format(' '.join([elem.decode('ascii') for status in hg.status() for elem in status])) (i, revision) = hg.commit(message=commit_message, user='Moz Illa <milla@mozilla.org>', date=date, amend=amend) return str(revision, 'ascii') </DeepExtract> hg.push(dest=bytes(remote, 'ascii')) repository.clone(tmp_repo_dir, url=remote) remote_revs = repository.get_revs(hg) with hglib.open(tmp_repo_dir) as tmp_hg: assert repository.get_revs(tmp_hg) == remote_revs repository.clone(f'{tmp_repo_dir}2', url=remote, update=True) assert os.path.exists(os.path.join(f'{tmp_repo_dir}2', 'file1'))
def test_clone(fake_hg_repo): (hg, local, remote) = fake_hg_repo path = os.path.join(local, 'file1') with open(path, 'w') as f: f.write('1\n2\n3\n4\n5\n6\n7\n') hg.add(files=[bytes(path, 'ascii')]) commit_message = commit_message if commit_message is not None else 'Commit {}'.format(' '.join([elem.decode('ascii') for status in hg.status() for elem in status])) (i, revision) = hg.commit(message=commit_message, user='Moz Illa <milla@mozilla.org>', date=date, amend=amend) return str(revision, 'ascii') hg.push(dest=bytes(remote, 'ascii')) tmp_repo_dir = 'ciao' repository.clone(tmp_repo_dir, url=remote) assert not os.path.exists(os.path.join(tmp_repo_dir, 'file1')) remote_revs = repository.get_revs(hg) with hglib.open(tmp_repo_dir) as tmp_hg: assert repository.get_revs(tmp_hg) == remote_revs with hglib.open(tmp_repo_dir) as tmp_hg: path = os.path.join(tmp_repo_dir, 'file1') with open(path, 'w') as f: f.write('1\n2\n3\n') tmp_hg.add(files=[bytes(path, 'ascii')]) commit_message = commit_message if commit_message is not None else 'Commit {}'.format(' '.join([elem.decode('ascii') for status in tmp_hg.status() for elem in status])) (i, revision) = tmp_hg.commit(message=commit_message, user='Moz Illa <milla@mozilla.org>', date=date, amend=amend) return str(revision, 'ascii') path = os.path.join(local, 'file1') with open(path, 'w') as f: f.write('1\n2\n') hg.add(files=[bytes(path, 'ascii')]) commit_message = commit_message if commit_message is not None else 'Commit {}'.format(' '.join([elem.decode('ascii') for status in hg.status() for elem in status])) (i, revision) = hg.commit(message=commit_message, user='Moz Illa <milla@mozilla.org>', date=date, amend=amend) return str(revision, 'ascii') hg.push(dest=bytes(remote, 'ascii')) repository.clone(tmp_repo_dir, url=remote) remote_revs = repository.get_revs(hg) with hglib.open(tmp_repo_dir) as tmp_hg: assert repository.get_revs(tmp_hg) == remote_revs repository.clone(f'{tmp_repo_dir}2', url=remote, update=True) assert os.path.exists(os.path.join(f'{tmp_repo_dir}2', 'file1'))
bugbug
positive
def proj_frame(xyz_file, mo_dir, dump_dir=None, basis='ccpvtz', ename='e_hf', intor='ovlp', verbose=False): <DeepExtract> with open(xyz_file) as fp: natoms = int(fp.readline()) comments = fp.readline() xyz_str = ''.join(fp.readlines()) mol = gto.Mole() mol.verbose = 4 if verbose else 0 mol.atom = xyz_str mol.basis = basis try: mol.build(0, 0, unit='Ang') except RuntimeError as e: mol.spin = 1 mol.build(0, 0, unit='Ang') mol = mol </DeepExtract> <DeepExtract> meta = np.loadtxt(os.path.join(mo_dir, 'system.raw'), dtype=int).reshape(-1) natm = meta[0] nao = meta[1] nocc = meta[2] nvir = meta[3] ehf = np.loadtxt(os.path.join(mo_dir, f'{ename}.raw')).reshape(-1, 1) e_occ = np.loadtxt(os.path.join(mo_dir, 'ener_occ.raw')).reshape(-1, nocc) c_occ = np.loadtxt(os.path.join(mo_dir, 'coeff_occ.raw')).reshape([-1, nocc, nao]) (meta, ehf, e_occ, c_occ) = (meta, ehf, e_occ, c_occ) </DeepExtract> <DeepExtract> natm = mol.natm mole_coords = mol.atom_coords(unit='Ang') test_mol = gto.Mole() if verbose: test_mol.verbose = 4 else: test_mol.verbose = 0 test_mol.atom = [['Ne', coord] for coord in mole_coords] test_mol.basis = BASIS test_mol.spin = 0 test_mol.build(0, 0, unit='Ang') proj = gto.intor_cross(f'int1e_{intor}_sph', mol, test_mol) def proj_func(mo): proj_coeff = np.matmul(mo, proj).reshape(*mo.shape[:2], natm, -1) if verbose: print('shape of coeff data ', proj_coeff.shape) proj_func = (proj_coeff, proj_coeff.shape[-1]) proj_func = proj_func </DeepExtract> <DeepExtract> proj_coeff = np.matmul(c_occ, proj).reshape(*c_occ.shape[:2], natm, -1) if verbose: print('shape of coeff data ', proj_coeff.shape) (c_proj_occ, nproj) = (proj_coeff, proj_coeff.shape[-1]) </DeepExtract> c_occ = c_proj_occ meta = np.append(meta, nproj) if dump_dir is not None: <DeepExtract> os.makedirs(dump_dir, exist_ok=True) np.savetxt(os.path.join(dump_dir, 'system.raw'), meta.reshape(1, -1), fmt='%d', header='natm nao nocc nvir nproj') nframe = e_occ.shape[0] natm = meta[0] nao = meta[1] nocc = meta[2] nvir = meta[3] nproj = meta[4] assert all(c_occ.shape == np.array([nframe, nocc, natm, nproj], dtype=int)) assert all(e_occ.shape == np.array([nframe, nocc], dtype=int)) assert all((all(dm.shape == np.array([nframe, natm, nproj], dtype=int)) for dm in dm_dict.values())) np.save(os.path.join(dump_dir, 'e_hf.npy'), ehf) np.save(os.path.join(dump_dir, 'ener_occ.npy'), e_occ) np.save(os.path.join(dump_dir, 'coeff_occ.npy'), c_occ) for (name, dm) in dm_dict.items(): np.save(os.path.join(dump_dir, f'{name}.npy'), dm) </DeepExtract> return (meta, ehf, e_occ, c_occ)
def proj_frame(xyz_file, mo_dir, dump_dir=None, basis='ccpvtz', ename='e_hf', intor='ovlp', verbose=False):
    with open(xyz_file) as fp:
        natoms = int(fp.readline())
        comments = fp.readline()
        xyz_str = ''.join(fp.readlines())
    mol = gto.Mole()
    mol.verbose = 4 if verbose else 0
    mol.atom = xyz_str
    mol.basis = basis
    try:
        mol.build(0, 0, unit='Ang')
    except RuntimeError as e:
        mol.spin = 1
        mol.build(0, 0, unit='Ang')
    mol = mol
    meta = np.loadtxt(os.path.join(mo_dir, 'system.raw'), dtype=int).reshape(-1)
    natm = meta[0]
    nao = meta[1]
    nocc = meta[2]
    nvir = meta[3]
    ehf = np.loadtxt(os.path.join(mo_dir, f'{ename}.raw')).reshape(-1, 1)
    e_occ = np.loadtxt(os.path.join(mo_dir, 'ener_occ.raw')).reshape(-1, nocc)
    c_occ = np.loadtxt(os.path.join(mo_dir, 'coeff_occ.raw')).reshape([-1, nocc, nao])
    (meta, ehf, e_occ, c_occ) = (meta, ehf, e_occ, c_occ)
    natm = mol.natm
    mole_coords = mol.atom_coords(unit='Ang')
    test_mol = gto.Mole()
    if verbose:
        test_mol.verbose = 4
    else:
        test_mol.verbose = 0
    test_mol.atom = [['Ne', coord] for coord in mole_coords]
    test_mol.basis = BASIS
    test_mol.spin = 0
    test_mol.build(0, 0, unit='Ang')
    proj = gto.intor_cross(f'int1e_{intor}_sph', mol, test_mol)
    def proj_func(mo):
        proj_coeff = np.matmul(mo, proj).reshape(*mo.shape[:2], natm, -1)
        if verbose:
            print('shape of coeff data ', proj_coeff.shape)
        proj_func = (proj_coeff, proj_coeff.shape[-1])
    proj_func = proj_func
    proj_coeff = np.matmul(c_occ, proj).reshape(*c_occ.shape[:2], natm, -1)
    if verbose:
        print('shape of coeff data ', proj_coeff.shape)
    (c_proj_occ, nproj) = (proj_coeff, proj_coeff.shape[-1])
    c_occ = c_proj_occ
    meta = np.append(meta, nproj)
    if dump_dir is not None:
        os.makedirs(dump_dir, exist_ok=True)
        np.savetxt(os.path.join(dump_dir, 'system.raw'), meta.reshape(1, -1), fmt='%d', header='natm nao nocc nvir nproj')
        nframe = e_occ.shape[0]
        natm = meta[0]
        nao = meta[1]
        nocc = meta[2]
        nvir = meta[3]
        nproj = meta[4]
        assert all(c_occ.shape == np.array([nframe, nocc, natm, nproj], dtype=int))
        assert all(e_occ.shape == np.array([nframe, nocc], dtype=int))
        assert all((all(dm.shape == np.array([nframe, natm, nproj], dtype=int)) for dm in dm_dict.values()))
        np.save(os.path.join(dump_dir, 'e_hf.npy'), ehf)
        np.save(os.path.join(dump_dir, 'ener_occ.npy'), e_occ)
        np.save(os.path.join(dump_dir, 'coeff_occ.npy'), c_occ)
        for (name, dm) in dm_dict.items():
            np.save(os.path.join(dump_dir, f'{name}.npy'), dm)
    return (meta, ehf, e_occ, c_occ)
deepks-kit
positive
def run(self):
    """
    Handle the server-side of the session's reverse TCP shell
    """
    while True:
        if self._active.wait():
            task = self.recv_task() if not self._prompt else self._prompt
            if isinstance(task, dict):
                if 'help' in task.get('task'):
                    self._active.clear()
                    globals()['c2'].help(task.get('result'))
                    self._active.set()
                elif 'prompt' in task.get('task'):
                    self._prompt = task
                    command = globals()['c2']._get_prompt(task.get('result') % int(self.id))
                    (cmd, _, action) = command.partition(' ')
                    if cmd in ('\n', ' ', ''):
                        continue
                    elif cmd in globals()['c2'].commands and callable(globals()['c2'].commands[cmd]['method']):
                        method = globals()['c2'].commands[cmd]['method']
                        if callable(method):
                            result = method(action) if len(action) else method()
                            if result:
                                task = {'task': cmd, 'result': result, 'session': self.info.get('uid')}
                                globals()['c2'].display(result.encode())
                                globals()['c2'].database.handle_task(task)
                        continue
                    else:
                        task = globals()['c2'].database.handle_task({'task': command, 'session': self.info.get('uid')})
                        <DeepExtract>
                        if not isinstance(task, dict):
                            raise TypeError('task must be a dictionary object')
                        if not 'session' in task:
                            task['session'] = self.info.get('uid')
                        data = security.encrypt_aes(json.dumps(task), self.key)
                        msg = struct.pack('!L', len(data)) + data
                        self.connection.sendall(msg)
                        return True
                        </DeepExtract>
                elif 'result' in task:
                    if task.get('result') and task.get('result') != 'None':
                        globals()['c2'].display(task.get('result').encode())
                        globals()['c2'].database.handle_task(task)
            elif self._abort:
                break
            elif isinstance(task, int) and task == 0:
                break
            self._prompt = None
            time.sleep(1)
    globals()['c2'].session_remove(self.id)
    self._active.clear()
    globals()['c2']._return()
def run(self):
    """
    Handle the server-side of the session's reverse TCP shell
    """
    while True:
        if self._active.wait():
            task = self.recv_task() if not self._prompt else self._prompt
            if isinstance(task, dict):
                if 'help' in task.get('task'):
                    self._active.clear()
                    globals()['c2'].help(task.get('result'))
                    self._active.set()
                elif 'prompt' in task.get('task'):
                    self._prompt = task
                    command = globals()['c2']._get_prompt(task.get('result') % int(self.id))
                    (cmd, _, action) = command.partition(' ')
                    if cmd in ('\n', ' ', ''):
                        continue
                    elif cmd in globals()['c2'].commands and callable(globals()['c2'].commands[cmd]['method']):
                        method = globals()['c2'].commands[cmd]['method']
                        if callable(method):
                            result = method(action) if len(action) else method()
                            if result:
                                task = {'task': cmd, 'result': result, 'session': self.info.get('uid')}
                                globals()['c2'].display(result.encode())
                                globals()['c2'].database.handle_task(task)
                        continue
                    else:
                        task = globals()['c2'].database.handle_task({'task': command, 'session': self.info.get('uid')})
                        if not isinstance(task, dict):
                            raise TypeError('task must be a dictionary object')
                        if not 'session' in task:
                            task['session'] = self.info.get('uid')
                        data = security.encrypt_aes(json.dumps(task), self.key)
                        msg = struct.pack('!L', len(data)) + data
                        self.connection.sendall(msg)
                        return True
                elif 'result' in task:
                    if task.get('result') and task.get('result') != 'None':
                        globals()['c2'].display(task.get('result').encode())
                        globals()['c2'].database.handle_task(task)
            elif self._abort:
                break
            elif isinstance(task, int) and task == 0:
                break
            self._prompt = None
            time.sleep(1)
    globals()['c2'].session_remove(self.id)
    self._active.clear()
    globals()['c2']._return()
byob
positive
@common.skipUnlessDestructive
def test_get_recycle_bin_destructive(self):
    """Unit test the destructive part of get_recycle_bin"""
    <DeepExtract>
    tests = ('regular', 'unicode-emdash-u—', 'long' + 'x' * 100)
    for test in tests:
        (fd, filename) = tempfile.mkstemp(prefix='bleachbit-recycle-file', suffix=test)
        os.close(fd)
        move_to_recycle_bin(filename)
    dirname = tempfile.mkdtemp(prefix='bleachbit-recycle-folder')
    common.touch_file(os.path.join(dirname, 'file'))
    move_to_recycle_bin(dirname)
    </DeepExtract>
    counter = 0
    for f in get_recycle_bin():
        counter += 1
        FileUtilities.delete(f)
    self.assertGreaterEqual(counter, 3, 'deleted %d' % counter)
    for _f in get_recycle_bin():
        self.fail('recycle bin should be empty, but it is not')
@common.skipUnlessDestructive
def test_get_recycle_bin_destructive(self):
    """Unit test the destructive part of get_recycle_bin"""
    tests = ('regular', 'unicode-emdash-u—', 'long' + 'x' * 100)
    for test in tests:
        (fd, filename) = tempfile.mkstemp(prefix='bleachbit-recycle-file', suffix=test)
        os.close(fd)
        move_to_recycle_bin(filename)
    dirname = tempfile.mkdtemp(prefix='bleachbit-recycle-folder')
    common.touch_file(os.path.join(dirname, 'file'))
    move_to_recycle_bin(dirname)
    counter = 0
    for f in get_recycle_bin():
        counter += 1
        FileUtilities.delete(f)
    self.assertGreaterEqual(counter, 3, 'deleted %d' % counter)
    for _f in get_recycle_bin():
        self.fail('recycle bin should be empty, but it is not')
bleachbit
positive
def mutual_information(M_c, X_Ls, X_Ds, Q, get_next_seed, n_samples=1000): assert len(X_Ds) == len(X_Ls) n_postertior_samples = len(X_Ds) n_cols = len(M_c['column_metadata']) MI = [] Linfoot = [] for query in Q: assert len(query) == 2 assert query[0] >= 0 and query[0] < n_cols assert query[1] >= 0 and query[1] < n_cols X = query[0] Y = query[1] MI_sample = [] Linfoot_sample = [] for sample in range(n_postertior_samples): X_L = X_Ls[sample] X_D = X_Ds[sample] if column_is_bounded_discrete(M_c, X) and column_is_bounded_discrete(M_c, Y): <DeepExtract> get_view_index = lambda which_column: X_L['column_partition']['assignments'][which_column] view_X = get_view_index(X) view_Y = get_view_index(Y) if view_X != view_Y: MI_s = 0.0 view_state = X_L['view_state'][view_X] cluster_logps = numpy.array(su.determine_cluster_crp_logps(view_state)) n_clusters = len(cluster_logps) x_values = M_c['column_metadata'][X]['code_to_value'].values() y_values = M_c['column_metadata'][Y]['code_to_value'].values() component_models_X = [0] * n_clusters component_models_Y = [0] * n_clusters for i in range(n_clusters): cluster_models = su.create_cluster_model_from_X_L(M_c, X_L, view_X, i) component_models_X[i] = cluster_models[X] component_models_Y[i] = cluster_models[Y] def marginal_predictive_logps_by_cluster(value, component_models): MI_s = numpy.array([component_models[j].calc_element_predictive_logp(value) + cluster_logps[j] for j in range(n_clusters)]) x_marginal_predictive_logps_by_cluster = [marginal_predictive_logps_by_cluster(x, component_models_X) for x in x_values] x_net_marginal_predictive_logps = [logsumexp(ps) for ps in x_marginal_predictive_logps_by_cluster] y_marginal_predictive_logps_by_cluster = [marginal_predictive_logps_by_cluster(y, component_models_Y) for y in y_values] y_net_marginal_predictive_logps = [logsumexp(ps) for ps in y_marginal_predictive_logps_by_cluster] MI = 0.0 for (i, x) in enumerate(x_values): x_marginals = x_marginal_predictive_logps_by_cluster[i] for (j, y) in enumerate(y_values): y_marginals = y_marginal_predictive_logps_by_cluster[j] joint_predictive_logp_by_cluster = x_marginals + y_marginals - cluster_logps joint_predictive_logp = logsumexp(joint_predictive_logp_by_cluster) MI += math.exp(joint_predictive_logp) * (joint_predictive_logp - (x_net_marginal_predictive_logps[i] + y_net_marginal_predictive_logps[j])) if MI <= 0.0: MI = 0.0 MI_s = MI </DeepExtract> else: <DeepExtract> random_state = numpy.random.RandomState(get_next_seed()) get_view_index = lambda which_column: X_L['column_partition']['assignments'][which_column] view_X = get_view_index(X) view_Y = get_view_index(Y) if view_X != view_Y: MI_s = 0.0 view_state = X_L['view_state'][view_X] cluster_logps = su.determine_cluster_crp_logps(view_state) cluster_crps = numpy.exp(cluster_logps) n_clusters = len(cluster_crps) component_models_X = [0] * n_clusters component_models_Y = [0] * n_clusters for i in range(n_clusters): cluster_models = su.create_cluster_model_from_X_L(M_c, X_L, view_X, i) component_models_X[i] = cluster_models[X] component_models_Y[i] = cluster_models[Y] MI = numpy.zeros(n_samples) weights = numpy.zeros(n_samples) for i in range(n_samples): cluster_idx = numpy.nonzero(random_state.multinomial(1, cluster_crps))[0][0] x = component_models_X[cluster_idx].get_draw(get_next_seed()) y = component_models_Y[cluster_idx].get_draw(get_next_seed()) Pxy = numpy.zeros(n_clusters) Px = numpy.zeros(n_clusters) Py = numpy.zeros(n_clusters) for j in range(n_clusters): Px[j] = 
component_models_X[j].calc_element_predictive_logp(x) Py[j] = component_models_Y[j].calc_element_predictive_logp(y) Pxy[j] = Px[j] + Py[j] + cluster_logps[j] Px[j] += cluster_logps[j] Py[j] += cluster_logps[j] Px = logsumexp(Px) Py = logsumexp(Py) Pxy = logsumexp(Pxy) MI[i] = Pxy - (Px + Py) weights[i] = Pxy Z = logsumexp(weights) weights = numpy.exp(weights - Z) MI_ret = numpy.sum(MI * weights) if MI_ret <= 0.0: MI_ret = 0.0 MI_s = MI_ret </DeepExtract> <DeepExtract> if MI_s < 0: MI_s = 0 linfoot = (1.0 - math.exp(-2.0 * MI_s)) ** 0.5 </DeepExtract> MI_sample.append(MI_s) Linfoot_sample.append(linfoot) MI.append(MI_sample) Linfoot.append(Linfoot_sample) assert len(MI) == len(Q) assert len(Linfoot) == len(Q) return (MI, Linfoot)
def mutual_information(M_c, X_Ls, X_Ds, Q, get_next_seed, n_samples=1000): assert len(X_Ds) == len(X_Ls) n_postertior_samples = len(X_Ds) n_cols = len(M_c['column_metadata']) MI = [] Linfoot = [] for query in Q: assert len(query) == 2 assert query[0] >= 0 and query[0] < n_cols assert query[1] >= 0 and query[1] < n_cols X = query[0] Y = query[1] MI_sample = [] Linfoot_sample = [] for sample in range(n_postertior_samples): X_L = X_Ls[sample] X_D = X_Ds[sample] if column_is_bounded_discrete(M_c, X) and column_is_bounded_discrete(M_c, Y): get_view_index = lambda which_column: X_L['column_partition']['assignments'][which_column] view_X = get_view_index(X) view_Y = get_view_index(Y) if view_X != view_Y: MI_s = 0.0 view_state = X_L['view_state'][view_X] cluster_logps = numpy.array(su.determine_cluster_crp_logps(view_state)) n_clusters = len(cluster_logps) x_values = M_c['column_metadata'][X]['code_to_value'].values() y_values = M_c['column_metadata'][Y]['code_to_value'].values() component_models_X = [0] * n_clusters component_models_Y = [0] * n_clusters for i in range(n_clusters): cluster_models = su.create_cluster_model_from_X_L(M_c, X_L, view_X, i) component_models_X[i] = cluster_models[X] component_models_Y[i] = cluster_models[Y] def marginal_predictive_logps_by_cluster(value, component_models): MI_s = numpy.array([component_models[j].calc_element_predictive_logp(value) + cluster_logps[j] for j in range(n_clusters)]) x_marginal_predictive_logps_by_cluster = [marginal_predictive_logps_by_cluster(x, component_models_X) for x in x_values] x_net_marginal_predictive_logps = [logsumexp(ps) for ps in x_marginal_predictive_logps_by_cluster] y_marginal_predictive_logps_by_cluster = [marginal_predictive_logps_by_cluster(y, component_models_Y) for y in y_values] y_net_marginal_predictive_logps = [logsumexp(ps) for ps in y_marginal_predictive_logps_by_cluster] MI = 0.0 for (i, x) in enumerate(x_values): x_marginals = x_marginal_predictive_logps_by_cluster[i] for (j, y) in enumerate(y_values): y_marginals = y_marginal_predictive_logps_by_cluster[j] joint_predictive_logp_by_cluster = x_marginals + y_marginals - cluster_logps joint_predictive_logp = logsumexp(joint_predictive_logp_by_cluster) MI += math.exp(joint_predictive_logp) * (joint_predictive_logp - (x_net_marginal_predictive_logps[i] + y_net_marginal_predictive_logps[j])) if MI <= 0.0: MI = 0.0 MI_s = MI else: random_state = numpy.random.RandomState(get_next_seed()) get_view_index = lambda which_column: X_L['column_partition']['assignments'][which_column] view_X = get_view_index(X) view_Y = get_view_index(Y) if view_X != view_Y: MI_s = 0.0 view_state = X_L['view_state'][view_X] cluster_logps = su.determine_cluster_crp_logps(view_state) cluster_crps = numpy.exp(cluster_logps) n_clusters = len(cluster_crps) component_models_X = [0] * n_clusters component_models_Y = [0] * n_clusters for i in range(n_clusters): cluster_models = su.create_cluster_model_from_X_L(M_c, X_L, view_X, i) component_models_X[i] = cluster_models[X] component_models_Y[i] = cluster_models[Y] MI = numpy.zeros(n_samples) weights = numpy.zeros(n_samples) for i in range(n_samples): cluster_idx = numpy.nonzero(random_state.multinomial(1, cluster_crps))[0][0] x = component_models_X[cluster_idx].get_draw(get_next_seed()) y = component_models_Y[cluster_idx].get_draw(get_next_seed()) Pxy = numpy.zeros(n_clusters) Px = numpy.zeros(n_clusters) Py = numpy.zeros(n_clusters) for j in range(n_clusters): Px[j] = component_models_X[j].calc_element_predictive_logp(x) Py[j] = 
component_models_Y[j].calc_element_predictive_logp(y) Pxy[j] = Px[j] + Py[j] + cluster_logps[j] Px[j] += cluster_logps[j] Py[j] += cluster_logps[j] Px = logsumexp(Px) Py = logsumexp(Py) Pxy = logsumexp(Pxy) MI[i] = Pxy - (Px + Py) weights[i] = Pxy Z = logsumexp(weights) weights = numpy.exp(weights - Z) MI_ret = numpy.sum(MI * weights) if MI_ret <= 0.0: MI_ret = 0.0 MI_s = MI_ret if MI_s < 0: MI_s = 0 linfoot = (1.0 - math.exp(-2.0 * MI_s)) ** 0.5 MI_sample.append(MI_s) Linfoot_sample.append(linfoot) MI.append(MI_sample) Linfoot.append(Linfoot_sample) assert len(MI) == len(Q) assert len(Linfoot) == len(Q) return (MI, Linfoot)
crosscat
positive
def _add_implemented_requirements(row: int, control_implementation: ControlImplementation, controls: Dict[str, List[str]], component_name: str, parameter_name: str, responsible_roles: List[ResponsibleRole], goal_name_id: str) -> None:
    """Add implemented requirements."""
    goal_remarks = self.xlsx_helper.get_goal_remarks(row)
    parameter_value_default = self.xlsx_helper.get_parameter_value_default(row)
    for control in controls.keys():
        control_uuid = str(uuid.uuid4())
        prop1 = Property(name='goal_name_id', class_=self._get_class_for_property_name('goal_name_id'), value=goal_name_id, ns=self._get_namespace(), remarks=str(goal_remarks))
        prop2 = Property(name='goal_version', class_=self._get_class_for_property_name('goal_version'), value=self._get_goal_version(), ns=self._get_namespace(), remarks=str(goal_name_id))
        props = [prop1, prop2]
        (control_id, _) = self.catalog_interface.get_control_id_and_status(control)
        if not control_id:
            logger.info(f'row {row} control {control} not found in catalog')
            control_id = control
        implemented_requirement = ImplementedRequirement(uuid=control_uuid, description=control, props=props, control_id=control_id, responsible_roles=responsible_roles)
        <DeepExtract>
        control_statements = controls[control]
        if control_statements:
            statements = []
            for control_statement in control_statements:
                statement_id = control + control_statement
                if any((i in control for i in '()')):
                    control = control.replace('(', '_')
                    control = control.replace(')', '')
                    logger.info(f'row {row} control {control} edited to remove parentheses')
                statement = Statement(statement_id=control, uuid=str(uuid.uuid4()), description=f'{component_name} implements {statement_id}')
                statements.append(statement)
            implemented_requirement.statements = statements
        </DeepExtract>
        <DeepExtract>
        if parameter_name is not None:
            parameter_name = parameter_name.replace(' ', '_')
            if parameter_value_default is not None:
                if implemented_requirement.set_parameters is None:
                    implemented_requirement.set_parameters = []
                values = [parameter_value_default]
                set_parameter = SetParameter(param_id=parameter_name, values=values)
                set_parameters = [set_parameter]
                implemented_requirement.set_parameters.extend(set_parameters)
        </DeepExtract>
        control_implementation.implemented_requirements.append(implemented_requirement)
def _add_implemented_requirements(row: int, control_implementation: ControlImplementation, controls: Dict[str, List[str]], component_name: str, parameter_name: str, responsible_roles: List[ResponsibleRole], goal_name_id: str) -> None:
    """Add implemented requirements."""
    goal_remarks = self.xlsx_helper.get_goal_remarks(row)
    parameter_value_default = self.xlsx_helper.get_parameter_value_default(row)
    for control in controls.keys():
        control_uuid = str(uuid.uuid4())
        prop1 = Property(name='goal_name_id', class_=self._get_class_for_property_name('goal_name_id'), value=goal_name_id, ns=self._get_namespace(), remarks=str(goal_remarks))
        prop2 = Property(name='goal_version', class_=self._get_class_for_property_name('goal_version'), value=self._get_goal_version(), ns=self._get_namespace(), remarks=str(goal_name_id))
        props = [prop1, prop2]
        (control_id, _) = self.catalog_interface.get_control_id_and_status(control)
        if not control_id:
            logger.info(f'row {row} control {control} not found in catalog')
            control_id = control
        implemented_requirement = ImplementedRequirement(uuid=control_uuid, description=control, props=props, control_id=control_id, responsible_roles=responsible_roles)
        control_statements = controls[control]
        if control_statements:
            statements = []
            for control_statement in control_statements:
                statement_id = control + control_statement
                if any((i in control for i in '()')):
                    control = control.replace('(', '_')
                    control = control.replace(')', '')
                    logger.info(f'row {row} control {control} edited to remove parentheses')
                statement = Statement(statement_id=control, uuid=str(uuid.uuid4()), description=f'{component_name} implements {statement_id}')
                statements.append(statement)
            implemented_requirement.statements = statements
        if parameter_name is not None:
            parameter_name = parameter_name.replace(' ', '_')
            if parameter_value_default is not None:
                if implemented_requirement.set_parameters is None:
                    implemented_requirement.set_parameters = []
                values = [parameter_value_default]
                set_parameter = SetParameter(param_id=parameter_name, values=values)
                set_parameters = [set_parameter]
                implemented_requirement.set_parameters.extend(set_parameters)
        control_implementation.implemented_requirements.append(implemented_requirement)
compliance-trestle
positive
@pytest.fixture
def validator_election_votes_2(b_mock, ongoing_validator_election_2, ed25519_node_keys):
    voters = ValidatorElection.recipients(b_mock)
    <DeepExtract>
    votes = []
    for (voter, _) in enumerate(voters):
        v = gen_vote(ongoing_validator_election_2, voter, ed25519_node_keys)
        votes.append(v)
    votes = votes
    </DeepExtract>
    return votes
@pytest.fixture
def validator_election_votes_2(b_mock, ongoing_validator_election_2, ed25519_node_keys):
    voters = ValidatorElection.recipients(b_mock)
    votes = []
    for (voter, _) in enumerate(voters):
        v = gen_vote(ongoing_validator_election_2, voter, ed25519_node_keys)
        votes.append(v)
    votes = votes
    return votes
bigchaindb
positive
def install_viralassembly_cleanall(env):
    try:
        <DeepExtract>
        if 'viral' == 'viral':
            env.VIRAL_ROOT_DIR = '/usr/local/VHTNGS'
            if not _path_exists(env.VIRAL_ROOT_DIR):
                sudo('mkdir -p %s' % env.VIRAL_ROOT_DIR)
        elif 'viral' == 'vigor':
            env.VIGOR_ROOT_DIR = '/usr/local/VIGOR'
            if not _path_exists(env.VIGOR_ROOT_DIR):
                sudo('mkdir -p %s' % env.VIGOR_ROOT_DIR)
            env.VIGOR_SCRATCH_DIR = '/usr/local/scratch/vigor'
            if not _path_exists(env.VIGOR_SCRATCH_DIR):
                sudo('mkdir -p %s' % env.VIGOR_SCRATCH_DIR)
            sudo('find %s -type f -exec chmod 666 {} \\;' % env.VIGOR_SCRATCH_DIR)
            sudo('find %s -type d -exec chmod 777 {} \\;' % env.VIGOR_SCRATCH_DIR)
        else:
            env.VICVB_LOCAL_DIR = '/usr/local/VICVB'
            env.VICVB_GALAXY_DIR = '/mnt/galaxyTools/galaxy-central/static/vicvb'
        </DeepExtract>
        <DeepExtract>
        if _path_is_dir('%(VIRAL_ROOT_DIR)s' % env):
            _unlock_dir('%(VIRAL_ROOT_DIR)s' % env)
            sudo('rm -rf %s' % '%(VIRAL_ROOT_DIR)s' % env)
        else:
            print('DEBUG: _remove_dir[%s] -- NOT FOUND' % '%(VIRAL_ROOT_DIR)s' % env)
        </DeepExtract>
        print('Viral Assembly Removed\n')
    finally:
        disconnect_all()
def install_viralassembly_cleanall(env):
    try:
        if 'viral' == 'viral':
            env.VIRAL_ROOT_DIR = '/usr/local/VHTNGS'
            if not _path_exists(env.VIRAL_ROOT_DIR):
                sudo('mkdir -p %s' % env.VIRAL_ROOT_DIR)
        elif 'viral' == 'vigor':
            env.VIGOR_ROOT_DIR = '/usr/local/VIGOR'
            if not _path_exists(env.VIGOR_ROOT_DIR):
                sudo('mkdir -p %s' % env.VIGOR_ROOT_DIR)
            env.VIGOR_SCRATCH_DIR = '/usr/local/scratch/vigor'
            if not _path_exists(env.VIGOR_SCRATCH_DIR):
                sudo('mkdir -p %s' % env.VIGOR_SCRATCH_DIR)
            sudo('find %s -type f -exec chmod 666 {} \\;' % env.VIGOR_SCRATCH_DIR)
            sudo('find %s -type d -exec chmod 777 {} \\;' % env.VIGOR_SCRATCH_DIR)
        else:
            env.VICVB_LOCAL_DIR = '/usr/local/VICVB'
            env.VICVB_GALAXY_DIR = '/mnt/galaxyTools/galaxy-central/static/vicvb'
        if _path_is_dir('%(VIRAL_ROOT_DIR)s' % env):
            _unlock_dir('%(VIRAL_ROOT_DIR)s' % env)
            sudo('rm -rf %s' % '%(VIRAL_ROOT_DIR)s' % env)
        else:
            print('DEBUG: _remove_dir[%s] -- NOT FOUND' % '%(VIRAL_ROOT_DIR)s' % env)
        print('Viral Assembly Removed\n')
    finally:
        disconnect_all()
cloudbiolinux
positive
@staticmethod
def from_json(s):
    """Deserialize a RocketLanding from a JSON string
    :type s: str
    :rtype: RocketLanding
    """
    assert type(s) is str, 'incorrect type of arg s: should be str, is {}'.format(type(s))
    result = _lib.bc_RocketLanding_from_json(_ffi.new('char[]', s.encode()))
    <DeepExtract>
    if _lib.bc_has_err():
        _lasterror = _ffi.new('char**')
        err = _lib.bc_get_last_err(_lasterror)
        errtext = _ffi.string(_lasterror[0])
        _lib.bc_free_string(_lasterror[0])
        raise Exception(errtext)
    </DeepExtract>
    _result = RocketLanding.__new__(RocketLanding)
    if result != _ffi.NULL:
        _result._ptr = result
    result = _result
    return result
@staticmethod
def from_json(s):
    """Deserialize a RocketLanding from a JSON string
    :type s: str
    :rtype: RocketLanding
    """
    assert type(s) is str, 'incorrect type of arg s: should be str, is {}'.format(type(s))
    result = _lib.bc_RocketLanding_from_json(_ffi.new('char[]', s.encode()))
    if _lib.bc_has_err():
        _lasterror = _ffi.new('char**')
        err = _lib.bc_get_last_err(_lasterror)
        errtext = _ffi.string(_lasterror[0])
        _lib.bc_free_string(_lasterror[0])
        raise Exception(errtext)
    _result = RocketLanding.__new__(RocketLanding)
    if result != _ffi.NULL:
        _result._ptr = result
    result = _result
    return result
bc18-scaffold
positive
def _process_instance_to_semantic(anns, output_semantic, img):
    img_size = (img['height'], img['width'])
    output = np.zeros(img_size, dtype=np.uint8)
    for ann in anns:
        <DeepExtract>
        rle = annToRLE(ann, img_size)
        m = maskUtils.decode(rle)
        mask = m
        </DeepExtract>
        output[mask == 1] = ann['category_id'] // 5
    np.savez_compressed(output_semantic, mask=output)
def _process_instance_to_semantic(anns, output_semantic, img):
    img_size = (img['height'], img['width'])
    output = np.zeros(img_size, dtype=np.uint8)
    for ann in anns:
        rle = annToRLE(ann, img_size)
        m = maskUtils.decode(rle)
        mask = m
        output[mask == 1] = ann['category_id'] // 5
    np.savez_compressed(output_semantic, mask=output)
AdelaiDet
positive
def optimize_hyper_params(reconstructor, validation_data, measure, dataset=None, HYPER_PARAMS_override=None, hyperopt_max_evals=1000, hyperopt_max_evals_retrain=1000, hyperopt_rstate=None, show_progressbar=True, tqdm_file=None): """Optimize hyper parameters of a reconstructor. Parameters ---------- reconstructor : :class:`.Reconstructor` The reconstructor. validation_data : :class:`.DataPairs` The test data on which the performance is measured. measure : :class:`.Measure` or str The measure to use as the objective. The sign is chosen automatically depending on the measures :attr:`~Measure.measure_type`. dataset : :class:`.Dataset`, optional The dataset used for training `reconstructor` if it is a :class:`LearnedReconstructor`. HYPER_PARAMS_override : dict, optional Hyper parameter specification overriding the defaults in ``type(reconstructor).HYPER_PARAMS``. The structure of this dict is the same as the structure of :attr:`Reconstructor.HYPER_PARAMS`, except that all fields are optional. Here, each value of a dict for one parameter is treated as an entity, i.e. specifying the dict ``HYPER_PARAMS[...]['grid_search_options']`` overrides the whole dict, not only the specified keys in it. hyperopt_max_evals : int, optional Number of evaluations for different combinations of the parameters that are optimized by ``hyperopt`` and that do not require retraining. Should be chosen depending on the complexity of dependence and the number of such parameters. hyperopt_max_evals_retrain : int, optional Number of evaluations for different combinations of the parameters that are optimized by ``hyperopt`` and that require retraining. Should be chosen depending on the complexity of dependence and the number of such parameters. hyperopt_rstate : :class:`np.random.RandomState`, optional Random state for the random searches performed by ``hyperopt``. show_progressbar : bool, optional Whether to show a progress bar for the optimization. Default: ``True``. tqdm_file : file-like object File/stream to pass to ``tqdm``. 
""" if isinstance(measure, str): measure = Measure.get_by_short_name(measure) if dataset is None and isinstance(reconstructor, LearnedReconstructor): raise ValueError('dataset required for training of `LearnedReconstructor`') if HYPER_PARAMS_override is None: HYPER_PARAMS_override = {} for k in HYPER_PARAMS_override.keys(): if k not in type(reconstructor).HYPER_PARAMS.keys(): warn("unknown hyper param '{}' for reconstructor of type '{}'".format(k, type(reconstructor))) params = {} params_retrain = {} for (k, v) in type(reconstructor).HYPER_PARAMS.items(): param = v.copy() param.update(HYPER_PARAMS_override.get(k, {})) param.setdefault('method', 'grid_search') retrain = v.get('retrain', False) if retrain: params_retrain[k] = param else: params[k] = param loss_sign = 1 if measure.measure_type == 'distance' else -1 def fn(x): reconstructor.hyper_params.update(x) reconstructions = [reconstructor.reconstruct(observation) for observation in validation_data.observations] measure_values = [measure.apply(r, g) for (r, g) in zip(reconstructions, validation_data.ground_truth)] loss = loss_sign * np.mean(measure_values) return {'status': 'ok', 'loss': loss} def fn_retrain(x): reconstructor.hyper_params.update(x) reconstructor.train(dataset) <DeepExtract> grid_search_params = [] grid_search_param_choices = [] hyperopt_space = {} for (k, param) in params.items(): method = param['method'] if method == 'grid_search': grid_search_options = param.get('grid_search_options', {}) choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not None: grid_type = grid_search_options.get('type', 'linear') if grid_type == 'linear': n = grid_search_options.get('num_samples', 10) choices = np.linspace(range_[0], range_[1], n) elif grid_type == 'logarithmic': n = grid_search_options.get('num_samples', 10) b = grid_search_options.get('log_base', 10.0) choices = np.logspace(range_[0], range_[1], n, base=b) else: raise ValueError("unknown grid type '{grid_type}' in {reco_cls}.HYPER_PARAMS['{k}']['grid_search_options']".format(grid_type=grid_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}'], one of them must be specified for grid search".format(reco_cls=reconstructor.__class__.__name__, k=k)) grid_search_params.append(k) grid_search_param_choices.append(choices) elif method == 'hyperopt': hyperopt_options = param.get('hyperopt_options', {}) space = hyperopt_options.get('space') if space is None: choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not None: space_type = hyperopt_options.get('type', 'uniform') if space_type == 'uniform': space = hp.uniform(k, range_[0], range_[1]) else: raise ValueError("unknown hyperopt space type '{space_type}' in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']".format(space_type=space_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']. 
One of these or {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']['space'] must be specified for hyperopt param search".format(reco_cls=reconstructor.__class__.__name__, k=k)) else: space = hp.choice(k, choices) hyperopt_space[k] = space else: raise ValueError("unknown method '{method}' for {reco_cls}.HYPER_PARAMS['{k}']".format(method=method, reco_cls=reconstructor.__class__.__name__, k=k)) best_loss = np.inf best_hyper_params = None with std_out_err_redirect_tqdm(tqdm_file) as orig_stdout: grid_search_total = np.prod([len(c) for c in grid_search_param_choices]) for grid_search_values in tqdm(product(*grid_search_param_choices), desc='hyper param opt. for {reco_cls}'.format(reco_cls=type(reconstructor).__name__), total=grid_search_total, file=orig_stdout, leave=False, disable=not False): grid_search_param_dict = dict(zip(grid_search_params, grid_search_values)) reconstructor.hyper_params.update(grid_search_param_dict) if len(hyperopt_space) == 0: result = fn({}) if result['loss'] < best_loss: best_loss = result['loss'] best_hyper_params = result.get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) else: trials = Trials() argmin = fmin(fn=fn, space=hyperopt_space, algo=tpe.suggest, max_evals=hyperopt_max_evals, trials=trials, rstate=hyperopt_rstate, show_progressbar=False) best_trial = trials.best_trial if best_trial['result']['loss'] < best_loss: best_loss = best_trial['result']['loss'] best_hyper_params = best_trial['result'].get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) best_hyper_params.update(space_eval(hyperopt_space, argmin)) if best_hyper_params is not None: reconstructor.hyper_params.update(best_hyper_params) best_sub_hp = best_hyper_params </DeepExtract> reconstructions = [reconstructor.reconstruct(observation) for observation in validation_data.observations] measure_values = [measure.apply(r, g) for (r, g) in zip(reconstructions, validation_data.ground_truth)] loss = loss_sign * np.mean(measure_values) return {'status': 'ok', 'loss': loss, 'best_sub_hp': best_sub_hp} if params_retrain: <DeepExtract> grid_search_params = [] grid_search_param_choices = [] hyperopt_space = {} for (k, param) in params_retrain.items(): method = param['method'] if method == 'grid_search': grid_search_options = param.get('grid_search_options', {}) choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not None: grid_type = grid_search_options.get('type', 'linear') if grid_type == 'linear': n = grid_search_options.get('num_samples', 10) choices = np.linspace(range_[0], range_[1], n) elif grid_type == 'logarithmic': n = grid_search_options.get('num_samples', 10) b = grid_search_options.get('log_base', 10.0) choices = np.logspace(range_[0], range_[1], n, base=b) else: raise ValueError("unknown grid type '{grid_type}' in {reco_cls}.HYPER_PARAMS['{k}']['grid_search_options']".format(grid_type=grid_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}'], one of them must be specified for grid search".format(reco_cls=reconstructor.__class__.__name__, k=k)) grid_search_params.append(k) grid_search_param_choices.append(choices) elif method == 'hyperopt': hyperopt_options = param.get('hyperopt_options', {}) space = hyperopt_options.get('space') if space is None: choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not None: space_type = hyperopt_options.get('type', 'uniform') if space_type == 
'uniform': space = hp.uniform(k, range_[0], range_[1]) else: raise ValueError("unknown hyperopt space type '{space_type}' in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']".format(space_type=space_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']. One of these or {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']['space'] must be specified for hyperopt param search".format(reco_cls=reconstructor.__class__.__name__, k=k)) else: space = hp.choice(k, choices) hyperopt_space[k] = space else: raise ValueError("unknown method '{method}' for {reco_cls}.HYPER_PARAMS['{k}']".format(method=method, reco_cls=reconstructor.__class__.__name__, k=k)) best_loss = np.inf best_hyper_params = None with std_out_err_redirect_tqdm(tqdm_file) as orig_stdout: grid_search_total = np.prod([len(c) for c in grid_search_param_choices]) for grid_search_values in tqdm(product(*grid_search_param_choices), desc='hyper param opt. for {reco_cls}'.format(reco_cls=type(reconstructor).__name__), total=grid_search_total, file=orig_stdout, leave=False, disable=not show_progressbar): grid_search_param_dict = dict(zip(grid_search_params, grid_search_values)) reconstructor.hyper_params.update(grid_search_param_dict) if len(hyperopt_space) == 0: result = fn_retrain({}) if result['loss'] < best_loss: best_loss = result['loss'] best_hyper_params = result.get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) else: trials = Trials() argmin = fmin(fn=fn_retrain, space=hyperopt_space, algo=tpe.suggest, max_evals=hyperopt_max_evals_retrain, trials=trials, rstate=hyperopt_rstate, show_progressbar=False) best_trial = trials.best_trial if best_trial['result']['loss'] < best_loss: best_loss = best_trial['result']['loss'] best_hyper_params = best_trial['result'].get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) best_hyper_params.update(space_eval(hyperopt_space, argmin)) if best_hyper_params is not None: reconstructor.hyper_params.update(best_hyper_params) best_hyper_params = best_hyper_params </DeepExtract> else: <DeepExtract> grid_search_params = [] grid_search_param_choices = [] hyperopt_space = {} for (k, param) in params.items(): method = param['method'] if method == 'grid_search': grid_search_options = param.get('grid_search_options', {}) choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not None: grid_type = grid_search_options.get('type', 'linear') if grid_type == 'linear': n = grid_search_options.get('num_samples', 10) choices = np.linspace(range_[0], range_[1], n) elif grid_type == 'logarithmic': n = grid_search_options.get('num_samples', 10) b = grid_search_options.get('log_base', 10.0) choices = np.logspace(range_[0], range_[1], n, base=b) else: raise ValueError("unknown grid type '{grid_type}' in {reco_cls}.HYPER_PARAMS['{k}']['grid_search_options']".format(grid_type=grid_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}'], one of them must be specified for grid search".format(reco_cls=reconstructor.__class__.__name__, k=k)) grid_search_params.append(k) grid_search_param_choices.append(choices) elif method == 'hyperopt': hyperopt_options = param.get('hyperopt_options', {}) space = hyperopt_options.get('space') if space is None: choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not 
None: space_type = hyperopt_options.get('type', 'uniform') if space_type == 'uniform': space = hp.uniform(k, range_[0], range_[1]) else: raise ValueError("unknown hyperopt space type '{space_type}' in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']".format(space_type=space_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']. One of these or {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']['space'] must be specified for hyperopt param search".format(reco_cls=reconstructor.__class__.__name__, k=k)) else: space = hp.choice(k, choices) hyperopt_space[k] = space else: raise ValueError("unknown method '{method}' for {reco_cls}.HYPER_PARAMS['{k}']".format(method=method, reco_cls=reconstructor.__class__.__name__, k=k)) best_loss = np.inf best_hyper_params = None with std_out_err_redirect_tqdm(tqdm_file) as orig_stdout: grid_search_total = np.prod([len(c) for c in grid_search_param_choices]) for grid_search_values in tqdm(product(*grid_search_param_choices), desc='hyper param opt. for {reco_cls}'.format(reco_cls=type(reconstructor).__name__), total=grid_search_total, file=orig_stdout, leave=False, disable=not show_progressbar): grid_search_param_dict = dict(zip(grid_search_params, grid_search_values)) reconstructor.hyper_params.update(grid_search_param_dict) if len(hyperopt_space) == 0: result = fn({}) if result['loss'] < best_loss: best_loss = result['loss'] best_hyper_params = result.get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) else: trials = Trials() argmin = fmin(fn=fn, space=hyperopt_space, algo=tpe.suggest, max_evals=hyperopt_max_evals, trials=trials, rstate=hyperopt_rstate, show_progressbar=False) best_trial = trials.best_trial if best_trial['result']['loss'] < best_loss: best_loss = best_trial['result']['loss'] best_hyper_params = best_trial['result'].get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) best_hyper_params.update(space_eval(hyperopt_space, argmin)) if best_hyper_params is not None: reconstructor.hyper_params.update(best_hyper_params) best_hyper_params = best_hyper_params </DeepExtract> return best_hyper_params
def optimize_hyper_params(reconstructor, validation_data, measure, dataset=None, HYPER_PARAMS_override=None, hyperopt_max_evals=1000, hyperopt_max_evals_retrain=1000, hyperopt_rstate=None, show_progressbar=True, tqdm_file=None): """Optimize hyper parameters of a reconstructor. Parameters ---------- reconstructor : :class:`.Reconstructor` The reconstructor. validation_data : :class:`.DataPairs` The test data on which the performance is measured. measure : :class:`.Measure` or str The measure to use as the objective. The sign is chosen automatically depending on the measures :attr:`~Measure.measure_type`. dataset : :class:`.Dataset`, optional The dataset used for training `reconstructor` if it is a :class:`LearnedReconstructor`. HYPER_PARAMS_override : dict, optional Hyper parameter specification overriding the defaults in ``type(reconstructor).HYPER_PARAMS``. The structure of this dict is the same as the structure of :attr:`Reconstructor.HYPER_PARAMS`, except that all fields are optional. Here, each value of a dict for one parameter is treated as an entity, i.e. specifying the dict ``HYPER_PARAMS[...]['grid_search_options']`` overrides the whole dict, not only the specified keys in it. hyperopt_max_evals : int, optional Number of evaluations for different combinations of the parameters that are optimized by ``hyperopt`` and that do not require retraining. Should be chosen depending on the complexity of dependence and the number of such parameters. hyperopt_max_evals_retrain : int, optional Number of evaluations for different combinations of the parameters that are optimized by ``hyperopt`` and that require retraining. Should be chosen depending on the complexity of dependence and the number of such parameters. hyperopt_rstate : :class:`np.random.RandomState`, optional Random state for the random searches performed by ``hyperopt``. show_progressbar : bool, optional Whether to show a progress bar for the optimization. Default: ``True``. tqdm_file : file-like object File/stream to pass to ``tqdm``. 
""" if isinstance(measure, str): measure = Measure.get_by_short_name(measure) if dataset is None and isinstance(reconstructor, LearnedReconstructor): raise ValueError('dataset required for training of `LearnedReconstructor`') if HYPER_PARAMS_override is None: HYPER_PARAMS_override = {} for k in HYPER_PARAMS_override.keys(): if k not in type(reconstructor).HYPER_PARAMS.keys(): warn("unknown hyper param '{}' for reconstructor of type '{}'".format(k, type(reconstructor))) params = {} params_retrain = {} for (k, v) in type(reconstructor).HYPER_PARAMS.items(): param = v.copy() param.update(HYPER_PARAMS_override.get(k, {})) param.setdefault('method', 'grid_search') retrain = v.get('retrain', False) if retrain: params_retrain[k] = param else: params[k] = param loss_sign = 1 if measure.measure_type == 'distance' else -1 def fn(x): reconstructor.hyper_params.update(x) reconstructions = [reconstructor.reconstruct(observation) for observation in validation_data.observations] measure_values = [measure.apply(r, g) for (r, g) in zip(reconstructions, validation_data.ground_truth)] loss = loss_sign * np.mean(measure_values) return {'status': 'ok', 'loss': loss} def fn_retrain(x): reconstructor.hyper_params.update(x) reconstructor.train(dataset) grid_search_params = [] grid_search_param_choices = [] hyperopt_space = {} for (k, param) in params.items(): method = param['method'] if method == 'grid_search': grid_search_options = param.get('grid_search_options', {}) choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not None: grid_type = grid_search_options.get('type', 'linear') if grid_type == 'linear': n = grid_search_options.get('num_samples', 10) choices = np.linspace(range_[0], range_[1], n) elif grid_type == 'logarithmic': n = grid_search_options.get('num_samples', 10) b = grid_search_options.get('log_base', 10.0) choices = np.logspace(range_[0], range_[1], n, base=b) else: raise ValueError("unknown grid type '{grid_type}' in {reco_cls}.HYPER_PARAMS['{k}']['grid_search_options']".format(grid_type=grid_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}'], one of them must be specified for grid search".format(reco_cls=reconstructor.__class__.__name__, k=k)) grid_search_params.append(k) grid_search_param_choices.append(choices) elif method == 'hyperopt': hyperopt_options = param.get('hyperopt_options', {}) space = hyperopt_options.get('space') if space is None: choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not None: space_type = hyperopt_options.get('type', 'uniform') if space_type == 'uniform': space = hp.uniform(k, range_[0], range_[1]) else: raise ValueError("unknown hyperopt space type '{space_type}' in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']".format(space_type=space_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']. 
One of these or {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']['space'] must be specified for hyperopt param search".format(reco_cls=reconstructor.__class__.__name__, k=k)) else: space = hp.choice(k, choices) hyperopt_space[k] = space else: raise ValueError("unknown method '{method}' for {reco_cls}.HYPER_PARAMS['{k}']".format(method=method, reco_cls=reconstructor.__class__.__name__, k=k)) best_loss = np.inf best_hyper_params = None with std_out_err_redirect_tqdm(tqdm_file) as orig_stdout: grid_search_total = np.prod([len(c) for c in grid_search_param_choices]) for grid_search_values in tqdm(product(*grid_search_param_choices), desc='hyper param opt. for {reco_cls}'.format(reco_cls=type(reconstructor).__name__), total=grid_search_total, file=orig_stdout, leave=False, disable=not False): grid_search_param_dict = dict(zip(grid_search_params, grid_search_values)) reconstructor.hyper_params.update(grid_search_param_dict) if len(hyperopt_space) == 0: result = fn({}) if result['loss'] < best_loss: best_loss = result['loss'] best_hyper_params = result.get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) else: trials = Trials() argmin = fmin(fn=fn, space=hyperopt_space, algo=tpe.suggest, max_evals=hyperopt_max_evals, trials=trials, rstate=hyperopt_rstate, show_progressbar=False) best_trial = trials.best_trial if best_trial['result']['loss'] < best_loss: best_loss = best_trial['result']['loss'] best_hyper_params = best_trial['result'].get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) best_hyper_params.update(space_eval(hyperopt_space, argmin)) if best_hyper_params is not None: reconstructor.hyper_params.update(best_hyper_params) best_sub_hp = best_hyper_params reconstructions = [reconstructor.reconstruct(observation) for observation in validation_data.observations] measure_values = [measure.apply(r, g) for (r, g) in zip(reconstructions, validation_data.ground_truth)] loss = loss_sign * np.mean(measure_values) return {'status': 'ok', 'loss': loss, 'best_sub_hp': best_sub_hp} if params_retrain: grid_search_params = [] grid_search_param_choices = [] hyperopt_space = {} for (k, param) in params_retrain.items(): method = param['method'] if method == 'grid_search': grid_search_options = param.get('grid_search_options', {}) choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not None: grid_type = grid_search_options.get('type', 'linear') if grid_type == 'linear': n = grid_search_options.get('num_samples', 10) choices = np.linspace(range_[0], range_[1], n) elif grid_type == 'logarithmic': n = grid_search_options.get('num_samples', 10) b = grid_search_options.get('log_base', 10.0) choices = np.logspace(range_[0], range_[1], n, base=b) else: raise ValueError("unknown grid type '{grid_type}' in {reco_cls}.HYPER_PARAMS['{k}']['grid_search_options']".format(grid_type=grid_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}'], one of them must be specified for grid search".format(reco_cls=reconstructor.__class__.__name__, k=k)) grid_search_params.append(k) grid_search_param_choices.append(choices) elif method == 'hyperopt': hyperopt_options = param.get('hyperopt_options', {}) space = hyperopt_options.get('space') if space is None: choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not None: space_type = hyperopt_options.get('type', 'uniform') if space_type == 'uniform': space = 
hp.uniform(k, range_[0], range_[1]) else: raise ValueError("unknown hyperopt space type '{space_type}' in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']".format(space_type=space_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']. One of these or {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']['space'] must be specified for hyperopt param search".format(reco_cls=reconstructor.__class__.__name__, k=k)) else: space = hp.choice(k, choices) hyperopt_space[k] = space else: raise ValueError("unknown method '{method}' for {reco_cls}.HYPER_PARAMS['{k}']".format(method=method, reco_cls=reconstructor.__class__.__name__, k=k)) best_loss = np.inf best_hyper_params = None with std_out_err_redirect_tqdm(tqdm_file) as orig_stdout: grid_search_total = np.prod([len(c) for c in grid_search_param_choices]) for grid_search_values in tqdm(product(*grid_search_param_choices), desc='hyper param opt. for {reco_cls}'.format(reco_cls=type(reconstructor).__name__), total=grid_search_total, file=orig_stdout, leave=False, disable=not show_progressbar): grid_search_param_dict = dict(zip(grid_search_params, grid_search_values)) reconstructor.hyper_params.update(grid_search_param_dict) if len(hyperopt_space) == 0: result = fn_retrain({}) if result['loss'] < best_loss: best_loss = result['loss'] best_hyper_params = result.get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) else: trials = Trials() argmin = fmin(fn=fn_retrain, space=hyperopt_space, algo=tpe.suggest, max_evals=hyperopt_max_evals_retrain, trials=trials, rstate=hyperopt_rstate, show_progressbar=False) best_trial = trials.best_trial if best_trial['result']['loss'] < best_loss: best_loss = best_trial['result']['loss'] best_hyper_params = best_trial['result'].get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) best_hyper_params.update(space_eval(hyperopt_space, argmin)) if best_hyper_params is not None: reconstructor.hyper_params.update(best_hyper_params) best_hyper_params = best_hyper_params else: grid_search_params = [] grid_search_param_choices = [] hyperopt_space = {} for (k, param) in params.items(): method = param['method'] if method == 'grid_search': grid_search_options = param.get('grid_search_options', {}) choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not None: grid_type = grid_search_options.get('type', 'linear') if grid_type == 'linear': n = grid_search_options.get('num_samples', 10) choices = np.linspace(range_[0], range_[1], n) elif grid_type == 'logarithmic': n = grid_search_options.get('num_samples', 10) b = grid_search_options.get('log_base', 10.0) choices = np.logspace(range_[0], range_[1], n, base=b) else: raise ValueError("unknown grid type '{grid_type}' in {reco_cls}.HYPER_PARAMS['{k}']['grid_search_options']".format(grid_type=grid_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}'], one of them must be specified for grid search".format(reco_cls=reconstructor.__class__.__name__, k=k)) grid_search_params.append(k) grid_search_param_choices.append(choices) elif method == 'hyperopt': hyperopt_options = param.get('hyperopt_options', {}) space = hyperopt_options.get('space') if space is None: choices = param.get('choices') if choices is None: range_ = param.get('range') if range_ is not None: space_type = hyperopt_options.get('type', 
'uniform') if space_type == 'uniform': space = hp.uniform(k, range_[0], range_[1]) else: raise ValueError("unknown hyperopt space type '{space_type}' in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']".format(space_type=space_type, reco_cls=reconstructor.__class__.__name__, k=k)) else: raise ValueError("neither 'choices' nor 'range' is specified in {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']. One of these or {reco_cls}.HYPER_PARAMS['{k}']['hyperopt_options']['space'] must be specified for hyperopt param search".format(reco_cls=reconstructor.__class__.__name__, k=k)) else: space = hp.choice(k, choices) hyperopt_space[k] = space else: raise ValueError("unknown method '{method}' for {reco_cls}.HYPER_PARAMS['{k}']".format(method=method, reco_cls=reconstructor.__class__.__name__, k=k)) best_loss = np.inf best_hyper_params = None with std_out_err_redirect_tqdm(tqdm_file) as orig_stdout: grid_search_total = np.prod([len(c) for c in grid_search_param_choices]) for grid_search_values in tqdm(product(*grid_search_param_choices), desc='hyper param opt. for {reco_cls}'.format(reco_cls=type(reconstructor).__name__), total=grid_search_total, file=orig_stdout, leave=False, disable=not show_progressbar): grid_search_param_dict = dict(zip(grid_search_params, grid_search_values)) reconstructor.hyper_params.update(grid_search_param_dict) if len(hyperopt_space) == 0: result = fn({}) if result['loss'] < best_loss: best_loss = result['loss'] best_hyper_params = result.get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) else: trials = Trials() argmin = fmin(fn=fn, space=hyperopt_space, algo=tpe.suggest, max_evals=hyperopt_max_evals, trials=trials, rstate=hyperopt_rstate, show_progressbar=False) best_trial = trials.best_trial if best_trial['result']['loss'] < best_loss: best_loss = best_trial['result']['loss'] best_hyper_params = best_trial['result'].get('best_sub_hp', {}) best_hyper_params.update(grid_search_param_dict) best_hyper_params.update(space_eval(hyperopt_space, argmin)) if best_hyper_params is not None: reconstructor.hyper_params.update(best_hyper_params) best_hyper_params = best_hyper_params return best_hyper_params
dival
positive
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with
    rank 0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    <DeepExtract>
    if not dist.is_available():
        world_size = 1
    if not dist.is_initialized():
        world_size = 1
    world_size = dist.get_world_size()
    </DeepExtract>
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            values /= world_size
        reduced_dict = {k: v for (k, v) in zip(names, values)}
    return reduced_dict
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process with
    rank 0 has the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    if not dist.is_available():
        world_size = 1
    if not dist.is_initialized():
        world_size = 1
    world_size = dist.get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            values /= world_size
        reduced_dict = {k: v for (k, v) in zip(names, values)}
    return reduced_dict
DetNAS
positive
def diff_cleanupSemanticLossless(diffs): """Look for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came. Args: diffs: Array of diff tuples. """ def diff_cleanupSemanticScore(one, two): """Given two strings, compute a score representing whether the internal boundary falls on logical boundaries. Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. Args: one: First string. two: Second string. Returns: The score. """ if not one or not two: return 6 char1 = one[-1] char2 = two[0] nonAlphaNumeric1 = not char1.isalnum() nonAlphaNumeric2 = not char2.isalnum() whitespace1 = nonAlphaNumeric1 and char1.isspace() whitespace2 = nonAlphaNumeric2 and char2.isspace() lineBreak1 = whitespace1 and (char1 == '\r' or char1 == '\n') lineBreak2 = whitespace2 and (char2 == '\r' or char2 == '\n') blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one) blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two) if blankLine1 or blankLine2: return 5 elif lineBreak1 or lineBreak2: return 4 elif nonAlphaNumeric1 and (not whitespace1) and whitespace2: return 3 elif whitespace1 or whitespace2: return 2 elif nonAlphaNumeric1 or nonAlphaNumeric2: return 1 return 0 pointer = 1 while pointer < len(diffs) - 1: if diffs[pointer - 1][0] == self.DIFF_EQUAL and diffs[pointer + 1][0] == self.DIFF_EQUAL: equality1 = diffs[pointer - 1][1] edit = diffs[pointer][1] equality2 = diffs[pointer + 1][1] <DeepExtract> if not equality1 or not edit or equality1[-1] != edit[-1]: commonOffset = 0 pointermin = 0 pointermax = min(len(equality1), len(edit)) pointermid = pointermax pointerend = 0 while pointermin < pointermid: if equality1[-pointermid:len(equality1) - pointerend] == edit[-pointermid:len(edit) - pointerend]: pointermin = pointermid pointerend = pointermin else: pointermax = pointermid pointermid = (pointermax - pointermin) // 2 + pointermin commonOffset = pointermid </DeepExtract> if commonOffset: commonString = edit[-commonOffset:] equality1 = equality1[:-commonOffset] edit = commonString + edit[:-commonOffset] equality2 = commonString + equality2 bestEquality1 = equality1 bestEdit = edit bestEquality2 = equality2 bestScore = diff_cleanupSemanticScore(equality1, edit) + diff_cleanupSemanticScore(edit, equality2) while edit and equality2 and (edit[0] == equality2[0]): equality1 += edit[0] edit = edit[1:] + equality2[0] equality2 = equality2[1:] score = diff_cleanupSemanticScore(equality1, edit) + diff_cleanupSemanticScore(edit, equality2) if score >= bestScore: bestScore = score bestEquality1 = equality1 bestEdit = edit bestEquality2 = equality2 if diffs[pointer - 1][1] != bestEquality1: if bestEquality1: diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1) else: del diffs[pointer - 1] pointer -= 1 diffs[pointer] = (diffs[pointer][0], bestEdit) if bestEquality2: diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2) else: del diffs[pointer + 1] pointer -= 1 pointer += 1
def diff_cleanupSemanticLossless(diffs): """Look for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came. Args: diffs: Array of diff tuples. """ def diff_cleanupSemanticScore(one, two): """Given two strings, compute a score representing whether the internal boundary falls on logical boundaries. Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. Args: one: First string. two: Second string. Returns: The score. """ if not one or not two: return 6 char1 = one[-1] char2 = two[0] nonAlphaNumeric1 = not char1.isalnum() nonAlphaNumeric2 = not char2.isalnum() whitespace1 = nonAlphaNumeric1 and char1.isspace() whitespace2 = nonAlphaNumeric2 and char2.isspace() lineBreak1 = whitespace1 and (char1 == '\r' or char1 == '\n') lineBreak2 = whitespace2 and (char2 == '\r' or char2 == '\n') blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one) blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two) if blankLine1 or blankLine2: return 5 elif lineBreak1 or lineBreak2: return 4 elif nonAlphaNumeric1 and (not whitespace1) and whitespace2: return 3 elif whitespace1 or whitespace2: return 2 elif nonAlphaNumeric1 or nonAlphaNumeric2: return 1 return 0 pointer = 1 while pointer < len(diffs) - 1: if diffs[pointer - 1][0] == self.DIFF_EQUAL and diffs[pointer + 1][0] == self.DIFF_EQUAL: equality1 = diffs[pointer - 1][1] edit = diffs[pointer][1] equality2 = diffs[pointer + 1][1] if not equality1 or not edit or equality1[-1] != edit[-1]: commonOffset = 0 pointermin = 0 pointermax = min(len(equality1), len(edit)) pointermid = pointermax pointerend = 0 while pointermin < pointermid: if equality1[-pointermid:len(equality1) - pointerend] == edit[-pointermid:len(edit) - pointerend]: pointermin = pointermid pointerend = pointermin else: pointermax = pointermid pointermid = (pointermax - pointermin) // 2 + pointermin commonOffset = pointermid if commonOffset: commonString = edit[-commonOffset:] equality1 = equality1[:-commonOffset] edit = commonString + edit[:-commonOffset] equality2 = commonString + equality2 bestEquality1 = equality1 bestEdit = edit bestEquality2 = equality2 bestScore = diff_cleanupSemanticScore(equality1, edit) + diff_cleanupSemanticScore(edit, equality2) while edit and equality2 and (edit[0] == equality2[0]): equality1 += edit[0] edit = edit[1:] + equality2[0] equality2 = equality2[1:] score = diff_cleanupSemanticScore(equality1, edit) + diff_cleanupSemanticScore(edit, equality2) if score >= bestScore: bestScore = score bestEquality1 = equality1 bestEdit = edit bestEquality2 = equality2 if diffs[pointer - 1][1] != bestEquality1: if bestEquality1: diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1) else: del diffs[pointer - 1] pointer -= 1 diffs[pointer] = (diffs[pointer][0], bestEdit) if bestEquality2: diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2) else: del diffs[pointer + 1] pointer -= 1 pointer += 1
apkutils
positive