| column | dtype            | lengths / values |
|--------|------------------|------------------|
| before | string           | 0–955k           |
| after  | string           | 0–877k           |
| repo   | string           | 1–74             |
| type   | string (1 class) | positive         |
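Each row below pairs a `before` snippet, in which helper-function bodies have been mechanically inlined between `<DeepExtract>` markers, with an `after` snippet that strips the markers. Assuming this preview comes from a Hugging Face–style dataset (the dataset path below is a placeholder, not the real repository), a row can be inspected like this:

```python
from datasets import load_dataset

# Placeholder dataset path; substitute the real repository for this preview.
ds = load_dataset('user/deepextract-pairs', split='train')
row = ds[0]
print(row['repo'], row['type'])
print(row['before'][:200])  # code containing <DeepExtract> ... </DeepExtract> markers
print(row['after'][:200])   # the same code with the markers removed
```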
def test_remove_10000(self): <DeepExtract> numbers_trad = [] numbers_array = [] for i in range(0, 20000): numbertrad = NumberTraditional(index=i) numbers_trad.append(numbertrad) numberarray = NumberArray(index=i) numbers_array.append(numberarray) numbers_trad = NumberTraditional.objects.bulk_create(numbers_trad) numbers_trad = [x.pk for x in numbers_trad] numbers_array = NumberArray.objects.bulk_create(numbers_array) numbers_array = [x.pk for x in numbers_array] if add: self.traditional.numbers.add(*numbers_trad) self.array.numbers.add(*numbers_array) self.assertEqual(self.traditional.numbers.count(), 20000) self.assertEqual(self.array.numbers.count(), 20000) (numbers_trad, numbers_array) = (numbers_trad, numbers_array) </DeepExtract> numbers_trad1 = numbers_trad[0:10000] numbers_trad2 = numbers_trad[10001:20000] numbers_array1 = numbers_array[0:10000] numbers_array2 = numbers_array[10001:20000] <DeepExtract> start = time.clock() kwargs = kwargs1 or {} result1 = self.traditional.numbers.remove(*numbers_trad1, **kwargs) traditional_time = time.clock() - start start = time.clock() kwargs = kwargs2 or {} result2 = self.array.numbers.remove(*numbers_array1, **kwargs) array_time = time.clock() - start if verify_result: verify_result(result1, result2) times = '%s: %s. %s: %s' % (first, traditional_time, second, array_time) if traditional_time > array_time: print('Test %s: %s is faster. %s' % ('Remove 10000 Partial', second, times)) elif traditional_time < array_time: print('Test %s: %s is faster. %s' % ('Remove 10000 Partial', first, times)) else: print('Test %s: Times are equal. %s' % ('Remove 10000 Partial', times)) </DeepExtract> <DeepExtract> start = time.clock() kwargs = kwargs1 or {} result1 = self.traditional.numbers.remove(*numbers_trad2, **kwargs) traditional_time = time.clock() - start start = time.clock() kwargs = kwargs2 or {} result2 = self.array.numbers.remove(*numbers_array2, **kwargs) array_time = time.clock() - start if verify_result: verify_result(result1, result2) times = '%s: %s. %s: %s' % (first, traditional_time, second, array_time) if traditional_time > array_time: print('Test %s: %s is faster. %s' % ('Remove 10000 All', second, times)) elif traditional_time < array_time: print('Test %s: %s is faster. %s' % ('Remove 10000 All', first, times)) else: print('Test %s: Times are equal. %s' % ('Remove 10000 All', times)) </DeepExtract>
def test_remove_10000(self): numbers_trad = [] numbers_array = [] for i in range(0, 20000): numbertrad = NumberTraditional(index=i) numbers_trad.append(numbertrad) numberarray = NumberArray(index=i) numbers_array.append(numberarray) numbers_trad = NumberTraditional.objects.bulk_create(numbers_trad) numbers_trad = [x.pk for x in numbers_trad] numbers_array = NumberArray.objects.bulk_create(numbers_array) numbers_array = [x.pk for x in numbers_array] if add: self.traditional.numbers.add(*numbers_trad) self.array.numbers.add(*numbers_array) self.assertEqual(self.traditional.numbers.count(), 20000) self.assertEqual(self.array.numbers.count(), 20000) (numbers_trad, numbers_array) = (numbers_trad, numbers_array) numbers_trad1 = numbers_trad[0:10000] numbers_trad2 = numbers_trad[10001:20000] numbers_array1 = numbers_array[0:10000] numbers_array2 = numbers_array[10001:20000] start = time.clock() kwargs = kwargs1 or {} result1 = self.traditional.numbers.remove(*numbers_trad1, **kwargs) traditional_time = time.clock() - start start = time.clock() kwargs = kwargs2 or {} result2 = self.array.numbers.remove(*numbers_array1, **kwargs) array_time = time.clock() - start if verify_result: verify_result(result1, result2) times = '%s: %s. %s: %s' % (first, traditional_time, second, array_time) if traditional_time > array_time: print('Test %s: %s is faster. %s' % ('Remove 10000 Partial', second, times)) elif traditional_time < array_time: print('Test %s: %s is faster. %s' % ('Remove 10000 Partial', first, times)) else: print('Test %s: Times are equal. %s' % ('Remove 10000 Partial', times)) start = time.clock() kwargs = kwargs1 or {} result1 = self.traditional.numbers.remove(*numbers_trad2, **kwargs) traditional_time = time.clock() - start start = time.clock() kwargs = kwargs2 or {} result2 = self.array.numbers.remove(*numbers_array2, **kwargs) array_time = time.clock() - start if verify_result: verify_result(result1, result2) times = '%s: %s. %s: %s' % (first, traditional_time, second, array_time) if traditional_time > array_time: print('Test %s: %s is faster. %s' % ('Remove 10000 All', second, times)) elif traditional_time < array_time: print('Test %s: %s is faster. %s' % ('Remove 10000 All', first, times)) else: print('Test %s: Times are equal. %s' % ('Remove 10000 All', times)) </DeepExtract>
django_postgres_extensions
positive
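The django_postgres_extensions row benchmarks two many-to-many implementations with `time.clock()`, which was removed in Python 3.8. A minimal sketch of the same compare-and-report pattern using `time.perf_counter()` (the function and parameter names here are illustrative, not from the repo):

```python
import time

def compare(label, first_name, first_call, second_name, second_call):
    """Time two callables and report which finished faster, mirroring the
    pattern repeated twice in the row above."""
    start = time.perf_counter()
    first_call()
    first_time = time.perf_counter() - start

    start = time.perf_counter()
    second_call()
    second_time = time.perf_counter() - start

    times = '%s: %s. %s: %s' % (first_name, first_time, second_name, second_time)
    if first_time > second_time:
        print('Test %s: %s is faster. %s' % (label, second_name, times))
    elif first_time < second_time:
        print('Test %s: %s is faster. %s' % (label, first_name, times))
    else:
        print('Test %s: Times are equal. %s' % (label, times))
```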
def get_pagination_info(arguments): """Get pagination arguments""" <DeepExtract> if arguments.pop('page', DEFAULT_PAGE): try: arguments.pop('page', DEFAULT_PAGE) = int(arguments.pop('page', DEFAULT_PAGE)) except ValueError as e: raise ValidationError('invalid input integer value: %s' % e) page = arguments.pop('page', DEFAULT_PAGE) </DeepExtract> <DeepExtract> if arguments.pop('limit', DEFAULT_ITEMS_PER_PAGE): try: arguments.pop('limit', DEFAULT_ITEMS_PER_PAGE) = int(arguments.pop('limit', DEFAULT_ITEMS_PER_PAGE)) except ValueError as e: raise ValidationError('invalid input integer value: %s' % e) items_per_page = arguments.pop('limit', DEFAULT_ITEMS_PER_PAGE) </DeepExtract> <DeepExtract> if arguments.pop('itemsPerPage', items_per_page): try: arguments.pop('itemsPerPage', items_per_page) = int(arguments.pop('itemsPerPage', items_per_page)) except ValueError as e: raise ValidationError('invalid input integer value: %s' % e) items_per_page = arguments.pop('itemsPerPage', items_per_page) </DeepExtract> if page is not None and page < 0: raise ValidationError('invalid page number. Must be positive integer') if items_per_page is not None and items_per_page < 0: raise ValidationError('invalid number of items per page. Must be positive integer') return (page, items_per_page)
def get_pagination_info(arguments): """Get pagination arguments""" if arguments.pop('page', DEFAULT_PAGE): try: arguments.pop('page', DEFAULT_PAGE) = int(arguments.pop('page', DEFAULT_PAGE)) except ValueError as e: raise ValidationError('invalid input integer value: %s' % e) page = arguments.pop('page', DEFAULT_PAGE) if arguments.pop('limit', DEFAULT_ITEMS_PER_PAGE): try: arguments.pop('limit', DEFAULT_ITEMS_PER_PAGE) = int(arguments.pop('limit', DEFAULT_ITEMS_PER_PAGE)) except ValueError as e: raise ValidationError('invalid input integer value: %s' % e) items_per_page = arguments.pop('limit', DEFAULT_ITEMS_PER_PAGE) if arguments.pop('itemsPerPage', items_per_page): try: arguments.pop('itemsPerPage', items_per_page) = int(arguments.pop('itemsPerPage', items_per_page)) except ValueError as e: raise ValidationError('invalid input integer value: %s' % e) items_per_page = arguments.pop('itemsPerPage', items_per_page) if page is not None and page < 0: raise ValidationError('invalid page number. Must be positive integer') if items_per_page is not None and items_per_page < 0: raise ValidationError('invalid number of items per page. Must be positive integer') return (page, items_per_page)
eodag
positive
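Both columns of the eodag row contain invalid Python: the mechanical inlining turned `page = arguments.pop(...)` into assignments whose target is a function call. Reconstructing the apparent intent of the original helper (a sketch; `DEFAULT_PAGE`, `DEFAULT_ITEMS_PER_PAGE`, and `ValidationError` are stand-ins for the project's own definitions):

```python
DEFAULT_PAGE = 1              # stand-in defaults; the real values live in eodag
DEFAULT_ITEMS_PER_PAGE = 20

class ValidationError(Exception):
    """Stand-in for the source project's ValidationError."""

def _pop_int(arguments, key, default):
    """Pop arguments[key] once, fall back to default, and coerce to int."""
    value = arguments.pop(key, default)
    if not value:
        return value
    try:
        return int(value)
    except ValueError as e:
        raise ValidationError('invalid input integer value: %s' % e)

def get_pagination_info(arguments):
    """Get pagination arguments"""
    page = _pop_int(arguments, 'page', DEFAULT_PAGE)
    items_per_page = _pop_int(arguments, 'limit', DEFAULT_ITEMS_PER_PAGE)
    items_per_page = _pop_int(arguments, 'itemsPerPage', items_per_page)
    if page is not None and page < 0:
        raise ValidationError('invalid page number. Must be positive integer')
    if items_per_page is not None and items_per_page < 0:
        raise ValidationError('invalid number of items per page. Must be positive integer')
    return (page, items_per_page)
```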
def all_reduce_and_rescale_tensors(tensors, rescale_denom, buffer_size=10485760): """All-reduce and rescale tensors in chunks of the specified size. Args: tensors: list of Tensors to all-reduce rescale_denom: denominator for rescaling summed Tensors buffer_size: all-reduce chunk size in bytes """ buffer_t = tensors[0].new(math.ceil(buffer_size / tensors[0].element_size())).zero_() buffer = [] def all_reduce_buffer(): offset = 0 for t in buffer: numel = t.numel() buffer_t[offset:offset + numel].copy_(t.view(-1)) offset += numel torch.distributed.all_reduce(buffer_t[:offset]) buffer_t.div_(rescale_denom) offset = 0 for t in buffer: numel = t.numel() t.view(-1).copy_(buffer_t[offset:offset + numel]) offset += numel filled = 0 for t in tensors: sz = t.numel() * t.element_size() if sz > buffer_size: torch.distributed.all_reduce(t) t.div_(rescale_denom) elif filled + sz > buffer_size: <DeepExtract> offset = 0 for t in buffer: numel = t.numel() buffer_t[offset:offset + numel].copy_(t.view(-1)) offset += numel torch.distributed.all_reduce(buffer_t[:offset]) buffer_t.div_(rescale_denom) offset = 0 for t in buffer: numel = t.numel() t.view(-1).copy_(buffer_t[offset:offset + numel]) offset += numel </DeepExtract> buffer = [t] filled = sz else: buffer.append(t) filled += sz if len(buffer) > 0: <DeepExtract> offset = 0 for t in buffer: numel = t.numel() buffer_t[offset:offset + numel].copy_(t.view(-1)) offset += numel torch.distributed.all_reduce(buffer_t[:offset]) buffer_t.div_(rescale_denom) offset = 0 for t in buffer: numel = t.numel() t.view(-1).copy_(buffer_t[offset:offset + numel]) offset += numel </DeepExtract>
def all_reduce_and_rescale_tensors(tensors, rescale_denom, buffer_size=10485760): """All-reduce and rescale tensors in chunks of the specified size. Args: tensors: list of Tensors to all-reduce rescale_denom: denominator for rescaling summed Tensors buffer_size: all-reduce chunk size in bytes """ buffer_t = tensors[0].new(math.ceil(buffer_size / tensors[0].element_size())).zero_() buffer = [] def all_reduce_buffer(): offset = 0 for t in buffer: numel = t.numel() buffer_t[offset:offset + numel].copy_(t.view(-1)) offset += numel torch.distributed.all_reduce(buffer_t[:offset]) buffer_t.div_(rescale_denom) offset = 0 for t in buffer: numel = t.numel() t.view(-1).copy_(buffer_t[offset:offset + numel]) offset += numel filled = 0 for t in tensors: sz = t.numel() * t.element_size() if sz > buffer_size: torch.distributed.all_reduce(t) t.div_(rescale_denom) elif filled + sz > buffer_size: offset = 0 for t in buffer: numel = t.numel() buffer_t[offset:offset + numel].copy_(t.view(-1)) offset += numel torch.distributed.all_reduce(buffer_t[:offset]) buffer_t.div_(rescale_denom) offset = 0 for t in buffer: numel = t.numel() t.view(-1).copy_(buffer_t[offset:offset + numel]) offset += numel buffer = [t] filled = sz else: buffer.append(t) filled += sz if len(buffer) > 0: offset = 0 for t in buffer: numel = t.numel() buffer_t[offset:offset + numel].copy_(t.view(-1)) offset += numel torch.distributed.all_reduce(buffer_t[:offset]) buffer_t.div_(rescale_denom) offset = 0 for t in buffer: numel = t.numel() t.view(-1).copy_(buffer_t[offset:offset + numel]) offset += numel </DeepExtract>
BiSET
positive
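The BiSET row shows a bucketed all-reduce: small tensors are packed into one flat buffer so a single collective call covers many gradients, while tensors larger than the buffer are reduced individually. A stripped-down sketch of just the pack/reduce/unpack step (here `reduce_fn` stands in for `torch.distributed.all_reduce` so the snippet runs without a process group):

```python
import torch

def pack_reduce_unpack(tensors, reduce_fn, rescale_denom):
    """Pack tensors into one flat buffer, run one in-place reduction over it,
    rescale once, then copy the results back into the original tensors."""
    flat = torch.cat([t.view(-1) for t in tensors])  # pack
    reduce_fn(flat)                                  # one collective call
    flat.div_(rescale_denom)                         # rescale the summed values
    offset = 0
    for t in tensors:                                # unpack in place
        n = t.numel()
        t.view(-1).copy_(flat[offset:offset + n])
        offset += n

# Example: pretend two ranks each held these values (sum doubles, then average).
a, b = torch.ones(3), torch.ones(2)
pack_reduce_unpack([a, b], lambda buf: buf.mul_(2), rescale_denom=2.0)
assert torch.equal(a, torch.ones(3))
```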
def l_state(self, value): """A Pilot has changed state, keep track of it. Args: value (dict): dict containing `state` . """ self.logger.debug(f'updating pilot state: {value}') if value['pilot'] not in self.pilots.keys(): self.logger.info('Got state info from an unknown pilot, adding...') <DeepExtract> if value['pilot'] is None: (value['pilot'], ok) = QtWidgets.QInputDialog.getText(self, 'Pilot ID', 'Pilot ID:') if not ok or not value['pilot']: self.logger.info('Cancel button clicked, not adding new pilot') return if value['pilot'] in self.pilots.keys(): self.logger.warning(f"pilot with id {value['pilot']} already in pilot db, overwriting...") if pilot_prefs is None: pilot_prefs = {} self.control_panel.add_pilot(value['pilot']) new_pilot = {value['pilot']: {'subjects': [], 'ip': ip, 'prefs': pilot_prefs}} self.control_panel.update_db(new=new_pilot) self.logger.info(f"added new pilot {value['pilot']}") </DeepExtract> self.pilots[value['pilot']]['state'] = value['state'] self.control_panel.panels[value['pilot']].button.set_state(value['state'])
def l_state(self, value): """A Pilot has changed state, keep track of it. Args: value (dict): dict containing `state` . """ self.logger.debug(f'updating pilot state: {value}') if value['pilot'] not in self.pilots.keys(): self.logger.info('Got state info from an unknown pilot, adding...') if value['pilot'] is None: (value['pilot'], ok) = QtWidgets.QInputDialog.getText(self, 'Pilot ID', 'Pilot ID:') if not ok or not value['pilot']: self.logger.info('Cancel button clicked, not adding new pilot') return if value['pilot'] in self.pilots.keys(): self.logger.warning(f"pilot with id {value['pilot']} already in pilot db, overwriting...") if pilot_prefs is None: pilot_prefs = {} self.control_panel.add_pilot(value['pilot']) new_pilot = {value['pilot']: {'subjects': [], 'ip': ip, 'prefs': pilot_prefs}} self.control_panel.update_db(new=new_pilot) self.logger.info(f"added new pilot {value['pilot']}") self.pilots[value['pilot']]['state'] = value['state'] self.control_panel.panels[value['pilot']].button.set_state(value['state'])
autopilot
positive
def test_send_find_value_known(port, version, public_key, private_key): """ Ensures that a "findvalue" message for an existing key is sent to the test node and the reply is checked. """ item = get_signed_item('item_name', "the item's value", public_key, private_key) signature = item['signature'] item['uuid'] = str(uuid4()) item['recipient'] = REMOTE_NODE_PUBLIC_KEY item['sender'] = public_key item['reply_port'] = 1908 item['version'] = version <DeepExtract> seal = get_seal(item, private_key) item['seal'] = seal item['message'] = 'store' msg = item </DeepExtract> <DeepExtract> result = requests.post('http://localhost:{}/'.format(port), json.dumps(msg), headers={'content-type': 'application/json'}) </DeepExtract> assert result.status_code == 200 item = {'uuid': str(uuid.uuid4()), 'recipient': REMOTE_NODE_PUBLIC_KEY, 'sender': public_key, 'reply_port': 1908, 'version': version, 'key': construct_key(public_key, 'item_name')} <DeepExtract> seal = get_seal(item, private_key) item['seal'] = seal item['message'] = 'findvalue' msg = item </DeepExtract> <DeepExtract> result = requests.post('http://localhost:{}/'.format(port), json.dumps(msg), headers={'content-type': 'application/json'}) </DeepExtract> assert result.status_code == 200 reply = result.json() assert reply['uuid'] == item['uuid'] assert reply['sender'] == REMOTE_NODE_PUBLIC_KEY assert reply['recipient'] == public_key assert reply['message'] == 'value' assert reply['reply_port'] == port assert reply['version'] == version assert reply['name'] == 'item_name' assert reply['value'] == "the item's value" assert reply['key'] == construct_key(public_key, 'item_name') assert reply['public_key'] == public_key assert reply['signature'] == signature assert reply['expires'] == 0.0 assert reply['created_with'] == version assert isinstance(reply['timestamp'], float) assert 'seal' in reply assert check_seal(from_dict(reply))
def test_send_find_value_known(port, version, public_key, private_key): """ Ensures that a "findvalue" message for an existing key is sent to the test node and the reply is checked. """ item = get_signed_item('item_name', "the item's value", public_key, private_key) signature = item['signature'] item['uuid'] = str(uuid4()) item['recipient'] = REMOTE_NODE_PUBLIC_KEY item['sender'] = public_key item['reply_port'] = 1908 item['version'] = version seal = get_seal(item, private_key) item['seal'] = seal item['message'] = 'store' msg = item result = requests.post('http://localhost:{}/'.format(port), json.dumps(msg), headers={'content-type': 'application/json'}) assert result.status_code == 200 item = {'uuid': str(uuid.uuid4()), 'recipient': REMOTE_NODE_PUBLIC_KEY, 'sender': public_key, 'reply_port': 1908, 'version': version, 'key': construct_key(public_key, 'item_name')} seal = get_seal(item, private_key) item['seal'] = seal item['message'] = 'findvalue' msg = item result = requests.post('http://localhost:{}/'.format(port), json.dumps(msg), headers={'content-type': 'application/json'}) assert result.status_code == 200 reply = result.json() assert reply['uuid'] == item['uuid'] assert reply['sender'] == REMOTE_NODE_PUBLIC_KEY assert reply['recipient'] == public_key assert reply['message'] == 'value' assert reply['reply_port'] == port assert reply['version'] == version assert reply['name'] == 'item_name' assert reply['value'] == "the item's value" assert reply['key'] == construct_key(public_key, 'item_name') assert reply['public_key'] == public_key assert reply['signature'] == signature assert reply['expires'] == 0.0 assert reply['created_with'] == version assert isinstance(reply['timestamp'], float) assert 'seal' in reply assert check_seal(from_dict(reply))
drogulus
positive
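The drogulus test repeats a seal-then-POST step twice: sign the message dict, attach the message type, and POST it as JSON. A sketch of that step factored out (`sealer` stands in for the project's `get_seal(item, private_key)` helper):

```python
import json
import requests

def send_message(port, item, message_type, sealer):
    """Seal a message dict, tag it with its type, and POST it to the node."""
    item['seal'] = sealer(item)
    item['message'] = message_type
    return requests.post('http://localhost:{}/'.format(port),
                         json.dumps(item),
                         headers={'content-type': 'application/json'})
```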
def initUI(resolution, debug, main_window): """ This method is responsible for controlling the size of the application window. It is also responsible for each application component, in terms of: - setting its dimensions ; - setting its position within the application layout; - creating its relevant buttons and connecting them to desired actions; - populating it with the created buttons; and - spacing out these buttons within the application component itself. :param resolution: the desktop screen resolution of the user :type resolution: PyQt5.QtCore.QRect(0, 0, screen_width, screen_height), required :return: a QWidget that is displayed within the application from bdEdit.py :rtype: QWidget """ main_window.setGeometry(100, 100, int(resolution.width() / 2), int(resolution.height() / 2)) self.layout = QGridLayout() self.layout.setContentsMargins(0, 0, 0, 0) self.setLayout(self.layout) self.layout.scale = int(2560 / resolution.width()) self.scene = Scene(resolution, self.layout, main_window) self.canvasView = GraphicsView(self.scene.grScene, main_window, self) self.blockLibrary = import_blocks(self.scene, self.layout) <DeepExtract> for sub_list in self.blockLibrary: sub_list[1].sort(key=lambda x: x[0]) self.blockLibrary.sort(key=lambda x: x[0]) </DeepExtract> self.libraryBrowser.layout = QVBoxLayout(self.libraryBrowser) self.tool_logo = QLabel() self.tool_logo.setPixmap(QPixmap(':/Icons_Reference/Icons/bdsim_logo2.png').scaledToWidth(230 * self.layout.scale)) self.libraryBrowser.layout.addWidget(self.tool_logo) self.libraryBrowser.layout.setAlignment(self.tool_logo, Qt.AlignmentFlag.AlignHCenter) self.libraryBrowserBox.layout = QVBoxLayout() self.libraryBrowserBox.layout.setSpacing(2) self.canvasItems_button = QPushButton(' + Canvas Items') self.canvasItems_button.setStyleSheet('QPushButton { text-align: left }') self.canvasItems_button.clicked.connect(self.toggleCanvasItems) self.canvas_items_hidden = True self.list_of_scrollbar_buttons = [] self.libraryBrowserBox.layout.addWidget(self.canvasItems_button) self.connector_block_button = QPushButton('Connector Block') self.main_block_button = QPushButton('Main Block') self.text_item_button = QPushButton('Text Item') self.grouping_box_button = QPushButton('Grouping Box') self.connector_block_button.setVisible(False) self.main_block_button.setVisible(False) self.text_item_button.setVisible(False) self.grouping_box_button.setVisible(False) self.connector_block_button.clicked.connect(lambda checked: Connector(self.scene, self.layout, 'Connector Block')) self.main_block_button.clicked.connect(lambda checked: Main(self.scene, self.layout)) self.text_item_button.clicked.connect(lambda checked: Floating_Label(self.scene, self.layout, main_window)) self.grouping_box_button.clicked.connect(lambda checked: Grouping_Box(self.scene, self.layout)) self.connector_block_button.clicked.connect(lambda checked, desc='Added connector block to scene': self.scene.history.storeHistory(desc)) self.main_block_button.clicked.connect(lambda checked, desc='Added main block to scene': self.scene.history.storeHistory(desc)) self.text_item_button.clicked.connect(lambda checked, desc='Added text label to scene': self.scene.history.storeHistory(desc)) self.grouping_box_button.clicked.connect(lambda checked, desc='Added grouping box to scene': self.scene.history.storeHistory(desc)) self.libraryBrowserBox.layout.addWidget(self.text_item_button) self.libraryBrowserBox.layout.addWidget(self.main_block_button) self.libraryBrowserBox.layout.addWidget(self.grouping_box_button) 
self.libraryBrowserBox.layout.addWidget(self.connector_block_button) for sub_class_group in self.blockLibrary: group_of_buttons = [] cleaned_class_group = sub_class_group[0][:-1] if sub_class_group[0].endswith('s') else sub_class_group[0] group_button = QPushButton(' + ' + cleaned_class_group.capitalize() + ' Blocks') group_button.setStyleSheet('QPushButton { text-align: left }') group_button.is_hidden = True group_button.clicked.connect(self.toggle_sub_buttons) group_of_buttons.append((group_button, cleaned_class_group.capitalize())) self.libraryBrowserBox.layout.addWidget(group_button) list_of_sub_buttons = [] for class_block in sub_class_group[1]: button = QPushButton(class_block[0]) button.setVisible(False) button.clicked.connect(lambda checked, blockClass=class_block[1]: blockClass()) button.clicked.connect(lambda checked, desc='Added imported block to scene': self.scene.history.storeHistory(desc)) list_of_sub_buttons.append((button, class_block[1])) self.libraryBrowserBox.layout.addWidget(button) group_of_buttons.append(list_of_sub_buttons) self.list_of_scrollbar_buttons.append(group_of_buttons) self.libraryBrowserBox.layout.addStretch() self.libraryBrowserBox.layout.setAlignment(Qt.AlignTop) self.libraryBrowserBox.setLayout(self.libraryBrowserBox.layout) self.scroll = QScrollArea() self.scroll.setWidget(self.libraryBrowserBox) self.scroll.setWidgetResizable(True) self.scroll.setMinimumHeight(300) self.libraryBrowser.setFixedWidth(250 * self.layout.scale) self.libraryBrowser.layout.addWidget(self.scroll) self.libraryBrowser.layout.setAlignment(Qt.AlignTop) self.libraryBrowser.setLayout(self.libraryBrowser.layout) self.layout.addWidget(self.libraryBrowser, 0, 0, 10, 1) self.layout.addWidget(self.canvasView, 0, 1, 10, 9)
def initUI(resolution, debug, main_window): """ This method is responsible for controlling the size of the application window. It is also responsible for each application component, in terms of: - setting its dimensions ; - setting its position within the application layout; - creating its relevant buttons and connecting them to desired actions; - populating it with the created buttons; and - spacing out these buttons within the application component itself. :param resolution: the desktop screen resolution of the user :type resolution: PyQt5.QtCore.QRect(0, 0, screen_width, screen_height), required :return: a QWidget that is displayed within the application from bdEdit.py :rtype: QWidget """ main_window.setGeometry(100, 100, int(resolution.width() / 2), int(resolution.height() / 2)) self.layout = QGridLayout() self.layout.setContentsMargins(0, 0, 0, 0) self.setLayout(self.layout) self.layout.scale = int(2560 / resolution.width()) self.scene = Scene(resolution, self.layout, main_window) self.canvasView = GraphicsView(self.scene.grScene, main_window, self) self.blockLibrary = import_blocks(self.scene, self.layout) for sub_list in self.blockLibrary: sub_list[1].sort(key=lambda x: x[0]) self.blockLibrary.sort(key=lambda x: x[0]) self.libraryBrowser.layout = QVBoxLayout(self.libraryBrowser) self.tool_logo = QLabel() self.tool_logo.setPixmap(QPixmap(':/Icons_Reference/Icons/bdsim_logo2.png').scaledToWidth(230 * self.layout.scale)) self.libraryBrowser.layout.addWidget(self.tool_logo) self.libraryBrowser.layout.setAlignment(self.tool_logo, Qt.AlignmentFlag.AlignHCenter) self.libraryBrowserBox.layout = QVBoxLayout() self.libraryBrowserBox.layout.setSpacing(2) self.canvasItems_button = QPushButton(' + Canvas Items') self.canvasItems_button.setStyleSheet('QPushButton { text-align: left }') self.canvasItems_button.clicked.connect(self.toggleCanvasItems) self.canvas_items_hidden = True self.list_of_scrollbar_buttons = [] self.libraryBrowserBox.layout.addWidget(self.canvasItems_button) self.connector_block_button = QPushButton('Connector Block') self.main_block_button = QPushButton('Main Block') self.text_item_button = QPushButton('Text Item') self.grouping_box_button = QPushButton('Grouping Box') self.connector_block_button.setVisible(False) self.main_block_button.setVisible(False) self.text_item_button.setVisible(False) self.grouping_box_button.setVisible(False) self.connector_block_button.clicked.connect(lambda checked: Connector(self.scene, self.layout, 'Connector Block')) self.main_block_button.clicked.connect(lambda checked: Main(self.scene, self.layout)) self.text_item_button.clicked.connect(lambda checked: Floating_Label(self.scene, self.layout, main_window)) self.grouping_box_button.clicked.connect(lambda checked: Grouping_Box(self.scene, self.layout)) self.connector_block_button.clicked.connect(lambda checked, desc='Added connector block to scene': self.scene.history.storeHistory(desc)) self.main_block_button.clicked.connect(lambda checked, desc='Added main block to scene': self.scene.history.storeHistory(desc)) self.text_item_button.clicked.connect(lambda checked, desc='Added text label to scene': self.scene.history.storeHistory(desc)) self.grouping_box_button.clicked.connect(lambda checked, desc='Added grouping box to scene': self.scene.history.storeHistory(desc)) self.libraryBrowserBox.layout.addWidget(self.text_item_button) self.libraryBrowserBox.layout.addWidget(self.main_block_button) self.libraryBrowserBox.layout.addWidget(self.grouping_box_button) 
self.libraryBrowserBox.layout.addWidget(self.connector_block_button) for sub_class_group in self.blockLibrary: group_of_buttons = [] cleaned_class_group = sub_class_group[0][:-1] if sub_class_group[0].endswith('s') else sub_class_group[0] group_button = QPushButton(' + ' + cleaned_class_group.capitalize() + ' Blocks') group_button.setStyleSheet('QPushButton { text-align: left }') group_button.is_hidden = True group_button.clicked.connect(self.toggle_sub_buttons) group_of_buttons.append((group_button, cleaned_class_group.capitalize())) self.libraryBrowserBox.layout.addWidget(group_button) list_of_sub_buttons = [] for class_block in sub_class_group[1]: button = QPushButton(class_block[0]) button.setVisible(False) button.clicked.connect(lambda checked, blockClass=class_block[1]: blockClass()) button.clicked.connect(lambda checked, desc='Added imported block to scene': self.scene.history.storeHistory(desc)) list_of_sub_buttons.append((button, class_block[1])) self.libraryBrowserBox.layout.addWidget(button) group_of_buttons.append(list_of_sub_buttons) self.list_of_scrollbar_buttons.append(group_of_buttons) self.libraryBrowserBox.layout.addStretch() self.libraryBrowserBox.layout.setAlignment(Qt.AlignTop) self.libraryBrowserBox.setLayout(self.libraryBrowserBox.layout) self.scroll = QScrollArea() self.scroll.setWidget(self.libraryBrowserBox) self.scroll.setWidgetResizable(True) self.scroll.setMinimumHeight(300) self.libraryBrowser.setFixedWidth(250 * self.layout.scale) self.libraryBrowser.layout.addWidget(self.scroll) self.libraryBrowser.layout.setAlignment(Qt.AlignTop) self.libraryBrowser.setLayout(self.libraryBrowser.layout) self.layout.addWidget(self.libraryBrowser, 0, 0, 10, 1) self.layout.addWidget(self.canvasView, 0, 1, 10, 9)
bdsim
positive
@ddt.data((ProctoredExamStudentAttemptStatus.created, 'Set up and start your proctored exam'), (ProctoredExamStudentAttemptStatus.download_software_clicked, 'Set up and start your proctored exam'), (ProctoredExamStudentAttemptStatus.ready_to_start, 'Proctored Exam Rules'), (ProctoredExamStudentAttemptStatus.error, 'There was a problem with your onboarding session'), (ProctoredExamStudentAttemptStatus.submitted, 'You have submitted this onboarding exam'), (ProctoredExamStudentAttemptStatus.second_review_required, 'You have submitted this onboarding exam'), (ProctoredExamStudentAttemptStatus.ready_to_submit, 'and submit your proctoring session to complete onboarding'), (ProctoredExamStudentAttemptStatus.verified, 'Your onboarding profile was reviewed successfully'), (ProctoredExamStudentAttemptStatus.rejected, 'Your onboarding session was reviewed, but did not pass all requirements')) @ddt.unpack def test_get_studentview_created_status_onboarding(self, status, expected_message): """ Test for get_student_view practice exam which has been created. """ exam_attempt = self._create_onboarding_attempt() exam_attempt.status = status exam_attempt.save() <DeepExtract> rendered_response = self._render_exam(self.onboarding_exam_id) </DeepExtract> self.assertIn(expected_message, rendered_response)
@ddt.data((ProctoredExamStudentAttemptStatus.created, 'Set up and start your proctored exam'), (ProctoredExamStudentAttemptStatus.download_software_clicked, 'Set up and start your proctored exam'), (ProctoredExamStudentAttemptStatus.ready_to_start, 'Proctored Exam Rules'), (ProctoredExamStudentAttemptStatus.error, 'There was a problem with your onboarding session'), (ProctoredExamStudentAttemptStatus.submitted, 'You have submitted this onboarding exam'), (ProctoredExamStudentAttemptStatus.second_review_required, 'You have submitted this onboarding exam'), (ProctoredExamStudentAttemptStatus.ready_to_submit, 'and submit your proctoring session to complete onboarding'), (ProctoredExamStudentAttemptStatus.verified, 'Your onboarding profile was reviewed successfully'), (ProctoredExamStudentAttemptStatus.rejected, 'Your onboarding session was reviewed, but did not pass all requirements')) @ddt.unpack def test_get_studentview_created_status_onboarding(self, status, expected_message): """ Test for get_student_view practice exam which has been created. """ exam_attempt = self._create_onboarding_attempt() exam_attempt.status = status exam_attempt.save() rendered_response = self._render_exam(self.onboarding_exam_id) self.assertIn(expected_message, rendered_response)
edx-proctoring
positive
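The edx-proctoring row drives one test through nine attempt states via `ddt`: `@ddt.data` supplies one tuple per run and `@ddt.unpack` spreads each tuple across the test's arguments. A self-contained sketch of the same pattern (the class and data here are illustrative):

```python
import unittest
import ddt

@ddt.ddt
class StatusMessageTest(unittest.TestCase):
    MESSAGES = {
        'submitted': 'You have submitted this onboarding exam',
        'verified': 'Your onboarding profile was reviewed successfully',
    }

    @ddt.data(('submitted', 'submitted this onboarding exam'),
              ('verified', 'reviewed successfully'))
    @ddt.unpack
    def test_message_for_status(self, status, expected_fragment):
        self.assertIn(expected_fragment, self.MESSAGES[status])
```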
def embed_in_notebook(self, resolution=(640, 360), **kwargs): from tempfile import mkstemp from IPython.core.display import Video filepath = mkstemp(suffix='.ogv')[1] <DeepExtract> renderer = vtk.vtkRenderer() renderer.SetBackground(1, 1, 1) for actor in self.actors: renderer.AddActor(actor) renderer.Modified() camera = vtk.vtkCamera() camera.SetPosition(*camera_position) camera.SetFocalPoint(0, 0, 0) camera.SetViewUp(0, 0, 1) renderer.SetActiveCamera(camera) render_window = vtk.vtkRenderWindow() render_window.SetSize(*resolution) render_window.OffScreenRenderingOn() render_window.AddRenderer(renderer) image_filter = vtk.vtkWindowToImageFilter() image_filter.SetInput(render_window) image_filter.SetInputBufferTypeToRGB() image_filter.ReadFrontBufferOff() writer = vtk.vtkOggTheoraWriter() writer.SetInputConnection(image_filter.GetOutputPort()) writer.SetFileName(filepath) writer.SetRate(self.fps) writer.Start() for i_frame in range(1 * self.frames_per_loop): self._callback(renderer, None) image_filter.Modified() writer.Write() writer.End() render_window.Finalize() del image_filter del writer del render_window </DeepExtract> return Video(filepath, embed=True, width=resolution[0], html_attributes='controls loop autoplay')
def embed_in_notebook(self, resolution=(640, 360), **kwargs): from tempfile import mkstemp from IPython.core.display import Video filepath = mkstemp(suffix='.ogv')[1] renderer = vtk.vtkRenderer() renderer.SetBackground(1, 1, 1) for actor in self.actors: renderer.AddActor(actor) renderer.Modified() camera = vtk.vtkCamera() camera.SetPosition(*camera_position) camera.SetFocalPoint(0, 0, 0) camera.SetViewUp(0, 0, 1) renderer.SetActiveCamera(camera) render_window = vtk.vtkRenderWindow() render_window.SetSize(*resolution) render_window.OffScreenRenderingOn() render_window.AddRenderer(renderer) image_filter = vtk.vtkWindowToImageFilter() image_filter.SetInput(render_window) image_filter.SetInputBufferTypeToRGB() image_filter.ReadFrontBufferOff() writer = vtk.vtkOggTheoraWriter() writer.SetInputConnection(image_filter.GetOutputPort()) writer.SetFileName(filepath) writer.SetRate(self.fps) writer.Start() for i_frame in range(1 * self.frames_per_loop): self._callback(renderer, None) image_filter.Modified() writer.Write() writer.End() render_window.Finalize() del image_filter del writer del render_window return Video(filepath, embed=True, width=resolution[0], html_attributes='controls loop autoplay')
capytaine
positive
def _call_eob_endpoint_outpatient(v2=False): client = APIClient() <DeepExtract> base_api_test = BaseApiTest() base_api_test.client = client base_api_test.read_capability = base_api_test._create_capability('Read', []) base_api_test.write_capability = base_api_test._create_capability('Write', []) first_name = fn if fn is not None else 'John' last_name = ln if ln is not None else 'Doe' access_token = base_api_test.create_token(first_name, last_name, fhir_id, hicn_hash, mbi_hash) at = AccessToken.objects.get(token=access_token) client.credentials(HTTP_AUTHORIZATION='Bearer ' + at.token) </DeepExtract> response = client.get(self._get_fhir_url(FHIR_RES_TYPE_EOB, 'outpatient--4412920419', v2)) content = json.loads(response.content) self.assertEqual(response.status_code, 200) if not v2: self.assertEqual(validate_json_schema(EOB_READ_INPT_SCHEMA, content), True) else: self.assertEqual(response.status_code, 200) <DeepExtract> meta_profile = None try: meta_profile = content['meta']['profile'][0] except KeyError: pass if not v2: self.assertIsNone(meta_profile) else: self.assertIsNotNone(meta_profile) self.assertEqual(meta_profile, C4BB_PROFILE_URLS['OUTPATIENT']) </DeepExtract>
def _call_eob_endpoint_outpatient(v2=False): client = APIClient() base_api_test = BaseApiTest() base_api_test.client = client base_api_test.read_capability = base_api_test._create_capability('Read', []) base_api_test.write_capability = base_api_test._create_capability('Write', []) first_name = fn if fn is not None else 'John' last_name = ln if ln is not None else 'Doe' access_token = base_api_test.create_token(first_name, last_name, fhir_id, hicn_hash, mbi_hash) at = AccessToken.objects.get(token=access_token) client.credentials(HTTP_AUTHORIZATION='Bearer ' + at.token) response = client.get(self._get_fhir_url(FHIR_RES_TYPE_EOB, 'outpatient--4412920419', v2)) content = json.loads(response.content) self.assertEqual(response.status_code, 200) if not v2: self.assertEqual(validate_json_schema(EOB_READ_INPT_SCHEMA, content), True) else: self.assertEqual(response.status_code, 200) meta_profile = None try: meta_profile = content['meta']['profile'][0] except KeyError: pass if not v2: self.assertIsNone(meta_profile) else: self.assertIsNotNone(meta_profile) self.assertEqual(meta_profile, C4BB_PROFILE_URLS['OUTPATIENT']) </DeepExtract>
bluebutton-web-server
positive
def lookup_path(params, folder, components): """ Lookup a path of components within the folder cache. Get all the folders starting from the left end of component, plus the index of the first component that isn't present in the folder cache. """ remainder = 0 for (index, component) in enumerate(components): <DeepExtract> if component == '.': temp_folder = folder if component == '..': if folder.parent_uid is None: temp_folder = params.root_folder temp_folder = params.folder_cache[folder.parent_uid] if component in params.folder_cache: temp_folder = params.folder_cache[component] for subfolder_uid in folder.subfolders: subfolder = params.folder_cache[subfolder_uid] if subfolder.name == component: temp_folder = subfolder temp_folder = None </DeepExtract> if temp_folder is None: break folder = temp_folder remainder = index + 1 return (remainder, folder)
def lookup_path(params, folder, components): """ Lookup a path of components within the folder cache. Get all the folders starting from the left end of component, plus the index of the first component that isn't present in the folder cache. """ remainder = 0 for (index, component) in enumerate(components): if component == '.': temp_folder = folder if component == '..': if folder.parent_uid is None: temp_folder = params.root_folder temp_folder = params.folder_cache[folder.parent_uid] if component in params.folder_cache: temp_folder = params.folder_cache[component] for subfolder_uid in folder.subfolders: subfolder = params.folder_cache[subfolder_uid] if subfolder.name == component: temp_folder = subfolder temp_folder = None if temp_folder is None: break folder = temp_folder remainder = index + 1 return (remainder, folder)
Commander
positive
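The Commander row illustrates a hazard of mechanical inlining: the original helper resolved a path component with early `return`s, so after inlining, the chain of plain `if` statements falls through and the trailing `temp_folder = None` always wins. The resolution logic the block was meant to express (a reconstruction, using the same attribute names):

```python
def resolve_component(params, folder, component):
    """Resolve one path component against the folder cache:
    '.' is the current folder, '..' its parent, then a UID lookup,
    then a name match among subfolders, else None."""
    if component == '.':
        return folder
    if component == '..':
        if folder.parent_uid is None:
            return params.root_folder
        return params.folder_cache[folder.parent_uid]
    if component in params.folder_cache:
        return params.folder_cache[component]
    for subfolder_uid in folder.subfolders:
        subfolder = params.folder_cache[subfolder_uid]
        if subfolder.name == component:
            return subfolder
    return None
```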
def numerical_diff(f, x, eps=0.0001): x0 = Variable(x.data - eps) x1 = Variable(x.data + eps) <DeepExtract> A = Square() B = Exp() C = Square() y0 = C(B(A(x0))) </DeepExtract> <DeepExtract> A = Square() B = Exp() C = Square() y1 = C(B(A(x1))) </DeepExtract> return (y1.data - y0.data) / (2 * eps)
def numerical_diff(f, x, eps=0.0001): x0 = Variable(x.data - eps) x1 = Variable(x.data + eps) A = Square() B = Exp() C = Square() y0 = C(B(A(x0))) A = Square() B = Exp() C = Square() y1 = C(B(A(x1))) return (y1.data - y0.data) / (2 * eps)
deep-learning-from-scratch-3
positive
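The deep-learning-from-scratch-3 row computes a central difference, but the inlining replaced the generic call `f(x)` with the hard-coded Square/Exp/Square pipeline, so the `f` parameter goes unused. The generic form the function expresses, f'(x) ≈ (f(x + eps) - f(x - eps)) / (2 * eps), as a standalone sketch:

```python
def numerical_diff(f, x, eps=1e-4):
    """Central-difference approximation of f'(x)."""
    return (f(x + eps) - f(x - eps)) / (2 * eps)

# Example: d/dx x**2 at x = 2 is 4.
assert abs(numerical_diff(lambda x: x ** 2, 2.0) - 4.0) < 1e-6
```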
def acConnect(self, dom): dom.inner('', open('Main.html').read()) dom.focus('Input') <DeepExtract> xml = atlastk.create_XML('XDHTML') xml.pushTag('Todos') for index in range(len(self.todos)): todo = self.todos[index] if self.exclude is None or todo['completed'] != self.exclude: self.push(todo, index, xml) xml.popTag() dom.inner('Todos', xml, 'Todos.xsl') self.handleCount(dom) </DeepExtract> dom.disableElements(['HideActive', 'HideCompleted'])
def acConnect(self, dom): dom.inner('', open('Main.html').read()) dom.focus('Input') xml = atlastk.create_XML('XDHTML') xml.pushTag('Todos') for index in range(len(self.todos)): todo = self.todos[index] if self.exclude is None or todo['completed'] != self.exclude: self.push(todo, index, xml) xml.popTag() dom.inner('Todos', xml, 'Todos.xsl') self.handleCount(dom) dom.disableElements(['HideActive', 'HideCompleted'])
atlas-python
positive
def test_tag_title_non_ascii(self): <DeepExtract> params = {'title': 'My test entry', 'content': 'My test content with image <img src="/image.jpg" />', 'slug': 'my-test-entry', 'tags': 'tests', 'publication_date': datetime(2010, 1, 1, 12), 'status': PUBLISHED} entry = Entry.objects.create(**params) entry.sites.add(self.site) entry.categories.add(self.category) entry.authors.add(self.author) entry = entry </DeepExtract> tag_unicode = smart_str('accentué') entry.tags = tag_unicode entry.save() feed = TagEntries() tag = Tag(name=tag_unicode) self.assertEqual(feed.get_title(tag), 'Entries for the tag %s' % tag_unicode) self.assertEqual(feed.description(tag), 'The last entries tagged with %s' % tag_unicode)
def test_tag_title_non_ascii(self): params = {'title': 'My test entry', 'content': 'My test content with image <img src="/image.jpg" />', 'slug': 'my-test-entry', 'tags': 'tests', 'publication_date': datetime(2010, 1, 1, 12), 'status': PUBLISHED} entry = Entry.objects.create(**params) entry.sites.add(self.site) entry.categories.add(self.category) entry.authors.add(self.author) entry = entry tag_unicode = smart_str('accentué') entry.tags = tag_unicode entry.save() feed = TagEntries() tag = Tag(name=tag_unicode) self.assertEqual(feed.get_title(tag), 'Entries for the tag %s' % tag_unicode) self.assertEqual(feed.description(tag), 'The last entries tagged with %s' % tag_unicode)
django-blog-zinnia
positive
def apply_overrides(config, raw_overrides): """ Given a configuration dict and a sequence of configuration file override strings in the form of "section.option=value", parse each override and set it the configuration dict. """ <DeepExtract> if not raw_overrides: overrides = () parsed_overrides = [] for raw_override in raw_overrides: try: (raw_keys, value) = raw_override.split('=', 1) parsed_overrides.append((tuple(raw_keys.split('.')), convert_value_type(value))) except ValueError: raise ValueError(f"Invalid override '{raw_override}'. Make sure you use the form: SECTION.OPTION=VALUE") except ruamel.yaml.error.YAMLError as error: raise ValueError(f"Invalid override '{raw_override}': {error.problem}") overrides = tuple(parsed_overrides) </DeepExtract> for (keys, value) in overrides: <DeepExtract> if not keys: return first_key = keys[0] if len(keys) == 1: config[first_key] = value return if first_key not in config: config[first_key] = {} set_values(config[first_key], keys[1:], value) </DeepExtract>
def apply_overrides(config, raw_overrides): """ Given a configuration dict and a sequence of configuration file override strings in the form of "section.option=value", parse each override and set it the configuration dict. """ if not raw_overrides: overrides = () parsed_overrides = [] for raw_override in raw_overrides: try: (raw_keys, value) = raw_override.split('=', 1) parsed_overrides.append((tuple(raw_keys.split('.')), convert_value_type(value))) except ValueError: raise ValueError(f"Invalid override '{raw_override}'. Make sure you use the form: SECTION.OPTION=VALUE") except ruamel.yaml.error.YAMLError as error: raise ValueError(f"Invalid override '{raw_override}': {error.problem}") overrides = tuple(parsed_overrides) for (keys, value) in overrides: if not keys: return first_key = keys[0] if len(keys) == 1: config[first_key] = value return if first_key not in config: config[first_key] = {} set_values(config[first_key], keys[1:], value) </DeepExtract>
borgmatic
positive
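The borgmatic row shows another inlining hazard: the `return` statements that belonged to the recursive `set_values` helper now return from `apply_overrides` itself, so only the first override is ever applied. The extracted helper, restored as its own function:

```python
def set_values(config, keys, value):
    """Walk the key path, creating nested dicts as needed, and set the leaf."""
    if not keys:
        return
    first_key = keys[0]
    if len(keys) == 1:
        config[first_key] = value
        return
    if first_key not in config:
        config[first_key] = {}
    set_values(config[first_key], keys[1:], value)

# Example:
config = {}
set_values(config, ('section', 'option'), 'value')
assert config == {'section': {'option': 'value'}}
```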
def cross_formula_table(actualDict, expectedDict, calculation, probs=False): data = dict() for (ka, actual) in actualDict.items(): for (ke, expected) in expectedDict.items(): if probs: <DeepExtract> actualC = 0 expectedC = 0 for k in set(list(actual.keys()) + list(expected.keys())): expectedC += _get_count(k, expected) actualC += _get_count(k, actual) p = 0 Et = 0 Ot = 0 for k in set(list(actual.keys()) + list(expected.keys())): E = _get_count(k, expected) O = _get_count(k, actual) Ep = E / expectedC Op = O / actualC p += abs(Ep - Op) p /= 2 data[ka, ke] = p </DeepExtract> else: <DeepExtract> (df, p) = calculation(actual, expected) if df not in _ptable: raise Exception('{} degrees of freedom does not have a corresponding chi squared value.' + ' Please look up the value and add it to the table in copycat/statistics.py'.format(df)) data[ka, ke] = p < _ptable[df] </DeepExtract> return data
def cross_formula_table(actualDict, expectedDict, calculation, probs=False): data = dict() for (ka, actual) in actualDict.items(): for (ke, expected) in expectedDict.items(): if probs: actualC = 0 expectedC = 0 for k in set(list(actual.keys()) + list(expected.keys())): expectedC += _get_count(k, expected) actualC += _get_count(k, actual) p = 0 Et = 0 Ot = 0 for k in set(list(actual.keys()) + list(expected.keys())): E = _get_count(k, expected) O = _get_count(k, actual) Ep = E / expectedC Op = O / actualC p += abs(Ep - Op) p /= 2 data[ka, ke] = p else: (df, p) = calculation(actual, expected) if df not in _ptable: raise Exception('{} degrees of freedom does not have a corresponding chi squared value.' + ' Please look up the value and add it to the table in copycat/statistics.py'.format(df)) data[ka, ke] = p < _ptable[df] return data
copycat
positive
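In the copycat row, the `probs` branch computes the total variation distance between two count distributions, TV(P, Q) = (1/2) * sum over k of |p_k - q_k|. A standalone sketch (plain dicts with a zero default replace the project's `_get_count` helper):

```python
def total_variation(actual_counts, expected_counts):
    """Half the L1 distance between two normalised count distributions."""
    keys = set(actual_counts) | set(expected_counts)
    actual_total = sum(actual_counts.get(k, 0) for k in keys)
    expected_total = sum(expected_counts.get(k, 0) for k in keys)
    return sum(abs(expected_counts.get(k, 0) / expected_total
                   - actual_counts.get(k, 0) / actual_total)
               for k in keys) / 2

# Identical shapes give distance 0; disjoint supports give distance 1.
assert total_variation({'a': 2, 'b': 2}, {'a': 1, 'b': 1}) == 0
assert total_variation({'a': 1}, {'b': 1}) == 1
```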
def choropleth(self, variable, filename, title='Choropleth map', logscale=True, **kwargs): """Create choropleth map. Args: variable (str): variable name to show filename (str or None): filename to save the figure or None (display) title (str): title of the map logscale (bool): whether convert the value to log10 scale values or not kwargs: keyword arguments of the following classes and methods. - covsirphy.GIS.to_geopandas() except for @variables, - matplotlib.pyplot.savefig(), matplotlib.pyplot.legend(), and - pandas.DataFrame.plot() """ v = Validator(kwargs, 'keyword arguments') <DeepExtract> if self._country not in self._layers: raise ValueError('This cannot be done because country layer is not included in the dataset.') df = self.layer(geo=geo, variables=[variable]) if on is None: df = df.sort_values(self._date, ascending=True).groupby(self._layers).last().reset_index() else: df = df.loc[df[self._date] == Validator(on).date()] focused_layer = [layer for layer in self._layers if df[layer][df[layer] != self.NA].nunique() > 0][-1] geometry = _Geometry(data=df, layer=focused_layer, directory=directory or Path(__file__).with_name('Natural_Earth')) iso3 = None if focused_layer == self._country else self._to_iso3(list(df[self._country].unique())[0]) gdf = geometry.to_geopandas(iso3=iso3, natural_earth=natural_earth).drop(set(self._layers) - {focused_layer}, axis=1) </DeepExtract> focused_layer = [layer for layer in self._layers if layer in gdf.columns][0] gdf.rename(columns={focused_layer: 'Location', variable: 'Variable'}, inplace=True) with _ChoroplethMap(filename=filename, **v.kwargs(functions=plt.savefig, default=None)) as cm: cm.title = str(title) cm.plot(data=gdf, logscale=logscale, **v.kwargs(functions=gpd.GeoDataFrame.plot, default=None))
def choropleth(self, variable, filename, title='Choropleth map', logscale=True, **kwargs): """Create choropleth map. Args: variable (str): variable name to show filename (str or None): filename to save the figure or None (display) title (str): title of the map logscale (bool): whether convert the value to log10 scale values or not kwargs: keyword arguments of the following classes and methods. - covsirphy.GIS.to_geopandas() except for @variables, - matplotlib.pyplot.savefig(), matplotlib.pyplot.legend(), and - pandas.DataFrame.plot() """ v = Validator(kwargs, 'keyword arguments') if self._country not in self._layers: raise ValueError('This cannot be done because country layer is not included in the dataset.') df = self.layer(geo=geo, variables=[variable]) if on is None: df = df.sort_values(self._date, ascending=True).groupby(self._layers).last().reset_index() else: df = df.loc[df[self._date] == Validator(on).date()] focused_layer = [layer for layer in self._layers if df[layer][df[layer] != self.NA].nunique() > 0][-1] geometry = _Geometry(data=df, layer=focused_layer, directory=directory or Path(__file__).with_name('Natural_Earth')) iso3 = None if focused_layer == self._country else self._to_iso3(list(df[self._country].unique())[0]) gdf = geometry.to_geopandas(iso3=iso3, natural_earth=natural_earth).drop(set(self._layers) - {focused_layer}, axis=1) focused_layer = [layer for layer in self._layers if layer in gdf.columns][0] gdf.rename(columns={focused_layer: 'Location', variable: 'Variable'}, inplace=True) with _ChoroplethMap(filename=filename, **v.kwargs(functions=plt.savefig, default=None)) as cm: cm.title = str(title) cm.plot(data=gdf, logscale=logscale, **v.kwargs(functions=gpd.GeoDataFrame.plot, default=None))
covid19-sir
positive
def testRegisterUserNewTeam(self): <DeepExtract> data = {'email': 'test@example.com', 'nick': 'test3', 'password': 'test3', 'team_id': 'new', 'team_name': 'New Team', 'team_code': None} </DeepExtract> with self.client: with self.queryLimit(9): resp = self.postJSON('/api/users', data) self.assert200(resp) self.assertItemsEqual(self.USER_FIELDS, resp.json.keys()) self.assertEqual(resp.json['uid'], flask.session['user']) self.assertEqual(resp.json['admin'], flask.session['admin']) self.assertEqual(resp.json['team_tid'], flask.session['team'])
def testRegisterUserNewTeam(self): data = {'email': 'test@example.com', 'nick': 'test3', 'password': 'test3', 'team_id': 'new', 'team_name': 'New Team', 'team_code': None} with self.client: with self.queryLimit(9): resp = self.postJSON('/api/users', data) self.assert200(resp) self.assertItemsEqual(self.USER_FIELDS, resp.json.keys()) self.assertEqual(resp.json['uid'], flask.session['user']) self.assertEqual(resp.json['admin'], flask.session['admin']) self.assertEqual(resp.json['team_tid'], flask.session['team'])
ctfscoreboard
positive
def test_365_opensuse15_redis_user_dockerfile(self): """ WHEN using a dockerfile for systemd-enabled Opensuse15 and redis, THEN check that redis replies to 'ping' with a 'PONG' AND that AUTH works along with a USER process""" if not os.path.exists(DOCKER_SOCKET): self.skipTest('docker-based test') if not os.path.exists(PSQL_TOOL): self.skipTest('postgres tools missing on host') docker = _docker curl = _curl python = _python or _python3 <DeepExtract> name = self.caller_testname() if suffix: testname = name + '_' + suffix testname = name </DeepExtract> <DeepExtract> testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) os.makedirs(newdir) testdir = newdir </DeepExtract> dockerfile = 'opensuse15-redis-user.dockerfile' <DeepExtract> image = '' for line in open(dockerfile): m = re.match('[Ff][Rr][Oo][Mm] *"([^"]*)"', line) if m: image = m.group(1) break m = re.match('[Ff][Rr][Oo][Mm] *(\\w[^ ]*)', line) if m: image = m.group(1).strip() break logg.debug("--\n-- '%s' FROM '%s'", dockerfile, image) if image: addhosts = self.start_mirror(image, extras) addhosts = '' </DeepExtract> <DeepExtract> savename = os.path.splitext(os.path.basename(dockerfile))[0] </DeepExtract> saveto = SAVETO images = IMAGES psql = PSQL_TOOL <DeepExtract> if _password: password = _password out = 'Password.' out += random.choice(string.ascii_uppercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(',.-+') out += random.choice('0123456789') out += random.choice('0123456789') password = out </DeepExtract> cmd = '{docker} build . -f {dockerfile} {addhosts} --build-arg PASSWORD={password} --tag {images}:{testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rm --force {testname}-client' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rm --force {testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} run -d --name {testname} {images}:{testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> <DeepExtract> docker = _docker cmd = '{docker} inspect {name}' values = output(cmd.format(**locals())) values = json.loads(values) if not values or 'NetworkSettings' not in values[0]: logg.critical(' docker inspect %s => %s ', testname, values) container = values[0]['NetworkSettings']['IPAddress'] </DeepExtract> cmd = 'sleep 2' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item 
in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} run -d --name {testname}-client {images}:{testname} sleep 3' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} exec -t {testname}-client redis-cli -h {container} -a {password} ping | tee {testdir}/{testname}.txt' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = 'grep PONG {testdir}/{testname}.txt' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} exec {testname} ps axu' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) run = subprocess.Popen(cmd.format(**locals()), shell=shell, stdout=subprocess.PIPE) (out, err) = run.communicate() (out, end) = (decodes(out), run.returncode) </DeepExtract> logg.info(' %s =>%s\n%s', cmd, end, out) self.assertFalse(greps(out, 'root')) cmd = '{docker} stop {testname}-client' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rm --force {testname}-client' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} stop {testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rm --force {testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rmi {saveto}/{savename}:latest' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} tag {images}:{testname} {saveto}/{savename}:latest' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) 
</DeepExtract> cmd = '{docker} rmi {images}:{testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) </DeepExtract> <DeepExtract> testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) return newdir </DeepExtract>
def test_365_opensuse15_redis_user_dockerfile(self): """ WHEN using a dockerfile for systemd-enabled Opensuse15 and redis, THEN check that redis replies to 'ping' with a 'PONG' AND that AUTH works along with a USER process""" if not os.path.exists(DOCKER_SOCKET): self.skipTest('docker-based test') if not os.path.exists(PSQL_TOOL): self.skipTest('postgres tools missing on host') docker = _docker curl = _curl python = _python or _python3 name = self.caller_testname() if suffix: testname = name + '_' + suffix testname = name testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) os.makedirs(newdir) testdir = newdir dockerfile = 'opensuse15-redis-user.dockerfile' image = '' for line in open(dockerfile): m = re.match('[Ff][Rr][Oo][Mm] *"([^"]*)"', line) if m: image = m.group(1) break m = re.match('[Ff][Rr][Oo][Mm] *(\\w[^ ]*)', line) if m: image = m.group(1).strip() break logg.debug("--\n-- '%s' FROM '%s'", dockerfile, image) if image: addhosts = self.start_mirror(image, extras) addhosts = '' savename = os.path.splitext(os.path.basename(dockerfile))[0] saveto = SAVETO images = IMAGES psql = PSQL_TOOL if _password: password = _password out = 'Password.' out += random.choice(string.ascii_uppercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(',.-+') out += random.choice('0123456789') out += random.choice('0123456789') password = out cmd = '{docker} build . -f {dockerfile} {addhosts} --build-arg PASSWORD={password} --tag {images}:{testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rm --force {testname}-client' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) cmd = '{docker} rm --force {testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) cmd = '{docker} run -d --name {testname} {images}:{testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) docker = _docker cmd = '{docker} inspect {name}' values = output(cmd.format(**locals())) values = json.loads(values) if not values or 'NetworkSettings' not in values[0]: logg.critical(' docker inspect %s => %s ', testname, values) container = values[0]['NetworkSettings']['IPAddress'] cmd = 'sleep 2' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} run -d --name {testname}-client {images}:{testname} sleep 3' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} exec -t {testname}-client redis-cli -h {container} -a {password} ping | tee {testdir}/{testname}.txt' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = 'grep PONG {testdir}/{testname}.txt' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} exec {testname} ps axu' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) run = subprocess.Popen(cmd.format(**locals()), shell=shell, stdout=subprocess.PIPE) (out, err) = run.communicate() (out, end) = (decodes(out), run.returncode) logg.info(' %s =>%s\n%s', cmd, end, out) self.assertFalse(greps(out, 'root')) cmd = '{docker} stop {testname}-client' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rm --force {testname}-client' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} stop {testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rm --force {testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rmi {saveto}/{savename}:latest' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) cmd = '{docker} tag {images}:{testname} {saveto}/{savename}:latest' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rmi {images}:{testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) return newdir </DeepExtract>
docker-systemctl-images
positive
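Every shell step in the record above inlines the same logging-and-run helper. As a rough standalone sketch of what such a helper looks like before inlining (the name sh____, the shell flag, and the Python 3 str check in place of the original's Python 2 basestring are assumptions, not taken verbatim from the repo):

    import logging
    import subprocess

    logg = logging.getLogger("tests")

    def sh____(cmd, shell=True):
        # log the command, then run it and raise on a non-zero exit status
        # (mirrors the inlined isinstance/logg.info/check_call pattern above)
        if isinstance(cmd, str):
            logg.info(": %s", cmd)
        else:
            logg.info(": %s", " ".join(["'%s'" % item for item in cmd]))
        return subprocess.check_call(cmd, shell=shell)

The record also shows a tolerant variant that uses subprocess.call instead of check_call for cleanup steps (rm/rmi), where a failure should not abort the test.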
def genJSON(basedir): sel_labels = ['person'] sel_attrib = {'weather': ['clear'], 'scene': [], 'timeofday': ['daytime']} filelist = '' choose_inverted_attrib = False img_dir = os.path.join(basedir, 'images', '100k') ann_dir = os.path.join(basedir, 'labels', '100k') vid_dir = os.path.join(basedir, 'videos', '100k') categories = [{'id': cat2id(cat), 'name': cat} for cat in sel_labels] for subdir in ['train', 'val']: img_file_list = [] vid_file_list = [] img_subdir = os.path.join(img_dir, subdir) subdir = os.path.join(ann_dir, subdir) images = [] annotations = [] ann_dict = {} img_id = 0 ann_id = 0 if len(filelist) == 0: img_samples = os.listdir(img_subdir) samples = os.listdir(subdir) else: with open(filelist, 'r') as f: flist = f.readlines() f.close() samples = [] for i in range(len(flist)): flist[i] = flist[i].strip() if flist[i].endswith('.mov'): flist[i] = flist[i][:-4] + '.jpg' samples.append(flist[i][:-4] + '.json') img_samples = flist img_files = sorted([os.path.join(img_subdir, s) for s in img_samples]) lab_files = sorted([os.path.join(subdir, s) for s in samples]) for (img_file, lab_file) in zip(img_files, lab_files): with open(lab_file, 'r') as f: data = json.load(f) name = data['name'] attrib = data['attributes'] frames = data['frames'] allowed = True for attr in ['weather', 'scene', 'timeofday']: if len(sel_attrib[attr]) > 0: if not attrib[attr] in sel_attrib[attr]: allowed = False break if choose_inverted_attrib: allowed = not allowed if not allowed: continue frame = frames[0] timestamp = frame['timestamp'] objects = frame['objects'] image = {} image['width'] = 720 image['height'] = 1280 image['id'] = img_id img_id += 1 print('Reading:', img_file) image['file_name'] = img_file images.append(image) img_file_list.append(img_file) vid_file_list.append(os.path.join(vid_dir, os.path.split(subdir)[-1], os.path.split(img_file)[-1].split('.')[0] + '.mov')) for (i, obj) in enumerate(objects): lab = obj['category'] <DeepExtract> if lab == 'motor': lab = 'motorcycle' elif lab == 'bike': lab = 'bicycle' else: lab = lab </DeepExtract> obj_attrib = obj['attributes'] if 'box2d' in obj: bbox = obj['box2d'] elif 'poly2d' in obj: poly = obj['poly2d'] <DeepExtract> (x, y, _) = zip(*poly) (x1, x2) = (min(x), max(x)) (y1, y2) = (min(y), max(y)) bbox = ((x1, y1), (x2, y2)) </DeepExtract> if not lab in sel_labels: continue ann = {} ann['id'] = ann_id ann_id += 1 ann['image_id'] = image['id'] ann['segmentation'] = [] <DeepExtract> cat2id_map = {'person': 1} ann['category_id'] = cat2id_map[lab] </DeepExtract> ann['iscrowd'] = 0 ann['bbox'] = list(map(int, [bbox['x1'], bbox['y1'], bbox['x2'] - bbox['x1'], bbox['y2'] - bbox['y1']])) bbox = ann['bbox'] ann['area'] = bbox[2] * bbox[3] annotations.append(ann) ann_dict['images'] = images ann_dict['categories'] = categories ann_dict['annotations'] = annotations with open('bdd_' + attrib2str(sel_attrib) + '_' + os.path.split(subdir)[-1].strip() + '.json', 'w', encoding='utf8') as f: f.write(json.dumps(ann_dict, indent=2)) f.close() with open('img_files_' + attrib2str(sel_attrib) + '_' + os.path.split(subdir)[-1].strip() + '.txt', 'w', encoding='utf8') as f: f.write('\n'.join(img_file_list) + '\n') with open('vid_files_' + attrib2str(sel_attrib) + '_' + os.path.split(subdir)[-1].strip() + '.txt', 'w', encoding='utf8') as f: f.write('\n'.join(vid_file_list) + '\n') f.close()
def genJSON(basedir): sel_labels = ['person'] sel_attrib = {'weather': ['clear'], 'scene': [], 'timeofday': ['daytime']} filelist = '' choose_inverted_attrib = False img_dir = os.path.join(basedir, 'images', '100k') ann_dir = os.path.join(basedir, 'labels', '100k') vid_dir = os.path.join(basedir, 'videos', '100k') categories = [{'id': cat2id(cat), 'name': cat} for cat in sel_labels] for subdir in ['train', 'val']: img_file_list = [] vid_file_list = [] img_subdir = os.path.join(img_dir, subdir) subdir = os.path.join(ann_dir, subdir) images = [] annotations = [] ann_dict = {} img_id = 0 ann_id = 0 if len(filelist) == 0: img_samples = os.listdir(img_subdir) samples = os.listdir(subdir) else: with open(filelist, 'r') as f: flist = f.readlines() f.close() samples = [] for i in range(len(flist)): flist[i] = flist[i].strip() if flist[i].endswith('.mov'): flist[i] = flist[i][:-4] + '.jpg' samples.append(flist[i][:-4] + '.json') img_samples = flist img_files = sorted([os.path.join(img_subdir, s) for s in img_samples]) lab_files = sorted([os.path.join(subdir, s) for s in samples]) for (img_file, lab_file) in zip(img_files, lab_files): with open(lab_file, 'r') as f: data = json.load(f) name = data['name'] attrib = data['attributes'] frames = data['frames'] allowed = True for attr in ['weather', 'scene', 'timeofday']: if len(sel_attrib[attr]) > 0: if not attrib[attr] in sel_attrib[attr]: allowed = False break if choose_inverted_attrib: allowed = not allowed if not allowed: continue frame = frames[0] timestamp = frame['timestamp'] objects = frame['objects'] image = {} image['width'] = 720 image['height'] = 1280 image['id'] = img_id img_id += 1 print('Reading:', img_file) image['file_name'] = img_file images.append(image) img_file_list.append(img_file) vid_file_list.append(os.path.join(vid_dir, os.path.split(subdir)[-1], os.path.split(img_file)[-1].split('.')[0] + '.mov')) for (i, obj) in enumerate(objects): lab = obj['category'] if lab == 'motor': lab = 'motorcycle' elif lab == 'bike': lab = 'bicycle' else: lab = lab obj_attrib = obj['attributes'] if 'box2d' in obj: bbox = obj['box2d'] elif 'poly2d' in obj: poly = obj['poly2d'] (x, y, _) = zip(*poly) (x1, x2) = (min(x), max(x)) (y1, y2) = (min(y), max(y)) bbox = ((x1, y1), (x2, y2)) if not lab in sel_labels: continue ann = {} ann['id'] = ann_id ann_id += 1 ann['image_id'] = image['id'] ann['segmentation'] = [] cat2id_map = {'person': 1} ann['category_id'] = cat2id_map[lab] ann['iscrowd'] = 0 ann['bbox'] = list(map(int, [bbox['x1'], bbox['y1'], bbox['x2'] - bbox['x1'], bbox['y2'] - bbox['y1']])) bbox = ann['bbox'] ann['area'] = bbox[2] * bbox[3] annotations.append(ann) ann_dict['images'] = images ann_dict['categories'] = categories ann_dict['annotations'] = annotations with open('bdd_' + attrib2str(sel_attrib) + '_' + os.path.split(subdir)[-1].strip() + '.json', 'w', encoding='utf8') as f: f.write(json.dumps(ann_dict, indent=2)) f.close() with open('img_files_' + attrib2str(sel_attrib) + '_' + os.path.split(subdir)[-1].strip() + '.txt', 'w', encoding='utf8') as f: f.write('\n'.join(img_file_list) + '\n') with open('vid_files_' + attrib2str(sel_attrib) + '_' + os.path.split(subdir)[-1].strip() + '.txt', 'w', encoding='utf8') as f: f.write('\n'.join(vid_file_list) + '\n') f.close()
detectron-self-train
positive
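genJSON above inlines a small helper that derives an axis-aligned box from a BDD100K poly2d annotation. A minimal standalone sketch of exactly that logic (the function name is an assumption):

    def poly_to_bbox(poly):
        # poly is a sequence of (x, y, marker) vertices, as in BDD100K poly2d;
        # keep only the min/max extents to form an axis-aligned bounding box
        (xs, ys, _) = zip(*poly)
        (x1, x2) = (min(xs), max(xs))
        (y1, y2) = (min(ys), max(ys))
        return ((x1, y1), (x2, y2))

    print(poly_to_bbox([(10, 5, 'L'), (40, 25, 'L'), (20, 60, 'L')]))  # ((10, 5), (40, 60))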
def __add_document_structure(docgraph, remove_redundant_layers=True): """return an Exmaralda XML etree representation of a docgraph""" E = self.E <DeepExtract> E = self.E root = E('basic-transcription') head = E('head') meta = E('meta-information') project = E('project-name') tname = E('transcription-name') ref_file = E('referenced-file', url='') ud = E('ud-meta-information') comment = E('comment') tconvention = E('transcription-convention') meta.append(project) meta.append(tname) meta.append(ref_file) meta.append(ud) meta.append(comment) meta.append(tconvention) speakers = E('speakertable') head.append(meta) head.append(speakers) root.append(head) root = root </DeepExtract> body = E('basic-body') timeline = E('common-timeline') for i in xrange(len(docgraph.tokens) + 1): idx = str(i) timeline.append(E('tli', {'id': 'T' + idx, 'time': idx})) body.append(timeline) <DeepExtract> E = self.E token_tier = E('tier', {'id': 'TIE{}'.format(self.tier_count), 'category': 'tok', 'type': 't', 'display-name': '[tok]'}) self.tier_count += 1 token_attribs = defaultdict(lambda : defaultdict(str)) for token_node_id in docgraph.tokens: for attrib in docgraph.node[token_node_id]: is_boring_attrib = attrib in ('layers', 'label') is_boring_cat = attrib.split(':')[-1] in ('token', 'id', 'word', 'morph', 'lemma') if not is_boring_attrib and (not is_boring_cat): token_attribs[attrib][token_node_id] = docgraph.node[token_node_id][attrib] for (i, (_tok_id, token_str)) in enumerate(docgraph.get_tokens()): token_tier.append(E('event', {'start': 'T{}'.format(i), 'end': 'T{}'.format(i + 1)}, token_str)) body.append(token_tier) for anno_tier in token_attribs: category = anno_tier.split(':')[-1] temp_tier = E('tier', {'id': 'TIE{}'.format(self.tier_count), 'category': category, 'type': 't', 'display-name': '[{}]'.format(anno_tier)}) self.tier_count += 1 for token_node_id in token_attribs[anno_tier]: token_tier_id = self.toknode2id[token_node_id] token_attrib = token_attribs[anno_tier][token_node_id] temp_tier.append(E('event', {'start': 'T{}'.format(token_tier_id), 'end': 'T{}'.format(token_tier_id + 1)}, token_attrib)) body.append(temp_tier) body = body </DeepExtract> annotation_layers = get_annotation_layers(docgraph) for layer in annotation_layers: if not remove_redundant_layers: <DeepExtract> layer_cat = layer.split(':')[-1] temp_tier = self.E('tier', {'id': 'TIE{}'.format(self.tier_count), 'category': layer_cat, 'type': 't', 'display-name': '[{}]'.format(layer)}) self.tier_count += 1 for node_id in select_nodes_by_layer(docgraph, layer): span_node_ids = get_span(docgraph, node_id) if span_node_ids: (start_id, end_id) = self.__span2event(span_node_ids) event_label = docgraph.node[node_id].get('label', '') event = self.E('event', {'start': 'T{}'.format(start_id), 'end': 'T{}'.format(end_id)}, event_label) temp_tier.append(event) body.append(temp_tier) </DeepExtract> elif is_informative(layer): <DeepExtract> layer_cat = layer.split(':')[-1] temp_tier = self.E('tier', {'id': 'TIE{}'.format(self.tier_count), 'category': layer_cat, 'type': 't', 'display-name': '[{}]'.format(layer)}) self.tier_count += 1 for node_id in select_nodes_by_layer(docgraph, layer): span_node_ids = get_span(docgraph, node_id) if span_node_ids: (start_id, end_id) = self.__span2event(span_node_ids) event_label = docgraph.node[node_id].get('label', '') event = self.E('event', {'start': 'T{}'.format(start_id), 'end': 'T{}'.format(end_id)}, event_label) temp_tier.append(event) body.append(temp_tier) </DeepExtract> <DeepExtract> E = self.E for (i, chain) in enumerate(get_pointing_chains(docgraph)): chain_tier = E('tier', {'id': 'TIE{}'.format(self.tier_count), 'category': 'chain', 'type': 't', 'display-name': '[coref-chain-{}]'.format(i)}) self.tier_count += 1 chain_length = len(chain) if chain_length < min_chain_length: continue for (j, node_id) in enumerate(chain): span_node_ids = get_span(docgraph, node_id) if span_node_ids: (start_id, end_id) = self.__span2event(span_node_ids) element_str = 'chain_{0}: {1}/{2}'.format(i, chain_length - j, chain_length) chain_tier.append(E('event', {'start': 'T{}'.format(start_id), 'end': 'T{}'.format(end_id)}, element_str)) body.append(chain_tier) </DeepExtract> root.append(body) return root
def __add_document_structure(docgraph, remove_redundant_layers=True): """return an Exmaralda XML etree representation of a docgraph""" E = self.E E = self.E root = E('basic-transcription') head = E('head') meta = E('meta-information') project = E('project-name') tname = E('transcription-name') ref_file = E('referenced-file', url='') ud = E('ud-meta-information') comment = E('comment') tconvention = E('transcription-convention') meta.append(project) meta.append(tname) meta.append(ref_file) meta.append(ud) meta.append(comment) meta.append(tconvention) speakers = E('speakertable') head.append(meta) head.append(speakers) root.append(head) root = root body = E('basic-body') timeline = E('common-timeline') for i in xrange(len(docgraph.tokens) + 1): idx = str(i) timeline.append(E('tli', {'id': 'T' + idx, 'time': idx})) body.append(timeline) E = self.E token_tier = E('tier', {'id': 'TIE{}'.format(self.tier_count), 'category': 'tok', 'type': 't', 'display-name': '[tok]'}) self.tier_count += 1 token_attribs = defaultdict(lambda : defaultdict(str)) for token_node_id in docgraph.tokens: for attrib in docgraph.node[token_node_id]: is_boring_attrib = attrib in ('layers', 'label') is_boring_cat = attrib.split(':')[-1] in ('token', 'id', 'word', 'morph', 'lemma') if not is_boring_attrib and (not is_boring_cat): token_attribs[attrib][token_node_id] = docgraph.node[token_node_id][attrib] for (i, (_tok_id, token_str)) in enumerate(docgraph.get_tokens()): token_tier.append(E('event', {'start': 'T{}'.format(i), 'end': 'T{}'.format(i + 1)}, token_str)) body.append(token_tier) for anno_tier in token_attribs: category = anno_tier.split(':')[-1] temp_tier = E('tier', {'id': 'TIE{}'.format(self.tier_count), 'category': category, 'type': 't', 'display-name': '[{}]'.format(anno_tier)}) self.tier_count += 1 for token_node_id in token_attribs[anno_tier]: token_tier_id = self.toknode2id[token_node_id] token_attrib = token_attribs[anno_tier][token_node_id] temp_tier.append(E('event', {'start': 'T{}'.format(token_tier_id), 'end': 'T{}'.format(token_tier_id + 1)}, token_attrib)) body.append(temp_tier) body = body annotation_layers = get_annotation_layers(docgraph) for layer in annotation_layers: if not remove_redundant_layers: layer_cat = layer.split(':')[-1] temp_tier = self.E('tier', {'id': 'TIE{}'.format(self.tier_count), 'category': layer_cat, 'type': 't', 'display-name': '[{}]'.format(layer)}) self.tier_count += 1 for node_id in select_nodes_by_layer(docgraph, layer): span_node_ids = get_span(docgraph, node_id) if span_node_ids: (start_id, end_id) = self.__span2event(span_node_ids) event_label = docgraph.node[node_id].get('label', '') event = self.E('event', {'start': 'T{}'.format(start_id), 'end': 'T{}'.format(end_id)}, event_label) temp_tier.append(event) body.append(temp_tier) elif is_informative(layer): layer_cat = layer.split(':')[-1] temp_tier = self.E('tier', {'id': 'TIE{}'.format(self.tier_count), 'category': layer_cat, 'type': 't', 'display-name': '[{}]'.format(layer)}) self.tier_count += 1 for node_id in select_nodes_by_layer(docgraph, layer): span_node_ids = get_span(docgraph, node_id) if span_node_ids: (start_id, end_id) = self.__span2event(span_node_ids) event_label = docgraph.node[node_id].get('label', '') event = self.E('event', {'start': 'T{}'.format(start_id), 'end': 'T{}'.format(end_id)}, event_label) temp_tier.append(event) body.append(temp_tier) E = self.E for (i, chain) in enumerate(get_pointing_chains(docgraph)): chain_tier = E('tier', {'id': 'TIE{}'.format(self.tier_count), 'category': 'chain', 'type': 't', 'display-name': '[coref-chain-{}]'.format(i)}) self.tier_count += 1 chain_length = len(chain) if chain_length < min_chain_length: continue for (j, node_id) in enumerate(chain): span_node_ids = get_span(docgraph, node_id) if span_node_ids: (start_id, end_id) = self.__span2event(span_node_ids) element_str = 'chain_{0}: {1}/{2}'.format(i, chain_length - j, chain_length) chain_tier.append(E('event', {'start': 'T{}'.format(start_id), 'end': 'T{}'.format(end_id)}, element_str)) body.append(chain_tier) root.append(body) return root
discoursegraphs
positive
def _loop_forever(): """Run the main loop forever """ global monitor, stepsize, elapsed, naptime while True: start = time.time() monitor.loop() <DeepExtract> global patch, name, path, monitor global stepsize, scale_frequency, scale_spread, scale_amplitude, scale_offset, scale_noise, scale_dutycycle, offset_frequency, offset_spread, offset_amplitude, offset_offset, offset_noise, offset_dutycycle, sample, phase global frequency, spread, amplitude, offset, noise, dutycycle, key, val, elapsed, naptime if patch.getint('signal', 'rewind', default=0): monitor.info('Rewind pressed, jumping back to start of signal') sample = 0 phase = 0 if not patch.getint('signal', 'play', default=1): monitor.info('Stopped') time.sleep(0.1) sample = 0 phase = 0 return if patch.getint('signal', 'pause', default=0): monitor.info('Paused') time.sleep(0.1) return frequency = patch.getfloat('signal', 'frequency', default=0.2) spread = patch.getfloat('signal', 'spread', default=0) amplitude = patch.getfloat('signal', 'amplitude', default=0.3) offset = patch.getfloat('signal', 'offset', default=0.5) noise = patch.getfloat('signal', 'noise', default=0.1) dutycycle = patch.getfloat('signal', 'dutycycle', default=0.5) frequency = EEGsynth.rescale(frequency, slope=scale_frequency, offset=offset_frequency) spread = EEGsynth.rescale(spread, slope=scale_spread, offset=offset_spread) amplitude = EEGsynth.rescale(amplitude, slope=scale_amplitude, offset=offset_amplitude) offset = EEGsynth.rescale(offset, slope=scale_offset, offset=offset_offset) noise = EEGsynth.rescale(noise, slope=scale_noise, offset=offset_noise) dutycycle = EEGsynth.rescale(dutycycle, slope=scale_dutycycle, offset=offset_dutycycle) monitor.update('frequency', frequency) monitor.update('spread', spread) monitor.update('amplitude', amplitude) monitor.update('offset ', offset) monitor.update('noise ', noise) monitor.update('dutycycle', dutycycle) instantaneous_frequency = frequency + np.random.randn(1) * spread instantaneous_frequency = min(max(instantaneous_frequency, 0 * frequency), 2 * frequency) phase = phase + 2 * np.pi * instantaneous_frequency * stepsize key = patch.getstring('output', 'prefix') + '.sin' val = np.sin(phase) * amplitude + offset + np.random.randn(1) * noise patch.setvalue(key, val[0]) key = patch.getstring('output', 'prefix') + '.square' val = signal.square(phase, dutycycle) * amplitude + offset + np.random.randn(1) * noise patch.setvalue(key, val[0]) key = patch.getstring('output', 'prefix') + '.triangle' val = signal.sawtooth(phase, 0.5) * amplitude + offset + np.random.randn(1) * noise patch.setvalue(key, val[0]) key = patch.getstring('output', 'prefix') + '.sawtooth' val = signal.sawtooth(phase, 1) * amplitude + offset + np.random.randn(1) * noise patch.setvalue(key, val[0]) sample += 1 del instantaneous_frequency if len(locals()): print('LOCALS: ' + ', '.join(locals().keys())) </DeepExtract> elapsed = time.time() - start naptime = stepsize - elapsed if naptime > 0: time.sleep(naptime)
def _loop_forever(): """Run the main loop forever """ global monitor, stepsize, elapsed, naptime while True: start = time.time() monitor.loop() global patch, name, path, monitor global stepsize, scale_frequency, scale_spread, scale_amplitude, scale_offset, scale_noise, scale_dutycycle, offset_frequency, offset_spread, offset_amplitude, offset_offset, offset_noise, offset_dutycycle, sample, phase global frequency, spread, amplitude, offset, noise, dutycycle, key, val, elapsed, naptime if patch.getint('signal', 'rewind', default=0): monitor.info('Rewind pressed, jumping back to start of signal') sample = 0 phase = 0 if not patch.getint('signal', 'play', default=1): monitor.info('Stopped') time.sleep(0.1) sample = 0 phase = 0 return if patch.getint('signal', 'pause', default=0): monitor.info('Paused') time.sleep(0.1) return frequency = patch.getfloat('signal', 'frequency', default=0.2) spread = patch.getfloat('signal', 'spread', default=0) amplitude = patch.getfloat('signal', 'amplitude', default=0.3) offset = patch.getfloat('signal', 'offset', default=0.5) noise = patch.getfloat('signal', 'noise', default=0.1) dutycycle = patch.getfloat('signal', 'dutycycle', default=0.5) frequency = EEGsynth.rescale(frequency, slope=scale_frequency, offset=offset_frequency) spread = EEGsynth.rescale(spread, slope=scale_spread, offset=offset_spread) amplitude = EEGsynth.rescale(amplitude, slope=scale_amplitude, offset=offset_amplitude) offset = EEGsynth.rescale(offset, slope=scale_offset, offset=offset_offset) noise = EEGsynth.rescale(noise, slope=scale_noise, offset=offset_noise) dutycycle = EEGsynth.rescale(dutycycle, slope=scale_dutycycle, offset=offset_dutycycle) monitor.update('frequency', frequency) monitor.update('spread', spread) monitor.update('amplitude', amplitude) monitor.update('offset ', offset) monitor.update('noise ', noise) monitor.update('dutycycle', dutycycle) instantaneous_frequency = frequency + np.random.randn(1) * spread instantaneous_frequency = min(max(instantaneous_frequency, 0 * frequency), 2 * frequency) phase = phase + 2 * np.pi * instantaneous_frequency * stepsize key = patch.getstring('output', 'prefix') + '.sin' val = np.sin(phase) * amplitude + offset + np.random.randn(1) * noise patch.setvalue(key, val[0]) key = patch.getstring('output', 'prefix') + '.square' val = signal.square(phase, dutycycle) * amplitude + offset + np.random.randn(1) * noise patch.setvalue(key, val[0]) key = patch.getstring('output', 'prefix') + '.triangle' val = signal.sawtooth(phase, 0.5) * amplitude + offset + np.random.randn(1) * noise patch.setvalue(key, val[0]) key = patch.getstring('output', 'prefix') + '.sawtooth' val = signal.sawtooth(phase, 1) * amplitude + offset + np.random.randn(1) * noise patch.setvalue(key, val[0]) sample += 1 del instantaneous_frequency if len(locals()): print('LOCALS: ' + ', '.join(locals().keys())) elapsed = time.time() - start naptime = stepsize - elapsed if naptime > 0: time.sleep(naptime)
eegsynth
positive
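Every control value read from the patch above is passed through the same rescaling step before use. A hedged sketch of that mapping, assuming EEGsynth.rescale is a plain affine transform (which matches how it is called here, but the real helper may also support a reverse mode):

    def rescale(value, slope=1.0, offset=0.0):
        # map a raw control value onto the working range: y = slope * x + offset
        return slope * value + offset

    frequency = rescale(0.2, slope=10.0, offset=0.0)  # 0.2 from the patch -> 2.0 Hz
    print(frequency)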
def test_cli_createbundle_datadir(tmp_path): tmp_path = str(tmp_path) bfile_path = os.path.join(tmp_path, 'test_bundle_datadir.tar.bz2') <DeepExtract> cmd = '{} {} '.format(sys.executable, os.path.join(cli_dir, 'bse_cli.py')) + '-d ' + fake_data_dir + ' create-bundle gaussian94 bib ' + bfile_path cmd = cmd.split(' ') output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) </DeepExtract> assert os.path.isfile(bfile_path) assert output.startswith(b'Created ')
def test_cli_createbundle_datadir(tmp_path): tmp_path = str(tmp_path) bfile_path = os.path.join(tmp_path, 'test_bundle_datadir.tar.bz2') cmd = '{} {} '.format(sys.executable, os.path.join(cli_dir, 'bse_cli.py')) + '-d ' + fake_data_dir + ' create-bundle gaussian94 bib ' + bfile_path cmd = cmd.split(' ') output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) assert os.path.isfile(bfile_path) assert output.startswith(b'Created ')
basis_set_exchange
positive
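The DeepExtract block above inlines a small CLI runner whose parameter was replaced by the call-site argument string. A rough sketch of what the underlying helper presumably looks like (name, signature, and the cli_dir default are assumptions):

    import os
    import subprocess
    import sys

    def run_cli(args, cli_dir='.'):
        # build "python bse_cli.py <args>", split on spaces, capture combined output
        cmd = '{} {} '.format(sys.executable, os.path.join(cli_dir, 'bse_cli.py')) + args
        cmd = cmd.split(' ')
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)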
def test_outbound_overflowHostAndReset(self): """ Make sure that we cannot overflow our outbound host bandwidth limit @see dhtbot.constants.host_bandwidth_rate """ <DeepExtract> ksender = KRPC_Sender(TreeRoutingTable, 2 ** 50) ksender.transport = HollowTransport() rate_limited_proto = RateLimiter_Patcher(ksender) rate_limited_proto.startProtocol() rate_limited_proto = rate_limited_proto </DeepExtract> rate_limited_proto.sendKRPC(self.query, self.address) self.assertTrue(rate_limited_proto._original.transport._packet_was_sent()) rate_limited_proto.sendKRPC(self.query, self.address) self.assertFalse(rate_limited_proto._original.transport._packet_was_sent()) self.clock.set(1) rate_limited_proto.sendKRPC(self.query, self.address) self.assertTrue(rate_limited_proto._original.transport._packet_was_sent())
def test_outbound_overflowHostAndReset(self): """ Make sure that we cannot overflow our outbound host bandwidth limit @see dhtbot.constants.host_bandwidth_rate """ ksender = KRPC_Sender(TreeRoutingTable, 2 ** 50) ksender.transport = HollowTransport() rate_limited_proto = RateLimiter_Patcher(ksender) rate_limited_proto.startProtocol() rate_limited_proto = rate_limited_proto rate_limited_proto.sendKRPC(self.query, self.address) self.assertTrue(rate_limited_proto._original.transport._packet_was_sent()) rate_limited_proto.sendKRPC(self.query, self.address) self.assertFalse(rate_limited_proto._original.transport._packet_was_sent()) self.clock.set(1) rate_limited_proto.sendKRPC(self.query, self.address) self.assertTrue(rate_limited_proto._original.transport._packet_was_sent())
DHTBot
positive
def mount_tls(config, init_system, dns_name, path, fs_id, mountpoint, options, fallback_ip_address=None): if os.path.ismount(mountpoint) and is_nfs_mount(mountpoint): sys.stdout.write("%s is already mounted, please run 'mount' command to verify\n" % mountpoint) logging.warning('%s is already mounted, mount aborted' % mountpoint) return with bootstrap_tls(config, init_system, dns_name, fs_id, mountpoint, options, fallback_ip_address=fallback_ip_address) as tunnel_proc: mount_completed = threading.Event() t = threading.Thread(target=poll_tunnel_process, args=(tunnel_proc, fs_id, mount_completed)) t.daemon = True t.start() <DeepExtract> if 'tls' in options: mount_path = '127.0.0.1:%s' % path elif fallback_ip_address: mount_path = '%s:%s' % (fallback_ip_address, path) else: mount_path = '%s:%s' % (dns_name, path) if not check_if_platform_is_mac(): command = ['/sbin/mount.nfs4', mount_path, mountpoint, '-o', get_nfs_mount_options(options)] else: command = ['/sbin/mount_nfs', '-o', get_nfs_mount_options(options), mount_path, mountpoint] if 'netns' in options: command = ['nsenter', '--net=' + options['netns']] + command if call_nfs_mount_command_with_retry_succeed(config, options, command, dns_name, mountpoint): return logging.info('Executing: "%s"', ' '.join(command)) proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) (out, err) = proc.communicate() if proc.returncode == 0: post_mount_nfs_success(config, options, dns_name, mountpoint) else: message = 'Failed to mount %s at %s: returncode=%d, stderr="%s"' % (dns_name, mountpoint, proc.returncode, err.strip()) fatal_error(err.strip(), message, proc.returncode) </DeepExtract> mount_completed.set() t.join()
def mount_tls(config, init_system, dns_name, path, fs_id, mountpoint, options, fallback_ip_address=None): if os.path.ismount(mountpoint) and is_nfs_mount(mountpoint): sys.stdout.write("%s is already mounted, please run 'mount' command to verify\n" % mountpoint) logging.warning('%s is already mounted, mount aborted' % mountpoint) return with bootstrap_tls(config, init_system, dns_name, fs_id, mountpoint, options, fallback_ip_address=fallback_ip_address) as tunnel_proc: mount_completed = threading.Event() t = threading.Thread(target=poll_tunnel_process, args=(tunnel_proc, fs_id, mount_completed)) t.daemon = True t.start() if 'tls' in options: mount_path = '127.0.0.1:%s' % path elif fallback_ip_address: mount_path = '%s:%s' % (fallback_ip_address, path) else: mount_path = '%s:%s' % (dns_name, path) if not check_if_platform_is_mac(): command = ['/sbin/mount.nfs4', mount_path, mountpoint, '-o', get_nfs_mount_options(options)] else: command = ['/sbin/mount_nfs', '-o', get_nfs_mount_options(options), mount_path, mountpoint] if 'netns' in options: command = ['nsenter', '--net=' + options['netns']] + command if call_nfs_mount_command_with_retry_succeed(config, options, command, dns_name, mountpoint): return logging.info('Executing: "%s"', ' '.join(command)) proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) (out, err) = proc.communicate() if proc.returncode == 0: post_mount_nfs_success(config, options, dns_name, mountpoint) else: message = 'Failed to mount %s at %s: returncode=%d, stderr="%s"' % (dns_name, mountpoint, proc.returncode, err.strip()) fatal_error(err.strip(), message, proc.returncode) mount_completed.set() t.join()
efs-utils
positive
def get_rcnn_names(cfg): pred = ['rcnn_cls_prob', 'rcnn_bbox_loss'] label = ['rcnn_label', 'rcnn_bbox_target', 'rcnn_bbox_weight'] if cfg.TRAIN.ENABLE_OHEM or cfg.TRAIN.END2END: pred.append('rcnn_label') if cfg.TRAIN.END2END: <DeepExtract> pred = ['rpn_cls_prob', 'rpn_bbox_loss'] label = ['rpn_label', 'rpn_bbox_target', 'rpn_bbox_weight'] (rpn_pred, rpn_label) = (pred, label) </DeepExtract> pred = rpn_pred + pred label = rpn_label return (pred, label)
def get_rcnn_names(cfg): pred = ['rcnn_cls_prob', 'rcnn_bbox_loss'] label = ['rcnn_label', 'rcnn_bbox_target', 'rcnn_bbox_weight'] if cfg.TRAIN.ENABLE_OHEM or cfg.TRAIN.END2END: pred.append('rcnn_label') if cfg.TRAIN.END2END: pred = ['rpn_cls_prob', 'rpn_bbox_loss'] label = ['rpn_label', 'rpn_bbox_target', 'rpn_bbox_weight'] (rpn_pred, rpn_label) = (pred, label) pred = rpn_pred + pred label = rpn_label return (pred, label)
Deep-Feature-video
positive
def show_segmentation(img, mask_result, bboxes_result=None, alpha=0.5, num_grid=40, out_prefix='./'): img_mask = np.zeros(img.shape, np.uint8) img_mask[:, :, 2] = (mask_result * 255).astype(np.uint8) img_mask = cv2.addWeighted(img_mask, alpha, img, 1 - alpha, 0) (H, W, _) = img.shape step_h = H / num_grid step_w = W / num_grid for i in range(num_grid): cv2.line(img_mask, (0, int(i * step_h)), (W, int(i * step_h)), (0, 0, 0)) <DeepExtract> if isinstance(img_mask, np.ndarray): img_mask = Image.fromarray(cv2.cvtColor(img_mask, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img_mask) fontStyle = ImageFont.truetype('./simsun.ttc', int(min(step_h, step_w)), encoding='utf-8') draw.text((max(0, 0), max(int(i * step_h), 0)), str(i), (255, 255, 255), font=fontStyle) img_mask = cv2.cvtColor(np.asarray(img_mask), cv2.COLOR_RGB2BGR) </DeepExtract> cv2.line(img_mask, (int(i * step_w), 0), (int(i * step_w), H), (0, 0, 0)) <DeepExtract> if isinstance(img_mask, np.ndarray): img_mask = Image.fromarray(cv2.cvtColor(img_mask, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img_mask) fontStyle = ImageFont.truetype('./simsun.ttc', int(min(step_h, step_w)), encoding='utf-8') draw.text((max(int(i * step_w), 0), max(0, 0)), str(i), (255, 255, 255), font=fontStyle) img_mask = cv2.cvtColor(np.asarray(img_mask), cv2.COLOR_RGB2BGR) </DeepExtract> cv2.imwrite(out_prefix + '_seg.jpg', img_mask)
def show_segmentation(img, mask_result, bboxes_result=None, alpha=0.5, num_grid=40, out_prefix='./'): img_mask = np.zeros(img.shape, np.uint8) img_mask[:, :, 2] = (mask_result * 255).astype(np.uint8) img_mask = cv2.addWeighted(img_mask, alpha, img, 1 - alpha, 0) (H, W, _) = img.shape step_h = H / num_grid step_w = W / num_grid for i in range(num_grid): cv2.line(img_mask, (0, int(i * step_h)), (W, int(i * step_h)), (0, 0, 0)) if isinstance(img_mask, np.ndarray): img_mask = Image.fromarray(cv2.cvtColor(img_mask, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img_mask) fontStyle = ImageFont.truetype('./simsun.ttc', int(min(step_h, step_w)), encoding='utf-8') draw.text((max(0, 0), max(int(i * step_h), 0)), str(i), (255, 255, 255), font=fontStyle) img_mask = cv2.cvtColor(np.asarray(img_mask), cv2.COLOR_RGB2BGR) cv2.line(img_mask, (int(i * step_w), 0), (int(i * step_w), H), (0, 0, 0)) if isinstance(img_mask, np.ndarray): img_mask = Image.fromarray(cv2.cvtColor(img_mask, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img_mask) fontStyle = ImageFont.truetype('./simsun.ttc', int(min(step_h, step_w)), encoding='utf-8') draw.text((max(int(i * step_w), 0), max(0, 0)), str(i), (255, 255, 255), font=fontStyle) img_mask = cv2.cvtColor(np.asarray(img_mask), cv2.COLOR_RGB2BGR) cv2.imwrite(out_prefix + '_seg.jpg', img_mask)
DAVAR-Lab-OCR
positive
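show_segmentation above inlines the same PIL-based text drawer twice, once per grid axis. A standalone sketch of that helper (the './simsun.ttc' font path is taken from the inlined code; the helper's name and the availability of that font file are assumptions):

    import cv2
    import numpy as np
    from PIL import Image, ImageDraw, ImageFont

    def draw_text(img, text, pos, font_size):
        # round-trip through PIL so non-ASCII text renders, then back to a BGR ndarray
        if isinstance(img, np.ndarray):
            img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(img)
        font = ImageFont.truetype('./simsun.ttc', int(font_size), encoding='utf-8')
        draw.text((max(pos[0], 0), max(pos[1], 0)), text, (255, 255, 255), font=font)
        return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)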
def get_boundary(samples, measure): if measure in ('GAN', 'JSD', 'KL', 'RKL', 'H2', 'DV'): b = samples ** 2 elif measure == 'X2': b = (samples / 2.0) ** 2 elif measure == 'W': b = None else: <DeepExtract> supported_measures = ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1'] raise NotImplementedError('Measure `{}` not supported. Supported: {}'.format(measure, supported_measures)) </DeepExtract> return b.mean()
def get_boundary(samples, measure): if measure in ('GAN', 'JSD', 'KL', 'RKL', 'H2', 'DV'): b = samples ** 2 elif measure == 'X2': b = (samples / 2.0) ** 2 elif measure == 'W': b = None else: supported_measures = ['GAN', 'JSD', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1'] raise NotImplementedError('Measure `{}` not supported. Supported: {}'.format(measure, supported_measures)) return b.mean()
cortex
positive
def forward(self, observations: ObservationType, memory: Memory, prev_actions: torch.Tensor, masks: torch.FloatTensor) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]: """Processes input batched observations to produce new actor and critic values. Processes input batched observations (along with prior hidden states, previous actions, and masks denoting which recurrent hidden states should be masked) and returns an `ActorCriticOutput` object containing the model's policy (distribution over actions) and evaluation of the current state (value). # Parameters observations : Batched input observations. memory : `Memory` containing the hidden states from initial timepoints. prev_actions : Tensor of previous actions taken. masks : Masks applied to hidden states. See `RNNStateEncoder`. # Returns Tuple of the `ActorCriticOutput` and recurrent hidden state. """ <DeepExtract> raise NotImplementedError('Obs Encoder Not Implemented') </DeepExtract> if self.prev_action_embedder.input_size == self.action_space.n + 1: prev_actions_embeds = self.prev_action_embedder(torch.where(condition=0 != masks.view(*prev_actions.shape), input=prev_actions + 1, other=torch.zeros_like(prev_actions))) else: prev_actions_embeds = self.prev_action_embedder(prev_actions) joint_embeds = torch.cat((obs_embeds, prev_actions_embeds), dim=-1) beliefs_dict = {} for (key, model) in self.state_encoders.items(): (beliefs_dict[key], rnn_hidden_states) = model(joint_embeds, memory.tensor(key), masks) memory.set_tensor(key, rnn_hidden_states) <DeepExtract> all_beliefs = torch.stack(list(beliefs_dict.values()), dim=-1) if self.multiple_beliefs: (beliefs, task_weights) = self.fusion_model(all_beliefs=all_beliefs, obs_embeds=obs_embeds) beliefs = all_beliefs.squeeze(-1) (beliefs, task_weights) = (beliefs, None) </DeepExtract> extras = {aux_uuid: {'beliefs': beliefs_dict[aux_uuid] if self.multiple_beliefs else beliefs, 'obs_embeds': obs_embeds, 'aux_model': self.aux_models[aux_uuid] if aux_uuid in self.aux_models else None} for aux_uuid in self.auxiliary_uuids} if self.auxiliary_uuids is not None else {} if self.multiple_beliefs: extras[MultiAuxTaskNegEntropyLoss.UUID] = task_weights actor_critic_output = ActorCriticOutput(distributions=self.actor(beliefs), values=self.critic(beliefs), extras=extras) return (actor_critic_output, memory)
def forward(self, observations: ObservationType, memory: Memory, prev_actions: torch.Tensor, masks: torch.FloatTensor) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]: """Processes input batched observations to produce new actor and critic values. Processes input batched observations (along with prior hidden states, previous actions, and masks denoting which recurrent hidden states should be masked) and returns an `ActorCriticOutput` object containing the model's policy (distribution over actions) and evaluation of the current state (value). # Parameters observations : Batched input observations. memory : `Memory` containing the hidden states from initial timepoints. prev_actions : Tensor of previous actions taken. masks : Masks applied to hidden states. See `RNNStateEncoder`. # Returns Tuple of the `ActorCriticOutput` and recurrent hidden state. """ raise NotImplementedError('Obs Encoder Not Implemented') if self.prev_action_embedder.input_size == self.action_space.n + 1: prev_actions_embeds = self.prev_action_embedder(torch.where(condition=0 != masks.view(*prev_actions.shape), input=prev_actions + 1, other=torch.zeros_like(prev_actions))) else: prev_actions_embeds = self.prev_action_embedder(prev_actions) joint_embeds = torch.cat((obs_embeds, prev_actions_embeds), dim=-1) beliefs_dict = {} for (key, model) in self.state_encoders.items(): (beliefs_dict[key], rnn_hidden_states) = model(joint_embeds, memory.tensor(key), masks) memory.set_tensor(key, rnn_hidden_states) all_beliefs = torch.stack(list(beliefs_dict.values()), dim=-1) if self.multiple_beliefs: (beliefs, task_weights) = self.fusion_model(all_beliefs=all_beliefs, obs_embeds=obs_embeds) beliefs = all_beliefs.squeeze(-1) (beliefs, task_weights) = (beliefs, None) extras = {aux_uuid: {'beliefs': beliefs_dict[aux_uuid] if self.multiple_beliefs else beliefs, 'obs_embeds': obs_embeds, 'aux_model': self.aux_models[aux_uuid] if aux_uuid in self.aux_models else None} for aux_uuid in self.auxiliary_uuids} if self.auxiliary_uuids is not None else {} if self.multiple_beliefs: extras[MultiAuxTaskNegEntropyLoss.UUID] = task_weights actor_critic_output = ActorCriticOutput(distributions=self.actor(beliefs), values=self.critic(beliefs), extras=extras) return (actor_critic_output, memory)
allenact
positive
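One detail worth isolating from the forward pass above: when the previous-action embedder has action_space.n + 1 inputs, index 0 is reserved for "no previous action", so real action indices are shifted up by one and zeroed wherever the masks mark an episode start. A minimal runnable sketch of just that step:

    import torch

    masks = torch.tensor([[1.0], [0.0], [1.0]])   # 0.0 marks an episode start
    prev_actions = torch.tensor([[2], [5], [0]])
    shifted = torch.where(masks.view(*prev_actions.shape) != 0,
                          prev_actions + 1,
                          torch.zeros_like(prev_actions))
    print(shifted)  # tensor([[3], [0], [1]]) -- 0 now means "no previous action"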
def test_split_validLeftAllTheWayDown(self): rt = SubsecondRoutingTable(1) <DeepExtract> split_correctly = rt._split(rt.root) self.assertTrue(split_correctly) lbucket = rt.root.lchild.kbucket rbucket = rt.root.rchild.kbucket self.assertEquals(8, lbucket.maxsize) self.assertEquals(128, rbucket.maxsize) </DeepExtract> lchild = rt.root.lchild <DeepExtract> split_correctly = rt._split(lchild) self.assertTrue(split_correctly) lbucket = lchild.lchild.kbucket rbucket = lchild.rchild.kbucket self.assertEquals(8, lbucket.maxsize) self.assertEquals(64, rbucket.maxsize) </DeepExtract> lchild = lchild.lchild <DeepExtract> split_correctly = rt._split(lchild) self.assertTrue(split_correctly) lbucket = lchild.lchild.kbucket rbucket = lchild.rchild.kbucket self.assertEquals(8, lbucket.maxsize) self.assertEquals(32, rbucket.maxsize) </DeepExtract> lchild = lchild.lchild <DeepExtract> split_correctly = rt._split(lchild) self.assertTrue(split_correctly) lbucket = lchild.lchild.kbucket rbucket = lchild.rchild.kbucket self.assertEquals(8, lbucket.maxsize) self.assertEquals(16, rbucket.maxsize) </DeepExtract> lchild = lchild.lchild <DeepExtract> split_correctly = rt._split(lchild) self.assertTrue(split_correctly) lbucket = lchild.lchild.kbucket rbucket = lchild.rchild.kbucket self.assertEquals(8, lbucket.maxsize) self.assertEquals(8, rbucket.maxsize) </DeepExtract>
def test_split_validLeftAllTheWayDown(self): rt = SubsecondRoutingTable(1) split_correctly = rt._split(rt.root) self.assertTrue(split_correctly) lbucket = rt.root.lchild.kbucket rbucket = rt.root.rchild.kbucket self.assertEquals(8, lbucket.maxsize) self.assertEquals(128, rbucket.maxsize) lchild = rt.root.lchild split_correctly = rt._split(lchild) self.assertTrue(split_correctly) lbucket = lchild.lchild.kbucket rbucket = lchild.rchild.kbucket self.assertEquals(8, lbucket.maxsize) self.assertEquals(64, rbucket.maxsize) lchild = lchild.lchild split_correctly = rt._split(lchild) self.assertTrue(split_correctly) lbucket = lchild.lchild.kbucket rbucket = lchild.rchild.kbucket self.assertEquals(8, lbucket.maxsize) self.assertEquals(32, rbucket.maxsize) lchild = lchild.lchild split_correctly = rt._split(lchild) self.assertTrue(split_correctly) lbucket = lchild.lchild.kbucket rbucket = lchild.rchild.kbucket self.assertEquals(8, lbucket.maxsize) self.assertEquals(16, rbucket.maxsize) lchild = lchild.lchild split_correctly = rt._split(lchild) self.assertTrue(split_correctly) lbucket = lchild.lchild.kbucket rbucket = lchild.rchild.kbucket self.assertEquals(8, lbucket.maxsize) self.assertEquals(8, rbucket.maxsize) </DeepExtract>
DHTBot
positive
def testConv2DSameEven(self): (n, n2) = (4, 2) <DeepExtract> if None in [1, n, n, 1]: x = tf.placeholder(tf.float32, (1, n, n, 1)) else: x = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(n), [n, 1]) + np.reshape(np.arange(n), [1, n]), [1, n, n, 1]), [1, 1, 1, 1])) </DeepExtract> <DeepExtract> if None in [1, 3, 3, 1]: w = tf.placeholder(tf.float32, (1, 3, 3, 1)) else: w = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(3), [3, 1]) + np.reshape(np.arange(3), [1, 3]), [1, 3, 3, 1]), [1, 1, 1, 1])) </DeepExtract> w = tf.reshape(w, [3, 3, 1, 1]) tf.get_variable('Conv/weights', initializer=w) tf.get_variable('Conv/biases', initializer=tf.zeros([1])) tf.get_variable_scope().reuse_variables() y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv') y1_expected = tf.to_float([[14, 28, 43, 26], [28, 48, 66, 37], [43, 66, 84, 46], [26, 37, 46, 22]]) y1_expected = tf.reshape(y1_expected, [1, n, n, 1]) y2 = resnet_utils.subsample(y1, 2) y2_expected = tf.to_float([[14, 43], [43, 84]]) y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1]) y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv') y3_expected = y2_expected y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv') y4_expected = tf.to_float([[48, 37], [37, 22]]) y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1]) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) self.assertAllClose(y1.eval(), y1_expected.eval()) self.assertAllClose(y2.eval(), y2_expected.eval()) self.assertAllClose(y3.eval(), y3_expected.eval()) self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameEven(self): (n, n2) = (4, 2) if None in [1, n, n, 1]: x = tf.placeholder(tf.float32, (1, n, n, 1)) else: x = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(n), [n, 1]) + np.reshape(np.arange(n), [1, n]), [1, n, n, 1]), [1, 1, 1, 1])) if None in [1, 3, 3, 1]: w = tf.placeholder(tf.float32, (1, 3, 3, 1)) else: w = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(3), [3, 1]) + np.reshape(np.arange(3), [1, 3]), [1, 3, 3, 1]), [1, 1, 1, 1])) w = tf.reshape(w, [3, 3, 1, 1]) tf.get_variable('Conv/weights', initializer=w) tf.get_variable('Conv/biases', initializer=tf.zeros([1])) tf.get_variable_scope().reuse_variables() y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv') y1_expected = tf.to_float([[14, 28, 43, 26], [28, 48, 66, 37], [43, 66, 84, 46], [26, 37, 46, 22]]) y1_expected = tf.reshape(y1_expected, [1, n, n, 1]) y2 = resnet_utils.subsample(y1, 2) y2_expected = tf.to_float([[14, 43], [43, 84]]) y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1]) y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv') y3_expected = y2_expected y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv') y4_expected = tf.to_float([[48, 37], [37, 22]]) y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1]) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) self.assertAllClose(y1.eval(), y1_expected.eval()) self.assertAllClose(y2.eval(), y2_expected.eval()) self.assertAllClose(y3.eval(), y3_expected.eval()) self.assertAllClose(y4.eval(), y4_expected.eval())
ARNet
positive
def show_loading_dialog(run: OperationType, title: str, message: str, parent: wx.Window) -> Any: warnings.warn('show_loading_dialog is deprecated.', DeprecationWarning) dialog = wx.ProgressDialog(title, message, maximum=10000, parent=parent, style=wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME | wx.PD_AUTO_HIDE) dialog.Fit() t = time.time() try: <DeepExtract> t = time.time() try: obj = self._operation() if isinstance(obj, GeneratorType): try: while True: if self.stop: raise OperationSilentAbort progress = next(obj) if isinstance(progress, (list, tuple)): if len(progress) >= 2: self.message = progress[1] if len(progress) >= 1: self.progress = progress[0] elif isinstance(progress, (int, float)): self.progress = progress except StopIteration as e: self.out = e.value except BaseException as e: self.error = e time.sleep(max(0.2 - time.time() + t, 0)) </DeepExtract> if isinstance(obj, GeneratorType): try: while True: progress = next(obj) if isinstance(progress, (list, tuple)): if len(progress) >= 2: message = progress[1] if len(progress) >= 1: progress = progress[0] if isinstance(progress, (int, float)) and isinstance(message, str): dialog.Update(min(9999, max(0, int(progress * 10000))), message) wx.Yield() except StopIteration as e: obj = e.value except Exception as e: dialog.Update(10000) raise e time.sleep(max(0.2 - time.time() + t, 0)) dialog.Update(10000) return obj
def show_loading_dialog(run: OperationType, title: str, message: str, parent: wx.Window) -> Any: warnings.warn('show_loading_dialog is deprecated.', DeprecationWarning) dialog = wx.ProgressDialog(title, message, maximum=10000, parent=parent, style=wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME | wx.PD_AUTO_HIDE) dialog.Fit() t = time.time() try: t = time.time() try: obj = self._operation() if isinstance(obj, GeneratorType): try: while True: if self.stop: raise OperationSilentAbort progress = next(obj) if isinstance(progress, (list, tuple)): if len(progress) >= 2: self.message = progress[1] if len(progress) >= 1: self.progress = progress[0] elif isinstance(progress, (int, float)): self.progress = progress except StopIteration as e: self.out = e.value except BaseException as e: self.error = e time.sleep(max(0.2 - time.time() + t, 0)) if isinstance(obj, GeneratorType): try: while True: progress = next(obj) if isinstance(progress, (list, tuple)): if len(progress) >= 2: message = progress[1] if len(progress) >= 1: progress = progress[0] if isinstance(progress, (int, float)) and isinstance(message, str): dialog.Update(min(9999, max(0, int(progress * 10000))), message) wx.Yield() except StopIteration as e: obj = e.value except Exception as e: dialog.Update(10000) raise e time.sleep(max(0.2 - time.time() + t, 0)) dialog.Update(10000) return obj
Amulet-Map-Editor
positive
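The dialog above drives any operation written as a generator of progress updates. A hedged example of an operation targeting that protocol (the (fraction, message) tuple shape and the final return value are inferred from the consuming loop above, not from documented API):

    import time
    from typing import Generator, Tuple

    def my_operation() -> Generator[Tuple[float, str], None, int]:
        total = 5
        for i in range(total):
            # each yield updates the progress bar: (fraction in [0, 1], status text)
            yield (i / total, 'processing item %d of %d' % (i + 1, total))
            time.sleep(0.1)
        return total  # delivered to the caller via StopIteration.value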
def Value_To_Color(value, range_min, range_max): if range_min == range_max or abs(range_max - range_min) < 0.001: remaped_value = 0.5 else: remaped_value = (value - range_min) / (range_max - range_min) <DeepExtract> remaped_value = max(min(remaped_value, 1), 0) </DeepExtract> hue = (1 - remaped_value) * 0.67 color = colorsys.hsv_to_rgb(hue, 1, 1) color4 = (color[0], color[1], color[2], 1) return color4
def Value_To_Color(value, range_min, range_max): if range_min == range_max or abs(range_max - range_min) < 0.001: remaped_value = 0.5 else: remaped_value = (value - range_min) / (range_max - range_min) remaped_value = max(min(remaped_value, 1), 0) hue = (1 - remaped_value) * 0.67 color = colorsys.hsv_to_rgb(hue, 1, 1) color4 = (color[0], color[1], color[2], 1) return color4
Blender-Texel-Density-Checker
positive
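Value_To_Color maps a value within [range_min, range_max] onto a blue-to-red hue ramp (hue 0.67 down to 0). A quick check of the arithmetic, with the inlined clamp helper made explicit (the helper's name is an assumption):

    import colorsys

    def saturate(v, lo=0.0, hi=1.0):
        # clamp v into [lo, hi], as the inlined max(min(...)) expression does
        return max(min(v, hi), lo)

    remapped = saturate((5.0 - 0.0) / (10.0 - 0.0))  # midpoint of [0, 10] -> 0.5
    hue = (1 - remapped) * 0.67                      # 0.335, a green-ish hue
    print(colorsys.hsv_to_rgb(hue, 1, 1))            # approximately (0.0, 1.0, 0.01)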
def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: <DeepExtract> name = filesType.__name__ + 'Sub' if hasattr(CurrentSubclassModule_, name): subclass = getattr(CurrentSubclassModule_, name) else: subclass = None </DeepExtract> if subclass is not None: return subclass(*args_, **kwargs_) if filesType.subclass: return filesType.subclass(*args_, **kwargs_) else: return filesType(*args_, **kwargs_)
def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: name = filesType.__name__ + 'Sub' if hasattr(CurrentSubclassModule_, name): subclass = getattr(CurrentSubclassModule_, name) else: subclass = None if subclass is not None: return subclass(*args_, **kwargs_) if filesType.subclass: return filesType.subclass(*args_, **kwargs_) else: return filesType(*args_, **kwargs_)
autopkg
positive
def Close(self): if self.oOutput != None: <DeepExtract> if self.fOut == None or self.console: try: print('Finish', end='%d error(s)' % self.errors) except UnicodeEncodeError: encoding = sys.stdout.encoding print('Finish'.encode(encoding, errors='backslashreplace').decode(encoding), end='%d error(s)' % self.errors) if self.fOut != None: self.fOut.write('Finish' + '\n') self.fOut.flush() </DeepExtract> self.oOutput.Close()
def Close(self): if self.oOutput != None: if self.fOut == None or self.console: try: print('Finish', end='%d error(s)' % self.errors) except UnicodeEncodeError: encoding = sys.stdout.encoding print('Finish'.encode(encoding, errors='backslashreplace').decode(encoding), end='%d error(s)' % self.errors) if self.fOut != None: self.fOut.write('Finish' + '\n') self.fOut.flush() self.oOutput.Close()
Beta
positive
def __init__(self, block, layers, atrous=None, os=16): super(ResNet_Atrous, self).__init__() stride_list = None if os == 8: stride_list = [2, 1, 1] elif os == 16: stride_list = [2, 2, 1] else: raise ValueError('resnet_atrous.py: output stride=%d is not supported.' % os) self.inplanes = 64 self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = SynchronizedBatchNorm2d(64, momentum=bn_mom) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) <DeepExtract> downsample = None stride = 1 if atrous == None: atrous = [1] * layers[0] elif isinstance(atrous, int): atrous_list = [atrous] * layers[0] atrous = atrous_list if stride != 1 or 64 != 64 * block.expansion: downsample = nn.Sequential(nn.Conv2d(64, 64 * block.expansion, kernel_size=1, stride=stride, bias=False), SynchronizedBatchNorm2d(64 * block.expansion, momentum=bn_mom)) block_list = [] block_list.append(block(64, 64, stride=stride, atrous=atrous[0], downsample=downsample)) self.inplanes = 64 * block.expansion for i in range(1, layers[0]): block_list.append(block(64 * block.expansion, 64, stride=1, atrous=atrous[i])) self.layer1 = nn.Sequential(*block_list) </DeepExtract> <DeepExtract> downsample = None if atrous == None: atrous = [1] * layers[1] elif isinstance(atrous, int): atrous_list = [atrous] * layers[1] atrous = atrous_list if stride_list[0] != 1 or 256 != 128 * block.expansion: downsample = nn.Sequential(nn.Conv2d(256, 128 * block.expansion, kernel_size=1, stride=stride_list[0], bias=False), SynchronizedBatchNorm2d(128 * block.expansion, momentum=bn_mom)) block_list = [] block_list.append(block(256, 128, stride=stride_list[0], atrous=atrous[0], downsample=downsample)) self.inplanes = 128 * block.expansion for i in range(1, layers[1]): block_list.append(block(128 * block.expansion, 128, stride=1, atrous=atrous[i])) self.layer2 = nn.Sequential(*block_list) </DeepExtract> <DeepExtract> downsample = None atrous3 = 16 // os if atrous3 == None: atrous3 = [1] * layers[2] elif isinstance(atrous3, int): atrous3 = [atrous3] * layers[2] if stride_list[1] != 1 or 512 != 256 * block.expansion: downsample = nn.Sequential(nn.Conv2d(512, 256 * block.expansion, kernel_size=1, stride=stride_list[1], bias=False), SynchronizedBatchNorm2d(256 * block.expansion, momentum=bn_mom)) block_list = [] block_list.append(block(512, 256, stride=stride_list[1], atrous=atrous3[0], downsample=downsample)) self.inplanes = 256 * block.expansion for i in range(1, layers[2]): block_list.append(block(256 * block.expansion, 256, stride=1, atrous=atrous3[i])) self.layer3 = nn.Sequential(*block_list) </DeepExtract> <DeepExtract> downsample = None atrous4 = [item * 16 // os for item in atrous] if atrous4 == None: atrous4 = [1] * layers[3] elif isinstance(atrous4, int): atrous4 = [atrous4] * layers[3] if stride_list[2] != 1 or 1024 != 512 * block.expansion: downsample = nn.Sequential(nn.Conv2d(1024, 512 * block.expansion, kernel_size=1, stride=stride_list[2], bias=False), SynchronizedBatchNorm2d(512 * block.expansion, momentum=bn_mom)) block_list = [] block_list.append(block(1024, 512, stride=stride_list[2], atrous=atrous4[0], downsample=downsample)) self.inplanes = 512 * block.expansion for i in range(1, layers[3]): block_list.append(block(512 * block.expansion, 512, stride=1, atrous=atrous4[i])) self.layer4 = nn.Sequential(*block_list) </DeepExtract> self.layers = [] for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, SynchronizedBatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0)
def __init__(self, block, layers, atrous=None, os=16): super(ResNet_Atrous, self).__init__() stride_list = None if os == 8: stride_list = [2, 1, 1] elif os == 16: stride_list = [2, 2, 1] else: raise ValueError('resnet_atrous.py: output stride=%d is not supported.' % os) self.inplanes = 64 self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = SynchronizedBatchNorm2d(64, momentum=bn_mom) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) downsample = None stride = 1 if atrous == None: atrous = [1] * layers[0] elif isinstance(atrous, int): atrous_list = [atrous] * layers[0] atrous = atrous_list if stride != 1 or 64 != 64 * block.expansion: downsample = nn.Sequential(nn.Conv2d(64, 64 * block.expansion, kernel_size=1, stride=stride, bias=False), SynchronizedBatchNorm2d(64 * block.expansion, momentum=bn_mom)) block_list = [] block_list.append(block(64, 64, stride=stride, atrous=atrous[0], downsample=downsample)) self.inplanes = 64 * block.expansion for i in range(1, layers[0]): block_list.append(block(64 * block.expansion, 64, stride=1, atrous=atrous[i])) self.layer1 = nn.Sequential(*block_list) downsample = None if atrous == None: atrous = [1] * layers[1] elif isinstance(atrous, int): atrous_list = [atrous] * layers[1] atrous = atrous_list if stride_list[0] != 1 or 256 != 128 * block.expansion: downsample = nn.Sequential(nn.Conv2d(256, 128 * block.expansion, kernel_size=1, stride=stride_list[0], bias=False), SynchronizedBatchNorm2d(128 * block.expansion, momentum=bn_mom)) block_list = [] block_list.append(block(256, 128, stride=stride_list[0], atrous=atrous[0], downsample=downsample)) self.inplanes = 128 * block.expansion for i in range(1, layers[1]): block_list.append(block(128 * block.expansion, 128, stride=1, atrous=atrous[i])) self.layer2 = nn.Sequential(*block_list) downsample = None atrous3 = 16 // os if atrous3 == None: atrous3 = [1] * layers[2] elif isinstance(atrous3, int): atrous3 = [atrous3] * layers[2] if stride_list[1] != 1 or 512 != 256 * block.expansion: downsample = nn.Sequential(nn.Conv2d(512, 256 * block.expansion, kernel_size=1, stride=stride_list[1], bias=False), SynchronizedBatchNorm2d(256 * block.expansion, momentum=bn_mom)) block_list = [] block_list.append(block(512, 256, stride=stride_list[1], atrous=atrous3[0], downsample=downsample)) self.inplanes = 256 * block.expansion for i in range(1, layers[2]): block_list.append(block(256 * block.expansion, 256, stride=1, atrous=atrous3[i])) self.layer3 = nn.Sequential(*block_list) downsample = None atrous4 = [item * 16 // os for item in atrous] if atrous4 == None: atrous4 = [1] * layers[3] elif isinstance(atrous4, int): atrous4 = [atrous4] * layers[3] if stride_list[2] != 1 or 1024 != 512 * block.expansion: downsample = nn.Sequential(nn.Conv2d(1024, 512 * block.expansion, kernel_size=1, stride=stride_list[2], bias=False), SynchronizedBatchNorm2d(512 * block.expansion, momentum=bn_mom)) block_list = [] block_list.append(block(1024, 512, stride=stride_list[2], atrous=atrous4[0], downsample=downsample)) self.inplanes = 512 * block.expansion for i in range(1, layers[3]): block_list.append(block(512 * block.expansion, 512, stride=1, atrous=atrous4[i])) self.layer4 = nn.Sequential(*block_list) self.layers = [] for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, SynchronizedBatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0)
CVPR2020_MANet
positive
def snow(x, severity=1):
    c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8), (0.2, 0.3, 2, 0.5, 12, 4, 0.7), (0.55, 0.3, 4, 0.9, 12, 8, 0.7), (0.55, 0.3, 4.5, 0.85, 12, 8, 0.65), (0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][severity - 1]
    x = np.array(x, dtype=np.float32) / 255.0
    snow_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])
    <DeepExtract>
    h = snow_layer[..., np.newaxis].shape[0]
    w = snow_layer[..., np.newaxis].shape[1]
    ch = int(np.ceil(h / c[2]))
    cw = int(np.ceil(w / c[2]))
    top_h = (h - ch) // 2
    top_w = (w - cw) // 2
    snow_layer[..., np.newaxis] = scizoom(snow_layer[..., np.newaxis][top_h:top_h + ch, top_w:top_w + cw], (c[2], c[2], 1), order=1)
    trim_top_h = (snow_layer[..., np.newaxis].shape[0] - h) // 2
    trim_top_w = (snow_layer[..., np.newaxis].shape[1] - w) // 2
    snow_layer = snow_layer[..., np.newaxis][trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]
    </DeepExtract>
    snow_layer[snow_layer < c[3]] = 0
    snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')
    output = BytesIO()
    snow_layer.save(output, format='PNG')
    snow_layer = MotionImage(blob=output.getvalue())
    snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45))
    snow_layer = cv2.imdecode(np.fromstring(snow_layer.make_blob(), np.uint8), cv2.IMREAD_UNCHANGED) / 255.0
    snow_layer = snow_layer[..., np.newaxis]
    x = c[6] * x + (1 - c[6]) * np.maximum(x, cv2.cvtColor(x, cv2.COLOR_RGB2GRAY).reshape(args.CROP_SIZE, args.CROP_SIZE, 1) * 1.5 + 0.5)
    return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
def snow(x, severity=1):
    c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8), (0.2, 0.3, 2, 0.5, 12, 4, 0.7), (0.55, 0.3, 4, 0.9, 12, 8, 0.7), (0.55, 0.3, 4.5, 0.85, 12, 8, 0.65), (0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][severity - 1]
    x = np.array(x, dtype=np.float32) / 255.0
    snow_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])
    h = snow_layer[..., np.newaxis].shape[0]
    w = snow_layer[..., np.newaxis].shape[1]
    ch = int(np.ceil(h / c[2]))
    cw = int(np.ceil(w / c[2]))
    top_h = (h - ch) // 2
    top_w = (w - cw) // 2
    snow_layer[..., np.newaxis] = scizoom(snow_layer[..., np.newaxis][top_h:top_h + ch, top_w:top_w + cw], (c[2], c[2], 1), order=1)
    trim_top_h = (snow_layer[..., np.newaxis].shape[0] - h) // 2
    trim_top_w = (snow_layer[..., np.newaxis].shape[1] - w) // 2
    snow_layer = snow_layer[..., np.newaxis][trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]
    snow_layer[snow_layer < c[3]] = 0
    snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')
    output = BytesIO()
    snow_layer.save(output, format='PNG')
    snow_layer = MotionImage(blob=output.getvalue())
    snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45))
    snow_layer = cv2.imdecode(np.fromstring(snow_layer.make_blob(), np.uint8), cv2.IMREAD_UNCHANGED) / 255.0
    snow_layer = snow_layer[..., np.newaxis]
    x = c[6] * x + (1 - c[6]) * np.maximum(x, cv2.cvtColor(x, cv2.COLOR_RGB2GRAY).reshape(args.CROP_SIZE, args.CROP_SIZE, 1) * 1.5 + 0.5)
    return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
assembled-cnn
positive
def write_cz2010_hourly_temp_data_to_cache(usaf_id, ts):
    <DeepExtract>
    key = 'cz2010-hourly-{}'.format(usaf_id)
    </DeepExtract>
    store = eeweather.connections.key_value_store_proxy.get_store()
    return store.save_json(key, serialize_cz2010_hourly_temp_data(ts))
def write_cz2010_hourly_temp_data_to_cache(usaf_id, ts):
    key = 'cz2010-hourly-{}'.format(usaf_id)
    store = eeweather.connections.key_value_store_proxy.get_store()
    return store.save_json(key, serialize_cz2010_hourly_temp_data(ts))
eeweather
positive
def print_D_H_V(f, w, h1):
    <DeepExtract>
    (d, h1, v) = (np.degrees(2 * np.arctan(np.sqrt(w * w + h1 * h1) / (2 * f))), np.degrees(2 * np.arctan(w / (2 * f))), np.degrees(2 * np.arctan(h1 / (2 * f))))
    </DeepExtract>
    print(f'DFOV: {d}, HFOV: {h1}, VFOV: {v}')
def print_D_H_V(f, w, h1):
    (d, h1, v) = (np.degrees(2 * np.arctan(np.sqrt(w * w + h1 * h1) / (2 * f))), np.degrees(2 * np.arctan(w / (2 * f))), np.degrees(2 * np.arctan(h1 / (2 * f))))
    print(f'DFOV: {d}, HFOV: {h1}, VFOV: {v}')
depthai-experiments
positive
def reduce_armor(self, amount):
    """Reduce armor by AMOUNT, and if the True QueenAnt has no armor
    remaining, signal the end of the game.
    """
    self.armor -= amount
    if self.armor <= 0:
        if self.is_queen:
            <DeepExtract>
            raise BeesWinException()
            </DeepExtract>
        else:
            self.place.remove_insect(self)
def reduce_armor(self, amount):
    """Reduce armor by AMOUNT, and if the True QueenAnt has no armor
    remaining, signal the end of the game.
    """
    self.armor -= amount
    if self.armor <= 0:
        if self.is_queen:
            raise BeesWinException()
        else:
            self.place.remove_insect(self)
cs61a
positive
def get_bpb_media(self):
    """Get and return the BPB media value."""
    <DeepExtract>
    if self.fat_element_size in [2, 4]:
        fat_item_offset = 0 * self.fat_element_size
        if fat_item_offset + self.fat_element_size > self.fat_size or 0 > self.last_valid_cluster:
            raise FileAllocationTableException('Out of bounds, FAT element: {}'.format(0))
        self.fat_object.seek(self.fat_offset + fat_item_offset)
        next_element_raw = self.fat_object.read(self.fat_element_size)
        if len(next_element_raw) != self.fat_element_size:
            raise FileAllocationTableException('Truncated FAT entry, FAT element: {}'.format(0))
        if self.fat_element_size == 4:
            next_cluster = struct.unpack('<L', next_element_raw)[0] & 268435455
        else:
            next_cluster = struct.unpack('<H', next_element_raw)[0]
    else:
        fat_item_offset = 0 + 0 // 2
        if fat_item_offset + 2 > self.fat_size or 0 > self.last_valid_cluster:
            raise FileAllocationTableException('Out of bounds, FAT element: {}'.format(0))
        self.fat_object.seek(self.fat_offset + fat_item_offset)
        next_element_raw = self.fat_object.read(2)
        if len(next_element_raw) != 2:
            raise FileAllocationTableException('Truncated FAT entry, FAT element: {}'.format(0))
        next_item = struct.unpack('<H', next_element_raw)[0]
        if 0 % 2 == 0:
            next_cluster = next_item & 4095
        else:
            next_cluster = next_item >> 4
    fat_0 = next_cluster
    </DeepExtract>
    return fat_0 & 255
def get_bpb_media(self):
    """Get and return the BPB media value."""
    if self.fat_element_size in [2, 4]:
        fat_item_offset = 0 * self.fat_element_size
        if fat_item_offset + self.fat_element_size > self.fat_size or 0 > self.last_valid_cluster:
            raise FileAllocationTableException('Out of bounds, FAT element: {}'.format(0))
        self.fat_object.seek(self.fat_offset + fat_item_offset)
        next_element_raw = self.fat_object.read(self.fat_element_size)
        if len(next_element_raw) != self.fat_element_size:
            raise FileAllocationTableException('Truncated FAT entry, FAT element: {}'.format(0))
        if self.fat_element_size == 4:
            next_cluster = struct.unpack('<L', next_element_raw)[0] & 268435455
        else:
            next_cluster = struct.unpack('<H', next_element_raw)[0]
    else:
        fat_item_offset = 0 + 0 // 2
        if fat_item_offset + 2 > self.fat_size or 0 > self.last_valid_cluster:
            raise FileAllocationTableException('Out of bounds, FAT element: {}'.format(0))
        self.fat_object.seek(self.fat_offset + fat_item_offset)
        next_element_raw = self.fat_object.read(2)
        if len(next_element_raw) != 2:
            raise FileAllocationTableException('Truncated FAT entry, FAT element: {}'.format(0))
        next_item = struct.unpack('<H', next_element_raw)[0]
        if 0 % 2 == 0:
            next_cluster = next_item & 4095
        else:
            next_cluster = next_item >> 4
    fat_0 = next_cluster
    return fat_0 & 255
dfir_ntfs
positive
def get_all_orders(self, symbol: 'str', orderId: 'long'=None, startTime: 'long'=None, endTime: 'long'=None, limit: 'int'=None) -> any:
    """
    All Orders (USER_DATA)
    GET /fapi/v1/allOrders (HMAC SHA256)
    Get all account orders; active, canceled, or filled.
    """
    response = call_sync(self.request_impl.get_all_orders(symbol, orderId, startTime, endTime, limit))
    <DeepExtract>
    for (k, v) in response[1].items():
        self.limits[k] = v
    </DeepExtract>
    return response[0]
def get_all_orders(self, symbol: 'str', orderId: 'long'=None, startTime: 'long'=None, endTime: 'long'=None, limit: 'int'=None) -> any:
    """
    All Orders (USER_DATA)
    GET /fapi/v1/allOrders (HMAC SHA256)
    Get all account orders; active, canceled, or filled.
    """
    response = call_sync(self.request_impl.get_all_orders(symbol, orderId, startTime, endTime, limit))
    for (k, v) in response[1].items():
        self.limits[k] = v
    return response[0]
Binance_Futures_python
positive
def build(self):
    if 'kern_size' in self.conf.keys():
        KERN_SIZE = self.conf['kern_size']
    else:
        KERN_SIZE = 5
    (batch_size, img_height, img_width, color_channels) = self.images[0].get_shape()[0:4]
    lstm_func = basic_conv_lstm_cell
    if self.states != None:
        current_state = self.states[0]
    else:
        current_state = None
    if self.actions == None:
        self.actions = [None for _ in self.images]
    if self.k == -1:
        feedself = True
    else:
        num_ground_truth = tf.to_int32(tf.round(tf.to_float(batch_size) * (self.k / (self.k + tf.exp(self.iter_num / self.k)))))
        feedself = False
    if 'lstm_size' in self.conf:
        lstm_size = self.conf['lstm_size']
        print('using lstm size', lstm_size)
    else:
        ngf = self.conf['ngf']
        lstm_size = np.int32(np.array([ngf, ngf * 2, ngf * 4, ngf * 2, ngf]))
    (lstm_state1, lstm_state2, lstm_state3, lstm_state4) = (None, None, None, None)
    (lstm_state5, lstm_state6, lstm_state7) = (None, None, None)
    for (t, action) in enumerate(self.actions):
        print(t)
        reuse = bool(self.gen_images)
        done_warm_start = len(self.gen_images) > self.context_frames - 1
        with slim.arg_scope([lstm_func, slim.layers.conv2d, slim.layers.fully_connected, tf_layers.layer_norm, slim.layers.conv2d_transpose], reuse=reuse):
            if feedself and done_warm_start:
                prev_image = self.gen_images[-1]
                if self.pix_distributions1 != None:
                    prev_pix_distrib1 = self.gen_distrib1[-1]
                    if 'ndesig' in self.conf:
                        prev_pix_distrib2 = self.gen_distrib2[-1]
            elif done_warm_start:
                <DeepExtract>
                idx = tf.random_shuffle(tf.range(int(batch_size)))
                ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
                generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))
                ground_truth_examps = tf.gather(self.images[t], ground_truth_idx)
                generated_examps = tf.gather(self.gen_images[-1], generated_idx)
                prev_image = tf.dynamic_stitch([ground_truth_idx, generated_idx], [ground_truth_examps, generated_examps])
                </DeepExtract>
            else:
                prev_image = self.images[t]
                if self.pix_distributions1 != None:
                    prev_pix_distrib1 = self.pix_distributions1[t]
                    if 'ndesig' in self.conf:
                        prev_pix_distrib2 = self.pix_distributions2[t]
                    if len(prev_pix_distrib1.get_shape()) == 3:
                        prev_pix_distrib1 = tf.expand_dims(prev_pix_distrib1, -1)
                        if 'ndesig' in self.conf:
                            prev_pix_distrib2 = tf.expand_dims(prev_pix_distrib2, -1)
            if 'refeed_firstimage' in self.conf:
                assert self.conf['model'] == 'STP'
                if t > 1:
                    input_image = self.images[1]
                    print('refeed with image 1')
                else:
                    input_image = prev_image
            else:
                input_image = prev_image
            if not 'ignore_state_action' in self.conf:
                state_action = tf.concat(axis=1, values=[action, current_state])
            enc0 = slim.layers.conv2d(input_image, 32, [5, 5], stride=2, scope='scale1_conv1', normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'layer_norm1'})
            (hidden1, lstm_state1) = lstm_func(enc0, lstm_state1, lstm_size[0], scope='state1')
            hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
            enc1 = slim.layers.conv2d(hidden1, hidden1.get_shape()[3], [3, 3], stride=2, scope='conv2')
            (hidden3, lstm_state3) = lstm_func(enc1, lstm_state3, lstm_size[1], scope='state3')
            hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4')
            enc2 = slim.layers.conv2d(hidden3, hidden3.get_shape()[3], [3, 3], stride=2, scope='conv3')
            if not 'ignore_state_action' in self.conf:
                if 'ignore_state' in self.conf:
                    lowdim = action
                    print('ignoring state')
                else:
                    lowdim = state_action
                smear = tf.reshape(lowdim, [int(batch_size), 1, 1, int(lowdim.get_shape()[1])])
                smear = tf.tile(smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1])
                enc2 = tf.concat(axis=3, values=[enc2, smear])
            else:
                print('ignoring states and actions')
            enc3 = slim.layers.conv2d(enc2, hidden3.get_shape()[3], [1, 1], stride=1, scope='conv4')
            (hidden5, lstm_state5) = lstm_func(enc3, lstm_state5, lstm_size[2], scope='state5')
            hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6')
            enc4 = slim.layers.conv2d_transpose(hidden5, hidden5.get_shape()[3], 3, stride=2, scope='convt1')
            (hidden6, lstm_state6) = lstm_func(enc4, lstm_state6, lstm_size[3], scope='state6')
            hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7')
            if 'noskip' not in self.conf:
                hidden6 = tf.concat(axis=3, values=[hidden6, enc1])
            enc5 = slim.layers.conv2d_transpose(hidden6, hidden6.get_shape()[3], 3, stride=2, scope='convt2')
            (hidden7, lstm_state7) = lstm_func(enc5, lstm_state7, lstm_size[4], scope='state7')
            hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8')
            if not 'noskip' in self.conf:
                hidden7 = tf.concat(axis=3, values=[hidden7, enc0])
            enc6 = slim.layers.conv2d_transpose(hidden7, hidden7.get_shape()[3], 3, stride=2, scope='convt3', normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'layer_norm9'})
            if 'transform_from_firstimage' in self.conf:
                prev_image = self.images[1]
                if self.pix_distributions1 != None:
                    prev_pix_distrib1 = self.pix_distributions1[1]
                    prev_pix_distrib1 = tf.expand_dims(prev_pix_distrib1, -1)
                print('transform from image 1')
            if self.conf['model'] == 'DNA':
                trafo_input = slim.layers.conv2d_transpose(enc6, KERN_SIZE ** 2, 1, stride=1, scope='convt4_cam2')
                transformed_l = [self.dna_transformation(prev_image, trafo_input, self.conf['kern_size'])]
                if self.pix_distributions1 != None:
                    transf_distrib_ndesig1 = [self.dna_transformation(prev_pix_distrib1, trafo_input, KERN_SIZE)]
                    if 'ndesig' in self.conf:
                        transf_distrib_ndesig2 = [self.dna_transformation(prev_pix_distrib2, trafo_input, KERN_SIZE)]
                extra_masks = 1
            if self.conf['model'] == 'CDNA':
                if 'gen_pix' in self.conf:
                    enc7 = slim.layers.conv2d_transpose(enc6, color_channels, 1, stride=1, scope='convt4', activation_fn=None)
                    transformed_l = [tf.nn.sigmoid(enc7)]
                    extra_masks = 2
                else:
                    transformed_l = []
                    extra_masks = 1
                cdna_input = tf.reshape(hidden5, [int(batch_size), -1])
                <DeepExtract>
                batch_size = int(cdna_input.get_shape()[0])
                height = int(prev_image.get_shape()[1])
                width = int(prev_image.get_shape()[2])
                DNA_KERN_SIZE = self.conf['kern_size']
                num_masks = self.conf['num_masks']
                color_channels = int(prev_image.get_shape()[3])
                cdna_kerns = slim.layers.fully_connected(cdna_input, DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks, scope='cdna_params', activation_fn=None, reuse=reuse)
                cdna_kerns = tf.reshape(cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
                cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
                norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keepdims=True)
                cdna_kerns /= norm_factor
                cdna_kerns_summary = cdna_kerns
                cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
                cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])
                prev_image = tf.transpose(prev_image, [3, 1, 2, 0])
                transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')
                transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])
                transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
                transformed = tf.unstack(value=transformed, axis=-1)
                (new_transformed, _) = (transformed, cdna_kerns_summary)
                </DeepExtract>
                transformed_l += new_transformed
                self.moved_images.append(transformed_l)
                if self.pix_distributions1 != None:
                    <DeepExtract>
                    batch_size = int(cdna_input.get_shape()[0])
                    height = int(prev_pix_distrib1.get_shape()[1])
                    width = int(prev_pix_distrib1.get_shape()[2])
                    DNA_KERN_SIZE = self.conf['kern_size']
                    num_masks = self.conf['num_masks']
                    color_channels = int(prev_pix_distrib1.get_shape()[3])
                    cdna_kerns = slim.layers.fully_connected(cdna_input, DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks, scope='cdna_params', activation_fn=None, reuse=True)
                    cdna_kerns = tf.reshape(cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
                    cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
                    norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keepdims=True)
                    cdna_kerns /= norm_factor
                    cdna_kerns_summary = cdna_kerns
                    cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
                    cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])
                    prev_pix_distrib1 = tf.transpose(prev_pix_distrib1, [3, 1, 2, 0])
                    transformed = tf.nn.depthwise_conv2d(prev_pix_distrib1, cdna_kerns, [1, 1, 1, 1], 'SAME')
                    transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])
                    transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
                    transformed = tf.unstack(value=transformed, axis=-1)
                    (transf_distrib_ndesig1, _) = (transformed, cdna_kerns_summary)
                    </DeepExtract>
                    self.moved_pix_distrib1.append(transf_distrib_ndesig1)
                    if 'ndesig' in self.conf:
                        <DeepExtract>
                        batch_size = int(cdna_input.get_shape()[0])
                        height = int(prev_pix_distrib2.get_shape()[1])
                        width = int(prev_pix_distrib2.get_shape()[2])
                        DNA_KERN_SIZE = self.conf['kern_size']
                        num_masks = self.conf['num_masks']
                        color_channels = int(prev_pix_distrib2.get_shape()[3])
                        cdna_kerns = slim.layers.fully_connected(cdna_input, DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks, scope='cdna_params', activation_fn=None, reuse=True)
                        cdna_kerns = tf.reshape(cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
                        cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
                        norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keepdims=True)
                        cdna_kerns /= norm_factor
                        cdna_kerns_summary = cdna_kerns
                        cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
                        cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])
                        prev_pix_distrib2 = tf.transpose(prev_pix_distrib2, [3, 1, 2, 0])
                        transformed = tf.nn.depthwise_conv2d(prev_pix_distrib2, cdna_kerns, [1, 1, 1, 1], 'SAME')
                        transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])
                        transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
                        transformed = tf.unstack(value=transformed, axis=-1)
                        (transf_distrib_ndesig2, _) = (transformed, cdna_kerns_summary)
                        </DeepExtract>
                        self.moved_pix_distrib2.append(transf_distrib_ndesig2)
            if self.conf['model'] == 'STP':
                enc7 = slim.layers.conv2d_transpose(enc6, color_channels, 1, stride=1, scope='convt5', activation_fn=None)
                if 'gen_pix' in self.conf:
                    transformed_l = [tf.nn.sigmoid(enc7)]
                    extra_masks = 2
                else:
                    transformed_l = []
                    extra_masks = 1
                enc_stp = tf.reshape(hidden5, [int(batch_size), -1])
                stp_input = slim.layers.fully_connected(enc_stp, 200, scope='fc_stp_cam2')
                reuse_stp = None
                if reuse:
                    reuse_stp = reuse
                <DeepExtract>
                from spatial_transformer import transformer
                identity_params = tf.convert_to_tensor(np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
                transformed = []
                trafos = []
                for i in range(self.num_masks):
                    params = slim.layers.fully_connected(stp_input, 6, scope='stp_params' + str(i) + 'cam2', activation_fn=None, reuse=reuse_stp) + identity_params
                    outsize = (prev_image.get_shape()[1], prev_image.get_shape()[2])
                    transformed.append(transformer(prev_image, params, outsize))
                    trafos.append(params)
                (transformed, trafo) = (transformed, trafos)
                </DeepExtract>
                transformed_l += transformed
                self.trafos.append(trafo)
                self.moved_images.append(transformed_l)
                if self.pix_distributions1 != None:
                    <DeepExtract>
                    from spatial_transformer import transformer
                    identity_params = tf.convert_to_tensor(np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
                    transformed = []
                    trafos = []
                    for i in range(num_masks):
                        params = slim.layers.fully_connected(stp_input, 6, scope='stp_params' + str(i) + 'cam2', activation_fn=None, reuse=True) + identity_params
                        outsize = (prev_pix_distrib1.get_shape()[1], prev_pix_distrib1.get_shape()[2])
                        transformed.append(transformer(prev_pix_distrib1, params, outsize))
                        trafos.append(params)
                    (transf_distrib_ndesig1, _) = (transformed, trafos)
                    </DeepExtract>
                    self.moved_pix_distrib1.append(transf_distrib_ndesig1)
            if '1stimg_bckgd' in self.conf:
                background = self.images[0]
                print('using background from first image..')
            else:
                background = prev_image
            <DeepExtract>
            masks = slim.layers.conv2d_transpose(enc6, self.conf['num_masks'] + extra_masks, 1, stride=1, activation_fn=None, scope='convt7_cam2')
            img_height = 64
            img_width = 64
            num_masks = self.conf['num_masks']
            if self.conf['model'] == 'DNA':
                if num_masks != 1:
                    raise ValueError('Only one mask is supported for DNA model.')
            masks = tf.reshape(tf.nn.softmax(tf.reshape(masks, [-1, num_masks + extra_masks])), [int(self.batch_size), int(img_height), int(img_width), num_masks + extra_masks])
            mask_list = tf.split(axis=3, num_or_size_splits=num_masks + extra_masks, value=masks)
            output = mask_list[0] * background
            assert len(transformed_l) == len(mask_list[1:])
            for (layer, mask) in zip(transformed_l, mask_list[1:]):
                output += layer * mask
            (output, mask_list) = (output, mask_list)
            </DeepExtract>
            self.gen_images.append(output)
            self.gen_masks.append(mask_list)
            if self.pix_distributions1 != None:
                <DeepExtract>
                if '1stimg_bckgd' in self.conf:
                    background_pix = self.pix_distributions1[0]
                    if len(background_pix.get_shape()) == 3:
                        background_pix = tf.expand_dims(background_pix, -1)
                    print('using pix_distrib-background from first image..')
                else:
                    background_pix = prev_pix_distrib1
                pix_distrib_output = mask_list[0] * background_pix
                if 'gen_pix' in self.conf:
                    pix_distrib_output += mask_list[1] * prev_pix_distrib1
                for i in range(self.num_masks):
                    pix_distrib_output += transf_distrib_ndesig1[i] * mask_list[i + extra_masks]
                pix_distrib_output /= tf.reduce_sum(pix_distrib_output, axis=(1, 2), keepdims=True)
                pix_distrib_output = pix_distrib_output
                </DeepExtract>
                self.gen_distrib1.append(pix_distrib_output)
                if 'ndesig' in self.conf:
                    <DeepExtract>
                    if '1stimg_bckgd' in self.conf:
                        background_pix = self.pix_distributions2[0]
                        if len(background_pix.get_shape()) == 3:
                            background_pix = tf.expand_dims(background_pix, -1)
                        print('using pix_distrib-background from first image..')
                    else:
                        background_pix = prev_pix_distrib2
                    pix_distrib_output = mask_list[0] * background_pix
                    if 'gen_pix' in self.conf:
                        pix_distrib_output += mask_list[1] * prev_pix_distrib2
                    for i in range(self.num_masks):
                        pix_distrib_output += transf_distrib_ndesig2[i] * mask_list[i + extra_masks]
                    pix_distrib_output /= tf.reduce_sum(pix_distrib_output, axis=(1, 2), keepdims=True)
                    pix_distrib_output = pix_distrib_output
                    </DeepExtract>
                    self.gen_distrib2.append(pix_distrib_output)
            if int(current_state.get_shape()[1]) == 0:
                current_state = tf.zeros_like(state_action)
            else:
                current_state = slim.layers.fully_connected(state_action, int(current_state.get_shape()[1]), scope='state_pred', activation_fn=None)
            self.gen_states.append(current_state)
def build(self):
    if 'kern_size' in self.conf.keys():
        KERN_SIZE = self.conf['kern_size']
    else:
        KERN_SIZE = 5
    (batch_size, img_height, img_width, color_channels) = self.images[0].get_shape()[0:4]
    lstm_func = basic_conv_lstm_cell
    if self.states != None:
        current_state = self.states[0]
    else:
        current_state = None
    if self.actions == None:
        self.actions = [None for _ in self.images]
    if self.k == -1:
        feedself = True
    else:
        num_ground_truth = tf.to_int32(tf.round(tf.to_float(batch_size) * (self.k / (self.k + tf.exp(self.iter_num / self.k)))))
        feedself = False
    if 'lstm_size' in self.conf:
        lstm_size = self.conf['lstm_size']
        print('using lstm size', lstm_size)
    else:
        ngf = self.conf['ngf']
        lstm_size = np.int32(np.array([ngf, ngf * 2, ngf * 4, ngf * 2, ngf]))
    (lstm_state1, lstm_state2, lstm_state3, lstm_state4) = (None, None, None, None)
    (lstm_state5, lstm_state6, lstm_state7) = (None, None, None)
    for (t, action) in enumerate(self.actions):
        print(t)
        reuse = bool(self.gen_images)
        done_warm_start = len(self.gen_images) > self.context_frames - 1
        with slim.arg_scope([lstm_func, slim.layers.conv2d, slim.layers.fully_connected, tf_layers.layer_norm, slim.layers.conv2d_transpose], reuse=reuse):
            if feedself and done_warm_start:
                prev_image = self.gen_images[-1]
                if self.pix_distributions1 != None:
                    prev_pix_distrib1 = self.gen_distrib1[-1]
                    if 'ndesig' in self.conf:
                        prev_pix_distrib2 = self.gen_distrib2[-1]
            elif done_warm_start:
                idx = tf.random_shuffle(tf.range(int(batch_size)))
                ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
                generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))
                ground_truth_examps = tf.gather(self.images[t], ground_truth_idx)
                generated_examps = tf.gather(self.gen_images[-1], generated_idx)
                prev_image = tf.dynamic_stitch([ground_truth_idx, generated_idx], [ground_truth_examps, generated_examps])
            else:
                prev_image = self.images[t]
                if self.pix_distributions1 != None:
                    prev_pix_distrib1 = self.pix_distributions1[t]
                    if 'ndesig' in self.conf:
                        prev_pix_distrib2 = self.pix_distributions2[t]
                    if len(prev_pix_distrib1.get_shape()) == 3:
                        prev_pix_distrib1 = tf.expand_dims(prev_pix_distrib1, -1)
                        if 'ndesig' in self.conf:
                            prev_pix_distrib2 = tf.expand_dims(prev_pix_distrib2, -1)
            if 'refeed_firstimage' in self.conf:
                assert self.conf['model'] == 'STP'
                if t > 1:
                    input_image = self.images[1]
                    print('refeed with image 1')
                else:
                    input_image = prev_image
            else:
                input_image = prev_image
            if not 'ignore_state_action' in self.conf:
                state_action = tf.concat(axis=1, values=[action, current_state])
            enc0 = slim.layers.conv2d(input_image, 32, [5, 5], stride=2, scope='scale1_conv1', normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'layer_norm1'})
            (hidden1, lstm_state1) = lstm_func(enc0, lstm_state1, lstm_size[0], scope='state1')
            hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
            enc1 = slim.layers.conv2d(hidden1, hidden1.get_shape()[3], [3, 3], stride=2, scope='conv2')
            (hidden3, lstm_state3) = lstm_func(enc1, lstm_state3, lstm_size[1], scope='state3')
            hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4')
            enc2 = slim.layers.conv2d(hidden3, hidden3.get_shape()[3], [3, 3], stride=2, scope='conv3')
            if not 'ignore_state_action' in self.conf:
                if 'ignore_state' in self.conf:
                    lowdim = action
                    print('ignoring state')
                else:
                    lowdim = state_action
                smear = tf.reshape(lowdim, [int(batch_size), 1, 1, int(lowdim.get_shape()[1])])
                smear = tf.tile(smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1])
                enc2 = tf.concat(axis=3, values=[enc2, smear])
            else:
                print('ignoring states and actions')
            enc3 = slim.layers.conv2d(enc2, hidden3.get_shape()[3], [1, 1], stride=1, scope='conv4')
            (hidden5, lstm_state5) = lstm_func(enc3, lstm_state5, lstm_size[2], scope='state5')
            hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6')
            enc4 = slim.layers.conv2d_transpose(hidden5, hidden5.get_shape()[3], 3, stride=2, scope='convt1')
            (hidden6, lstm_state6) = lstm_func(enc4, lstm_state6, lstm_size[3], scope='state6')
            hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7')
            if 'noskip' not in self.conf:
                hidden6 = tf.concat(axis=3, values=[hidden6, enc1])
            enc5 = slim.layers.conv2d_transpose(hidden6, hidden6.get_shape()[3], 3, stride=2, scope='convt2')
            (hidden7, lstm_state7) = lstm_func(enc5, lstm_state7, lstm_size[4], scope='state7')
            hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8')
            if not 'noskip' in self.conf:
                hidden7 = tf.concat(axis=3, values=[hidden7, enc0])
            enc6 = slim.layers.conv2d_transpose(hidden7, hidden7.get_shape()[3], 3, stride=2, scope='convt3', normalizer_fn=tf_layers.layer_norm, normalizer_params={'scope': 'layer_norm9'})
            if 'transform_from_firstimage' in self.conf:
                prev_image = self.images[1]
                if self.pix_distributions1 != None:
                    prev_pix_distrib1 = self.pix_distributions1[1]
                    prev_pix_distrib1 = tf.expand_dims(prev_pix_distrib1, -1)
                print('transform from image 1')
            if self.conf['model'] == 'DNA':
                trafo_input = slim.layers.conv2d_transpose(enc6, KERN_SIZE ** 2, 1, stride=1, scope='convt4_cam2')
                transformed_l = [self.dna_transformation(prev_image, trafo_input, self.conf['kern_size'])]
                if self.pix_distributions1 != None:
                    transf_distrib_ndesig1 = [self.dna_transformation(prev_pix_distrib1, trafo_input, KERN_SIZE)]
                    if 'ndesig' in self.conf:
                        transf_distrib_ndesig2 = [self.dna_transformation(prev_pix_distrib2, trafo_input, KERN_SIZE)]
                extra_masks = 1
            if self.conf['model'] == 'CDNA':
                if 'gen_pix' in self.conf:
                    enc7 = slim.layers.conv2d_transpose(enc6, color_channels, 1, stride=1, scope='convt4', activation_fn=None)
                    transformed_l = [tf.nn.sigmoid(enc7)]
                    extra_masks = 2
                else:
                    transformed_l = []
                    extra_masks = 1
                cdna_input = tf.reshape(hidden5, [int(batch_size), -1])
                batch_size = int(cdna_input.get_shape()[0])
                height = int(prev_image.get_shape()[1])
                width = int(prev_image.get_shape()[2])
                DNA_KERN_SIZE = self.conf['kern_size']
                num_masks = self.conf['num_masks']
                color_channels = int(prev_image.get_shape()[3])
                cdna_kerns = slim.layers.fully_connected(cdna_input, DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks, scope='cdna_params', activation_fn=None, reuse=reuse)
                cdna_kerns = tf.reshape(cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
                cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
                norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keepdims=True)
                cdna_kerns /= norm_factor
                cdna_kerns_summary = cdna_kerns
                cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
                cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])
                prev_image = tf.transpose(prev_image, [3, 1, 2, 0])
                transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')
                transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])
                transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
                transformed = tf.unstack(value=transformed, axis=-1)
                (new_transformed, _) = (transformed, cdna_kerns_summary)
                transformed_l += new_transformed
                self.moved_images.append(transformed_l)
                if self.pix_distributions1 != None:
                    batch_size = int(cdna_input.get_shape()[0])
                    height = int(prev_pix_distrib1.get_shape()[1])
                    width = int(prev_pix_distrib1.get_shape()[2])
                    DNA_KERN_SIZE = self.conf['kern_size']
                    num_masks = self.conf['num_masks']
                    color_channels = int(prev_pix_distrib1.get_shape()[3])
                    cdna_kerns = slim.layers.fully_connected(cdna_input, DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks, scope='cdna_params', activation_fn=None, reuse=True)
                    cdna_kerns = tf.reshape(cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
                    cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
                    norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keepdims=True)
                    cdna_kerns /= norm_factor
                    cdna_kerns_summary = cdna_kerns
                    cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
                    cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])
                    prev_pix_distrib1 = tf.transpose(prev_pix_distrib1, [3, 1, 2, 0])
                    transformed = tf.nn.depthwise_conv2d(prev_pix_distrib1, cdna_kerns, [1, 1, 1, 1], 'SAME')
                    transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])
                    transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
                    transformed = tf.unstack(value=transformed, axis=-1)
                    (transf_distrib_ndesig1, _) = (transformed, cdna_kerns_summary)
                    self.moved_pix_distrib1.append(transf_distrib_ndesig1)
                    if 'ndesig' in self.conf:
                        batch_size = int(cdna_input.get_shape()[0])
                        height = int(prev_pix_distrib2.get_shape()[1])
                        width = int(prev_pix_distrib2.get_shape()[2])
                        DNA_KERN_SIZE = self.conf['kern_size']
                        num_masks = self.conf['num_masks']
                        color_channels = int(prev_pix_distrib2.get_shape()[3])
                        cdna_kerns = slim.layers.fully_connected(cdna_input, DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks, scope='cdna_params', activation_fn=None, reuse=True)
                        cdna_kerns = tf.reshape(cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
                        cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
                        norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keepdims=True)
                        cdna_kerns /= norm_factor
                        cdna_kerns_summary = cdna_kerns
                        cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
                        cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])
                        prev_pix_distrib2 = tf.transpose(prev_pix_distrib2, [3, 1, 2, 0])
                        transformed = tf.nn.depthwise_conv2d(prev_pix_distrib2, cdna_kerns, [1, 1, 1, 1], 'SAME')
                        transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])
                        transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
                        transformed = tf.unstack(value=transformed, axis=-1)
                        (transf_distrib_ndesig2, _) = (transformed, cdna_kerns_summary)
                        self.moved_pix_distrib2.append(transf_distrib_ndesig2)
            if self.conf['model'] == 'STP':
                enc7 = slim.layers.conv2d_transpose(enc6, color_channels, 1, stride=1, scope='convt5', activation_fn=None)
                if 'gen_pix' in self.conf:
                    transformed_l = [tf.nn.sigmoid(enc7)]
                    extra_masks = 2
                else:
                    transformed_l = []
                    extra_masks = 1
                enc_stp = tf.reshape(hidden5, [int(batch_size), -1])
                stp_input = slim.layers.fully_connected(enc_stp, 200, scope='fc_stp_cam2')
                reuse_stp = None
                if reuse:
                    reuse_stp = reuse
                from spatial_transformer import transformer
                identity_params = tf.convert_to_tensor(np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
                transformed = []
                trafos = []
                for i in range(self.num_masks):
                    params = slim.layers.fully_connected(stp_input, 6, scope='stp_params' + str(i) + 'cam2', activation_fn=None, reuse=reuse_stp) + identity_params
                    outsize = (prev_image.get_shape()[1], prev_image.get_shape()[2])
                    transformed.append(transformer(prev_image, params, outsize))
                    trafos.append(params)
                (transformed, trafo) = (transformed, trafos)
                transformed_l += transformed
                self.trafos.append(trafo)
                self.moved_images.append(transformed_l)
                if self.pix_distributions1 != None:
                    from spatial_transformer import transformer
                    identity_params = tf.convert_to_tensor(np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
                    transformed = []
                    trafos = []
                    for i in range(num_masks):
                        params = slim.layers.fully_connected(stp_input, 6, scope='stp_params' + str(i) + 'cam2', activation_fn=None, reuse=True) + identity_params
                        outsize = (prev_pix_distrib1.get_shape()[1], prev_pix_distrib1.get_shape()[2])
                        transformed.append(transformer(prev_pix_distrib1, params, outsize))
                        trafos.append(params)
                    (transf_distrib_ndesig1, _) = (transformed, trafos)
                    self.moved_pix_distrib1.append(transf_distrib_ndesig1)
            if '1stimg_bckgd' in self.conf:
                background = self.images[0]
                print('using background from first image..')
            else:
                background = prev_image
            masks = slim.layers.conv2d_transpose(enc6, self.conf['num_masks'] + extra_masks, 1, stride=1, activation_fn=None, scope='convt7_cam2')
            img_height = 64
            img_width = 64
            num_masks = self.conf['num_masks']
            if self.conf['model'] == 'DNA':
                if num_masks != 1:
                    raise ValueError('Only one mask is supported for DNA model.')
            masks = tf.reshape(tf.nn.softmax(tf.reshape(masks, [-1, num_masks + extra_masks])), [int(self.batch_size), int(img_height), int(img_width), num_masks + extra_masks])
            mask_list = tf.split(axis=3, num_or_size_splits=num_masks + extra_masks, value=masks)
            output = mask_list[0] * background
            assert len(transformed_l) == len(mask_list[1:])
            for (layer, mask) in zip(transformed_l, mask_list[1:]):
                output += layer * mask
            (output, mask_list) = (output, mask_list)
            self.gen_images.append(output)
            self.gen_masks.append(mask_list)
            if self.pix_distributions1 != None:
                if '1stimg_bckgd' in self.conf:
                    background_pix = self.pix_distributions1[0]
                    if len(background_pix.get_shape()) == 3:
                        background_pix = tf.expand_dims(background_pix, -1)
                    print('using pix_distrib-background from first image..')
                else:
                    background_pix = prev_pix_distrib1
                pix_distrib_output = mask_list[0] * background_pix
                if 'gen_pix' in self.conf:
                    pix_distrib_output += mask_list[1] * prev_pix_distrib1
                for i in range(self.num_masks):
                    pix_distrib_output += transf_distrib_ndesig1[i] * mask_list[i + extra_masks]
                pix_distrib_output /= tf.reduce_sum(pix_distrib_output, axis=(1, 2), keepdims=True)
                pix_distrib_output = pix_distrib_output
                self.gen_distrib1.append(pix_distrib_output)
                if 'ndesig' in self.conf:
                    if '1stimg_bckgd' in self.conf:
                        background_pix = self.pix_distributions2[0]
                        if len(background_pix.get_shape()) == 3:
                            background_pix = tf.expand_dims(background_pix, -1)
                        print('using pix_distrib-background from first image..')
                    else:
                        background_pix = prev_pix_distrib2
                    pix_distrib_output = mask_list[0] * background_pix
                    if 'gen_pix' in self.conf:
                        pix_distrib_output += mask_list[1] * prev_pix_distrib2
                    for i in range(self.num_masks):
                        pix_distrib_output += transf_distrib_ndesig2[i] * mask_list[i + extra_masks]
                    pix_distrib_output /= tf.reduce_sum(pix_distrib_output, axis=(1, 2), keepdims=True)
                    pix_distrib_output = pix_distrib_output
                    self.gen_distrib2.append(pix_distrib_output)
            if int(current_state.get_shape()[1]) == 0:
                current_state = tf.zeros_like(state_action)
            else:
                current_state = slim.layers.fully_connected(state_action, int(current_state.get_shape()[1]), scope='state_pred', activation_fn=None)
            self.gen_states.append(current_state)
DSGAN
positive
def crop_mirror(img, x, y, w, h, padding=0):
    assert x >= 0 and y >= 0 and (w > 0) and (h > 0)
    <DeepExtract>
    if len(img.shape) == 3:
        padded_img = np.array([np.pad(ch, padding, 'reflect') for ch in img.transpose((2, 0, 1))]).transpose((1, 2, 0))
    else:
        padded_img = np.pad(img, padding, 'reflect')
    assert padded_img.shape[0] == img.shape[0] + padding * 2, (img.shape, padded_img.shape)
    assert padded_img.shape[1] == img.shape[1] + padding * 2, (img.shape, padded_img.shape)
    if len(img.shape) == 3:
        assert padded_img.shape[2] == img.shape[2], (img.shape, padded_img.shape)
    mirror_padded = padded_img
    </DeepExtract>
    cropped_img = mirror_padded[y:y + h + padding * 2, x:x + w + padding * 2]
    return cropped_img
def crop_mirror(img, x, y, w, h, padding=0):
    assert x >= 0 and y >= 0 and (w > 0) and (h > 0)
    if len(img.shape) == 3:
        padded_img = np.array([np.pad(ch, padding, 'reflect') for ch in img.transpose((2, 0, 1))]).transpose((1, 2, 0))
    else:
        padded_img = np.pad(img, padding, 'reflect')
    assert padded_img.shape[0] == img.shape[0] + padding * 2, (img.shape, padded_img.shape)
    assert padded_img.shape[1] == img.shape[1] + padding * 2, (img.shape, padded_img.shape)
    if len(img.shape) == 3:
        assert padded_img.shape[2] == img.shape[2], (img.shape, padded_img.shape)
    mirror_padded = padded_img
    cropped_img = mirror_padded[y:y + h + padding * 2, x:x + w + padding * 2]
    return cropped_img
data-science-bowl-2018
positive
def _generate_qweb(self, exchange_record, **kw):
    tmpl = self.template_id
    <DeepExtract>
    values = {'exchange_record': exchange_record, 'record': exchange_record.record, 'backend': exchange_record.backend_id, 'template': self, 'render_edi_template': self._render_template, 'get_info_provider': self._get_info_provider, 'info': {}}
    values.update(kw)
    values.update(self._time_utils())
    values.update(self._evaluate_code_snippet(**values))
    values = values
    </DeepExtract>
    return tmpl._render(values)
def _generate_qweb(self, exchange_record, **kw):
    tmpl = self.template_id
    values = {'exchange_record': exchange_record, 'record': exchange_record.record, 'backend': exchange_record.backend_id, 'template': self, 'render_edi_template': self._render_template, 'get_info_provider': self._get_info_provider, 'info': {}}
    values.update(kw)
    values.update(self._time_utils())
    values.update(self._evaluate_code_snippet(**values))
    values = values
    return tmpl._render(values)
edi
positive
def test_run_with_ref_seq_no_alignment(self, test_file, test_seqs, ref_file, ref_seq, run):
    expected_length = len(ref_seq.seq) - ref_seq.seq.count('-')
    <DeepExtract>
    def run(args):
        args = '-s %s --reference-sequence %s' % (test_file, ref_file)(args + ' -o %s' % out_file)
        align.run(args)
        output = SeqIO.to_dict(SeqIO.parse(out_file, 'fasta'))
    output = run
    </DeepExtract>
    assert list(output.keys()) == [ref_seq.id] + list(test_seqs.keys())
    assert all((len(r.seq) == expected_length for r in output.values()))
    assert output['PREFIX'].seq.startswith('---')
    assert output['SUFFIX'].seq.endswith('---')
def test_run_with_ref_seq_no_alignment(self, test_file, test_seqs, ref_file, ref_seq, run):
    expected_length = len(ref_seq.seq) - ref_seq.seq.count('-')
    def run(args):
        args = '-s %s --reference-sequence %s' % (test_file, ref_file)(args + ' -o %s' % out_file)
        align.run(args)
        output = SeqIO.to_dict(SeqIO.parse(out_file, 'fasta'))
    output = run
    assert list(output.keys()) == [ref_seq.id] + list(test_seqs.keys())
    assert all((len(r.seq) == expected_length for r in output.values()))
    assert output['PREFIX'].seq.startswith('---')
    assert output['SUFFIX'].seq.endswith('---')
augur
positive
def get_peak_mapping_stages(peaks_analysis, keypair, server, fqcheck):
    logger.debug('in get_peak_mapping_stages: peaks_analysis is %s named %s' % (peaks_analysis.get('id'), peaks_analysis.get('name')))
    if is_unreplicated_analysis(peaks_analysis):
        reps = [1]
    else:
        reps = [1, 2]
    peaks_stages = [stage['execution'] for stage in peaks_analysis.get('stages')]
    peaks_stage = next((stage for stage in peaks_stages if stage['name'] == 'ENCODE Peaks'))
    tas = [dxpy.describe(peaks_stage['input']['rep%s_ta' % n]) for n in reps]
    mapping_jobs = [dxpy.describe(ta['createdBy']['job']) for ta in tas]
    mapping_analyses = [dxpy.describe(mapping_job['analysis']) for mapping_job in mapping_jobs if mapping_job]
    mapping_stages = []
    for (i, repn) in enumerate(reps):
        <DeepExtract>
        logger.debug('in get_mapping_stages with mapping analysis %s and rep %s' % (mapping_analyses[i]['id'], repn))
        if not mapping_analyses[i]:
            logger.warning('get_mapping_stages got empty mapping_analysis, returning None')
            mapping_stage = None
        experiment_accession = get_experiment_accession(mapping_analyses[i])
        url = urlparse.urljoin(server, '/experiments/%s' % experiment_accession)
        r = common.encoded_get(url, keypair, return_response=True)
        r.raise_for_status()
        experiment = r.json()
        encoded_repn = get_encoded_repn(mapping_analyses[i])
        experiment_fastqs = get_rep_fastqs(experiment, keypair, server, encoded_repn)
        experiment_fastq_accessions = [f.get('accession') for f in experiment_fastqs]
        logger.info('%s: Found accessioned experiment fastqs with accessions %s' % (experiment_accession, experiment_fastq_accessions))
        analysis_stages = [stage['execution'] for stage in mapping_analyses[i].get('stages')]
        input_stage = next((stage for stage in analysis_stages if stage['name'].startswith('Gather inputs')))
        logger.debug("input_stage['input'] JSON:")
        logger.debug(pprint.pformat(input_stage['input']))
        input_fastq_accessions = []
        input_fastq_accessions.extend(input_stage['input']['reads1'])
        logger.debug('reads1 only input_fastq_accessions %s' % input_fastq_accessions)
        if input_stage['input']['reads2']:
            reads2 = input_stage['input']['reads2']
            logger.debug('found reads2 %s' % reads2)
            if type(reads2) is list:
                input_fastq_accessions.extend(reads2)
            else:
                input_fastq_accessions.extend([reads2])
        logger.debug('reads1 and reads2 input_fastq_accessions %s' % input_fastq_accessions)
        fastqs = []
        for acc in input_fastq_accessions:
            fobj = common.encoded_get(urlparse.urljoin(server, 'files/%s' % acc), keypair)
            fastqs.append(fobj)
        logger.info('Found input fastq objects with accessions %s' % [f.get('accession') for f in fastqs])
        if fqcheck:
            if cmp(sorted(flat(experiment_fastq_accessions)), sorted(flat(input_fastq_accessions))):
                fastqs_match = False
                assert fastqs_match, '%s rep%s: Accessioned experiment fastqs differ from analysis.' % (experiment_accession, repn) + 'Suppress with fqcheck=False'
                mapping_stage = None
        else:
            logger.warning('--fqcheck is False, so not checking to see if experiment and mapped fastqs match')
        raw_mapping_stage = next((stage for stage in analysis_stages if stage['name'].startswith('Map ENCSR')))
        filter_qc_stage = next((stage for stage in analysis_stages if stage['name'].startswith('Filter and QC')))
        scrubbed = any([scrubbed_stage(stage) for stage in analysis_stages])
        crop_length = raw_mapping_stage['output'].get('crop_length')
        if not crop_length or crop_length == 'native':
            logger.warning('crop_length %s. Inferring mapped_read_length from fastqs' % crop_length)
            native_lengths = set([fq.get('read_length') for fq in fastqs])
            try:
                assert len(native_lengths) == 1 and all([isinstance(rl, int) for rl in native_lengths]), 'fastqs with different or non-integer read_lengths: %s' % [(fq.get('accession'), fq.get('read_length')) for fq in fastqs]
            except AssertionError:
                if fqcheck:
                    raise
                else:
                    logger.warning('fastqs with different or non-integer read_lengths: %s But fqcheck is False so ignoring' % [(fq.get('accession'), fq.get('read_length')) for fq in fastqs])
            except:
                raise
            mapped_read_length = int(next((l for l in native_lengths)))
        else:
            mapped_read_length = int(crop_length)
        input_stage_output = input_stage['output'].get('output_JSON') or input_stage['output']
        reference_file = dxpy.describe(input_stage_output['reference_tar'])
        reference_alias = 'dnanexus:' + reference_file.get('id')
        logger.debug('looking for reference file with alias %s' % reference_alias)
        reference = common.encoded_get(urlparse.urljoin(server, 'files/%s' % reference_alias), keypair)
        assert reference, 'Reference file %s not found on Portal' % reference_alias
        logger.debug('found reference file %s' % reference.get('accession'))
        bam_metadata = common.merge_dicts({'file_format': 'bam', 'output_type': 'alignments' if not scrubbed else 'redacted alignments', 'mapped_read_length': mapped_read_length, 'assembly': reference.get('assembly')}, COMMON_METADATA)
        mapping_stages = {get_stage_name('Map ENCSR.*', analysis_stages): {'input_files': [], 'output_files': [], 'qc': [], 'stage_metadata': {}}, get_stage_name('Filter and QC.*', analysis_stages): {'input_files': [{'name': 'rep%s_fastqs' % repn, 'derived_from': None, 'metadata': None, 'encode_object': fastqs}, {'name': 'reference', 'derived_from': None, 'metadata': None, 'encode_object': reference}], 'output_files': [{'name': 'scrubbed_filtered_bam' if scrubbed else 'filtered_bam', 'derived_from': ['rep%s_fastqs' % repn, 'reference'], 'metadata': bam_metadata}], 'qc': [qc, dup_qc, pbc_qc, filtered_qc, xcor_qc], 'stage_metadata': {}}, get_stage_name('Calculate cross-correlation.*', analysis_stages): {'input_files': [], 'output_files': [], 'qc': [], 'stage_metadata': {}}}
        for stage_name in mapping_stages:
            if not stage_name.startswith('_'):
                mapping_stages[stage_name].update({'stage_metadata': get_stage_metadata(mapping_analyses[i], stage_name)})
        mapping_stage = mapping_stages
        </DeepExtract>
        if not mapping_stage:
            logger.error('%s: failed to find mapping stages for rep%d' % (peaks_analysis['id'], repn))
            return None
        else:
            mapping_stages.append(mapping_stage)
    return mapping_stages
def get_peak_mapping_stages(peaks_analysis, keypair, server, fqcheck):
    logger.debug('in get_peak_mapping_stages: peaks_analysis is %s named %s' % (peaks_analysis.get('id'), peaks_analysis.get('name')))
    if is_unreplicated_analysis(peaks_analysis):
        reps = [1]
    else:
        reps = [1, 2]
    peaks_stages = [stage['execution'] for stage in peaks_analysis.get('stages')]
    peaks_stage = next((stage for stage in peaks_stages if stage['name'] == 'ENCODE Peaks'))
    tas = [dxpy.describe(peaks_stage['input']['rep%s_ta' % n]) for n in reps]
    mapping_jobs = [dxpy.describe(ta['createdBy']['job']) for ta in tas]
    mapping_analyses = [dxpy.describe(mapping_job['analysis']) for mapping_job in mapping_jobs if mapping_job]
    mapping_stages = []
    for (i, repn) in enumerate(reps):
        logger.debug('in get_mapping_stages with mapping analysis %s and rep %s' % (mapping_analyses[i]['id'], repn))
        if not mapping_analyses[i]:
            logger.warning('get_mapping_stages got empty mapping_analysis, returning None')
            mapping_stage = None
        experiment_accession = get_experiment_accession(mapping_analyses[i])
        url = urlparse.urljoin(server, '/experiments/%s' % experiment_accession)
        r = common.encoded_get(url, keypair, return_response=True)
        r.raise_for_status()
        experiment = r.json()
        encoded_repn = get_encoded_repn(mapping_analyses[i])
        experiment_fastqs = get_rep_fastqs(experiment, keypair, server, encoded_repn)
        experiment_fastq_accessions = [f.get('accession') for f in experiment_fastqs]
        logger.info('%s: Found accessioned experiment fastqs with accessions %s' % (experiment_accession, experiment_fastq_accessions))
        analysis_stages = [stage['execution'] for stage in mapping_analyses[i].get('stages')]
        input_stage = next((stage for stage in analysis_stages if stage['name'].startswith('Gather inputs')))
        logger.debug("input_stage['input'] JSON:")
        logger.debug(pprint.pformat(input_stage['input']))
        input_fastq_accessions = []
        input_fastq_accessions.extend(input_stage['input']['reads1'])
        logger.debug('reads1 only input_fastq_accessions %s' % input_fastq_accessions)
        if input_stage['input']['reads2']:
            reads2 = input_stage['input']['reads2']
            logger.debug('found reads2 %s' % reads2)
            if type(reads2) is list:
                input_fastq_accessions.extend(reads2)
            else:
                input_fastq_accessions.extend([reads2])
        logger.debug('reads1 and reads2 input_fastq_accessions %s' % input_fastq_accessions)
        fastqs = []
        for acc in input_fastq_accessions:
            fobj = common.encoded_get(urlparse.urljoin(server, 'files/%s' % acc), keypair)
            fastqs.append(fobj)
        logger.info('Found input fastq objects with accessions %s' % [f.get('accession') for f in fastqs])
        if fqcheck:
            if cmp(sorted(flat(experiment_fastq_accessions)), sorted(flat(input_fastq_accessions))):
                fastqs_match = False
                assert fastqs_match, '%s rep%s: Accessioned experiment fastqs differ from analysis.' % (experiment_accession, repn) + 'Suppress with fqcheck=False'
                mapping_stage = None
        else:
            logger.warning('--fqcheck is False, so not checking to see if experiment and mapped fastqs match')
        raw_mapping_stage = next((stage for stage in analysis_stages if stage['name'].startswith('Map ENCSR')))
        filter_qc_stage = next((stage for stage in analysis_stages if stage['name'].startswith('Filter and QC')))
        scrubbed = any([scrubbed_stage(stage) for stage in analysis_stages])
        crop_length = raw_mapping_stage['output'].get('crop_length')
        if not crop_length or crop_length == 'native':
            logger.warning('crop_length %s. Inferring mapped_read_length from fastqs' % crop_length)
            native_lengths = set([fq.get('read_length') for fq in fastqs])
            try:
                assert len(native_lengths) == 1 and all([isinstance(rl, int) for rl in native_lengths]), 'fastqs with different or non-integer read_lengths: %s' % [(fq.get('accession'), fq.get('read_length')) for fq in fastqs]
            except AssertionError:
                if fqcheck:
                    raise
                else:
                    logger.warning('fastqs with different or non-integer read_lengths: %s But fqcheck is False so ignoring' % [(fq.get('accession'), fq.get('read_length')) for fq in fastqs])
            except:
                raise
            mapped_read_length = int(next((l for l in native_lengths)))
        else:
            mapped_read_length = int(crop_length)
        input_stage_output = input_stage['output'].get('output_JSON') or input_stage['output']
        reference_file = dxpy.describe(input_stage_output['reference_tar'])
        reference_alias = 'dnanexus:' + reference_file.get('id')
        logger.debug('looking for reference file with alias %s' % reference_alias)
        reference = common.encoded_get(urlparse.urljoin(server, 'files/%s' % reference_alias), keypair)
        assert reference, 'Reference file %s not found on Portal' % reference_alias
        logger.debug('found reference file %s' % reference.get('accession'))
        bam_metadata = common.merge_dicts({'file_format': 'bam', 'output_type': 'alignments' if not scrubbed else 'redacted alignments', 'mapped_read_length': mapped_read_length, 'assembly': reference.get('assembly')}, COMMON_METADATA)
        mapping_stages = {get_stage_name('Map ENCSR.*', analysis_stages): {'input_files': [], 'output_files': [], 'qc': [], 'stage_metadata': {}}, get_stage_name('Filter and QC.*', analysis_stages): {'input_files': [{'name': 'rep%s_fastqs' % repn, 'derived_from': None, 'metadata': None, 'encode_object': fastqs}, {'name': 'reference', 'derived_from': None, 'metadata': None, 'encode_object': reference}], 'output_files': [{'name': 'scrubbed_filtered_bam' if scrubbed else 'filtered_bam', 'derived_from': ['rep%s_fastqs' % repn, 'reference'], 'metadata': bam_metadata}], 'qc': [qc, dup_qc, pbc_qc, filtered_qc, xcor_qc], 'stage_metadata': {}}, get_stage_name('Calculate cross-correlation.*', analysis_stages): {'input_files': [], 'output_files': [], 'qc': [], 'stage_metadata': {}}}
        for stage_name in mapping_stages:
            if not stage_name.startswith('_'):
                mapping_stages[stage_name].update({'stage_metadata': get_stage_metadata(mapping_analyses[i], stage_name)})
        mapping_stage = mapping_stages
        if not mapping_stage:
            logger.error('%s: failed to find mapping stages for rep%d' % (peaks_analysis['id'], repn))
            return None
        else:
            mapping_stages.append(mapping_stage)
    return mapping_stages
chip-seq-pipeline
positive
def _TransCritPhaseH_OnePhaseC_Qimposed(self, Inputs):
    """
    The hot stream is Transcritical phase (supercritical or supercrit_liq), and the cold stream is single phase (SC or SH)
    Inputs: dictionary of parameters
    Outputs: dictionary of parameters, but mainly w, pressure drop and heat transfer coefficient
    This function calculate the fraction of heat exchanger that would be required for given thermal duty "w" and DP and h
    """
    Tmean_c = Inputs['Tmean_c']
    if self.HXType == 'Plate-HX':
        <DeepExtract>
        if self.HXType == 'Plate-HX':
            Inputs = {'AS': self.AS_c, 'T': Tmean_c, 'p': Inputs['pin_c'], 'mdot_gap': self.mdot_c / self.NgapsCold, 'PlateAmplitude': self.PlateAmplitude, 'PlateWavelength': self.PlateWavelength, 'InclinationAngle': self.InclinationAngle, 'Bp': self.Bp, 'Lp': self.Lp}
            Outputs = PHE_1phase_hdP(Inputs)
            (h_c, cp_c, PlateOutput_c) = (Outputs['h'], Outputs['cp'], Outputs)
        elif self.HXType == 'Coaxial-HX':
            self.AS_c.update(CP.PT_INPUTS, Inputs['pin_c'], Tmean_c)
            cp_g = self.AS_c.cpmass()
            v_g = 1 / self.AS_c.rhomass()
            if side == 'Hot':
                (f_g, h_g, Re_g) = f_h_1phase_Tube(self.mdot_c / self.NgapsCold, self.ID_i, Tmean_c, Inputs['pin_c'], self.AS_c)
                dh = self.ID_i
                dpdz_g = f_g * v_g * self.G_h ** 2 / (2.0 * dh)
                DP_g = dpdz_g * self.Lp
            elif side == 'Cold':
                (f_g, h_g, Re_g) = f_h_1phase_Annulus(self.mdot_c / self.NgapsCold, self.ID_o, self.OD_i, Tmean_c, Inputs['pin_c'], self.AS_c)
                dh = self.ID_o - self.OD_i
                dpdz_g = f_g * v_g * self.G_c ** 2 / (2.0 * dh)
                DP_g = dpdz_g * self.Lp
            Outputs = {'Dh': dh, 'h': h_g, 'DELTAP': DP_g, 'Re': Re_g, 'cp': cp_g}
            (h_c, cp_c, PlateOutput_c) = (Outputs['h'], Outputs['cp'], Outputs)
        </DeepExtract>
    elif self.HXType == 'Coaxial-HX':
        <DeepExtract>
        if self.HXType == 'Plate-HX':
            Inputs = {'AS': self.AS_c, 'T': Tmean_c, 'p': Inputs['pin_c'], 'mdot_gap': self.mdot_c, 'PlateAmplitude': self.PlateAmplitude, 'PlateWavelength': self.PlateWavelength, 'InclinationAngle': self.InclinationAngle, 'Bp': self.Bp, 'Lp': self.Lp}
            Outputs = PHE_1phase_hdP(Inputs)
            (h_c, cp_c, PlateOutput_c) = (Outputs['h'], Outputs['cp'], Outputs)
        elif self.HXType == 'Coaxial-HX':
            self.AS_c.update(CP.PT_INPUTS, Inputs['pin_c'], Tmean_c)
            cp_g = self.AS_c.cpmass()
            v_g = 1 / self.AS_c.rhomass()
            if 'Cold' == 'Hot':
                (f_g, h_g, Re_g) = f_h_1phase_Tube(self.mdot_c, self.ID_i, Tmean_c, Inputs['pin_c'], self.AS_c)
                dh = self.ID_i
                dpdz_g = f_g * v_g * self.G_h ** 2 / (2.0 * dh)
                DP_g = dpdz_g * self.Lp
            elif 'Cold' == 'Cold':
                (f_g, h_g, Re_g) = f_h_1phase_Annulus(self.mdot_c, self.ID_o, self.OD_i, Tmean_c, Inputs['pin_c'], self.AS_c)
                dh = self.ID_o - self.OD_i
                dpdz_g = f_g * v_g * self.G_c ** 2 / (2.0 * dh)
                DP_g = dpdz_g * self.Lp
            Outputs = {'Dh': dh, 'h': h_g, 'DELTAP': DP_g, 'Re': Re_g, 'cp': cp_g}
            (h_c, cp_c, PlateOutput_c) = (Outputs['h'], Outputs['cp'], Outputs)
        </DeepExtract>
    cp_c = Inputs['cp_c']
    Q = Inputs['Q']
    R_w = self.Rw
    R_c = 1 / (h_c * self.A_c_wetted)
    T_w = (R_w + R_c) * Q + Inputs['Tmean_c']
    change = 999
    w = 1
    while abs(change) > 1e-06:
        q_flux = Q / (w * self.A_h_wetted)
        (h_h, f_h, cp_h, rho_h) = Petterson_supercritical(Inputs['Tmean_h'], T_w, self.AS_h, self.G_h, self.Dh_h, 0, self.Dh_h / self.Lp, 0, Inputs['pin_h'], q_flux)
        h_h = self.h_r_hot_tuning * h_h
        R_h = 1 / (h_h * self.A_h_wetted)
        Tout_h = Inputs['Tin_h'] - Q / (self.mdot_h * cp_h)
        T_w = Tout_h - R_h * Q
        UA_total = 1 / (1 / (h_h * self.A_h_wetted) + 1 / (h_c * self.A_c_wetted) + self.Rw)
        UA_total = 1 / (1 / (h_h * self.A_h_wetted) + 1 / (h_c * self.A_c_wetted) + self.Rw)
        C = [cp_c * self.mdot_c, cp_h * self.mdot_h]
        Cmin = min(C)
        Cr = Cmin / max(C)
        Qmax = Cmin * (Inputs['Tin_h'] - Inputs['Tin_c'])
        epsilon = Q / Qmax
        if epsilon >= 1.0:
            epsilon = 1.0 - 1e-12
        NTU = 1 / (Cr - 1) * log((epsilon - 1) / (epsilon * Cr - 1))
        UA_req = Cmin * NTU
        change = UA_req / UA_total - w
        w = UA_req / UA_total
    Charge_h = w * self.V_h * rho_h
    self.AS_c.update(CP.PT_INPUTS, self.pin_c, Tmean_c)
    rho_c = self.AS_c.rhomass()
    Charge_c = w * self.V_c * rho_c
    v_h = 1.0 / rho_h
    dpdz_h = -f_h * v_h * self.G_h ** 2 / (2 * self.Dh_h)
    DP_frict_h = dpdz_h * self.Lp * w
    Outputs = {'w': w, 'Tout_h': Inputs['Tin_h'] - Q / (self.mdot_h * cp_h), 'Tout_c': Inputs['Tin_c'] + Q / (self.mdot_c * cp_c), 'Charge_c': Charge_c, 'Charge_h': Charge_h, 'DP_h': DP_frict_h, 'DP_c': -PlateOutput_c['DELTAP'], 'h_h': h_h, 'h_c': h_c, 'q_flux': q_flux, 'cp_h': cp_h}
    o = Inputs
    o.update(**Outputs)
    return o
def _TransCritPhaseH_OnePhaseC_Qimposed(self, Inputs):
    """
    The hot stream is Transcritical phase (supercritical or supercrit_liq), and the cold stream is single phase (SC or SH)
    Inputs: dictionary of parameters
    Outputs: dictionary of parameters, but mainly w, pressure drop and heat transfer coefficient
    This function calculate the fraction of heat exchanger that would be required for given thermal duty "w" and DP and h
    """
    Tmean_c = Inputs['Tmean_c']
    if self.HXType == 'Plate-HX':
        if self.HXType == 'Plate-HX':
            Inputs = {'AS': self.AS_c, 'T': Tmean_c, 'p': Inputs['pin_c'], 'mdot_gap': self.mdot_c / self.NgapsCold, 'PlateAmplitude': self.PlateAmplitude, 'PlateWavelength': self.PlateWavelength, 'InclinationAngle': self.InclinationAngle, 'Bp': self.Bp, 'Lp': self.Lp}
            Outputs = PHE_1phase_hdP(Inputs)
            (h_c, cp_c, PlateOutput_c) = (Outputs['h'], Outputs['cp'], Outputs)
        elif self.HXType == 'Coaxial-HX':
            self.AS_c.update(CP.PT_INPUTS, Inputs['pin_c'], Tmean_c)
            cp_g = self.AS_c.cpmass()
            v_g = 1 / self.AS_c.rhomass()
            if side == 'Hot':
                (f_g, h_g, Re_g) = f_h_1phase_Tube(self.mdot_c / self.NgapsCold, self.ID_i, Tmean_c, Inputs['pin_c'], self.AS_c)
                dh = self.ID_i
                dpdz_g = f_g * v_g * self.G_h ** 2 / (2.0 * dh)
                DP_g = dpdz_g * self.Lp
            elif side == 'Cold':
                (f_g, h_g, Re_g) = f_h_1phase_Annulus(self.mdot_c / self.NgapsCold, self.ID_o, self.OD_i, Tmean_c, Inputs['pin_c'], self.AS_c)
                dh = self.ID_o - self.OD_i
                dpdz_g = f_g * v_g * self.G_c ** 2 / (2.0 * dh)
                DP_g = dpdz_g * self.Lp
            Outputs = {'Dh': dh, 'h': h_g, 'DELTAP': DP_g, 'Re': Re_g, 'cp': cp_g}
            (h_c, cp_c, PlateOutput_c) = (Outputs['h'], Outputs['cp'], Outputs)
    elif self.HXType == 'Coaxial-HX':
        if self.HXType == 'Plate-HX':
            Inputs = {'AS': self.AS_c, 'T': Tmean_c, 'p': Inputs['pin_c'], 'mdot_gap': self.mdot_c, 'PlateAmplitude': self.PlateAmplitude, 'PlateWavelength': self.PlateWavelength, 'InclinationAngle': self.InclinationAngle, 'Bp': self.Bp, 'Lp': self.Lp}
            Outputs = PHE_1phase_hdP(Inputs)
            (h_c, cp_c, PlateOutput_c) = (Outputs['h'], Outputs['cp'], Outputs)
        elif self.HXType == 'Coaxial-HX':
            self.AS_c.update(CP.PT_INPUTS, Inputs['pin_c'], Tmean_c)
            cp_g = self.AS_c.cpmass()
            v_g = 1 / self.AS_c.rhomass()
            if 'Cold' == 'Hot':
                (f_g, h_g, Re_g) = f_h_1phase_Tube(self.mdot_c, self.ID_i, Tmean_c, Inputs['pin_c'], self.AS_c)
                dh = self.ID_i
                dpdz_g = f_g * v_g * self.G_h ** 2 / (2.0 * dh)
                DP_g = dpdz_g * self.Lp
            elif 'Cold' == 'Cold':
                (f_g, h_g, Re_g) = f_h_1phase_Annulus(self.mdot_c, self.ID_o, self.OD_i, Tmean_c, Inputs['pin_c'], self.AS_c)
                dh = self.ID_o - self.OD_i
                dpdz_g = f_g * v_g * self.G_c ** 2 / (2.0 * dh)
                DP_g = dpdz_g * self.Lp
            Outputs = {'Dh': dh, 'h': h_g, 'DELTAP': DP_g, 'Re': Re_g, 'cp': cp_g}
            (h_c, cp_c, PlateOutput_c) = (Outputs['h'], Outputs['cp'], Outputs)
    cp_c = Inputs['cp_c']
    Q = Inputs['Q']
    R_w = self.Rw
    R_c = 1 / (h_c * self.A_c_wetted)
    T_w = (R_w + R_c) * Q + Inputs['Tmean_c']
    change = 999
    w = 1
    while abs(change) > 1e-06:
        q_flux = Q / (w * self.A_h_wetted)
        (h_h, f_h, cp_h, rho_h) = Petterson_supercritical(Inputs['Tmean_h'], T_w, self.AS_h, self.G_h, self.Dh_h, 0, self.Dh_h / self.Lp, 0, Inputs['pin_h'], q_flux)
        h_h = self.h_r_hot_tuning * h_h
        R_h = 1 / (h_h * self.A_h_wetted)
        Tout_h = Inputs['Tin_h'] - Q / (self.mdot_h * cp_h)
        T_w = Tout_h - R_h * Q
        UA_total = 1 / (1 / (h_h * self.A_h_wetted) + 1 / (h_c * self.A_c_wetted) + self.Rw)
        UA_total = 1 / (1 / (h_h * self.A_h_wetted) + 1 / (h_c * self.A_c_wetted) + self.Rw)
        C = [cp_c * self.mdot_c, cp_h * self.mdot_h]
        Cmin = min(C)
        Cr = Cmin / max(C)
        Qmax = Cmin * (Inputs['Tin_h'] - Inputs['Tin_c'])
        epsilon = Q / Qmax
        if epsilon >= 1.0:
            epsilon = 1.0 - 1e-12
        NTU = 1 / (Cr - 1) * log((epsilon - 1) / (epsilon * Cr - 1))
        UA_req = Cmin * NTU
        change = UA_req / UA_total - w
        w = UA_req / UA_total
    Charge_h = w * self.V_h * rho_h
    self.AS_c.update(CP.PT_INPUTS, self.pin_c, Tmean_c)
    rho_c = self.AS_c.rhomass()
    Charge_c = w * self.V_c * rho_c
    v_h = 1.0 / rho_h
    dpdz_h = -f_h * v_h * self.G_h ** 2 / (2 * self.Dh_h)
    DP_frict_h = dpdz_h * self.Lp * w
    Outputs = {'w': w, 'Tout_h': Inputs['Tin_h'] - Q / (self.mdot_h * cp_h), 'Tout_c': Inputs['Tin_c'] + Q / (self.mdot_c * cp_c), 'Charge_c': Charge_c, 'Charge_h': Charge_h, 'DP_h': DP_frict_h, 'DP_c': -PlateOutput_c['DELTAP'], 'h_h': h_h, 'h_c': h_c, 'q_flux': q_flux, 'cp_h': cp_h}
    o = Inputs
    o.update(**Outputs)
    return o
ACHP
positive
def segmentalpino(morphology, functions): """Co-routine that accepts one line at a time. Yields tuples ``(result, status)`` where ... - result is ``None`` or a segment delimited by ``<alpino_ds>`` and ``</alpino_ds>`` as a list of lines; - status is 1 if the line was consumed, else 0.""" cur = [] inblock = 0 line = (yield (None, CONSUMED)) while line is not None: if line.startswith('<alpino_ds'): cur = ['<?xml version="1.0" encoding="UTF-8"?>', line] inblock = 1 line = (yield (None, CONSUMED)) elif line.startswith('</alpino_ds>'): cur.append(line) rawblock = '\n'.join(cur).encode('utf8') xmlblock = ElementTree.fromstring(rawblock) block = (rawblock, xmlblock) <DeepExtract> def getsubtree(node, parentid, morphology, lemmas): """Parse a subtree of an Alpino tree.""" source = [''] * len(FIELDS) nodeid = int(node.get('id')) + 500 source[WORD] = node.get('word') or '#%s' % nodeid source[LEMMA] = node.get('lemma') or node.get('root') source[MORPH] = node.get('postag') or node.get('frame') source[FUNC] = node.get('rel') if 'cat' in node.keys(): source[TAG] = node.get('cat') if node.get('index'): coindexed[int(node.get('index')) + 500] = source label = node.get('cat') result = ParentedTree(label.upper(), []) for child in node: subtree = getsubtree(child, nodeid, morphology, lemmas) if subtree and ('word' in child.keys() or 'cat' in child.keys()): subtree.source[PARENT] = nodeid result.append(subtree) if not result: item = None elif 'word' in node.keys(): source[TAG] = node.get('pt') or node.get('pos') if node.get('index'): coindexed[int(node.get('index')) + 500] = source result = ParentedTree(source[TAG], list(range(int(node.get('begin')), int(node.get('end'))))) handlemorphology(morphology, lemmas, result, source, sent) elif 'index' in node.keys(): coindexation[int(node.get('index')) + 500].extend([node.get('rel'), parentid]) item = None source[:] = [a.replace(' ', '_') if a else a for a in source] result.source = source item = result coindexed = {} coindexation = defaultdict(list) (rawblock, xmlblock) = block sent = xmlblock.find('sentence').text.split(' ') tree = getsubtree(xmlblock.find('node'), 0, morphology, lemmas) for i in coindexation: coindexed[i].extend(coindexation[i]) comment = xmlblock.find('comments/comment') if comment is not None: comment = comment.text handlefunctions(functions, tree, morphology=morphology) item = Item(tree, sent, comment, rawblock) </DeepExtract> line = (yield (((item.tree, item.sent, item.comment),), CONSUMED)) inblock = 0 cur = [] elif line.strip(): if inblock == 1: cur.append(line) line = line.lstrip() line = (yield (None, CONSUMED if inblock or line.startswith('<?xml') else not CONSUMED)) else: line = (yield (None, not CONSUMED))
def segmentalpino(morphology, functions): """Co-routine that accepts one line at a time. Yields tuples ``(result, status)`` where ... - result is ``None`` or a segment delimited by ``<alpino_ds>`` and ``</alpino_ds>`` as a list of lines; - status is 1 if the line was consumed, else 0.""" cur = [] inblock = 0 line = (yield (None, CONSUMED)) while line is not None: if line.startswith('<alpino_ds'): cur = ['<?xml version="1.0" encoding="UTF-8"?>', line] inblock = 1 line = (yield (None, CONSUMED)) elif line.startswith('</alpino_ds>'): cur.append(line) rawblock = '\n'.join(cur).encode('utf8') xmlblock = ElementTree.fromstring(rawblock) block = (rawblock, xmlblock) def getsubtree(node, parentid, morphology, lemmas): """Parse a subtree of an Alpino tree.""" source = [''] * len(FIELDS) nodeid = int(node.get('id')) + 500 source[WORD] = node.get('word') or '#%s' % nodeid source[LEMMA] = node.get('lemma') or node.get('root') source[MORPH] = node.get('postag') or node.get('frame') source[FUNC] = node.get('rel') if 'cat' in node.keys(): source[TAG] = node.get('cat') if node.get('index'): coindexed[int(node.get('index')) + 500] = source label = node.get('cat') result = ParentedTree(label.upper(), []) for child in node: subtree = getsubtree(child, nodeid, morphology, lemmas) if subtree and ('word' in child.keys() or 'cat' in child.keys()): subtree.source[PARENT] = nodeid result.append(subtree) if not result: item = None elif 'word' in node.keys(): source[TAG] = node.get('pt') or node.get('pos') if node.get('index'): coindexed[int(node.get('index')) + 500] = source result = ParentedTree(source[TAG], list(range(int(node.get('begin')), int(node.get('end'))))) handlemorphology(morphology, lemmas, result, source, sent) elif 'index' in node.keys(): coindexation[int(node.get('index')) + 500].extend([node.get('rel'), parentid]) item = None source[:] = [a.replace(' ', '_') if a else a for a in source] result.source = source item = result coindexed = {} coindexation = defaultdict(list) (rawblock, xmlblock) = block sent = xmlblock.find('sentence').text.split(' ') tree = getsubtree(xmlblock.find('node'), 0, morphology, lemmas) for i in coindexation: coindexed[i].extend(coindexation[i]) comment = xmlblock.find('comments/comment') if comment is not None: comment = comment.text handlefunctions(functions, tree, morphology=morphology) item = Item(tree, sent, comment, rawblock) line = (yield (((item.tree, item.sent, item.comment),), CONSUMED)) inblock = 0 cur = [] elif line.strip(): if inblock == 1: cur.append(line) line = line.lstrip() line = (yield (None, CONSUMED if inblock or line.startswith('<?xml') else not CONSUMED)) else: line = (yield (None, not CONSUMED))
disco-dop
positive
def evaluate(generator, model, iou_threshold=0.5, score_threshold=0.01, max_detections=100, visualize=False, epoch=0): """ Evaluate a given dataset using a given model. Args: generator: The generator that represents the dataset to evaluate. model: The model to evaluate. iou_threshold: The threshold used to consider when a detection is positive or negative. score_threshold: The score confidence threshold to use for detections. max_detections: The maximum number of detections to use per image. visualize: Show the visualized detections or not. Returns: A dict mapping class names to mAP scores. """ <DeepExtract> all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in range(generator.size())] for i in progressbar.progressbar(range(generator.size()), prefix='Running network: '): image = generator.load_image(i) src_image = image.copy() (h, w) = image.shape[:2] anchors = generator.anchors (image, scale, offset_h, offset_w) = generator.preprocess_image(image) (boxes, scores, labels) = model.predict_on_batch([np.expand_dims(image, axis=0), np.expand_dims(anchors, axis=0)]) boxes[..., [0, 2]] = boxes[..., [0, 2]] - offset_w boxes[..., [1, 3]] = boxes[..., [1, 3]] - offset_h boxes /= scale boxes[:, :, 0] = np.clip(boxes[:, :, 0], 0, w - 1) boxes[:, :, 1] = np.clip(boxes[:, :, 1], 0, h - 1) boxes[:, :, 2] = np.clip(boxes[:, :, 2], 0, w - 1) boxes[:, :, 3] = np.clip(boxes[:, :, 3], 0, h - 1) indices = np.where(scores[0, :] > score_threshold)[0] scores = scores[0][indices] scores_sort = np.argsort(-scores)[:max_detections] image_boxes = boxes[0, indices[scores_sort], :] image_scores = scores[scores_sort] image_labels = labels[0, indices[scores_sort]] detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1) if visualize: draw_annotations(src_image, generator.load_annotations(i), label_to_name=generator.label_to_name) draw_detections(src_image, detections[:5, :4], detections[:5, 4], detections[:5, 5].astype(np.int32), label_to_name=generator.label_to_name, score_threshold=score_threshold) cv2.namedWindow('{}'.format(i), cv2.WINDOW_NORMAL) cv2.imshow('{}'.format(i), src_image) cv2.waitKey(0) for class_id in range(generator.num_classes()): all_detections[i][class_id] = detections[detections[:, -1] == class_id, :-1] all_detections = all_detections </DeepExtract> <DeepExtract> all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())] for i in progressbar.progressbar(range(generator.size()), prefix='Parsing annotations: '): annotations = generator.load_annotations(i) for label in range(generator.num_classes()): if not generator.has_label(label): continue all_annotations[i][label] = annotations['bboxes'][annotations['labels'] == label, :].copy() all_annotations = all_annotations </DeepExtract> average_precisions = {} num_tp = 0 num_fp = 0 for label in range(generator.num_classes()): if not generator.has_label(label): continue false_positives = np.zeros((0,)) true_positives = np.zeros((0,)) scores = np.zeros((0,)) num_annotations = 0.0 for i in range(generator.size()): detections = all_detections[i][label] annotations = all_annotations[i][label] num_annotations += annotations.shape[0] detected_annotations = [] for d in detections: scores = np.append(scores, d[4]) if annotations.shape[0] == 0: false_positives = np.append(false_positives, 1) true_positives = np.append(true_positives, 0) continue overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations) assigned_annotation = np.argmax(overlaps, axis=1) max_overlap = overlaps[0, assigned_annotation] if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations: false_positives = np.append(false_positives, 0) true_positives = np.append(true_positives, 1) detected_annotations.append(assigned_annotation) else: false_positives = np.append(false_positives, 1) true_positives = np.append(true_positives, 0) if num_annotations == 0: average_precisions[label] = (0, 0) continue indices = np.argsort(-scores) false_positives = false_positives[indices] true_positives = true_positives[indices] false_positives = np.cumsum(false_positives) true_positives = np.cumsum(true_positives) if false_positives.shape[0] == 0: num_fp += 0 else: num_fp += false_positives[-1] if true_positives.shape[0] == 0: num_tp += 0 else: num_tp += true_positives[-1] recall = true_positives / num_annotations precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps) <DeepExtract> mrec = np.concatenate(([0.0], recall, [1.0])) mpre = np.concatenate(([0.0], precision, [0.0])) for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) i = np.where(mrec[1:] != mrec[:-1])[0] ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) average_precision = ap </DeepExtract> average_precisions[label] = (average_precision, num_annotations) print('num_fp={}, num_tp={}'.format(num_fp, num_tp)) return average_precisions
def evaluate(generator, model, iou_threshold=0.5, score_threshold=0.01, max_detections=100, visualize=False, epoch=0): """ Evaluate a given dataset using a given model. Args: generator: The generator that represents the dataset to evaluate. model: The model to evaluate. iou_threshold: The threshold used to consider when a detection is positive or negative. score_threshold: The score confidence threshold to use for detections. max_detections: The maximum number of detections to use per image. visualize: Show the visualized detections or not. Returns: A dict mapping class names to mAP scores. """ all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in range(generator.size())] for i in progressbar.progressbar(range(generator.size()), prefix='Running network: '): image = generator.load_image(i) src_image = image.copy() (h, w) = image.shape[:2] anchors = generator.anchors (image, scale, offset_h, offset_w) = generator.preprocess_image(image) (boxes, scores, labels) = model.predict_on_batch([np.expand_dims(image, axis=0), np.expand_dims(anchors, axis=0)]) boxes[..., [0, 2]] = boxes[..., [0, 2]] - offset_w boxes[..., [1, 3]] = boxes[..., [1, 3]] - offset_h boxes /= scale boxes[:, :, 0] = np.clip(boxes[:, :, 0], 0, w - 1) boxes[:, :, 1] = np.clip(boxes[:, :, 1], 0, h - 1) boxes[:, :, 2] = np.clip(boxes[:, :, 2], 0, w - 1) boxes[:, :, 3] = np.clip(boxes[:, :, 3], 0, h - 1) indices = np.where(scores[0, :] > score_threshold)[0] scores = scores[0][indices] scores_sort = np.argsort(-scores)[:max_detections] image_boxes = boxes[0, indices[scores_sort], :] image_scores = scores[scores_sort] image_labels = labels[0, indices[scores_sort]] detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1) if visualize: draw_annotations(src_image, generator.load_annotations(i), label_to_name=generator.label_to_name) draw_detections(src_image, detections[:5, :4], detections[:5, 4], detections[:5, 5].astype(np.int32), label_to_name=generator.label_to_name, score_threshold=score_threshold) cv2.namedWindow('{}'.format(i), cv2.WINDOW_NORMAL) cv2.imshow('{}'.format(i), src_image) cv2.waitKey(0) for class_id in range(generator.num_classes()): all_detections[i][class_id] = detections[detections[:, -1] == class_id, :-1] all_detections = all_detections all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())] for i in progressbar.progressbar(range(generator.size()), prefix='Parsing annotations: '): annotations = generator.load_annotations(i) for label in range(generator.num_classes()): if not generator.has_label(label): continue all_annotations[i][label] = annotations['bboxes'][annotations['labels'] == label, :].copy() all_annotations = all_annotations average_precisions = {} num_tp = 0 num_fp = 0 for label in range(generator.num_classes()): if not generator.has_label(label): continue false_positives = np.zeros((0,)) true_positives = np.zeros((0,)) scores = np.zeros((0,)) num_annotations = 0.0 for i in range(generator.size()): detections = all_detections[i][label] annotations = all_annotations[i][label] num_annotations += annotations.shape[0] detected_annotations = [] for d in detections: scores = np.append(scores, d[4]) if annotations.shape[0] == 0: false_positives = np.append(false_positives, 1) true_positives = np.append(true_positives, 0) continue overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations) assigned_annotation = np.argmax(overlaps, axis=1) max_overlap = overlaps[0, assigned_annotation] if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations: false_positives = np.append(false_positives, 0) true_positives = np.append(true_positives, 1) detected_annotations.append(assigned_annotation) else: false_positives = np.append(false_positives, 1) true_positives = np.append(true_positives, 0) if num_annotations == 0: average_precisions[label] = (0, 0) continue indices = np.argsort(-scores) false_positives = false_positives[indices] true_positives = true_positives[indices] false_positives = np.cumsum(false_positives) true_positives = np.cumsum(true_positives) if false_positives.shape[0] == 0: num_fp += 0 else: num_fp += false_positives[-1] if true_positives.shape[0] == 0: num_tp += 0 else: num_tp += true_positives[-1] recall = true_positives / num_annotations precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps) mrec = np.concatenate(([0.0], recall, [1.0])) mpre = np.concatenate(([0.0], precision, [0.0])) for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) i = np.where(mrec[1:] != mrec[:-1])[0] ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) average_precision = ap average_precisions[label] = (average_precision, num_annotations) print('num_fp={}, num_tp={}'.format(num_fp, num_tp)) return average_precisions
ensembleObjectDetection
positive
def addEdge(self, frm, to): if frm not in self.vertexInfo: <DeepExtract> self.vertexCount += 1 newVertex = Vertex(frm) self.vertexInfo[frm] = newVertex fromVertex = newVertex </DeepExtract> else: fromVertex = self.vertexInfo[frm] if to not in self.vertexInfo: <DeepExtract> self.vertexCount += 1 newVertex = Vertex(to) self.vertexInfo[to] = newVertex toVertex = newVertex </DeepExtract> else: toVertex = self.vertexInfo[to] fromVertex.addNeighbour(toVertex) toVertex.inDegree += 1
def addEdge(self, frm, to): if frm not in self.vertexInfo: self.vertexCount += 1 newVertex = Vertex(frm) self.vertexInfo[frm] = newVertex fromVertex = newVertex else: fromVertex = self.vertexInfo[frm] if to not in self.vertexInfo: self.vertexCount += 1 newVertex = Vertex(to) self.vertexInfo[to] = newVertex toVertex = newVertex else: toVertex = self.vertexInfo[to] fromVertex.addNeighbour(toVertex) toVertex.inDegree += 1
challenges
positive
def testRandomVerticalFlip(self): preprocess_options = [(preprocessor.random_vertical_flip, {})] <DeepExtract> images_r = tf.constant([[[0, 0, 0, 0], [-1, -1, 0, 0], [-1, 0, 0, 0], [0.5, 0.5, 0, 0]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[-1, -1, 0, 0], [-1, -1, 0, 0], [-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[0, 0, 0.5, -1], [-1, -1, 0, 0.5], [-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) images = images </DeepExtract> <DeepExtract> boxes = tf.constant([[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) boxes = boxes </DeepExtract> tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes} <DeepExtract> images_r = tf.constant([[[0.5, 0.5, 0, 0], [-1, 0, 0, 0], [-1, -1, 0, 0], [0, 0, 0, 0]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5], [-1, -1, 0, 0], [-1, -1, 0, 0]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1], [-1, -1, 0, 0.5], [0, 0, 0.5, -1]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) images_expected1 = images </DeepExtract> <DeepExtract> boxes = tf.constant([[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) boxes_expected1 = boxes </DeepExtract> images_expected2 = images boxes_expected2 = boxes tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) boxes_diff_expected = tf.zeros_like(boxes_diff) images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) with self.test_session() as sess: (images_diff_, images_diff_expected_, boxes_diff_, boxes_diff_expected_) = sess.run([images_diff, images_diff_expected, boxes_diff, boxes_diff_expected]) self.assertAllClose(boxes_diff_, boxes_diff_expected_) self.assertAllClose(images_diff_, images_diff_expected_)
def testRandomVerticalFlip(self): preprocess_options = [(preprocessor.random_vertical_flip, {})] images_r = tf.constant([[[0, 0, 0, 0], [-1, -1, 0, 0], [-1, 0, 0, 0], [0.5, 0.5, 0, 0]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[-1, -1, 0, 0], [-1, -1, 0, 0], [-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[0, 0, 0.5, -1], [-1, -1, 0, 0.5], [-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) images = images boxes = tf.constant([[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) boxes = boxes tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes} images_r = tf.constant([[[0.5, 0.5, 0, 0], [-1, 0, 0, 0], [-1, -1, 0, 0], [0, 0, 0, 0]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5], [-1, -1, 0, 0], [-1, -1, 0, 0]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1], [-1, -1, 0, 0.5], [0, 0, 0.5, -1]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) images_expected1 = images boxes = tf.constant([[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) boxes_expected1 = boxes images_expected2 = images boxes_expected2 = boxes tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) boxes_diff_expected = tf.zeros_like(boxes_diff) images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) with self.test_session() as sess: (images_diff_, images_diff_expected_, boxes_diff_, boxes_diff_expected_) = sess.run([images_diff, images_diff_expected, boxes_diff, boxes_diff_expected]) self.assertAllClose(boxes_diff_, boxes_diff_expected_) self.assertAllClose(images_diff_, images_diff_expected_)
Accident-Detection-on-Indian-Roads
positive
@policy.enforce('armada:get_release') def on_get(self, req, resp): """Controller for listing Helm releases. """ try: with self.get_helm(req, resp) as helm: <DeepExtract> LOG.debug('Getting helm releases') releases = {} for release in helm.list_release_ids(): releases.setdefault(release.namespace, []) releases[release.namespace].append(release.name) releases = releases </DeepExtract> resp.text = json.dumps({'releases': releases}) resp.content_type = 'application/json' resp.status = falcon.HTTP_200 except Exception as e: err_message = 'Unable to find Helm Releases: {}'.format(e) self.error(req.context, err_message) self.return_error(resp, falcon.HTTP_500, message=err_message)
@policy.enforce('armada:get_release') def on_get(self, req, resp): """Controller for listing Helm releases. """ try: with self.get_helm(req, resp) as helm: LOG.debug('Getting helm releases') releases = {} for release in helm.list_release_ids(): releases.setdefault(release.namespace, []) releases[release.namespace].append(release.name) releases = releases resp.text = json.dumps({'releases': releases}) resp.content_type = 'application/json' resp.status = falcon.HTTP_200 except Exception as e: err_message = 'Unable to find Helm Releases: {}'.format(e) self.error(req.context, err_message) self.return_error(resp, falcon.HTTP_500, message=err_message)
armada
positive
def filter_content_keys(obj: dict[Any, Any]) -> dict[Any, Any]: """Filter out some keys when showing image content. :param obj: The object from which keys should be removed :returns: The object with keys removed """ if isinstance(obj, list): working = [filter_content_keys(x) for x in obj] return working if isinstance(obj, dict): working = {} for (k, val) in obj.items(): if not k.startswith('__'): <DeepExtract> if isinstance(val, list): working = [filter_content_keys(x) for x in val] working[k] = working if isinstance(val, dict): working = {} for (k, val) in val.items(): if not k.startswith('__'): working[k] = filter_content_keys(val) working[k] = working working[k] = val </DeepExtract> return working return obj
def filter_content_keys(obj: dict[Any, Any]) -> dict[Any, Any]: """Filter out some keys when showing image content. :param obj: The object from which keys should be removed :returns: The object with keys removed """ if isinstance(obj, list): working = [filter_content_keys(x) for x in obj] return working if isinstance(obj, dict): working = {} for (k, val) in obj.items(): if not k.startswith('__'): if isinstance(val, list): working = [filter_content_keys(x) for x in val] working[k] = working if isinstance(val, dict): working = {} for (k, val) in val.items(): if not k.startswith('__'): working[k] = filter_content_keys(val) working[k] = working working[k] = val return working return obj
ansible-navigator
positive
def main(): parser = argparse.ArgumentParser() parser.add_argument('--agent1', required=True) parser.add_argument('--agent2', required=True) parser.add_argument('--num-games', '-n', type=int, default=10) args = parser.parse_args() agent1 = agent.load_policy_agent(h5py.File(args.agent1)) agent2 = agent.load_policy_agent(h5py.File(args.agent2)) wins = 0 losses = 0 color1 = Player.black for i in range(args.num_games): print('Simulating game %d/%d...' % (i + 1, args.num_games)) if color1 == Player.black: (black_player, white_player) = (agent1, agent2) else: (white_player, black_player) = (agent1, agent2) <DeepExtract> moves = [] game = GameState.new_game(BOARD_SIZE) agents = {Player.black: black_player, Player.white: white_player} while not game.is_over(): next_move = agents[game.next_player].select_move(game) moves.append(next_move) game = game.apply_move(next_move) print_board(game.board) game_result = scoring.compute_game_result(game) print(game_result) game_record = GameRecord(moves=moves, winner=game_result.winner, margin=game_result.winning_margin) </DeepExtract> if game_record.winner == color1: wins += 1 else: losses += 1 color1 = color1.other print('Agent 1 record: %d/%d' % (wins, wins + losses))
def main(): parser = argparse.ArgumentParser() parser.add_argument('--agent1', required=True) parser.add_argument('--agent2', required=True) parser.add_argument('--num-games', '-n', type=int, default=10) args = parser.parse_args() agent1 = agent.load_policy_agent(h5py.File(args.agent1)) agent2 = agent.load_policy_agent(h5py.File(args.agent2)) wins = 0 losses = 0 color1 = Player.black for i in range(args.num_games): print('Simulating game %d/%d...' % (i + 1, args.num_games)) if color1 == Player.black: (black_player, white_player) = (agent1, agent2) else: (white_player, black_player) = (agent1, agent2) moves = [] game = GameState.new_game(BOARD_SIZE) agents = {Player.black: black_player, Player.white: white_player} while not game.is_over(): next_move = agents[game.next_player].select_move(game) moves.append(next_move) game = game.apply_move(next_move) print_board(game.board) game_result = scoring.compute_game_result(game) print(game_result) game_record = GameRecord(moves=moves, winner=game_result.winner, margin=game_result.winning_margin) if game_record.winner == color1: wins += 1 else: losses += 1 color1 = color1.other print('Agent 1 record: %d/%d' % (wins, wins + losses))
deep_learning_and_the_game_of_go
positive
def _reregister(self) -> None: if self._regid: <DeepExtract> if self._regid is not None: self._bus.unregister_object(self._regid) self._regid = None </DeepExtract> <DeepExtract> node_xml = f"<node name='/'><interface name='{self._interface_name}'>" for (property_name, signature) in self._properties.items(): node_xml += f"<property name='{property_name}' type='{signature}' access='read'/>" for (method_name, method_info) in self._methods.items(): node_xml += f"<method name='{method_name}'>" for argument in method_info[0]: node_xml += f"<arg type='{argument}' direction='in'/>" for result in method_info[1]: node_xml += f"<arg type='{result}' direction='out'/>" node_xml += '</method>' for (signal_name, signal_signature) in self._signals.items(): node_xml += f"<signal name='{signal_name}'>" for signature in signal_signature: node_xml += f"<arg type='{signature}'/>" node_xml += '</signal>' node_xml += '</interface></node>' node_info = Gio.DBusNodeInfo.new_for_xml(node_xml) regid = self._bus.register_object(self._path, node_info.interfaces[0], self._handle_method_call, self._get_property, None) if regid: self._regid = regid else: raise GLib.Error(f'Failed to register object with path: {self._path}') </DeepExtract>
def _reregister(self) -> None: if self._regid: if self._regid is not None: self._bus.unregister_object(self._regid) self._regid = None node_xml = f"<node name='/'><interface name='{self._interface_name}'>" for (property_name, signature) in self._properties.items(): node_xml += f"<property name='{property_name}' type='{signature}' access='read'/>" for (method_name, method_info) in self._methods.items(): node_xml += f"<method name='{method_name}'>" for argument in method_info[0]: node_xml += f"<arg type='{argument}' direction='in'/>" for result in method_info[1]: node_xml += f"<arg type='{result}' direction='out'/>" node_xml += '</method>' for (signal_name, signal_signature) in self._signals.items(): node_xml += f"<signal name='{signal_name}'>" for signature in signal_signature: node_xml += f"<arg type='{signature}'/>" node_xml += '</signal>' node_xml += '</interface></node>' node_info = Gio.DBusNodeInfo.new_for_xml(node_xml) regid = self._bus.register_object(self._path, node_info.interfaces[0], self._handle_method_call, self._get_property, None) if regid: self._regid = regid else: raise GLib.Error(f'Failed to register object with path: {self._path}')
blueman
positive
def load_optimizer_state_dict(optimizer, state_dict): state_dict = deepcopy(state_dict) groups = optimizer.param_groups saved_groups = state_dict['param_groups'] if len(groups) != len(saved_groups): raise ValueError('loaded state dict has a different number of parameter groups') param_lens = (len(g['params']) for g in groups) saved_lens = (len(g['params']) for g in saved_groups) if any((p_len != s_len for (p_len, s_len) in zip(param_lens, saved_lens))): raise ValueError("loaded state dict contains a parameter group that doesn't match the size of optimizer's group") id_map = {old_id: p for (old_id, p) in zip(chain(*(g['params'] for g in saved_groups)), chain(*(g['params'] for g in groups)))} def cast(param, value): """Make a deep copy of value, casting all tensors to device of param.""" if torch.is_tensor(value): if isinstance(param.data, (torch.FloatTensor, torch.cuda.FloatTensor, torch.DoubleTensor, torch.cuda.DoubleTensor, torch.HalfTensor, torch.cuda.HalfTensor)): value = value.type_as(param.data) value = value.cuda(param.get_device()) if param.is_cuda else value.cpu() return value elif isinstance(value, dict): return {k: cast(param, v) for (k, v) in value.items()} elif isinstance(value, Iterable): return type(value)((cast(param, v) for v in value)) else: return value state = defaultdict(dict) for (k, v) in state_dict['state'].items(): if k in id_map: param = id_map[k] <DeepExtract> if torch.is_tensor(v): if isinstance(param.data, (torch.FloatTensor, torch.cuda.FloatTensor, torch.DoubleTensor, torch.cuda.DoubleTensor, torch.HalfTensor, torch.cuda.HalfTensor)): v = v.type_as(param.data) v = v.cuda(param.get_device()) if param.is_cuda else v.cpu() state[param] = v elif isinstance(v, dict): state[param] = {k: cast(param, v) for (k, v) in v.items()} elif isinstance(v, Iterable): state[param] = type(v)((cast(param, v) for v in v)) else: state[param] = v </DeepExtract> else: state[k] = v def update_group(group, new_group): new_group['params'] = group['params'] return new_group param_groups = [update_group(g, ng) for (g, ng) in zip(groups, saved_groups)] optimizer.__setstate__({'state': state, 'param_groups': param_groups})
def load_optimizer_state_dict(optimizer, state_dict): state_dict = deepcopy(state_dict) groups = optimizer.param_groups saved_groups = state_dict['param_groups'] if len(groups) != len(saved_groups): raise ValueError('loaded state dict has a different number of parameter groups') param_lens = (len(g['params']) for g in groups) saved_lens = (len(g['params']) for g in saved_groups) if any((p_len != s_len for (p_len, s_len) in zip(param_lens, saved_lens))): raise ValueError("loaded state dict contains a parameter group that doesn't match the size of optimizer's group") id_map = {old_id: p for (old_id, p) in zip(chain(*(g['params'] for g in saved_groups)), chain(*(g['params'] for g in groups)))} def cast(param, value): """Make a deep copy of value, casting all tensors to device of param.""" if torch.is_tensor(value): if isinstance(param.data, (torch.FloatTensor, torch.cuda.FloatTensor, torch.DoubleTensor, torch.cuda.DoubleTensor, torch.HalfTensor, torch.cuda.HalfTensor)): value = value.type_as(param.data) value = value.cuda(param.get_device()) if param.is_cuda else value.cpu() return value elif isinstance(value, dict): return {k: cast(param, v) for (k, v) in value.items()} elif isinstance(value, Iterable): return type(value)((cast(param, v) for v in value)) else: return value state = defaultdict(dict) for (k, v) in state_dict['state'].items(): if k in id_map: param = id_map[k] if torch.is_tensor(v): if isinstance(param.data, (torch.FloatTensor, torch.cuda.FloatTensor, torch.DoubleTensor, torch.cuda.DoubleTensor, torch.HalfTensor, torch.cuda.HalfTensor)): v = v.type_as(param.data) v = v.cuda(param.get_device()) if param.is_cuda else v.cpu() state[param] = v elif isinstance(v, dict): state[param] = {k: cast(param, v) for (k, v) in v.items()} elif isinstance(v, Iterable): state[param] = type(v)((cast(param, v) for v in v)) else: state[param] = v else: state[k] = v def update_group(group, new_group): new_group['params'] = group['params'] return new_group param_groups = [update_group(g, ng) for (g, ng) in zip(groups, saved_groups)] optimizer.__setstate__({'state': state, 'param_groups': param_groups})
Detectron.pytorch
positive
def insert(self, position, newChild): if isinstance(newChild, basestring) and (not isinstance(newChild, NavigableString)): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent is not None: if newChild.parent is self: <DeepExtract> for (i, child) in enumerate(self.contents): if child is newChild: index = i raise ValueError('Tag.index: element not in tag') </DeepExtract> if index > position: position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position - 1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild)
def insert(self, position, newChild): if isinstance(newChild, basestring) and (not isinstance(newChild, NavigableString)): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent is not None: if newChild.parent is self: for (i, child) in enumerate(self.contents): if child is newChild: index = i raise ValueError('Tag.index: element not in tag') if index > position: position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position - 1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild)
commix
positive
def __init__(self, package_name, package_version, input_dir_path, artifact_paths): self._dry_run = os.environ.get('DRY_RUN', '') self._pkg_name = package_name self._pkg_version = package_version self._input_dir_path = input_dir_path self._universe_url_prefix = os.environ.get('UNIVERSE_URL_PREFIX', 'https://universe-converter.mesosphere.com/transform?url=') if not os.path.isdir(input_dir_path): raise Exception('Provided package path is not a directory: {}'.format(input_dir_path)) self._artifact_paths = [] for artifact_path in artifact_paths: if not os.path.isfile(artifact_path): err = 'Provided package path is not a file: {} (full list: {})'.format(artifact_path, artifact_paths) raise Exception(err) self._artifact_paths.append(artifact_path) <DeepExtract> s3_bucket = os.environ.get('S3_BUCKET') or 'infinity-artifacts' logger.info('Using artifact bucket: {}'.format(s3_bucket)) s3_dir_path = os.environ.get('S3_DIR_PATH') or 'autodelete7d' s3_dir_name = os.environ.get('S3_DIR_NAME') if not s3_dir_name: s3_dir_name = '{}-{}'.format(time.strftime('%Y%m%d-%H%M%S'), ''.join([random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)])) s3_directory_url = os.environ.get('S3_URL', 's3://{}/{}/{}/{}'.format(s3_bucket, s3_dir_path, self._pkg_name, s3_dir_name)) http_directory_url = os.environ.get('ARTIFACT_DIR', 'https://{}.s3.amazonaws.com/{}/{}/{}'.format(s3_bucket, s3_dir_path, self._pkg_name, s3_dir_name)) (s3_directory_url, self._http_directory_url) = (s3_directory_url, http_directory_url) </DeepExtract> self._uploader = universe.S3Uploader(s3_directory_url, self._dry_run)
def __init__(self, package_name, package_version, input_dir_path, artifact_paths): self._dry_run = os.environ.get('DRY_RUN', '') self._pkg_name = package_name self._pkg_version = package_version self._input_dir_path = input_dir_path self._universe_url_prefix = os.environ.get('UNIVERSE_URL_PREFIX', 'https://universe-converter.mesosphere.com/transform?url=') if not os.path.isdir(input_dir_path): raise Exception('Provided package path is not a directory: {}'.format(input_dir_path)) self._artifact_paths = [] for artifact_path in artifact_paths: if not os.path.isfile(artifact_path): err = 'Provided package path is not a file: {} (full list: {})'.format(artifact_path, artifact_paths) raise Exception(err) self._artifact_paths.append(artifact_path) s3_bucket = os.environ.get('S3_BUCKET') or 'infinity-artifacts' logger.info('Using artifact bucket: {}'.format(s3_bucket)) s3_dir_path = os.environ.get('S3_DIR_PATH') or 'autodelete7d' s3_dir_name = os.environ.get('S3_DIR_NAME') if not s3_dir_name: s3_dir_name = '{}-{}'.format(time.strftime('%Y%m%d-%H%M%S'), ''.join([random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)])) s3_directory_url = os.environ.get('S3_URL', 's3://{}/{}/{}/{}'.format(s3_bucket, s3_dir_path, self._pkg_name, s3_dir_name)) http_directory_url = os.environ.get('ARTIFACT_DIR', 'https://{}.s3.amazonaws.com/{}/{}/{}'.format(s3_bucket, s3_dir_path, self._pkg_name, s3_dir_name)) (s3_directory_url, self._http_directory_url) = (s3_directory_url, http_directory_url) self._uploader = universe.S3Uploader(s3_directory_url, self._dry_run)
dcos-kafka-service
positive
def get_known_resources_missing_tags(self): non_compliant_resources = {} audited_types = dbconfig.get('audit_scope', NS_AUDITOR_REQUIRED_TAGS, {'enabled': []})['enabled'] try: resources = filter(lambda resource_info: resource_info[0] in audited_types, self.resource_classes.items()) for (resource_name, resource_class) in resources: for (resource_id, resource) in resource_class.get_all().items(): <DeepExtract> missing_tags = [] notes = [] resource_tags = {tag.key.lower(): tag.value for tag in resource.tags} if resource.resource_type in self.alert_schedule: target_accounts = self.alert_schedule[resource.resource_type]['scope'] else: target_accounts = self.alert_schedule['*']['scope'] if not (resource.account.account_name in target_accounts or '*' in target_accounts): (missing_tags, notes) = (missing_tags, notes) if self.audit_ignore_tag.lower() in resource_tags: (missing_tags, notes) = (missing_tags, notes) required_tags = list(self.required_tags) if self.gdpr_enabled and resource.account.account_name in self.gdpr_accounts: required_tags.append(self.gdpr_tag) for key in [tag.lower() for tag in required_tags]: if key not in resource_tags: missing_tags.append(key) elif not self.validate_tag(key, resource_tags[key]): missing_tags.append(key) notes.append('{} tag is not valid'.format(key)) if missing_tags and resource.resource_type == 'aws_rds_instance': notes.append('Instance name = {}'.format(resource.instance_name)) (missing_tags, notes) = (missing_tags, notes) </DeepExtract> if missing_tags: issue_id = get_resource_id('reqtag', resource_id) non_compliant_resources[issue_id] = {'issue_id': issue_id, 'missing_tags': missing_tags, 'notes': notes, 'resource_id': resource_id, 'resource': resource} finally: db.session.rollback() return non_compliant_resources
def get_known_resources_missing_tags(self): non_compliant_resources = {} audited_types = dbconfig.get('audit_scope', NS_AUDITOR_REQUIRED_TAGS, {'enabled': []})['enabled'] try: resources = filter(lambda resource_info: resource_info[0] in audited_types, self.resource_classes.items()) for (resource_name, resource_class) in resources: for (resource_id, resource) in resource_class.get_all().items(): missing_tags = [] notes = [] resource_tags = {tag.key.lower(): tag.value for tag in resource.tags} if resource.resource_type in self.alert_schedule: target_accounts = self.alert_schedule[resource.resource_type]['scope'] else: target_accounts = self.alert_schedule['*']['scope'] if not (resource.account.account_name in target_accounts or '*' in target_accounts): (missing_tags, notes) = (missing_tags, notes) if self.audit_ignore_tag.lower() in resource_tags: (missing_tags, notes) = (missing_tags, notes) required_tags = list(self.required_tags) if self.gdpr_enabled and resource.account.account_name in self.gdpr_accounts: required_tags.append(self.gdpr_tag) for key in [tag.lower() for tag in required_tags]: if key not in resource_tags: missing_tags.append(key) elif not self.validate_tag(key, resource_tags[key]): missing_tags.append(key) notes.append('{} tag is not valid'.format(key)) if missing_tags and resource.resource_type == 'aws_rds_instance': notes.append('Instance name = {}'.format(resource.instance_name)) (missing_tags, notes) = (missing_tags, notes) if missing_tags: issue_id = get_resource_id('reqtag', resource_id) non_compliant_resources[issue_id] = {'issue_id': issue_id, 'missing_tags': missing_tags, 'notes': notes, 'resource_id': resource_id, 'resource': resource} finally: db.session.rollback() return non_compliant_resources
cloud-inquisitor
positive
@classmethod def add(cls, server, user=''): """Add an entry to server: user dict""" if not cls.inited: addCleanupCallback(cls.cleanup) if not user: <DeepExtract> user = os.environ.get('SUDO_USER', False) or (quietRun('who am i').split() or [False])[0] or quietRun('whoami').strip() </DeepExtract> cls.serveruser[server] = user
@classmethod def add(cls, server, user=''): """Add an entry to server: user dict""" if not cls.inited: addCleanupCallback(cls.cleanup) if not user: user = os.environ.get('SUDO_USER', False) or (quietRun('who am i').split() or [False])[0] or quietRun('whoami').strip() cls.serveruser[server] = user
containernet
positive
def close(self): if not self.closed: <DeepExtract> global _event_handlers _event_handlers[self.event.value] check_status(system_table.BootServices.contents.CloseEvent(self.event)) del _event_handlers[self.event.value] </DeepExtract> self.closed = True
def close(self): if not self.closed: global _event_handlers _event_handlers[self.event.value] check_status(system_table.BootServices.contents.CloseEvent(self.event)) del _event_handlers[self.event.value] self.closed = True
bits
positive
def set_lorbit(self): """ Set the flag that controls the projectors/decomposition onto orbitals. See: https://www.vasp.at/wiki/index.php/LORBIT """ <DeepExtract> try: wigner_seitz_radius = self._parameters.bands.wigner_seitz_radius if isinstance(wigner_seitz_radius, list): if wigner_seitz_radius[0]: self._set_simple('rwigs', wigner_seitz_radius) else: raise ValueError('The parameter wigner_seitz_radius should be supplied as a list of floats bigger than zero.') except AttributeError: pass </DeepExtract> try: if self._parameters.bands.decompose_bands: if self._parameters.bands.decompose_wave: raise ValueError('Only projections/decompositions on the bands or the wave function are allowed.') wigner_seitz_radius = False try: if abs(self._incar.rwigs[0]) > 1e-08: wigner_seitz_radius = True except AttributeError: pass if self._parameters.bands.decompose_auto: <DeepExtract> try: self._incar['lorbit'] = OrbitEnum.NO_RWIGS_ATOM_LM_PHASE_AUTO.value except AttributeError: pass </DeepExtract> else: try: lm = self._parameters.bands.lm except AttributeError: lm = False try: phase = self._parameters.bands.phase except AttributeError: phase = False lorbit = OrbitEnum.get_lorbit_from_combination(lm=lm, phase=phase, wigner_seitz_radius=wigner_seitz_radius).value <DeepExtract> try: self._incar['lorbit'] = lorbit except AttributeError: pass </DeepExtract> else: try: if self._parameters.bands.decompose_wave: <DeepExtract> try: self._incar['lorbit'] = OrbitEnum.ATOM_LM_WAVE.value except AttributeError: pass </DeepExtract> except AttributeError: pass except AttributeError: try: if self._parameters.bands.decompose_wave: <DeepExtract> try: self._incar['lorbit'] = OrbitEnum.ATOM_LM_WAVE.value except AttributeError: pass </DeepExtract> except AttributeError: pass
def set_lorbit(self): """ Set the flag that controls the projectors/decomposition onto orbitals. See: https://www.vasp.at/wiki/index.php/LORBIT """ try: wigner_seitz_radius = self._parameters.bands.wigner_seitz_radius if isinstance(wigner_seitz_radius, list): if wigner_seitz_radius[0]: self._set_simple('rwigs', wigner_seitz_radius) else: raise ValueError('The parameter wigner_seitz_radius should be supplied as a list of floats bigger than zero.') except AttributeError: pass try: if self._parameters.bands.decompose_bands: if self._parameters.bands.decompose_wave: raise ValueError('Only projections/decompositions on the bands or the wave function are allowed.') wigner_seitz_radius = False try: if abs(self._incar.rwigs[0]) > 1e-08: wigner_seitz_radius = True except AttributeError: pass if self._parameters.bands.decompose_auto: try: self._incar['lorbit'] = OrbitEnum.NO_RWIGS_ATOM_LM_PHASE_AUTO.value except AttributeError: pass else: try: lm = self._parameters.bands.lm except AttributeError: lm = False try: phase = self._parameters.bands.phase except AttributeError: phase = False lorbit = OrbitEnum.get_lorbit_from_combination(lm=lm, phase=phase, wigner_seitz_radius=wigner_seitz_radius).value try: self._incar['lorbit'] = lorbit except AttributeError: pass else: try: if self._parameters.bands.decompose_wave: try: self._incar['lorbit'] = OrbitEnum.ATOM_LM_WAVE.value except AttributeError: pass except AttributeError: pass except AttributeError: try: if self._parameters.bands.decompose_wave: try: self._incar['lorbit'] = OrbitEnum.ATOM_LM_WAVE.value except AttributeError: pass except AttributeError: pass
aiida-vasp
positive
@deprecate_param('v2.0.0', 'resources') def parse_path(path, operations, api_version, resources): """ Parse an endpoint into a class where each valid http request on that endpoint is converted into a convenience function and attached to the class as a method. """ path = path.strip('/') modified_base_path = re.sub('-', '_', path.split('/')[0].lower()) methods = [] if exclude_resource(path, api_version, resources): return (modified_base_path, methods) for (verb, op) in operations.items(): <DeepExtract> summary = op['summary'] params = op.get('parameters', []) responses = op['responses'] deprecation_warning = op.get('x-deprecation-warning', None) if 'deprecated' in summary.lower(): method = None (args, param_doc) = parse_params(params, summary, verb) elements = split_method_params(params) (_, _, _, query_params, _) = elements is_iterable = iterable_method(verb, query_params) response_doc = doc_from_responses(responses, is_iterable) deprecation_notice = deprecated_notice(deprecation_warning) docs = join_doc_elements(deprecation_notice, param_doc, response_doc) name = parse_method_name(verb, path) method = create_method(args, verb, name, path, docs) method = (name, method) </DeepExtract> if method is None: continue methods.append(method) return (modified_base_path, methods)
@deprecate_param('v2.0.0', 'resources') def parse_path(path, operations, api_version, resources): """ Parse an endpoint into a class where each valid http request on that endpoint is converted into a convenience function and attached to the class as a method. """ path = path.strip('/') modified_base_path = re.sub('-', '_', path.split('/')[0].lower()) methods = [] if exclude_resource(path, api_version, resources): return (modified_base_path, methods) for (verb, op) in operations.items(): summary = op['summary'] params = op.get('parameters', []) responses = op['responses'] deprecation_warning = op.get('x-deprecation-warning', None) if 'deprecated' in summary.lower(): method = None (args, param_doc) = parse_params(params, summary, verb) elements = split_method_params(params) (_, _, _, query_params, _) = elements is_iterable = iterable_method(verb, query_params) response_doc = doc_from_responses(responses, is_iterable) deprecation_notice = deprecated_notice(deprecation_warning) docs = join_doc_elements(deprecation_notice, param_doc, response_doc) name = parse_method_name(verb, path) method = create_method(args, verb, name, path, docs) method = (name, method) if method is None: continue methods.append(method) return (modified_base_path, methods)
civis-python
positive
def image_batchRemove(p_dir): if os.path.isdir(p_dir): file_list = sorted(os.listdir(p_dir)) <DeepExtract> file_list = file_list img_list = [item for item in file_list if item[-3:] == 'png' or item[-3:] == 'jpg' or item[-4:] == 'jpeg' or (item[-3:] == 'tga')] img_list = img_list </DeepExtract> for img in img_list: dirImage = os.path.join(p_dir, img) tName = os.path.basename(os.path.splitext(img)[0]) for tex in bpy.data.textures: if tex.name == tName: if tex.type == 'IMAGE': image = tex.image imgName = image.name tex.user_clear() bpy.data.textures.remove(tex) bpy.data.images[imgName].user_clear() bpy.data.images.remove(bpy.data.images[imgName])
def image_batchRemove(p_dir): if os.path.isdir(p_dir): file_list = sorted(os.listdir(p_dir)) file_list = file_list img_list = [item for item in file_list if item[-3:] == 'png' or item[-3:] == 'jpg' or item[-4:] == 'jpeg' or (item[-3:] == 'tga')] img_list = img_list for img in img_list: dirImage = os.path.join(p_dir, img) tName = os.path.basename(os.path.splitext(img)[0]) for tex in bpy.data.textures: if tex.name == tName: if tex.type == 'IMAGE': image = tex.image imgName = image.name tex.user_clear() bpy.data.textures.remove(tex) bpy.data.images[imgName].user_clear() bpy.data.images.remove(bpy.data.images[imgName])
-Blender-
positive
def ensure_pipeline_learner(self): from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.pipeline import make_pipeline <DeepExtract> n = 100 beta = 0.4 X = pd.DataFrame(np.random.normal(size=(n, 5))) a = pd.Series([0] * (n // 2) + [1] * (n // 2)) y = a.mul(beta) data = {'X': X, 'a': a, 'y': y, 'beta': beta} </DeepExtract> weight_learner = make_pipeline(StandardScaler(), MinMaxScaler(), LogisticRegression()) outcome_learner = make_pipeline(StandardScaler(), MinMaxScaler(), LinearRegression()) for (ipw, std) in product([IPW], [Standardization, StratifiedStandardization]): with self.subTest('Test combination of {} and {} does not crash'.format(ipw, std)): ipw_model = ipw(weight_learner) std_model = std(outcome_learner) with self.subTest('Test initialization with pipeline learner'): self.estimator = self.estimator.__class__(std_model, ipw_model) self.assertTrue(True) with self.subTest('Test fit with pipeline learner'): self.estimator.fit(data['X'], data['a'], data['y']) self.assertTrue(True) with self.subTest("Test 'predict' with pipeline learner"): self.estimator.estimate_individual_outcome(data['X'], data['a']) self.assertTrue(True)
def ensure_pipeline_learner(self): from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.pipeline import make_pipeline n = 100 beta = 0.4 X = pd.DataFrame(np.random.normal(size=(n, 5))) a = pd.Series([0] * (n // 2) + [1] * (n // 2)) y = a.mul(beta) data = {'X': X, 'a': a, 'y': y, 'beta': beta} weight_learner = make_pipeline(StandardScaler(), MinMaxScaler(), LogisticRegression()) outcome_learner = make_pipeline(StandardScaler(), MinMaxScaler(), LinearRegression()) for (ipw, std) in product([IPW], [Standardization, StratifiedStandardization]): with self.subTest('Test combination of {} and {} does not crash'.format(ipw, std)): ipw_model = ipw(weight_learner) std_model = std(outcome_learner) with self.subTest('Test initialization with pipeline learner'): self.estimator = self.estimator.__class__(std_model, ipw_model) self.assertTrue(True) with self.subTest('Test fit with pipeline learner'): self.estimator.fit(data['X'], data['a'], data['y']) self.assertTrue(True) with self.subTest("Test 'predict' with pipeline learner"): self.estimator.estimate_individual_outcome(data['X'], data['a']) self.assertTrue(True)
causallib
positive
def validate(self): result_val = [{'result': 'failed'}] if self.format == TESTCASEV1: if self.snapshot_id: docs = get_documents(self.collection, dbname=self.dbname, sort=[('timestamp', pymongo.DESCENDING)], query={'snapshotId': self.snapshot_id}, limit=1) logger.info('Number of Snapshot Documents: %s', len(docs)) if docs and len(docs): self.data = docs[0]['json'] if self.op in OPERATORS and OPERATORS[self.op]: result = OPERATORS[self.op](self.data, self.loperand, self.roperand, self.is_not, self.extras) result_val[0]['result'] = 'passed' if result else 'failed' result_val[0]['snapshots'] = [{'id': docs[0]['snapshotId'], 'structure': docs[0]['structure'], 'reference': docs[0]['reference'], 'source': docs[0]['source'], 'collection': docs[0]['collection']}] if 'paths' in docs[0]: result_val[0]['snapshots'][0]['paths'] = docs[0]['paths'] else: result_val[0]['snapshots'][0]['path'] = docs[0]['path'] else: result_val[0].update({'result': 'skipped', 'message': 'Missing documents for the snapshot'}) else: result_val[0].update({'result': 'skipped', 'message': 'Missing snapshotId for testcase'}) elif self.format == TESTCASEV2: if self.type == 'rego': <DeepExtract> tid = '%d_%s' % (int(time.time() * 1000000), generateid(None)) results = [] inputjson = {} result = False rule_expr = get_field_value(self.testcase, 'eval') if not rule_expr: rule_expr = get_field_value(self.testcase, 'evals') if rule_expr: del self.testcase['evals'] if not rule_expr: rule_expr = 'data.rule.rulepass' testId = 'MISSING ID' isMasterTest = False if 'testId' in self.testcase: testId = self.testcase['testId'] elif 'masterTestId' in self.testcase: testId = self.testcase['masterTestId'] isMasterTest = True opa_exe = opa_binary() if not opa_exe: logger.error('\t\tERROR: OPA binary not found!') logger.error('\t\tRESULT: FAILED') results.append({'eval': 'data.rule.rulepass', 'result': 'passed' if result else 'failed', 'message': ''}) results = results if len(self.testcase['snapshotId']) == 1: sid = self.testcase['snapshotId'][0] (toExclude, snapshot_doc) = self.get_snaphotid_doc(sid, testId, isMasterTest) if toExclude: logger.warn('\t\tWARN: Excluded test case: %s' % testId) logger.warn('\t\tRESULT: SKIPPED') results = results inputjson = snapshot_doc if inputjson is None: logger.info('\t\tERROR: Missing snapshot') results = [] if inputjson: results = self.generating_result_for_rego_testcase(inputjson, tid, testId, opa_exe, rule_expr, results) else: results.append({'eval': rule_expr, 'result': 'passed' if result else 'failed', 'message': ''}) self.log_result(results[-1]) results = results else: self.snapshots = [] resource_sid = [] for mastersnapshot_id in self.testcase['masterSnapshotId']: msid = [] for sid in self.testcase['snapshotId']: if sid.startswith(mastersnapshot_id): msid.append(sid) resource_sid.append({mastersnapshot_id: msid}) inputjson = {} for sid_pair in resource_sid: for (ms_id, s_id_list) in sid_pair.items(): snapshot_doc_list = [] for s_id in s_id_list: (toExclude, snapshot_doc) = self.get_snaphotid_doc(s_id, testId, isMasterTest) if toExclude: logger.warn('\t\tWARN: Excluded test case: %s' % testId) logger.warn('\t\tRESULT: SKIPPED') results = results snapshot_doc_list.append(snapshot_doc) input = {ms_id: snapshot_doc_list} inputjson.update(input) if inputjson: results = self.generating_result_for_rego_testcase(inputjson, tid, testId, opa_exe, rule_expr, results, resource_sid) else: results.append({'eval': rule_expr, 'result': 'passed' if result else 'failed', 'message': ''}) self.log_result(results[-1]) results = results </DeepExtract> result_val = [] <DeepExtract> connector_data = {} if self.snapshots: isdb_fetch = get_dbtests() if isdb_fetch: connectors = get_documents('structures', query={'name': self.snapshots[0].get('source'), 'type': 'structure', 'container': self.container}, dbname=self.dbname, limit=1) connector_data = connectors[0].get('json', {}) if connectors else {} else: json_test_dir = get_test_json_dir() snapshot_source = self.snapshots[0].get('source') file_name = '%s.json' % snapshot_source if snapshot_source and (not snapshot_source.endswith('.json')) else snapshot_source connector_path = '%s/../%s' % (json_test_dir, file_name) if exists_file(connector_path): connector_data = json_from_file(connector_path) connector_data = connector_data </DeepExtract> for result in results: result['snapshots'] = self.snapshots result['autoRemediate'] = connector_data.get('autoRemediate', False) result_val.append(result) elif self.type == 'python': <DeepExtract> sys.path.append('/tmp/') results = [] inputjson = {} result = False rule_expr = get_field_value(self.testcase, 'eval') if not rule_expr: rule_expr = get_field_value(self.testcase, 'evals') if rule_expr: del self.testcase['evals'] if not rule_expr: rule_expr = 'data.rule.rulepass' testId = 'MISSING ID' isMasterTest = False if 'testId' in self.testcase: testId = self.testcase['testId'] elif 'masterTestId' in self.testcase: testId = self.testcase['masterTestId'] isMasterTest = True snapshot_id = self.testcase['snapshotId'][0] (toExclude, snapshot_doc) = self.get_snaphotid_doc(snapshot_id, testId, isMasterTest) if toExclude: logger.warn('\t\tWARN: Excluded test case: %s' % testId) logger.warn('\t\tRESULT: SKIPPED') results = results python_testcase = self.rule.split('.')[0] if len(self.testcase['snapshotId']) == 1: inputjson = snapshot_doc if inputjson is None: logger.info('\t\tERROR: Missing snapshot') else: ms_id = dict(zip(self.testcase['snapshotId'], self.testcase['masterSnapshotId'])) for sid in self.testcase['snapshotId']: (toExclude, snapshot_doc) = self.get_snaphotid_doc(sid, testId, isMasterTest) if toExclude: logger.error('Excluded testcase because of testId: %s' % testId) inputjson.update({ms_id[sid]: snapshot_doc}) results = [] if inputjson: test_rule = self.rule rule_matched = re.match('^file\\((.*)\\)$', test_rule, re.I) if rule_matched: rego_file_name = self.rego_rule_filename(rule_matched.groups()[0], self.container) if not rego_file_name: python_testcase = 'processor.comparison.rules.%s.%s' % (self.snapshots[0]['type'], rule_matched.groups()[0].split('.')[0]) module = import_module(python_testcase) if not module and logger.level == logging.DEBUG: self.log_compliance_info(testId) logger.error('\t\tERROR: %s missing', rule_matched.groups()[0]) logger.warning('\t\tRESULT: SKIPPED') results = results else: python_testcase = rule_matched.groups()[0].split('.')[0] if isinstance(rule_expr, list): for rule in rule_expr: function_name = rule['eval'].rsplit('.', 1)[-1] if 'eval' in rule else '' evalmessage = rule['message'].rsplit('.', 1)[-1] if 'message' in rule else '' test_function = import_from(python_testcase, function_name) if not test_function: self.log_compliance_info(testId) logger.info('\t\tERROR: %s missing', rule_matched.groups()[0]) logger.warning('\t\tRESULT: SKIPPED') results = results paths = self.snapshots[0]['paths'] result = test_function(inputjson, kwargs={'paths': paths}) if result.get('issue') == True: result['result'] = 'failed' self.log_compliance_info(testId) self.log_result(result) json_result = {'eval': rule['eval'], 'result': result['result'], 'message': result.get(evalmessage, ''), 'id': rule.get('id'), 'remediation_description': rule.get('remediationDescription'), 'remediation_function': rule.get('remediationFunction')} if result.get('errors'): json_result['errors'] = result.get('errors', []) results.append(json_result) elif result.get('issue') == False: if logger.level == logging.DEBUG: self.log_compliance_info(testId) logger.info('\t\tERROR: %s missing', rule_matched.groups()[0]) logger.warning('\t\tRESULT: SKIPPED') elif result['issue'] == None: logger.error('\t\tERROR: have problem in running test') logger.error(result[evalmessage]) results = results </DeepExtract> result_val = [] <DeepExtract> connector_data = {} if self.snapshots: isdb_fetch = get_dbtests() if isdb_fetch: connectors = get_documents('structures', query={'name': self.snapshots[0].get('source'), 'type': 'structure', 'container': self.container}, dbname=self.dbname, limit=1) connector_data = connectors[0].get('json', {}) if connectors else {} else: json_test_dir = get_test_json_dir() snapshot_source = self.snapshots[0].get('source') file_name = '%s.json' % snapshot_source if snapshot_source and (not snapshot_source.endswith('.json')) else snapshot_source connector_path = '%s/../%s' % (json_test_dir, file_name) if exists_file(connector_path): connector_data = json_from_file(connector_path) connector_data = connector_data </DeepExtract> for result in results: result['snapshots'] = self.snapshots result['autoRemediate'] = connector_data.get('autoRemediate', False) result_val.append(result) else: logger.critical('\tTESTID: %s', self.testcase['testId']) input_stream = InputStream(self.rule) lexer = comparatorLexer(input_stream) stream = CommonTokenStream(lexer) parser = comparatorParser(stream) tree = parser.expression() children = [] for child in tree.getChildren(): children.append(child.getText()) logger.debug('All the parsed tokens: %s', children) otherdata = {'dbname': self.dbname, 'snapshots': self.collection_data, 'container': self.container} r_i = RuleInterpreter(children, **otherdata) (l_val, r_val, result) = r_i.compare() result_val[0]['result'] = 'passed' if result else 'failed' result_val[0]['snapshots'] = r_i.get_snapshots() <DeepExtract> connector_data = {} if self.snapshots: isdb_fetch = get_dbtests() if isdb_fetch: connectors = get_documents('structures', query={'name': self.snapshots[0].get('source'), 'type': 'structure', 'container': self.container}, dbname=self.dbname, limit=1) connector_data = connectors[0].get('json', {}) if connectors else {} else: json_test_dir = get_test_json_dir() snapshot_source = self.snapshots[0].get('source') file_name = '%s.json' % snapshot_source if snapshot_source and (not snapshot_source.endswith('.json')) else snapshot_source connector_path = '%s/../%s' % (json_test_dir, file_name) if exists_file(connector_path): connector_data = json_from_file(connector_path) connector_data = connector_data </DeepExtract> result_val[0]['autoRemediate'] = connector_data.get('autoRemediate', False) if result_val[0]['snapshots']: snapshot = result_val[0]['snapshots'][0] logger.critical('\t\tSNAPSHOTID: %s', snapshot['id']) logger.critical('\t\tPATHS: ') for path in snapshot.get('paths', []): logger.critical('\t\t\t %s', path) if not result: logger.critical('\t\tLHS: %s', l_val) logger.critical('\t\tRHS: %s', r_val) <DeepExtract> if result_val[0].get('result') == 'passed': logger.critical('\t\tTITLE: %s', self.testcase.get('title', '')) logger.critical('\t\tRULE: %s',
self.testcase.get('rule', '')) logger.critical('\t\tRESULT: %s', result_val[0].get('result'), extra={'type': 'SUCCESS'}) else: logger.critical('\t\tTITLE: %s', self.testcase.get('title', '')) logger.critical('\t\tDESCRIPTION: %s', self.testcase.get('description', '')) logger.critical('\t\tRULE: %s', self.testcase.get('rule', '')) logger.critical('\t\tERROR: %s', result_val[0].get('message', '')) logger.critical('\t\tREMEDIATION: %s', result_val[0].get('remediation_description', '')) logger.error('\t\tRESULT: %s', result_val[0].get('result')) </DeepExtract> else: result_val[0].update({'result': 'skipped', 'reason': 'Unsupported testcase format'}) if 'dirpath' in self.testcase: del self.testcase['dirpath'] return result_val
def validate(self):
    result_val = [{'result': 'failed'}]
    if self.format == TESTCASEV1:
        if self.snapshot_id:
            docs = get_documents(self.collection, dbname=self.dbname, sort=[('timestamp', pymongo.DESCENDING)], query={'snapshotId': self.snapshot_id}, limit=1)
            logger.info('Number of Snapshot Documents: %s', len(docs))
            if docs and len(docs):
                self.data = docs[0]['json']
                if self.op in OPERATORS and OPERATORS[self.op]:
                    result = OPERATORS[self.op](self.data, self.loperand, self.roperand, self.is_not, self.extras)
                    result_val[0]['result'] = 'passed' if result else 'failed'
                    result_val[0]['snapshots'] = [{'id': docs[0]['snapshotId'], 'structure': docs[0]['structure'], 'reference': docs[0]['reference'], 'source': docs[0]['source'], 'collection': docs[0]['collection']}]
                    if 'paths' in docs[0]:
                        result_val[0]['snapshots'][0]['paths'] = docs[0]['paths']
                    else:
                        result_val[0]['snapshots'][0]['path'] = docs[0]['path']
            else:
                result_val[0].update({'result': 'skipped', 'message': 'Missing documents for the snapshot'})
        else:
            result_val[0].update({'result': 'skipped', 'message': 'Missing snapshotId for testcase'})
    elif self.format == TESTCASEV2:
        if self.type == 'rego':
            tid = '%d_%s' % (int(time.time() * 1000000), generateid(None))
            results = []
            inputjson = {}
            result = False
            rule_expr = get_field_value(self.testcase, 'eval')
            if not rule_expr:
                rule_expr = get_field_value(self.testcase, 'evals')
                if rule_expr:
                    del self.testcase['evals']
            if not rule_expr:
                rule_expr = 'data.rule.rulepass'
            testId = 'MISSING ID'
            isMasterTest = False
            if 'testId' in self.testcase:
                testId = self.testcase['testId']
            elif 'masterTestId' in self.testcase:
                testId = self.testcase['masterTestId']
                isMasterTest = True
            opa_exe = opa_binary()
            if not opa_exe:
                logger.error('\t\tERROR: OPA binary not found!')
                logger.error('\t\tRESULT: FAILED')
                results.append({'eval': 'data.rule.rulepass', 'result': 'passed' if result else 'failed', 'message': ''})
                results = results
            if len(self.testcase['snapshotId']) == 1:
                sid = self.testcase['snapshotId'][0]
                (toExclude, snapshot_doc) = self.get_snaphotid_doc(sid, testId, isMasterTest)
                if toExclude:
                    logger.warn('\t\tWARN: Excluded test case: %s' % testId)
                    logger.warn('\t\tRESULT: SKIPPED')
                    results = results
                inputjson = snapshot_doc
                if inputjson is None:
                    logger.info('\t\tERROR: Missing snapshot')
                    results = []
                if inputjson:
                    results = self.generating_result_for_rego_testcase(inputjson, tid, testId, opa_exe, rule_expr, results)
                else:
                    results.append({'eval': rule_expr, 'result': 'passed' if result else 'failed', 'message': ''})
                    self.log_result(results[-1])
                results = results
            else:
                self.snapshots = []
                resource_sid = []
                for mastersnapshot_id in self.testcase['masterSnapshotId']:
                    msid = []
                    for sid in self.testcase['snapshotId']:
                        if sid.startswith(mastersnapshot_id):
                            msid.append(sid)
                    resource_sid.append({mastersnapshot_id: msid})
                inputjson = {}
                for sid_pair in resource_sid:
                    for (ms_id, s_id_list) in sid_pair.items():
                        snapshot_doc_list = []
                        for s_id in s_id_list:
                            (toExclude, snapshot_doc) = self.get_snaphotid_doc(s_id, testId, isMasterTest)
                            if toExclude:
                                logger.warn('\t\tWARN: Excluded test case: %s' % testId)
                                logger.warn('\t\tRESULT: SKIPPED')
                                results = results
                            snapshot_doc_list.append(snapshot_doc)
                        input = {ms_id: snapshot_doc_list}
                        inputjson.update(input)
                if inputjson:
                    results = self.generating_result_for_rego_testcase(inputjson, tid, testId, opa_exe, rule_expr, results, resource_sid)
                else:
                    results.append({'eval': rule_expr, 'result': 'passed' if result else 'failed', 'message': ''})
                    self.log_result(results[-1])
                results = results
            result_val = []
            connector_data = {}
            if self.snapshots:
                isdb_fetch = get_dbtests()
                if isdb_fetch:
                    connectors = get_documents('structures', query={'name': self.snapshots[0].get('source'), 'type': 'structure', 'container': self.container}, dbname=self.dbname, limit=1)
                    connector_data = connectors[0].get('json', {}) if connectors else {}
                else:
                    json_test_dir = get_test_json_dir()
                    snapshot_source = self.snapshots[0].get('source')
                    file_name = '%s.json' % snapshot_source if snapshot_source and (not snapshot_source.endswith('.json')) else snapshot_source
                    connector_path = '%s/../%s' % (json_test_dir, file_name)
                    if exists_file(connector_path):
                        connector_data = json_from_file(connector_path)
            connector_data = connector_data
            for result in results:
                result['snapshots'] = self.snapshots
                result['autoRemediate'] = connector_data.get('autoRemediate', False)
                result_val.append(result)
        elif self.type == 'python':
            sys.path.append('/tmp/')
            results = []
            inputjson = {}
            result = False
            rule_expr = get_field_value(self.testcase, 'eval')
            if not rule_expr:
                rule_expr = get_field_value(self.testcase, 'evals')
                if rule_expr:
                    del self.testcase['evals']
            if not rule_expr:
                rule_expr = 'data.rule.rulepass'
            testId = 'MISSING ID'
            isMasterTest = False
            if 'testId' in self.testcase:
                testId = self.testcase['testId']
            elif 'masterTestId' in self.testcase:
                testId = self.testcase['masterTestId']
                isMasterTest = True
            snapshot_id = self.testcase['snapshotId'][0]
            (toExclude, snapshot_doc) = self.get_snaphotid_doc(snapshot_id, testId, isMasterTest)
            if toExclude:
                logger.warn('\t\tWARN: Excluded test case: %s' % testId)
                logger.warn('\t\tRESULT: SKIPPED')
                results = results
            python_testcase = self.rule.split('.')[0]
            if len(self.testcase['snapshotId']) == 1:
                inputjson = snapshot_doc
                if inputjson is None:
                    logger.info('\t\tERROR: Missing snapshot')
            else:
                ms_id = dict(zip(self.testcase['snapshotId'], self.testcase['masterSnapshotId']))
                for sid in self.testcase['snapshotId']:
                    (toExclude, snapshot_doc) = self.get_snaphotid_doc(sid, testId, isMasterTest)
                    if toExclude:
                        logger.error('Excluded testcase because of testId: %s' % testId)
                    inputjson.update({ms_id[sid]: snapshot_doc})
            results = []
            if inputjson:
                test_rule = self.rule
                rule_matched = re.match('^file\\((.*)\\)$', test_rule, re.I)
                if rule_matched:
                    rego_file_name = self.rego_rule_filename(rule_matched.groups()[0], self.container)
                    if not rego_file_name:
                        python_testcase = 'processor.comparison.rules.%s.%s' % (self.snapshots[0]['type'], rule_matched.groups()[0].split('.')[0])
                        module = import_module(python_testcase)
                        if not module and logger.level == logging.DEBUG:
                            self.log_compliance_info(testId)
                            logger.error('\t\tERROR: %s missing', rule_matched.groups()[0])
                            logger.warning('\t\tRESULT: SKIPPED')
                            results = results
                    else:
                        python_testcase = rule_matched.groups()[0].split('.')[0]
                    if isinstance(rule_expr, list):
                        for rule in rule_expr:
                            function_name = rule['eval'].rsplit('.', 1)[-1] if 'eval' in rule else ''
                            evalmessage = rule['message'].rsplit('.', 1)[-1] if 'message' in rule else ''
                            test_function = import_from(python_testcase, function_name)
                            if not test_function:
                                self.log_compliance_info(testId)
                                logger.info('\t\tERROR: %s missing', rule_matched.groups()[0])
                                logger.warning('\t\tRESULT: SKIPPED')
                                results = results
                            paths = self.snapshots[0]['paths']
                            result = test_function(inputjson, kwargs={'paths': paths})
                            if result.get('issue') == True:
                                result['result'] = 'failed'
                                self.log_compliance_info(testId)
                                self.log_result(result)
                                json_result = {'eval': rule['eval'], 'result': result['result'], 'message': result.get(evalmessage, ''), 'id': rule.get('id'), 'remediation_description': rule.get('remediationDescription'), 'remediation_function': rule.get('remediationFunction')}
                                if result.get('errors'):
                                    json_result['errors'] = result.get('errors', [])
                                results.append(json_result)
                            elif result.get('issue') == False:
                                if logger.level == logging.DEBUG:
                                    self.log_compliance_info(testId)
                                    logger.info('\t\tERROR: %s missing', rule_matched.groups()[0])
                                    logger.warning('\t\tRESULT: SKIPPED')
                            elif result['issue'] == None:
                                logger.error('\t\tERROR: have problem in running test')
                                logger.error(result[evalmessage])
            results = results
            result_val = []
            connector_data = {}
            if self.snapshots:
                isdb_fetch = get_dbtests()
                if isdb_fetch:
                    connectors = get_documents('structures', query={'name': self.snapshots[0].get('source'), 'type': 'structure', 'container': self.container}, dbname=self.dbname, limit=1)
                    connector_data = connectors[0].get('json', {}) if connectors else {}
                else:
                    json_test_dir = get_test_json_dir()
                    snapshot_source = self.snapshots[0].get('source')
                    file_name = '%s.json' % snapshot_source if snapshot_source and (not snapshot_source.endswith('.json')) else snapshot_source
                    connector_path = '%s/../%s' % (json_test_dir, file_name)
                    if exists_file(connector_path):
                        connector_data = json_from_file(connector_path)
            connector_data = connector_data
            for result in results:
                result['snapshots'] = self.snapshots
                result['autoRemediate'] = connector_data.get('autoRemediate', False)
                result_val.append(result)
        else:
            logger.critical('\tTESTID: %s', self.testcase['testId'])
            input_stream = InputStream(self.rule)
            lexer = comparatorLexer(input_stream)
            stream = CommonTokenStream(lexer)
            parser = comparatorParser(stream)
            tree = parser.expression()
            children = []
            for child in tree.getChildren():
                children.append(child.getText())
            logger.debug('All the parsed tokens: %s', children)
            otherdata = {'dbname': self.dbname, 'snapshots': self.collection_data, 'container': self.container}
            r_i = RuleInterpreter(children, **otherdata)
            (l_val, r_val, result) = r_i.compare()
            result_val[0]['result'] = 'passed' if result else 'failed'
            result_val[0]['snapshots'] = r_i.get_snapshots()
            connector_data = {}
            if self.snapshots:
                isdb_fetch = get_dbtests()
                if isdb_fetch:
                    connectors = get_documents('structures', query={'name': self.snapshots[0].get('source'), 'type': 'structure', 'container': self.container}, dbname=self.dbname, limit=1)
                    connector_data = connectors[0].get('json', {}) if connectors else {}
                else:
                    json_test_dir = get_test_json_dir()
                    snapshot_source = self.snapshots[0].get('source')
                    file_name = '%s.json' % snapshot_source if snapshot_source and (not snapshot_source.endswith('.json')) else snapshot_source
                    connector_path = '%s/../%s' % (json_test_dir, file_name)
                    if exists_file(connector_path):
                        connector_data = json_from_file(connector_path)
            connector_data = connector_data
            result_val[0]['autoRemediate'] = connector_data.get('autoRemediate', False)
            if result_val[0]['snapshots']:
                snapshot = result_val[0]['snapshots'][0]
                logger.critical('\t\tSNAPSHOTID: %s', snapshot['id'])
                logger.critical('\t\tPATHS: ')
                for path in snapshot.get('paths', []):
                    logger.critical('\t\t\t %s', path)
            if not result:
                logger.critical('\t\tLHS: %s', l_val)
                logger.critical('\t\tRHS: %s', r_val)
            if result_val[0].get('result') == 'passed':
                logger.critical('\t\tTITLE: %s', self.testcase.get('title', ''))
                logger.critical('\t\tRULE: %s', self.testcase.get('rule', ''))
                logger.critical('\t\tRESULT: %s', result_val[0].get('result'), extra={'type': 'SUCCESS'})
            else:
                logger.critical('\t\tTITLE: %s', self.testcase.get('title', ''))
                logger.critical('\t\tDESCRIPTION: %s', self.testcase.get('description', ''))
                logger.critical('\t\tRULE: %s', self.testcase.get('rule', ''))
                logger.critical('\t\tERROR: %s', result_val[0].get('message', ''))
                logger.critical('\t\tREMEDIATION: %s', result_val[0].get('remediation_description', ''))
                logger.error('\t\tRESULT: %s', result_val[0].get('result'))
    else:
        result_val[0].update({'result': 'skipped', 'reason': 'Unsupported testcase format'})
    if 'dirpath' in self.testcase:
        del self.testcase['dirpath']
    return result_val
cloud-validation-framework
positive
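The validate record above inlines the same connector lookup three times: try the document store first, then fall back to a JSON file next to the test directory. A minimal stdlib sketch of that pattern, with hypothetical names (load_connector_data, fetch_from_db) that do not appear in the source:

import json
import os

def load_connector_data(source, json_test_dir, fetch_from_db=None):
    # DB-first lookup with a file fallback, mirroring the block the
    # record inlines three times (all names here are illustrative).
    if fetch_from_db is not None:
        docs = fetch_from_db(source)  # e.g. a Mongo query on 'structures'
        return docs[0].get('json', {}) if docs else {}
    # Fall back to a JSON file one directory above the test directory.
    file_name = source if source.endswith('.json') else '%s.json' % source
    connector_path = os.path.join(json_test_dir, '..', file_name)
    if os.path.exists(connector_path):
        with open(connector_path) as f:
            return json.load(f)
    return {}

print(load_connector_data('aws_structure', '/tmp/tests'))  # {} when no file exists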
def get_value(self, role_id, state):
    with tf.variable_scope('value_network'):
        with tf.variable_scope('value_fc'):
            x = state
            feats = [1024, 512, 512, 256, 256]
            for f in feats:
                for _ in range(3):
                    <DeepExtract>
                    residual = x
                    for _ in range(stack):
                        residual = slim.fully_connected(residual, f)
                    x = x
                    if x.shape[1].value != f:
                        x = slim.fully_connected(x, f)
                    x = tf.contrib.layers.layer_norm(residual + x, scale=False)
                    </DeepExtract>
            flattened = x
        value = slim.fully_connected(flattened, num_outputs=1, activation_fn=None)
        value = tf.squeeze(value, 1)
        indicator = tf.cast(tf.equal(role_id, LORD_ID), tf.float32) * 2 - 1
        return -value * indicator
def get_value(self, role_id, state):
    with tf.variable_scope('value_network'):
        with tf.variable_scope('value_fc'):
            x = state
            feats = [1024, 512, 512, 256, 256]
            for f in feats:
                for _ in range(3):
                    residual = x
                    for _ in range(stack):
                        residual = slim.fully_connected(residual, f)
                    x = x
                    if x.shape[1].value != f:
                        x = slim.fully_connected(x, f)
                    x = tf.contrib.layers.layer_norm(residual + x, scale=False)
            flattened = x
        value = slim.fully_connected(flattened, num_outputs=1, activation_fn=None)
        value = tf.squeeze(value, 1)
        indicator = tf.cast(tf.equal(role_id, LORD_ID), tf.float32) * 2 - 1
        return -value * indicator
doudizhu-C
positive
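The block inlined above is a residual fully-connected stack: a transformed branch plus a width-matched shortcut, normalized with an un-scaled layer norm. A NumPy sketch of the same computation, independent of the TF1/slim APIs the record uses; the ReLU is an assumption added for illustration and all names here are made up:

import numpy as np

def layer_norm(x, eps=1e-6):
    # Normalize each row to zero mean / unit variance, with no learned
    # scale (matching scale=False in the record above).
    mu = x.mean(axis=-1, keepdims=True)
    sigma = x.std(axis=-1, keepdims=True)
    return (x - mu) / (sigma + eps)

def residual_fc_block(x, w_res, w_proj=None):
    # FC branch (ReLU assumed); the optional projection mirrors the
    # record's `x.shape[1].value != f` shortcut-width check.
    residual = np.maximum(x @ w_res, 0.0)
    shortcut = x if w_proj is None else x @ w_proj
    return layer_norm(residual + shortcut)

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 8))
out = residual_fc_block(x, rng.normal(size=(8, 16)), rng.normal(size=(8, 16)))
print(out.shape)  # (4, 16)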
def nondup_key_for_item(self, item):
    <DeepExtract>
    key = (item['motion'], item['chamber'], item['date'], item['yes_count'], item['no_count'], item['other_count'])
    </DeepExtract>
    seq_num = self.seq_for_key[key]
    self.seq_for_key[key] += 1
    return key + (seq_num,)
def nondup_key_for_item(self, item):
    key = (item['motion'], item['chamber'], item['date'], item['yes_count'], item['no_count'], item['other_count'])
    seq_num = self.seq_for_key[key]
    self.seq_for_key[key] += 1
    return key + (seq_num,)
billy
positive
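The deduplication idea in the billy record is compact enough to lift out: otherwise-identical vote records get a trailing per-key sequence number. A self-contained sketch, with a defaultdict standing in for the scraper's seq_for_key state (class and field names are illustrative):

from collections import defaultdict

class VoteDeduper:
    # Append a per-key sequence number so identical-looking records stay
    # distinct, the same idea as seq_for_key in the record above.
    def __init__(self):
        self.seq_for_key = defaultdict(int)

    def nondup_key(self, item, fields=('motion', 'chamber', 'date')):
        key = tuple(item[f] for f in fields)
        seq_num = self.seq_for_key[key]
        self.seq_for_key[key] += 1
        return key + (seq_num,)

d = VoteDeduper()
vote = {'motion': 'HB 1', 'chamber': 'upper', 'date': '2024-01-01'}
print(d.nondup_key(vote))  # ('HB 1', 'upper', '2024-01-01', 0)
print(d.nondup_key(vote))  # ('HB 1', 'upper', '2024-01-01', 1)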
def __extract_subjects(self, ls):

    def strip_t(t):
        toks = [p.strip() for p in t.split('|')]
        if len(toks) == 1:
            return ('None', toks[0])
        elif len(toks) == 2:
            return (toks[0], toks[1])
        else:
            raise Exception('must be size 1 or 2', toks)
    typed_terms = {}
    for row in ls:
        head = row.split(' ')[0].strip()
        term = ' '.join(row.split(' ')[1:]).strip()
        if head not in typed_terms:
            typed_terms[head] = []
        typed_terms[head].append(term)
    size = len(typed_terms['[subj]'])
    rs = [{} for _ in range(size)]
    for (k, ts) in typed_terms.items():
        assert len(ts) == size
        for (i, t) in enumerate(ts):
            <DeepExtract>
            toks = [p.strip() for p in t.split('|')]
            if len(toks) == 1:
                (cluster, t) = ('None', toks[0])
            elif len(toks) == 2:
                (cluster, t) = (toks[0], toks[1])
            else:
                raise Exception('must be size 1 or 2', toks)
            </DeepExtract>
            rs[i][k] = t
            rs[i]['cluster'] = cluster
    return rs
def __extract_subjects(self, ls):

    def strip_t(t):
        toks = [p.strip() for p in t.split('|')]
        if len(toks) == 1:
            return ('None', toks[0])
        elif len(toks) == 2:
            return (toks[0], toks[1])
        else:
            raise Exception('must be size 1 or 2', toks)
    typed_terms = {}
    for row in ls:
        head = row.split(' ')[0].strip()
        term = ' '.join(row.split(' ')[1:]).strip()
        if head not in typed_terms:
            typed_terms[head] = []
        typed_terms[head].append(term)
    size = len(typed_terms['[subj]'])
    rs = [{} for _ in range(size)]
    for (k, ts) in typed_terms.items():
        assert len(ts) == size
        for (i, t) in enumerate(ts):
            toks = [p.strip() for p in t.split('|')]
            if len(toks) == 1:
                (cluster, t) = ('None', toks[0])
            elif len(toks) == 2:
                (cluster, t) = (toks[0], toks[1])
            else:
                raise Exception('must be size 1 or 2', toks)
            rs[i][k] = t
            rs[i]['cluster'] = cluster
    return rs
BIG-bench
positive
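The helper inlined twice above parses an optional 'cluster | term' pair. A standalone version with the same contract; the default_cluster parameter is a hypothetical addition for clarity:

def strip_t(t, default_cluster='None'):
    # Split an optional 'cluster | term' pair; a lone term gets the
    # default cluster, matching strip_t in the record above.
    toks = [p.strip() for p in t.split('|')]
    if len(toks) == 1:
        return (default_cluster, toks[0])
    if len(toks) == 2:
        return (toks[0], toks[1])
    raise ValueError('must be size 1 or 2: %r' % toks)

print(strip_t('animal | dog'))  # ('animal', 'dog')
print(strip_t('dog'))           # ('None', 'dog')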
def get_verbose_state(self, state):
    <DeepExtract>
    err = ValidationError(FlagError.STATE_INVALID.format(state=state), code='invalid')
    try:
        state = int(state)
        if state not in [st[0] for st in self.STATES_CHOICES]:
            raise err
    except (ValueError, TypeError):
        raise err
    state = state
    </DeepExtract>
    for item in self.STATES_CHOICES:
        if item[0] == state:
            return item[1]
    return None
def get_verbose_state(self, state):
    err = ValidationError(FlagError.STATE_INVALID.format(state=state), code='invalid')
    try:
        state = int(state)
        if state not in [st[0] for st in self.STATES_CHOICES]:
            raise err
    except (ValueError, TypeError):
        raise err
    state = state
    for item in self.STATES_CHOICES:
        if item[0] == state:
            return item[1]
    return None
Comment
positive
def ddd2locrot(center, alpha, dim, depth, calib):
    <DeepExtract>
    z = depth - calib[2, 3]
    x = (center[0] * depth - calib[0, 3] - calib[0, 2] * z) / calib[0, 0]
    y = (center[1] * depth - calib[1, 3] - calib[1, 2] * z) / calib[1, 1]
    pt_3d = np.array([x, y, z], dtype=np.float32)
    locations = pt_3d
    </DeepExtract>
    locations[1] += dim[0] / 2
    <DeepExtract>
    rot_y = alpha + np.arctan2(center[0] - calib[0, 2], calib[0, 0])
    if rot_y > np.pi:
        rot_y -= 2 * np.pi
    if rot_y < -np.pi:
        rot_y += 2 * np.pi
    rotation_y = rot_y
    </DeepExtract>
    return (locations, rotation_y)
def ddd2locrot(center, alpha, dim, depth, calib):
    z = depth - calib[2, 3]
    x = (center[0] * depth - calib[0, 3] - calib[0, 2] * z) / calib[0, 0]
    y = (center[1] * depth - calib[1, 3] - calib[1, 2] * z) / calib[1, 1]
    pt_3d = np.array([x, y, z], dtype=np.float32)
    locations = pt_3d
    locations[1] += dim[0] / 2
    rot_y = alpha + np.arctan2(center[0] - calib[0, 2], calib[0, 0])
    if rot_y > np.pi:
        rot_y -= 2 * np.pi
    if rot_y < -np.pi:
        rot_y += 2 * np.pi
    rotation_y = rot_y
    return (locations, rotation_y)
CenterNet
positive
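The first inlined block above is a pinhole unprojection: given a pixel, a projected depth, and a KITTI-style 3x4 projection matrix P, solve the projection equations for the 3D point. A NumPy sketch plus a round-trip check; the calibration values below are made up for illustration:

import numpy as np

def unproject_2d_to_3d(pt_2d, depth, P):
    # Invert P @ [x, y, z, 1]^T ~ [u*d, v*d, d]^T for x, y given the
    # recovered z, the same arithmetic as the inlined block above.
    z = depth - P[2, 3]
    x = (pt_2d[0] * depth - P[0, 3] - P[0, 2] * z) / P[0, 0]
    y = (pt_2d[1] * depth - P[1, 3] - P[1, 2] * z) / P[1, 1]
    return np.array([x, y, z], dtype=np.float32)

# Round trip with a toy calibration: project a point, then unproject it.
P = np.array([[721.5, 0.0, 609.6, 44.9],
              [0.0, 721.5, 172.9, 0.2],
              [0.0, 0.0, 1.0, 2.7e-03]])
pt_3d = np.array([1.0, 1.5, 10.0, 1.0])
uvw = P @ pt_3d
uv, d = uvw[:2] / uvw[2], uvw[2]
print(unproject_2d_to_3d(uv, d, P))  # approximately [1.0, 1.5, 10.0]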
def asQuaternion(self, time=None):
    <DeepExtract>
    unit = unit if unit is not None else self._unit
    context = None if time is None else DGContext(time=time)
    try:
        value = _plug_to_python(self._mplug, unit=unit, context=context)
        self._node._state['values'][self._key, unit] = value
        value = value
    except RuntimeError:
        raise
    except TypeError:
        log.error("'%s': failed to read attribute" % self.path())
        raise
    </DeepExtract>
    value = Euler(value).asQuaternion()
    return Quaternion(value)
def asQuaternion(self, time=None):
    unit = unit if unit is not None else self._unit
    context = None if time is None else DGContext(time=time)
    try:
        value = _plug_to_python(self._mplug, unit=unit, context=context)
        self._node._state['values'][self._key, unit] = value
        value = value
    except RuntimeError:
        raise
    except TypeError:
        log.error("'%s': failed to read attribute" % self.path())
        raise
    value = Euler(value).asQuaternion()
    return Quaternion(value)
cmdx
positive
@typecheck
def getAll(self, prj=None):
    """
    See docu in ``DatastoreBase``.
    """
    if _checkGetAll(prj) is False:
        self.logit.warning('Invalid GETALL argument')
        return RetVal(False, 'Argument error', None)
    docs = copy.deepcopy(self.content)
    if prj is not None:
        for doc in docs:
            <DeepExtract>
            docs[doc] = copy.deepcopy(docs[doc])
            out = {}
            for p in prj:
                try:
                    self.setKey(out, p, self.getKey(docs[doc], p))
                except KeyError:
                    continue
            docs[docs[doc]] = out
            </DeepExtract>
    return RetVal(True, None, docs)
@typecheck
def getAll(self, prj=None):
    """
    See docu in ``DatastoreBase``.
    """
    if _checkGetAll(prj) is False:
        self.logit.warning('Invalid GETALL argument')
        return RetVal(False, 'Argument error', None)
    docs = copy.deepcopy(self.content)
    if prj is not None:
        for doc in docs:
            docs[doc] = copy.deepcopy(docs[doc])
            out = {}
            for p in prj:
                try:
                    self.setKey(out, p, self.getKey(docs[doc], p))
                except KeyError:
                    continue
            docs[docs[doc]] = out
    return RetVal(True, None, docs)
azrael
positive
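The projection loop above rebuilds each document from a list of key paths, silently skipping paths that are absent. A standalone sketch for dotted paths on plain dicts; setKey/getKey from the record are replaced by inline traversal, and all names here are illustrative:

import copy

def project(doc, fields):
    # Keep only the requested dotted paths of a document, skipping
    # missing paths the way the record's KeyError-continue does.
    out = {}
    for path in fields:
        keys = path.split('.')
        node = doc
        try:
            for k in keys:
                node = node[k]
        except (KeyError, TypeError):
            continue
        # Rebuild the nested structure for this path.
        cursor = out
        for k in keys[:-1]:
            cursor = cursor.setdefault(k, {})
        cursor[keys[-1]] = copy.deepcopy(node)
    return out

doc = {'a': {'b': 1, 'c': 2}, 'd': 3}
print(project(doc, ['a.b', 'missing']))  # {'a': {'b': 1}}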
def __init__(self, priv_key: Optional[Union[bytes, IPrivateKey]], pub_key: Optional[Union[bytes, IPoint, IPublicKey]], key_data: Bip32KeyData, key_net_ver: Bip32KeyNetVersions) -> None:
    """
    Construct class.

    Args:
        priv_key (bytes or IPrivateKey)         : Private key (None for a public-only object)
        pub_key (bytes, IPoint or IPublicKey)   : Public key (only needed for a public-only object)
                                                  If priv_key is not None, it'll be discarded
        key_data (Bip32KeyData object)          : Key data
        key_net_ver (Bip32KeyNetVersions object): Bip32KeyNetVersions object

    Raises:
        Bip32KeyError: If the constructed key is not valid
    """
    <DeepExtract>
    curve = EllipticCurveGetter.FromType(cls.CurveType())
    </DeepExtract>
    if priv_key is not None:
        if not isinstance(priv_key, bytes) and (not isinstance(priv_key, curve.PrivateKeyClass())):
            raise Bip32KeyError(f'Invalid private key class, a {curve.Name()} key is required')
        self.m_priv_key = Bip32PrivateKey.FromBytesOrKeyObject(priv_key, key_data, key_net_ver, self.CurveType())
        self.m_pub_key = self.m_priv_key.PublicKey()
    else:
        if not isinstance(pub_key, bytes) and (not isinstance(pub_key, curve.PointClass())) and (not isinstance(pub_key, curve.PublicKeyClass())):
            raise Bip32KeyError(f'Invalid public key class, a {curve.Name()} key or point is required')
        self.m_priv_key = None
        self.m_pub_key = Bip32PublicKey.FromBytesOrKeyObject(pub_key, key_data, key_net_ver, self.CurveType())
def __init__(self, priv_key: Optional[Union[bytes, IPrivateKey]], pub_key: Optional[Union[bytes, IPoint, IPublicKey]], key_data: Bip32KeyData, key_net_ver: Bip32KeyNetVersions) -> None:
    """
    Construct class.

    Args:
        priv_key (bytes or IPrivateKey)         : Private key (None for a public-only object)
        pub_key (bytes, IPoint or IPublicKey)   : Public key (only needed for a public-only object)
                                                  If priv_key is not None, it'll be discarded
        key_data (Bip32KeyData object)          : Key data
        key_net_ver (Bip32KeyNetVersions object): Bip32KeyNetVersions object

    Raises:
        Bip32KeyError: If the constructed key is not valid
    """
    curve = EllipticCurveGetter.FromType(cls.CurveType())
    if priv_key is not None:
        if not isinstance(priv_key, bytes) and (not isinstance(priv_key, curve.PrivateKeyClass())):
            raise Bip32KeyError(f'Invalid private key class, a {curve.Name()} key is required')
        self.m_priv_key = Bip32PrivateKey.FromBytesOrKeyObject(priv_key, key_data, key_net_ver, self.CurveType())
        self.m_pub_key = self.m_priv_key.PublicKey()
    else:
        if not isinstance(pub_key, bytes) and (not isinstance(pub_key, curve.PointClass())) and (not isinstance(pub_key, curve.PublicKeyClass())):
            raise Bip32KeyError(f'Invalid public key class, a {curve.Name()} key or point is required')
        self.m_priv_key = None
        self.m_pub_key = Bip32PublicKey.FromBytesOrKeyObject(pub_key, key_data, key_net_ver, self.CurveType())
bip_utils
positive
def ranking_eval(model, metrics, train_set, test_set, val_set=None, rating_threshold=1.0, exclude_unknowns=True, verbose=False, props=None):
    """Evaluate model on provided ranking metrics.

    Parameters
    ----------
    model: :obj:`cornac.models.Recommender`, required
        Recommender model to be evaluated.
    metrics: :obj:`iterable`, required
        List of rating metrics :obj:`cornac.metrics.RankingMetric`.
    train_set: :obj:`cornac.data.Dataset`, required
        Dataset to be used for model training. This will be used to exclude observations already appeared during training.
    test_set: :obj:`cornac.data.Dataset`, required
        Dataset to be used for evaluation.
    val_set: :obj:`cornac.data.Dataset`, optional, default: None
        Dataset to be used for model selection. This will be used to exclude observations already appeared during validation.
    rating_threshold: float, optional, default: 1.0
        The threshold to convert ratings into positive or negative feedback.
    exclude_unknowns: bool, optional, default: True
        Ignore unknown users and items during evaluation.
    verbose: bool, optional, default: False
        Output evaluation progress.
    props: dictionary, optional, default: None
        items propensity scores

    Returns
    -------
    res: (List, List)
        Tuple of two lists:
        - average result for each of the metrics
        - average result per user for each of the metrics
    """
    if len(metrics) == 0:
        return ([], [])
    avg_results = []
    user_results = [{} for _ in enumerate(metrics)]
    gt_mat = test_set.csr_matrix
    train_mat = train_set.csr_matrix
    val_mat = None if val_set is None else val_set.csr_matrix

    def pos_items(csr_row):
        return [item_idx for (item_idx, rating) in zip(csr_row.indices, csr_row.data) if rating >= rating_threshold]
    for user_idx in tqdm.tqdm(test_set.user_indices, disable=not verbose, miniters=100):
        <DeepExtract>
        test_pos_items = [item_idx for (item_idx, rating) in zip(gt_mat.getrow(user_idx).indices, gt_mat.getrow(user_idx).data) if rating >= rating_threshold]
        </DeepExtract>
        if len(test_pos_items) == 0:
            continue
        u_gt_pos = np.zeros(test_set.num_items, dtype='float')
        u_gt_pos[test_pos_items] = 1
        val_pos_items = [] if val_mat is None else pos_items(val_mat.getrow(user_idx))
        train_pos_items = [] if train_set.is_unk_user(user_idx) else pos_items(train_mat.getrow(user_idx))
        u_gt_neg = np.ones(test_set.num_items, dtype='int')
        u_gt_neg[test_pos_items + val_pos_items + train_pos_items] = 0
        item_indices = None if exclude_unknowns else np.arange(test_set.num_items)
        (item_rank, item_scores) = model.rank(user_idx, item_indices)
        total_pi = 0.0
        if props is not None:
            for (idx, e) in enumerate(u_gt_pos):
                if e > 0 and props[str(idx)] > 0:
                    u_gt_pos[idx] /= props[str(idx)]
                    total_pi += 1 / props[str(idx)]
        for (i, mt) in enumerate(metrics):
            mt_score = mt.compute(gt_pos=u_gt_pos, gt_neg=u_gt_neg, pd_rank=item_rank, pd_scores=item_scores)
            user_results[i][user_idx] = mt_score
    for (i, mt) in enumerate(metrics):
        avg_results.append(sum(user_results[i].values()) / len(user_results[i]))
    return (avg_results, user_results)
def ranking_eval(model, metrics, train_set, test_set, val_set=None, rating_threshold=1.0, exclude_unknowns=True, verbose=False, props=None):
    """Evaluate model on provided ranking metrics.

    Parameters
    ----------
    model: :obj:`cornac.models.Recommender`, required
        Recommender model to be evaluated.
    metrics: :obj:`iterable`, required
        List of rating metrics :obj:`cornac.metrics.RankingMetric`.
    train_set: :obj:`cornac.data.Dataset`, required
        Dataset to be used for model training. This will be used to exclude observations already appeared during training.
    test_set: :obj:`cornac.data.Dataset`, required
        Dataset to be used for evaluation.
    val_set: :obj:`cornac.data.Dataset`, optional, default: None
        Dataset to be used for model selection. This will be used to exclude observations already appeared during validation.
    rating_threshold: float, optional, default: 1.0
        The threshold to convert ratings into positive or negative feedback.
    exclude_unknowns: bool, optional, default: True
        Ignore unknown users and items during evaluation.
    verbose: bool, optional, default: False
        Output evaluation progress.
    props: dictionary, optional, default: None
        items propensity scores

    Returns
    -------
    res: (List, List)
        Tuple of two lists:
        - average result for each of the metrics
        - average result per user for each of the metrics
    """
    if len(metrics) == 0:
        return ([], [])
    avg_results = []
    user_results = [{} for _ in enumerate(metrics)]
    gt_mat = test_set.csr_matrix
    train_mat = train_set.csr_matrix
    val_mat = None if val_set is None else val_set.csr_matrix

    def pos_items(csr_row):
        return [item_idx for (item_idx, rating) in zip(csr_row.indices, csr_row.data) if rating >= rating_threshold]
    for user_idx in tqdm.tqdm(test_set.user_indices, disable=not verbose, miniters=100):
        test_pos_items = [item_idx for (item_idx, rating) in zip(gt_mat.getrow(user_idx).indices, gt_mat.getrow(user_idx).data) if rating >= rating_threshold]
        if len(test_pos_items) == 0:
            continue
        u_gt_pos = np.zeros(test_set.num_items, dtype='float')
        u_gt_pos[test_pos_items] = 1
        val_pos_items = [] if val_mat is None else pos_items(val_mat.getrow(user_idx))
        train_pos_items = [] if train_set.is_unk_user(user_idx) else pos_items(train_mat.getrow(user_idx))
        u_gt_neg = np.ones(test_set.num_items, dtype='int')
        u_gt_neg[test_pos_items + val_pos_items + train_pos_items] = 0
        item_indices = None if exclude_unknowns else np.arange(test_set.num_items)
        (item_rank, item_scores) = model.rank(user_idx, item_indices)
        total_pi = 0.0
        if props is not None:
            for (idx, e) in enumerate(u_gt_pos):
                if e > 0 and props[str(idx)] > 0:
                    u_gt_pos[idx] /= props[str(idx)]
                    total_pi += 1 / props[str(idx)]
        for (i, mt) in enumerate(metrics):
            mt_score = mt.compute(gt_pos=u_gt_pos, gt_neg=u_gt_neg, pd_rank=item_rank, pd_scores=item_scores)
            user_results[i][user_idx] = mt_score
    for (i, mt) in enumerate(metrics):
        avg_results.append(sum(user_results[i].values()) / len(user_results[i]))
    return (avg_results, user_results)
cornac
positive
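The core bookkeeping in ranking_eval is two masks per user: binary positives from the test split, and a negative pool with train/validation items knocked out so they are never scored as misses. A NumPy sketch of just that step (the function name is illustrative):

import numpy as np

def build_eval_masks(num_items, test_pos, train_pos):
    # Positives come from the test split; anything seen during training
    # is removed from the negative pool, as in the record above.
    gt_pos = np.zeros(num_items, dtype=float)
    gt_pos[test_pos] = 1.0
    gt_neg = np.ones(num_items, dtype=int)
    gt_neg[list(test_pos) + list(train_pos)] = 0
    return gt_pos, gt_neg

gt_pos, gt_neg = build_eval_masks(6, test_pos=[1, 4], train_pos=[0])
print(gt_pos)  # [0. 1. 0. 0. 1. 0.]
print(gt_neg)  # [0 0 1 1 0 1]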
def database_is_updated(self) -> bool:
    """
    Checks if data is updated or not with database by interval provided in accordance to UTC time.
    :return: A boolean whether data is updated or not.
    """
    <DeepExtract>
    with closing(sqlite3.connect(self.database_file)) as connection:
        with closing(connection.cursor()) as cursor:
            cursor.execute(f'SELECT * FROM {self.database_table} ORDER BY date_utc DESC LIMIT 1')
            fetched_values = cursor.fetchone()
            if fetched_values is not None:
                result = get_normalized_data(fetched_values, parse_date=True)
    result = {}
    </DeepExtract>
    if not result:
        return False
    return self.is_latest_date(result['date_utc'])
def database_is_updated(self) -> bool:
    """
    Checks if data is updated or not with database by interval provided in accordance to UTC time.
    :return: A boolean whether data is updated or not.
    """
    with closing(sqlite3.connect(self.database_file)) as connection:
        with closing(connection.cursor()) as cursor:
            cursor.execute(f'SELECT * FROM {self.database_table} ORDER BY date_utc DESC LIMIT 1')
            fetched_values = cursor.fetchone()
            if fetched_values is not None:
                result = get_normalized_data(fetched_values, parse_date=True)
    result = {}
    if not result:
        return False
    return self.is_latest_date(result['date_utc'])
algobot
positive
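The block inlined above is the classic sqlite3 "latest row" idiom: contextlib.closing around the handles, ORDER BY ... DESC LIMIT 1, and fetchone() returning None for an empty table. A runnable sketch; the table layout is made up, and the f-string means the table name must come from trusted code, never user input:

import sqlite3
from contextlib import closing

def get_latest_row(connection, table):
    # Newest row by date, with the cursor closed on exit.
    with closing(connection.cursor()) as cursor:
        cursor.execute(f'SELECT * FROM {table} ORDER BY date_utc DESC LIMIT 1')
        return cursor.fetchone()  # None when the table is empty

with closing(sqlite3.connect(':memory:')) as conn:
    conn.execute('CREATE TABLE prices (date_utc TEXT, close REAL)')
    conn.executemany('INSERT INTO prices VALUES (?, ?)',
                     [('2024-01-01', 9.8), ('2024-01-02', 10.5)])
    print(get_latest_row(conn, 'prices'))  # ('2024-01-02', 10.5)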
def __init__(self, p: int=1, q: int=1, power: float=2.0, truncation: int=1000) -> None:
    super().__init__()
    self.p: int = int(p)
    self.q: int = int(q)
    self.power: float = power
    self._num_params = 2 + p + q
    self._truncation = int(truncation)
    if p < 0 or q < 0 or p > 1 or (q > 1):
        raise ValueError('p and q must be either 0 or 1.')
    if self._truncation <= 0:
        raise ValueError('truncation must be a positive integer')
    if power <= 0.0:
        raise ValueError('power must be strictly positive, usually larger than 0.25')
    <DeepExtract>
    (p, o, q, power) = (self.p, self.o, self.q, self.power)
    if power == 2.0:
        if o == 0 and q == 0:
            self._name = 'ARCH'
        elif o == 0:
            self._name = 'GARCH'
        else:
            self._name = 'GJR-GARCH'
    elif power == 1.0:
        if o == 0 and q == 0:
            self._name = 'AVARCH'
        elif o == 0:
            self._name = 'AVGARCH'
        else:
            self._name = 'TARCH/ZARCH'
    elif o == 0 and q == 0:
        self._name = f'Power ARCH (power: {self.power:0.1f})'
    elif o == 0:
        self._name = f'Power GARCH (power: {self.power:0.1f})'
    else:
        self._name = f'Asym. Power GARCH (power: {self.power:0.1f})'
    </DeepExtract>
    self._volatility_updater = rec.FIGARCHUpdater(p, q, power, truncation)
def __init__(self, p: int=1, q: int=1, power: float=2.0, truncation: int=1000) -> None:
    super().__init__()
    self.p: int = int(p)
    self.q: int = int(q)
    self.power: float = power
    self._num_params = 2 + p + q
    self._truncation = int(truncation)
    if p < 0 or q < 0 or p > 1 or (q > 1):
        raise ValueError('p and q must be either 0 or 1.')
    if self._truncation <= 0:
        raise ValueError('truncation must be a positive integer')
    if power <= 0.0:
        raise ValueError('power must be strictly positive, usually larger than 0.25')
    (p, o, q, power) = (self.p, self.o, self.q, self.power)
    if power == 2.0:
        if o == 0 and q == 0:
            self._name = 'ARCH'
        elif o == 0:
            self._name = 'GARCH'
        else:
            self._name = 'GJR-GARCH'
    elif power == 1.0:
        if o == 0 and q == 0:
            self._name = 'AVARCH'
        elif o == 0:
            self._name = 'AVGARCH'
        else:
            self._name = 'TARCH/ZARCH'
    elif o == 0 and q == 0:
        self._name = f'Power ARCH (power: {self.power:0.1f})'
    elif o == 0:
        self._name = f'Power GARCH (power: {self.power:0.1f})'
    else:
        self._name = f'Asym. Power GARCH (power: {self.power:0.1f})'
    self._volatility_updater = rec.FIGARCHUpdater(p, q, power, truncation)
arch
positive
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
    batch = 2
    (height, width) = (65, 65)
    global_pool = False
    output_stride = 8
    <DeepExtract>
    if None in [batch, None, None, 3]:
        inputs = tf.placeholder(tf.float32, (batch, None, None, 3))
    else:
        inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(None), [None, 1]) + np.reshape(np.arange(None), [1, None]), [1, None, None, 1]), [batch, 1, 1, 3]))
    </DeepExtract>
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        <DeepExtract>
        block = resnet_v1.resnet_v1_block
        blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
        (output, _) = resnet_v1.resnet_v1(inputs, blocks, None, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope)
        </DeepExtract>
    self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
    <DeepExtract>
    if None in [batch, height, width, 3]:
        images = tf.placeholder(tf.float32, (batch, height, width, 3))
    else:
        images = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [batch, 1, 1, 3]))
    </DeepExtract>
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(output, {inputs: images.eval()})
    self.assertEqual(output.shape, (batch, 9, 9, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
    batch = 2
    (height, width) = (65, 65)
    global_pool = False
    output_stride = 8
    if None in [batch, None, None, 3]:
        inputs = tf.placeholder(tf.float32, (batch, None, None, 3))
    else:
        inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(None), [None, 1]) + np.reshape(np.arange(None), [1, None]), [1, None, None, 1]), [batch, 1, 1, 3]))
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        block = resnet_v1.resnet_v1_block
        blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
        (output, _) = resnet_v1.resnet_v1(inputs, blocks, None, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope)
    self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
    if None in [batch, height, width, 3]:
        images = tf.placeholder(tf.float32, (batch, height, width, 3))
    else:
        images = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [batch, 1, 1, 3]))
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(output, {inputs: images.eval()})
    self.assertEqual(output.shape, (batch, 9, 9, 32))
CAAD2018
positive
def test_compare_table_wrong_stream_type(model, logger):
    <DeepExtract>
    description = create_table_request(model.Meta.table_name, model)
    if model.Meta.encryption:
        description.pop('SSESpecification')
        description['SSEDescription'] = {'Status': 'ENABLED'}
    if model.Meta.ttl:
        description['TimeToLiveDescription'] = {'AttributeName': model.Meta.ttl['column'].dynamo_name, 'TimeToLiveStatus': 'ENABLED'}
    if model.Meta.backups:
        description['ContinuousBackupsDescription'] = {'ContinuousBackupsStatus': 'ENABLED'}
    description['LatestStreamArn'] = 'not-a-real-arn'
    description['BillingModeSummary'] = {'BillingMode': description.pop('BillingMode')}
    description = sanitize_table_description(description)
    if active is not None:
        description['TableStatus'] = 'ACTIVE' if active else 'TEST-NOT-ACTIVE'
        for gsi in description['GlobalSecondaryIndexes']:
            gsi['IndexStatus'] = 'ACTIVE' if active else 'TEST-NOT-ACTIVE'
    description = description
    </DeepExtract>
    description['StreamSpecification']['StreamViewType'] = 'UNKNOWN'
    assert not compare_tables(model, description)
    logger.assert_only_logged("Model expects StreamViewType 'NEW_AND_OLD_IMAGES' but was 'UNKNOWN'")
def test_compare_table_wrong_stream_type(model, logger):
    description = create_table_request(model.Meta.table_name, model)
    if model.Meta.encryption:
        description.pop('SSESpecification')
        description['SSEDescription'] = {'Status': 'ENABLED'}
    if model.Meta.ttl:
        description['TimeToLiveDescription'] = {'AttributeName': model.Meta.ttl['column'].dynamo_name, 'TimeToLiveStatus': 'ENABLED'}
    if model.Meta.backups:
        description['ContinuousBackupsDescription'] = {'ContinuousBackupsStatus': 'ENABLED'}
    description['LatestStreamArn'] = 'not-a-real-arn'
    description['BillingModeSummary'] = {'BillingMode': description.pop('BillingMode')}
    description = sanitize_table_description(description)
    if active is not None:
        description['TableStatus'] = 'ACTIVE' if active else 'TEST-NOT-ACTIVE'
        for gsi in description['GlobalSecondaryIndexes']:
            gsi['IndexStatus'] = 'ACTIVE' if active else 'TEST-NOT-ACTIVE'
    description = description
    description['StreamSpecification']['StreamViewType'] = 'UNKNOWN'
    assert not compare_tables(model, description)
    logger.assert_only_logged("Model expects StreamViewType 'NEW_AND_OLD_IMAGES' but was 'UNKNOWN'")
bloop
positive
def test_serialize_fails(self):
    val = 'P'
    node = DummySchemaNode(None)
    <DeepExtract>
    from colander import Invalid
    exc = Invalid(node, msg, val)
    typ = exc
    </DeepExtract>
    <DeepExtract>
    from colander import Invalid
    try:
        typ.serialize(*arg, **kw)
    except Invalid as e:
        e = e
    else:
        raise AssertionError('Invalid not raised')
    </DeepExtract>
    self.assertTrue(e.msg)
def test_serialize_fails(self):
    val = 'P'
    node = DummySchemaNode(None)
    from colander import Invalid
    exc = Invalid(node, msg, val)
    typ = exc
    from colander import Invalid
    try:
        typ.serialize(*arg, **kw)
    except Invalid as e:
        e = e
    else:
        raise AssertionError('Invalid not raised')
    self.assertTrue(e.msg)
colander
positive
def __call__(self, data_dict):
    """
    Args:
        data_dict:
            gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]

    Returns:

    """
    gt_boxes = data_dict['gt_boxes']
    gt_names = data_dict['gt_names'].astype(str)
    existed_boxes = gt_boxes
    total_valid_sampled_dict = []
    for (class_name, sample_group) in self.sample_groups.items():
        if self.limit_whole_scene:
            num_gt = np.sum(class_name == gt_names)
            sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt)
        if int(sample_group['sample_num']) > 0:
            <DeepExtract>
            (sample_num, pointer, indices) = (int(sample_group['sample_num']), sample_group['pointer'], sample_group['indices'])
            if pointer >= len(self.db_infos[class_name]):
                indices = np.random.permutation(len(self.db_infos[class_name]))
                pointer = 0
            sampled_dict = [self.db_infos[class_name][idx] for idx in indices[pointer:pointer + sample_num]]
            pointer += sample_num
            sample_group['pointer'] = pointer
            sample_group['indices'] = indices
            sampled_dict = sampled_dict
            </DeepExtract>
            sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32)
            if self.sampler_cfg.get('DATABASE_WITH_FAKELIDAR', False):
                sampled_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(sampled_boxes)
            iou1 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], existed_boxes[:, 0:7])
            iou2 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], sampled_boxes[:, 0:7])
            iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0
            iou1 = iou1 if iou1.shape[1] > 0 else iou2
            valid_mask = (iou1.max(axis=1) + iou2.max(axis=1) == 0).nonzero()[0]
            valid_sampled_dict = [sampled_dict[x] for x in valid_mask]
            valid_sampled_boxes = sampled_boxes[valid_mask]
            existed_boxes = np.concatenate((existed_boxes, valid_sampled_boxes), axis=0)
            total_valid_sampled_dict.extend(valid_sampled_dict)
    sampled_gt_boxes = existed_boxes[gt_boxes.shape[0]:, :]
    if total_valid_sampled_dict.__len__() > 0:
        <DeepExtract>
        gt_boxes_mask = data_dict['gt_boxes_mask']
        gt_boxes = data_dict['gt_boxes'][gt_boxes_mask]
        gt_names = data_dict['gt_names'][gt_boxes_mask]
        points = data_dict['points']
        if self.sampler_cfg.get('USE_ROAD_PLANE', False):
            (sampled_gt_boxes, mv_height) = self.put_boxes_on_road_planes(sampled_gt_boxes, data_dict['road_plane'], data_dict['calib'])
            data_dict.pop('calib')
            data_dict.pop('road_plane')
        obj_points_list = []
        for (idx, info) in enumerate(total_valid_sampled_dict):
            file_path = self.root_path / info['path']
            obj_points = np.fromfile(str(file_path), dtype=np.float32).reshape([-1, self.sampler_cfg.NUM_POINT_FEATURES])
            obj_points[:, :3] += info['box3d_lidar'][:3]
            if self.sampler_cfg.get('USE_ROAD_PLANE', False):
                obj_points[:, 2] -= mv_height[idx]
            obj_points_list.append(obj_points)
        obj_points = np.concatenate(obj_points_list, axis=0)
        sampled_gt_names = np.array([x['name'] for x in total_valid_sampled_dict])
        large_sampled_gt_boxes = box_utils.enlarge_box3d(sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.REMOVE_EXTRA_WIDTH)
        points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes)
        points = np.concatenate([obj_points, points], axis=0)
        gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
        gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0)
        data_dict['gt_boxes'] = gt_boxes
        data_dict['gt_names'] = gt_names
        data_dict['points'] = points
        data_dict = data_dict
        </DeepExtract>
    data_dict.pop('gt_boxes_mask')
    return data_dict
def __call__(self, data_dict):
    """
    Args:
        data_dict:
            gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]

    Returns:

    """
    gt_boxes = data_dict['gt_boxes']
    gt_names = data_dict['gt_names'].astype(str)
    existed_boxes = gt_boxes
    total_valid_sampled_dict = []
    for (class_name, sample_group) in self.sample_groups.items():
        if self.limit_whole_scene:
            num_gt = np.sum(class_name == gt_names)
            sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt)
        if int(sample_group['sample_num']) > 0:
            (sample_num, pointer, indices) = (int(sample_group['sample_num']), sample_group['pointer'], sample_group['indices'])
            if pointer >= len(self.db_infos[class_name]):
                indices = np.random.permutation(len(self.db_infos[class_name]))
                pointer = 0
            sampled_dict = [self.db_infos[class_name][idx] for idx in indices[pointer:pointer + sample_num]]
            pointer += sample_num
            sample_group['pointer'] = pointer
            sample_group['indices'] = indices
            sampled_dict = sampled_dict
            sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32)
            if self.sampler_cfg.get('DATABASE_WITH_FAKELIDAR', False):
                sampled_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(sampled_boxes)
            iou1 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], existed_boxes[:, 0:7])
            iou2 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], sampled_boxes[:, 0:7])
            iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0
            iou1 = iou1 if iou1.shape[1] > 0 else iou2
            valid_mask = (iou1.max(axis=1) + iou2.max(axis=1) == 0).nonzero()[0]
            valid_sampled_dict = [sampled_dict[x] for x in valid_mask]
            valid_sampled_boxes = sampled_boxes[valid_mask]
            existed_boxes = np.concatenate((existed_boxes, valid_sampled_boxes), axis=0)
            total_valid_sampled_dict.extend(valid_sampled_dict)
    sampled_gt_boxes = existed_boxes[gt_boxes.shape[0]:, :]
    if total_valid_sampled_dict.__len__() > 0:
        gt_boxes_mask = data_dict['gt_boxes_mask']
        gt_boxes = data_dict['gt_boxes'][gt_boxes_mask]
        gt_names = data_dict['gt_names'][gt_boxes_mask]
        points = data_dict['points']
        if self.sampler_cfg.get('USE_ROAD_PLANE', False):
            (sampled_gt_boxes, mv_height) = self.put_boxes_on_road_planes(sampled_gt_boxes, data_dict['road_plane'], data_dict['calib'])
            data_dict.pop('calib')
            data_dict.pop('road_plane')
        obj_points_list = []
        for (idx, info) in enumerate(total_valid_sampled_dict):
            file_path = self.root_path / info['path']
            obj_points = np.fromfile(str(file_path), dtype=np.float32).reshape([-1, self.sampler_cfg.NUM_POINT_FEATURES])
            obj_points[:, :3] += info['box3d_lidar'][:3]
            if self.sampler_cfg.get('USE_ROAD_PLANE', False):
                obj_points[:, 2] -= mv_height[idx]
            obj_points_list.append(obj_points)
        obj_points = np.concatenate(obj_points_list, axis=0)
        sampled_gt_names = np.array([x['name'] for x in total_valid_sampled_dict])
        large_sampled_gt_boxes = box_utils.enlarge_box3d(sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.REMOVE_EXTRA_WIDTH)
        points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes)
        points = np.concatenate([obj_points, points], axis=0)
        gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
        gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0)
        data_dict['gt_boxes'] = gt_boxes
        data_dict['gt_names'] = gt_names
        data_dict['points'] = points
        data_dict = data_dict
    data_dict.pop('gt_boxes_mask')
    return data_dict
CaDDN
positive
def solve_2(data):
    <DeepExtract>
    graph = defaultdict(set)
    for line in data:
        (source, destinations) = line.split(' <-> ')
        for d in destinations.split(','):
            graph[source.strip()].add(d.strip())
            graph[d.strip()].add(source.strip())
    graph = dict(graph)
    </DeepExtract>
    programs_left = set(graph.keys())
    n_groups = 0
    while programs_left:
        node = programs_left.pop()
        programs_left.add(node)
        <DeepExtract>
        seen = {}
        q = deque()
        q.appendleft(node)
        while q:
            node = q.pop()
            if node not in seen:
                seen[node] = True
                for child in graph[node]:
                    q.appendleft(child)
        group = seen
        </DeepExtract>
        n_groups += 1
        programs_left = programs_left.difference(set(group.keys()))
    return n_groups
def solve_2(data):
    graph = defaultdict(set)
    for line in data:
        (source, destinations) = line.split(' <-> ')
        for d in destinations.split(','):
            graph[source.strip()].add(d.strip())
            graph[d.strip()].add(source.strip())
    graph = dict(graph)
    programs_left = set(graph.keys())
    n_groups = 0
    while programs_left:
        node = programs_left.pop()
        programs_left.add(node)
        seen = {}
        q = deque()
        q.appendleft(node)
        while q:
            node = q.pop()
            if node not in seen:
                seen[node] = True
                for child in graph[node]:
                    q.appendleft(child)
        group = seen
        n_groups += 1
        programs_left = programs_left.difference(set(group.keys()))
    return n_groups
advent_of_code_2017
positive
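solve_2 above counts connected components by repeated flood fill: pick an unvisited node, BFS its group, subtract the group from the remaining pool. The same loop as a standalone function:

from collections import deque

def connected_components(graph):
    # graph: dict mapping node -> set of neighbours (undirected).
    remaining = set(graph)
    n_groups = 0
    while remaining:
        start = next(iter(remaining))
        seen = {start}
        q = deque([start])
        while q:
            node = q.pop()
            for child in graph[node]:
                if child not in seen:
                    seen.add(child)
                    q.appendleft(child)
        remaining -= seen
        n_groups += 1
    return n_groups

graph = {'0': {'2'}, '2': {'0'}, '1': {'1'}, '3': {'4'}, '4': {'3'}}
print(connected_components(graph))  # 3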
def check_state_topic(wait_new_status=False):
    global system_status, heartbeat_sub, heartbeat_sub_status
    if not heartbeat_sub or not heartbeat_sub_status:
        <DeepExtract>
        global heartbeat_sub, heartbeat_sub_status
        heartbeat_sub = rospy.Subscriber('/mavros/state', State, state_callback)
        heartbeat_sub_status = True
        </DeepExtract>
        system_status = -1
    if wait_new_status:
        system_status = -1
    start_time = time.time()
    while system_status == -1:
        if time.time() - start_time > 1.0:
            rospy.loginfo('Not connected to fcu. Check connection.')
            return False
        rospy.sleep(0.1)
    return True
def check_state_topic(wait_new_status=False):
    global system_status, heartbeat_sub, heartbeat_sub_status
    if not heartbeat_sub or not heartbeat_sub_status:
        global heartbeat_sub, heartbeat_sub_status
        heartbeat_sub = rospy.Subscriber('/mavros/state', State, state_callback)
        heartbeat_sub_status = True
        system_status = -1
    if wait_new_status:
        system_status = -1
    start_time = time.time()
    while system_status == -1:
        if time.time() - start_time > 1.0:
            rospy.loginfo('Not connected to fcu. Check connection.')
            return False
        rospy.sleep(0.1)
    return True
clever-show
positive
def errmsg(msg, doc, pos, end=None):
    <DeepExtract>
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        colno = pos
    else:
        colno = pos - doc.rindex('\n', 0, pos)
    (lineno, colno) = (lineno, colno)
    </DeepExtract>
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    <DeepExtract>
    lineno = doc.count('\n', 0, end) + 1
    if lineno == 1:
        colno = end
    else:
        colno = end - doc.rindex('\n', 0, end)
    (endlineno, endcolno) = (lineno, colno)
    </DeepExtract>
    return '%s: line %d column %d - line %d column %d (char %d - %d)' % (msg, lineno, colno, endlineno, endcolno, pos, end)
def errmsg(msg, doc, pos, end=None):
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        colno = pos
    else:
        colno = pos - doc.rindex('\n', 0, pos)
    (lineno, colno) = (lineno, colno)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    lineno = doc.count('\n', 0, end) + 1
    if lineno == 1:
        colno = end
    else:
        colno = end - doc.rindex('\n', 0, end)
    (endlineno, endcolno) = (lineno, colno)
    return '%s: line %d column %d - line %d column %d (char %d - %d)' % (msg, lineno, colno, endlineno, endcolno, pos, end)
delicious2google
positive
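The arithmetic inlined twice in errmsg converts a character offset into line and column: count newlines before the offset for the line, and measure from the last newline for the column (with the historical json quirk that the column on line 1 is the raw 0-based offset). Standalone:

def line_col(doc, pos):
    # 1-based line; column measured from the last newline before pos.
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        colno = pos  # no newline before pos: column is the raw offset
    else:
        colno = pos - doc.rindex('\n', 0, pos)
    return (lineno, colno)

doc = 'abc\ndefg'
print(line_col(doc, 2))  # (1, 2)
print(line_col(doc, 6))  # (2, 3)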
def match(self, gt_boxes):
    h_ind = []
    w_ind = []
    reg_num_ofchannel = np.zeros((2 * self.num_anchors,), np.long)
    cur_gtw = gt_boxes[:, 2] - gt_boxes[:, 0]
    cur_gth = gt_boxes[:, 3] - gt_boxes[:, 1]
    for ith in range(gt_boxes.shape[0]):
        <DeepExtract>
        if cur_gth[ith] < self.anchor_strings[0]:
            th = np.array([0])
        if cur_gth[ith] >= self.anchor_strings[-1]:
            th = np.array([self.anchor_strings.shape[0] - 1])
        for i in range(self.anchor_strings.shape[0]):
            if self.anchor_strings[i] <= cur_gth[ith] < self.anchor_strings[i + 1]:
                if cur_gth[ith] < self.anchor_strings[i] * (2 ** 0.5 - self.beta):
                    th = np.array([i])
                if cur_gth[ith] > self.anchor_strings[i] * (2 ** 0.5 + self.beta):
                    th = np.array([i + 1])
                th = np.array([i, i + 1])
        </DeepExtract>
        <DeepExtract>
        if cur_gtw[ith] < self.anchor_strings[0]:
            tw = np.array([0])
        if cur_gtw[ith] >= self.anchor_strings[-1]:
            tw = np.array([self.anchor_strings.shape[0] - 1])
        for i in range(self.anchor_strings.shape[0]):
            if self.anchor_strings[i] <= cur_gtw[ith] < self.anchor_strings[i + 1]:
                if cur_gtw[ith] < self.anchor_strings[i] * (2 ** 0.5 - self.beta):
                    tw = np.array([i])
                if cur_gtw[ith] > self.anchor_strings[i] * (2 ** 0.5 + self.beta):
                    tw = np.array([i + 1])
                tw = np.array([i, i + 1])
        </DeepExtract>
        reg_num_ofchannel[th] += 1
        reg_num_ofchannel[tw + self.num_anchors] += 1
        h_ind.append(th)
        w_ind.append(tw)
    reg_num_ofchannel = np.hstack((reg_num_ofchannel, reg_num_ofchannel))
    return (h_ind, w_ind, reg_num_ofchannel)
def match(self, gt_boxes):
    h_ind = []
    w_ind = []
    reg_num_ofchannel = np.zeros((2 * self.num_anchors,), np.long)
    cur_gtw = gt_boxes[:, 2] - gt_boxes[:, 0]
    cur_gth = gt_boxes[:, 3] - gt_boxes[:, 1]
    for ith in range(gt_boxes.shape[0]):
        if cur_gth[ith] < self.anchor_strings[0]:
            th = np.array([0])
        if cur_gth[ith] >= self.anchor_strings[-1]:
            th = np.array([self.anchor_strings.shape[0] - 1])
        for i in range(self.anchor_strings.shape[0]):
            if self.anchor_strings[i] <= cur_gth[ith] < self.anchor_strings[i + 1]:
                if cur_gth[ith] < self.anchor_strings[i] * (2 ** 0.5 - self.beta):
                    th = np.array([i])
                if cur_gth[ith] > self.anchor_strings[i] * (2 ** 0.5 + self.beta):
                    th = np.array([i + 1])
                th = np.array([i, i + 1])
        if cur_gtw[ith] < self.anchor_strings[0]:
            tw = np.array([0])
        if cur_gtw[ith] >= self.anchor_strings[-1]:
            tw = np.array([self.anchor_strings.shape[0] - 1])
        for i in range(self.anchor_strings.shape[0]):
            if self.anchor_strings[i] <= cur_gtw[ith] < self.anchor_strings[i + 1]:
                if cur_gtw[ith] < self.anchor_strings[i] * (2 ** 0.5 - self.beta):
                    tw = np.array([i])
                if cur_gtw[ith] > self.anchor_strings[i] * (2 ** 0.5 + self.beta):
                    tw = np.array([i + 1])
                tw = np.array([i, i + 1])
        reg_num_ofchannel[th] += 1
        reg_num_ofchannel[tw + self.num_anchors] += 1
        h_ind.append(th)
        w_ind.append(tw)
    reg_num_ofchannel = np.hstack((reg_num_ofchannel, reg_num_ofchannel))
    return (h_ind, w_ind, reg_num_ofchannel)
DeRPN
positive
def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name='token_type_embeddings', use_position_embeddings=True, position_embedding_name='position_embeddings', initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1): """Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid. """ <DeepExtract> if name is None: name = input_tensor.name if 3 is not None: assert_rank(input_tensor, 3, name) shape = input_tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: input_shape = shape dyn_shape = tf.shape(input_tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] input_shape = shape </DeepExtract> batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = input_tensor if use_token_type: if token_type_ids is None: raise ValueError('`token_type_ids` must be specified if`use_token_type` is True.') token_type_table = tf.get_variable(name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range)) flat_token_type_ids = tf.reshape(token_type_ids, [-1]) one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if use_position_embeddings: assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) with tf.control_dependencies([assert_op]): full_position_embeddings = tf.get_variable(name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range)) position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1]) num_dims = len(output.shape.as_list()) position_broadcast_shape = [] for _ in range(num_dims - 2): position_broadcast_shape.append(1) position_broadcast_shape.extend([seq_length, width]) position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape) output += position_embeddings <DeepExtract> output_tensor = layer_norm(output, name) output_tensor = dropout(output_tensor, dropout_prob) output = output_tensor </DeepExtract> return output
def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name='token_type_embeddings', use_position_embeddings=True, position_embedding_name='position_embeddings', initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1): """Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid. """ if name is None: name = input_tensor.name if 3 is not None: assert_rank(input_tensor, 3, name) shape = input_tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: input_shape = shape dyn_shape = tf.shape(input_tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] input_shape = shape batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = input_tensor if use_token_type: if token_type_ids is None: raise ValueError('`token_type_ids` must be specified if`use_token_type` is True.') token_type_table = tf.get_variable(name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range)) flat_token_type_ids = tf.reshape(token_type_ids, [-1]) one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if use_position_embeddings: assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) with tf.control_dependencies([assert_op]): full_position_embeddings = tf.get_variable(name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range)) position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1]) num_dims = len(output.shape.as_list()) position_broadcast_shape = [] for _ in range(num_dims - 2): position_broadcast_shape.append(1) position_broadcast_shape.extend([seq_length, width]) position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape) output += position_embeddings output_tensor = layer_norm(output, name) output_tensor = dropout(output_tensor, dropout_prob) output = output_tensor return output
ChineseEHRBert
positive
def push_file(self, filename, pkg_name): """Perform a number of checks against the filename and push the filename if appropriate.""" if filename in self.files or filename in self.files_blacklist: return self.files.add(filename) if self.file_is_locale(filename): return for (k, v) in self.file_maps.items(): for match_name in v['files']: if isinstance(match_name, str): if filename == match_name: <DeepExtract> if k not in self.packages: self.packages[k] = set() if FileManager.banned_path(filename): util.print_warning(f' Content {filename} found in banned path, skipping') self.has_banned = True return if filename in self.attrs: mod = self.attrs[filename][0] u = self.attrs[filename][1] g = self.attrs[filename][2] filename = '%attr({0},{1},{2}) {3}'.format(mod, u, g, filename) self.packages[k].add(filename) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True </DeepExtract> return elif len('/'.join(match_name)) <= len(filename) + 1: if self.globlike_match(filename, match_name): <DeepExtract> if k not in self.packages: self.packages[k] = set() if FileManager.banned_path(os.path.join('/', *match_name)): util.print_warning(f" Content {os.path.join('/', *match_name)} found in banned path, skipping") self.has_banned = True return if os.path.join('/', *match_name) in self.attrs: mod = self.attrs[os.path.join('/', *match_name)][0] u = self.attrs[os.path.join('/', *match_name)][1] g = self.attrs[os.path.join('/', *match_name)][2] os.path.join('/', *match_name) = '%attr({0},{1},{2}) {3}'.format(mod, u, g, os.path.join('/', *match_name)) self.packages[k].add(os.path.join('/', *match_name)) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True </DeepExtract> return if filename in self.setuid: if filename in self.attrs: mod = self.attrs[filename][0] u = self.attrs[filename][1] g = self.attrs[filename][2] newfn = '%attr({0},{1},{2}) {3}'.format(mod, u, g, filename) else: newfn = '%attr(4755, root, root) ' + filename <DeepExtract> if 'setuid' not in self.packages: self.packages['setuid'] = set() if FileManager.banned_path(newfn): util.print_warning(f' Content {newfn} found in banned path, skipping') self.has_banned = True return if newfn in self.attrs: mod = self.attrs[newfn][0] u = self.attrs[newfn][1] g = self.attrs[newfn][2] newfn = '%attr({0},{1},{2}) {3}'.format(mod, u, g, newfn) self.packages['setuid'].add(newfn) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True </DeepExtract> return part = re.compile('^/usr/lib/systemd/system/.+\\.target\\.wants/.+') if part.search(filename) and 'update-triggers.target.wants' not in filename: if filename not in self.excludes: <DeepExtract> if 'autostart' not in self.packages: self.packages['autostart'] = set() if FileManager.banned_path(filename): util.print_warning(f' Content {filename} found in banned path, skipping') self.has_banned = True return if filename in self.attrs: mod = self.attrs[filename][0] u = self.attrs[filename][1] g = self.attrs[filename][2] filename = '%attr({0},{1},{2}) {3}'.format(mod, u, g, filename) self.packages['autostart'].add(filename) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True </DeepExtract> <DeepExtract> if 'services' not in self.packages: self.packages['services'] = set() if FileManager.banned_path('%exclude ' + filename): util.print_warning(f" Content {'%exclude ' + filename} found in banned path, skipping") self.has_banned = True return if '%exclude ' + filename in self.attrs: mod = self.attrs['%exclude ' + filename][0] u = self.attrs['%exclude ' + filename][1] g = self.attrs['%exclude ' + filename][2] '%exclude ' + filename = '%attr({0},{1},{2}) {3}'.format(mod, u, g, '%exclude ' + filename) self.packages['services'].add('%exclude ' + filename) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True </DeepExtract> return if self.want_dev_split and self.file_pat_match(filename, '^/usr/.*/include/.*\\.h$', 'dev'): return so_dest = 'lib' if self.config.config_opts.get('so_to_lib') else 'dev' so_dest_ompi = 'openmpi' if self.config.config_opts.get('so_to_lib') else 'dev' patterns = [('^/usr/share/package-licenses/.{1,}/.{1,}', 'license'), ('^/usr/share/man/man2', 'dev'), ('^/usr/share/man/man3', 'dev'), ('^/usr/share/man/', 'man'), ('^/usr/share/pkgconfig/32.*\\.pc$', 'dev32'), ('^/usr/share/pkgconfig/', 'dev'), ('^/usr/share/info/', 'info'), ('^/usr/share/abi/', 'abi'), ('^/usr/share/qt5/examples/', 'examples'), ('^/usr/share/omf', 'main', '/usr/share/omf/*'), ('^/usr/share/installed-tests/', 'tests'), ('^/usr/libexec/installed-tests/', 'tests'), ('^/usr/lib/rustlib/x86_64-unknown-linux-gnu/lib/[a-zA-Z0-9._+-]+\\.rlib', 'lib', '/usr/lib/rustlib/x86_64-unknown-linux-gnu/lib/*.rlib'), ('^/usr/lib/rustlib/x86_64-unknown-linux-gnu/analysis/[a-zA-Z0-9._+-]+\\.json', 'lib', '/usr/lib/rustlib/x86_64-unknown-linux-gnu/analysis/*.json'), ('^/usr/share/clear/optimized-elf/bin', 'bin', '/usr/share/clear/optimized-elf/bin*'), ('^/usr/share/clear/optimized-elf/exec', 'libexec', '/usr/share/clear/optimized-elf/exec*'), ('^/usr/share/clear/optimized-elf/lib', 'lib', '/usr/share/clear/optimized-elf/lib*'), ('^/usr/share/clear/optimized-elf/other', 'lib', '/usr/share/clear/optimized-elf/other*'), ('^/usr/share/clear/optimized-elf/test', 'tests', '/usr/share/clear/optimized-elf/test*'), ('^/usr/share/clear/optimized-elf/', 'lib'), ('^/usr/share/clear/filemap/', 'filemap'), ('^/usr/lib64/openmpi/bin/', 'openmpi'), ('^/usr/lib64/openmpi/share', 'openmpi'), ('^/usr/lib64/openmpi/include/', 'dev'), ('^/usr/lib64/openmpi/lib/[a-zA-Z0-9._+-]*\\.so$', so_dest_ompi), ('^/usr/lib64/openmpi/lib/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib64/openmpi/lib/[a-zA-Z0-9._+-]*\\.so\\.', 'openmpi'), ('^/usr/lib64/openmpi/lib/python3.*/', 'openmpi'), ('^/usr/lib64/openmpi/lib/', 'dev'), ('^/usr/lib/[a-zA-Z0-9._+-]*\\.so\\.', 'plugins'), ('^/usr/lib64/[a-zA-Z0-9._+-]*\\.so\\.', 'lib'), ('^/usr/lib32/[a-zA-Z0-9._+-]*\\.so\\.', 'lib32'), ('^/usr/lib64/lib(asm|dw|elf)-[0-9.]+\\.so', 'lib'), ('^/usr/lib64/libkdeinit5', 'lib'), ('^/usr/lib32/lib(asm|dw|elf)-[0-9.]+\\.so', 'lib32'), ('^/usr/lib64/haswell/[a-zA-Z0-9._+-]*\\.so\\.', 'lib'), ('^/usr/lib64/gobject-introspection/', 'lib'), ('^/usr/libexec/', 'libexec'), ('^/usr/bin/', 'bin'), ('^/usr/sbin/', 'bin'), ('^/sbin/', 'bin'), ('^/bin/', 'bin'), ('^/usr/lib/python3.*/', 'python3', '/usr/lib/python3*/*'), ('^/usr/share/gir-[0-9\\.]+/[a-zA-Z0-9._+-]*\\.gir', 'data', '/usr/share/gir-1.0/*.gir'), ('^/usr/share/cmake/', 'data', '/usr/share/cmake/*'), ('^/usr/share/cmake-3.1/', 'data', '/usr/share/cmake-3.1/*'), ('^/usr/share/cmake-3.7/', 'data', '/usr/share/cmake-3.7/*'), ('^/usr/share/cmake-3.8/', 'data', '/usr/share/cmake-3.8/*'), ('^/usr/share/cmake-3.6/', 'data', '/usr/share/cmake-3.6/*'), ('^/usr/share/girepository-1\\.0/.*\\.typelib\\$', 'data', '/usr/share/girepository-1.0/*.typelib'), ('^/usr/include/', 'dev'), ('^/usr/lib64/girepository-1.0/', 'data'), ('^/usr/share/cmake/', 'dev'), ('^/usr/lib/cmake/', 'dev'), ('^/usr/lib64/cmake/', 'dev'), ('^/usr/lib32/cmake/', 'dev32'), ('^/usr/lib/qt5/mkspecs/', 'dev'), ('^/usr/lib64/qt5/mkspecs/', 'dev'), ('^/usr/lib32/qt5/mkspecs/', 'dev32'), ('^/usr/lib/qt5/', 'lib'), ('^/usr/lib64/qt5/', 'lib'), ('^/usr/lib32/qt5/', 'lib32'), ('^/usr/lib/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib64/libkdeinit5_[a-zA-Z0-9._+-]*\\.so$', 'lib'), ('^/usr/lib32/libkdeinit5_[a-zA-Z0-9._+-]*\\.so$', 'lib32'), ('^/usr/lib64/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib32/[a-zA-Z0-9._+-]*\\.so$', so_dest + '32'), ('^/usr/lib64/glibc-hwcaps/x86-64-v[0-9]+/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib64/haswell/avx512_1/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib64/haswell/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib64/haswell/avx512_1/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib64/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib32/[a-zA-Z0-9._+-]*\\.a$', 'staticdev32'), ('^/usr/lib/haswell/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib64/glibc-hwcaps/x86-64-v[0-9]+/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib64/haswell/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib64/haswell/avx512_1/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib32/haswell/[a-zA-Z0-9._+-]*\\.a$', 'staticdev32'), ('^/usr/lib/pkgconfig/[a-zA-Z0-9._+-]*\\.pc$', 'dev'), ('^/usr/lib64/pkgconfig/[a-zA-Z0-9._+-]*\\.pc$', 'dev'), ('^/usr/lib32/pkgconfig/[a-zA-Z0-9._+-]*\\.pc$', 'dev32'), ('^/usr/lib64/glibc-hwcaps/x86-64-v[0-9]+/[a-zA-Z0-9._+-]*\\.pc$', 'dev'), ('^/usr/lib64/haswell/pkgconfig/[a-zA-Z0-9._+-]*\\.pc$', 'dev'), ('^/usr/lib64/haswell/avx512_1/pkgconfig/[a-zA-Z0-9._+-]*\\.pc$', 'dev'), ('^/usr/lib/[a-zA-Z0-9._+-]*\\.la$', 'dev'), ('^/usr/lib64/[a-zA-Z0-9._+-]*\\.la$', 'dev'), ('^/usr/lib32/[a-zA-Z0-9._+-]*\\.la$', 'dev32'), ('^/usr/lib/[a-zA-Z0-9._+-]*\\.prl$', 'dev'), ('^/usr/lib64/[a-zA-Z0-9._+-]*\\.prl$', 'dev'), ('^/usr/lib32/[a-zA-Z0-9._+-]*\\.prl$', 'dev32'), ('^/usr/share/aclocal/[a-zA-Z0-9._+-]*\\.ac$', 'dev', '/usr/share/aclocal/*.ac'), ('^/usr/share/aclocal/[a-zA-Z0-9._+-]*\\.m4$', 'dev', '/usr/share/aclocal/*.m4'), ('^/usr/share/aclocal-1.[0-9]+/[a-zA-Z0-9._+-]*\\.ac$', 'dev', '/usr/share/aclocal-1.*/*.ac'), ('^/usr/share/aclocal-1.[0-9]+/[a-zA-Z0-9._+-]*\\.m4$', 'dev', '/usr/share/aclocal-1.*/*.m4'), ('^/usr/share/doc/' + re.escape(pkg_name) + '/', 'doc', '%doc /usr/share/doc/' + re.escape(pkg_name) + '/*'), ('^/usr/share/doc/', 'doc'), ('^/usr/share/gtk-doc/html', 'doc'), ('^/usr/share/help', 'doc'), ('^/usr/share/info/', 'doc', '%doc /usr/share/info/*'), ('^/lib/systemd/system/', 'services'), ('^/lib/systemd/user/', 'services'), ('^/usr/lib/systemd/system/', 'services'), ('^/usr/lib/systemd/user/', 'services'), ('^/usr/lib/udev/rules.d', 'config'), ('^/usr/lib/modules-load.d', 'config'), ('^/usr/lib/tmpfiles.d', 'config'), ('^/usr/lib/sysusers.d', 'config'), ('^/usr/lib/sysctl.d', 'config'), ('^/usr/share/', 'data'), ('^/usr/lib/perl5/', 'perl', '/usr/lib/perl5/*'), ('^/usr/lib/.*/[a-zA-Z0-9._+-]*\\.so', 'lib'), ('^/usr/lib64/.*/[a-zA-Z0-9._+-]*\\.so', 'lib'), ('^/usr/lib32/.*/[a-zA-Z0-9._+-]*\\.so', 'lib32'), ('^/usr/share/locale/', 'ignore')] for pat_args in patterns: if self.file_pat_match(filename, *pat_args): return if filename in self.excludes: return <DeepExtract> if package not in self.packages: self.packages[package] = set() if FileManager.banned_path(filename): util.print_warning(f' Content {filename} found in banned path, skipping') self.has_banned = True return if filename in self.attrs: mod = self.attrs[filename][0] u = self.attrs[filename][1] g = self.attrs[filename][2] filename = '%attr({0},{1},{2}) {3}'.format(mod, u, g, filename) self.packages[package].add(filename) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True </DeepExtract>
def push_file(self, filename, pkg_name): """Perform a number of checks against the filename and push the filename if appropriate.""" if filename in self.files or filename in self.files_blacklist: return self.files.add(filename) if self.file_is_locale(filename): return for (k, v) in self.file_maps.items(): for match_name in v['files']: if isinstance(match_name, str): if filename == match_name: if k not in self.packages: self.packages[k] = set() if FileManager.banned_path(filename): util.print_warning(f' Content {filename} found in banned path, skipping') self.has_banned = True return if filename in self.attrs: mod = self.attrs[filename][0] u = self.attrs[filename][1] g = self.attrs[filename][2] filename = '%attr({0},{1},{2}) {3}'.format(mod, u, g, filename) self.packages[k].add(filename) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True return elif len('/'.join(match_name)) <= len(filename) + 1: if self.globlike_match(filename, match_name): if k not in self.packages: self.packages[k] = set() if FileManager.banned_path(os.path.join('/', *match_name)): util.print_warning(f" Content {os.path.join('/', *match_name)} found in banned path, skipping") self.has_banned = True return if os.path.join('/', *match_name) in self.attrs: mod = self.attrs[os.path.join('/', *match_name)][0] u = self.attrs[os.path.join('/', *match_name)][1] g = self.attrs[os.path.join('/', *match_name)][2] os.path.join('/', *match_name) = '%attr({0},{1},{2}) {3}'.format(mod, u, g, os.path.join('/', *match_name)) self.packages[k].add(os.path.join('/', *match_name)) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True return if filename in self.setuid: if filename in self.attrs: mod = self.attrs[filename][0] u = self.attrs[filename][1] g = self.attrs[filename][2] newfn = '%attr({0},{1},{2}) {3}'.format(mod, u, g, filename) else: newfn = '%attr(4755, root, root) ' + filename if 'setuid' not in self.packages: self.packages['setuid'] = set() if FileManager.banned_path(newfn): util.print_warning(f' Content {newfn} found in banned path, skipping') self.has_banned = True return if newfn in self.attrs: mod = self.attrs[newfn][0] u = self.attrs[newfn][1] g = self.attrs[newfn][2] newfn = '%attr({0},{1},{2}) {3}'.format(mod, u, g, newfn) self.packages['setuid'].add(newfn) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True return part = re.compile('^/usr/lib/systemd/system/.+\\.target\\.wants/.+') if part.search(filename) and 'update-triggers.target.wants' not in filename: if filename not in self.excludes: if 'autostart' not in self.packages: self.packages['autostart'] = set() if FileManager.banned_path(filename): util.print_warning(f' Content {filename} found in banned path, skipping') self.has_banned = True return if filename in self.attrs: mod = self.attrs[filename][0] u = self.attrs[filename][1] g = self.attrs[filename][2] filename = '%attr({0},{1},{2}) {3}'.format(mod, u, g, filename) self.packages['autostart'].add(filename) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True if 'services' not in self.packages: self.packages['services'] = set() if FileManager.banned_path('%exclude ' + filename): util.print_warning(f" Content {'%exclude ' + filename} found in banned path, skipping") self.has_banned = True return if '%exclude ' + filename in self.attrs: mod = self.attrs['%exclude ' + filename][0] u = self.attrs['%exclude ' + filename][1] g = self.attrs['%exclude ' + filename][2] '%exclude ' + filename = '%attr({0},{1},{2}) {3}'.format(mod, u, g, '%exclude ' + filename) self.packages['services'].add('%exclude ' + filename) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True return if self.want_dev_split and self.file_pat_match(filename, '^/usr/.*/include/.*\\.h$', 'dev'): return so_dest = 'lib' if self.config.config_opts.get('so_to_lib') else 'dev' so_dest_ompi = 'openmpi' if self.config.config_opts.get('so_to_lib') else 'dev' patterns = [('^/usr/share/package-licenses/.{1,}/.{1,}', 'license'), ('^/usr/share/man/man2', 'dev'), ('^/usr/share/man/man3', 'dev'), ('^/usr/share/man/', 'man'), ('^/usr/share/pkgconfig/32.*\\.pc$', 'dev32'), ('^/usr/share/pkgconfig/', 'dev'), ('^/usr/share/info/', 'info'), ('^/usr/share/abi/', 'abi'), ('^/usr/share/qt5/examples/', 'examples'), ('^/usr/share/omf', 'main', '/usr/share/omf/*'), ('^/usr/share/installed-tests/', 'tests'), ('^/usr/libexec/installed-tests/', 'tests'), ('^/usr/lib/rustlib/x86_64-unknown-linux-gnu/lib/[a-zA-Z0-9._+-]+\\.rlib', 'lib', '/usr/lib/rustlib/x86_64-unknown-linux-gnu/lib/*.rlib'), ('^/usr/lib/rustlib/x86_64-unknown-linux-gnu/analysis/[a-zA-Z0-9._+-]+\\.json', 'lib', '/usr/lib/rustlib/x86_64-unknown-linux-gnu/analysis/*.json'), ('^/usr/share/clear/optimized-elf/bin', 'bin', '/usr/share/clear/optimized-elf/bin*'), ('^/usr/share/clear/optimized-elf/exec', 'libexec', '/usr/share/clear/optimized-elf/exec*'), ('^/usr/share/clear/optimized-elf/lib', 'lib', '/usr/share/clear/optimized-elf/lib*'), ('^/usr/share/clear/optimized-elf/other', 'lib', '/usr/share/clear/optimized-elf/other*'), ('^/usr/share/clear/optimized-elf/test', 'tests', '/usr/share/clear/optimized-elf/test*'), ('^/usr/share/clear/optimized-elf/', 'lib'), ('^/usr/share/clear/filemap/', 'filemap'), ('^/usr/lib64/openmpi/bin/', 'openmpi'), ('^/usr/lib64/openmpi/share', 'openmpi'), ('^/usr/lib64/openmpi/include/', 'dev'), ('^/usr/lib64/openmpi/lib/[a-zA-Z0-9._+-]*\\.so$', so_dest_ompi), ('^/usr/lib64/openmpi/lib/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib64/openmpi/lib/[a-zA-Z0-9._+-]*\\.so\\.', 'openmpi'), ('^/usr/lib64/openmpi/lib/python3.*/', 'openmpi'), ('^/usr/lib64/openmpi/lib/', 'dev'), ('^/usr/lib/[a-zA-Z0-9._+-]*\\.so\\.', 'plugins'), ('^/usr/lib64/[a-zA-Z0-9._+-]*\\.so\\.', 'lib'), ('^/usr/lib32/[a-zA-Z0-9._+-]*\\.so\\.', 'lib32'), ('^/usr/lib64/lib(asm|dw|elf)-[0-9.]+\\.so', 'lib'), ('^/usr/lib64/libkdeinit5', 'lib'), ('^/usr/lib32/lib(asm|dw|elf)-[0-9.]+\\.so', 'lib32'), ('^/usr/lib64/haswell/[a-zA-Z0-9._+-]*\\.so\\.', 'lib'), ('^/usr/lib64/gobject-introspection/', 'lib'), ('^/usr/libexec/', 'libexec'), ('^/usr/bin/', 'bin'), ('^/usr/sbin/', 'bin'), ('^/sbin/', 'bin'), ('^/bin/', 'bin'), ('^/usr/lib/python3.*/', 'python3', '/usr/lib/python3*/*'), ('^/usr/share/gir-[0-9\\.]+/[a-zA-Z0-9._+-]*\\.gir', 'data', '/usr/share/gir-1.0/*.gir'), ('^/usr/share/cmake/', 'data', '/usr/share/cmake/*'), ('^/usr/share/cmake-3.1/', 'data', '/usr/share/cmake-3.1/*'), ('^/usr/share/cmake-3.7/', 'data', '/usr/share/cmake-3.7/*'), ('^/usr/share/cmake-3.8/', 'data', '/usr/share/cmake-3.8/*'), ('^/usr/share/cmake-3.6/', 'data', '/usr/share/cmake-3.6/*'), ('^/usr/share/girepository-1\\.0/.*\\.typelib\\$', 'data', '/usr/share/girepository-1.0/*.typelib'), ('^/usr/include/', 'dev'), ('^/usr/lib64/girepository-1.0/', 'data'), ('^/usr/share/cmake/', 'dev'), ('^/usr/lib/cmake/', 'dev'), ('^/usr/lib64/cmake/', 'dev'), ('^/usr/lib32/cmake/', 'dev32'), ('^/usr/lib/qt5/mkspecs/', 'dev'), ('^/usr/lib64/qt5/mkspecs/', 'dev'), ('^/usr/lib32/qt5/mkspecs/', 'dev32'), ('^/usr/lib/qt5/', 'lib'), ('^/usr/lib64/qt5/', 'lib'), ('^/usr/lib32/qt5/', 'lib32'), ('^/usr/lib/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib64/libkdeinit5_[a-zA-Z0-9._+-]*\\.so$', 'lib'), ('^/usr/lib32/libkdeinit5_[a-zA-Z0-9._+-]*\\.so$', 'lib32'), ('^/usr/lib64/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib32/[a-zA-Z0-9._+-]*\\.so$', so_dest + '32'), ('^/usr/lib64/glibc-hwcaps/x86-64-v[0-9]+/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib64/haswell/avx512_1/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib64/haswell/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib64/haswell/avx512_1/[a-zA-Z0-9._+-]*\\.so$', so_dest), ('^/usr/lib/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib64/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib32/[a-zA-Z0-9._+-]*\\.a$', 'staticdev32'), ('^/usr/lib/haswell/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib64/glibc-hwcaps/x86-64-v[0-9]+/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib64/haswell/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib64/haswell/avx512_1/[a-zA-Z0-9._+-]*\\.a$', 'staticdev'), ('^/usr/lib32/haswell/[a-zA-Z0-9._+-]*\\.a$', 'staticdev32'), ('^/usr/lib/pkgconfig/[a-zA-Z0-9._+-]*\\.pc$', 'dev'), ('^/usr/lib64/pkgconfig/[a-zA-Z0-9._+-]*\\.pc$', 'dev'), ('^/usr/lib32/pkgconfig/[a-zA-Z0-9._+-]*\\.pc$', 'dev32'), ('^/usr/lib64/glibc-hwcaps/x86-64-v[0-9]+/[a-zA-Z0-9._+-]*\\.pc$', 'dev'), ('^/usr/lib64/haswell/pkgconfig/[a-zA-Z0-9._+-]*\\.pc$', 'dev'), ('^/usr/lib64/haswell/avx512_1/pkgconfig/[a-zA-Z0-9._+-]*\\.pc$', 'dev'), ('^/usr/lib/[a-zA-Z0-9._+-]*\\.la$', 'dev'), ('^/usr/lib64/[a-zA-Z0-9._+-]*\\.la$', 'dev'), ('^/usr/lib32/[a-zA-Z0-9._+-]*\\.la$', 'dev32'), ('^/usr/lib/[a-zA-Z0-9._+-]*\\.prl$', 'dev'), ('^/usr/lib64/[a-zA-Z0-9._+-]*\\.prl$', 'dev'), ('^/usr/lib32/[a-zA-Z0-9._+-]*\\.prl$', 'dev32'), ('^/usr/share/aclocal/[a-zA-Z0-9._+-]*\\.ac$', 'dev', '/usr/share/aclocal/*.ac'), ('^/usr/share/aclocal/[a-zA-Z0-9._+-]*\\.m4$', 'dev', '/usr/share/aclocal/*.m4'), ('^/usr/share/aclocal-1.[0-9]+/[a-zA-Z0-9._+-]*\\.ac$', 'dev', '/usr/share/aclocal-1.*/*.ac'), ('^/usr/share/aclocal-1.[0-9]+/[a-zA-Z0-9._+-]*\\.m4$', 'dev', '/usr/share/aclocal-1.*/*.m4'), ('^/usr/share/doc/' + re.escape(pkg_name) + '/', 'doc', '%doc /usr/share/doc/' + re.escape(pkg_name) + '/*'), ('^/usr/share/doc/', 'doc'), ('^/usr/share/gtk-doc/html', 'doc'), ('^/usr/share/help', 'doc'), ('^/usr/share/info/', 'doc', '%doc /usr/share/info/*'), ('^/lib/systemd/system/', 'services'), ('^/lib/systemd/user/', 'services'), ('^/usr/lib/systemd/system/', 'services'), ('^/usr/lib/systemd/user/', 'services'), ('^/usr/lib/udev/rules.d', 'config'), ('^/usr/lib/modules-load.d', 'config'), ('^/usr/lib/tmpfiles.d', 'config'), ('^/usr/lib/sysusers.d', 'config'), ('^/usr/lib/sysctl.d', 'config'), ('^/usr/share/', 'data'), ('^/usr/lib/perl5/', 'perl', '/usr/lib/perl5/*'), ('^/usr/lib/.*/[a-zA-Z0-9._+-]*\\.so', 'lib'), ('^/usr/lib64/.*/[a-zA-Z0-9._+-]*\\.so', 'lib'), ('^/usr/lib32/.*/[a-zA-Z0-9._+-]*\\.so', 'lib32'), ('^/usr/share/locale/', 'ignore')] for pat_args in patterns: if self.file_pat_match(filename, *pat_args): return if filename in self.excludes: return if package not in self.packages: self.packages[package] = set() if FileManager.banned_path(filename): util.print_warning(f' Content {filename} found in banned path, skipping') self.has_banned = True return if filename in self.attrs: mod = self.attrs[filename][0] u = self.attrs[filename][1] g = self.attrs[filename][2] filename = '%attr({0},{1},{2}) {3}'.format(mod, u, g, filename) self.packages[package].add(filename) self.package.file_restart += 1 if not self.newfiles_printed: print(' New %files content found') self.newfiles_printed = True
autospec
positive
def temperature_input(**kwargs): <DeepExtract> try: _prop = kwargs['name'] except KeyError: _prop = 'TEMP_INPUT' kwargs['name'] = _prop </DeepExtract> kwargs['properties'] = {'units': 'degreesCelsius'} return analog_input(**kwargs)
def temperature_input(**kwargs): try: _prop = kwargs['name'] except KeyError: _prop = 'TEMP_INPUT' kwargs['name'] = _prop kwargs['properties'] = {'units': 'degreesCelsius'} return analog_input(**kwargs)
BAC0
positive
@cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ <DeepExtract> from airflow_client.client.model.collection_info import CollectionInfo from airflow_client.client.model.import_error import ImportError from airflow_client.client.model.import_error_collection_all_of import ImportErrorCollectionAllOf globals()['CollectionInfo'] = CollectionInfo globals()['ImportError'] = ImportError globals()['ImportErrorCollectionAllOf'] = ImportErrorCollectionAllOf </DeepExtract> return (bool, date, datetime, dict, float, int, list, str, none_type)
@cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ from airflow_client.client.model.collection_info import CollectionInfo from airflow_client.client.model.import_error import ImportError from airflow_client.client.model.import_error_collection_all_of import ImportErrorCollectionAllOf globals()['CollectionInfo'] = CollectionInfo globals()['ImportError'] = ImportError globals()['ImportErrorCollectionAllOf'] = ImportErrorCollectionAllOf return (bool, date, datetime, dict, float, int, list, str, none_type)
airflow-client-python
positive
def forward(self, input): x = self.features(input) <DeepExtract> if not self.training and self.test_time_pool: x = F.avg_pool2d(x, kernel_size=7, stride=1) out = self.classifier(x) out = adaptive_avgmax_pool2d(out, pool_type='avgmax') else: x = adaptive_avgmax_pool2d(x, pool_type='avg') out = self.classifier(x) x = out.view(out.size(0), -1) </DeepExtract> return x
def forward(self, input): x = self.features(input) if not self.training and self.test_time_pool: x = F.avg_pool2d(x, kernel_size=7, stride=1) out = self.classifier(x) out = adaptive_avgmax_pool2d(out, pool_type='avgmax') else: x = adaptive_avgmax_pool2d(x, pool_type='avg') out = self.classifier(x) x = out.view(out.size(0), -1) return x
DCASE2018Task2
positive
def write_object(obj, parent=None): for transformer in self.object_transformers: tmp_object = transformer.transform(obj) if tmp_object is not obj: obj = tmp_object break <DeepExtract> ba = struct.pack('>B', *(self.TC_OBJECT,)) self.object_stream.write(ba) </DeepExtract> cls = obj.get_class() <DeepExtract> self._writeStruct('>B', 1, (self.TC_CLASSDESC,)) self._writeString(cls.name) self._writeStruct('>LLB', 1, (cls.serialVersionUID, cls.handle, cls.flags)) self._writeStruct('>H', 1, (len(cls.fields_names),)) for (name, type) in zip(cls.fields_names, cls.fields_types): self._writeStruct('>B', 1, (self._convert_type_to_char(type),)) self._writeString(name) if type[0] in (self.TYPE_OBJECT, self.TYPE_ARRAY): self.write_string(type) self._writeStruct('>B', 1, (self.TC_ENDBLOCKDATA,)) if cls.superclass: self.write_classdesc(cls.superclass) else: self.write_null() </DeepExtract> all_names = [] all_types = [] tmpcls = cls while tmpcls: all_names += tmpcls.fields_names all_types += tmpcls.fields_types tmpcls = tmpcls.superclass del tmpcls for (name, type) in zip(all_names, all_types): try: <DeepExtract> if len(type) > 1: type = type[0] if type == self.TYPE_BOOLEAN: self._writeStruct('>B', 1, (1 if getattr(obj, name) else 0,)) elif type == self.TYPE_BYTE: if getattr(obj, name) > 127: self._writeStruct('>B', 1, (getattr(obj, name),)) else: self._writeStruct('>b', 1, (getattr(obj, name),)) elif type == self.TYPE_SHORT: self._writeStruct('>h', 1, (getattr(obj, name),)) elif type == self.TYPE_INTEGER: self._writeStruct('>i', 1, (getattr(obj, name),)) elif type == self.TYPE_LONG: self._writeStruct('>q', 1, (getattr(obj, name),)) elif type == self.TYPE_FLOAT: self._writeStruct('>f', 1, (getattr(obj, name),)) elif type == self.TYPE_DOUBLE: self._writeStruct('>d', 1, (getattr(obj, name),)) elif type == self.TYPE_OBJECT or type == self.TYPE_ARRAY: if getattr(obj, name) == None: self.write_null() elif isinstance(getattr(obj, name), JavaEnum): self.write_enum(getattr(obj, name)) elif isinstance(getattr(obj, name), JavaObject): self.write_object(getattr(obj, name)) elif isinstance(getattr(obj, name), JavaString): self.write_string(getattr(obj, name)) elif isinstance(getattr(obj, name), str): self.write_blockdata(getattr(obj, name)) else: raise RuntimeError('Unknown typecode: %s' % type) else: raise RuntimeError('Unknown typecode: %s' % type) </DeepExtract> except AttributeError as e: <DeepExtract> _log.error(' ' * (ident * 2) + str('%s e, %s %s' % (str(e), repr(obj), repr(dir(obj))))) </DeepExtract> raise del all_names, all_types if cls.flags & self.SC_SERIALIZABLE and cls.flags & self.SC_WRITE_METHOD or (cls.flags & self.SC_EXTERNALIZABLE and cls.flags & self.SC_BLOCK_DATA): for annot in obj.annotations: <DeepExtract> _log.debug(' ' * (ident * 2) + str('Write annotation %s for %s' % (repr(annot), repr(obj)))) </DeepExtract> if annot == None: <DeepExtract> self._writeStruct('>B', 1, (self.TC_NULL,)) </DeepExtract> else: <DeepExtract> log_debug('Writing object of type ' + str(type(annot)) + ' ' + str(annot)) if isinstance(annot, JavaArray): self.write_array(annot) elif isinstance(annot, JavaEnum): self.write_enum(annot) elif isinstance(annot, JavaObject): self.write_object(annot) elif isinstance(annot, JavaString): self.write_string(annot) elif isinstance(annot, JavaClass): self.write_class(annot) elif annot is None: self.write_null() elif type(annot) is str: self.write_blockdata(annot) else: raise RuntimeError('Object serialization of type %s is not supported.' % str(type(annot))) </DeepExtract> <DeepExtract> ba = struct.pack('>B', *(self.TC_ENDBLOCKDATA,)) self.object_stream.write(ba) </DeepExtract>
def write_object(obj, parent=None): for transformer in self.object_transformers: tmp_object = transformer.transform(obj) if tmp_object is not obj: obj = tmp_object break ba = struct.pack('>B', *(self.TC_OBJECT,)) self.object_stream.write(ba) cls = obj.get_class() self._writeStruct('>B', 1, (self.TC_CLASSDESC,)) self._writeString(cls.name) self._writeStruct('>LLB', 1, (cls.serialVersionUID, cls.handle, cls.flags)) self._writeStruct('>H', 1, (len(cls.fields_names),)) for (name, type) in zip(cls.fields_names, cls.fields_types): self._writeStruct('>B', 1, (self._convert_type_to_char(type),)) self._writeString(name) if type[0] in (self.TYPE_OBJECT, self.TYPE_ARRAY): self.write_string(type) self._writeStruct('>B', 1, (self.TC_ENDBLOCKDATA,)) if cls.superclass: self.write_classdesc(cls.superclass) else: self.write_null() all_names = [] all_types = [] tmpcls = cls while tmpcls: all_names += tmpcls.fields_names all_types += tmpcls.fields_types tmpcls = tmpcls.superclass del tmpcls for (name, type) in zip(all_names, all_types): try: if len(type) > 1: type = type[0] if type == self.TYPE_BOOLEAN: self._writeStruct('>B', 1, (1 if getattr(obj, name) else 0,)) elif type == self.TYPE_BYTE: if getattr(obj, name) > 127: self._writeStruct('>B', 1, (getattr(obj, name),)) else: self._writeStruct('>b', 1, (getattr(obj, name),)) elif type == self.TYPE_SHORT: self._writeStruct('>h', 1, (getattr(obj, name),)) elif type == self.TYPE_INTEGER: self._writeStruct('>i', 1, (getattr(obj, name),)) elif type == self.TYPE_LONG: self._writeStruct('>q', 1, (getattr(obj, name),)) elif type == self.TYPE_FLOAT: self._writeStruct('>f', 1, (getattr(obj, name),)) elif type == self.TYPE_DOUBLE: self._writeStruct('>d', 1, (getattr(obj, name),)) elif type == self.TYPE_OBJECT or type == self.TYPE_ARRAY: if getattr(obj, name) == None: self.write_null() elif isinstance(getattr(obj, name), JavaEnum): self.write_enum(getattr(obj, name)) elif isinstance(getattr(obj, name), JavaObject): self.write_object(getattr(obj, name)) elif isinstance(getattr(obj, name), JavaString): self.write_string(getattr(obj, name)) elif isinstance(getattr(obj, name), str): self.write_blockdata(getattr(obj, name)) else: raise RuntimeError('Unknown typecode: %s' % type) else: raise RuntimeError('Unknown typecode: %s' % type) except AttributeError as e: _log.error(' ' * (ident * 2) + str('%s e, %s %s' % (str(e), repr(obj), repr(dir(obj))))) raise del all_names, all_types if cls.flags & self.SC_SERIALIZABLE and cls.flags & self.SC_WRITE_METHOD or (cls.flags & self.SC_EXTERNALIZABLE and cls.flags & self.SC_BLOCK_DATA): for annot in obj.annotations: _log.debug(' ' * (ident * 2) + str('Write annotation %s for %s' % (repr(annot), repr(obj)))) if annot == None: self._writeStruct('>B', 1, (self.TC_NULL,)) else: log_debug('Writing object of type ' + str(type(annot)) + ' ' + str(annot)) if isinstance(annot, JavaArray): self.write_array(annot) elif isinstance(annot, JavaEnum): self.write_enum(annot) elif isinstance(annot, JavaObject): self.write_object(annot) elif isinstance(annot, JavaString): self.write_string(annot) elif isinstance(annot, JavaClass): self.write_class(annot) elif annot is None: self.write_null() elif type(annot) is str: self.write_blockdata(annot) else: raise RuntimeError('Object serialization of type %s is not supported.' % str(type(annot))) ba = struct.pack('>B', *(self.TC_ENDBLOCKDATA,)) self.object_stream.write(ba)
decoding-contour-next-link
positive
def test_create(self): self.assertEqual(self.poll.history.all().count(), 1) (record,) = self.poll.history.all() <DeepExtract> for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': self.poll.id, 'history_type': '+'}.items(): self.assertEqual(getattr(record, key), value) self.assertEqual(record.history_object.__class__, self.model) for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': self.poll.id, 'history_type': '+'}.items(): if key not in ['history_type', 'history_change_reason']: self.assertEqual(getattr(record.history_object, key), value) </DeepExtract> <DeepExtract> self.assertAlmostEqual(record.history_date, datetime.now(), delta=timedelta(seconds=2)) </DeepExtract> historical_poll = self.poll.history.all()[0] self.assertEqual(historical_poll.places.count(), 0) self.poll.places.add(self.place) self.assertEqual(self.poll.history.all().count(), 2) m2m_record = self.poll.history.all()[0] self.assertEqual(m2m_record.places.count(), 1) historical_place = m2m_record.places.first() self.assertEqual(historical_place.place, self.place)
def test_create(self): self.assertEqual(self.poll.history.all().count(), 1) (record,) = self.poll.history.all() for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': self.poll.id, 'history_type': '+'}.items(): self.assertEqual(getattr(record, key), value) self.assertEqual(record.history_object.__class__, self.model) for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': self.poll.id, 'history_type': '+'}.items(): if key not in ['history_type', 'history_change_reason']: self.assertEqual(getattr(record.history_object, key), value) self.assertAlmostEqual(record.history_date, datetime.now(), delta=timedelta(seconds=2)) historical_poll = self.poll.history.all()[0] self.assertEqual(historical_poll.places.count(), 0) self.poll.places.add(self.place) self.assertEqual(self.poll.history.all().count(), 2) m2m_record = self.poll.history.all()[0] self.assertEqual(m2m_record.places.count(), 1) historical_place = m2m_record.places.first() self.assertEqual(historical_place.place, self.place)
django-simple-history
positive
def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) <DeepExtract> lrs = self.args.lr if self.args.force_anneal is None or epoch < self.args.force_anneal: next_lr = lrs[min(epoch, len(lrs) - 1)] else: next_lr = lrs[-1] * self.args.lr_shrink ** (epoch + 1 - self.args.force_anneal) self.lr = next_lr </DeepExtract> self.optimizer.set_lr(self.warmup_factor * self.lr) return self.optimizer.get_lr()
def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) lrs = self.args.lr if self.args.force_anneal is None or epoch < self.args.force_anneal: next_lr = lrs[min(epoch, len(lrs) - 1)] else: next_lr = lrs[-1] * self.args.lr_shrink ** (epoch + 1 - self.args.force_anneal) self.lr = next_lr self.optimizer.set_lr(self.warmup_factor * self.lr) return self.optimizer.get_lr()
DisCo
positive
def delete(self): <DeepExtract> config = MASResourceConfig(module=self.module, resource=self.main_nitro_class, attribute_values_dict=self.module.params, attributes_list=self.attribute_config[self.main_nitro_class]['attributes_list'], transforms=self.attribute_config[self.main_nitro_class]['transforms'], api_path='nitro/v2/config') config = config </DeepExtract> if self.main_object_exists(config): self.module_result['changed'] = True if not self.module.check_mode: config.delete(delete_id_attributes=self.attribute_config[self.main_nitro_class]['delete_id_attributes'])
def delete(self): config = MASResourceConfig(module=self.module, resource=self.main_nitro_class, attribute_values_dict=self.module.params, attributes_list=self.attribute_config[self.main_nitro_class]['attributes_list'], transforms=self.attribute_config[self.main_nitro_class]['transforms'], api_path='nitro/v2/config') config = config if self.main_object_exists(config): self.module_result['changed'] = True if not self.module.check_mode: config.delete(delete_id_attributes=self.attribute_config[self.main_nitro_class]['delete_id_attributes'])
citrix-adc-ansible-modules
positive
@_custom_endpoint(expected_methods=['get']) def browse(self, request, bundle, **kwargs): """Returns all of the entries in a space, optionally at a subpath. Returns a dict with {'entries': [list of entries in the directory], 'directories': [list of directories in the directory]} Directories is a subset of entries, all are just the name. If a path=<path> parameter is provided, will look in that path inside the Space.""" space = bundle.obj path = request.GET.get('path', '') if not path.startswith(space.path): path = os.path.join(space.path, path) if not _is_relative_path(space.path, path): return http.HttpBadRequest(_('The path parameter must be relative to the space path')) <DeepExtract> message = _('This method should be accessed via a versioned subclass') raise NotImplementedError(message) </DeepExtract> return self.create_response(request, objects)
@_custom_endpoint(expected_methods=['get']) def browse(self, request, bundle, **kwargs): """Returns all of the entries in a space, optionally at a subpath. Returns a dict with {'entries': [list of entries in the directory], 'directories': [list of directories in the directory]} Directories is a subset of entries, all are just the name. If a path=<path> parameter is provided, will look in that path inside the Space.""" space = bundle.obj path = request.GET.get('path', '') if not path.startswith(space.path): path = os.path.join(space.path, path) if not _is_relative_path(space.path, path): return http.HttpBadRequest(_('The path parameter must be relative to the space path')) message = _('This method should be accessed via a versioned subclass') raise NotImplementedError(message) return self.create_response(request, objects)
archivematica-storage-service
positive
def nearest(p): index = -1 best = float('inf') for i in range(k): <DeepExtract> d1 = sum(((p[i] - centres[i][i]) ** 2 for i in range(m))) </DeepExtract> if d1 < best: index = i best = d1 return index
def nearest(p): index = -1 best = float('inf') for i in range(k): d1 = sum(((p[i] - centres[i][i]) ** 2 for i in range(m))) if d1 < best: index = i best = d1 return index
bioinformatics
positive
def read_amazon_data(path, max_revs=None, replace_xml=False): """Reads AmazonFields data, formats and enriches by adding the category attribute. Args: path (str): data path to a file with AmazonFields reviews. max_revs (int): the maximum number of reviews to read. replace_xml (bool): if set to True will replace XML/HTML symbols with proper strings. Returns: an iterator over pairs of group_id and list of data-units (reviews with attributes). """ amazon_to_output_map = {AmazonFields.PROD_ID: OutputFields.GROUP_ID, AmazonFields.REV_TEXT: OutputFields.REV_TEXT, AmazonFields.OVERALL: OutputFields.RATING} dus = [] prev_prod_id = None for (indx, du) in enumerate(parse(path)): if any((du_key not in du for du_key in amazon_to_output_map.keys())): continue prod_id = du[AmazonFields.PROD_ID] if replace_xml: du[AmazonFields.REV_TEXT] = unescape(du[AmazonFields.REV_TEXT]) du = {amazon_to_output_map[attr]: du[attr] for attr in amazon_to_output_map.keys()} du[OutputFields.CAT] = get_file_name(path).lower() <DeepExtract> du[OutputFields.REV_TEXT] = du[OutputFields.REV_TEXT].replace('\t', '').replace('\n', '') </DeepExtract> if prev_prod_id is not None and prod_id != prev_prod_id: yield (prev_prod_id, dus) dus = [] prev_prod_id = prod_id dus.append(du) if max_revs and indx >= max_revs - 1: break if len(dus): yield (prev_prod_id, dus)
def read_amazon_data(path, max_revs=None, replace_xml=False): """Reads AmazonFields data, formats and enriches by adding the category attribute. Args: path (str): data path to a file with AmazonFields reviews. max_revs (int): the maximum number of reviews to read. replace_xml (bool): if set to True will replace XML/HTML symbols with proper strings. Returns: an iterator over pairs of group_id and list of data-units (reviews with attributes). """ amazon_to_output_map = {AmazonFields.PROD_ID: OutputFields.GROUP_ID, AmazonFields.REV_TEXT: OutputFields.REV_TEXT, AmazonFields.OVERALL: OutputFields.RATING} dus = [] prev_prod_id = None for (indx, du) in enumerate(parse(path)): if any((du_key not in du for du_key in amazon_to_output_map.keys())): continue prod_id = du[AmazonFields.PROD_ID] if replace_xml: du[AmazonFields.REV_TEXT] = unescape(du[AmazonFields.REV_TEXT]) du = {amazon_to_output_map[attr]: du[attr] for attr in amazon_to_output_map.keys()} du[OutputFields.CAT] = get_file_name(path).lower() du[OutputFields.REV_TEXT] = du[OutputFields.REV_TEXT].replace('\t', '').replace('\n', '') if prev_prod_id is not None and prod_id != prev_prod_id: yield (prev_prod_id, dus) dus = [] prev_prod_id = prod_id dus.append(du) if max_revs and indx >= max_revs - 1: break if len(dus): yield (prev_prod_id, dus)
Copycat-abstractive-opinion-summarizer
positive
def new_unit_declared(self, unit_type): if unit_type == UnitType.alias: <DeepExtract> self.nb_units_declared += 1 self.nb_aliases_declared += 1 </DeepExtract> elif unit_type == UnitType.slot: <DeepExtract> self.nb_units_declared += 1 self.nb_slots_declared += 1 </DeepExtract> elif unit_type == UnitType.intent: <DeepExtract> self.nb_units_declared += 1 self.nb_intents_declared += 1 </DeepExtract> else: raise TypeError('Tried to increase the statistics for unit declarations ' + 'with an unknown unit type (' + str(unit_type) + ').')
def new_unit_declared(self, unit_type): if unit_type == UnitType.alias: self.nb_units_declared += 1 self.nb_aliases_declared += 1 elif unit_type == UnitType.slot: self.nb_units_declared += 1 self.nb_slots_declared += 1 elif unit_type == UnitType.intent: self.nb_units_declared += 1 self.nb_intents_declared += 1 else: raise TypeError('Tried to increase the statistics for unit declarations ' + 'with an unknown unit type (' + str(unit_type) + ').')
Chatette
positive
def stack_fn(x): <DeepExtract> x = block1(x, 64, stride=1, name='conv2' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 3 + 1): x = block1(x, 64, conv_shortcut=False, name='conv2' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay) x = x </DeepExtract> <DeepExtract> x = block1(x, 128, stride=stride1, name='conv3' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 8 + 1): x = block1(x, 128, conv_shortcut=False, name='conv3' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay) x = x </DeepExtract> <DeepExtract> x = block1(x, 256, stride=stride1, name='conv4' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 36 + 1): x = block1(x, 256, conv_shortcut=False, name='conv4' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay) x = x </DeepExtract> <DeepExtract> x = block1(x, 512, stride=stride1, name='conv5' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 3 + 1): x = block1(x, 512, conv_shortcut=False, name='conv5' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay) x = x </DeepExtract> return x
def stack_fn(x): x = block1(x, 64, stride=1, name='conv2' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 3 + 1): x = block1(x, 64, conv_shortcut=False, name='conv2' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay) x = x x = block1(x, 128, stride=stride1, name='conv3' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 8 + 1): x = block1(x, 128, conv_shortcut=False, name='conv3' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay) x = x x = block1(x, 256, stride=stride1, name='conv4' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 36 + 1): x = block1(x, 256, conv_shortcut=False, name='conv4' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay) x = x x = block1(x, 512, stride=stride1, name='conv5' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 3 + 1): x = block1(x, 512, conv_shortcut=False, name='conv5' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay) x = x return x
deep-learning-models
positive
@print_timing('selenium_login') def measure(): @print_timing('selenium_login:open_login_page') def sub_measure(): login_page.go_to() <DeepExtract> login_page.go_to() </DeepExtract> login_page.set_credentials(username=datasets['username'], password=datasets['password']) login_page.click_login_button()
@print_timing('selenium_login') def measure(): @print_timing('selenium_login:open_login_page') def sub_measure(): login_page.go_to() login_page.go_to() login_page.set_credentials(username=datasets['username'], password=datasets['password']) login_page.click_login_button()
dc-app-performance-toolkit
positive