| before (string, 0–955k chars) | after (string, 0–877k chars) | repo (string, 1–74 chars) | type (string, 1 class) |
|---|---|---|---|
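Each row appears to pair a "before" function, in which helper bodies are inlined between literal `<DeepExtract>` / `</DeepExtract>` marker lines, with an "after" version that is the same code with the marker lines removed. As a rough illustration only (not the dataset's own tooling), a minimal sketch of that marker-stripping step could read:

```python
import re

# Assumption: the markers are literal <DeepExtract> / </DeepExtract> lines and the
# "after" cell is simply the "before" cell with those marker lines deleted.
MARKER = re.compile(r'^\s*</?DeepExtract>\s*$')

def strip_deepextract_markers(before_code: str) -> str:
    kept = [line for line in before_code.splitlines() if not MARKER.match(line)]
    return '\n'.join(kept)
```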
def updateVault(vault):
"""Update a vault, which may or may not exist already."""
if isinstance(vault, str):
deleted = True
vault = self.vaults[vault]
else:
deleted = False
logger = self.logger
uuid = vault['id']
name = vault.get('name', '')
logger.debug('updateVault() for %s (name: "%s")', uuid, name)
if not deleted and (uuid not in self.vaults or (uuid in self.vaults and self.vaults[uuid].get('deleted', False))):
logger.debug('this is a new vault')
self.vaults[uuid] = vault
unlocker = UnlockVaultWidget(uuid, self.stack)
self.vaultCountChanged.connect(unlocker.vaultCountChanged)
self.stack.addWidget(unlocker)
noitems = NoItemWidget(uuid, self.stack)
self.stack.addWidget(noitems)
items = VersionList(self.stack)
self.stack.addWidget(items)
widgets = (unlocker, noitems, items)
pos = self.vault_order.insert(name, uuid, widgets)
backend = QApplication.instance().backend()
if backend.get_vault_status(uuid) == 'UNLOCKED':
logger.debug('new vault is unlocked')
self.versions[uuid] = {}
self.version_order[uuid] = SortedList()
self.current_item[uuid] = None
versions = backend.get_secrets(vault['id'])
<DeepExtract>
logger = self.logger
logger.debug('updating {} versions for vault {}', len(versions), uuid)
assert uuid in self.vaults
vault = self.vaults[uuid]
name = vault.get('name', '')
pos = self.vault_order.find(name, uuid)
assert pos != -1
(unlocker, noitems, items) = self.vault_order.dataat(pos)
modifications = []
current_versions = self.versions[uuid]
current_order = self.version_order[uuid]
for version in versions:
vuuid = version['id']
print('VERSION', repr(version))
key = sortkey(version)
present = not version.get('deleted', False)
cur_present = vuuid in current_versions
cur_deleted = current_versions.get(vuuid, {}).get('deleted', False)
if present:
if not cur_present:
modifications.append((key, 'new', version))
elif cur_present and (not cur_deleted):
modifications.append((key, 'update', version))
elif cur_deleted:
modifications.append((key, 'undelete', version))
elif cur_present and (not cur_deleted):
modifications.append((key, 'delete', version))
modifications.sort()
for (key, mod, version) in modifications:
vuuid = version['id']
curversion = current_versions.get(vuuid)
if mod in ('new', 'update'):
group = version['fields'].get('group', '')
grouppos = current_order.find(group)
if grouppos == -1:
item = GroupItem(uuid, group)
item.openStateChanged.connect(self.setGroupOpenState)
pos = current_order.insert(group, None, (item, None))
items.insertItem(pos, item)
self.groupAdded.emit(uuid, group)
if mod == 'new':
assert curversion is None
pos = current_order.find(key)
print('KEY', repr(key))
assert pos == -1
item = PasswordItem(uuid, version)
item.clicked.connect(self.changeCurrentItem)
search = searchkey(version)
pos = current_order.insert(key, vuuid, (item, search))
items.insertItem(pos, item)
elif mod == 'update':
assert curversion is not None
curkey = sortkey(curversion)
curpos = current_order.find(curkey, vuuid)
assert curpos != -1
(item, search) = current_order.dataat(curpos)
item.updateData(version)
if key != curkey:
current_order.removeat(curpos)
newpos = current_order.insert(key, vuuid, (item, search))
items.removeItem(item)
items.insertItem(newpos, item)
elif mod == 'delete':
assert curversion is not None
curkey = sortkey(current_versions[vuuid])
curpos = current_order.find(curkey, vuuid)
assert curpos != -1
(item, search) = current_order.dataat(curpos)
current_order.removeat(curpos)
items.removeItem(item)
item.hide()
item.destroy()
if self.current_item[uuid] == vuuid:
self.current_item[uuid] = None
if mod in ('update', 'delete'):
curgroup = curversion['fields'].get('group', '')
curpos = current_order.find(curgroup)
assert curpos != -1
prefix = '%s\x00' % curgroup
if curpos == len(current_order) - 1 or not current_order.keyat(curpos + 1).startswith(prefix):
(item, search) = current_order.dataat(curpos)
current_order.removeat(curpos)
items.removeItem(item)
item.hide()
item.destroy()
self.groupRemoved.emit(uuid, curgroup)
for version in versions:
current_versions[version['id']] = version
if uuid != self.current_vault:
return
if len(current_order) > 0:
self.stack.setCurrentWidget(items)
else:
self.stack.setCurrentWidget(items)
self.stack.setCurrentWidget(noitems)
self.currentVaultItemCountChanged.emit(len(current_order))
</DeepExtract>
self.tabbar.insertTab(pos, name)
elif not deleted and uuid in self.vaults and (self.vaults[uuid].get('name', '') != name):
logger.debug('this vault was renamed')
curname = self.vaults[uuid].get('name', '')
curpos = self.vault_order.find(curname, uuid)
assert curpos != -1
self.vaults[uuid] = vault
widgets = self.vault_order.dataat(curpos)
self.vault_order.removeat(curpos)
pos = self.vault_order.insert(name, uuid, widgets)
self.tabbar.removeTab(curpos)
self.tabbar.insertTab(pos, name)
elif deleted and uuid in self.vaults and (not self.vaults[uuid].get('deleted', False)):
logger.debug('this vault was deleted')
curname = self.vaults[uuid].get('name', '')
pos = self.vault_order.find(curname, uuid)
assert pos != -1
self.vaults[uuid] = vault
widgets = self.vault_order.dataat(pos)
self.vault_order.removeat(pos)
self.tabbar.removeTab(pos)
for widget in widgets:
self.stack.removeWidget(widget)
else:
self.vaults[uuid] = vault
self.tabbar.setVisible(len(self.vault_order) > 1)
self.vaultCountChanged.emit(len(self.vault_order))
|
def updateVault(vault):
"""Update a vault, which may or may not exist already."""
if isinstance(vault, str):
deleted = True
vault = self.vaults[vault]
else:
deleted = False
logger = self.logger
uuid = vault['id']
name = vault.get('name', '')
logger.debug('updateVault() for %s (name: "%s")', uuid, name)
if not deleted and (uuid not in self.vaults or (uuid in self.vaults and self.vaults[uuid].get('deleted', False))):
logger.debug('this is a new vault')
self.vaults[uuid] = vault
unlocker = UnlockVaultWidget(uuid, self.stack)
self.vaultCountChanged.connect(unlocker.vaultCountChanged)
self.stack.addWidget(unlocker)
noitems = NoItemWidget(uuid, self.stack)
self.stack.addWidget(noitems)
items = VersionList(self.stack)
self.stack.addWidget(items)
widgets = (unlocker, noitems, items)
pos = self.vault_order.insert(name, uuid, widgets)
backend = QApplication.instance().backend()
if backend.get_vault_status(uuid) == 'UNLOCKED':
logger.debug('new vault is unlocked')
self.versions[uuid] = {}
self.version_order[uuid] = SortedList()
self.current_item[uuid] = None
versions = backend.get_secrets(vault['id'])
logger = self.logger
logger.debug('updating {} versions for vault {}', len(versions), uuid)
assert uuid in self.vaults
vault = self.vaults[uuid]
name = vault.get('name', '')
pos = self.vault_order.find(name, uuid)
assert pos != -1
(unlocker, noitems, items) = self.vault_order.dataat(pos)
modifications = []
current_versions = self.versions[uuid]
current_order = self.version_order[uuid]
for version in versions:
vuuid = version['id']
print('VERSION', repr(version))
key = sortkey(version)
present = not version.get('deleted', False)
cur_present = vuuid in current_versions
cur_deleted = current_versions.get(vuuid, {}).get('deleted', False)
if present:
if not cur_present:
modifications.append((key, 'new', version))
elif cur_present and (not cur_deleted):
modifications.append((key, 'update', version))
elif cur_deleted:
modifications.append((key, 'undelete', version))
elif cur_present and (not cur_deleted):
modifications.append((key, 'delete', version))
modifications.sort()
for (key, mod, version) in modifications:
vuuid = version['id']
curversion = current_versions.get(vuuid)
if mod in ('new', 'update'):
group = version['fields'].get('group', '')
grouppos = current_order.find(group)
if grouppos == -1:
item = GroupItem(uuid, group)
item.openStateChanged.connect(self.setGroupOpenState)
pos = current_order.insert(group, None, (item, None))
items.insertItem(pos, item)
self.groupAdded.emit(uuid, group)
if mod == 'new':
assert curversion is None
pos = current_order.find(key)
print('KEY', repr(key))
assert pos == -1
item = PasswordItem(uuid, version)
item.clicked.connect(self.changeCurrentItem)
search = searchkey(version)
pos = current_order.insert(key, vuuid, (item, search))
items.insertItem(pos, item)
elif mod == 'update':
assert curversion is not None
curkey = sortkey(curversion)
curpos = current_order.find(curkey, vuuid)
assert curpos != -1
(item, search) = current_order.dataat(curpos)
item.updateData(version)
if key != curkey:
current_order.removeat(curpos)
newpos = current_order.insert(key, vuuid, (item, search))
items.removeItem(item)
items.insertItem(newpos, item)
elif mod == 'delete':
assert curversion is not None
curkey = sortkey(current_versions[vuuid])
curpos = current_order.find(curkey, vuuid)
assert curpos != -1
(item, search) = current_order.dataat(curpos)
current_order.removeat(curpos)
items.removeItem(item)
item.hide()
item.destroy()
if self.current_item[uuid] == vuuid:
self.current_item[uuid] = None
if mod in ('update', 'delete'):
curgroup = curversion['fields'].get('group', '')
curpos = current_order.find(curgroup)
assert curpos != -1
prefix = '%s\x00' % curgroup
if curpos == len(current_order) - 1 or not current_order.keyat(curpos + 1).startswith(prefix):
(item, search) = current_order.dataat(curpos)
current_order.removeat(curpos)
items.removeItem(item)
item.hide()
item.destroy()
self.groupRemoved.emit(uuid, curgroup)
for version in versions:
current_versions[version['id']] = version
if uuid != self.current_vault:
return
if len(current_order) > 0:
self.stack.setCurrentWidget(items)
else:
self.stack.setCurrentWidget(items)
self.stack.setCurrentWidget(noitems)
self.currentVaultItemCountChanged.emit(len(current_order))
self.tabbar.insertTab(pos, name)
elif not deleted and uuid in self.vaults and (self.vaults[uuid].get('name', '') != name):
logger.debug('this vault was renamed')
curname = self.vaults[uuid].get('name', '')
curpos = self.vault_order.find(curname, uuid)
assert curpos != -1
self.vaults[uuid] = vault
widgets = self.vault_order.dataat(curpos)
self.vault_order.removeat(curpos)
pos = self.vault_order.insert(name, uuid, widgets)
self.tabbar.removeTab(curpos)
self.tabbar.insertTab(pos, name)
elif deleted and uuid in self.vaults and (not self.vaults[uuid].get('deleted', False)):
logger.debug('this vault was deleted')
curname = self.vaults[uuid].get('name', '')
pos = self.vault_order.find(curname, uuid)
assert pos != -1
self.vaults[uuid] = vault
widgets = self.vault_order.dataat(pos)
self.vault_order.removeat(pos)
self.tabbar.removeTab(pos)
for widget in widgets:
self.stack.removeWidget(widget)
else:
self.vaults[uuid] = vault
self.tabbar.setVisible(len(self.vault_order) > 1)
self.vaultCountChanged.emit(len(self.vault_order))
|
bluepass
|
positive
|
def __step4(self):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
step = 0
done = False
row = -1
col = -1
star_col = -1
while not done:
<DeepExtract>
row = -1
col = -1
i = 0
n = self.n
done = False
while not done:
j = 0
while True:
if self.C[i][j] == 0 and (not self.row_covered[i]) and (not self.col_covered[j]):
row = i
col = j
done = True
j += 1
if j >= n:
break
i += 1
if i >= n:
done = True
(row, col) = (row, col)
</DeepExtract>
if row < 0:
done = True
step = 6
else:
self.marked[row][col] = 2
<DeepExtract>
col = -1
for j in range(self.n):
if self.marked[row][j] == 1:
col = j
break
star_col = col
</DeepExtract>
if star_col >= 0:
col = star_col
self.row_covered[row] = True
self.col_covered[col] = False
else:
done = True
self.Z0_r = row
self.Z0_c = col
step = 5
return step
|
def __step4(self):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
step = 0
done = False
row = -1
col = -1
star_col = -1
while not done:
row = -1
col = -1
i = 0
n = self.n
done = False
while not done:
j = 0
while True:
if self.C[i][j] == 0 and (not self.row_covered[i]) and (not self.col_covered[j]):
row = i
col = j
done = True
j += 1
if j >= n:
break
i += 1
if i >= n:
done = True
(row, col) = (row, col)
if row < 0:
done = True
step = 6
else:
self.marked[row][col] = 2
col = -1
for j in range(self.n):
if self.marked[row][j] == 1:
col = j
break
star_col = col
if star_col >= 0:
col = star_col
self.row_covered[row] = True
self.col_covered[col] = False
else:
done = True
self.Z0_r = row
self.Z0_c = col
step = 5
return step
|
CenterFusion
|
positive
|
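The `__step4` pair above (a step of the Hungarian/Munkres assignment algorithm) begins by scanning for a zero entry whose row and column are both uncovered. A minimal standalone sketch of that scan, assuming `C` is a square cost matrix and the cover flags are plain boolean lists:

```python
from typing import List, Tuple

def find_uncovered_zero(C: List[List[int]], row_covered: List[bool],
                        col_covered: List[bool]) -> Tuple[int, int]:
    """Return (row, col) of the first zero whose row and column are both
    uncovered, or (-1, -1) if no such zero exists."""
    n = len(C)
    for i in range(n):
        if row_covered[i]:
            continue
        for j in range(n):
            if C[i][j] == 0 and not col_covered[j]:
                return i, j
    return -1, -1

# Example: a 3x3 cost matrix with the first column already covered.
print(find_uncovered_zero([[0, 1, 2], [3, 0, 4], [5, 6, 0]],
                          [False, False, False], [True, False, False]))  # (1, 1)
```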
def __init__(self, environment=None, params=None, observer=None):
self._sac_params = params['ML']['BehaviorSACAgent']
BehaviorTFAAgent.__init__(self, environment=environment, params=params, observer=observer)
BehaviorModel.__init__(self, params)
<DeepExtract>
self._replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(data_spec=self._agent.collect_data_spec, batch_size=self._wrapped_env.batch_size, max_length=self._sac_params['ReplayBufferCapacity', '', 100000])
</DeepExtract>
<DeepExtract>
dataset = self._replay_buffer.as_dataset(num_parallel_calls=self._sac_params['ParallelBufferCalls', '', 1], sample_batch_size=self._sac_params['BatchSize', '', 512], num_steps=self._sac_params['BufferNumSteps', '', 2]).prefetch(self._sac_params['BufferPrefetch', '', 3])
self._dataset = dataset
</DeepExtract>
<DeepExtract>
self._collect_policy = self._agent.collect_policy
</DeepExtract>
<DeepExtract>
self._eval_policy = greedy_policy.GreedyPolicy(self._agent.policy)
</DeepExtract>
|
def __init__(self, environment=None, params=None, observer=None):
self._sac_params = params['ML']['BehaviorSACAgent']
BehaviorTFAAgent.__init__(self, environment=environment, params=params, observer=observer)
BehaviorModel.__init__(self, params)
self._replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(data_spec=self._agent.collect_data_spec, batch_size=self._wrapped_env.batch_size, max_length=self._sac_params['ReplayBufferCapacity', '', 100000])
dataset = self._replay_buffer.as_dataset(num_parallel_calls=self._sac_params['ParallelBufferCalls', '', 1], sample_batch_size=self._sac_params['BatchSize', '', 512], num_steps=self._sac_params['BufferNumSteps', '', 2]).prefetch(self._sac_params['BufferPrefetch', '', 3])
self._dataset = dataset
self._collect_policy = self._agent.collect_policy
self._eval_policy = greedy_policy.GreedyPolicy(self._agent.policy)
|
bark-ml
|
positive
|
def duplicate_entries(self, duplicate, week_update):
def duplicate_builder(queryset, new_date):
for instance in queryset:
duplicate = deepcopy(instance)
duplicate.id = None
duplicate.published = False
duplicate.week_start = new_date
yield duplicate
def duplicate_helper(queryset, new_date):
try:
ProjectHours.objects.bulk_create(duplicate_builder(queryset, new_date))
except AttributeError:
for entry in duplicate_builder(queryset, new_date):
entry.save()
msg = 'Project hours were copied'
messages.info(self.request, msg)
this_week = datetime.datetime.strptime(week_update, DATE_FORM_FORMAT).date()
prev_week = this_week - relativedelta(days=7)
<DeepExtract>
prev_week = prev_week if prev_week else self.week_start
week_end = prev_week + relativedelta(days=7)
prev_week_qs = ProjectHours.objects.filter(week_start__gte=prev_week, week_start__lt=week_end)
</DeepExtract>
<DeepExtract>
this_week = this_week if this_week else self.week_start
week_end = this_week + relativedelta(days=7)
this_week_qs = ProjectHours.objects.filter(week_start__gte=this_week, week_start__lt=week_end)
</DeepExtract>
param = {'week_start': week_update}
url = '?'.join((reverse('edit_schedule'), urlencode(param)))
if not prev_week_qs.exists():
msg = 'There are no hours to copy'
messages.warning(self.request, msg)
else:
this_week_qs.delete()
<DeepExtract>
try:
ProjectHours.objects.bulk_create(duplicate_builder(prev_week_qs, this_week))
except AttributeError:
for entry in duplicate_builder(prev_week_qs, this_week):
entry.save()
msg = 'Project hours were copied'
messages.info(self.request, msg)
</DeepExtract>
return HttpResponseRedirect(url)
|
def duplicate_entries(self, duplicate, week_update):
def duplicate_builder(queryset, new_date):
for instance in queryset:
duplicate = deepcopy(instance)
duplicate.id = None
duplicate.published = False
duplicate.week_start = new_date
yield duplicate
def duplicate_helper(queryset, new_date):
try:
ProjectHours.objects.bulk_create(duplicate_builder(queryset, new_date))
except AttributeError:
for entry in duplicate_builder(queryset, new_date):
entry.save()
msg = 'Project hours were copied'
messages.info(self.request, msg)
this_week = datetime.datetime.strptime(week_update, DATE_FORM_FORMAT).date()
prev_week = this_week - relativedelta(days=7)
prev_week = prev_week if prev_week else self.week_start
week_end = prev_week + relativedelta(days=7)
prev_week_qs = ProjectHours.objects.filter(week_start__gte=prev_week, week_start__lt=week_end)
this_week = this_week if this_week else self.week_start
week_end = this_week + relativedelta(days=7)
this_week_qs = ProjectHours.objects.filter(week_start__gte=this_week, week_start__lt=week_end)
param = {'week_start': week_update}
url = '?'.join((reverse('edit_schedule'), urlencode(param)))
if not prev_week_qs.exists():
msg = 'There are no hours to copy'
messages.warning(self.request, msg)
else:
this_week_qs.delete()
try:
ProjectHours.objects.bulk_create(duplicate_builder(prev_week_qs, this_week))
except AttributeError:
for entry in duplicate_builder(prev_week_qs, this_week):
entry.save()
msg = 'Project hours were copied'
messages.info(self.request, msg)
return HttpResponseRedirect(url)
|
django-timepiece
|
positive
|
def run_command(self, command, timeout=-1):
"""Send a command to the REPL, wait for and return output.
:param str command: The command to send. Trailing newlines are not needed.
This should be a complete block of input that will trigger execution;
if a continuation prompt is found after sending input, :exc:`ValueError`
will be raised.
:param int timeout: How long to wait for the next prompt. -1 means the
default from the :class:`pexpect.spawn` object (default 30 seconds).
None means to wait indefinitely.
"""
cmdlines = command.splitlines()
if command.endswith('\n'):
cmdlines.append('')
if not cmdlines:
raise ValueError('No command was given')
self.child.sendline(cmdlines[0])
for line in cmdlines[1:]:
<DeepExtract>
return self.child.expect_exact([self.prompt, self.continuation_prompt], timeout=1)
</DeepExtract>
self.child.sendline(line)
if self._expect_prompt(timeout=timeout) == 1:
self.child.kill(signal.SIGINT)
<DeepExtract>
return self.child.expect_exact([self.prompt, self.continuation_prompt], timeout=1)
</DeepExtract>
raise ValueError('Continuation prompt found - input was incomplete:\n' + command)
return self.child.before
|
def run_command(self, command, timeout=-1):
"""Send a command to the REPL, wait for and return output.
:param str command: The command to send. Trailing newlines are not needed.
This should be a complete block of input that will trigger execution;
if a continuation prompt is found after sending input, :exc:`ValueError`
will be raised.
:param int timeout: How long to wait for the next prompt. -1 means the
default from the :class:`pexpect.spawn` object (default 30 seconds).
None means to wait indefinitely.
"""
cmdlines = command.splitlines()
if command.endswith('\n'):
cmdlines.append('')
if not cmdlines:
raise ValueError('No command was given')
self.child.sendline(cmdlines[0])
for line in cmdlines[1:]:
return self.child.expect_exact([self.prompt, self.continuation_prompt], timeout=1)
self.child.sendline(line)
if self._expect_prompt(timeout=timeout) == 1:
self.child.kill(signal.SIGINT)
return self.child.expect_exact([self.prompt, self.continuation_prompt], timeout=1)
raise ValueError('Continuation prompt found - input was incomplete:\n' + command)
return self.child.before
|
camr
|
positive
|
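The `run_command` pair above wraps a pexpect-driven REPL: send a line, wait for either the main or the continuation prompt with `expect_exact`, and read the captured output from `child.before`. A hedged usage sketch against a plain Python REPL (the prompt strings here are assumptions for illustration, not taken from the camr code):

```python
import pexpect

# Spawn an interactive Python process under a pty and talk to it by prompt.
child = pexpect.spawn('python', encoding='utf-8')
prompt, continuation_prompt = '>>> ', '... '
child.expect_exact([prompt, continuation_prompt])       # wait for the first prompt
child.sendline('1 + 1')
index = child.expect_exact([prompt, continuation_prompt], timeout=30)
if index == 1:
    # Continuation prompt: the input was incomplete, mirroring run_command's ValueError.
    raise ValueError('Continuation prompt found - input was incomplete')
print(child.before)  # everything emitted before the next prompt, e.g. '1 + 1\r\n2\r\n'
child.close()
```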
def get_range_boundaries(range_string, row=0, column=0):
if ':' in range_string:
(min_range, max_range) = range_string.split(':')
(min_col, min_row) = coordinate_from_string(min_range)
(max_col, max_row) = coordinate_from_string(max_range)
min_col = column_index_from_string(min_col) + column
max_col = column_index_from_string(max_col) + column
min_row += row
max_row += row
else:
(min_col, min_row) = coordinate_from_string(range_string)
<DeepExtract>
min_col = _col_conversion_cache[min_col]
</DeepExtract>
max_col = min_col + 1
max_row = min_row
return (min_col, min_row, max_col, max_row)
|
def get_range_boundaries(range_string, row=0, column=0):
if ':' in range_string:
(min_range, max_range) = range_string.split(':')
(min_col, min_row) = coordinate_from_string(min_range)
(max_col, max_row) = coordinate_from_string(max_range)
min_col = column_index_from_string(min_col) + column
max_col = column_index_from_string(max_col) + column
min_row += row
max_row += row
else:
(min_col, min_row) = coordinate_from_string(range_string)
min_col = _col_conversion_cache[min_col]
max_col = min_col + 1
max_row = min_row
return (min_col, min_row, max_col, max_row)
|
dataproxy
|
positive
|
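`get_range_boundaries` converts an A1-style range such as `B2:D10` into numeric column/row boundaries using `coordinate_from_string` and `column_index_from_string` helpers that are not shown here. A self-contained sketch that re-implements the same parsing with a regex, so it does not depend on those helpers:

```python
import re

_COORD = re.compile(r'^([A-Z]+)(\d+)$')

def col_to_index(col: str) -> int:
    """'A' -> 1, 'B' -> 2, ..., 'AA' -> 27 (1-based, spreadsheet style)."""
    idx = 0
    for ch in col:
        idx = idx * 26 + (ord(ch) - ord('A') + 1)
    return idx

def range_boundaries(range_string: str):
    """'B2:D10' -> (2, 2, 4, 10); a single cell 'B2' -> (2, 2, 3, 2),
    matching the original's max_col = min_col + 1 convention."""
    def parse(coord):
        col, row = _COORD.match(coord).groups()
        return col_to_index(col), int(row)
    if ':' in range_string:
        (min_col, min_row), (max_col, max_row) = map(parse, range_string.split(':'))
    else:
        min_col, min_row = parse(range_string)
        max_col, max_row = min_col + 1, min_row
    return min_col, min_row, max_col, max_row

print(range_boundaries('B2:D10'))  # (2, 2, 4, 10)
```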
def test_assert_if_mon_none_and_empty_True(self):
<DeepExtract>
cfg = CephConf()
</DeepExtract>
with pytest.raises(exc.NeedHostError):
mon.get_mon_initial_members(Mock(), True, cfg)
|
def test_assert_if_mon_none_and_empty_True(self):
cfg = CephConf()
with pytest.raises(exc.NeedHostError):
mon.get_mon_initial_members(Mock(), True, cfg)
|
ceph-deploy
|
positive
|
def _get_data_from_txt_file(txt_file_path: str) -> List[Dict[str, Any]]:
"""Returns a list of event data in dictionaries from a *.txt file.
Args:
txt_file_path: The file path.
Returns:
A list of event data in dictionaries, or an empty list
if the data is not valid.
"""
calendar_content: List[Dict[str, Any]] = []
for event in _get_event_from_txt_file(txt_file_path):
if not _is_event_text_valid(event):
return []
<DeepExtract>
if len(event.split(', ')) == 5:
(head, content, start_date, end_date, location) = event.split(', ')
location = location.replace('\n', '')
else:
(head, content, start_date, end_date) = event.split(', ')
end_date = end_date.replace('\n', '')
location = ''
event_data = {'head': head, 'content': content, 'start_date': start_date, 'end_date': end_date, 'location': location}
</DeepExtract>
if not _is_event_dates_valid(event_data['start_date'], event_data['end_date']):
return []
<DeepExtract>
if ':' in event_data['start_date'] and ':' in event_data['start_date']:
start_date = datetime.strptime(event_data['start_date'], DATE_FORMAT2)
end_date = datetime.strptime(event_data['end_date'], DATE_FORMAT2)
else:
start_date = datetime.strptime(event_data['start_date'], DATE_FORMAT)
end_date = datetime.strptime(event_data['end_date'], DATE_FORMAT)
calendar_content.append({'Head': event_data['head'], 'Content': event_data['content'], 'S_Date': start_date, 'E_Date': end_date, 'Location': event_data['location']})
</DeepExtract>
return calendar_content
|
def _get_data_from_txt_file(txt_file_path: str) -> List[Dict[str, Any]]:
"""Returns a list of event data in dictionaries from a *.txt file.
Args:
txt_file_path: The file path.
Returns:
A list of event data in dictionaries, or an empty list
if the data is not valid.
"""
calendar_content: List[Dict[str, Any]] = []
for event in _get_event_from_txt_file(txt_file_path):
if not _is_event_text_valid(event):
return []
if len(event.split(', ')) == 5:
(head, content, start_date, end_date, location) = event.split(', ')
location = location.replace('\n', '')
else:
(head, content, start_date, end_date) = event.split(', ')
end_date = end_date.replace('\n', '')
location = ''
event_data = {'head': head, 'content': content, 'start_date': start_date, 'end_date': end_date, 'location': location}
if not _is_event_dates_valid(event_data['start_date'], event_data['end_date']):
return []
if ':' in event_data['start_date'] and ':' in event_data['start_date']:
start_date = datetime.strptime(event_data['start_date'], DATE_FORMAT2)
end_date = datetime.strptime(event_data['end_date'], DATE_FORMAT2)
else:
start_date = datetime.strptime(event_data['start_date'], DATE_FORMAT)
end_date = datetime.strptime(event_data['end_date'], DATE_FORMAT)
calendar_content.append({'Head': event_data['head'], 'Content': event_data['content'], 'S_Date': start_date, 'E_Date': end_date, 'Location': event_data['location']})
return calendar_content
|
calendar
|
positive
|
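Each event line handled by `_get_data_from_txt_file` is a comma-separated record `head, content, start, end[, location]`, parsed with one of two date formats. The constants `DATE_FORMAT` and `DATE_FORMAT2` are not shown in the snippet, so the formats below are illustrative assumptions; a minimal per-line parser in the same spirit:

```python
from datetime import datetime

# Hypothetical formats: the real DATE_FORMAT / DATE_FORMAT2 constants are not shown.
DATE_FORMAT = '%d/%m/%Y'          # date only
DATE_FORMAT2 = '%d/%m/%Y %H:%M'   # date with a time component

def parse_event_line(line: str) -> dict:
    parts = line.rstrip('\n').split(', ')
    if len(parts) == 5:
        head, content, start, end, location = parts
    else:
        head, content, start, end = parts
        location = ''
    fmt = DATE_FORMAT2 if ':' in start else DATE_FORMAT
    return {'Head': head, 'Content': content,
            'S_Date': datetime.strptime(start, fmt),
            'E_Date': datetime.strptime(end, fmt),
            'Location': location}

print(parse_event_line('Standup, Daily sync, 01/02/2021 09:00, 01/02/2021 09:15, Room 3'))
```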
def save_cfg(self, cfg_files, file_path):
"""Merge cfg_files and save merged configuration to file_path
Args:
cfg_files (str): configuration file paths [default, custom]
file_path (str): path of text file for writing the configurations
"""
<DeepExtract>
if cfg_files[0] is not None:
with open(cfg_files[0], 'r') as f:
default = yaml.load(f, Loader=yaml.FullLoader)
else:
default = {}
</DeepExtract>
<DeepExtract>
cfg = {}
for f in cfg_files:
if f is not None:
cfg = self.update_dict(cfg, read_yaml(f))
merged = edict(cfg)
</DeepExtract>
f = open(file_path, 'w')
line = '# ' + '-' * 20 + ' Setup ' + '-' * 74
line += '|' + '-' * 10 + ' Default ' + '-' * 20 + '\n'
f.writelines(line)
<DeepExtract>
offset_len = 100
for item in merged:
if isinstance(merged[item], dict):
line = ' ' * level_cnt + item + ': '
offset = offset_len - len(line)
line += ' ' * offset + ' # | '
if default.get(item, -1) == -1:
default[item] = {}
line += ' --NEW-- '
f.writelines(line + '\n')
self.write_cfg(default[item], merged[item], f, level_cnt + 1)
else:
line = ' ' * level_cnt + item + ': '
if merged[item] is not None:
line += str(merged[item])
else:
line += ' '
offset = offset_len - len(line)
line += ' ' * offset + ' # | '
f.writelines(line)
if default.get(item, -1) != -1:
line = ' '
if merged[item] != default[item]:
line = str(default[item])
f.writelines(line)
else:
line = ' --NEW-- '
f.writelines(line)
f.writelines('\n')
</DeepExtract>
f.close()
|
def save_cfg(self, cfg_files, file_path):
"""Merge cfg_files and save merged configuration to file_path
Args:
cfg_files (str): configuration file paths [default, custom]
file_path (str): path of text file for writing the configurations
"""
if cfg_files[0] is not None:
with open(cfg_files[0], 'r') as f:
default = yaml.load(f, Loader=yaml.FullLoader)
else:
default = {}
cfg = {}
for f in cfg_files:
if f is not None:
cfg = self.update_dict(cfg, read_yaml(f))
merged = edict(cfg)
f = open(file_path, 'w')
line = '# ' + '-' * 20 + ' Setup ' + '-' * 74
line += '|' + '-' * 10 + ' Default ' + '-' * 20 + '\n'
f.writelines(line)
offset_len = 100
for item in merged:
if isinstance(merged[item], dict):
line = ' ' * level_cnt + item + ': '
offset = offset_len - len(line)
line += ' ' * offset + ' # | '
if default.get(item, -1) == -1:
default[item] = {}
line += ' --NEW-- '
f.writelines(line + '\n')
self.write_cfg(default[item], merged[item], f, level_cnt + 1)
else:
line = ' ' * level_cnt + item + ': '
if merged[item] is not None:
line += str(merged[item])
else:
line += ' '
offset = offset_len - len(line)
line += ' ' * offset + ' # | '
f.writelines(line)
if default.get(item, -1) != -1:
line = ' '
if merged[item] != default[item]:
line = str(default[item])
f.writelines(line)
else:
line = ' --NEW-- '
f.writelines(line)
f.writelines('\n')
f.close()
|
DF-VO
|
positive
|
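`save_cfg` loads the default YAML file, recursively overlays any custom files via an `update_dict` helper that is not shown, and writes an annotated dump. A minimal sketch of the recursive overlay on plain dictionaries (an assumption about what `update_dict` does):

```python
import yaml

def update_dict(base: dict, new: dict) -> dict:
    """Recursively overlay `new` onto `base` and return the merged dict."""
    merged = dict(base)
    for key, value in new.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = update_dict(merged[key], value)
        else:
            merged[key] = value
    return merged

default = yaml.safe_load('seed: 1\nmodel: {depth: 18, lr: 0.001}')
custom = yaml.safe_load('model: {lr: 0.01}')
print(update_dict(default, custom))  # {'seed': 1, 'model': {'depth': 18, 'lr': 0.01}}
```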
def test_carbon_tracker_offline_flush(self):
tracker = OfflineEmissionsTracker(country_iso_code='USA', output_dir=self.emissions_path, output_file=self.emissions_file)
tracker.start()
<DeepExtract>
end_time: float = time.time() + 1
while time.time() < end_time:
pass
</DeepExtract>
tracker.flush()
<DeepExtract>
end_time: float = time.time() + 1
while time.time() < end_time:
pass
</DeepExtract>
emissions = tracker.stop()
assert isinstance(emissions, float)
self.assertNotEqual(emissions, 0.0)
self.assertAlmostEqual(emissions, 6.262572537957655e-05, places=2)
<DeepExtract>
with open(self.emissions_file_path, 'r') as f:
lines = [line.rstrip() for line in f]
assert len(lines) == expected_lines
</DeepExtract>
|
def test_carbon_tracker_offline_flush(self):
tracker = OfflineEmissionsTracker(country_iso_code='USA', output_dir=self.emissions_path, output_file=self.emissions_file)
tracker.start()
end_time: float = time.time() + 1
while time.time() < end_time:
pass
tracker.flush()
end_time: float = time.time() + 1
while time.time() < end_time:
pass
emissions = tracker.stop()
assert isinstance(emissions, float)
self.assertNotEqual(emissions, 0.0)
self.assertAlmostEqual(emissions, 6.262572537957655e-05, places=2)
with open(self.emissions_file_path, 'r') as f:
lines = [line.rstrip() for line in f]
assert len(lines) == expected_lines
|
codecarbon
|
positive
|
def get_poison_index(self, y, fraction=None):
if fraction is None:
fraction = self.fraction
else:
<DeepExtract>
fraction = float(fraction)
if not 0.0 <= fraction <= 1.0:
raise ValueError(f'fraction {fraction} not in [0.0, 1.0] range')
fraction = fraction
</DeepExtract>
source_index = np.where(y == self.source_class)[0]
total = len(source_index)
poison_count = int(fraction * total)
if poison_count == 0:
log.warning(f'0 of {total} poisoned for class {self.source_class}.')
return np.sort(np.random.choice(source_index, size=poison_count, replace=False))
|
def get_poison_index(self, y, fraction=None):
if fraction is None:
fraction = self.fraction
else:
fraction = float(fraction)
if not 0.0 <= fraction <= 1.0:
raise ValueError(f'fraction {fraction} not in [0.0, 1.0] range')
fraction = fraction
source_index = np.where(y == self.source_class)[0]
total = len(source_index)
poison_count = int(fraction * total)
if poison_count == 0:
log.warning(f'0 of {total} poisoned for class {self.source_class}.')
return np.sort(np.random.choice(source_index, size=poison_count, replace=False))
|
armory
|
positive
|
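`get_poison_index` validates the poison fraction and then samples that fraction of the source-class indices without replacement. A self-contained NumPy sketch of the same selection:

```python
import numpy as np

def poison_index(y: np.ndarray, source_class: int, fraction: float) -> np.ndarray:
    """Return a sorted random sample of `fraction` of the indices whose label
    equals `source_class`, drawn without replacement."""
    if not 0.0 <= fraction <= 1.0:
        raise ValueError(f'fraction {fraction} not in [0.0, 1.0] range')
    source_index = np.where(y == source_class)[0]
    poison_count = int(fraction * len(source_index))
    return np.sort(np.random.choice(source_index, size=poison_count, replace=False))

y = np.array([0, 1, 1, 0, 1, 1, 1, 0])
print(poison_index(y, source_class=1, fraction=0.4))  # e.g. [2 5] (2 of the 5 class-1 indices)
```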
def multistate_input(**kwargs):
<DeepExtract>
try:
_prop = kwargs['name']
except KeyError:
_prop = 'MSI'
kwargs['name'] = _prop
</DeepExtract>
kwargs['objectType'] = MultiStateInputObject
kwargs['is_commandable'] = True
kwargs['relinquish_default'] = Unsigned(1)
return multistate(**kwargs)
|
def multistate_input(**kwargs):
try:
_prop = kwargs['name']
except KeyError:
_prop = 'MSI'
kwargs['name'] = _prop
kwargs['objectType'] = MultiStateInputObject
kwargs['is_commandable'] = True
kwargs['relinquish_default'] = Unsigned(1)
return multistate(**kwargs)
|
BAC0
|
positive
|
def forward(self, batch_size, inputs=None, hiddens=None, feats=None, attn_ctx=None, attn_mask=None, mode='teacher forcing', gen_type='greedy', top_k=1, top_p=1.0, temp=1.0, mmi_args={'lm': None, 'lambda': 0, 'gamma': 0, 'tokenizer': None}):
ret_dict = {'outputs': None, 'logits': None, 'hiddens': None, 'attn': None}
if mode == DecoderRNN.MODE_TEACHER_FORCE:
<DeepExtract>
embedded = embedded_dropout(embed=self.embedding, words=inputs, dropout=self.dropout_emb if self.training else 0)
rnn_inputs = embedded
if self.feat_dim > 0:
rnn_inputs = torch.cat([rnn_inputs, feats], dim=2)
rnn_inputs = self.feat_fc(rnn_inputs)
rnn_inputs = self.lockdrop(rnn_inputs, self.dropout_input)
(outputs, new_hiddens) = self.rnn(rnn_inputs, hiddens)
outputs = self.lockdrop(outputs, self.dropout_output)
attns = None
if self.use_attention:
(outputs, attns) = self.attention(query=outputs, context=attn_ctx, mask=attn_mask)
projected = self.project_fc(outputs)
logits = self.word_classifier(projected)
step_ret_dict = {'outputs': outputs, 'logits': logits, 'hiddens': new_hiddens, 'attns': attns}
</DeepExtract>
ret_dict['outputs'] = step_ret_dict['outputs']
ret_dict['logits'] = step_ret_dict['logits']
ret_dict['hiddens'] = step_ret_dict['hiddens']
ret_dict['attns'] = step_ret_dict['attns']
elif mode == DecoderRNN.MODE_FREE_RUN:
if feats is None:
n_unrolling_steps = self.max_len
else:
n_unrolling_steps = feats.size(1)
bos_input = torch.LongTensor([self.bos_token_id]).to(DEVICE)
bos_input.requires_grad_(False)
step_input = bos_input.expand(batch_size, 1)
step_hidden = hiddens
def beamize_data(data, k, batch_dim=0):
if batch_dim != 0:
data = data.transpose(0, batch_dim).contiguous()
batch_size = data.size()[0]
data_size = data.size()[1:]
num_dims = data.dim()
data = data.unsqueeze(1).repeat((1,) + (k,) + (1,) * (num_dims - 1)).view((batch_size * k,) + data_size)
if batch_dim != 0:
data = data.transpose(0, batch_dim).contiguous()
return data
if gen_type in (DecoderRNN.GEN_BEAM, DecoderRNN.GEN_BEAM_SAMPLE, DecoderRNN.GEN_BEAM_MMI_ANTI_LM):
<DeepExtract>
if batch_dim != 0:
step_input = step_input.transpose(0, batch_dim).contiguous()
batch_size = step_input.size()[0]
data_size = step_input.size()[1:]
num_dims = step_input.dim()
step_input = step_input.unsqueeze(1).repeat((1,) + (top_k,) + (1,) * (num_dims - 1)).view((batch_size * top_k,) + data_size)
if batch_dim != 0:
step_input = step_input.transpose(0, batch_dim).contiguous()
step_input = step_input
</DeepExtract>
<DeepExtract>
if 1 != 0:
step_hidden = step_hidden.transpose(0, 1).contiguous()
batch_size = step_hidden.size()[0]
data_size = step_hidden.size()[1:]
num_dims = step_hidden.dim()
step_hidden = step_hidden.unsqueeze(1).repeat((1,) + (top_k,) + (1,) * (num_dims - 1)).view((batch_size * top_k,) + data_size)
if 1 != 0:
step_hidden = step_hidden.transpose(0, 1).contiguous()
step_hidden = step_hidden
</DeepExtract>
<DeepExtract>
if batch_dim != 0:
attn_ctx = attn_ctx.transpose(0, batch_dim).contiguous()
batch_size = attn_ctx.size()[0]
data_size = attn_ctx.size()[1:]
num_dims = attn_ctx.dim()
attn_ctx = attn_ctx.unsqueeze(1).repeat((1,) + (top_k,) + (1,) * (num_dims - 1)).view((batch_size * top_k,) + data_size)
if batch_dim != 0:
attn_ctx = attn_ctx.transpose(0, batch_dim).contiguous()
attn_ctx = attn_ctx
</DeepExtract>
<DeepExtract>
if batch_dim != 0:
attn_mask = attn_mask.transpose(0, batch_dim).contiguous()
batch_size = attn_mask.size()[0]
data_size = attn_mask.size()[1:]
num_dims = attn_mask.dim()
attn_mask = attn_mask.unsqueeze(1).repeat((1,) + (top_k,) + (1,) * (num_dims - 1)).view((batch_size * top_k,) + data_size)
if batch_dim != 0:
attn_mask = attn_mask.transpose(0, batch_dim).contiguous()
attn_mask = attn_mask
</DeepExtract>
feats = None if feats is None else beamize_data(feats, top_k)
cur_scores = torch.full((batch_size, top_k), -float('inf')).to(DEVICE)
cur_scores[:, 0] = 0.0
beam_scores = []
beam_predecessors = []
beam_emmited_symbols = []
partial_seqs = step_input.tolist()
else:
step_output_lst = []
step_logit_lst = []
step_attn_lst = []
step_symbol_lst = []
for step_idx in range(n_unrolling_steps):
if feats is None:
step_feat = None
else:
step_feat = feats[:, step_idx, :].unsqueeze(1)
<DeepExtract>
embedded = embedded_dropout(embed=self.embedding, words=step_input, dropout=self.dropout_emb if self.training else 0)
rnn_inputs = embedded
if self.feat_dim > 0:
rnn_inputs = torch.cat([rnn_inputs, step_feat], dim=2)
rnn_inputs = self.feat_fc(rnn_inputs)
rnn_inputs = self.lockdrop(rnn_inputs, self.dropout_input)
(outputs, new_hiddens) = self.rnn(rnn_inputs, step_hidden)
outputs = self.lockdrop(outputs, self.dropout_output)
attns = None
if self.use_attention:
(outputs, attns) = self.attention(query=outputs, context=attn_ctx, mask=attn_mask)
projected = self.project_fc(outputs)
logits = self.word_classifier(projected)
step_ret_dict = {'outputs': outputs, 'logits': logits, 'hiddens': new_hiddens, 'attns': attns}
</DeepExtract>
<DeepExtract>
step_ret_dict['logits'] = step_ret_dict['logits'] / temp
scores = step_ret_dict['logits'].squeeze(1)
if gen_type == DecoderRNN.GEN_GREEDY:
symbols = step_ret_dict['logits'].squeeze(1).topk(1)[1]
elif gen_type in (DecoderRNN.GEN_BEAM, DecoderRNN.GEN_BEAM_MMI_ANTI_LM):
logsoftmax_scores = F.log_softmax(step_ret_dict['logits'].squeeze(1), dim=1)
(scores, symbols) = logsoftmax_scores.topk(top_k)
elif gen_type == DecoderRNN.GEN_SAMPLE:
probs = F.softmax(step_ret_dict['logits'].squeeze(1), dim=-1)
dist = torch.distributions.Categorical(probs)
symbols = dist.sample().unsqueeze(1)
elif gen_type == DecoderRNN.GEN_TOP:
top_k = min(top_k, step_ret_dict['logits'].squeeze(1).size(-1))
if top_k > 0:
indices_to_remove = step_ret_dict['logits'].squeeze(1) < torch.topk(step_ret_dict['logits'].squeeze(1), top_k)[0][..., -1, None]
step_ret_dict['logits'].squeeze(1)[indices_to_remove] = -float('inf')
if top_p > 0.0:
(sorted_logits, sorted_indices) = torch.sort(step_ret_dict['logits'].squeeze(1), descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cumulative_probs > top_p
sorted_indices_to_remove[..., 0] = 0
for batch_idx in range(step_ret_dict['logits'].squeeze(1).size(0)):
indices_to_remove = sorted_indices[batch_idx][sorted_indices_to_remove[batch_idx]]
step_ret_dict['logits'].squeeze(1)[batch_idx, indices_to_remove] = -float('inf')
probs = F.softmax(step_ret_dict['logits'].squeeze(1), dim=-1)
dist = torch.distributions.Categorical(probs)
symbols = dist.sample().unsqueeze(1)
elif gen_type == DecoderRNN.GEN_BEAM_SAMPLE:
softmax_scores = F.softmax(step_ret_dict['logits'].squeeze(1), dim=1)
symbols = torch.multinomial(softmax_scores, top_k)
scores = torch.gather(softmax_scores, 1, symbols).log()
else:
raise Exception('unsupported generation type {}'.format(gen_type))
decode_dict = {'scores': scores, 'symbols': symbols}
</DeepExtract>
if gen_type not in (DecoderRNN.GEN_BEAM, DecoderRNN.GEN_BEAM_SAMPLE, DecoderRNN.GEN_BEAM_MMI_ANTI_LM):
step_input = decode_dict['symbols']
step_hidden = step_ret_dict['hiddens']
step_output_lst.append(step_ret_dict['outputs'])
step_logit_lst.append(step_ret_dict['logits'])
step_attn_lst.append(step_ret_dict['attns'])
step_symbol_lst.append(step_input)
else:
step_scores = decode_dict['scores']
step_symbols = decode_dict['symbols']
cur_scores = cur_scores.view(batch_size * top_k, 1)
if gen_type == DecoderRNN.GEN_BEAM_MMI_ANTI_LM:
step_symbol_lst = step_symbols.tolist()
new_partial_seqs = []
for partial_seq_idx in range(len(partial_seqs)):
partial_seq = partial_seqs[partial_seq_idx]
for symbol in step_symbol_lst[partial_seq_idx]:
new_partial_seqs.append(partial_seq + [symbol])
lm_seqs = [mmi_args['tokenizer'].convert_ids_to_tokens(ids) for ids in new_partial_seqs]
lm_seqs = [mmi_args['tokenizer'].convert_tokens_to_string(tokens) for tokens in lm_seqs]
lm_outputs = mmi_args['lm'].compute_prob(lm_seqs)
lm_word_ll = lm_outputs['word_loglikelihood']
length_mask = torch.LongTensor(list(range(lm_word_ll.size(1)))).to(DEVICE).unsqueeze(0)
length_mask = length_mask + 1 <= mmi_args['gamma']
length_penalty = length_mask.float().log()
masked_lm_word_ll = lm_word_ll + length_penalty
masked_lm_sent_ll = masked_lm_word_ll.sum(1)
U_t = masked_lm_sent_ll.exp().view(batch_size * top_k, top_k)
P_t_given_s = cur_scores.exp()
mmi_scores = P_t_given_s - mmi_args['lambda'] * U_t + mmi_args['gamma'] * (step_idx + 1)
new_score_candidates = mmi_scores.view(batch_size, top_k * top_k)
(_, cand_idcs) = new_score_candidates.topk(top_k, dim=1)
new_cur_scores = []
step_scores_flat = step_scores.view(batch_size, top_k * top_k)
for batch_idx in range(len(cand_idcs)):
for cand_idx in cand_idcs[batch_idx]:
pred = cand_idx // top_k + batch_idx * top_k
new_cur_scores.append(cur_scores[pred] + step_scores_flat[batch_idx][cand_idx])
cur_scores = torch.FloatTensor(new_cur_scores).to(DEVICE)
else:
new_score_candidates = (cur_scores * (step_idx + 1) + step_scores) / (step_idx + 2)
new_score_candidates = new_score_candidates.view(batch_size, top_k * top_k)
(cur_scores, cand_idcs) = new_score_candidates.topk(top_k, dim=1)
cur_scores = cur_scores.view(batch_size * top_k)
cand_idcs = cand_idcs.tolist()
to_emit_symbol_candidates = step_symbols.view(batch_size, top_k * top_k).tolist()
step_predecessors = []
step_emitted_symbols = []
for batch_idx in range(len(cand_idcs)):
for cand_idx in cand_idcs[batch_idx]:
pred = cand_idx // top_k + batch_idx * top_k
emit = to_emit_symbol_candidates[batch_idx][cand_idx]
step_predecessors.append(pred)
step_emitted_symbols.append(emit)
beam_emmited_symbols.append(step_emitted_symbols)
beam_predecessors.append(step_predecessors)
beam_scores.append(cur_scores.tolist())
new_partial_seqs = []
for (step_e, step_pred) in zip(step_emitted_symbols, step_predecessors):
pred_partial_seq = partial_seqs[step_pred]
new_partial_seq = pred_partial_seq + [step_e]
new_partial_seqs.append(new_partial_seq)
partial_seqs = new_partial_seqs
eos_token_masks = torch.LongTensor(step_emitted_symbols).to(DEVICE) == self.eos_token_id
cur_scores = cur_scores.masked_fill(eos_token_masks, -float('inf'))
step_emitted_symbols = torch.LongTensor(step_emitted_symbols).view(batch_size * top_k, 1).to(DEVICE)
step_input = step_emitted_symbols
step_hidden = step_ret_dict['hiddens']
step_hidden = step_hidden.transpose(0, 1).contiguous()
new_step_hidden = []
for batch_idx in range(len(cand_idcs)):
for cand_idx in cand_idcs[batch_idx]:
pred = cand_idx // top_k + batch_idx * top_k
new_step_hidden.append(step_hidden[pred])
new_step_hidden = torch.stack(new_step_hidden)
step_hidden = new_step_hidden
step_hidden = step_hidden.transpose(0, 1).contiguous()
if gen_type not in (DecoderRNN.GEN_BEAM, DecoderRNN.GEN_BEAM_SAMPLE, DecoderRNN.GEN_BEAM_MMI_ANTI_LM):
outputs = torch.cat(step_output_lst, dim=1)
logits = torch.cat(step_logit_lst, dim=1)
if self.use_attention:
attns = torch.cat(step_attn_lst, dim=1)
else:
attns = None
symbols = torch.cat(step_symbol_lst, dim=1)
ret_dict['outputs'] = outputs
ret_dict['logits'] = logits
ret_dict['hiddens'] = step_hidden
ret_dict['attns'] = attns
ret_dict['symbols'] = symbols
else:
<DeepExtract>
beam_scores = np.array(beam_scores)
beam_predecessors = np.array(beam_predecessors)
beam_emmited_symbols = np.array(beam_emmited_symbols)
(L, N) = beam_scores.shape
assert beam_scores.shape == beam_predecessors.shape == beam_emmited_symbols.shape
assert N == batch_size * top_k
def backtrack_from_coordinate(i, j):
"""
arguments
i - step axis
j - batch*beam axis
"""
score = beam_scores[i, j]
seq = [beam_emmited_symbols[i, j]]
while i > 0:
j = beam_predecessors[i, j]
i = i - 1
seq.append(beam_emmited_symbols[i, j])
seq.reverse()
batch_seqs = (seq, score)
batch_seqs = [[] for _ in range(batch_size)]
for i in range(L - 1):
for j in range(N):
if beam_emmited_symbols[i, j] == self.eos_token_id:
(seq, score) = backtrack_from_coordinate(i, j)
batch_idx = j // top_k
batch_seqs[batch_idx].append((seq, score))
i = L - 1
for j in range(N):
(seq, score) = backtrack_from_coordinate(i, j)
batch_idx = j // top_k
batch_seqs[batch_idx].append((seq, score))
batch_seqs = [sorted(seqs, key=lambda x: x[1], reverse=True) for seqs in batch_seqs]
batch_seqs = batch_seqs
</DeepExtract>
batch_best_seqs = [seq_score_pairs[0][0] for seq_score_pairs in batch_seqs]
seq_lens = [len(seq) for seq in batch_best_seqs]
max_seq_len = max(seq_lens)
symbols = [seq + [self.pad_token_id] * (max_seq_len - len(seq)) for seq in batch_best_seqs]
symbols = torch.LongTensor(symbols).to(DEVICE)
ret_dict['symbols'] = symbols
ret_dict['beam_hypotheses'] = batch_seqs
return ret_dict
|
def forward(self, batch_size, inputs=None, hiddens=None, feats=None, attn_ctx=None, attn_mask=None, mode='teacher forcing', gen_type='greedy', top_k=1, top_p=1.0, temp=1.0, mmi_args={'lm': None, 'lambda': 0, 'gamma': 0, 'tokenizer': None}):
ret_dict = {'outputs': None, 'logits': None, 'hiddens': None, 'attn': None}
if mode == DecoderRNN.MODE_TEACHER_FORCE:
embedded = embedded_dropout(embed=self.embedding, words=inputs, dropout=self.dropout_emb if self.training else 0)
rnn_inputs = embedded
if self.feat_dim > 0:
rnn_inputs = torch.cat([rnn_inputs, feats], dim=2)
rnn_inputs = self.feat_fc(rnn_inputs)
rnn_inputs = self.lockdrop(rnn_inputs, self.dropout_input)
(outputs, new_hiddens) = self.rnn(rnn_inputs, hiddens)
outputs = self.lockdrop(outputs, self.dropout_output)
attns = None
if self.use_attention:
(outputs, attns) = self.attention(query=outputs, context=attn_ctx, mask=attn_mask)
projected = self.project_fc(outputs)
logits = self.word_classifier(projected)
step_ret_dict = {'outputs': outputs, 'logits': logits, 'hiddens': new_hiddens, 'attns': attns}
ret_dict['outputs'] = step_ret_dict['outputs']
ret_dict['logits'] = step_ret_dict['logits']
ret_dict['hiddens'] = step_ret_dict['hiddens']
ret_dict['attns'] = step_ret_dict['attns']
elif mode == DecoderRNN.MODE_FREE_RUN:
if feats is None:
n_unrolling_steps = self.max_len
else:
n_unrolling_steps = feats.size(1)
bos_input = torch.LongTensor([self.bos_token_id]).to(DEVICE)
bos_input.requires_grad_(False)
step_input = bos_input.expand(batch_size, 1)
step_hidden = hiddens
def beamize_data(data, k, batch_dim=0):
if batch_dim != 0:
data = data.transpose(0, batch_dim).contiguous()
batch_size = data.size()[0]
data_size = data.size()[1:]
num_dims = data.dim()
data = data.unsqueeze(1).repeat((1,) + (k,) + (1,) * (num_dims - 1)).view((batch_size * k,) + data_size)
if batch_dim != 0:
data = data.transpose(0, batch_dim).contiguous()
return data
if gen_type in (DecoderRNN.GEN_BEAM, DecoderRNN.GEN_BEAM_SAMPLE, DecoderRNN.GEN_BEAM_MMI_ANTI_LM):
if batch_dim != 0:
step_input = step_input.transpose(0, batch_dim).contiguous()
batch_size = step_input.size()[0]
data_size = step_input.size()[1:]
num_dims = step_input.dim()
step_input = step_input.unsqueeze(1).repeat((1,) + (top_k,) + (1,) * (num_dims - 1)).view((batch_size * top_k,) + data_size)
if batch_dim != 0:
step_input = step_input.transpose(0, batch_dim).contiguous()
step_input = step_input
if 1 != 0:
step_hidden = step_hidden.transpose(0, 1).contiguous()
batch_size = step_hidden.size()[0]
data_size = step_hidden.size()[1:]
num_dims = step_hidden.dim()
step_hidden = step_hidden.unsqueeze(1).repeat((1,) + (top_k,) + (1,) * (num_dims - 1)).view((batch_size * top_k,) + data_size)
if 1 != 0:
step_hidden = step_hidden.transpose(0, 1).contiguous()
step_hidden = step_hidden
if batch_dim != 0:
attn_ctx = attn_ctx.transpose(0, batch_dim).contiguous()
batch_size = attn_ctx.size()[0]
data_size = attn_ctx.size()[1:]
num_dims = attn_ctx.dim()
attn_ctx = attn_ctx.unsqueeze(1).repeat((1,) + (top_k,) + (1,) * (num_dims - 1)).view((batch_size * top_k,) + data_size)
if batch_dim != 0:
attn_ctx = attn_ctx.transpose(0, batch_dim).contiguous()
attn_ctx = attn_ctx
if batch_dim != 0:
attn_mask = attn_mask.transpose(0, batch_dim).contiguous()
batch_size = attn_mask.size()[0]
data_size = attn_mask.size()[1:]
num_dims = attn_mask.dim()
attn_mask = attn_mask.unsqueeze(1).repeat((1,) + (top_k,) + (1,) * (num_dims - 1)).view((batch_size * top_k,) + data_size)
if batch_dim != 0:
attn_mask = attn_mask.transpose(0, batch_dim).contiguous()
attn_mask = attn_mask
feats = None if feats is None else beamize_data(feats, top_k)
cur_scores = torch.full((batch_size, top_k), -float('inf')).to(DEVICE)
cur_scores[:, 0] = 0.0
beam_scores = []
beam_predecessors = []
beam_emmited_symbols = []
partial_seqs = step_input.tolist()
else:
step_output_lst = []
step_logit_lst = []
step_attn_lst = []
step_symbol_lst = []
for step_idx in range(n_unrolling_steps):
if feats is None:
step_feat = None
else:
step_feat = feats[:, step_idx, :].unsqueeze(1)
embedded = embedded_dropout(embed=self.embedding, words=step_input, dropout=self.dropout_emb if self.training else 0)
rnn_inputs = embedded
if self.feat_dim > 0:
rnn_inputs = torch.cat([rnn_inputs, step_feat], dim=2)
rnn_inputs = self.feat_fc(rnn_inputs)
rnn_inputs = self.lockdrop(rnn_inputs, self.dropout_input)
(outputs, new_hiddens) = self.rnn(rnn_inputs, step_hidden)
outputs = self.lockdrop(outputs, self.dropout_output)
attns = None
if self.use_attention:
(outputs, attns) = self.attention(query=outputs, context=attn_ctx, mask=attn_mask)
projected = self.project_fc(outputs)
logits = self.word_classifier(projected)
step_ret_dict = {'outputs': outputs, 'logits': logits, 'hiddens': new_hiddens, 'attns': attns}
step_ret_dict['logits'] = step_ret_dict['logits'] / temp
scores = step_ret_dict['logits'].squeeze(1)
if gen_type == DecoderRNN.GEN_GREEDY:
symbols = step_ret_dict['logits'].squeeze(1).topk(1)[1]
elif gen_type in (DecoderRNN.GEN_BEAM, DecoderRNN.GEN_BEAM_MMI_ANTI_LM):
logsoftmax_scores = F.log_softmax(step_ret_dict['logits'].squeeze(1), dim=1)
(scores, symbols) = logsoftmax_scores.topk(top_k)
elif gen_type == DecoderRNN.GEN_SAMPLE:
probs = F.softmax(step_ret_dict['logits'].squeeze(1), dim=-1)
dist = torch.distributions.Categorical(probs)
symbols = dist.sample().unsqueeze(1)
elif gen_type == DecoderRNN.GEN_TOP:
top_k = min(top_k, step_ret_dict['logits'].squeeze(1).size(-1))
if top_k > 0:
indices_to_remove = step_ret_dict['logits'].squeeze(1) < torch.topk(step_ret_dict['logits'].squeeze(1), top_k)[0][..., -1, None]
step_ret_dict['logits'].squeeze(1)[indices_to_remove] = -float('inf')
if top_p > 0.0:
(sorted_logits, sorted_indices) = torch.sort(step_ret_dict['logits'].squeeze(1), descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cumulative_probs > top_p
sorted_indices_to_remove[..., 0] = 0
for batch_idx in range(step_ret_dict['logits'].squeeze(1).size(0)):
indices_to_remove = sorted_indices[batch_idx][sorted_indices_to_remove[batch_idx]]
step_ret_dict['logits'].squeeze(1)[batch_idx, indices_to_remove] = -float('inf')
probs = F.softmax(step_ret_dict['logits'].squeeze(1), dim=-1)
dist = torch.distributions.Categorical(probs)
symbols = dist.sample().unsqueeze(1)
elif gen_type == DecoderRNN.GEN_BEAM_SAMPLE:
softmax_scores = F.softmax(step_ret_dict['logits'].squeeze(1), dim=1)
symbols = torch.multinomial(softmax_scores, top_k)
scores = torch.gather(softmax_scores, 1, symbols).log()
else:
raise Exception('unsupported generation type {}'.format(gen_type))
decode_dict = {'scores': scores, 'symbols': symbols}
if gen_type not in (DecoderRNN.GEN_BEAM, DecoderRNN.GEN_BEAM_SAMPLE, DecoderRNN.GEN_BEAM_MMI_ANTI_LM):
step_input = decode_dict['symbols']
step_hidden = step_ret_dict['hiddens']
step_output_lst.append(step_ret_dict['outputs'])
step_logit_lst.append(step_ret_dict['logits'])
step_attn_lst.append(step_ret_dict['attns'])
step_symbol_lst.append(step_input)
else:
step_scores = decode_dict['scores']
step_symbols = decode_dict['symbols']
cur_scores = cur_scores.view(batch_size * top_k, 1)
if gen_type == DecoderRNN.GEN_BEAM_MMI_ANTI_LM:
step_symbol_lst = step_symbols.tolist()
new_partial_seqs = []
for partial_seq_idx in range(len(partial_seqs)):
partial_seq = partial_seqs[partial_seq_idx]
for symbol in step_symbol_lst[partial_seq_idx]:
new_partial_seqs.append(partial_seq + [symbol])
lm_seqs = [mmi_args['tokenizer'].convert_ids_to_tokens(ids) for ids in new_partial_seqs]
lm_seqs = [mmi_args['tokenizer'].convert_tokens_to_string(tokens) for tokens in lm_seqs]
lm_outputs = mmi_args['lm'].compute_prob(lm_seqs)
lm_word_ll = lm_outputs['word_loglikelihood']
length_mask = torch.LongTensor(list(range(lm_word_ll.size(1)))).to(DEVICE).unsqueeze(0)
length_mask = length_mask + 1 <= mmi_args['gamma']
length_penalty = length_mask.float().log()
masked_lm_word_ll = lm_word_ll + length_penalty
masked_lm_sent_ll = masked_lm_word_ll.sum(1)
U_t = masked_lm_sent_ll.exp().view(batch_size * top_k, top_k)
P_t_given_s = cur_scores.exp()
mmi_scores = P_t_given_s - mmi_args['lambda'] * U_t + mmi_args['gamma'] * (step_idx + 1)
new_score_candidates = mmi_scores.view(batch_size, top_k * top_k)
(_, cand_idcs) = new_score_candidates.topk(top_k, dim=1)
new_cur_scores = []
step_scores_flat = step_scores.view(batch_size, top_k * top_k)
for batch_idx in range(len(cand_idcs)):
for cand_idx in cand_idcs[batch_idx]:
pred = cand_idx // top_k + batch_idx * top_k
new_cur_scores.append(cur_scores[pred] + step_scores_flat[batch_idx][cand_idx])
cur_scores = torch.FloatTensor(new_cur_scores).to(DEVICE)
else:
new_score_candidates = (cur_scores * (step_idx + 1) + step_scores) / (step_idx + 2)
new_score_candidates = new_score_candidates.view(batch_size, top_k * top_k)
(cur_scores, cand_idcs) = new_score_candidates.topk(top_k, dim=1)
cur_scores = cur_scores.view(batch_size * top_k)
cand_idcs = cand_idcs.tolist()
to_emit_symbol_candidates = step_symbols.view(batch_size, top_k * top_k).tolist()
step_predecessors = []
step_emitted_symbols = []
for batch_idx in range(len(cand_idcs)):
for cand_idx in cand_idcs[batch_idx]:
pred = cand_idx // top_k + batch_idx * top_k
emit = to_emit_symbol_candidates[batch_idx][cand_idx]
step_predecessors.append(pred)
step_emitted_symbols.append(emit)
beam_emmited_symbols.append(step_emitted_symbols)
beam_predecessors.append(step_predecessors)
beam_scores.append(cur_scores.tolist())
new_partial_seqs = []
for (step_e, step_pred) in zip(step_emitted_symbols, step_predecessors):
pred_partial_seq = partial_seqs[step_pred]
new_partial_seq = pred_partial_seq + [step_e]
new_partial_seqs.append(new_partial_seq)
partial_seqs = new_partial_seqs
eos_token_masks = torch.LongTensor(step_emitted_symbols).to(DEVICE) == self.eos_token_id
cur_scores = cur_scores.masked_fill(eos_token_masks, -float('inf'))
step_emitted_symbols = torch.LongTensor(step_emitted_symbols).view(batch_size * top_k, 1).to(DEVICE)
step_input = step_emitted_symbols
step_hidden = step_ret_dict['hiddens']
step_hidden = step_hidden.transpose(0, 1).contiguous()
new_step_hidden = []
for batch_idx in range(len(cand_idcs)):
for cand_idx in cand_idcs[batch_idx]:
pred = cand_idx // top_k + batch_idx * top_k
new_step_hidden.append(step_hidden[pred])
new_step_hidden = torch.stack(new_step_hidden)
step_hidden = new_step_hidden
step_hidden = step_hidden.transpose(0, 1).contiguous()
if gen_type not in (DecoderRNN.GEN_BEAM, DecoderRNN.GEN_BEAM_SAMPLE, DecoderRNN.GEN_BEAM_MMI_ANTI_LM):
outputs = torch.cat(step_output_lst, dim=1)
logits = torch.cat(step_logit_lst, dim=1)
if self.use_attention:
attns = torch.cat(step_attn_lst, dim=1)
else:
attns = None
symbols = torch.cat(step_symbol_lst, dim=1)
ret_dict['outputs'] = outputs
ret_dict['logits'] = logits
ret_dict['hiddens'] = step_hidden
ret_dict['attns'] = attns
ret_dict['symbols'] = symbols
else:
beam_scores = np.array(beam_scores)
beam_predecessors = np.array(beam_predecessors)
beam_emmited_symbols = np.array(beam_emmited_symbols)
(L, N) = beam_scores.shape
assert beam_scores.shape == beam_predecessors.shape == beam_emmited_symbols.shape
assert N == batch_size * top_k
def backtrack_from_coordinate(i, j):
"""
arguments
i - step axis
j - batch*beam axis
"""
score = beam_scores[i, j]
seq = [beam_emmited_symbols[i, j]]
while i > 0:
j = beam_predecessors[i, j]
i = i - 1
seq.append(beam_emmited_symbols[i, j])
seq.reverse()
batch_seqs = (seq, score)
batch_seqs = [[] for _ in range(batch_size)]
for i in range(L - 1):
for j in range(N):
if beam_emmited_symbols[i, j] == self.eos_token_id:
(seq, score) = backtrack_from_coordinate(i, j)
batch_idx = j // top_k
batch_seqs[batch_idx].append((seq, score))
i = L - 1
for j in range(N):
(seq, score) = backtrack_from_coordinate(i, j)
batch_idx = j // top_k
batch_seqs[batch_idx].append((seq, score))
batch_seqs = [sorted(seqs, key=lambda x: x[1], reverse=True) for seqs in batch_seqs]
batch_seqs = batch_seqs
batch_best_seqs = [seq_score_pairs[0][0] for seq_score_pairs in batch_seqs]
seq_lens = [len(seq) for seq in batch_best_seqs]
max_seq_len = max(seq_lens)
symbols = [seq + [self.pad_token_id] * (max_seq_len - len(seq)) for seq in batch_best_seqs]
symbols = torch.LongTensor(symbols).to(DEVICE)
ret_dict['symbols'] = symbols
ret_dict['beam_hypotheses'] = batch_seqs
return ret_dict
|
dialog-processing
|
positive
|
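The decoder's `GEN_TOP` branch above applies top-k and top-p (nucleus) filtering to the next-token logits before sampling. A minimal standalone PyTorch sketch of that filtering step, independent of the surrounding decoder state:

```python
import torch
import torch.nn.functional as F

def top_k_top_p_filter(logits: torch.Tensor, top_k: int = 0, top_p: float = 0.0) -> torch.Tensor:
    """Mask logits outside the top-k set and outside the top-p probability mass
    with -inf. `logits` has shape (batch, vocab)."""
    if top_k > 0:
        kth = torch.topk(logits, min(top_k, logits.size(-1)))[0][..., -1, None]
        logits = logits.masked_fill(logits < kth, float('-inf'))
    if top_p > 0.0:
        sorted_logits, sorted_idx = torch.sort(logits, descending=True)
        cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        remove = cum_probs > top_p
        remove[..., 1:] = remove[..., :-1].clone()  # shift right so the crossing token is kept
        remove[..., 0] = False
        logits = logits.masked_fill(remove.scatter(1, sorted_idx, remove), float('-inf'))
    return logits

logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
filtered = top_k_top_p_filter(logits, top_k=3, top_p=0.9)
next_token = torch.distributions.Categorical(logits=filtered).sample()
```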
def _strip_href(eles: List[BeautifulSoup]):
for ele in eles:
for a in ele.find_all('a'):
<DeepExtract>
if a and a.attrs and ('href' in a.attrs):
a['href'] = 'stripped_href'
</DeepExtract>
for link in ele.find_all('link'):
<DeepExtract>
if link and link.attrs and ('href' in link.attrs):
link['href'] = 'stripped_href'
</DeepExtract>
for meta in ele.find_all('meta'):
<DeepExtract>
if meta and meta.attrs and ('content' in meta.attrs):
meta['content'] = 'stripped_content'
</DeepExtract>
for script in ele.find_all('script'):
<DeepExtract>
if script and script.attrs and ('src' in script.attrs):
script['src'] = 'stripped_src'
</DeepExtract>
return eles
|
def _strip_href(eles: List[BeautifulSoup]):
for ele in eles:
for a in ele.find_all('a'):
if a and a.attrs and ('href' in a.attrs):
a['href'] = 'stripped_href'
for link in ele.find_all('link'):
if link and link.attrs and ('href' in link.attrs):
link['href'] = 'stripped_href'
for meta in ele.find_all('meta'):
if meta and meta.attrs and ('content' in meta.attrs):
meta['content'] = 'stripped_content'
for script in ele.find_all('script'):
if script and script.attrs and ('src' in script.attrs):
script['src'] = 'stripped_src'
return eles
|
arxiv-browse
|
positive
|
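`_strip_href` normalizes URL-bearing attributes (`href`, `src`, `content`) so that parsed pages can be compared without caring about exact links. A short BeautifulSoup usage sketch of the same idea (the HTML here is made up for illustration):

```python
from bs4 import BeautifulSoup

html = ('<html><head><link href="/static/a.css"><meta content="x"></head>'
        '<body><a href="/abs/1234.5678">paper</a><script src="/js/app.js"></script></body></html>')
soup = BeautifulSoup(html, 'html.parser')

# Replace volatile URL-bearing attributes with fixed placeholders.
for a in soup.find_all('a'):
    if a.attrs and 'href' in a.attrs:
        a['href'] = 'stripped_href'
for script in soup.find_all('script'):
    if script.attrs and 'src' in script.attrs:
        script['src'] = 'stripped_src'

print(soup.find('a'))  # <a href="stripped_href">paper</a>
```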
def test_define_scope_agent(self):
url = 'http://testserver/XAPI/statements'
guid = str(uuid.uuid1())
stmt_data = {'id': guid, 'actor': {'objectType': 'Agent', 'mbox': 'mailto:bob@bob.com', 'name': 'bob'}, 'verb': {'id': 'http://example.com/verbs/helped', 'display': {'en-US': 'helped'}}, 'object': {'objectType': 'Agent', 'mbox': 'mailto:tim@tim.com', 'name': 'tim'}}
stmt_post = self.client.post(reverse('lrs:statements'), json.dumps(stmt_data), content_type='application/json', Authorization=self.jane_auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(stmt_post.status_code, 200)
put_guid = str(uuid.uuid1())
stmt = json.dumps({'actor': {'objectType': 'Agent', 'mbox': 'mailto:bill@bill.com', 'name': 'bill'}, 'verb': {'id': 'http://example.com/verbs/talked', 'display': {'en-US': 'talked'}}, 'object': {'objectType': 'Agent', 'mbox': 'mailto:tim@tim.com', 'name': 'tim timson'}})
param = {'statementId': put_guid}
path = '%s?%s' % (url, urllib.parse.urlencode(param))
<DeepExtract>
if not request_nonce:
request_nonce = ''.join((random.choice(string.ascii_uppercase + string.digits) for _ in range(6)))
if not consumer:
consumer = self.consumer
oauth_header_request_token_params = 'OAuth realm="test",oauth_consumer_key="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0",oauth_callback="http://example.com/access_token_ready"' % (consumer.key, str(int(time.time())), request_nonce)
request_token_params = {}
if parameters:
request_token_params = parameters
if scope:
if 'statements/write statements/read':
request_token_params['scope'] = 'statements/write statements/read'
else:
request_token_params['scope'] = 'all'
if param_type == 'qs':
request_token_path = '%s?%s' % (INITIATE_ENDPOINT, urllib.parse.urlencode(request_token_params))
else:
request_token_path = INITIATE_ENDPOINT
oauth_header_request_token_params_list = oauth_header_request_token_params.split(',')
oauth_header_request_token_params_dict = {}
for p in oauth_header_request_token_params_list:
item = p.split('=')
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del oauth_header_request_token_params_dict['OAuth realm']
if param_type == 'qs':
oauth_request = oauth.Request.from_consumer_and_token(consumer, token=None, http_method='GET', http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
else:
oauth_request = oauth.Request.from_consumer_and_token(consumer, token=None, http_method='POST', http_url=request_token_path, parameters=dict(list(oauth_header_request_token_params_dict.items()) + list(request_token_params.items())))
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ',oauth_signature=%s' % signature
if param_type == 'qs':
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
else:
request_resp = self.client.post(request_token_path, Authorization=oauth_header_request_token_params, data=request_token_params, content_type='application/x-www-form-urlencoded')
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
token_secret = request_resp.content.split('&')[0].split('=')[1]
request_token = Token.objects.get(secret=token_secret)
authorize_param = {'oauth_token': request_token.key}
authorize_path = '%s?%s' % (AUTHORIZATION_ENDPOINT, urllib.parse.urlencode(authorize_param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(request_token.key, auth_resp['Location'])
self.client.login(username='jane', password='toto')
self.assertEqual(request_token.is_approved, False)
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200)
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = request_token.key
if change_scope:
data['scope'] = change_scope
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 302)
self.assertIn('http://example.com/access_token_ready?oauth_verifier=', auth_post['Location'])
self.assertIn('oauth_token=', auth_post['Location'])
token_key = auth_post['Location'].split('?')[1].split('&')[1].split('=')[1]
request_token_after_auth = Token.objects.get(key=token_key)
self.assertIn(request_token_after_auth.key, auth_post['Location'])
self.assertEqual(request_token_after_auth.is_approved, True)
if not access_nonce:
access_nonce = 'access_nonce'
oauth_header_access_token_params = 'OAuth realm="test",oauth_consumer_key="%s",oauth_token="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0",oauth_verifier="%s"' % (consumer.key, request_token_after_auth.key, str(int(time.time())), access_nonce, request_token_after_auth.verifier)
param_list = oauth_header_access_token_params.split(',')
oauth_header_access_params_dict = {}
for p in param_list:
item = p.split('=')
oauth_header_access_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del oauth_header_access_params_dict['OAuth realm']
oauth_request = oauth.Request.from_token_and_callback(request_token_after_auth, http_method='GET', http_url=TOKEN_ENDPOINT, parameters=oauth_header_access_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, consumer, request_token_after_auth)
oauth_header_access_token_params += ',oauth_signature="%s"' % signature
access_resp = self.client.get(TOKEN_ENDPOINT, Authorization=oauth_header_access_token_params)
self.assertEqual(access_resp.status_code, 200)
content = access_resp.content.split('&')
access_token_secret = content[0].split('=')[1]
access_token_key = content[1].split('=')[1]
access_token = Token.objects.get(secret=access_token_secret, key=access_token_key)
if not resource_nonce:
resource_nonce = 'resource_nonce'
oauth_header_resource_params = 'OAuth realm="test", oauth_consumer_key="%s",oauth_token="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0"' % (consumer.key, access_token.key, str(int(time.time())), resource_nonce)
self.client.logout()
(oauth_header_resource_params, access_token) = (oauth_header_resource_params, access_token)
</DeepExtract>
param_list = oauth_header_resource_params.split(',')
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split('=')
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del oauth_header_resource_params_dict['OAuth realm']
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='PUT', http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
resp = self.client.put(path, data=stmt, content_type='application/json', Authorization=oauth_header_resource_params, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp.status_code, 204)
agents = Agent.objects.all().values_list('name', flat=True)
self.assertEqual(len(agents), 7)
self.assertIn('tim', agents)
self.assertNotIn('tim timson', agents)
get_params = {'agent': {'objectType': 'Agent', 'mbox': 'mailto:tim@tim.com'}, 'related_agents': True}
path = '%s?%s' % (url, urllib.parse.urlencode(get_params))
oauth_header_resource_params_dict['oauth_nonce'] = 'get_differ_nonce'
oauth_request2 = oauth.Request.from_token_and_callback(access_token, http_method='GET', http_url=path, parameters=oauth_header_resource_params_dict)
signature_method2 = oauth.SignatureMethod_HMAC_SHA1()
signature2 = signature_method2.sign(oauth_request2, self.consumer, access_token)
sig = oauth_header_resource_params.replace('"%s"' % signature, '"%s"' % signature2)
new_oauth_headers = sig.replace('oauth_nonce="resource_nonce"', 'oauth_nonce="get_differ_nonce"')
get_resp = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=new_oauth_headers)
self.assertEqual(get_resp.status_code, 200)
content = json.loads(get_resp.content)
self.assertEqual(len(content['statements']), 2)
self.client.logout()
ot = 'Group'
members = [{'name': 'john doe', 'mbox': 'mailto:jd@example.com'}, {'name': 'jan doe', 'mbox': 'mailto:jandoe@example.com'}]
kwargs = {'objectType': ot, 'member': members, 'name': 'doe group'}
(global_group, created) = Agent.objects.retrieve_or_create(**kwargs)
members = [{'name': 'john doe', 'mbox': 'mailto:jd@example.com'}, {'name': 'jan doe', 'mbox': 'mailto:jandoe@example.com'}, {'name': 'dave doe', 'mbox': 'mailto:dd@example.com'}]
kwargs1 = {'objectType': ot, 'member': members, 'name': 'doe group'}
post_stmt = {'actor': {'objectType': 'Agent', 'mbox': 'mailto:dom@dom.com', 'name': 'dom'}, 'verb': {'id': 'http://example.com/verbs/assisted', 'display': {'en-US': 'assisted'}}, 'object': kwargs1}
stmt_json = json.dumps(post_stmt)
<DeepExtract>
if not request_nonce:
request_nonce = 'request_nonce2'
oauth_header_request_token_params = 'OAuth realm="test",oauth_consumer_key="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0",oauth_callback="http://example.com/access_token_ready"' % (self.consumer2.key, str(int(time.time())), request_nonce)
request_token_params = {}
if parameters:
request_token_params = parameters
if scope:
if 'statements/write statements/read':
request_token_params['scope'] = 'statements/write statements/read'
else:
request_token_params['scope'] = 'all'
if param_type == 'qs':
request_token_path = '%s?%s' % (INITIATE_ENDPOINT, urllib.parse.urlencode(request_token_params))
else:
request_token_path = INITIATE_ENDPOINT
oauth_header_request_token_params_list = oauth_header_request_token_params.split(',')
oauth_header_request_token_params_dict = {}
for p in oauth_header_request_token_params_list:
item = p.split('=')
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del oauth_header_request_token_params_dict['OAuth realm']
if param_type == 'qs':
oauth_request = oauth.Request.from_consumer_and_token(self.consumer2, token=None, http_method='GET', http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
else:
oauth_request = oauth.Request.from_consumer_and_token(self.consumer2, token=None, http_method='POST', http_url=request_token_path, parameters=dict(list(oauth_header_request_token_params_dict.items()) + list(request_token_params.items())))
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer2, None)
oauth_header_request_token_params = oauth_header_request_token_params + ',oauth_signature=%s' % signature
if param_type == 'qs':
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
else:
request_resp = self.client.post(request_token_path, Authorization=oauth_header_request_token_params, data=request_token_params, content_type='application/x-www-form-urlencoded')
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
request_token = Token.objects.get(consumer=self.consumer2)
authorize_param = {'oauth_token': request_token.key}
authorize_path = '%s?%s' % (AUTHORIZATION_ENDPOINT, urllib.parse.urlencode(authorize_param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(request_token.key, auth_resp['Location'])
self.client.login(username='dick', password='lassie')
self.assertEqual(request_token.is_approved, False)
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200)
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = request_token.key
if change_scope:
data['scope'] = change_scope
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 302)
self.assertIn('http://example.com/access_token_ready?oauth_verifier=', auth_post['Location'])
self.assertIn('oauth_token=', auth_post['Location'])
request_token_after_auth = Token.objects.get(consumer=self.consumer2)
self.assertIn(request_token_after_auth.key, auth_post['Location'])
self.assertEqual(request_token_after_auth.is_approved, True)
if not access_nonce:
access_nonce = 'access_nonce2'
oauth_header_access_token_params = 'OAuth realm="test",oauth_consumer_key="%s",oauth_token="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0",oauth_verifier="%s"' % (self.consumer2.key, request_token_after_auth.key, str(int(time.time())), access_nonce, request_token_after_auth.verifier)
param_list = oauth_header_access_token_params.split(',')
oauth_header_access_params_dict = {}
for p in param_list:
item = p.split('=')
oauth_header_access_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del oauth_header_access_params_dict['OAuth realm']
oauth_request = oauth.Request.from_token_and_callback(request_token_after_auth, http_method='GET', http_url=TOKEN_ENDPOINT, parameters=oauth_header_access_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer2, request_token_after_auth)
oauth_header_access_token_params += ',oauth_signature="%s"' % signature
access_resp = self.client.get(TOKEN_ENDPOINT, Authorization=oauth_header_access_token_params)
self.assertEqual(access_resp.status_code, 200)
content = access_resp.content.split('&')
access_token_secret = content[0].split('=')[1]
access_token_key = content[1].split('=')[1]
access_token = Token.objects.get(secret=access_token_secret, key=access_token_key)
if not resource_nonce:
resource_nonce = 'resource_nonce2'
oauth_header_resource_params = 'OAuth realm="test", oauth_consumer_key="%s",oauth_token="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0"' % (self.consumer2.key, access_token.key, str(int(time.time())), resource_nonce)
self.client.logout()
(post_oauth_header_resource_params, post_access_token) = (oauth_header_resource_params, access_token)
</DeepExtract>
post_param_list = post_oauth_header_resource_params.split(',')
post_oauth_header_resource_params_dict = {}
for p in post_param_list:
item = p.split('=')
post_oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del post_oauth_header_resource_params_dict['OAuth realm']
post_oauth_request = oauth.Request.from_token_and_callback(post_access_token, http_method='POST', http_url='http://testserver/XAPI/statements', parameters=post_oauth_header_resource_params_dict)
post_signature_method = oauth.SignatureMethod_HMAC_SHA1()
post_signature = post_signature_method.sign(post_oauth_request, self.consumer2, post_access_token)
post_oauth_header_resource_params += ',oauth_signature="%s"' % post_signature
post = self.client.post(reverse('lrs:statements'), data=stmt_json, content_type='application/json', Authorization=post_oauth_header_resource_params, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 200)
agents = Agent.objects.all().values_list('name', flat=True)
self.assertEqual(len(agents), 15)
self.assertIn('bill', agents)
self.assertNotIn('tim timson', agents)
self.assertIn('dom', agents)
self.assertIn('bob', agents)
self.assertIn('tim', agents)
self.assertIn('jan doe', agents)
self.assertIn('john doe', agents)
self.assertIn('dave doe', agents)
self.assertIn('jane', agents)
self.assertIn('dick', agents)
self.assertIn('doe group', agents)
|
def test_define_scope_agent(self):
url = 'http://testserver/XAPI/statements'
guid = str(uuid.uuid1())
stmt_data = {'id': guid, 'actor': {'objectType': 'Agent', 'mbox': 'mailto:bob@bob.com', 'name': 'bob'}, 'verb': {'id': 'http://example.com/verbs/helped', 'display': {'en-US': 'helped'}}, 'object': {'objectType': 'Agent', 'mbox': 'mailto:tim@tim.com', 'name': 'tim'}}
stmt_post = self.client.post(reverse('lrs:statements'), json.dumps(stmt_data), content_type='application/json', Authorization=self.jane_auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(stmt_post.status_code, 200)
put_guid = str(uuid.uuid1())
stmt = json.dumps({'actor': {'objectType': 'Agent', 'mbox': 'mailto:bill@bill.com', 'name': 'bill'}, 'verb': {'id': 'http://example.com/verbs/talked', 'display': {'en-US': 'talked'}}, 'object': {'objectType': 'Agent', 'mbox': 'mailto:tim@tim.com', 'name': 'tim timson'}})
param = {'statementId': put_guid}
path = '%s?%s' % (url, urllib.parse.urlencode(param))
if not request_nonce:
request_nonce = ''.join((random.choice(string.ascii_uppercase + string.digits) for _ in range(6)))
if not consumer:
consumer = self.consumer
oauth_header_request_token_params = 'OAuth realm="test",oauth_consumer_key="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0",oauth_callback="http://example.com/access_token_ready"' % (consumer.key, str(int(time.time())), request_nonce)
request_token_params = {}
if parameters:
request_token_params = parameters
if scope:
if 'statements/write statements/read':
request_token_params['scope'] = 'statements/write statements/read'
else:
request_token_params['scope'] = 'all'
if param_type == 'qs':
request_token_path = '%s?%s' % (INITIATE_ENDPOINT, urllib.parse.urlencode(request_token_params))
else:
request_token_path = INITIATE_ENDPOINT
oauth_header_request_token_params_list = oauth_header_request_token_params.split(',')
oauth_header_request_token_params_dict = {}
for p in oauth_header_request_token_params_list:
item = p.split('=')
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del oauth_header_request_token_params_dict['OAuth realm']
if param_type == 'qs':
oauth_request = oauth.Request.from_consumer_and_token(consumer, token=None, http_method='GET', http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
else:
oauth_request = oauth.Request.from_consumer_and_token(consumer, token=None, http_method='POST', http_url=request_token_path, parameters=dict(list(oauth_header_request_token_params_dict.items()) + list(request_token_params.items())))
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ',oauth_signature=%s' % signature
if param_type == 'qs':
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
else:
request_resp = self.client.post(request_token_path, Authorization=oauth_header_request_token_params, data=request_token_params, content_type='application/x-www-form-urlencoded')
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
token_secret = request_resp.content.split('&')[0].split('=')[1]
request_token = Token.objects.get(secret=token_secret)
authorize_param = {'oauth_token': request_token.key}
authorize_path = '%s?%s' % (AUTHORIZATION_ENDPOINT, urllib.parse.urlencode(authorize_param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(request_token.key, auth_resp['Location'])
self.client.login(username='jane', password='toto')
self.assertEqual(request_token.is_approved, False)
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200)
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = request_token.key
if change_scope:
data['scope'] = change_scope
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 302)
self.assertIn('http://example.com/access_token_ready?oauth_verifier=', auth_post['Location'])
self.assertIn('oauth_token=', auth_post['Location'])
token_key = auth_post['Location'].split('?')[1].split('&')[1].split('=')[1]
request_token_after_auth = Token.objects.get(key=token_key)
self.assertIn(request_token_after_auth.key, auth_post['Location'])
self.assertEqual(request_token_after_auth.is_approved, True)
if not access_nonce:
access_nonce = 'access_nonce'
oauth_header_access_token_params = 'OAuth realm="test",oauth_consumer_key="%s",oauth_token="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0",oauth_verifier="%s"' % (consumer.key, request_token_after_auth.key, str(int(time.time())), access_nonce, request_token_after_auth.verifier)
param_list = oauth_header_access_token_params.split(',')
oauth_header_access_params_dict = {}
for p in param_list:
item = p.split('=')
oauth_header_access_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del oauth_header_access_params_dict['OAuth realm']
oauth_request = oauth.Request.from_token_and_callback(request_token_after_auth, http_method='GET', http_url=TOKEN_ENDPOINT, parameters=oauth_header_access_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, consumer, request_token_after_auth)
oauth_header_access_token_params += ',oauth_signature="%s"' % signature
access_resp = self.client.get(TOKEN_ENDPOINT, Authorization=oauth_header_access_token_params)
self.assertEqual(access_resp.status_code, 200)
content = access_resp.content.split('&')
access_token_secret = content[0].split('=')[1]
access_token_key = content[1].split('=')[1]
access_token = Token.objects.get(secret=access_token_secret, key=access_token_key)
if not resource_nonce:
resource_nonce = 'resource_nonce'
oauth_header_resource_params = 'OAuth realm="test", oauth_consumer_key="%s",oauth_token="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0"' % (consumer.key, access_token.key, str(int(time.time())), resource_nonce)
self.client.logout()
(oauth_header_resource_params, access_token) = (oauth_header_resource_params, access_token)
param_list = oauth_header_resource_params.split(',')
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split('=')
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del oauth_header_resource_params_dict['OAuth realm']
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='PUT', http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
resp = self.client.put(path, data=stmt, content_type='application/json', Authorization=oauth_header_resource_params, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp.status_code, 204)
agents = Agent.objects.all().values_list('name', flat=True)
self.assertEqual(len(agents), 7)
self.assertIn('tim', agents)
self.assertNotIn('tim timson', agents)
get_params = {'agent': {'objectType': 'Agent', 'mbox': 'mailto:tim@tim.com'}, 'related_agents': True}
path = '%s?%s' % (url, urllib.parse.urlencode(get_params))
oauth_header_resource_params_dict['oauth_nonce'] = 'get_differ_nonce'
oauth_request2 = oauth.Request.from_token_and_callback(access_token, http_method='GET', http_url=path, parameters=oauth_header_resource_params_dict)
signature_method2 = oauth.SignatureMethod_HMAC_SHA1()
signature2 = signature_method2.sign(oauth_request2, self.consumer, access_token)
sig = oauth_header_resource_params.replace('"%s"' % signature, '"%s"' % signature2)
new_oauth_headers = sig.replace('oauth_nonce="resource_nonce"', 'oauth_nonce="get_differ_nonce"')
get_resp = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=new_oauth_headers)
self.assertEqual(get_resp.status_code, 200)
content = json.loads(get_resp.content)
self.assertEqual(len(content['statements']), 2)
self.client.logout()
ot = 'Group'
members = [{'name': 'john doe', 'mbox': 'mailto:jd@example.com'}, {'name': 'jan doe', 'mbox': 'mailto:jandoe@example.com'}]
kwargs = {'objectType': ot, 'member': members, 'name': 'doe group'}
(global_group, created) = Agent.objects.retrieve_or_create(**kwargs)
members = [{'name': 'john doe', 'mbox': 'mailto:jd@example.com'}, {'name': 'jan doe', 'mbox': 'mailto:jandoe@example.com'}, {'name': 'dave doe', 'mbox': 'mailto:dd@example.com'}]
kwargs1 = {'objectType': ot, 'member': members, 'name': 'doe group'}
post_stmt = {'actor': {'objectType': 'Agent', 'mbox': 'mailto:dom@dom.com', 'name': 'dom'}, 'verb': {'id': 'http://example.com/verbs/assisted', 'display': {'en-US': 'assisted'}}, 'object': kwargs1}
stmt_json = json.dumps(post_stmt)
if not request_nonce:
request_nonce = 'request_nonce2'
oauth_header_request_token_params = 'OAuth realm="test",oauth_consumer_key="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0",oauth_callback="http://example.com/access_token_ready"' % (self.consumer2.key, str(int(time.time())), request_nonce)
request_token_params = {}
if parameters:
request_token_params = parameters
if scope:
if 'statements/write statements/read':
request_token_params['scope'] = 'statements/write statements/read'
else:
request_token_params['scope'] = 'all'
if param_type == 'qs':
request_token_path = '%s?%s' % (INITIATE_ENDPOINT, urllib.parse.urlencode(request_token_params))
else:
request_token_path = INITIATE_ENDPOINT
oauth_header_request_token_params_list = oauth_header_request_token_params.split(',')
oauth_header_request_token_params_dict = {}
for p in oauth_header_request_token_params_list:
item = p.split('=')
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del oauth_header_request_token_params_dict['OAuth realm']
if param_type == 'qs':
oauth_request = oauth.Request.from_consumer_and_token(self.consumer2, token=None, http_method='GET', http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
else:
oauth_request = oauth.Request.from_consumer_and_token(self.consumer2, token=None, http_method='POST', http_url=request_token_path, parameters=dict(list(oauth_header_request_token_params_dict.items()) + list(request_token_params.items())))
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer2, None)
oauth_header_request_token_params = oauth_header_request_token_params + ',oauth_signature=%s' % signature
if param_type == 'qs':
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
else:
request_resp = self.client.post(request_token_path, Authorization=oauth_header_request_token_params, data=request_token_params, content_type='application/x-www-form-urlencoded')
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
request_token = Token.objects.get(consumer=self.consumer2)
authorize_param = {'oauth_token': request_token.key}
authorize_path = '%s?%s' % (AUTHORIZATION_ENDPOINT, urllib.parse.urlencode(authorize_param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(request_token.key, auth_resp['Location'])
self.client.login(username='dick', password='lassie')
self.assertEqual(request_token.is_approved, False)
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200)
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = request_token.key
if change_scope:
data['scope'] = change_scope
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 302)
self.assertIn('http://example.com/access_token_ready?oauth_verifier=', auth_post['Location'])
self.assertIn('oauth_token=', auth_post['Location'])
request_token_after_auth = Token.objects.get(consumer=self.consumer2)
self.assertIn(request_token_after_auth.key, auth_post['Location'])
self.assertEqual(request_token_after_auth.is_approved, True)
if not access_nonce:
access_nonce = 'access_nonce2'
oauth_header_access_token_params = 'OAuth realm="test",oauth_consumer_key="%s",oauth_token="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0",oauth_verifier="%s"' % (self.consumer2.key, request_token_after_auth.key, str(int(time.time())), access_nonce, request_token_after_auth.verifier)
param_list = oauth_header_access_token_params.split(',')
oauth_header_access_params_dict = {}
for p in param_list:
item = p.split('=')
oauth_header_access_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del oauth_header_access_params_dict['OAuth realm']
oauth_request = oauth.Request.from_token_and_callback(request_token_after_auth, http_method='GET', http_url=TOKEN_ENDPOINT, parameters=oauth_header_access_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer2, request_token_after_auth)
oauth_header_access_token_params += ',oauth_signature="%s"' % signature
access_resp = self.client.get(TOKEN_ENDPOINT, Authorization=oauth_header_access_token_params)
self.assertEqual(access_resp.status_code, 200)
content = access_resp.content.split('&')
access_token_secret = content[0].split('=')[1]
access_token_key = content[1].split('=')[1]
access_token = Token.objects.get(secret=access_token_secret, key=access_token_key)
if not resource_nonce:
resource_nonce = 'resource_nonce2'
oauth_header_resource_params = 'OAuth realm="test", oauth_consumer_key="%s",oauth_token="%s",oauth_signature_method="HMAC-SHA1",oauth_timestamp="%s",oauth_nonce="%s",oauth_version="1.0"' % (self.consumer2.key, access_token.key, str(int(time.time())), resource_nonce)
self.client.logout()
(post_oauth_header_resource_params, post_access_token) = (oauth_header_resource_params, access_token)
post_param_list = post_oauth_header_resource_params.split(',')
post_oauth_header_resource_params_dict = {}
for p in post_param_list:
item = p.split('=')
post_oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
del post_oauth_header_resource_params_dict['OAuth realm']
post_oauth_request = oauth.Request.from_token_and_callback(post_access_token, http_method='POST', http_url='http://testserver/XAPI/statements', parameters=post_oauth_header_resource_params_dict)
post_signature_method = oauth.SignatureMethod_HMAC_SHA1()
post_signature = post_signature_method.sign(post_oauth_request, self.consumer2, post_access_token)
post_oauth_header_resource_params += ',oauth_signature="%s"' % post_signature
post = self.client.post(reverse('lrs:statements'), data=stmt_json, content_type='application/json', Authorization=post_oauth_header_resource_params, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post.status_code, 200)
agents = Agent.objects.all().values_list('name', flat=True)
self.assertEqual(len(agents), 15)
self.assertIn('bill', agents)
self.assertNotIn('tim timson', agents)
self.assertIn('dom', agents)
self.assertIn('bob', agents)
self.assertIn('tim', agents)
self.assertIn('jan doe', agents)
self.assertIn('john doe', agents)
self.assertIn('dave doe', agents)
self.assertIn('jane', agents)
self.assertIn('dick', agents)
self.assertIn('doe group', agents)
|
ADL_LRS
|
positive
|
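Both halves of the ADL_LRS entry repeat the same HMAC-SHA1 signing step: build an oauth.Request from the consumer/token plus the header parameters, sign it, and append oauth_signature to the Authorization header. A stripped-down sketch of just that step, assuming the python-oauth2 package imported as oauth; the keys, secrets, and URL below are placeholders, not the test fixtures:

import time
import oauth2 as oauth

consumer = oauth.Consumer('consumer_key', 'consumer_secret')
token = oauth.Token('token_key', 'token_secret')
params = {
    'oauth_consumer_key': consumer.key,
    'oauth_token': token.key,
    'oauth_signature_method': 'HMAC-SHA1',
    'oauth_timestamp': str(int(time.time())),
    'oauth_nonce': 'example_nonce',
    'oauth_version': '1.0',
}
request = oauth.Request.from_consumer_and_token(
    consumer, token=token, http_method='GET',
    http_url='http://testserver/XAPI/statements', parameters=params)
signature = oauth.SignatureMethod_HMAC_SHA1().sign(request, consumer, token)
print(signature)  # value spliced into the Authorization header as oauth_signature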
def brentq(self, tau_init, tol=0.001, max_iter=20):
self.in_search = False
<DeepExtract>
tau = tau_init
h = 1 + self.hs[0]
g = -1
i = 0
while (g < 0 and h - self.hs[0] > 1e-13) and i < max_itr:
self.clear_hist()
self.f(tau)
ind = self.taus.index(tau)
h = self.hs[ind]
g = self.gs[ind]
tau *= 0.5
i += 1
i = ind
</DeepExtract>
tau_init = self.taus[i]
g_init = self.gs[i]
if self.wolfe(g_init, self.hs[i], tau_init):
log.debug('CG: Using initial step, since Wolfe satisfied!')
tau_opt = tau_init
else:
self.in_search = True
try:
taus = None
if g_init > 0:
taus = [0, tau_init]
if taus is None:
for i in range(3):
<DeepExtract>
log.debug('CG: Polynomial extrapolation BEGIN')
g0 = self.f(tau_init / (i + 1.0))
tau1 = tau_init / (i + 1.0)
tau_prev = tau1
g1 = g0
i = 0
while g1 * g0 > 0 and i < max_itr:
tau_prev = tau1
if len(self.gs) > 1:
gs = sp.array(self.gs)
pts = min(max_points, len(gs))
res = sp.polyfit(self.taus[-pts:], gs[-pts:], max(max_deg, pts - 1), full=True)
po = res[0]
por = sp.roots(po)
if por.max().real > 0:
tau1 = por.max().real
if tau1 > tau_init / (i + 1.0) * 10:
tau1 = min(tau_prev * 2, tau1)
if g1 < 0:
tau1 *= fac_inc
else:
tau1 *= fac_red
g1 = self.f(tau1)
i += 1
if i == max_itr:
log.debug('CG: Polynomial extrapolation MAX ITR')
taus = None
else:
log.debug('CG: Polynomial extrapolation DONE')
taus = sorted([tau_prev, tau1])
</DeepExtract>
if not taus is None:
break
<DeepExtract>
self.taus = self.taus[:1]
self.hs = self.hs[:1]
self.gs = self.gs[:1]
self.lLs = self.lLs[:1]
self.rLs = self.rLs[:1]
self.K0s = self.K0s[:1]
self.Ws = self.Ws[:1]
</DeepExtract>
if taus is None:
return (0, self.hs[0], 0)
try:
tau_opt = opti.brentq(self.f, taus[0], taus[-1], xtol=tol, maxiter=max_iter)
except ValueError:
log.warning('CG: Failed to find a valid bracket.')
return (0, self.hs[0], 0)
except EvoMPS_line_search_wolfe_sat as e:
log.debug('CG: Aborting early due to Wolfe')
tau_opt = e.tau
i = self.taus.index(tau_opt)
h_min = self.hs[i]
self.in_search = False
return (tau_opt, h_min, i)
|
def brentq(self, tau_init, tol=0.001, max_iter=20):
self.in_search = False
tau = tau_init
h = 1 + self.hs[0]
g = -1
i = 0
while (g < 0 and h - self.hs[0] > 1e-13) and i < max_itr:
self.clear_hist()
self.f(tau)
ind = self.taus.index(tau)
h = self.hs[ind]
g = self.gs[ind]
tau *= 0.5
i += 1
i = ind
tau_init = self.taus[i]
g_init = self.gs[i]
if self.wolfe(g_init, self.hs[i], tau_init):
log.debug('CG: Using initial step, since Wolfe satisfied!')
tau_opt = tau_init
else:
self.in_search = True
try:
taus = None
if g_init > 0:
taus = [0, tau_init]
if taus is None:
for i in range(3):
log.debug('CG: Polynomial extrapolation BEGIN')
g0 = self.f(tau_init / (i + 1.0))
tau1 = tau_init / (i + 1.0)
tau_prev = tau1
g1 = g0
i = 0
while g1 * g0 > 0 and i < max_itr:
tau_prev = tau1
if len(self.gs) > 1:
gs = sp.array(self.gs)
pts = min(max_points, len(gs))
res = sp.polyfit(self.taus[-pts:], gs[-pts:], max(max_deg, pts - 1), full=True)
po = res[0]
por = sp.roots(po)
if por.max().real > 0:
tau1 = por.max().real
if tau1 > tau_init / (i + 1.0) * 10:
tau1 = min(tau_prev * 2, tau1)
if g1 < 0:
tau1 *= fac_inc
else:
tau1 *= fac_red
g1 = self.f(tau1)
i += 1
if i == max_itr:
log.debug('CG: Polynomial extrapolation MAX ITR')
taus = None
else:
log.debug('CG: Polynomial extrapolation DONE')
taus = sorted([tau_prev, tau1])
if not taus is None:
break
self.taus = self.taus[:1]
self.hs = self.hs[:1]
self.gs = self.gs[:1]
self.lLs = self.lLs[:1]
self.rLs = self.rLs[:1]
self.K0s = self.K0s[:1]
self.Ws = self.Ws[:1]
if taus is None:
return (0, self.hs[0], 0)
try:
tau_opt = opti.brentq(self.f, taus[0], taus[-1], xtol=tol, maxiter=max_iter)
except ValueError:
log.warning('CG: Failed to find a valid bracket.')
return (0, self.hs[0], 0)
except EvoMPS_line_search_wolfe_sat as e:
log.debug('CG: Aborting early due to Wolfe')
tau_opt = e.tau
i = self.taus.index(tau_opt)
h_min = self.hs[i]
self.in_search = False
return (tau_opt, h_min, i)
|
evoMPS
|
positive
|
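The evoMPS entry ultimately hands its bracketing interval to scipy.optimize.brentq, which requires the objective to change sign over [a, b] and raises ValueError otherwise; that is exactly what the except ValueError branch above guards against. A minimal usage sketch with a made-up objective:

import scipy.optimize as opti

f = lambda tau: tau ** 2 - 2.0  # sign change on [0, 2], root at sqrt(2)
tau_opt = opti.brentq(f, 0.0, 2.0, xtol=0.001, maxiter=20)
print(tau_opt)  # ~1.414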
def OnKeyDown(self, event):
objs = self.myOlv.GetSelectedObjects()
key = event.GetKeyCode()
if wx.WXK_DELETE == key:
<DeepExtract>
for obj in objs:
pass
self.myOlv.RemoveObjects(objs)
</DeepExtract>
elif 3 == key:
<DeepExtract>
self.dataObj = wx.TextDataObject()
file_ids = ','.join([obj.file_id for obj in objs])
wx.MessageBox(file_ids, 'MD5 code')
</DeepExtract>
|
def OnKeyDown(self, event):
objs = self.myOlv.GetSelectedObjects()
key = event.GetKeyCode()
if wx.WXK_DELETE == key:
for obj in objs:
pass
self.myOlv.RemoveObjects(objs)
elif 3 == key:
self.dataObj = wx.TextDataObject()
file_ids = ','.join([obj.file_id for obj in objs])
wx.MessageBox(file_ids, 'MD5 code')
|
bookhub
|
positive
|
def get_conn(self, auto_open=False):
"""Return a connection to our HTTP server."""
<DeepExtract>
cls_name = '{scheme}Connection'.format(scheme=self.scheme.upper())
conn = getattr(http.client, cls_name)
</DeepExtract>
conn.auto_open = auto_open
conn.connect()
return conn
|
def get_conn(self, auto_open=False):
"""Return a connection to our HTTP server."""
cls_name = '{scheme}Connection'.format(scheme=self.scheme.upper())
conn = getattr(http.client, cls_name)
conn.auto_open = auto_open
conn.connect()
return conn
|
cheroot
|
positive
|
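The cheroot entry picks the connection class by name from http.client. A small sketch of that lookup pattern; the host and port below are placeholders, and the class is instantiated before any attributes are set or connect() is called:

import http.client

scheme = 'http'
cls_name = '{scheme}Connection'.format(scheme=scheme.upper())
conn_cls = getattr(http.client, cls_name)  # http.client.HTTPConnection
conn = conn_cls('localhost', 8080)         # hypothetical host/port
conn.auto_open = False
# conn.connect() would now open the socket, given a listening server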
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--input_pc', type=str, help='Input labelled point cloud. The point cloud should has geon type label, output txt from roof_segmentation.py. ')
parser.add_argument('--output_png', type=str, help='Output png result file.')
parser.add_argument('--output_txt', type=str, help='Output txt result file includes all planar points for Purdue to process.')
parser.add_argument('--output_geon', type=str, help='Output geon file.')
args = parser.parse_args(args)
(point_list, building_label_list, geon_label_list) = utils.read_geon_type_pc(args.input_pc)
center_of_mess = np.mean(point_list, axis=0)
point_list = point_list - center_of_mess
point_list = point_list.astype(np.float32)
cloud_filtered = pcl.PointCloud()
cloud_filtered.from_array(point_list)
print(cloud_filtered.size)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
geon_model = []
all_remaining_index = []
index = 0
building_index_list = []
building_max_index = np.max(building_label_list)
total_list = np.arange(building_label_list.shape[0])
for i in range(building_max_index + 1):
current_list = total_list[building_label_list == i]
building_index_list.append(current_list)
geon_type_number = 4
cylinder_index = 2
sphere_index = 3
point_number_scale = 1
for indices in building_index_list:
if len(indices) < 300:
if len(all_remaining_index) == 0:
all_remaining_index = copy.copy(indices)
else:
all_remaining_index = np.concatenate((all_remaining_index, indices), axis=None)
continue
geon_index_list = []
for i in range(geon_type_number):
geon_index_list.append([])
building_points = np.zeros((len(indices), 3), dtype=np.float32)
num_building_points = len(indices)
for (i, indice) in enumerate(indices):
building_points[i][0] = cloud_filtered[indice][0]
building_points[i][1] = cloud_filtered[indice][1]
building_points[i][2] = cloud_filtered[indice][2]
for (i, indice) in enumerate(indices):
geon_index_list[geon_label_list[indice]].append(indice)
fitted_index = np.zeros(len(indices), dtype=np.int32)
fitted_index = fitted_index == 1
if len(geon_index_list[cylinder_index]) > 0.1 * len(indices):
points = np.zeros((len(geon_index_list[cylinder_index]), 3), dtype=np.float32)
for (i, indice) in enumerate(geon_index_list[cylinder_index]):
points[i][0] = cloud_filtered[indice][0]
points[i][1] = cloud_filtered[indice][1]
points[i][2] = cloud_filtered[indice][2]
current_cloud = pcl.PointCloud()
current_cloud.from_array(points)
num_current_cylinder_point = current_cloud.size
if num_building_points > 15000:
vg = current_cloud.make_voxel_grid_filter()
vg.set_leaf_size(1, 1, 1)
current_cloud = vg.filter()
num_filtered_building_points = current_cloud.size
current_points = np.zeros((current_cloud.size, 3), dtype=np.float32)
for i in range(current_cloud.size):
current_points[i] = current_cloud[i]
max_r = 80
min_r = 40
if num_current_cylinder_point > 10000:
max_r = 80
min_r = 40
else:
max_r = 30
min_r = 10
while True:
<DeepExtract>
section_pc = pcl.PointCloud()
section_pc.from_array(current_points)
cylinder_seg = section_pc.make_segmenter_normals(ksearch=50)
cylinder_seg.set_optimize_coefficients(True)
cylinder_seg.set_model_type(pcl.SACMODEL_CYLINDER)
cylinder_seg.set_normal_distance_weight(0.1)
cylinder_seg.set_method_type(pcl.SAC_RANSAC)
cylinder_seg.set_max_iterations(1000)
cylinder_seg.set_distance_threshold(3)
cylinder_seg.set_radius_limits(min_r, max_r)
(cylinder_indices, cylinder_coefficients) = cylinder_seg.segment()
(cylinder_indices, cylinder_coefficients) = (cylinder_indices, cylinder_coefficients)
</DeepExtract>
if len(cylinder_indices) < 1000 * point_number_scale:
break
cylinder_points = np.zeros((len(cylinder_indices), 3), dtype=np.float32)
for (i, indice) in enumerate(cylinder_indices):
cylinder_points[i][0] = current_points[indice][0]
cylinder_points[i][1] = current_points[indice][1]
cylinder_points[i][2] = current_points[indice][2]
(centroid, ex, ey, ez, fitted_indices, coefficients, min_axis_z, max_axis_z, mean_diff) = two_D_fitting.fit_2D_curve(cylinder_coefficients[3:-1], cylinder_points, fit_type='poly2', dist_threshold=10)
for i in range(len(fitted_indices)):
if len(fitted_indices[i]) < max(500, 0.05 * num_filtered_building_points):
continue
fitted_points = np.zeros((len(fitted_indices[i]), 3), np.float32)
for (j, tmp_idx) in enumerate(fitted_indices[i]):
fitted_points[j, :] = cylinder_points[tmp_idx, :]
ax.scatter(fitted_points[:, 0], fitted_points[:, 1], fitted_points[:, 2], zdir='z', s=1, c='C{}'.format(2), rasterized=True, alpha=0.5)
(all_fitted_indices, ortho_x_max, ortho_x_min, error) = two_D_fitting.check_2D_curve(ex, ey, ez, coefficients, centroid, building_points, min_axis_z[i], max_axis_z[i], fit_type='poly2')
fitted_index[all_fitted_indices] = True
geon_model.append({'name': 'poly_cylinder', 'model': [centroid, ex, ey, coefficients, min_axis_z[i], max_axis_z[i], ortho_x_min, ortho_x_max, len(fitted_indices[i]), mean_diff]})
current_cloud = current_cloud.extract(cylinder_indices, True)
if current_cloud.size < max(500, 0.1 * num_filtered_building_points):
break
current_points = np.zeros((current_cloud.size, 3), dtype=np.float32)
for i in range(current_cloud.size):
current_points[i][0] = current_cloud[i][0]
current_points[i][1] = current_cloud[i][1]
current_points[i][2] = current_cloud[i][2]
if len(geon_index_list[sphere_index]) > 0.3 * len(indices):
points = np.zeros((len(geon_index_list[sphere_index]), 3), dtype=np.float32)
for (i, indice) in enumerate(geon_index_list[sphere_index]):
points[i][0] = cloud_filtered[indice][0]
points[i][1] = cloud_filtered[indice][1]
points[i][2] = cloud_filtered[indice][2]
current_cloud = pcl.PointCloud()
current_cloud.from_array(points)
if num_building_points > 10000:
vg = current_cloud.make_voxel_grid_filter()
vg.set_leaf_size(1, 1, 1)
current_cloud = vg.filter()
current_points = np.zeros((current_cloud.size, 3), dtype=np.float32)
for i in range(current_cloud.size):
current_points[i] = current_cloud[i]
while True:
<DeepExtract>
section_pc = pcl.PointCloud()
section_pc.from_array(current_points)
sphere_seg = section_pc.make_segmenter_normals(ksearch=50)
sphere_seg.set_optimize_coefficients(True)
sphere_seg.set_model_type(pcl.SACMODEL_SPHERE)
sphere_seg.set_normal_distance_weight(0.3)
sphere_seg.set_method_type(pcl.SAC_RANSAC)
sphere_seg.set_max_iterations(1000)
sphere_seg.set_distance_threshold(2)
sphere_seg.set_radius_limits(5, 20)
(sphere_indices, sphere_coefficients) = sphere_seg.segment()
min_lst = []
fitted_indices = []
max_lst = []
if len(sphere_indices) > 100:
sphere_points = current_points[sphere_indices, :]
sphere_indices = np.asarray(sphere_indices)
points_z = sphere_points[:, 2] - sphere_coefficients[2]
if np.max(points_z) - np.min(points_z) < 8:
sphere_indices = []
(sphere_indices, sphere_coefficients, min_lst, max_lst) = ([], sphere_coefficients, min_lst, max_lst)
(min_lst, max_lst, fitted_indices) = two_D_fitting.get_z_length(points_z, sphere_indices)
for i in range(len(max_lst)):
if min_lst[i] < -0.8 * sphere_coefficients[-1]:
min_lst[i] = -1 * sphere_coefficients[-1]
if max_lst[i] > 0.9 * sphere_coefficients[-1]:
max_lst[i] = sphere_coefficients[-1]
(sphere_indices, sphere_coefficients, min_lst, max_lst) = (sphere_indices, sphere_coefficients, min_lst, max_lst)
</DeepExtract>
if len(sphere_indices) < 200 * point_number_scale:
break
if sphere_coefficients[-1] > 0:
<DeepExtract>
theta_max = get_theta(min_lst[0], sphere_coefficients[-1])
theta_min = get_theta(max_lst[0], sphere_coefficients[-1])
(u, v) = np.mgrid[0:2 * np.pi:10j, theta_min:theta_max:10j]
x = np.cos(u) * np.sin(v) * sphere_coefficients[-1]
y = np.sin(u) * np.sin(v) * sphere_coefficients[-1]
z = np.cos(v) * sphere_coefficients[-1]
x = x + sphere_coefficients[0:3][0]
y = y + sphere_coefficients[0:3][1]
z = z + sphere_coefficients[0:3][2]
ax.plot_wireframe(x, y, z, color='r', alpha=0.5)
</DeepExtract>
sphere_points = np.zeros((len(sphere_indices), 3), dtype=np.float32)
for (i, indice) in enumerate(sphere_indices):
sphere_points[i][0] = current_points[indice][0]
sphere_points[i][1] = current_points[indice][1]
sphere_points[i][2] = current_points[indice][2]
ax.scatter(sphere_points[:, 0], sphere_points[:, 1], sphere_points[:, 2], zdir='z', s=1, c='C{}'.format(3), rasterized=True, alpha=0.5)
geon_model.append({'name': 'sphere', 'model': [sphere_coefficients[0:3], sphere_coefficients[-1], min_lst[0], max_lst[0], len(sphere_indices)]})
<DeepExtract>
distance = building_points - sphere_coefficients[0:3]
distance = distance * distance
distance = np.sqrt(np.sum(distance, axis=1))
error = distance - sphere_coefficients[-1]
sphere_indices = np.arange(building_points.shape[0])
sphere_indices = sphere_indices[np.logical_and(error < 3, error > -3)]
(all_fitted_indices, error) = (sphere_indices, error)
</DeepExtract>
fitted_index[all_fitted_indices] = True
current_cloud = current_cloud.extract(sphere_indices, True)
if current_cloud.size < 1000 * point_number_scale:
break
current_points = np.zeros((current_cloud.size, 3), dtype=np.float32)
for i in range(current_cloud.size):
current_points[i][0] = current_cloud[i][0]
current_points[i][1] = current_cloud[i][1]
current_points[i][2] = current_cloud[i][2]
remaining_index_list = indices[fitted_index == False]
if len(all_remaining_index) == 0:
all_remaining_index = copy.copy(remaining_index_list)
else:
all_remaining_index = np.concatenate((all_remaining_index, remaining_index_list), axis=None)
remaining_point_list = []
remaining_geon_list = []
for index in all_remaining_index:
remaining_point_list.append(point_list[index, :])
remaining_geon_list.append(geon_label_list[index])
remaining_point_list = np.asarray(remaining_point_list)
show_cloud = pcl.PointCloud()
show_cloud.from_array(remaining_point_list)
vg = show_cloud.make_voxel_grid_filter()
vg.set_leaf_size(2, 2, 2)
show_cloud = vg.filter()
show_points = np.zeros((show_cloud.size, 3), dtype=np.float32)
for i in range(show_cloud.size):
show_points[i, :] = show_cloud[i]
ax.scatter(show_points[:, 0], show_points[:, 1], show_points[:, 2], zdir='z', s=1, c='C{}'.format(9), alpha=0.01)
remaining_point_list = remaining_point_list + center_of_mess
fout = open('{}'.format(args.output_txt), mode='w')
for point_idx in range(remaining_point_list.shape[0]):
fout.write('{} {} {} {}\n'.format(remaining_point_list[point_idx, 0], remaining_point_list[point_idx, 1], remaining_point_list[point_idx, 2], remaining_geon_list[point_idx]))
utils.axisEqual3D(ax)
plt.savefig(args.output_png, bbox_inches='tight')
plt.close()
pickle.dump([center_of_mess, geon_model], open(args.output_geon, 'wb'))
|
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--input_pc', type=str, help='Input labelled point cloud. The point cloud should has geon type label, output txt from roof_segmentation.py. ')
parser.add_argument('--output_png', type=str, help='Output png result file.')
parser.add_argument('--output_txt', type=str, help='Output txt result file includes all planar points for Purdue to process.')
parser.add_argument('--output_geon', type=str, help='Output geon file.')
args = parser.parse_args(args)
(point_list, building_label_list, geon_label_list) = utils.read_geon_type_pc(args.input_pc)
center_of_mess = np.mean(point_list, axis=0)
point_list = point_list - center_of_mess
point_list = point_list.astype(np.float32)
cloud_filtered = pcl.PointCloud()
cloud_filtered.from_array(point_list)
print(cloud_filtered.size)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
geon_model = []
all_remaining_index = []
index = 0
building_index_list = []
building_max_index = np.max(building_label_list)
total_list = np.arange(building_label_list.shape[0])
for i in range(building_max_index + 1):
current_list = total_list[building_label_list == i]
building_index_list.append(current_list)
geon_type_number = 4
cylinder_index = 2
sphere_index = 3
point_number_scale = 1
for indices in building_index_list:
if len(indices) < 300:
if len(all_remaining_index) == 0:
all_remaining_index = copy.copy(indices)
else:
all_remaining_index = np.concatenate((all_remaining_index, indices), axis=None)
continue
geon_index_list = []
for i in range(geon_type_number):
geon_index_list.append([])
building_points = np.zeros((len(indices), 3), dtype=np.float32)
num_building_points = len(indices)
for (i, indice) in enumerate(indices):
building_points[i][0] = cloud_filtered[indice][0]
building_points[i][1] = cloud_filtered[indice][1]
building_points[i][2] = cloud_filtered[indice][2]
for (i, indice) in enumerate(indices):
geon_index_list[geon_label_list[indice]].append(indice)
fitted_index = np.zeros(len(indices), dtype=np.int32)
fitted_index = fitted_index == 1
if len(geon_index_list[cylinder_index]) > 0.1 * len(indices):
points = np.zeros((len(geon_index_list[cylinder_index]), 3), dtype=np.float32)
for (i, indice) in enumerate(geon_index_list[cylinder_index]):
points[i][0] = cloud_filtered[indice][0]
points[i][1] = cloud_filtered[indice][1]
points[i][2] = cloud_filtered[indice][2]
current_cloud = pcl.PointCloud()
current_cloud.from_array(points)
num_current_cylinder_point = current_cloud.size
if num_building_points > 15000:
vg = current_cloud.make_voxel_grid_filter()
vg.set_leaf_size(1, 1, 1)
current_cloud = vg.filter()
num_filtered_building_points = current_cloud.size
current_points = np.zeros((current_cloud.size, 3), dtype=np.float32)
for i in range(current_cloud.size):
current_points[i] = current_cloud[i]
max_r = 80
min_r = 40
if num_current_cylinder_point > 10000:
max_r = 80
min_r = 40
else:
max_r = 30
min_r = 10
while True:
section_pc = pcl.PointCloud()
section_pc.from_array(current_points)
cylinder_seg = section_pc.make_segmenter_normals(ksearch=50)
cylinder_seg.set_optimize_coefficients(True)
cylinder_seg.set_model_type(pcl.SACMODEL_CYLINDER)
cylinder_seg.set_normal_distance_weight(0.1)
cylinder_seg.set_method_type(pcl.SAC_RANSAC)
cylinder_seg.set_max_iterations(1000)
cylinder_seg.set_distance_threshold(3)
cylinder_seg.set_radius_limits(min_r, max_r)
(cylinder_indices, cylinder_coefficients) = cylinder_seg.segment()
(cylinder_indices, cylinder_coefficients) = (cylinder_indices, cylinder_coefficients)
if len(cylinder_indices) < 1000 * point_number_scale:
break
cylinder_points = np.zeros((len(cylinder_indices), 3), dtype=np.float32)
for (i, indice) in enumerate(cylinder_indices):
cylinder_points[i][0] = current_points[indice][0]
cylinder_points[i][1] = current_points[indice][1]
cylinder_points[i][2] = current_points[indice][2]
(centroid, ex, ey, ez, fitted_indices, coefficients, min_axis_z, max_axis_z, mean_diff) = two_D_fitting.fit_2D_curve(cylinder_coefficients[3:-1], cylinder_points, fit_type='poly2', dist_threshold=10)
for i in range(len(fitted_indices)):
if len(fitted_indices[i]) < max(500, 0.05 * num_filtered_building_points):
continue
fitted_points = np.zeros((len(fitted_indices[i]), 3), np.float32)
for (j, tmp_idx) in enumerate(fitted_indices[i]):
fitted_points[j, :] = cylinder_points[tmp_idx, :]
ax.scatter(fitted_points[:, 0], fitted_points[:, 1], fitted_points[:, 2], zdir='z', s=1, c='C{}'.format(2), rasterized=True, alpha=0.5)
(all_fitted_indices, ortho_x_max, ortho_x_min, error) = two_D_fitting.check_2D_curve(ex, ey, ez, coefficients, centroid, building_points, min_axis_z[i], max_axis_z[i], fit_type='poly2')
fitted_index[all_fitted_indices] = True
geon_model.append({'name': 'poly_cylinder', 'model': [centroid, ex, ey, coefficients, min_axis_z[i], max_axis_z[i], ortho_x_min, ortho_x_max, len(fitted_indices[i]), mean_diff]})
current_cloud = current_cloud.extract(cylinder_indices, True)
if current_cloud.size < max(500, 0.1 * num_filtered_building_points):
break
current_points = np.zeros((current_cloud.size, 3), dtype=np.float32)
for i in range(current_cloud.size):
current_points[i][0] = current_cloud[i][0]
current_points[i][1] = current_cloud[i][1]
current_points[i][2] = current_cloud[i][2]
if len(geon_index_list[sphere_index]) > 0.3 * len(indices):
points = np.zeros((len(geon_index_list[sphere_index]), 3), dtype=np.float32)
for (i, indice) in enumerate(geon_index_list[sphere_index]):
points[i][0] = cloud_filtered[indice][0]
points[i][1] = cloud_filtered[indice][1]
points[i][2] = cloud_filtered[indice][2]
current_cloud = pcl.PointCloud()
current_cloud.from_array(points)
if num_building_points > 10000:
vg = current_cloud.make_voxel_grid_filter()
vg.set_leaf_size(1, 1, 1)
current_cloud = vg.filter()
current_points = np.zeros((current_cloud.size, 3), dtype=np.float32)
for i in range(current_cloud.size):
current_points[i] = current_cloud[i]
while True:
section_pc = pcl.PointCloud()
section_pc.from_array(current_points)
sphere_seg = section_pc.make_segmenter_normals(ksearch=50)
sphere_seg.set_optimize_coefficients(True)
sphere_seg.set_model_type(pcl.SACMODEL_SPHERE)
sphere_seg.set_normal_distance_weight(0.3)
sphere_seg.set_method_type(pcl.SAC_RANSAC)
sphere_seg.set_max_iterations(1000)
sphere_seg.set_distance_threshold(2)
sphere_seg.set_radius_limits(5, 20)
(sphere_indices, sphere_coefficients) = sphere_seg.segment()
min_lst = []
fitted_indices = []
max_lst = []
if len(sphere_indices) > 100:
sphere_points = current_points[sphere_indices, :]
sphere_indices = np.asarray(sphere_indices)
points_z = sphere_points[:, 2] - sphere_coefficients[2]
if np.max(points_z) - np.min(points_z) < 8:
sphere_indices = []
(sphere_indices, sphere_coefficients, min_lst, max_lst) = ([], sphere_coefficients, min_lst, max_lst)
(min_lst, max_lst, fitted_indices) = two_D_fitting.get_z_length(points_z, sphere_indices)
for i in range(len(max_lst)):
if min_lst[i] < -0.8 * sphere_coefficients[-1]:
min_lst[i] = -1 * sphere_coefficients[-1]
if max_lst[i] > 0.9 * sphere_coefficients[-1]:
max_lst[i] = sphere_coefficients[-1]
(sphere_indices, sphere_coefficients, min_lst, max_lst) = (sphere_indices, sphere_coefficients, min_lst, max_lst)
if len(sphere_indices) < 200 * point_number_scale:
break
if sphere_coefficients[-1] > 0:
theta_max = get_theta(min_lst[0], sphere_coefficients[-1])
theta_min = get_theta(max_lst[0], sphere_coefficients[-1])
(u, v) = np.mgrid[0:2 * np.pi:10j, theta_min:theta_max:10j]
x = np.cos(u) * np.sin(v) * sphere_coefficients[-1]
y = np.sin(u) * np.sin(v) * sphere_coefficients[-1]
z = np.cos(v) * sphere_coefficients[-1]
x = x + sphere_coefficients[0:3][0]
y = y + sphere_coefficients[0:3][1]
z = z + sphere_coefficients[0:3][2]
ax.plot_wireframe(x, y, z, color='r', alpha=0.5)
sphere_points = np.zeros((len(sphere_indices), 3), dtype=np.float32)
for (i, indice) in enumerate(sphere_indices):
sphere_points[i][0] = current_points[indice][0]
sphere_points[i][1] = current_points[indice][1]
sphere_points[i][2] = current_points[indice][2]
ax.scatter(sphere_points[:, 0], sphere_points[:, 1], sphere_points[:, 2], zdir='z', s=1, c='C{}'.format(3), rasterized=True, alpha=0.5)
geon_model.append({'name': 'sphere', 'model': [sphere_coefficients[0:3], sphere_coefficients[-1], min_lst[0], max_lst[0], len(sphere_indices)]})
distance = building_points - sphere_coefficients[0:3]
distance = distance * distance
distance = np.sqrt(np.sum(distance, axis=1))
error = distance - sphere_coefficients[-1]
sphere_indices = np.arange(building_points.shape[0])
sphere_indices = sphere_indices[np.logical_and(error < 3, error > -3)]
(all_fitted_indices, error) = (sphere_indices, error)
fitted_index[all_fitted_indices] = True
current_cloud = current_cloud.extract(sphere_indices, True)
if current_cloud.size < 1000 * point_number_scale:
break
current_points = np.zeros((current_cloud.size, 3), dtype=np.float32)
for i in range(current_cloud.size):
current_points[i][0] = current_cloud[i][0]
current_points[i][1] = current_cloud[i][1]
current_points[i][2] = current_cloud[i][2]
remaining_index_list = indices[fitted_index == False]
if len(all_remaining_index) == 0:
all_remaining_index = copy.copy(remaining_index_list)
else:
all_remaining_index = np.concatenate((all_remaining_index, remaining_index_list), axis=None)
remaining_point_list = []
remaining_geon_list = []
for index in all_remaining_index:
remaining_point_list.append(point_list[index, :])
remaining_geon_list.append(geon_label_list[index])
remaining_point_list = np.asarray(remaining_point_list)
show_cloud = pcl.PointCloud()
show_cloud.from_array(remaining_point_list)
vg = show_cloud.make_voxel_grid_filter()
vg.set_leaf_size(2, 2, 2)
show_cloud = vg.filter()
show_points = np.zeros((show_cloud.size, 3), dtype=np.float32)
for i in range(show_cloud.size):
show_points[i, :] = show_cloud[i]
ax.scatter(show_points[:, 0], show_points[:, 1], show_points[:, 2], zdir='z', s=1, c='C{}'.format(9), alpha=0.01)
remaining_point_list = remaining_point_list + center_of_mess
fout = open('{}'.format(args.output_txt), mode='w')
for point_idx in range(remaining_point_list.shape[0]):
fout.write('{} {} {} {}\n'.format(remaining_point_list[point_idx, 0], remaining_point_list[point_idx, 1], remaining_point_list[point_idx, 2], remaining_geon_list[point_idx]))
utils.axisEqual3D(ax)
plt.savefig(args.output_png, bbox_inches='tight')
plt.close()
pickle.dump([center_of_mess, geon_model], open(args.output_geon, 'wb'))
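# Illustration only (values invented, not part of the Danesfield script above): the final inlier
# test keeps points whose distance to the fitted sphere surface lies within +/-3 units,
# i.e. error = ||p - center|| - radius with -3 < error < 3. A minimal NumPy sketch of that test:
import numpy as np

center = np.array([0.0, 0.0, 0.0])            # stands in for sphere_coefficients[0:3]
radius = 10.0                                 # stands in for sphere_coefficients[-1]
pts = np.array([[10.5, 0.0, 0.0], [0.0, 14.5, 0.0], [0.0, 0.0, 9.0]], dtype=np.float32)
error = np.sqrt(np.sum((pts - center) ** 2, axis=1)) - radius
inliers = np.arange(pts.shape[0])[np.logical_and(error < 3, error > -3)]
print(inliers.tolist())                       # -> [0, 2]; the point 14.5 units away is rejected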
|
Danesfield
|
positive
|
def parse_cli(output, tmpl):
if not isinstance(output, string_types):
        raise AnsibleError('parse_cli input should be a string, but was given an input of %s' % type(output))
if not os.path.exists(tmpl):
raise AnsibleError('unable to locate parse_cli template: %s' % tmpl)
try:
template = Template()
except ImportError as exc:
raise AnsibleError(to_native(exc))
with open(tmpl) as tmpl_fh:
tmpl_content = tmpl_fh.read()
spec = yaml.safe_load(tmpl_content)
obj = {}
for (name, attrs) in iteritems(spec['keys']):
value = attrs['value']
try:
variables = spec.get('vars', {})
value = template(value, variables)
except Exception:
pass
if 'start_block' in attrs and 'end_block' in attrs:
start_block = re.compile(attrs['start_block'])
end_block = re.compile(attrs['end_block'])
blocks = list()
lines = None
block_started = False
for line in output.split('\n'):
match_start = start_block.match(line)
match_end = end_block.match(line)
if match_start:
lines = list()
lines.append(line)
block_started = True
elif match_end:
if lines:
lines.append(line)
blocks.append('\n'.join(lines))
lines = None
block_started = False
elif block_started:
if lines:
lines.append(line)
regex_items = [re.compile(r) for r in attrs['items']]
objects = list()
for block in blocks:
if isinstance(value, Mapping) and 'key' not in value:
items = list()
for regex in regex_items:
items.append(re_finditer(regex, block))
obj = {}
for (k, v) in iteritems(value):
try:
obj[k] = template(v, {'item': items}, fail_on_undefined=False)
except Exception:
obj[k] = None
objects.append(obj)
elif isinstance(value, Mapping):
items = list()
for regex in regex_items:
items.append(re_finditer(regex, block))
key = template(value['key'], {'item': items})
values = dict([(k, template(v, {'item': items})) for (k, v) in iteritems(value['values'])])
objects.append({key: values})
return objects
elif 'items' in attrs:
regexp = re.compile(attrs['items'])
when = attrs.get('when')
conditional = '{%% if %s %%}True{%% else %%}False{%% endif %%}' % when
if isinstance(value, Mapping) and 'key' not in value:
values = list()
for item in re_matchall(regexp, output):
entry = {}
for (item_key, item_value) in iteritems(value):
entry[item_key] = template(item_value, {'item': item})
if when:
if template(conditional, {'item': entry}):
values.append(entry)
else:
values.append(entry)
obj[name] = values
elif isinstance(value, Mapping):
values = dict()
for item in re_matchall(regexp, output):
entry = {}
for (item_key, item_value) in iteritems(value['values']):
entry[item_key] = template(item_value, {'item': item})
key = template(value['key'], {'item': item})
if when:
if template(conditional, {'item': {'key': key, 'value': entry}}):
values[key] = entry
else:
values[key] = entry
obj[name] = values
else:
<DeepExtract>
obj = {}
match = regexp.search(output, re.M)
if match:
items = list(match.groups())
if regexp.groupindex:
for (name, index) in iteritems(regexp.groupindex):
obj[name] = items[index - 1]
item = obj
</DeepExtract>
obj[name] = template(value, {'item': item})
else:
obj[name] = value
return obj
|
def parse_cli(output, tmpl):
if not isinstance(output, string_types):
        raise AnsibleError('parse_cli input should be a string, but was given an input of %s' % type(output))
if not os.path.exists(tmpl):
raise AnsibleError('unable to locate parse_cli template: %s' % tmpl)
try:
template = Template()
except ImportError as exc:
raise AnsibleError(to_native(exc))
with open(tmpl) as tmpl_fh:
tmpl_content = tmpl_fh.read()
spec = yaml.safe_load(tmpl_content)
obj = {}
for (name, attrs) in iteritems(spec['keys']):
value = attrs['value']
try:
variables = spec.get('vars', {})
value = template(value, variables)
except Exception:
pass
if 'start_block' in attrs and 'end_block' in attrs:
start_block = re.compile(attrs['start_block'])
end_block = re.compile(attrs['end_block'])
blocks = list()
lines = None
block_started = False
for line in output.split('\n'):
match_start = start_block.match(line)
match_end = end_block.match(line)
if match_start:
lines = list()
lines.append(line)
block_started = True
elif match_end:
if lines:
lines.append(line)
blocks.append('\n'.join(lines))
lines = None
block_started = False
elif block_started:
if lines:
lines.append(line)
regex_items = [re.compile(r) for r in attrs['items']]
objects = list()
for block in blocks:
if isinstance(value, Mapping) and 'key' not in value:
items = list()
for regex in regex_items:
items.append(re_finditer(regex, block))
obj = {}
for (k, v) in iteritems(value):
try:
obj[k] = template(v, {'item': items}, fail_on_undefined=False)
except Exception:
obj[k] = None
objects.append(obj)
elif isinstance(value, Mapping):
items = list()
for regex in regex_items:
items.append(re_finditer(regex, block))
key = template(value['key'], {'item': items})
values = dict([(k, template(v, {'item': items})) for (k, v) in iteritems(value['values'])])
objects.append({key: values})
return objects
elif 'items' in attrs:
regexp = re.compile(attrs['items'])
when = attrs.get('when')
conditional = '{%% if %s %%}True{%% else %%}False{%% endif %%}' % when
if isinstance(value, Mapping) and 'key' not in value:
values = list()
for item in re_matchall(regexp, output):
entry = {}
for (item_key, item_value) in iteritems(value):
entry[item_key] = template(item_value, {'item': item})
if when:
if template(conditional, {'item': entry}):
values.append(entry)
else:
values.append(entry)
obj[name] = values
elif isinstance(value, Mapping):
values = dict()
for item in re_matchall(regexp, output):
entry = {}
for (item_key, item_value) in iteritems(value['values']):
entry[item_key] = template(item_value, {'item': item})
key = template(value['key'], {'item': item})
if when:
if template(conditional, {'item': {'key': key, 'value': entry}}):
values[key] = entry
else:
values[key] = entry
obj[name] = values
else:
obj = {}
match = regexp.search(output, re.M)
if match:
items = list(match.groups())
if regexp.groupindex:
for (name, index) in iteritems(regexp.groupindex):
obj[name] = items[index - 1]
item = obj
obj[name] = template(value, {'item': item})
else:
obj[name] = value
return obj
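# Hedged sketch (patterns and sample text are invented, not the ansible.netcommon API): the
# 'start_block'/'end_block' branch of parse_cli groups output lines into blocks, starting a new
# block on a start-pattern match and closing it on an end-pattern match. A self-contained
# approximation of just that grouping step, using the standard re module:
import re

def split_blocks(output, start_pattern, end_pattern):
    start_block, end_block = re.compile(start_pattern), re.compile(end_pattern)
    blocks, lines, started = [], None, False
    for line in output.split('\n'):
        if start_block.match(line):
            lines, started = [line], True            # open a new block
        elif end_block.match(line):
            if lines:
                lines.append(line)
                blocks.append('\n'.join(lines))      # close and store the block
            lines, started = None, False
        elif started and lines:
            lines.append(line)                       # accumulate lines inside the block
    return blocks

sample = 'interface eth0\n mtu 1500\n!\ninterface eth1\n mtu 9000\n!'
print(split_blocks(sample, r'^interface', r'^!'))    # two blocks, one per interface stanza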
|
ansible.netcommon
|
positive
|
def get_task(task_id):
<DeepExtract>
mc = MetaController()
task_report = mc.read_task(task_id)
</DeepExtract>
if task_report is not None:
return change_date_to_str(task_report)
else:
return add_error({}, 8, 'Task not found')
|
def get_task(task_id):
mc = MetaController()
task_report = mc.read_task(task_id)
if task_report is not None:
return change_date_to_str(task_report)
else:
return add_error({}, 8, 'Task not found')
|
codex-backend
|
positive
|
def _compute_sampling_prob(self):
"""Class Diversity Sensitive Sampling
http://www.objects365.org/slides/Obj365_BaiduVIS.pdf
"""
    self.probs = np.zeros(len(self), dtype=float)
P = np.load('counts.npy')[1:]
for i in range(len(self)):
<DeepExtract>
ann = self.img_infos[i]['ann']
</DeepExtract>
gt_labels = ann['labels']
labels = np.unique(gt_labels)
Ci = len(labels)
Pc = P[labels]
self.probs[i] = np.log(Ci) * np.sum(Pc)
|
def _compute_sampling_prob(self):
"""Class Diversity Sensitive Sampling
http://www.objects365.org/slides/Obj365_BaiduVIS.pdf
"""
    self.probs = np.zeros(len(self), dtype=float)
P = np.load('counts.npy')[1:]
for i in range(len(self)):
ann = self.img_infos[i]['ann']
gt_labels = ann['labels']
labels = np.unique(gt_labels)
Ci = len(labels)
Pc = P[labels]
self.probs[i] = np.log(Ci) * np.sum(Pc)
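# Worked example (class counts invented, illustration only): for one image the sampling weight
# above is log(number of distinct classes present) times the sum of the per-class counts P for
# those classes.
import numpy as np

P = np.array([120.0, 35.0, 7.0])        # hypothetical per-class counts (already sliced with [1:])
labels = np.unique([0, 2, 2])           # classes present in one image -> [0, 2]
Ci = len(labels)                        # 2 distinct classes
prob = np.log(Ci) * np.sum(P[labels])   # log(2) * (120 + 7)
print(round(float(prob), 2))            # -> 88.03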
|
C-HOI
|
positive
|
def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor, sequence_id: Union[int, str]=0, weights: Optional[tf.Tensor]=None):
"""Accumulates the segmentation and tracking quality statistics.
Args:
y_true: The ground-truth panoptic label map for a particular video frame
(defined as semantic_map * max_instances_per_category + instance_map).
y_pred: The predicted panoptic label map for a particular video frame
(defined as semantic_map * max_instances_per_category + instance_map).
sequence_id: The optional ID of the sequence the frames belong to. When no
sequence is given, all frames are considered to belong to the same
sequence (default: 0).
weights: The weights for each pixel with the same shape of `y_true`.
"""
y_true = tf.cast(y_true, dtype=tf.int64)
y_pred = tf.cast(y_pred, dtype=tf.int64)
if weights is not None:
weights = tf.reshape(weights, y_true.shape)
semantic_label = y_true // self._max_instances_per_category
semantic_prediction = y_pred // self._max_instances_per_category
if self._ignore_label > self._num_classes:
semantic_label = tf.where(tf.not_equal(semantic_label, self._ignore_label), semantic_label, self._num_classes)
semantic_prediction = tf.where(tf.not_equal(semantic_prediction, self._ignore_label), semantic_prediction, self._num_classes)
if sequence_id in self._iou_confusion_matrix_per_sequence:
self._iou_confusion_matrix_per_sequence[sequence_id] += tf.math.confusion_matrix(tf.reshape(semantic_label, [-1]), tf.reshape(semantic_prediction, [-1]), self._confusion_matrix_size, dtype=tf.float64, weights=tf.reshape(weights, [-1]) if weights is not None else None)
self._sequence_length[sequence_id] += 1
else:
self._iou_confusion_matrix_per_sequence[sequence_id] = tf.math.confusion_matrix(tf.reshape(semantic_label, [-1]), tf.reshape(semantic_prediction, [-1]), self._confusion_matrix_size, dtype=tf.float64, weights=tf.reshape(weights, [-1]) if weights is not None else None)
self._predictions[sequence_id] = {}
self._ground_truth[sequence_id] = {}
self._intersections[sequence_id] = {}
self._sequence_length[sequence_id] = 1
instance_label = y_true % self._max_instances_per_category
label_mask = tf.zeros_like(semantic_label, dtype=tf.bool)
prediction_mask = tf.zeros_like(semantic_prediction, dtype=tf.bool)
for things_class_id in self._things_list:
label_mask = tf.logical_or(label_mask, tf.equal(semantic_label, things_class_id))
prediction_mask = tf.logical_or(prediction_mask, tf.equal(semantic_prediction, things_class_id))
is_crowd = tf.logical_and(tf.equal(instance_label, 0), label_mask)
label_mask = tf.logical_and(label_mask, tf.logical_not(is_crowd))
prediction_mask = tf.logical_and(prediction_mask, tf.logical_not(is_crowd))
seq_preds = self._predictions[sequence_id]
seq_gts = self._ground_truth[sequence_id]
seq_intersects = self._intersections[sequence_id]
<DeepExtract>
        if (weights[prediction_mask] if weights is not None else None) is None:
unique_weight_list = [1.0]
else:
(unique_weight_list, _) = tf.unique(weights[prediction_mask] if weights is not None else None)
unique_weight_list = unique_weight_list.numpy().tolist()
_check_weights(unique_weight_list)
for weight in unique_weight_list:
            if (weights[prediction_mask] if weights is not None else None) is None:
(ids, _, counts) = tf.unique_with_counts(y_pred[prediction_mask])
else:
(ids, _, counts) = tf.unique_with_counts(tf.boolean_mask(y_pred[prediction_mask], tf.equal(weight, weights[prediction_mask] if weights is not None else None)))
for (idx, count) in zip(ids.numpy(), tf.cast(counts, tf.float32)):
if idx in seq_preds:
seq_preds[idx] += count * weight
else:
seq_preds[idx] = count * weight
</DeepExtract>
<DeepExtract>
        if (weights[label_mask] if weights is not None else None) is None:
unique_weight_list = [1.0]
else:
(unique_weight_list, _) = tf.unique(weights[label_mask] if weights is not None else None)
unique_weight_list = unique_weight_list.numpy().tolist()
_check_weights(unique_weight_list)
for weight in unique_weight_list:
            if (weights[label_mask] if weights is not None else None) is None:
(ids, _, counts) = tf.unique_with_counts(y_true[label_mask])
else:
(ids, _, counts) = tf.unique_with_counts(tf.boolean_mask(y_true[label_mask], tf.equal(weight, weights[label_mask] if weights is not None else None)))
for (idx, count) in zip(ids.numpy(), tf.cast(counts, tf.float32)):
if idx in seq_gts:
seq_gts[idx] += count * weight
else:
seq_gts[idx] = count * weight
</DeepExtract>
non_crowd_intersection = tf.logical_and(label_mask, prediction_mask)
intersection_ids = y_true[non_crowd_intersection] * self._offset + y_pred[non_crowd_intersection]
<DeepExtract>
        if (weights[non_crowd_intersection] if weights is not None else None) is None:
unique_weight_list = [1.0]
else:
(unique_weight_list, _) = tf.unique(weights[non_crowd_intersection] if weights is not None else None)
unique_weight_list = unique_weight_list.numpy().tolist()
_check_weights(unique_weight_list)
for weight in unique_weight_list:
            if (weights[non_crowd_intersection] if weights is not None else None) is None:
(ids, _, counts) = tf.unique_with_counts(intersection_ids)
else:
(ids, _, counts) = tf.unique_with_counts(tf.boolean_mask(intersection_ids, tf.equal(weight, weights[non_crowd_intersection] if weights is not None else None)))
for (idx, count) in zip(ids.numpy(), tf.cast(counts, tf.float32)):
if idx in seq_intersects:
seq_intersects[idx] += count * weight
else:
seq_intersects[idx] = count * weight
</DeepExtract>
|
def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor, sequence_id: Union[int, str]=0, weights: Optional[tf.Tensor]=None):
"""Accumulates the segmentation and tracking quality statistics.
Args:
y_true: The ground-truth panoptic label map for a particular video frame
(defined as semantic_map * max_instances_per_category + instance_map).
y_pred: The predicted panoptic label map for a particular video frame
(defined as semantic_map * max_instances_per_category + instance_map).
sequence_id: The optional ID of the sequence the frames belong to. When no
sequence is given, all frames are considered to belong to the same
sequence (default: 0).
weights: The weights for each pixel with the same shape of `y_true`.
"""
y_true = tf.cast(y_true, dtype=tf.int64)
y_pred = tf.cast(y_pred, dtype=tf.int64)
if weights is not None:
weights = tf.reshape(weights, y_true.shape)
semantic_label = y_true // self._max_instances_per_category
semantic_prediction = y_pred // self._max_instances_per_category
if self._ignore_label > self._num_classes:
semantic_label = tf.where(tf.not_equal(semantic_label, self._ignore_label), semantic_label, self._num_classes)
semantic_prediction = tf.where(tf.not_equal(semantic_prediction, self._ignore_label), semantic_prediction, self._num_classes)
if sequence_id in self._iou_confusion_matrix_per_sequence:
self._iou_confusion_matrix_per_sequence[sequence_id] += tf.math.confusion_matrix(tf.reshape(semantic_label, [-1]), tf.reshape(semantic_prediction, [-1]), self._confusion_matrix_size, dtype=tf.float64, weights=tf.reshape(weights, [-1]) if weights is not None else None)
self._sequence_length[sequence_id] += 1
else:
self._iou_confusion_matrix_per_sequence[sequence_id] = tf.math.confusion_matrix(tf.reshape(semantic_label, [-1]), tf.reshape(semantic_prediction, [-1]), self._confusion_matrix_size, dtype=tf.float64, weights=tf.reshape(weights, [-1]) if weights is not None else None)
self._predictions[sequence_id] = {}
self._ground_truth[sequence_id] = {}
self._intersections[sequence_id] = {}
self._sequence_length[sequence_id] = 1
instance_label = y_true % self._max_instances_per_category
label_mask = tf.zeros_like(semantic_label, dtype=tf.bool)
prediction_mask = tf.zeros_like(semantic_prediction, dtype=tf.bool)
for things_class_id in self._things_list:
label_mask = tf.logical_or(label_mask, tf.equal(semantic_label, things_class_id))
prediction_mask = tf.logical_or(prediction_mask, tf.equal(semantic_prediction, things_class_id))
is_crowd = tf.logical_and(tf.equal(instance_label, 0), label_mask)
label_mask = tf.logical_and(label_mask, tf.logical_not(is_crowd))
prediction_mask = tf.logical_and(prediction_mask, tf.logical_not(is_crowd))
seq_preds = self._predictions[sequence_id]
seq_gts = self._ground_truth[sequence_id]
seq_intersects = self._intersections[sequence_id]
        if (weights[prediction_mask] if weights is not None else None) is None:
unique_weight_list = [1.0]
else:
(unique_weight_list, _) = tf.unique(weights[prediction_mask] if weights is not None else None)
unique_weight_list = unique_weight_list.numpy().tolist()
_check_weights(unique_weight_list)
for weight in unique_weight_list:
            if (weights[prediction_mask] if weights is not None else None) is None:
(ids, _, counts) = tf.unique_with_counts(y_pred[prediction_mask])
else:
(ids, _, counts) = tf.unique_with_counts(tf.boolean_mask(y_pred[prediction_mask], tf.equal(weight, weights[prediction_mask] if weights is not None else None)))
for (idx, count) in zip(ids.numpy(), tf.cast(counts, tf.float32)):
if idx in seq_preds:
seq_preds[idx] += count * weight
else:
seq_preds[idx] = count * weight
        if (weights[label_mask] if weights is not None else None) is None:
unique_weight_list = [1.0]
else:
(unique_weight_list, _) = tf.unique(weights[label_mask] if weights is not None else None)
unique_weight_list = unique_weight_list.numpy().tolist()
_check_weights(unique_weight_list)
for weight in unique_weight_list:
            if (weights[label_mask] if weights is not None else None) is None:
(ids, _, counts) = tf.unique_with_counts(y_true[label_mask])
else:
(ids, _, counts) = tf.unique_with_counts(tf.boolean_mask(y_true[label_mask], tf.equal(weight, weights[label_mask] if weights is not None else None)))
for (idx, count) in zip(ids.numpy(), tf.cast(counts, tf.float32)):
if idx in seq_gts:
seq_gts[idx] += count * weight
else:
seq_gts[idx] = count * weight
non_crowd_intersection = tf.logical_and(label_mask, prediction_mask)
intersection_ids = y_true[non_crowd_intersection] * self._offset + y_pred[non_crowd_intersection]
        if (weights[non_crowd_intersection] if weights is not None else None) is None:
unique_weight_list = [1.0]
else:
(unique_weight_list, _) = tf.unique(weights[non_crowd_intersection] if weights is not None else None)
unique_weight_list = unique_weight_list.numpy().tolist()
_check_weights(unique_weight_list)
for weight in unique_weight_list:
            if (weights[non_crowd_intersection] if weights is not None else None) is None:
(ids, _, counts) = tf.unique_with_counts(intersection_ids)
else:
(ids, _, counts) = tf.unique_with_counts(tf.boolean_mask(intersection_ids, tf.equal(weight, weights[non_crowd_intersection] if weights is not None else None)))
for (idx, count) in zip(ids.numpy(), tf.cast(counts, tf.float32)):
if idx in seq_intersects:
seq_intersects[idx] += count * weight
else:
seq_intersects[idx] = count * weight
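# Illustration only (max_instances_per_category and the ids below are invented): the panoptic
# encoding described in the docstring packs the two ids as
#   panoptic_id = semantic_id * max_instances_per_category + instance_id,
# so they are recovered with integer division and modulo, exactly as in the method above.
import numpy as np

max_instances_per_category = 1000
panoptic = np.array([7042, 7000, 12001])            # hypothetical panoptic ids
semantic = panoptic // max_instances_per_category   # -> [ 7,  7, 12]
instance = panoptic % max_instances_per_category    # -> [42,  0,  1]
print(semantic.tolist(), instance.tolist())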
|
deeplab2
|
positive
|
@patch('ckanext.dcat.harvesters.DCATRDFHarvester._save_gather_error')
@responses.activate
@pytest.mark.ckan_config('ckanext.dcat.max_file_size', 100)
def test_harvest_config_file_size(self, mock_save_gather_error):
harvester = DCATRDFHarvester()
<DeepExtract>
responses.add_passthru(re.compile(config.get('solr_url', 'http://127.0.0.1:8983/solr').rstrip('/') + '/\\w+'))
</DeepExtract>
actual_file_size = 1024 * 1024 * 110
allowed_file_size = 1024 * 1024 * 100
responses.add(responses.HEAD, self.ttl_mock_url, status=200, content_type=self.ttl_content_type, adding_headers={'content-length': six.text_type(actual_file_size)})
<DeepExtract>
source_dict = {'title': 'Test RDF DCAT Source', 'name': 'test-rdf-dcat-source', 'url': self.ttl_mock_url, 'source_type': 'dcat_rdf'}
harvest_source = helpers.call_action('harvest_source_create', {}, **source_dict)
harvest_source = harvest_source
</DeepExtract>
<DeepExtract>
harvest_job = helpers.call_action('harvest_job_create', {}, source_id=harvest_source['id'])
harvest_job = harvest_job
</DeepExtract>
harvester._get_content_and_type(self.ttl_mock_url, harvest_job, 1, self.ttl_content_type)
msg = 'Remote file is too big. Allowed\n file size: {allowed}, Content-Length: {actual}.'.format(allowed=allowed_file_size, actual=actual_file_size)
mock_save_gather_error.assert_called_once_with(msg, harvest_job)
|
@patch('ckanext.dcat.harvesters.DCATRDFHarvester._save_gather_error')
@responses.activate
@pytest.mark.ckan_config('ckanext.dcat.max_file_size', 100)
def test_harvest_config_file_size(self, mock_save_gather_error):
harvester = DCATRDFHarvester()
responses.add_passthru(re.compile(config.get('solr_url', 'http://127.0.0.1:8983/solr').rstrip('/') + '/\\w+'))
actual_file_size = 1024 * 1024 * 110
allowed_file_size = 1024 * 1024 * 100
responses.add(responses.HEAD, self.ttl_mock_url, status=200, content_type=self.ttl_content_type, adding_headers={'content-length': six.text_type(actual_file_size)})
source_dict = {'title': 'Test RDF DCAT Source', 'name': 'test-rdf-dcat-source', 'url': self.ttl_mock_url, 'source_type': 'dcat_rdf'}
harvest_source = helpers.call_action('harvest_source_create', {}, **source_dict)
harvest_source = harvest_source
harvest_job = helpers.call_action('harvest_job_create', {}, source_id=harvest_source['id'])
harvest_job = harvest_job
harvester._get_content_and_type(self.ttl_mock_url, harvest_job, 1, self.ttl_content_type)
msg = 'Remote file is too big. Allowed\n file size: {allowed}, Content-Length: {actual}.'.format(allowed=allowed_file_size, actual=actual_file_size)
mock_save_gather_error.assert_called_once_with(msg, harvest_job)
|
ckanext-dcat
|
positive
|
def test_advancedSearching(self):
parser = self.parser
items = parser.getElementsByName('items')
assert items, 'Failed to get items'
<DeepExtract>
itemsUnder = []
for item in items:
priceEms = item.getElementsByName('price')
assert priceEms, 'Failed to find price elements'
assert len(priceEms) == 1, 'Expected 1 price element, got %d' % (len(priceEms),)
priceEm = priceEms[0]
priceInner = priceEm.innerHTML.strip()
assert priceInner, 'Got blank innerHTML in price element'
try:
priceValue = round(float(priceEm.innerHTML.strip()), 2)
except:
raise AssertionError('Failed to parse price value, not a float? (%f)' % (priceValue,))
if priceValue < 4.0:
itemsUnder.append(item)
itemsUnderFour = itemsUnder
</DeepExtract>
assert len(itemsUnderFour) == 3, 'Asserted to find 3 items under 4.00, but found %d' % (len(itemsUnderFour),)
names = [self._getItemName(item) for item in itemsUnderFour]
for name in names:
assert name, 'Expected name not to be blank'
names = set(names)
assert 'Sponges' in names, 'Expected to find Sponges'
assert 'Turtles' in names, 'Expected to find Turtles'
assert 'Pudding Cups' in names, 'Expected to find Pudding Cups'
assert 'Gold Brick' not in names, 'Expected NOT TO find Gold Brick'
|
def test_advancedSearching(self):
parser = self.parser
items = parser.getElementsByName('items')
assert items, 'Failed to get items'
itemsUnder = []
for item in items:
priceEms = item.getElementsByName('price')
assert priceEms, 'Failed to find price elements'
assert len(priceEms) == 1, 'Expected 1 price element, got %d' % (len(priceEms),)
priceEm = priceEms[0]
priceInner = priceEm.innerHTML.strip()
assert priceInner, 'Got blank innerHTML in price element'
try:
priceValue = round(float(priceEm.innerHTML.strip()), 2)
except:
raise AssertionError('Failed to parse price value, not a float? (%f)' % (priceValue,))
if priceValue < 4.0:
itemsUnder.append(item)
itemsUnderFour = itemsUnder
assert len(itemsUnderFour) == 3, 'Asserted to find 3 items under 4.00, but found %d' % (len(itemsUnderFour),)
names = [self._getItemName(item) for item in itemsUnderFour]
for name in names:
assert name, 'Expected name not to be blank'
names = set(names)
assert 'Sponges' in names, 'Expected to find Sponges'
assert 'Turtles' in names, 'Expected to find Turtles'
assert 'Pudding Cups' in names, 'Expected to find Pudding Cups'
assert 'Gold Brick' not in names, 'Expected NOT TO find Gold Brick'
|
AdvancedHTMLParser
|
positive
|
def test_is_native_enabled_default_false(self):
<DeepExtract>
chart = {'schema': 'armada/Chart/v{}'.format(str(version)), 'metadata': {'name': 'test'}, const.KEYWORD_DATA: {}}
unit = wait.ChartWait(k8s=mock.MagicMock(), release_id=helm.HelmReleaseId('test', 'test-test'), chart=chart, k8s_wait_attempts=1, k8s_wait_attempt_sleep=1, timeout=timeout)
</DeepExtract>
self.assertEquals(unit.is_native_enabled(), False)
|
def test_is_native_enabled_default_false(self):
chart = {'schema': 'armada/Chart/v{}'.format(str(version)), 'metadata': {'name': 'test'}, const.KEYWORD_DATA: {}}
unit = wait.ChartWait(k8s=mock.MagicMock(), release_id=helm.HelmReleaseId('test', 'test-test'), chart=chart, k8s_wait_attempts=1, k8s_wait_attempt_sleep=1, timeout=timeout)
self.assertEquals(unit.is_native_enabled(), False)
|
armada
|
positive
|
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Run inference on a single image.
Args:
img (Tensor): must be in shape (N, C, H, W)
img_metas (list[dict]): a list with one dictionary element.
See `mmdet/datasets/pipelines/formatting.py:Collect` for
details of meta dicts.
proposals : if specified overrides rpn proposals
rescale (bool): if True returns boxes in original image space
Returns:
dict: results
"""
<DeepExtract>
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
x = x
</DeepExtract>
proposal_list = self.simple_test_rpn(x, img_metas, self.test_cfg.rpn) if proposals is None else proposals
img_shape = img_metas[0]['img_shape']
ori_shape = img_metas[0]['ori_shape']
scale_factor = img_metas[0]['scale_factor']
ms_bbox_result = {}
ms_segm_result = {}
ms_scores = []
rcnn_test_cfg = self.test_cfg.rcnn
rois = bbox2roi(proposal_list)
for i in range(self.num_stages):
bbox_roi_extractor = self.bbox_roi_extractor[i]
bbox_head = self.bbox_head[i]
bbox_feats = bbox_roi_extractor(x[:len(bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
(cls_score, bbox_pred) = bbox_head(bbox_feats)
ms_scores.append(cls_score)
if i < self.num_stages - 1:
bbox_label = cls_score.argmax(dim=1)
rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred, img_metas[0])
cls_score = sum(ms_scores) / self.num_stages
(det_bboxes, det_labels) = self.bbox_head[-1].get_det_bboxes(rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg)
bbox_result = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes)
ms_bbox_result['ensemble'] = bbox_result
if self.with_mask:
if det_bboxes.shape[0] == 0:
mask_classes = self.mask_head[-1].num_classes - 1
segm_result = [[] for _ in range(mask_classes)]
else:
if isinstance(scale_factor, float):
_bboxes = det_bboxes[:, :4] * scale_factor if rescale else det_bboxes
else:
_bboxes = det_bboxes[:, :4] * torch.from_numpy(scale_factor).to(det_bboxes.device) if rescale else det_bboxes
mask_rois = bbox2roi([_bboxes])
aug_masks = []
for i in range(self.num_stages):
mask_roi_extractor = self.mask_roi_extractor[i]
mask_feats = mask_roi_extractor(x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head[i](mask_feats)
aug_masks.append(mask_pred.sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, [img_metas] * self.num_stages, self.test_cfg.rcnn)
segm_result = self.mask_head[-1].get_seg_masks(merged_masks, _bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale)
ms_segm_result['ensemble'] = segm_result
if self.with_mask:
results = (ms_bbox_result['ensemble'], ms_segm_result['ensemble'])
else:
results = ms_bbox_result['ensemble']
return results
|
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Run inference on a single image.
Args:
img (Tensor): must be in shape (N, C, H, W)
img_metas (list[dict]): a list with one dictionary element.
See `mmdet/datasets/pipelines/formatting.py:Collect` for
details of meta dicts.
proposals : if specified overrides rpn proposals
rescale (bool): if True returns boxes in original image space
Returns:
dict: results
"""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
x = x
proposal_list = self.simple_test_rpn(x, img_metas, self.test_cfg.rpn) if proposals is None else proposals
img_shape = img_metas[0]['img_shape']
ori_shape = img_metas[0]['ori_shape']
scale_factor = img_metas[0]['scale_factor']
ms_bbox_result = {}
ms_segm_result = {}
ms_scores = []
rcnn_test_cfg = self.test_cfg.rcnn
rois = bbox2roi(proposal_list)
for i in range(self.num_stages):
bbox_roi_extractor = self.bbox_roi_extractor[i]
bbox_head = self.bbox_head[i]
bbox_feats = bbox_roi_extractor(x[:len(bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
(cls_score, bbox_pred) = bbox_head(bbox_feats)
ms_scores.append(cls_score)
if i < self.num_stages - 1:
bbox_label = cls_score.argmax(dim=1)
rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred, img_metas[0])
cls_score = sum(ms_scores) / self.num_stages
(det_bboxes, det_labels) = self.bbox_head[-1].get_det_bboxes(rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg)
bbox_result = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes)
ms_bbox_result['ensemble'] = bbox_result
if self.with_mask:
if det_bboxes.shape[0] == 0:
mask_classes = self.mask_head[-1].num_classes - 1
segm_result = [[] for _ in range(mask_classes)]
else:
if isinstance(scale_factor, float):
_bboxes = det_bboxes[:, :4] * scale_factor if rescale else det_bboxes
else:
_bboxes = det_bboxes[:, :4] * torch.from_numpy(scale_factor).to(det_bboxes.device) if rescale else det_bboxes
mask_rois = bbox2roi([_bboxes])
aug_masks = []
for i in range(self.num_stages):
mask_roi_extractor = self.mask_roi_extractor[i]
mask_feats = mask_roi_extractor(x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head[i](mask_feats)
aug_masks.append(mask_pred.sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, [img_metas] * self.num_stages, self.test_cfg.rcnn)
segm_result = self.mask_head[-1].get_seg_masks(merged_masks, _bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale)
ms_segm_result['ensemble'] = segm_result
if self.with_mask:
results = (ms_bbox_result['ensemble'], ms_segm_result['ensemble'])
else:
results = ms_bbox_result['ensemble']
return results
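# Illustration only (scores invented, not the mmdet API): the cascade head above averages the
# classification scores collected from every stage before decoding boxes,
# cls_score = sum(ms_scores) / num_stages. A tiny NumPy version of that ensemble step:
import numpy as np

ms_scores = [np.array([[2.0, 0.5]]), np.array([[1.0, 1.5]]), np.array([[3.0, 1.0]])]
num_stages = len(ms_scores)
cls_score = sum(ms_scores) / num_stages
print(cls_score)   # -> [[2. 1.]]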
|
EfficientDet-bifpn
|
positive
|
def flush():
while True:
try:
log.debug('Flushing metrics in greenlet')
<DeepExtract>
try:
if self._is_flush_in_progress:
log.debug('A flush is already in progress. Skipping this one.')
return False
if self._disabled:
log.info("Not flushing because we're disabled.")
return False
self._is_flush_in_progress = True
                (metrics, dists) = self._get_aggregate_metrics_and_dists(time())
count_metrics = len(metrics)
if count_metrics:
self.flush_count += 1
log.debug('Flush #%s sending %s metrics' % (self.flush_count, count_metrics))
self.reporter.flush_metrics(metrics)
else:
log.debug('No metrics to flush. Continuing.')
count_dists = len(dists)
if count_dists:
self.flush_count += 1
log.debug('Flush #%s sending %s distributions' % (self.flush_count, count_dists))
self.reporter.flush_distributions(dists)
else:
log.debug('No distributions to flush. Continuing.')
events = self._get_aggregate_events()
count_events = len(events)
if count_events:
self.flush_count += 1
log.debug('Flush #%s sending %s events' % (self.flush_count, count_events))
self.reporter.flush_events(events)
else:
log.debug('No events to flush. Continuing.')
except ApiNotInitialized:
raise
except Exception:
try:
log.exception('Error flushing metrics and events')
except Exception:
pass
finally:
self._is_flush_in_progress = False
</DeepExtract>
gevent.sleep(self.flush_interval)
except Exception:
try:
log.exception('Error flushing in greenlet')
except Exception:
pass
|
def flush():
while True:
try:
log.debug('Flushing metrics in greenlet')
try:
if self._is_flush_in_progress:
log.debug('A flush is already in progress. Skipping this one.')
return False
if self._disabled:
log.info("Not flushing because we're disabled.")
return False
self._is_flush_in_progress = True
                (metrics, dists) = self._get_aggregate_metrics_and_dists(time())
count_metrics = len(metrics)
if count_metrics:
self.flush_count += 1
log.debug('Flush #%s sending %s metrics' % (self.flush_count, count_metrics))
self.reporter.flush_metrics(metrics)
else:
log.debug('No metrics to flush. Continuing.')
count_dists = len(dists)
if count_dists:
self.flush_count += 1
log.debug('Flush #%s sending %s distributions' % (self.flush_count, count_dists))
self.reporter.flush_distributions(dists)
else:
log.debug('No distributions to flush. Continuing.')
events = self._get_aggregate_events()
count_events = len(events)
if count_events:
self.flush_count += 1
log.debug('Flush #%s sending %s events' % (self.flush_count, count_events))
self.reporter.flush_events(events)
else:
log.debug('No events to flush. Continuing.')
except ApiNotInitialized:
raise
except Exception:
try:
log.exception('Error flushing metrics and events')
except Exception:
pass
finally:
self._is_flush_in_progress = False
gevent.sleep(self.flush_interval)
except Exception:
try:
log.exception('Error flushing in greenlet')
except Exception:
pass
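# Hedged sketch (class and names invented, not the datadogpy API): the flush above guards
# against overlapping flushes with an _is_flush_in_progress flag that is always cleared in a
# finally block. The bare pattern:
class Flusher(object):
    def __init__(self):
        self._is_flush_in_progress = False

    def flush(self):
        if self._is_flush_in_progress:
            return False              # another flush is running; skip this one
        self._is_flush_in_progress = True
        try:
            # ... send metrics / distributions / events here ...
            return True
        finally:
            self._is_flush_in_progress = False

f = Flusher()
print(f.flush(), f.flush())           # -> True True (the flag is reset by finally)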
|
datadogpy
|
positive
|
def test_swiz(self):
<DeepExtract>
func = TestPluginIl.bv.get_functions_by_name('test_swiz')[0]
</DeepExtract>
self.assertEqual(self.list_asm(func), '\n{ R1 = swiz(R0) }\n{ jumpr LR }')
self.assertEqual(self.list_llil(func), '\n0: temp1.d = (R0 & 0xff) << 0x18 | (R0 & 0xff00) << 8 | (R0 & 0xff0000) u>> 8 | (R0 & 0xff000000) u>> 0x18\n1: R1 = temp1.d\n2: temp200.d = LR\n3: <return> jump(LR)')
|
def test_swiz(self):
func = TestPluginIl.bv.get_functions_by_name('test_swiz')[0]
self.assertEqual(self.list_asm(func), '\n{ R1 = swiz(R0) }\n{ jumpr LR }')
self.assertEqual(self.list_llil(func), '\n0: temp1.d = (R0 & 0xff) << 0x18 | (R0 & 0xff00) << 8 | (R0 & 0xff0000) u>> 8 | (R0 & 0xff000000) u>> 0x18\n1: R1 = temp1.d\n2: temp200.d = LR\n3: <return> jump(LR)')
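# Illustration only (helper name invented): the LLIL string checked in test_swiz describes a
# 32-bit byte swap. The same shift/mask expression in plain Python, applied to a sample value:
def swiz32(x):
    return ((x & 0xff) << 0x18 | (x & 0xff00) << 8 |
            (x & 0xff0000) >> 8 | (x & 0xff000000) >> 0x18) & 0xffffffff

assert swiz32(0x11223344) == 0x44332211
print(hex(swiz32(0x11223344)))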
|
binja-hexagon
|
positive
|
def write(self, shape_id):
root = Element('c:userShapes', {'xmlns:c': 'http://schemas.openxmlformats.org/drawingml/2006/chart'})
for shape in self._shapes:
anchor = SubElement(root, 'cdr:relSizeAnchor', {'xmlns:cdr': 'http://schemas.openxmlformats.org/drawingml/2006/chartDrawing'})
(xstart, ystart, xend, yend) = shape.get_coordinates()
_from = SubElement(anchor, 'cdr:from')
SubElement(_from, 'cdr:x').text = str(xstart)
SubElement(_from, 'cdr:y').text = str(ystart)
_to = SubElement(anchor, 'cdr:to')
SubElement(_to, 'cdr:x').text = str(xend)
SubElement(_to, 'cdr:y').text = str(yend)
sp = SubElement(anchor, 'cdr:sp', {'macro': '', 'textlink': ''})
nvspr = SubElement(sp, 'cdr:nvSpPr')
SubElement(nvspr, 'cdr:cNvPr', {'id': str(shape_id), 'name': 'shape %s' % shape_id})
SubElement(nvspr, 'cdr:cNvSpPr')
sppr = SubElement(sp, 'cdr:spPr')
frm = SubElement(sppr, 'a:xfrm', {'xmlns:a': self.schema})
SubElement(frm, 'a:off', {'x': '0', 'y': '0'})
SubElement(frm, 'a:ext', {'cx': '0', 'cy': '0'})
prstgeom = SubElement(sppr, 'a:prstGeom', {'xmlns:a': self.schema, 'prst': str(shape.style)})
SubElement(prstgeom, 'a:avLst')
fill = SubElement(sppr, 'a:solidFill', {'xmlns:a': self.schema})
SubElement(fill, 'a:srgbClr', {'val': shape.color})
border = SubElement(sppr, 'a:ln', {'xmlns:a': self.schema, 'w': str(shape._border_width)})
sf = SubElement(border, 'a:solidFill')
SubElement(sf, 'a:srgbClr', {'val': shape.border_color})
<DeepExtract>
style = SubElement(sp, 'cdr:style')
ln_ref = SubElement(style, 'a:lnRef', {'xmlns:a': self.schema, 'idx': '2'})
scheme_clr = SubElement(ln_ref, 'a:schemeClr', {'val': 'accent1'})
SubElement(scheme_clr, 'a:shade', {'val': '50000'})
fill_ref = SubElement(style, 'a:fillRef', {'xmlns:a': self.schema, 'idx': '1'})
SubElement(fill_ref, 'a:schemeClr', {'val': 'accent1'})
effect_ref = SubElement(style, 'a:effectRef', {'xmlns:a': self.schema, 'idx': '0'})
SubElement(effect_ref, 'a:schemeClr', {'val': 'accent1'})
font_ref = SubElement(style, 'a:fontRef', {'xmlns:a': self.schema, 'idx': 'minor'})
SubElement(font_ref, 'a:schemeClr', {'val': 'lt1'})
</DeepExtract>
<DeepExtract>
tx_body = SubElement(sp, 'cdr:txBody')
SubElement(tx_body, 'a:bodyPr', {'xmlns:a': self.schema, 'vertOverflow': 'clip'})
SubElement(tx_body, 'a:lstStyle', {'xmlns:a': self.schema})
p = SubElement(tx_body, 'a:p', {'xmlns:a': self.schema})
if shape.text:
r = SubElement(p, 'a:r')
rpr = SubElement(r, 'a:rPr', {'lang': 'en-US'})
fill = SubElement(rpr, 'a:solidFill')
SubElement(fill, 'a:srgbClr', {'val': shape.text_color})
SubElement(r, 'a:t').text = shape.text
else:
SubElement(p, 'a:endParaRPr', {'lang': 'en-US'})
</DeepExtract>
shape_id += 1
return get_document_content(root)
|
def write(self, shape_id):
root = Element('c:userShapes', {'xmlns:c': 'http://schemas.openxmlformats.org/drawingml/2006/chart'})
for shape in self._shapes:
anchor = SubElement(root, 'cdr:relSizeAnchor', {'xmlns:cdr': 'http://schemas.openxmlformats.org/drawingml/2006/chartDrawing'})
(xstart, ystart, xend, yend) = shape.get_coordinates()
_from = SubElement(anchor, 'cdr:from')
SubElement(_from, 'cdr:x').text = str(xstart)
SubElement(_from, 'cdr:y').text = str(ystart)
_to = SubElement(anchor, 'cdr:to')
SubElement(_to, 'cdr:x').text = str(xend)
SubElement(_to, 'cdr:y').text = str(yend)
sp = SubElement(anchor, 'cdr:sp', {'macro': '', 'textlink': ''})
nvspr = SubElement(sp, 'cdr:nvSpPr')
SubElement(nvspr, 'cdr:cNvPr', {'id': str(shape_id), 'name': 'shape %s' % shape_id})
SubElement(nvspr, 'cdr:cNvSpPr')
sppr = SubElement(sp, 'cdr:spPr')
frm = SubElement(sppr, 'a:xfrm', {'xmlns:a': self.schema})
SubElement(frm, 'a:off', {'x': '0', 'y': '0'})
SubElement(frm, 'a:ext', {'cx': '0', 'cy': '0'})
prstgeom = SubElement(sppr, 'a:prstGeom', {'xmlns:a': self.schema, 'prst': str(shape.style)})
SubElement(prstgeom, 'a:avLst')
fill = SubElement(sppr, 'a:solidFill', {'xmlns:a': self.schema})
SubElement(fill, 'a:srgbClr', {'val': shape.color})
border = SubElement(sppr, 'a:ln', {'xmlns:a': self.schema, 'w': str(shape._border_width)})
sf = SubElement(border, 'a:solidFill')
SubElement(sf, 'a:srgbClr', {'val': shape.border_color})
style = SubElement(sp, 'cdr:style')
ln_ref = SubElement(style, 'a:lnRef', {'xmlns:a': self.schema, 'idx': '2'})
scheme_clr = SubElement(ln_ref, 'a:schemeClr', {'val': 'accent1'})
SubElement(scheme_clr, 'a:shade', {'val': '50000'})
fill_ref = SubElement(style, 'a:fillRef', {'xmlns:a': self.schema, 'idx': '1'})
SubElement(fill_ref, 'a:schemeClr', {'val': 'accent1'})
effect_ref = SubElement(style, 'a:effectRef', {'xmlns:a': self.schema, 'idx': '0'})
SubElement(effect_ref, 'a:schemeClr', {'val': 'accent1'})
font_ref = SubElement(style, 'a:fontRef', {'xmlns:a': self.schema, 'idx': 'minor'})
SubElement(font_ref, 'a:schemeClr', {'val': 'lt1'})
tx_body = SubElement(sp, 'cdr:txBody')
SubElement(tx_body, 'a:bodyPr', {'xmlns:a': self.schema, 'vertOverflow': 'clip'})
SubElement(tx_body, 'a:lstStyle', {'xmlns:a': self.schema})
p = SubElement(tx_body, 'a:p', {'xmlns:a': self.schema})
if shape.text:
r = SubElement(p, 'a:r')
rpr = SubElement(r, 'a:rPr', {'lang': 'en-US'})
fill = SubElement(rpr, 'a:solidFill')
SubElement(fill, 'a:srgbClr', {'val': shape.text_color})
SubElement(r, 'a:t').text = shape.text
else:
SubElement(p, 'a:endParaRPr', {'lang': 'en-US'})
shape_id += 1
return get_document_content(root)
|
dataproxy
|
positive
|
def cache_bi_event(event, multi_events=False):
bi_file_in_use_lock.acquire()
outFile = None
try:
<DeepExtract>
if not os.path.isfile(bi_cfg_file):
cached_events = []
else:
f = None
try:
f = open(bi_cfg_file)
cached_events = json.load(f)
f.close()
if not isinstance(cached_events, list):
cached_events = []
except:
cached_events = []
finally:
if f is not None:
f.close()
cache_events = cached_events
</DeepExtract>
if multi_events:
need_cache_size = len(event)
else:
need_cache_size = 1
events_size = len(cache_events)
if events_size >= Statistic.MAX_CACHE_EVENTS:
start_idx = events_size - (Statistic.MAX_CACHE_EVENTS - need_cache_size)
cache_events = cache_events[start_idx:]
if multi_events:
for e in event:
cache_events.append(e)
else:
cache_events.append(event)
outFile = open(bi_cfg_file, 'w')
json.dump(cache_events, outFile)
outFile.close()
except:
if outFile is not None:
outFile.close()
finally:
bi_file_in_use_lock.release()
|
def cache_bi_event(event, multi_events=False):
bi_file_in_use_lock.acquire()
outFile = None
try:
if not os.path.isfile(bi_cfg_file):
cached_events = []
else:
f = None
try:
f = open(bi_cfg_file)
cached_events = json.load(f)
f.close()
if not isinstance(cached_events, list):
cached_events = []
except:
cached_events = []
finally:
if f is not None:
f.close()
cache_events = cached_events
if multi_events:
need_cache_size = len(event)
else:
need_cache_size = 1
events_size = len(cache_events)
if events_size >= Statistic.MAX_CACHE_EVENTS:
start_idx = events_size - (Statistic.MAX_CACHE_EVENTS - need_cache_size)
cache_events = cache_events[start_idx:]
if multi_events:
for e in event:
cache_events.append(e)
else:
cache_events.append(event)
outFile = open(bi_cfg_file, 'w')
json.dump(cache_events, outFile)
outFile.close()
except:
if outFile is not None:
outFile.close()
finally:
bi_file_in_use_lock.release()
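# Hedged sketch (numbers invented): the trimming step above keeps the cache at
# MAX_CACHE_EVENTS entries by dropping the oldest ones before appending the new events.
MAX_CACHE_EVENTS = 5
cache_events = [1, 2, 3, 4, 5]           # cache already full
new_events = [6, 7]
if len(cache_events) >= MAX_CACHE_EVENTS:
    start_idx = len(cache_events) - (MAX_CACHE_EVENTS - len(new_events))
    cache_events = cache_events[start_idx:]
cache_events.extend(new_events)
print(cache_events)                      # -> [3, 4, 5, 6, 7]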
|
cocos2d-console
|
positive
|
def main(args, config):
utils.init_distributed_mode(args)
device = torch.device(args.device)
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
print('Creating dataset')
(grd_train_dataset, grd_test_dataset) = create_dataset('grounding', config)
datasets = [grd_train_dataset, grd_test_dataset]
if args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
samplers = create_sampler(datasets, [True, False], num_tasks, global_rank)
else:
samplers = [None, None]
(train_loader, test_loader) = create_loader(datasets, samplers, batch_size=[config['batch_size'], config['batch_size']], num_workers=[4, 4], is_trains=[True, False], collate_fns=[None, None])
tokenizer = BertTokenizer.from_pretrained(args.text_encoder)
refer = REFER(config['refcoco_data'], 'refcoco+', 'unc')
dets = json.load(open(config['det_file'], 'r'))
cocos = json.load(open(config['coco_file'], 'r'))
print('Creating model')
model = ALBEF(config=config, text_encoder=args.text_encoder, tokenizer=tokenizer)
if args.checkpoint:
checkpoint = torch.load(args.checkpoint, map_location='cpu')
state_dict = checkpoint['model']
pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)
state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
m_pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], model.visual_encoder_m)
state_dict['visual_encoder_m.pos_embed'] = m_pos_embed_reshaped
for key in list(state_dict.keys()):
if 'bert' in key:
encoder_key = key.replace('bert.', '')
state_dict[encoder_key] = state_dict[key]
del state_dict[key]
msg = model.load_state_dict(state_dict, strict=False)
print('load checkpoint from %s' % args.checkpoint)
print(msg)
model = model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
arg_opt = utils.AttrDict(config['optimizer'])
optimizer = create_optimizer(arg_opt, model)
arg_sche = utils.AttrDict(config['schedular'])
(lr_scheduler, _) = create_scheduler(arg_sche, optimizer)
max_epoch = config['schedular']['epochs']
warmup_steps = config['schedular']['warmup_epochs']
best = 0
print('Start training')
start_time = time.time()
for epoch in range(0, max_epoch):
if not args.evaluate:
if args.distributed:
train_loader.sampler.set_epoch(epoch)
<DeepExtract>
model.train()
metric_logger = utils.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('loss_itm', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
header = 'Train Epoch: [{}]'.format(epoch)
print_freq = 50
step_size = 100
warmup_iterations = warmup_steps * step_size
for (i, (image, text, idx)) in enumerate(metric_logger.log_every(train_loader, print_freq, header)):
image = image.to(device, non_blocking=True)
idx = idx.to(device, non_blocking=True)
text_input = tokenizer(text, padding='longest', max_length=30, return_tensors='pt').to(device)
if epoch > 0 or not config['warm_up']:
alpha = config['alpha']
else:
alpha = config['alpha'] * min(1, i / len(train_loader))
(loss_ita, loss_itm) = model(image, text_input, alpha=alpha, idx=idx)
loss = loss_ita + loss_itm
optimizer.zero_grad()
loss.backward()
optimizer.step()
metric_logger.update(loss_itm=loss_itm.item())
metric_logger.update(loss_ita=loss_ita.item())
metric_logger.update(lr=optimizer.param_groups[0]['lr'])
if epoch == 0 and i % step_size == 0 and (i <= warmup_iterations):
lr_scheduler.step(i // step_size)
metric_logger.synchronize_between_processes()
print('Averaged stats:', metric_logger.global_avg())
train_stats = {k: '{:.3f}'.format(meter.global_avg) for (k, meter) in metric_logger.meters.items()}
</DeepExtract>
<DeepExtract>
model_without_ddp.eval()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Evaluation:'
print_freq = 50
if args.gradcam_mode == 'itm':
model_without_ddp.text_encoder.base_model.base_model.encoder.layer[args.block_num].crossattention.self.save_attention = True
result = []
for (image, text, ref_ids) in metric_logger.log_every(test_loader, print_freq, header):
image = image.to(device)
text_input = tokenizer(text, padding='longest', return_tensors='pt').to(device)
if args.gradcam_mode == 'itm':
image_embeds = model_without_ddp.visual_encoder(image)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
output = model_without_ddp.text_encoder(text_input.input_ids, attention_mask=text_input.attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True)
vl_embeddings = output.last_hidden_state[:, 0, :]
vl_output = model_without_ddp.itm_head(vl_embeddings)
loss = vl_output[:, 1].sum()
model_without_ddp.zero_grad()
loss.backward()
with torch.no_grad():
mask = text_input.attention_mask.view(text_input.attention_mask.size(0), 1, -1, 1, 1)
grads = model_without_ddp.text_encoder.base_model.base_model.encoder.layer[args.block_num].crossattention.self.get_attn_gradients().detach()
cams = model_without_ddp.text_encoder.base_model.base_model.encoder.layer[args.block_num].crossattention.self.get_attention_map().detach()
cams = cams[:, :, :, 1:].reshape(image.size(0), 12, -1, 24, 24) * mask
grads = grads[:, :, :, 1:].clamp(min=0).reshape(image.size(0), 12, -1, 24, 24) * mask
gradcam = cams * grads
gradcam = gradcam.mean(1).mean(1)
elif args.gradcam_mode == 'itc':
image_embeds = model_without_ddp.visual_encoder(image, register_blk=args.block_num)
image_feat = F.normalize(model_without_ddp.vision_proj(image_embeds[:, 0, :]), dim=-1)
text_output = model_without_ddp.text_encoder(text_input.input_ids, attention_mask=text_input.attention_mask, return_dict=True, mode='text')
text_embeds = text_output.last_hidden_state
text_feat = F.normalize(model_without_ddp.text_proj(text_embeds[:, 0, :]), dim=-1)
sim = image_feat @ text_feat.t() / model_without_ddp.temp
loss = sim.diag().sum()
model_without_ddp.zero_grad()
loss.backward()
with torch.no_grad():
grad = model_without_ddp.visual_encoder.blocks[args.block_num].attn.get_attn_gradients().detach()
cam = model_without_ddp.visual_encoder.blocks[args.block_num].attn.get_attention_map().detach()
cam = cam[:, :, 0, 1:].reshape(image.size(0), -1, 24, 24)
grad = grad[:, :, 0, 1:].reshape(image.size(0), -1, 24, 24).clamp(0)
gradcam = (cam * grad).mean(1)
for (r_id, cam) in zip(ref_ids, gradcam):
result.append({'ref_id': r_id.item(), 'pred': cam})
if args.gradcam_mode == 'itm':
model_without_ddp.text_encoder.base_model.base_model.encoder.layer[args.block_num].crossattention.self.save_attention = False
result = result
</DeepExtract>
results = collect_result(result, args.result_dir, 'epoch%d' % epoch, is_json=False, is_list=True)
if utils.is_main_process():
grounding_acc = grounding_eval(results, dets, cocos, refer, alpha=0.5, mask_size=24)
if args.evaluate:
log_stats = {**{f'{k}': v for (k, v) in grounding_acc.items()}, 'epoch': epoch}
else:
log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, **{f'{k}': v for (k, v) in grounding_acc.items()}, 'epoch': epoch}
if grounding_acc['val_d'] > best:
save_obj = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'config': config, 'epoch': epoch}
torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth'))
best = grounding_acc['val_d']
with open(os.path.join(args.output_dir, 'log.txt'), 'a') as f:
f.write(json.dumps(log_stats) + '\n')
if args.evaluate:
break
lr_scheduler.step(epoch + warmup_steps + 1)
dist.barrier()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
|
def main(args, config):
utils.init_distributed_mode(args)
device = torch.device(args.device)
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
print('Creating dataset')
(grd_train_dataset, grd_test_dataset) = create_dataset('grounding', config)
datasets = [grd_train_dataset, grd_test_dataset]
if args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
samplers = create_sampler(datasets, [True, False], num_tasks, global_rank)
else:
samplers = [None, None]
(train_loader, test_loader) = create_loader(datasets, samplers, batch_size=[config['batch_size'], config['batch_size']], num_workers=[4, 4], is_trains=[True, False], collate_fns=[None, None])
tokenizer = BertTokenizer.from_pretrained(args.text_encoder)
refer = REFER(config['refcoco_data'], 'refcoco+', 'unc')
dets = json.load(open(config['det_file'], 'r'))
cocos = json.load(open(config['coco_file'], 'r'))
print('Creating model')
model = ALBEF(config=config, text_encoder=args.text_encoder, tokenizer=tokenizer)
if args.checkpoint:
checkpoint = torch.load(args.checkpoint, map_location='cpu')
state_dict = checkpoint['model']
pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)
state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
m_pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], model.visual_encoder_m)
state_dict['visual_encoder_m.pos_embed'] = m_pos_embed_reshaped
for key in list(state_dict.keys()):
if 'bert' in key:
encoder_key = key.replace('bert.', '')
state_dict[encoder_key] = state_dict[key]
del state_dict[key]
msg = model.load_state_dict(state_dict, strict=False)
print('load checkpoint from %s' % args.checkpoint)
print(msg)
model = model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
arg_opt = utils.AttrDict(config['optimizer'])
optimizer = create_optimizer(arg_opt, model)
arg_sche = utils.AttrDict(config['schedular'])
(lr_scheduler, _) = create_scheduler(arg_sche, optimizer)
max_epoch = config['schedular']['epochs']
warmup_steps = config['schedular']['warmup_epochs']
best = 0
print('Start training')
start_time = time.time()
for epoch in range(0, max_epoch):
if not args.evaluate:
if args.distributed:
train_loader.sampler.set_epoch(epoch)
model.train()
metric_logger = utils.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('loss_itm', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
header = 'Train Epoch: [{}]'.format(epoch)
print_freq = 50
step_size = 100
warmup_iterations = warmup_steps * step_size
for (i, (image, text, idx)) in enumerate(metric_logger.log_every(train_loader, print_freq, header)):
image = image.to(device, non_blocking=True)
idx = idx.to(device, non_blocking=True)
text_input = tokenizer(text, padding='longest', max_length=30, return_tensors='pt').to(device)
if epoch > 0 or not config['warm_up']:
alpha = config['alpha']
else:
alpha = config['alpha'] * min(1, i / len(train_loader))
(loss_ita, loss_itm) = model(image, text_input, alpha=alpha, idx=idx)
loss = loss_ita + loss_itm
optimizer.zero_grad()
loss.backward()
optimizer.step()
metric_logger.update(loss_itm=loss_itm.item())
metric_logger.update(loss_ita=loss_ita.item())
metric_logger.update(lr=optimizer.param_groups[0]['lr'])
if epoch == 0 and i % step_size == 0 and (i <= warmup_iterations):
lr_scheduler.step(i // step_size)
metric_logger.synchronize_between_processes()
print('Averaged stats:', metric_logger.global_avg())
train_stats = {k: '{:.3f}'.format(meter.global_avg) for (k, meter) in metric_logger.meters.items()}
model_without_ddp.eval()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Evaluation:'
print_freq = 50
if args.gradcam_mode == 'itm':
model_without_ddp.text_encoder.base_model.base_model.encoder.layer[args.block_num].crossattention.self.save_attention = True
result = []
for (image, text, ref_ids) in metric_logger.log_every(test_loader, print_freq, header):
image = image.to(device)
text_input = tokenizer(text, padding='longest', return_tensors='pt').to(device)
if args.gradcam_mode == 'itm':
image_embeds = model_without_ddp.visual_encoder(image)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
output = model_without_ddp.text_encoder(text_input.input_ids, attention_mask=text_input.attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True)
vl_embeddings = output.last_hidden_state[:, 0, :]
vl_output = model_without_ddp.itm_head(vl_embeddings)
loss = vl_output[:, 1].sum()
model_without_ddp.zero_grad()
loss.backward()
with torch.no_grad():
mask = text_input.attention_mask.view(text_input.attention_mask.size(0), 1, -1, 1, 1)
grads = model_without_ddp.text_encoder.base_model.base_model.encoder.layer[args.block_num].crossattention.self.get_attn_gradients().detach()
cams = model_without_ddp.text_encoder.base_model.base_model.encoder.layer[args.block_num].crossattention.self.get_attention_map().detach()
cams = cams[:, :, :, 1:].reshape(image.size(0), 12, -1, 24, 24) * mask
grads = grads[:, :, :, 1:].clamp(min=0).reshape(image.size(0), 12, -1, 24, 24) * mask
gradcam = cams * grads
gradcam = gradcam.mean(1).mean(1)
elif args.gradcam_mode == 'itc':
image_embeds = model_without_ddp.visual_encoder(image, register_blk=args.block_num)
image_feat = F.normalize(model_without_ddp.vision_proj(image_embeds[:, 0, :]), dim=-1)
text_output = model_without_ddp.text_encoder(text_input.input_ids, attention_mask=text_input.attention_mask, return_dict=True, mode='text')
text_embeds = text_output.last_hidden_state
text_feat = F.normalize(model_without_ddp.text_proj(text_embeds[:, 0, :]), dim=-1)
sim = image_feat @ text_feat.t() / model_without_ddp.temp
loss = sim.diag().sum()
model_without_ddp.zero_grad()
loss.backward()
with torch.no_grad():
grad = model_without_ddp.visual_encoder.blocks[args.block_num].attn.get_attn_gradients().detach()
cam = model_without_ddp.visual_encoder.blocks[args.block_num].attn.get_attention_map().detach()
cam = cam[:, :, 0, 1:].reshape(image.size(0), -1, 24, 24)
grad = grad[:, :, 0, 1:].reshape(image.size(0), -1, 24, 24).clamp(0)
gradcam = (cam * grad).mean(1)
for (r_id, cam) in zip(ref_ids, gradcam):
result.append({'ref_id': r_id.item(), 'pred': cam})
if args.gradcam_mode == 'itm':
model_without_ddp.text_encoder.base_model.base_model.encoder.layer[args.block_num].crossattention.self.save_attention = False
result = result
results = collect_result(result, args.result_dir, 'epoch%d' % epoch, is_json=False, is_list=True)
if utils.is_main_process():
grounding_acc = grounding_eval(results, dets, cocos, refer, alpha=0.5, mask_size=24)
if args.evaluate:
log_stats = {**{f'{k}': v for (k, v) in grounding_acc.items()}, 'epoch': epoch}
else:
log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, **{f'{k}': v for (k, v) in grounding_acc.items()}, 'epoch': epoch}
if grounding_acc['val_d'] > best:
save_obj = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'config': config, 'epoch': epoch}
torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_best.pth'))
best = grounding_acc['val_d']
with open(os.path.join(args.output_dir, 'log.txt'), 'a') as f:
f.write(json.dumps(log_stats) + '\n')
if args.evaluate:
break
lr_scheduler.step(epoch + warmup_steps + 1)
dist.barrier()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
|
ALBEF
|
positive
|
def exitDataModelQuery(self, ctx):
<DeepExtract>
query = self._stack.pop()
</DeepExtract>
<DeepExtract>
action = self._stack.pop()
</DeepExtract>
<DeepExtract>
event_type = self._stack.pop()
</DeepExtract>
<DeepExtract>
self._stack.append(DataModelQuery(object_name=event_type, action=action, query=query))
</DeepExtract>
|
def exitDataModelQuery(self, ctx):
query = self._stack.pop()
action = self._stack.pop()
event_type = self._stack.pop()
self._stack.append(DataModelQuery(object_name=event_type, action=action, query=query))

|
cascade-server
|
positive
|
def stack_with_locals(f):
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
else:
limit = None
frames = []
n = 0
while f is not None and (limit is None or n < limit):
(lineno, co) = (f.f_lineno, f.f_code)
(name, filename) = (co.co_name, co.co_filename)
args = inspect.getargvalues(f)
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line:
line = line.strip()
else:
line = None
frames.append((filename, lineno, name, line, f.f_locals, args))
f = f.f_back
n += 1
frames.reverse()
out = []
for (filename, lineno, name, line, localvars, args) in frames:
out.append(' File "%s", line %d, in %s' % (filename, lineno, name))
if line:
out.append(' %s' % line.strip())
args = inspect.formatargvalues(*args, formatvalue=formatvalue)
out.append('\n Arguments: %s%s' % (name, args))
if localvars:
out.append(' Local variables:\n')
try:
<DeepExtract>
reprs = SafePrettyPrinter(indent=1, width=76, depth=depth).pformat(localvars)
</DeepExtract>
except Exception:
reprs = 'failed to format local variables'
out += [' ' + l for l in reprs.splitlines()]
out.append('')
return '\n'.join(out)
|
def stack_with_locals(f):
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
else:
limit = None
frames = []
n = 0
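    # Walk the frame chain outwards (respecting sys.tracebacklimit), recording
    # file/line, source text, call arguments and local variables for each frame.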
while f is not None and (limit is None or n < limit):
(lineno, co) = (f.f_lineno, f.f_code)
(name, filename) = (co.co_name, co.co_filename)
args = inspect.getargvalues(f)
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line:
line = line.strip()
else:
line = None
frames.append((filename, lineno, name, line, f.f_locals, args))
f = f.f_back
n += 1
frames.reverse()
out = []
for (filename, lineno, name, line, localvars, args) in frames:
out.append(' File "%s", line %d, in %s' % (filename, lineno, name))
if line:
out.append(' %s' % line.strip())
args = inspect.formatargvalues(*args, formatvalue=formatvalue)
out.append('\n Arguments: %s%s' % (name, args))
if localvars:
out.append(' Local variables:\n')
try:
reprs = SafePrettyPrinter(indent=1, width=76, depth=depth).pformat(localvars)
except Exception:
reprs = 'failed to format local variables'
out += [' ' + l for l in reprs.splitlines()]
out.append('')
return '\n'.join(out)
|
agraph-python
|
positive
|
def test_unknown(self, client):
<DeepExtract>
self._create_fixture_user()
self.login(client, 'fixture@example.com', 'fixture')
</DeepExtract>
unknown_url = self.url.format('unknown')
response = client.get(unknown_url, follow_redirects=True)
if 'compare' in unknown_url and response.status_code == 404:
if 'not found: ellipsis (...) expected as part of URL':
return
if getattr(self, 'redirect_on_unknown', True):
assert b'local-dev-conbench - Home' in response.data, response.data
else:
title = f'local-dev-conbench - {self.title}'.encode()
assert title in response.data, response.data
|
def test_unknown(self, client):
self._create_fixture_user()
self.login(client, 'fixture@example.com', 'fixture')
unknown_url = self.url.format('unknown')
response = client.get(unknown_url, follow_redirects=True)
if 'compare' in unknown_url and response.status_code == 404:
if 'not found: ellipsis (...) expected as part of URL':
return
if getattr(self, 'redirect_on_unknown', True):
assert b'local-dev-conbench - Home' in response.data, response.data
else:
title = f'local-dev-conbench - {self.title}'.encode()
assert title in response.data, response.data
|
conbench
|
positive
|
def execute(self):
image = self.camera_0.getImage().data
image_1 = self.camera_1.getImage().data
image_2 = self.camera_2.getImage().data
image_3 = self.camera_3.getImage().data
bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle)
bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB)
if self.cameras_first_images == []:
self.cameras_first_images.append(image)
self.cameras_first_images.append(image_1)
self.cameras_first_images.append(image_2)
self.cameras_first_images.append(image_3)
self.cameras_first_images.append(bird_eye_view_1)
self.cameras_last_images = [image, image_1, image_2, image_3, bird_eye_view_1]
<DeepExtract>
if image_1.shape[0] != image_1.shape[1]:
if image_1.shape[0] > image_1.shape[1]:
difference = image_1.shape[0] - image_1.shape[1]
(extra_left, extra_right) = (int(difference / 2), int(difference / 2))
(extra_top, extra_bottom) = (0, 0)
else:
difference = image_1.shape[1] - image_1.shape[0]
(extra_left, extra_right) = (0, 0)
(extra_top, extra_bottom) = (int(difference / 2), int(difference / 2))
image_1 = np.pad(image_1, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
self.handler.update_frame('frame_1', image_1)
</DeepExtract>
<DeepExtract>
if image_2.shape[0] != image_2.shape[1]:
if image_2.shape[0] > image_2.shape[1]:
difference = image_2.shape[0] - image_2.shape[1]
(extra_left, extra_right) = (int(difference / 2), int(difference / 2))
(extra_top, extra_bottom) = (0, 0)
else:
difference = image_2.shape[1] - image_2.shape[0]
(extra_left, extra_right) = (0, 0)
(extra_top, extra_bottom) = (int(difference / 2), int(difference / 2))
image_2 = np.pad(image_2, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
self.handler.update_frame('frame_2', image_2)
</DeepExtract>
<DeepExtract>
if image_3.shape[0] != image_3.shape[1]:
if image_3.shape[0] > image_3.shape[1]:
difference = image_3.shape[0] - image_3.shape[1]
(extra_left, extra_right) = (int(difference / 2), int(difference / 2))
(extra_top, extra_bottom) = (0, 0)
else:
difference = image_3.shape[1] - image_3.shape[0]
(extra_left, extra_right) = (0, 0)
(extra_top, extra_bottom) = (int(difference / 2), int(difference / 2))
image_3 = np.pad(image_3, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
self.handler.update_frame('frame_3', image_3)
</DeepExtract>
<DeepExtract>
if bird_eye_view_1.shape[0] != bird_eye_view_1.shape[1]:
if bird_eye_view_1.shape[0] > bird_eye_view_1.shape[1]:
difference = bird_eye_view_1.shape[0] - bird_eye_view_1.shape[1]
(extra_left, extra_right) = (int(difference / 2), int(difference / 2))
(extra_top, extra_bottom) = (0, 0)
else:
difference = bird_eye_view_1.shape[1] - bird_eye_view_1.shape[0]
(extra_left, extra_right) = (0, 0)
(extra_top, extra_bottom) = (int(difference / 2), int(difference / 2))
bird_eye_view_1 = np.pad(bird_eye_view_1, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
self.handler.update_frame('frame_0', bird_eye_view_1)
</DeepExtract>
<DeepExtract>
self.handler.update_pose3d(self.pose.getPose3d())
</DeepExtract>
image_shape = (50, 150)
img_base = cv2.resize(bird_eye_view_1, image_shape)
AUGMENTATIONS_TEST = Compose([Normalize()])
image = AUGMENTATIONS_TEST(image=img_base)
img = image['image']
self.bird_eye_view_images += 1
if (self.previous_bird_eye_view_image == img).all() == False:
self.bird_eye_view_unique_images += 1
self.previous_bird_eye_view_image = img
velocity_dim = np.full((150, 50), self.previous_speed / 30)
new_img_vel = np.dstack((img, velocity_dim))
img = new_img_vel
img = np.expand_dims(img, axis=0)
start_time = time.time()
try:
prediction = self.net.predict(img, verbose=0)
self.inference_times.append(time.time() - start_time)
throttle = prediction[0][0]
steer = prediction[0][1] * (1 - -1) + -1
break_command = prediction[0][2]
speed = self.vehicle.get_velocity()
vehicle_speed = 3.6 * math.sqrt(speed.x ** 2 + speed.y ** 2 + speed.z ** 2)
self.previous_speed = vehicle_speed
if vehicle_speed < 5:
self.motors.sendThrottle(1.0)
self.motors.sendSteer(0.0)
self.motors.sendBrake(0)
else:
self.motors.sendThrottle(throttle)
self.motors.sendSteer(steer)
self.motors.sendBrake(break_command)
except NotFoundError as ex:
logger.info('Error inside brain: NotFoundError!')
logger.warning(type(ex).__name__)
print_exc()
raise Exception(ex)
except UnimplementedError as ex:
logger.info('Error inside brain: UnimplementedError!')
logger.warning(type(ex).__name__)
print_exc()
raise Exception(ex)
except Exception as ex:
logger.info('Error inside brain: Exception!')
logger.warning(type(ex).__name__)
print_exc()
raise Exception(ex)
|
def execute(self):
image = self.camera_0.getImage().data
image_1 = self.camera_1.getImage().data
image_2 = self.camera_2.getImage().data
image_3 = self.camera_3.getImage().data
bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle)
bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB)
if self.cameras_first_images == []:
self.cameras_first_images.append(image)
self.cameras_first_images.append(image_1)
self.cameras_first_images.append(image_2)
self.cameras_first_images.append(image_3)
self.cameras_first_images.append(bird_eye_view_1)
self.cameras_last_images = [image, image_1, image_2, image_3, bird_eye_view_1]
if image_1.shape[0] != image_1.shape[1]:
if image_1.shape[0] > image_1.shape[1]:
difference = image_1.shape[0] - image_1.shape[1]
(extra_left, extra_right) = (int(difference / 2), int(difference / 2))
(extra_top, extra_bottom) = (0, 0)
else:
difference = image_1.shape[1] - image_1.shape[0]
(extra_left, extra_right) = (0, 0)
(extra_top, extra_bottom) = (int(difference / 2), int(difference / 2))
image_1 = np.pad(image_1, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
self.handler.update_frame('frame_1', image_1)
if image_2.shape[0] != image_2.shape[1]:
if image_2.shape[0] > image_2.shape[1]:
difference = image_2.shape[0] - image_2.shape[1]
(extra_left, extra_right) = (int(difference / 2), int(difference / 2))
(extra_top, extra_bottom) = (0, 0)
else:
difference = image_2.shape[1] - image_2.shape[0]
(extra_left, extra_right) = (0, 0)
(extra_top, extra_bottom) = (int(difference / 2), int(difference / 2))
image_2 = np.pad(image_2, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
self.handler.update_frame('frame_2', image_2)
if image_3.shape[0] != image_3.shape[1]:
if image_3.shape[0] > image_3.shape[1]:
difference = image_3.shape[0] - image_3.shape[1]
(extra_left, extra_right) = (int(difference / 2), int(difference / 2))
(extra_top, extra_bottom) = (0, 0)
else:
difference = image_3.shape[1] - image_3.shape[0]
(extra_left, extra_right) = (0, 0)
(extra_top, extra_bottom) = (int(difference / 2), int(difference / 2))
image_3 = np.pad(image_3, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
self.handler.update_frame('frame_3', image_3)
if bird_eye_view_1.shape[0] != bird_eye_view_1.shape[1]:
if bird_eye_view_1.shape[0] > bird_eye_view_1.shape[1]:
difference = bird_eye_view_1.shape[0] - bird_eye_view_1.shape[1]
(extra_left, extra_right) = (int(difference / 2), int(difference / 2))
(extra_top, extra_bottom) = (0, 0)
else:
difference = bird_eye_view_1.shape[1] - bird_eye_view_1.shape[0]
(extra_left, extra_right) = (0, 0)
(extra_top, extra_bottom) = (int(difference / 2), int(difference / 2))
bird_eye_view_1 = np.pad(bird_eye_view_1, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
self.handler.update_frame('frame_0', bird_eye_view_1)
self.handler.update_pose3d(self.pose.getPose3d())
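        # Prepare the network input: resize the bird's-eye view to the model's input
        # shape and normalize it.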
image_shape = (50, 150)
img_base = cv2.resize(bird_eye_view_1, image_shape)
AUGMENTATIONS_TEST = Compose([Normalize()])
image = AUGMENTATIONS_TEST(image=img_base)
img = image['image']
self.bird_eye_view_images += 1
if (self.previous_bird_eye_view_image == img).all() == False:
self.bird_eye_view_unique_images += 1
self.previous_bird_eye_view_image = img
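        # Append the (scaled) previous speed as an extra channel so the network
        # receives velocity context alongside the bird's-eye-view image.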
velocity_dim = np.full((150, 50), self.previous_speed / 30)
new_img_vel = np.dstack((img, velocity_dim))
img = new_img_vel
img = np.expand_dims(img, axis=0)
start_time = time.time()
try:
prediction = self.net.predict(img, verbose=0)
self.inference_times.append(time.time() - start_time)
throttle = prediction[0][0]
steer = prediction[0][1] * (1 - -1) + -1
break_command = prediction[0][2]
speed = self.vehicle.get_velocity()
vehicle_speed = 3.6 * math.sqrt(speed.x ** 2 + speed.y ** 2 + speed.z ** 2)
self.previous_speed = vehicle_speed
if vehicle_speed < 5:
self.motors.sendThrottle(1.0)
self.motors.sendSteer(0.0)
self.motors.sendBrake(0)
else:
self.motors.sendThrottle(throttle)
self.motors.sendSteer(steer)
self.motors.sendBrake(break_command)
except NotFoundError as ex:
logger.info('Error inside brain: NotFoundError!')
logger.warning(type(ex).__name__)
print_exc()
raise Exception(ex)
except UnimplementedError as ex:
logger.info('Error inside brain: UnimplementedError!')
logger.warning(type(ex).__name__)
print_exc()
raise Exception(ex)
except Exception as ex:
logger.info('Error inside brain: Exception!')
logger.warning(type(ex).__name__)
print_exc()
raise Exception(ex)
|
BehaviorMetrics
|
positive
|
def train_custom_model(nlp_inputs, meta_inputs, meta_outputs, nlp_outputs, full_ds, target, keras_model_type, keras_options, model_options, var_df, cat_vocab_dict, project_name='', save_model_flag=True, use_my_model='', verbose=0):
"""
    Given a keras model and a batched tf.data.dataset, this function trains the model.
    It first splits the batched data into train_ds and valid_ds (80/20), then selects
    the right parameters based on the model type, trains the model and evaluates it
    on valid_ds. Finally, it retrains the model on the full batched data and returns
    the trained keras model along with the updated cat_vocab_dict.
"""
save_model_path = model_options['save_model_path']
inputs = nlp_inputs + meta_inputs
nlps = var_df['nlp_vars']
lats = var_df['lat_vars']
lons = var_df['lon_vars']
if nlp_inputs:
nlp_flag = True
else:
nlp_flag = False
start_time = time.time()
targets = cat_vocab_dict['target_variables']
max_trials = model_options['max_trials']
overwrite_flag = True
data_size = check_keras_options(keras_options, 'data_size', 10000)
batch_size = check_keras_options(keras_options, 'batchsize', 64)
class_weights = check_keras_options(keras_options, 'class_weight', {})
if not isinstance(model_options['label_encode_flag'], str):
if not model_options['label_encode_flag']:
print(' removing class weights since label_encode_flag is set to False which means classes can be anything.')
class_weights = {}
print(' Class weights: %s' % class_weights)
num_classes = model_options['num_classes']
num_labels = model_options['num_labels']
modeltype = model_options['modeltype']
patience = keras_options['patience']
cols_len = len([item for sublist in list(var_df.values()) for item in sublist])
if isinstance(meta_outputs, list):
data_dim = int(data_size)
NON_NLP_VARS = []
else:
<DeepExtract>
lst = []
for i in cat_vocab_dict['predictors_in_train']:
if i not in nlps:
lst.append(i)
NON_NLP_VARS = lst
</DeepExtract>
try:
data_dim = int(data_size * meta_outputs.shape[1])
except:
data_dim = int(data_size * meta_outputs[0].shape[1])
optimizer = keras_options['optimizer']
early_stopping = check_keras_options(keras_options, 'early_stopping', False)
print(' original datasize = %s, initial batchsize = %s' % (data_size, batch_size))
print(' Early stopping : %s' % early_stopping)
NUMBER_OF_EPOCHS = check_keras_options(keras_options, 'epochs', 100)
if keras_options['lr_scheduler'] in ['expo', 'ExponentialDecay', 'exponentialdecay']:
print(' chosen ExponentialDecay learning rate scheduler')
expo_steps = NUMBER_OF_EPOCHS * data_size // batch_size
learning_rate = keras.optimizers.schedules.ExponentialDecay(0.0001, expo_steps, 0.1)
else:
learning_rate = check_keras_options(keras_options, 'learning_rate', 0.05)
if len(var_df['nlp_vars']) > 0:
steps = 10
else:
steps = max(10, data_size // (batch_size * 2))
steps = min(300, steps)
print(' recommended steps per epoch = %d' % steps)
STEPS_PER_EPOCH = check_keras_options(keras_options, 'steps_per_epoch', steps)
kernel_initializer = check_keras_options(keras_options, 'kernel_initializer', 'lecun_normal')
activation = 'selu'
print(' default initializer = %s, default activation = %s' % (kernel_initializer, activation))
use_bias = check_keras_options(keras_options, 'use_bias', True)
lr_scheduler = check_keras_options(keras_options, 'lr_scheduler', '')
onecycle_steps = max(10, np.ceil(data_size / (2 * batch_size)) * NUMBER_OF_EPOCHS)
print(' Onecycle steps = %d' % onecycle_steps)
(keras_options, model_options, num_predicts, output_activation) = get_model_defaults(keras_options, model_options, targets)
val_mode = keras_options['mode']
val_monitor = keras_options['monitor']
val_loss = keras_options['loss']
val_metrics = keras_options['metrics']
try:
print(' number of classes = %s, output_activation = %s' % (num_predicts, output_activation))
print(' loss function: %s' % str(val_loss).split('.')[-1].split(' ')[0])
except:
print(' loss fn = %s number of classes = %s, output_activation = %s' % (val_loss, num_predicts, output_activation))
modeltype = cat_vocab_dict['modeltype']
regular_body = True
if isinstance(meta_outputs, list):
if nlp_flag:
if len(nlp_outputs) > 0:
regular_body = False
else:
regular_body = True
else:
regular_body = False
save_weights_only = check_keras_options(keras_options, 'save_weights_only', False)
print(' steps_per_epoch = %s, number epochs = %s' % (STEPS_PER_EPOCH, NUMBER_OF_EPOCHS))
print(' val mode = %s, val monitor = %s, patience = %s' % (val_mode, val_monitor, patience))
(callbacks_dict, tb_logpath) = get_callbacks(val_mode, val_monitor, patience, learning_rate, save_weights_only, onecycle_steps, save_model_path)
chosen_callback = get_chosen_callback(callbacks_dict, keras_options)
if not keras_options['lr_scheduler']:
print(' chosen keras LR scheduler = default')
else:
print(' chosen keras LR scheduler = %s' % keras_options['lr_scheduler'])
recover = lambda x, y: y
print('\nSplitting train into 80+20 percent: train and validation data')
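    # is_train / is_valid / is_test are index-based filter helpers (not shown here)
    # that route each enumerated batch into the train / validation / heldout subsets.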
valid_ds1 = full_ds.enumerate().filter(is_valid).map(recover)
train_ds = full_ds.enumerate().filter(is_train).map(recover)
heldout_ds1 = valid_ds1
print(' Splitting validation 20 into 10+10 percent: valid and heldout data')
valid_ds = heldout_ds1.enumerate().filter(is_test).map(recover)
heldout_ds = heldout_ds1.enumerate().filter(is_test).map(recover)
print('\nLoading model and setting params. Will take 2-3 mins. Please be patient.')
shuffle_size = 1000
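    # Materialize the held-out labels into a numpy array (y_test) so that
    # sklearn-style metrics can be computed on them after training.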
if num_labels <= 1:
try:
y_test = np.concatenate(list(heldout_ds.map(lambda x, y: y).as_numpy_iterator()))
print(' Single-Label: Heldout data shape: %s' % (y_test.shape,))
max_batch_size = int(min(y_test.shape[0], 4096))
except:
max_batch_size = 48
pass
else:
iters = int(data_size / batch_size) + 1
for (inum, each_target) in enumerate(target):
add_ls = []
for (feats, labs) in heldout_ds.take(iters):
add_ls.append(list(labs[each_target].numpy()))
flat_list = [item for sublist in add_ls for item in sublist]
if inum == 0:
each_array = np.array(flat_list)
else:
each_array = np.c_[each_array, np.array(flat_list)]
y_test = copy.deepcopy(each_array)
print(' Multi-Label: Heldout data shape: %s' % (y_test.shape,))
max_batch_size = y_test.shape[0]
if modeltype == 'Regression':
if (y_test >= 0).all():
output_activation = 'softplus'
print('Setting output activation layer as softplus since there are no negative values')
print(' Shuffle size = %d' % shuffle_size)
train_ds = train_ds.prefetch(tf.data.AUTOTUNE).shuffle(shuffle_size, reshuffle_each_iteration=False, seed=42)
valid_ds = valid_ds.prefetch(tf.data.AUTOTUNE)
if not isinstance(use_my_model, str):
tuner = 'None'
else:
tuner = model_options['tuner']
print(' Training %s model using %s. This will take time...' % (keras_model_type, tuner))
from secrets import randbelow
rand_num = randbelow(10000)
tf.compat.v1.reset_default_graph()
K.clear_session()
if keras_options['lr_scheduler'] in ['expo', 'ExponentialDecay', 'exponentialdecay']:
callbacks_list_tuner = callbacks_dict['early_stop']
else:
callbacks_list_tuner = [chosen_callback, callbacks_dict['early_stop']]
targets = cat_vocab_dict['target_variables']
tune_mode = 'min'
trials_saved_path = os.path.join(save_model_path, 'trials')
if num_labels > 1 and modeltype != 'Regression':
tune_mode = 'max'
else:
tune_mode = val_mode
if tuner.lower() == 'storm':
randomization_factor = 0.5
tuner = MyTuner(project_dir=trials_saved_path, build_fn=build_model_storm, objective_direction=tune_mode, init_random=5, max_iters=max_trials, randomize_axis_factor=randomization_factor, overwrite=True)
start_time1 = time.time()
print(' STORM Tuner max_trials = %d, randomization factor = %0.2f' % (max_trials, randomization_factor))
tuner_epochs = 100
tuner_steps = STEPS_PER_EPOCH
batch_limit = min(max_batch_size, int(5 * find_batch_size(data_size)))
batch_nums = int(min(8, math.log(batch_limit, 3)))
print('Max. batch size = %d, number of batch sizes to try: %d' % (batch_limit, batch_nums))
tuner.search(train_ds, valid_ds, tuner_epochs, tuner_steps, inputs, meta_outputs, cols_len, output_activation, num_predicts, modeltype, optimizer, val_loss, val_metrics, patience, val_mode, data_size, learning_rate, val_monitor, callbacks_list_tuner, modeltype, class_weights, batch_size, batch_limit, batch_nums, targets, nlp_flag, regular_body, project_name, keras_model_type, cat_vocab_dict, model_options)
best_trial = tuner.get_best_trial()
print(' best trial selected as %s' % best_trial)
print('Time taken for tuning hyperparameters = %0.0f (mins)' % ((time.time() - start_time1) / 60))
try:
hpq = tuner.get_best_config()
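            # Rebuild the best architecture from the winning STORM config (hpq), read
            # back the tuned batch size and optimizer choice, and take the final
            # learning rate from the best trial's metrics.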
<DeepExtract>
keras.backend.clear_session()
if len(args) == 2:
(batch_limit, batch_nums) = (args[0], args[1])
batch_size = hpq.Param('batch_size', [32, 64, 128, 256, 512, 1024, 2048], ordered=True)
elif len(args) == 1:
batch_size = args[0]
batch_size = hpq.Param('batch_size', [batch_size])
else:
batch_size = hpq.Param('batch_size', [32, 64, 128, 256, 512, 1024, 2048])
num_layers = hpq.Param('num_layers', [1, 2, 3], ordered=True)
model_body = Sequential([])
activation_fn = hpq.Param('activation', ['relu', 'selu', 'elu'])
use_bias = hpq.Param('use_bias', [True, False])
weight_decay = hpq.Param('weight_decay', np.logspace(-8, -3, 10))
batch_norm = hpq.Param('batch_norm', [True, False])
kernel_initializer = hpq.Param('kernel_initializer', ['glorot_uniform', 'he_normal', 'lecun_normal', 'he_uniform'], ordered=False)
dropout_flag = hpq.Param('use_dropout', [True, False])
batch_norm_flag = hpq.Param('use_batch_norm', [True, False])
num_hidden = [50, 100, 150, 200, 250, 300, 350, 400, 450, 500]
model_body.add(Dense(hpq.Param('kernel_size_' + str(0), num_hidden, ordered=True), use_bias=use_bias, kernel_initializer=kernel_initializer, name='storm_dense_0'))
model_body.add(Activation(activation_fn, name='activation_0'))
if batch_norm_flag:
model_body.add(BatchNormalization(name='batch_norm_0'))
if dropout_flag:
model_body.add(Dropout(hpq.Param('dropout_value', [0.5, 0.6, 0.7, 0.8, 0.9], ordered=True), name='dropout_0'))
kernel_size = hpq.values['kernel_size_' + str(0)]
if dropout_flag:
dropout_value = hpq.values['dropout_value']
else:
dropout_value = 0.5
batch_norm_flag = hpq.values['use_batch_norm']
num_copy = copy.deepcopy(num_layers)
for x in range(num_copy):
kernel_size = int(0.75 * kernel_size)
model_body.add(Dense(kernel_size, name='storm_dense_' + str(x + 1), use_bias=use_bias, kernel_initializer=kernel_initializer))
model_body.add(Activation(activation_fn, name='activation_' + str(x + 100)))
if batch_norm_flag:
model_body.add(BatchNormalization(name='batch_norm_' + str(x + 1)))
if dropout_flag:
model_body.add(Dropout(dropout_value, name='dropout_' + str(x + 1)))
selected_optimizer = hpq.Param('optimizer', ['Adam', 'AdaMax', 'Adagrad', 'SGD', 'RMSprop', 'Nadam', 'nesterov'], ordered=False)
optimizer = return_optimizer_trials(hpq, selected_optimizer)
(best_model, best_optimizer) = (model_body, optimizer)
</DeepExtract>
best_batch = hpq.values['batch_size']
hpq_optimizer = hpq.values['optimizer']
if best_trial.metrics['final_lr'] < 0:
print(' best learning rate less than zero. Resetting it....')
optimizer_lr = 0.01
else:
optimizer_lr = best_trial.metrics['final_lr']
print('Best hyperparameters: %s' % hpq.values)
except:
<DeepExtract>
num_layers = check_keras_options(keras_options, 'num_layers', 2)
model_body = tf.keras.Sequential([])
for l_ in range(num_layers):
model_body.add(layers.Dense(64, activation='relu', kernel_initializer='lecun_normal'))
deep_model = model_body
</DeepExtract>
best_batch = batch_size
hpq_optimizer = 'SGD'
best_optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
optimizer_lr = 0.01
print(' Storm Tuner is erroring. Hence picking defaults including lr = %s' % optimizer_lr)
print('\nSetting best optimizer %s its best learning_rate = %s' % (hpq_optimizer, optimizer_lr))
best_optimizer = return_optimizer(hpq_optimizer)
K.set_value(best_optimizer.learning_rate, optimizer_lr)
if regular_body:
storm_outputs = add_outputs_to_model_body(best_model, meta_outputs)
else:
storm_outputs = add_outputs_to_auto_model_body(best_model, meta_outputs, nlp_flag)
best_model = get_compiled_model(inputs, storm_outputs, output_activation, num_predicts, modeltype, best_optimizer, val_loss, val_metrics, cols_len, targets)
deep_model = best_model
elif tuner.lower() == 'optuna':
optuna_scores = []
def objective(trial):
optimizer_options = ''
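            # Build a candidate model from this trial's sampled hyperparameters
            # (depth, width, dropout, optimizer), train it briefly, and score it
            # on the validation monitor.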
<DeepExtract>
n_layers = trial.suggest_int('n_layers', 2, 8)
num_hidden = trial.suggest_categorical('n_units', [50, 100, 150, 200, 250, 300, 350, 400, 450, 500])
weight_decay = trial.suggest_float('weight_decay', 1e-08, 0.001, log=True)
use_bias = trial.suggest_categorical('use_bias', [True, False])
batch_norm = trial.suggest_categorical('batch_norm', [True, False])
add_noise = trial.suggest_categorical('add_noise', [True, False])
dropout = trial.suggest_float('dropout', 0.5, 0.9)
activation_fn = trial.suggest_categorical('activation', ['relu', 'elu', 'selu'])
kernel_initializer = trial.suggest_categorical('kernel_initializer', ['glorot_uniform', 'he_normal', 'lecun_normal', 'he_uniform'])
kernel_size = num_hidden
model = tf.keras.Sequential()
for i in range(n_layers):
kernel_size = int(kernel_size * 0.8)
model.add(tf.keras.layers.Dense(kernel_size, name='opt_dense_' + str(i), use_bias=use_bias, kernel_initializer=kernel_initializer))
model.add(Activation(activation_fn, name='opt_activation_' + str(i)))
if batch_norm:
model.add(BatchNormalization(name='opt_batchnorm_' + str(i)))
if add_noise:
model.add(GaussianNoise(trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)))
model.add(Dropout(dropout, name='opt_drop_' + str(i)))
kwargs = {}
if isinstance(optimizer_options, str):
if optimizer_options == '':
optimizer_options = ['Adam', 'SGD']
optimizer_selected = trial.suggest_categorical('optimizer', optimizer_options)
else:
optimizer_selected = optimizer_options
else:
optimizer_selected = trial.suggest_categorical('optimizer', optimizer_options)
if optimizer_selected == 'Adam':
kwargs['learning_rate'] = trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)
kwargs['epsilon'] = trial.suggest_float('adam_epsilon', 1e-14, 0.0001, log=True)
elif optimizer_selected == 'SGD':
kwargs['learning_rate'] = trial.suggest_float('sgd_opt_learning_rate', 1e-07, 0.001, log=True)
kwargs['momentum'] = trial.suggest_float('sgd_opt_momentum', 0.8, 0.95)
optimizer = getattr(tf.optimizers, optimizer_selected)(**kwargs)
if regular_body:
opt_outputs = add_outputs_to_model_body(model, meta_outputs)
else:
opt_outputs = add_outputs_to_auto_model_body(model, meta_outputs, nlp_flag)
comp_model = get_compiled_model(inputs, opt_outputs, output_activation, num_predicts, modeltype, optimizer, val_loss, val_metrics, cols_len, targets)
opt_model = comp_model
</DeepExtract>
optuna_epochs = 5
history = opt_model.fit(train_ds, validation_data=valid_ds, epochs=optuna_epochs, shuffle=True, callbacks=callbacks_list_tuner, verbose=0)
if num_labels == 1:
score = np.mean(history.history[val_monitor][-5:])
else:
for i in range(num_labels):
metric1 = [x for x in history.history.keys() if (targets[i] in x) & ('loss' not in x)]
val_metric = metric1[0]
if i == 0:
results = history.history[val_metric][-5:]
else:
results = np.c_[results, history.history[val_metric][-5:]]
score = results.mean(axis=1).mean()
optuna_scores.append(score)
return score
study_name = project_name + '_' + keras_model_type + '_study_' + str(rand_num)
if tune_mode == 'max':
study = optuna.create_study(study_name=study_name, direction='maximize', load_if_exists=False)
else:
study = optuna.create_study(study_name=study_name, direction='minimize', load_if_exists=False)
study.optimize(objective, n_trials=max_trials)
print('Best trial score in Optuna: %s' % study.best_trial.value)
print(' Scores mean:', np.mean(optuna_scores), 'std:', np.std(optuna_scores))
print(' Best params: %s' % study.best_params)
optimizer_options = study.best_params['optimizer']
<DeepExtract>
n_layers = study.best_trial.suggest_int('n_layers', 2, 8)
num_hidden = study.best_trial.suggest_categorical('n_units', [50, 100, 150, 200, 250, 300, 350, 400, 450, 500])
weight_decay = study.best_trial.suggest_float('weight_decay', 1e-08, 0.001, log=True)
use_bias = study.best_trial.suggest_categorical('use_bias', [True, False])
batch_norm = study.best_trial.suggest_categorical('batch_norm', [True, False])
add_noise = study.best_trial.suggest_categorical('add_noise', [True, False])
dropout = study.best_trial.suggest_float('dropout', 0.5, 0.9)
activation_fn = study.best_trial.suggest_categorical('activation', ['relu', 'elu', 'selu'])
kernel_initializer = study.best_trial.suggest_categorical('kernel_initializer', ['glorot_uniform', 'he_normal', 'lecun_normal', 'he_uniform'])
kernel_size = num_hidden
model = tf.keras.Sequential()
for i in range(n_layers):
kernel_size = int(kernel_size * 0.8)
model.add(tf.keras.layers.Dense(kernel_size, name='opt_dense_' + str(i), use_bias=use_bias, kernel_initializer=kernel_initializer))
model.add(Activation(activation_fn, name='opt_activation_' + str(i)))
if batch_norm:
model.add(BatchNormalization(name='opt_batchnorm_' + str(i)))
if add_noise:
model.add(GaussianNoise(study.best_trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)))
model.add(Dropout(dropout, name='opt_drop_' + str(i)))
kwargs = {}
if isinstance(optimizer_options, str):
if optimizer_options == '':
optimizer_options = ['Adam', 'SGD']
optimizer_selected = study.best_trial.suggest_categorical('optimizer', optimizer_options)
else:
optimizer_selected = optimizer_options
else:
optimizer_selected = study.best_trial.suggest_categorical('optimizer', optimizer_options)
if optimizer_selected == 'Adam':
kwargs['learning_rate'] = study.best_trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)
kwargs['epsilon'] = study.best_trial.suggest_float('adam_epsilon', 1e-14, 0.0001, log=True)
elif optimizer_selected == 'SGD':
kwargs['learning_rate'] = study.best_trial.suggest_float('sgd_opt_learning_rate', 1e-07, 0.001, log=True)
kwargs['momentum'] = study.best_trial.suggest_float('sgd_opt_momentum', 0.8, 0.95)
optimizer = getattr(tf.optimizers, optimizer_selected)(**kwargs)
if regular_body:
opt_outputs = add_outputs_to_model_body(model, meta_outputs)
else:
opt_outputs = add_outputs_to_auto_model_body(model, meta_outputs, nlp_flag)
comp_model = get_compiled_model(inputs, opt_outputs, output_activation, num_predicts, modeltype, optimizer, val_loss, val_metrics, cols_len, targets)
best_model = comp_model
</DeepExtract>
best_optimizer = best_model.optimizer
<DeepExtract>
n_layers = study.best_trial.suggest_int('n_layers', 2, 8)
num_hidden = study.best_trial.suggest_categorical('n_units', [50, 100, 150, 200, 250, 300, 350, 400, 450, 500])
weight_decay = study.best_trial.suggest_float('weight_decay', 1e-08, 0.001, log=True)
use_bias = study.best_trial.suggest_categorical('use_bias', [True, False])
batch_norm = study.best_trial.suggest_categorical('batch_norm', [True, False])
add_noise = study.best_trial.suggest_categorical('add_noise', [True, False])
dropout = study.best_trial.suggest_float('dropout', 0.5, 0.9)
activation_fn = study.best_trial.suggest_categorical('activation', ['relu', 'elu', 'selu'])
kernel_initializer = study.best_trial.suggest_categorical('kernel_initializer', ['glorot_uniform', 'he_normal', 'lecun_normal', 'he_uniform'])
kernel_size = num_hidden
model = tf.keras.Sequential()
for i in range(n_layers):
kernel_size = int(kernel_size * 0.8)
model.add(tf.keras.layers.Dense(kernel_size, name='opt_dense_' + str(i), use_bias=use_bias, kernel_initializer=kernel_initializer))
model.add(Activation(activation_fn, name='opt_activation_' + str(i)))
if batch_norm:
model.add(BatchNormalization(name='opt_batchnorm_' + str(i)))
if add_noise:
model.add(GaussianNoise(study.best_trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)))
model.add(Dropout(dropout, name='opt_drop_' + str(i)))
kwargs = {}
if isinstance(optimizer_options, str):
if optimizer_options == '':
optimizer_options = ['Adam', 'SGD']
optimizer_selected = study.best_trial.suggest_categorical('optimizer', optimizer_options)
else:
optimizer_selected = optimizer_options
else:
optimizer_selected = study.best_trial.suggest_categorical('optimizer', optimizer_options)
if optimizer_selected == 'Adam':
kwargs['learning_rate'] = study.best_trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)
kwargs['epsilon'] = study.best_trial.suggest_float('adam_epsilon', 1e-14, 0.0001, log=True)
elif optimizer_selected == 'SGD':
kwargs['learning_rate'] = study.best_trial.suggest_float('sgd_opt_learning_rate', 1e-07, 0.001, log=True)
kwargs['momentum'] = study.best_trial.suggest_float('sgd_opt_momentum', 0.8, 0.95)
optimizer = getattr(tf.optimizers, optimizer_selected)(**kwargs)
if regular_body:
opt_outputs = add_outputs_to_model_body(model, meta_outputs)
else:
opt_outputs = add_outputs_to_auto_model_body(model, meta_outputs, nlp_flag)
comp_model = get_compiled_model(inputs, opt_outputs, output_activation, num_predicts, modeltype, optimizer, val_loss, val_metrics, cols_len, targets)
deep_model = comp_model
</DeepExtract>
best_batch = batch_size
optimizer_lr = best_optimizer.learning_rate.numpy()
print('\nBest optimizer = %s and best learning_rate = %s' % (best_optimizer, optimizer_lr))
K.set_value(best_optimizer.learning_rate, optimizer_lr)
elif tuner.lower() == 'none':
print('skipping tuner search since use_my_model flag set to True...')
best_model = use_my_model
deep_model = use_my_model
if regular_body:
best_outputs = add_outputs_to_model_body(best_model, meta_outputs)
deep_outputs = add_outputs_to_model_body(deep_model, meta_outputs)
else:
best_outputs = add_outputs_to_auto_model_body(best_model, meta_outputs, nlp_flag)
deep_outputs = add_outputs_to_auto_model_body(deep_model, meta_outputs, nlp_flag)
best_optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
best_batch = batch_size
optimizer_lr = best_optimizer.learning_rate.numpy()
print('\nBest optimizer = %s and best learning_rate = %s' % (best_optimizer, optimizer_lr))
K.set_value(best_optimizer.learning_rate, optimizer_lr)
best_model = get_compiled_model(inputs, best_outputs, output_activation, num_predicts, modeltype, best_optimizer, val_loss, val_metrics, cols_len, targets)
deep_model = get_compiled_model(inputs, deep_outputs, output_activation, num_predicts, modeltype, best_optimizer, val_loss, val_metrics, cols_len, targets)
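    # Re-batch the train/validation splits with the tuned batch size and train the
    # compiled best_model for the full number of epochs with the chosen callbacks.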
train_ds = train_ds.unbatch().batch(best_batch, drop_remainder=True)
train_ds = train_ds.shuffle(shuffle_size, reshuffle_each_iteration=False, seed=42).prefetch(tf.data.AUTOTUNE)
valid_ds = valid_ds.unbatch().batch(best_batch, drop_remainder=True)
valid_ds = valid_ds.prefetch(tf.data.AUTOTUNE)
if keras_options['lr_scheduler'] in ['expo', 'ExponentialDecay', 'exponentialdecay']:
if early_stopping:
callbacks_list = [callbacks_dict['early_stop'], callbacks_dict['tensor_board']]
else:
callbacks_list = [callbacks_dict['tensor_board']]
elif early_stopping:
callbacks_list = [callbacks_dict['early_stop'], callbacks_dict['tensor_board'], chosen_callback]
else:
callbacks_list = [callbacks_dict['tensor_board'], chosen_callback]
print('Model training with best hyperparameters for %d epochs' % NUMBER_OF_EPOCHS)
for each_callback in callbacks_list:
print(' Callback added: %s' % str(each_callback).split('.')[-1])
np.random.seed(42)
tf.random.set_seed(42)
history = best_model.fit(train_ds, validation_data=valid_ds, epochs=NUMBER_OF_EPOCHS, callbacks=callbacks_list, class_weight=class_weights, shuffle=True)
print(' Model training completed. Following metrics available: %s' % history.history.keys())
print('Time taken to train model (in mins) = %0.0f' % ((time.time() - start_time) / 60))
K.clear_session()
<DeepExtract>
sess = get_session()
K.clear_session()
sess.close()
sess = get_session()
try:
del opt_model
del best_model
del deep_model
print('deleted deep and best models from memory')
except:
pass
print(gc.collect())
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 1
config.gpu_options.visible_device_list = '0'
set_session(tf.compat.v1.Session(config=config))
</DeepExtract>
tf.compat.v1.reset_default_graph()
tf.keras.backend.reset_uids()
try:
stopped_epoch = max(5, int(pd.DataFrame(history.history).shape[0] - patience))
except:
stopped_epoch = 100
print(' Stopped epoch = %s' % stopped_epoch)
try:
if modeltype == 'Regression':
plot_history(history, val_monitor[4:], target)
elif modeltype == 'Classification':
plot_history(history, val_monitor[4:], target)
else:
plot_history(history, val_monitor[4:], target)
except:
print(' Plot history is erroring. Tensorboard logs can be found here: %s' % tb_logpath)
print('Time taken to train model (in mins) = %0.0f' % ((time.time() - start_time) / 60))
print(' Stopped epoch = %s' % stopped_epoch)
scores = []
ls = []
print('Held out data actuals shape: %s' % (y_test.shape,))
if verbose >= 1:
try:
print_one_row_from_tf_label(heldout_ds)
except:
print('could not print samples from heldout ds labels')
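    # Score the tuned model on the held-out split: predict, convert probabilities to
    # class labels (argmax) or regression values, then print metrics and plots.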
y_probas = best_model.predict(heldout_ds)
if isinstance(target, str):
if modeltype != 'Regression':
y_test_preds = y_probas.argmax(axis=1)
elif y_test.dtype == 'int':
y_test_preds = y_probas.round().astype(int)
else:
y_test_preds = y_probas.ravel()
elif modeltype != 'Regression':
for each_t in range(len(target)):
if each_t == 0:
y_test_preds = y_probas[each_t].argmax(axis=1).astype(int)
else:
y_test_preds = np.c_[y_test_preds, y_probas[each_t].argmax(axis=1).astype(int)]
else:
for each_t in range(len(target)):
if each_t == 0:
y_test_preds = y_probas[each_t].mean(axis=1)
else:
y_test_preds = np.c_[y_test_preds, y_probas[each_t].mean(axis=1)]
if y_test.dtype == 'int':
y_test_preds = y_test_preds.round().astype(int)
print('\nHeld out predictions shape:%s' % (y_test_preds.shape,))
if verbose >= 1:
if modeltype != 'Regression':
print(' Sample predictions: %s' % y_test_preds[:10])
elif num_labels == 1:
print(' Sample predictions: %s' % y_test_preds.ravel()[:10])
else:
print(' Sample predictions:\n%s' % y_test_preds[:10])
print('\n###########################################################')
print(' Held-out test data set Results:')
num_labels = cat_vocab_dict['num_labels']
num_classes = cat_vocab_dict['num_classes']
if check_for_nan_in_array(y_probas):
y_probas = pd.DataFrame(y_probas).fillna(0).values
elif check_for_nan_in_array(y_test_preds):
y_test_preds = pd.DataFrame(y_test_preds).fillna(0).values.ravel()
if num_labels <= 1:
if modeltype == 'Regression':
print_regression_model_stats(y_test, y_test_preds, target, plot_name=project_name)
plot_regression_residuals(y_test, y_test_preds, target, project_name, num_labels)
else:
print_classification_header(num_classes, num_labels, target)
labels = cat_vocab_dict['original_classes']
if cat_vocab_dict['target_transformed']:
target_names = cat_vocab_dict['transformed_classes']
target_le = cat_vocab_dict['target_le']
y_pred = y_probas.argmax(axis=1)
y_test_trans = target_le.inverse_transform(y_test)
y_pred_trans = target_le.inverse_transform(y_pred)
labels = np.unique(y_test_trans)
plot_classification_results(y_test_trans, y_pred_trans, labels, labels, target)
else:
y_pred = y_probas.argmax(axis=1)
labels = np.unique(y_test)
plot_classification_results(y_test, y_pred, labels, labels, target)
print_classification_metrics(y_test, y_probas, proba_flag=True)
elif modeltype == 'Regression':
print_regression_model_stats(y_test, y_test_preds, target, plot_name=project_name)
plot_regression_residuals(y_test, y_test_preds, target, project_name, num_labels)
else:
try:
targets = cat_vocab_dict['target_variables']
for (i, each_target) in enumerate(targets):
print_classification_header(num_classes, num_labels, each_target)
labels = cat_vocab_dict[each_target + '_original_classes']
if cat_vocab_dict['target_transformed']:
target_names = cat_vocab_dict[each_target + '_transformed_classes']
target_le = cat_vocab_dict['target_le'][i]
y_pred = y_probas[i].argmax(axis=1)
y_test_trans = target_le.inverse_transform(y_test[:, i])
y_pred_trans = target_le.inverse_transform(y_pred)
labels = np.unique(y_test_trans)
plot_classification_results(y_test_trans, y_pred_trans, labels, labels, each_target)
else:
y_pred = y_probas[i].argmax(axis=1)
labels = np.unique(y_test[:, i])
plot_classification_results(y_test[:, i], y_pred, labels, labels, each_target)
print_classification_metrics(y_test[:, i], y_probas[i], proba_flag=True)
print(classification_report(y_test[:, i], y_test_preds[:, i]))
print(confusion_matrix(y_test[:, i], y_test_preds[:, i]))
except:
print_classification_metrics(y_test, y_test_preds, False)
print(classification_report(y_test, y_test_preds))
print('\nFinally, training on full train dataset. This will take time...')
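    # Final refit: train deep_model on 100% of the data for the early-stopped epoch
    # count, then save the model artifacts to save_model_path.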
full_ds = full_ds.unbatch().batch(best_batch)
full_ds = full_ds.shuffle(shuffle_size, reshuffle_each_iteration=False, seed=42).prefetch(best_batch)
best_rate = best_model.optimizer.lr.numpy()
if best_rate < 0:
print(' best learning rate less than zero. Resetting it....')
best_rate = 0.01
else:
pass
print(' best learning rate = %s' % best_rate)
K.set_value(deep_model.optimizer.learning_rate, best_rate)
print(' set learning rate using best model:', deep_model.optimizer.learning_rate.numpy())
print(' max epochs for training = %d' % stopped_epoch)
callbacks_list = [callbacks_dict['check_point']]
deep_model.fit(full_ds, epochs=stopped_epoch, class_weight=class_weights, callbacks=callbacks_list, shuffle=True, verbose=0)
save_model_artifacts(deep_model, cat_vocab_dict, var_df, save_model_path, save_model_flag, model_options)
K.clear_session()
tf.compat.v1.reset_default_graph()
<DeepExtract>
sess = get_session()
K.clear_session()
sess.close()
sess = get_session()
try:
del opt_model
del best_model
del deep_model
print('deleted deep and best models from memory')
except:
pass
print(gc.collect())
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 1
config.gpu_options.visible_device_list = '0'
set_session(tf.compat.v1.Session(config=config))
</DeepExtract>
tf.keras.backend.reset_uids()
print('\nDeep_Auto_ViML completed. Total time taken = %0.0f (in mins)' % ((time.time() - start_time) / 60))
return (deep_model, cat_vocab_dict)
|
def train_custom_model(nlp_inputs, meta_inputs, meta_outputs, nlp_outputs, full_ds, target, keras_model_type, keras_options, model_options, var_df, cat_vocab_dict, project_name='', save_model_flag=True, use_my_model='', verbose=0):
"""
    Given a keras model and a batched tf.data.dataset, this function trains the model.
    It first splits the batched data into train_ds and valid_ds (80/20), then selects
    the right parameters based on the model type, trains the model and evaluates it
    on valid_ds. Finally, it retrains the model on the full batched data and returns
    the trained keras model along with the updated cat_vocab_dict.
"""
save_model_path = model_options['save_model_path']
inputs = nlp_inputs + meta_inputs
nlps = var_df['nlp_vars']
lats = var_df['lat_vars']
lons = var_df['lon_vars']
if nlp_inputs:
nlp_flag = True
else:
nlp_flag = False
start_time = time.time()
targets = cat_vocab_dict['target_variables']
max_trials = model_options['max_trials']
overwrite_flag = True
data_size = check_keras_options(keras_options, 'data_size', 10000)
batch_size = check_keras_options(keras_options, 'batchsize', 64)
class_weights = check_keras_options(keras_options, 'class_weight', {})
if not isinstance(model_options['label_encode_flag'], str):
if not model_options['label_encode_flag']:
print(' removing class weights since label_encode_flag is set to False which means classes can be anything.')
class_weights = {}
print(' Class weights: %s' % class_weights)
num_classes = model_options['num_classes']
num_labels = model_options['num_labels']
modeltype = model_options['modeltype']
patience = keras_options['patience']
cols_len = len([item for sublist in list(var_df.values()) for item in sublist])
if isinstance(meta_outputs, list):
data_dim = int(data_size)
NON_NLP_VARS = []
else:
lst = []
for i in cat_vocab_dict['predictors_in_train']:
if i not in nlps:
lst.append(i)
NON_NLP_VARS = lst
try:
data_dim = int(data_size * meta_outputs.shape[1])
except:
data_dim = int(data_size * meta_outputs[0].shape[1])
optimizer = keras_options['optimizer']
early_stopping = check_keras_options(keras_options, 'early_stopping', False)
print(' original datasize = %s, initial batchsize = %s' % (data_size, batch_size))
print(' Early stopping : %s' % early_stopping)
NUMBER_OF_EPOCHS = check_keras_options(keras_options, 'epochs', 100)
if keras_options['lr_scheduler'] in ['expo', 'ExponentialDecay', 'exponentialdecay']:
print(' chosen ExponentialDecay learning rate scheduler')
expo_steps = NUMBER_OF_EPOCHS * data_size // batch_size
learning_rate = keras.optimizers.schedules.ExponentialDecay(0.0001, expo_steps, 0.1)
else:
learning_rate = check_keras_options(keras_options, 'learning_rate', 0.05)
if len(var_df['nlp_vars']) > 0:
steps = 10
else:
steps = max(10, data_size // (batch_size * 2))
steps = min(300, steps)
print(' recommended steps per epoch = %d' % steps)
STEPS_PER_EPOCH = check_keras_options(keras_options, 'steps_per_epoch', steps)
kernel_initializer = check_keras_options(keras_options, 'kernel_initializer', 'lecun_normal')
activation = 'selu'
print(' default initializer = %s, default activation = %s' % (kernel_initializer, activation))
use_bias = check_keras_options(keras_options, 'use_bias', True)
lr_scheduler = check_keras_options(keras_options, 'lr_scheduler', '')
onecycle_steps = max(10, np.ceil(data_size / (2 * batch_size)) * NUMBER_OF_EPOCHS)
print(' Onecycle steps = %d' % onecycle_steps)
(keras_options, model_options, num_predicts, output_activation) = get_model_defaults(keras_options, model_options, targets)
val_mode = keras_options['mode']
val_monitor = keras_options['monitor']
val_loss = keras_options['loss']
val_metrics = keras_options['metrics']
try:
print(' number of classes = %s, output_activation = %s' % (num_predicts, output_activation))
print(' loss function: %s' % str(val_loss).split('.')[-1].split(' ')[0])
except:
print(' loss fn = %s number of classes = %s, output_activation = %s' % (val_loss, num_predicts, output_activation))
modeltype = cat_vocab_dict['modeltype']
regular_body = True
if isinstance(meta_outputs, list):
if nlp_flag:
if len(nlp_outputs) > 0:
regular_body = False
else:
regular_body = True
else:
regular_body = False
save_weights_only = check_keras_options(keras_options, 'save_weights_only', False)
print(' steps_per_epoch = %s, number epochs = %s' % (STEPS_PER_EPOCH, NUMBER_OF_EPOCHS))
print(' val mode = %s, val monitor = %s, patience = %s' % (val_mode, val_monitor, patience))
(callbacks_dict, tb_logpath) = get_callbacks(val_mode, val_monitor, patience, learning_rate, save_weights_only, onecycle_steps, save_model_path)
chosen_callback = get_chosen_callback(callbacks_dict, keras_options)
if not keras_options['lr_scheduler']:
print(' chosen keras LR scheduler = default')
else:
print(' chosen keras LR scheduler = %s' % keras_options['lr_scheduler'])
recover = lambda x, y: y
print('\nSplitting train into 80+20 percent: train and validation data')
valid_ds1 = full_ds.enumerate().filter(is_valid).map(recover)
train_ds = full_ds.enumerate().filter(is_train).map(recover)
heldout_ds1 = valid_ds1
print(' Splitting validation 20 into 10+10 percent: valid and heldout data')
valid_ds = heldout_ds1.enumerate().filter(is_test).map(recover)
heldout_ds = heldout_ds1.enumerate().filter(is_test).map(recover)
print('\nLoading model and setting params. Will take 2-3 mins. Please be patient.')
shuffle_size = 1000
if num_labels <= 1:
try:
y_test = np.concatenate(list(heldout_ds.map(lambda x, y: y).as_numpy_iterator()))
print(' Single-Label: Heldout data shape: %s' % (y_test.shape,))
max_batch_size = int(min(y_test.shape[0], 4096))
except:
max_batch_size = 48
pass
else:
iters = int(data_size / batch_size) + 1
for (inum, each_target) in enumerate(target):
add_ls = []
for (feats, labs) in heldout_ds.take(iters):
add_ls.append(list(labs[each_target].numpy()))
flat_list = [item for sublist in add_ls for item in sublist]
if inum == 0:
each_array = np.array(flat_list)
else:
each_array = np.c_[each_array, np.array(flat_list)]
y_test = copy.deepcopy(each_array)
print(' Multi-Label: Heldout data shape: %s' % (y_test.shape,))
max_batch_size = y_test.shape[0]
if modeltype == 'Regression':
if (y_test >= 0).all():
output_activation = 'softplus'
print('Setting output activation layer as softplus since there are no negative values')
print(' Shuffle size = %d' % shuffle_size)
train_ds = train_ds.prefetch(tf.data.AUTOTUNE).shuffle(shuffle_size, reshuffle_each_iteration=False, seed=42)
valid_ds = valid_ds.prefetch(tf.data.AUTOTUNE)
if not isinstance(use_my_model, str):
tuner = 'None'
else:
tuner = model_options['tuner']
print(' Training %s model using %s. This will take time...' % (keras_model_type, tuner))
from secrets import randbelow
rand_num = randbelow(10000)
tf.compat.v1.reset_default_graph()
K.clear_session()
if keras_options['lr_scheduler'] in ['expo', 'ExponentialDecay', 'exponentialdecay']:
callbacks_list_tuner = callbacks_dict['early_stop']
else:
callbacks_list_tuner = [chosen_callback, callbacks_dict['early_stop']]
targets = cat_vocab_dict['target_variables']
tune_mode = 'min'
trials_saved_path = os.path.join(save_model_path, 'trials')
if num_labels > 1 and modeltype != 'Regression':
tune_mode = 'max'
else:
tune_mode = val_mode
if tuner.lower() == 'storm':
randomization_factor = 0.5
tuner = MyTuner(project_dir=trials_saved_path, build_fn=build_model_storm, objective_direction=tune_mode, init_random=5, max_iters=max_trials, randomize_axis_factor=randomization_factor, overwrite=True)
start_time1 = time.time()
print(' STORM Tuner max_trials = %d, randomization factor = %0.2f' % (max_trials, randomization_factor))
tuner_epochs = 100
tuner_steps = STEPS_PER_EPOCH
batch_limit = min(max_batch_size, int(5 * find_batch_size(data_size)))
batch_nums = int(min(8, math.log(batch_limit, 3)))
print('Max. batch size = %d, number of batch sizes to try: %d' % (batch_limit, batch_nums))
tuner.search(train_ds, valid_ds, tuner_epochs, tuner_steps, inputs, meta_outputs, cols_len, output_activation, num_predicts, modeltype, optimizer, val_loss, val_metrics, patience, val_mode, data_size, learning_rate, val_monitor, callbacks_list_tuner, modeltype, class_weights, batch_size, batch_limit, batch_nums, targets, nlp_flag, regular_body, project_name, keras_model_type, cat_vocab_dict, model_options)
best_trial = tuner.get_best_trial()
print(' best trial selected as %s' % best_trial)
print('Time taken for tuning hyperparameters = %0.0f (mins)' % ((time.time() - start_time1) / 60))
try:
hpq = tuner.get_best_config()
keras.backend.clear_session()
if len(args) == 2:
(batch_limit, batch_nums) = (args[0], args[1])
batch_size = hpq.Param('batch_size', [32, 64, 128, 256, 512, 1024, 2048], ordered=True)
elif len(args) == 1:
batch_size = args[0]
batch_size = hpq.Param('batch_size', [batch_size])
else:
batch_size = hpq.Param('batch_size', [32, 64, 128, 256, 512, 1024, 2048])
num_layers = hpq.Param('num_layers', [1, 2, 3], ordered=True)
model_body = Sequential([])
activation_fn = hpq.Param('activation', ['relu', 'selu', 'elu'])
use_bias = hpq.Param('use_bias', [True, False])
weight_decay = hpq.Param('weight_decay', np.logspace(-8, -3, 10))
batch_norm = hpq.Param('batch_norm', [True, False])
kernel_initializer = hpq.Param('kernel_initializer', ['glorot_uniform', 'he_normal', 'lecun_normal', 'he_uniform'], ordered=False)
dropout_flag = hpq.Param('use_dropout', [True, False])
batch_norm_flag = hpq.Param('use_batch_norm', [True, False])
num_hidden = [50, 100, 150, 200, 250, 300, 350, 400, 450, 500]
model_body.add(Dense(hpq.Param('kernel_size_' + str(0), num_hidden, ordered=True), use_bias=use_bias, kernel_initializer=kernel_initializer, name='storm_dense_0'))
model_body.add(Activation(activation_fn, name='activation_0'))
if batch_norm_flag:
model_body.add(BatchNormalization(name='batch_norm_0'))
if dropout_flag:
model_body.add(Dropout(hpq.Param('dropout_value', [0.5, 0.6, 0.7, 0.8, 0.9], ordered=True), name='dropout_0'))
kernel_size = hpq.values['kernel_size_' + str(0)]
if dropout_flag:
dropout_value = hpq.values['dropout_value']
else:
dropout_value = 0.5
batch_norm_flag = hpq.values['use_batch_norm']
num_copy = copy.deepcopy(num_layers)
for x in range(num_copy):
kernel_size = int(0.75 * kernel_size)
model_body.add(Dense(kernel_size, name='storm_dense_' + str(x + 1), use_bias=use_bias, kernel_initializer=kernel_initializer))
model_body.add(Activation(activation_fn, name='activation_' + str(x + 100)))
if batch_norm_flag:
model_body.add(BatchNormalization(name='batch_norm_' + str(x + 1)))
if dropout_flag:
model_body.add(Dropout(dropout_value, name='dropout_' + str(x + 1)))
selected_optimizer = hpq.Param('optimizer', ['Adam', 'AdaMax', 'Adagrad', 'SGD', 'RMSprop', 'Nadam', 'nesterov'], ordered=False)
optimizer = return_optimizer_trials(hpq, selected_optimizer)
(best_model, best_optimizer) = (model_body, optimizer)
best_batch = hpq.values['batch_size']
hpq_optimizer = hpq.values['optimizer']
if best_trial.metrics['final_lr'] < 0:
print(' best learning rate less than zero. Resetting it....')
optimizer_lr = 0.01
else:
optimizer_lr = best_trial.metrics['final_lr']
print('Best hyperparameters: %s' % hpq.values)
except:
num_layers = check_keras_options(keras_options, 'num_layers', 2)
model_body = tf.keras.Sequential([])
for l_ in range(num_layers):
model_body.add(layers.Dense(64, activation='relu', kernel_initializer='lecun_normal'))
deep_model = model_body
best_batch = batch_size
hpq_optimizer = 'SGD'
best_optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
optimizer_lr = 0.01
print(' Storm Tuner is erroring. Hence picking defaults including lr = %s' % optimizer_lr)
print('\nSetting best optimizer %s its best learning_rate = %s' % (hpq_optimizer, optimizer_lr))
best_optimizer = return_optimizer(hpq_optimizer)
K.set_value(best_optimizer.learning_rate, optimizer_lr)
if regular_body:
storm_outputs = add_outputs_to_model_body(best_model, meta_outputs)
else:
storm_outputs = add_outputs_to_auto_model_body(best_model, meta_outputs, nlp_flag)
best_model = get_compiled_model(inputs, storm_outputs, output_activation, num_predicts, modeltype, best_optimizer, val_loss, val_metrics, cols_len, targets)
deep_model = best_model
elif tuner.lower() == 'optuna':
optuna_scores = []
def objective(trial):
optimizer_options = ''
n_layers = trial.suggest_int('n_layers', 2, 8)
num_hidden = trial.suggest_categorical('n_units', [50, 100, 150, 200, 250, 300, 350, 400, 450, 500])
weight_decay = trial.suggest_float('weight_decay', 1e-08, 0.001, log=True)
use_bias = trial.suggest_categorical('use_bias', [True, False])
batch_norm = trial.suggest_categorical('batch_norm', [True, False])
add_noise = trial.suggest_categorical('add_noise', [True, False])
dropout = trial.suggest_float('dropout', 0.5, 0.9)
activation_fn = trial.suggest_categorical('activation', ['relu', 'elu', 'selu'])
kernel_initializer = trial.suggest_categorical('kernel_initializer', ['glorot_uniform', 'he_normal', 'lecun_normal', 'he_uniform'])
kernel_size = num_hidden
model = tf.keras.Sequential()
for i in range(n_layers):
kernel_size = int(kernel_size * 0.8)
model.add(tf.keras.layers.Dense(kernel_size, name='opt_dense_' + str(i), use_bias=use_bias, kernel_initializer=kernel_initializer))
model.add(Activation(activation_fn, name='opt_activation_' + str(i)))
if batch_norm:
model.add(BatchNormalization(name='opt_batchnorm_' + str(i)))
if add_noise:
model.add(GaussianNoise(trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)))
model.add(Dropout(dropout, name='opt_drop_' + str(i)))
kwargs = {}
if isinstance(optimizer_options, str):
if optimizer_options == '':
optimizer_options = ['Adam', 'SGD']
optimizer_selected = trial.suggest_categorical('optimizer', optimizer_options)
else:
optimizer_selected = optimizer_options
else:
optimizer_selected = trial.suggest_categorical('optimizer', optimizer_options)
if optimizer_selected == 'Adam':
kwargs['learning_rate'] = trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)
kwargs['epsilon'] = trial.suggest_float('adam_epsilon', 1e-14, 0.0001, log=True)
elif optimizer_selected == 'SGD':
kwargs['learning_rate'] = trial.suggest_float('sgd_opt_learning_rate', 1e-07, 0.001, log=True)
kwargs['momentum'] = trial.suggest_float('sgd_opt_momentum', 0.8, 0.95)
optimizer = getattr(tf.optimizers, optimizer_selected)(**kwargs)
if regular_body:
opt_outputs = add_outputs_to_model_body(model, meta_outputs)
else:
opt_outputs = add_outputs_to_auto_model_body(model, meta_outputs, nlp_flag)
comp_model = get_compiled_model(inputs, opt_outputs, output_activation, num_predicts, modeltype, optimizer, val_loss, val_metrics, cols_len, targets)
opt_model = comp_model
optuna_epochs = 5
history = opt_model.fit(train_ds, validation_data=valid_ds, epochs=optuna_epochs, shuffle=True, callbacks=callbacks_list_tuner, verbose=0)
if num_labels == 1:
score = np.mean(history.history[val_monitor][-5:])
else:
for i in range(num_labels):
metric1 = [x for x in history.history.keys() if (targets[i] in x) & ('loss' not in x)]
val_metric = metric1[0]
if i == 0:
results = history.history[val_metric][-5:]
else:
results = np.c_[results, history.history[val_metric][-5:]]
score = results.mean(axis=1).mean()
optuna_scores.append(score)
return score
study_name = project_name + '_' + keras_model_type + '_study_' + str(rand_num)
if tune_mode == 'max':
study = optuna.create_study(study_name=study_name, direction='maximize', load_if_exists=False)
else:
study = optuna.create_study(study_name=study_name, direction='minimize', load_if_exists=False)
study.optimize(objective, n_trials=max_trials)
print('Best trial score in Optuna: %s' % study.best_trial.value)
print(' Scores mean:', np.mean(optuna_scores), 'std:', np.std(optuna_scores))
print(' Best params: %s' % study.best_params)
optimizer_options = study.best_params['optimizer']
n_layers = study.best_trial.suggest_int('n_layers', 2, 8)
num_hidden = study.best_trial.suggest_categorical('n_units', [50, 100, 150, 200, 250, 300, 350, 400, 450, 500])
weight_decay = study.best_trial.suggest_float('weight_decay', 1e-08, 0.001, log=True)
use_bias = study.best_trial.suggest_categorical('use_bias', [True, False])
batch_norm = study.best_trial.suggest_categorical('batch_norm', [True, False])
add_noise = study.best_trial.suggest_categorical('add_noise', [True, False])
dropout = study.best_trial.suggest_float('dropout', 0.5, 0.9)
activation_fn = study.best_trial.suggest_categorical('activation', ['relu', 'elu', 'selu'])
kernel_initializer = study.best_trial.suggest_categorical('kernel_initializer', ['glorot_uniform', 'he_normal', 'lecun_normal', 'he_uniform'])
kernel_size = num_hidden
model = tf.keras.Sequential()
for i in range(n_layers):
kernel_size = int(kernel_size * 0.8)
model.add(tf.keras.layers.Dense(kernel_size, name='opt_dense_' + str(i), use_bias=use_bias, kernel_initializer=kernel_initializer))
model.add(Activation(activation_fn, name='opt_activation_' + str(i)))
if batch_norm:
model.add(BatchNormalization(name='opt_batchnorm_' + str(i)))
if add_noise:
model.add(GaussianNoise(study.best_trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)))
model.add(Dropout(dropout, name='opt_drop_' + str(i)))
kwargs = {}
if isinstance(optimizer_options, str):
if optimizer_options == '':
optimizer_options = ['Adam', 'SGD']
optimizer_selected = study.best_trial.suggest_categorical('optimizer', optimizer_options)
else:
optimizer_selected = optimizer_options
else:
optimizer_selected = study.best_trial.suggest_categorical('optimizer', optimizer_options)
if optimizer_selected == 'Adam':
kwargs['learning_rate'] = study.best_trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)
kwargs['epsilon'] = study.best_trial.suggest_float('adam_epsilon', 1e-14, 0.0001, log=True)
elif optimizer_selected == 'SGD':
kwargs['learning_rate'] = study.best_trial.suggest_float('sgd_opt_learning_rate', 1e-07, 0.001, log=True)
kwargs['momentum'] = study.best_trial.suggest_float('sgd_opt_momentum', 0.8, 0.95)
optimizer = getattr(tf.optimizers, optimizer_selected)(**kwargs)
if regular_body:
opt_outputs = add_outputs_to_model_body(model, meta_outputs)
else:
opt_outputs = add_outputs_to_auto_model_body(model, meta_outputs, nlp_flag)
comp_model = get_compiled_model(inputs, opt_outputs, output_activation, num_predicts, modeltype, optimizer, val_loss, val_metrics, cols_len, targets)
best_model = comp_model
best_optimizer = best_model.optimizer
n_layers = study.best_trial.suggest_int('n_layers', 2, 8)
num_hidden = study.best_trial.suggest_categorical('n_units', [50, 100, 150, 200, 250, 300, 350, 400, 450, 500])
weight_decay = study.best_trial.suggest_float('weight_decay', 1e-08, 0.001, log=True)
use_bias = study.best_trial.suggest_categorical('use_bias', [True, False])
batch_norm = study.best_trial.suggest_categorical('batch_norm', [True, False])
add_noise = study.best_trial.suggest_categorical('add_noise', [True, False])
dropout = study.best_trial.suggest_float('dropout', 0.5, 0.9)
activation_fn = study.best_trial.suggest_categorical('activation', ['relu', 'elu', 'selu'])
kernel_initializer = study.best_trial.suggest_categorical('kernel_initializer', ['glorot_uniform', 'he_normal', 'lecun_normal', 'he_uniform'])
kernel_size = num_hidden
model = tf.keras.Sequential()
for i in range(n_layers):
kernel_size = int(kernel_size * 0.8)
model.add(tf.keras.layers.Dense(kernel_size, name='opt_dense_' + str(i), use_bias=use_bias, kernel_initializer=kernel_initializer))
model.add(Activation(activation_fn, name='opt_activation_' + str(i)))
if batch_norm:
model.add(BatchNormalization(name='opt_batchnorm_' + str(i)))
if add_noise:
model.add(GaussianNoise(study.best_trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)))
model.add(Dropout(dropout, name='opt_drop_' + str(i)))
kwargs = {}
if isinstance(optimizer_options, str):
if optimizer_options == '':
optimizer_options = ['Adam', 'SGD']
optimizer_selected = study.best_trial.suggest_categorical('optimizer', optimizer_options)
else:
optimizer_selected = optimizer_options
else:
optimizer_selected = study.best_trial.suggest_categorical('optimizer', optimizer_options)
if optimizer_selected == 'Adam':
kwargs['learning_rate'] = study.best_trial.suggest_float('adam_learning_rate', 1e-07, 0.001, log=True)
kwargs['epsilon'] = study.best_trial.suggest_float('adam_epsilon', 1e-14, 0.0001, log=True)
elif optimizer_selected == 'SGD':
kwargs['learning_rate'] = study.best_trial.suggest_float('sgd_opt_learning_rate', 1e-07, 0.001, log=True)
kwargs['momentum'] = study.best_trial.suggest_float('sgd_opt_momentum', 0.8, 0.95)
optimizer = getattr(tf.optimizers, optimizer_selected)(**kwargs)
if regular_body:
opt_outputs = add_outputs_to_model_body(model, meta_outputs)
else:
opt_outputs = add_outputs_to_auto_model_body(model, meta_outputs, nlp_flag)
comp_model = get_compiled_model(inputs, opt_outputs, output_activation, num_predicts, modeltype, optimizer, val_loss, val_metrics, cols_len, targets)
deep_model = comp_model
best_batch = batch_size
optimizer_lr = best_optimizer.learning_rate.numpy()
print('\nBest optimizer = %s and best learning_rate = %s' % (best_optimizer, optimizer_lr))
K.set_value(best_optimizer.learning_rate, optimizer_lr)
elif tuner.lower() == 'none':
print('skipping tuner search since use_my_model flag set to True...')
best_model = use_my_model
deep_model = use_my_model
if regular_body:
best_outputs = add_outputs_to_model_body(best_model, meta_outputs)
deep_outputs = add_outputs_to_model_body(deep_model, meta_outputs)
else:
best_outputs = add_outputs_to_auto_model_body(best_model, meta_outputs, nlp_flag)
deep_outputs = add_outputs_to_auto_model_body(deep_model, meta_outputs, nlp_flag)
best_optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
best_batch = batch_size
optimizer_lr = best_optimizer.learning_rate.numpy()
print('\nBest optimizer = %s and best learning_rate = %s' % (best_optimizer, optimizer_lr))
K.set_value(best_optimizer.learning_rate, optimizer_lr)
best_model = get_compiled_model(inputs, best_outputs, output_activation, num_predicts, modeltype, best_optimizer, val_loss, val_metrics, cols_len, targets)
deep_model = get_compiled_model(inputs, deep_outputs, output_activation, num_predicts, modeltype, best_optimizer, val_loss, val_metrics, cols_len, targets)
train_ds = train_ds.unbatch().batch(best_batch, drop_remainder=True)
train_ds = train_ds.shuffle(shuffle_size, reshuffle_each_iteration=False, seed=42).prefetch(tf.data.AUTOTUNE)
valid_ds = valid_ds.unbatch().batch(best_batch, drop_remainder=True)
valid_ds = valid_ds.prefetch(tf.data.AUTOTUNE)
if keras_options['lr_scheduler'] in ['expo', 'ExponentialDecay', 'exponentialdecay']:
if early_stopping:
callbacks_list = [callbacks_dict['early_stop'], callbacks_dict['tensor_board']]
else:
callbacks_list = [callbacks_dict['tensor_board']]
elif early_stopping:
callbacks_list = [callbacks_dict['early_stop'], callbacks_dict['tensor_board'], chosen_callback]
else:
callbacks_list = [callbacks_dict['tensor_board'], chosen_callback]
print('Model training with best hyperparameters for %d epochs' % NUMBER_OF_EPOCHS)
for each_callback in callbacks_list:
print(' Callback added: %s' % str(each_callback).split('.')[-1])
np.random.seed(42)
tf.random.set_seed(42)
history = best_model.fit(train_ds, validation_data=valid_ds, epochs=NUMBER_OF_EPOCHS, callbacks=callbacks_list, class_weight=class_weights, shuffle=True)
print(' Model training completed. Following metrics available: %s' % history.history.keys())
print('Time taken to train model (in mins) = %0.0f' % ((time.time() - start_time) / 60))
K.clear_session()
sess = get_session()
K.clear_session()
sess.close()
sess = get_session()
try:
del opt_model
del best_model
del deep_model
print('deleted deep and best models from memory')
except:
pass
print(gc.collect())
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 1
config.gpu_options.visible_device_list = '0'
set_session(tf.compat.v1.Session(config=config))
tf.compat.v1.reset_default_graph()
tf.keras.backend.reset_uids()
try:
stopped_epoch = max(5, int(pd.DataFrame(history.history).shape[0] - patience))
except:
stopped_epoch = 100
print(' Stopped epoch = %s' % stopped_epoch)
try:
if modeltype == 'Regression':
plot_history(history, val_monitor[4:], target)
elif modeltype == 'Classification':
plot_history(history, val_monitor[4:], target)
else:
plot_history(history, val_monitor[4:], target)
except:
print(' Plot history is erroring. Tensorboard logs can be found here: %s' % tb_logpath)
print('Time taken to train model (in mins) = %0.0f' % ((time.time() - start_time) / 60))
print(' Stopped epoch = %s' % stopped_epoch)
scores = []
ls = []
print('Held out data actuals shape: %s' % (y_test.shape,))
if verbose >= 1:
try:
print_one_row_from_tf_label(heldout_ds)
except:
print('could not print samples from heldout ds labels')
y_probas = best_model.predict(heldout_ds)
if isinstance(target, str):
if modeltype != 'Regression':
y_test_preds = y_probas.argmax(axis=1)
elif y_test.dtype == 'int':
y_test_preds = y_probas.round().astype(int)
else:
y_test_preds = y_probas.ravel()
elif modeltype != 'Regression':
for each_t in range(len(target)):
if each_t == 0:
y_test_preds = y_probas[each_t].argmax(axis=1).astype(int)
else:
y_test_preds = np.c_[y_test_preds, y_probas[each_t].argmax(axis=1).astype(int)]
else:
for each_t in range(len(target)):
if each_t == 0:
y_test_preds = y_probas[each_t].mean(axis=1)
else:
y_test_preds = np.c_[y_test_preds, y_probas[each_t].mean(axis=1)]
if y_test.dtype == 'int':
y_test_preds = y_test_preds.round().astype(int)
print('\nHeld out predictions shape:%s' % (y_test_preds.shape,))
if verbose >= 1:
if modeltype != 'Regression':
print(' Sample predictions: %s' % y_test_preds[:10])
elif num_labels == 1:
print(' Sample predictions: %s' % y_test_preds.ravel()[:10])
else:
print(' Sample predictions:\n%s' % y_test_preds[:10])
print('\n###########################################################')
print(' Held-out test data set Results:')
num_labels = cat_vocab_dict['num_labels']
num_classes = cat_vocab_dict['num_classes']
if check_for_nan_in_array(y_probas):
y_probas = pd.DataFrame(y_probas).fillna(0).values
elif check_for_nan_in_array(y_test_preds):
y_test_preds = pd.DataFrame(y_test_preds).fillna(0).values.ravel()
if num_labels <= 1:
if modeltype == 'Regression':
print_regression_model_stats(y_test, y_test_preds, target, plot_name=project_name)
plot_regression_residuals(y_test, y_test_preds, target, project_name, num_labels)
else:
print_classification_header(num_classes, num_labels, target)
labels = cat_vocab_dict['original_classes']
if cat_vocab_dict['target_transformed']:
target_names = cat_vocab_dict['transformed_classes']
target_le = cat_vocab_dict['target_le']
y_pred = y_probas.argmax(axis=1)
y_test_trans = target_le.inverse_transform(y_test)
y_pred_trans = target_le.inverse_transform(y_pred)
labels = np.unique(y_test_trans)
plot_classification_results(y_test_trans, y_pred_trans, labels, labels, target)
else:
y_pred = y_probas.argmax(axis=1)
labels = np.unique(y_test)
plot_classification_results(y_test, y_pred, labels, labels, target)
print_classification_metrics(y_test, y_probas, proba_flag=True)
elif modeltype == 'Regression':
print_regression_model_stats(y_test, y_test_preds, target, plot_name=project_name)
plot_regression_residuals(y_test, y_test_preds, target, project_name, num_labels)
else:
try:
targets = cat_vocab_dict['target_variables']
for (i, each_target) in enumerate(targets):
print_classification_header(num_classes, num_labels, each_target)
labels = cat_vocab_dict[each_target + '_original_classes']
if cat_vocab_dict['target_transformed']:
target_names = cat_vocab_dict[each_target + '_transformed_classes']
target_le = cat_vocab_dict['target_le'][i]
y_pred = y_probas[i].argmax(axis=1)
y_test_trans = target_le.inverse_transform(y_test[:, i])
y_pred_trans = target_le.inverse_transform(y_pred)
labels = np.unique(y_test_trans)
plot_classification_results(y_test_trans, y_pred_trans, labels, labels, each_target)
else:
y_pred = y_probas[i].argmax(axis=1)
labels = np.unique(y_test[:, i])
plot_classification_results(y_test[:, i], y_pred, labels, labels, each_target)
print_classification_metrics(y_test[:, i], y_probas[i], proba_flag=True)
print(classification_report(y_test[:, i], y_test_preds[:, i]))
print(confusion_matrix(y_test[:, i], y_test_preds[:, i]))
except:
print_classification_metrics(y_test, y_test_preds, False)
print(classification_report(y_test, y_test_preds))
print('\nFinally, training on full train dataset. This will take time...')
full_ds = full_ds.unbatch().batch(best_batch)
full_ds = full_ds.shuffle(shuffle_size, reshuffle_each_iteration=False, seed=42).prefetch(best_batch)
best_rate = best_model.optimizer.lr.numpy()
if best_rate < 0:
print(' best learning rate less than zero. Resetting it....')
best_rate = 0.01
else:
pass
print(' best learning rate = %s' % best_rate)
K.set_value(deep_model.optimizer.learning_rate, best_rate)
print(' set learning rate using best model:', deep_model.optimizer.learning_rate.numpy())
print(' max epochs for training = %d' % stopped_epoch)
callbacks_list = [callbacks_dict['check_point']]
deep_model.fit(full_ds, epochs=stopped_epoch, class_weight=class_weights, callbacks=callbacks_list, shuffle=True, verbose=0)
save_model_artifacts(deep_model, cat_vocab_dict, var_df, save_model_path, save_model_flag, model_options)
K.clear_session()
tf.compat.v1.reset_default_graph()
sess = get_session()
K.clear_session()
sess.close()
sess = get_session()
try:
del opt_model
del best_model
del deep_model
print('deleted deep and best models from memory')
except:
pass
print(gc.collect())
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 1
config.gpu_options.visible_device_list = '0'
set_session(tf.compat.v1.Session(config=config))
tf.keras.backend.reset_uids()
print('\nDeep_Auto_ViML completed. Total time taken = %0.0f (in mins)' % ((time.time() - start_time) / 60))
return (deep_model, cat_vocab_dict)
|
deep_autoviml
|
positive
|
def main():
"""Print a clients_daily_scalar_aggregates query to stdout."""
parser = argparse.ArgumentParser()
parser.add_argument('--no-parameterize', action='store_true', help='Generate a query without parameters')
parser.add_argument('--source-table', type=str, help='Name of Glean table', default='org_mozilla_fenix_stable.metrics_v1')
parser.add_argument('--product', type=str, default='org_mozilla_fenix')
args = parser.parse_args()
submission_date = 'date_sub(current_date, interval 2 day)' if args.no_parameterize else '@submission_date'
header = f'-- Query generated by: python3 -m bigquery_etl.glam.clients_daily_histogram_aggregates --source-table {args.source_table}' + (' --no-parameterize' if args.no_parameterize else '')
filter_desktop_builds = True if args.product == 'firefox_desktop' else False
schema = get_schema(args.source_table)
<DeepExtract>
metric_type_set = {'timing_distribution', 'memory_distribution', 'custom_distribution'}
metrics: Dict[str, List[str]] = {metric_type: [] for metric_type in metric_type_set}
excluded_metrics = get_etl_excluded_probes_quickfix('fenix')
for root_field in schema:
if root_field['name'] != 'metrics':
continue
for metric_field in root_field['fields']:
metric_type = metric_field['name']
if metric_type not in metric_type_set:
continue
for field in metric_field['fields']:
if field['name'] not in excluded_metrics:
metrics[metric_type].append(field['name'])
distributions = metrics
</DeepExtract>
metrics_sql = get_metrics_sql(distributions).strip()
if not metrics_sql:
print(header)
print('-- Empty query: no probes found!')
sys.exit(1)
print(render_main(header=header, filter_desktop_builds=filter_desktop_builds, source_table=args.source_table, submission_date=submission_date, attributes=ATTRIBUTES, histograms=metrics_sql, ping_type=ping_type_from_table(args.source_table)))
|
def main():
"""Print a clients_daily_scalar_aggregates query to stdout."""
parser = argparse.ArgumentParser()
parser.add_argument('--no-parameterize', action='store_true', help='Generate a query without parameters')
parser.add_argument('--source-table', type=str, help='Name of Glean table', default='org_mozilla_fenix_stable.metrics_v1')
parser.add_argument('--product', type=str, default='org_mozilla_fenix')
args = parser.parse_args()
submission_date = 'date_sub(current_date, interval 2 day)' if args.no_parameterize else '@submission_date'
header = f'-- Query generated by: python3 -m bigquery_etl.glam.clients_daily_histogram_aggregates --source-table {args.source_table}' + (' --no-parameterize' if args.no_parameterize else '')
filter_desktop_builds = True if args.product == 'firefox_desktop' else False
schema = get_schema(args.source_table)
metric_type_set = {'timing_distribution', 'memory_distribution', 'custom_distribution'}
metrics: Dict[str, List[str]] = {metric_type: [] for metric_type in metric_type_set}
excluded_metrics = get_etl_excluded_probes_quickfix('fenix')
for root_field in schema:
if root_field['name'] != 'metrics':
continue
for metric_field in root_field['fields']:
metric_type = metric_field['name']
if metric_type not in metric_type_set:
continue
for field in metric_field['fields']:
if field['name'] not in excluded_metrics:
metrics[metric_type].append(field['name'])
distributions = metrics
metrics_sql = get_metrics_sql(distributions).strip()
if not metrics_sql:
print(header)
print('-- Empty query: no probes found!')
sys.exit(1)
print(render_main(header=header, filter_desktop_builds=filter_desktop_builds, source_table=args.source_table, submission_date=submission_date, attributes=ATTRIBUTES, histograms=metrics_sql, ping_type=ping_type_from_table(args.source_table)))
|
bigquery-etl
|
positive
|
def write_dispense(self, instr) -> str:
"""
An MFSim DISPENSE node has 5 parameters:
nodeid, type, fluidName, volume, nodeName
nodeName <- this means nothing to MFSim
:param instr:
:return:
"""
_ret = 'NODE (%s, DISPENSE, ' % str(self.opid)
capture = instr.defs['var'].volumes
volume = next(iter(capture.values()))
if hasattr(instr.defs['var'], 'points_to'):
_ret += '%s, %s, %s)\n' % (instr.uses[0]['name'], str(volume), instr.defs['var'].points_to.name)
else:
_ret += '%s, %s, %s)\n' % (instr.uses[0]['name'], str(volume), instr.defs['var'].name)
to = list(self.cblock.dag._succ[instr.defs['var'].name])
if len(to) > 1:
<DeepExtract>
_ret = list()
check = instr.uses[0]['name']
found_instr = False
for i in self.cblock.instructions:
if i is instr:
found_instr = True
continue
if not found_instr:
continue
if i.op in {IRInstruction.NOP, IRInstruction.CONDITIONAL}:
continue
self.log.info(i)
if instr.op is IRInstruction.DETECT:
for use in i.uses:
if use['name'] in to:
if i.iid != check:
if instr.uses[1]['name'] == use['name'] and instr.uses[1]['offset'] == use['offset']:
if not _ret:
_ret.append(use['name'])
elif i.defs['name'] in to:
if i.iid != check:
for u in i.uses:
if u['name'] == instr.defs['name'] and u['offset'] == instr.defs['offset']:
if not _ret:
_ret.append(i.defs['var'].name)
if len(_ret) < 1:
self.log.fatal('A non-split instruction has multiple successors!')
exit(-1)
to = _ret
</DeepExtract>
for key in to:
to_instr = [x for x in self.cblock.instructions if x.defs is not None and x.defs['var'].name is key]
for ti in to_instr:
_ret += self.write_edge(self.opid, ti.iid)
self.num_dispense += 1
return _ret
|
def write_dispense(self, instr) -> str:
"""
An MFSim DISPENSE node has 5 parameters:
nodeid, type, fluidName, volume, nodeName
nodeName <- this means nothing to MFSim
:param instr:
:return:
"""
_ret = 'NODE (%s, DISPENSE, ' % str(self.opid)
capture = instr.defs['var'].volumes
volume = next(iter(capture.values()))
if hasattr(instr.defs['var'], 'points_to'):
_ret += '%s, %s, %s)\n' % (instr.uses[0]['name'], str(volume), instr.defs['var'].points_to.name)
else:
_ret += '%s, %s, %s)\n' % (instr.uses[0]['name'], str(volume), instr.defs['var'].name)
to = list(self.cblock.dag._succ[instr.defs['var'].name])
if len(to) > 1:
_ret = list()
check = instr.uses[0]['name']
found_instr = False
for i in self.cblock.instructions:
if i is instr:
found_instr = True
continue
if not found_instr:
continue
if i.op in {IRInstruction.NOP, IRInstruction.CONDITIONAL}:
continue
self.log.info(i)
if instr.op is IRInstruction.DETECT:
for use in i.uses:
if use['name'] in to:
if i.iid != check:
if instr.uses[1]['name'] == use['name'] and instr.uses[1]['offset'] == use['offset']:
if not _ret:
_ret.append(use['name'])
elif i.defs['name'] in to:
if i.iid != check:
for u in i.uses:
if u['name'] == instr.defs['name'] and u['offset'] == instr.defs['offset']:
if not _ret:
_ret.append(i.defs['var'].name)
if len(_ret) < 1:
self.log.fatal('A non-split instruction has multiple successors!')
exit(-1)
to = _ret
for key in to:
to_instr = [x for x in self.cblock.instructions if x.defs is not None and x.defs['var'].name is key]
for ti in to_instr:
_ret += self.write_edge(self.opid, ti.iid)
self.num_dispense += 1
return _ret
|
BioScript
|
positive
|
def std_(self, x_cond, n_samples=10 ** 6):
""" Standard deviation of the fitted distribution conditioned on x_cond
Args:
x_cond: different x values to condition on - numpy array of shape (n_values, ndim_x)
Returns:
Standard deviations sqrt(Var[y|x]) corresponding to x_cond - numpy array of shape (n_values, ndim_y)
"""
<DeepExtract>
if x_cond.ndim == 1:
x_cond = np.expand_dims(x_cond, axis=1)
if Y is not None:
if Y.ndim == 1:
Y = np.expand_dims(Y, axis=1)
assert x_cond.shape[0] == Y.shape[0], 'X and Y must have the same length along axis 0'
assert x_cond.ndim == Y.ndim == 2, 'X and Y must be matrices'
if Y is None:
x_cond = x_cond
else:
x_cond = (x_cond, Y)
</DeepExtract>
assert x_cond.ndim == 2
return self._std_pdf(x_cond, n_samples=n_samples)
|
def std_(self, x_cond, n_samples=10 ** 6):
""" Standard deviation of the fitted distribution conditioned on x_cond
Args:
x_cond: different x values to condition on - numpy array of shape (n_values, ndim_x)
Returns:
Standard deviations sqrt(Var[y|x]) corresponding to x_cond - numpy array of shape (n_values, ndim_y)
"""
if x_cond.ndim == 1:
x_cond = np.expand_dims(x_cond, axis=1)
if Y is not None:
if Y.ndim == 1:
Y = np.expand_dims(Y, axis=1)
assert x_cond.shape[0] == Y.shape[0], 'X and Y must have the same length along axis 0'
assert x_cond.ndim == Y.ndim == 2, 'X and Y must be matrices'
if Y is None:
x_cond = x_cond
else:
x_cond = (x_cond, Y)
assert x_cond.ndim == 2
return self._std_pdf(x_cond, n_samples=n_samples)
|
Conditional_Density_Estimation
|
positive
|
def _handle_error(self, error):
message = ''
if 'name' in error and 'path' in error:
message = "\t* Name: {0} - Path: '{1}'".format(error['name'], error['path'])
else:
<DeepExtract>
message = 'The following error was received: {0}'.format(self.response.text)
</DeepExtract>
if 'value' in error:
message = "{0} - Value: '{1}'".format(message, error['value'])
return message
|
def _handle_error(self, error):
message = ''
if 'name' in error and 'path' in error:
message = "\t* Name: {0} - Path: '{1}'".format(error['name'], error['path'])
else:
message = 'The following error was received: {0}'.format(self.response.text)
if 'value' in error:
message = "{0} - Value: '{1}'".format(message, error['value'])
return message
|
contentful-management.py
|
positive
|
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
(qlen, bsz) = (w.size(0), w.size(1))
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
(w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
(w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen - r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand(klen - r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
rw_head_q = w_head_q + r_w_bias[None]
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))
B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb))
D_ = r_bias[None, :, None]
<DeepExtract>
    x_bd = B_ + D_  # work on a temporary tensor; assigning back to the expression `B_ + D_` is not valid Python
    zero_pad = torch.zeros((x_bd.size(0), 1, *x_bd.size()[2:]), device=x_bd.device, dtype=x_bd.dtype)
    x_padded = torch.cat([zero_pad, x_bd], dim=1)
    x_padded = x_padded.view(x_bd.size(1) + 1, x_bd.size(0), *x_bd.size()[2:])
    x_bd = x_padded[1:].view_as(x_bd)
    zero_triu = False  # assumed default of the inlined _rel_shift helper
    if zero_triu:
        ones = torch.ones((x_bd.size(0), x_bd.size(1)))
        x_bd = x_bd * torch.tril(ones, x_bd.size(1) - x_bd.size(0))[:, :, None, None]
    BD = x_bd
</DeepExtract>
attn_score = AC + BD
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None, :, :, None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:, :, :, None], -float('inf'))
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
output = w + attn_out
else:
output = self.layer_norm(w + attn_out)
return output
|
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
(qlen, bsz) = (w.size(0), w.size(1))
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
(w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
(w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen - r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand(klen - r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
rw_head_q = w_head_q + r_w_bias[None]
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))
B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb))
D_ = r_bias[None, :, None]
    x_bd = B_ + D_  # work on a temporary tensor; assigning back to the expression `B_ + D_` is not valid Python
    zero_pad = torch.zeros((x_bd.size(0), 1, *x_bd.size()[2:]), device=x_bd.device, dtype=x_bd.dtype)
    x_padded = torch.cat([zero_pad, x_bd], dim=1)
    x_padded = x_padded.view(x_bd.size(1) + 1, x_bd.size(0), *x_bd.size()[2:])
    x_bd = x_padded[1:].view_as(x_bd)
    zero_triu = False  # assumed default of the inlined _rel_shift helper
    if zero_triu:
        ones = torch.ones((x_bd.size(0), x_bd.size(1)))
        x_bd = x_bd * torch.tril(ones, x_bd.size(1) - x_bd.size(0))[:, :, None, None]
    BD = x_bd
attn_score = AC + BD
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None, :, :, None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:, :, :, None], -float('inf'))
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
output = w + attn_out
else:
output = self.layer_norm(w + attn_out)
return output
|
complex-order
|
positive
|
def run(self):
try:
<DeepExtract>
print('\x1b[1m{0}\x1b[0m'.format('Removing previous builds…'))
</DeepExtract>
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
<DeepExtract>
print('\x1b[1m{0}\x1b[0m'.format('Building Source and Wheel (universal) distribution…'))
</DeepExtract>
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
<DeepExtract>
print('\x1b[1m{0}\x1b[0m'.format('Uploading the package to PyPI via Twine…'))
</DeepExtract>
os.system('twine upload --repository-url https://test.pypi.org/legacy/ dist/*')
sys.exit()
|
def run(self):
try:
print('\x1b[1m{0}\x1b[0m'.format('Removing previous builds…'))
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
print('\x1b[1m{0}\x1b[0m'.format('Building Source and Wheel (universal) distribution…'))
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
print('\x1b[1m{0}\x1b[0m'.format('Uploading the package to PyPI via Twine…'))
os.system('twine upload --repository-url https://test.pypi.org/legacy/ dist/*')
sys.exit()
|
deephyper
|
positive
|
def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False, headers={}, **kwargs):
assert urls
if json_output:
json_output_.download_urls(urls=urls, title=title, ext=ext, total_size=total_size, refer=refer)
return
if dry_run:
print('Real URLs:\n%s' % '\n'.join(urls))
return
if player:
<DeepExtract>
import subprocess
import shlex
subprocess.call(shlex.split(player) + list(urls))
</DeepExtract>
return
if not total_size:
try:
<DeepExtract>
total_size = sum([url_size(url, faker=faker, headers=headers) for url in urls])
</DeepExtract>
except:
import traceback
traceback.print_exc(file=sys.stdout)
pass
<DeepExtract>
if default_encoding == 'utf-8':
title = get_filename(title)
else:
title = get_filename(title)
</DeepExtract>
<DeepExtract>
global output_filename
if output_filename:
if ext:
output_filename = output_filename + '.' + ext
output_filename = output_filename
merged_ext = ext
if len(urls) > 1 and merge:
from .processor.ffmpeg import has_ffmpeg_installed
if ext in ['flv', 'f4v']:
if has_ffmpeg_installed():
merged_ext = 'mp4'
else:
merged_ext = 'flv'
elif ext == 'mp4':
merged_ext = 'mp4'
elif ext == 'ts':
if has_ffmpeg_installed():
merged_ext = 'mkv'
else:
merged_ext = 'ts'
output_filename = '%s.%s' % (title, merged_ext)
</DeepExtract>
output_filepath = os.path.join(output_dir, output_filename)
if total_size:
if not force and os.path.exists(output_filepath) and (os.path.getsize(output_filepath) >= total_size * 0.9):
print('Skipping %s: file already exists' % output_filepath)
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print('Downloading %s ...' % tr(output_filename))
bar.update()
<DeepExtract>
if refer is not None:
headers['Referer'] = refer
file_size = url_size(url, faker=faker, headers=headers)
if os.path.exists(output_filepath):
if not force and file_size == os.path.getsize(output_filepath):
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(output_filepath)))
elif bar:
bar.update_received(file_size)
return
elif not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(output_filepath)), '...')
elif not os.path.exists(os.path.dirname(output_filepath)):
os.mkdir(os.path.dirname(output_filepath))
temp_filepath = output_filepath + '.download' if file_size != float('inf') else output_filepath
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if received < file_size:
if faker:
headers = fake_headers
elif headers:
headers = headers
else:
headers = {}
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
if timeout:
response = urlopen_with_retry(request.Request(url, headers=headers), timeout=timeout)
else:
response = urlopen_with_retry(request.Request(url, headers=headers))
try:
range_start = int(response.headers['content-range'][6:].split('/')[0].split('-')[0])
end_length = int(response.headers['content-range'][6:].split('/')[1])
range_length = end_length - range_start
except:
content_length = response.headers['content-length']
range_length = int(content_length) if content_length != None else float('inf')
if file_size != received + range_length:
received = 0
if bar:
bar.received = 0
open_mode = 'wb'
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
if received == file_size:
break
else:
headers['Range'] = 'bytes=' + str(received) + '-'
response = urlopen_with_retry(request.Request(url, headers=headers))
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(output_filepath, os.W_OK):
os.remove(output_filepath)
os.rename(temp_filepath, output_filepath)
</DeepExtract>
bar.done()
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
bar.update()
for (i, url) in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
bar.update_piece(i + 1)
<DeepExtract>
if refer is not None:
headers['Referer'] = refer
file_size = url_size(url, faker=faker, headers=headers)
if os.path.exists(filepath):
if not force and file_size == os.path.getsize(filepath):
if not True:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
elif bar:
bar.update_received(file_size)
return
elif not True:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download' if file_size != float('inf') else filepath
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if received < file_size:
if faker:
headers = fake_headers
elif headers:
headers = headers
else:
headers = {}
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
if timeout:
response = urlopen_with_retry(request.Request(url, headers=headers), timeout=timeout)
else:
response = urlopen_with_retry(request.Request(url, headers=headers))
try:
range_start = int(response.headers['content-range'][6:].split('/')[0].split('-')[0])
end_length = int(response.headers['content-range'][6:].split('/')[1])
range_length = end_length - range_start
except:
content_length = response.headers['content-length']
range_length = int(content_length) if content_length != None else float('inf')
if file_size != received + range_length:
received = 0
if bar:
bar.received = 0
open_mode = 'wb'
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
if received == file_size:
break
else:
headers['Range'] = 'bytes=' + str(received) + '-'
response = urlopen_with_retry(request.Request(url, headers=headers))
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath)
os.rename(temp_filepath, filepath)
</DeepExtract>
bar.done()
if not merge:
print()
return
if 'av' in kwargs and kwargs['av']:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_av
ret = ffmpeg_concat_av(parts, output_filepath, ext)
print('Merged into %s' % output_filename)
if ret == 0:
for part in parts:
os.remove(part)
elif ext in ['flv', 'f4v']:
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4
ffmpeg_concat_flv_to_mp4(parts, output_filepath)
else:
from .processor.join_flv import concat_flv
concat_flv(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'mp4':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4
ffmpeg_concat_mp4_to_mp4(parts, output_filepath)
else:
from .processor.join_mp4 import concat_mp4
concat_mp4(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'ts':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
ffmpeg_concat_ts_to_mkv(parts, output_filepath)
else:
from .processor.join_ts import concat_ts
concat_ts(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
else:
print("Can't merge %s files" % ext)
print()
|
def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False, headers={}, **kwargs):
assert urls
if json_output:
json_output_.download_urls(urls=urls, title=title, ext=ext, total_size=total_size, refer=refer)
return
if dry_run:
print('Real URLs:\n%s' % '\n'.join(urls))
return
if player:
import subprocess
import shlex
subprocess.call(shlex.split(player) + list(urls))
return
if not total_size:
try:
total_size = sum([url_size(url, faker=faker, headers=headers) for url in urls])
except:
import traceback
traceback.print_exc(file=sys.stdout)
pass
if default_encoding == 'utf-8':
title = get_filename(title)
else:
title = get_filename(title)
global output_filename
if output_filename:
if ext:
output_filename = output_filename + '.' + ext
output_filename = output_filename
merged_ext = ext
if len(urls) > 1 and merge:
from .processor.ffmpeg import has_ffmpeg_installed
if ext in ['flv', 'f4v']:
if has_ffmpeg_installed():
merged_ext = 'mp4'
else:
merged_ext = 'flv'
elif ext == 'mp4':
merged_ext = 'mp4'
elif ext == 'ts':
if has_ffmpeg_installed():
merged_ext = 'mkv'
else:
merged_ext = 'ts'
output_filename = '%s.%s' % (title, merged_ext)
output_filepath = os.path.join(output_dir, output_filename)
if total_size:
if not force and os.path.exists(output_filepath) and (os.path.getsize(output_filepath) >= total_size * 0.9):
print('Skipping %s: file already exists' % output_filepath)
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print('Downloading %s ...' % tr(output_filename))
bar.update()
if refer is not None:
headers['Referer'] = refer
file_size = url_size(url, faker=faker, headers=headers)
if os.path.exists(output_filepath):
if not force and file_size == os.path.getsize(output_filepath):
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(output_filepath)))
elif bar:
bar.update_received(file_size)
return
elif not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(output_filepath)), '...')
elif not os.path.exists(os.path.dirname(output_filepath)):
os.mkdir(os.path.dirname(output_filepath))
temp_filepath = output_filepath + '.download' if file_size != float('inf') else output_filepath
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if received < file_size:
if faker:
headers = fake_headers
elif headers:
headers = headers
else:
headers = {}
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
if timeout:
response = urlopen_with_retry(request.Request(url, headers=headers), timeout=timeout)
else:
response = urlopen_with_retry(request.Request(url, headers=headers))
try:
range_start = int(response.headers['content-range'][6:].split('/')[0].split('-')[0])
end_length = int(response.headers['content-range'][6:].split('/')[1])
range_length = end_length - range_start
except:
content_length = response.headers['content-length']
range_length = int(content_length) if content_length != None else float('inf')
if file_size != received + range_length:
received = 0
if bar:
bar.received = 0
open_mode = 'wb'
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
if received == file_size:
break
else:
headers['Range'] = 'bytes=' + str(received) + '-'
response = urlopen_with_retry(request.Request(url, headers=headers))
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(output_filepath, os.W_OK):
os.remove(output_filepath)
os.rename(temp_filepath, output_filepath)
bar.done()
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
bar.update()
for (i, url) in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
bar.update_piece(i + 1)
if refer is not None:
headers['Referer'] = refer
file_size = url_size(url, faker=faker, headers=headers)
if os.path.exists(filepath):
if not force and file_size == os.path.getsize(filepath):
if not True:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
elif bar:
bar.update_received(file_size)
return
elif not True:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download' if file_size != float('inf') else filepath
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if received < file_size:
if faker:
headers = fake_headers
elif headers:
headers = headers
else:
headers = {}
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
if timeout:
response = urlopen_with_retry(request.Request(url, headers=headers), timeout=timeout)
else:
response = urlopen_with_retry(request.Request(url, headers=headers))
try:
range_start = int(response.headers['content-range'][6:].split('/')[0].split('-')[0])
end_length = int(response.headers['content-range'][6:].split('/')[1])
range_length = end_length - range_start
except:
content_length = response.headers['content-length']
range_length = int(content_length) if content_length != None else float('inf')
if file_size != received + range_length:
received = 0
if bar:
bar.received = 0
open_mode = 'wb'
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
if received == file_size:
break
else:
headers['Range'] = 'bytes=' + str(received) + '-'
response = urlopen_with_retry(request.Request(url, headers=headers))
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath)
os.rename(temp_filepath, filepath)
bar.done()
if not merge:
print()
return
if 'av' in kwargs and kwargs['av']:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_av
ret = ffmpeg_concat_av(parts, output_filepath, ext)
print('Merged into %s' % output_filename)
if ret == 0:
for part in parts:
os.remove(part)
elif ext in ['flv', 'f4v']:
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4
ffmpeg_concat_flv_to_mp4(parts, output_filepath)
else:
from .processor.join_flv import concat_flv
concat_flv(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'mp4':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4
ffmpeg_concat_mp4_to_mp4(parts, output_filepath)
else:
from .processor.join_mp4 import concat_mp4
concat_mp4(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'ts':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
ffmpeg_concat_ts_to_mkv(parts, output_filepath)
else:
from .processor.join_ts import concat_ts
concat_ts(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
else:
print("Can't merge %s files" % ext)
print()
|
acmpv
|
positive
|
@classmethod
def create(cls, opt_func, lr, layer_groups, **kwargs):
"""Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."""
<DeepExtract>
split_groups = []
for l in layer_groups:
(l1, l2) = ([], [])
for c in l.children():
if isinstance(c, bn_types):
l2.append(c)
else:
l1.append(c)
split_groups += [nn.Sequential(*l1), nn.Sequential(*l2)]
split_groups = split_groups
</DeepExtract>
opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
opt = cls(opt, **kwargs)
(opt.lr, opt.opt_func) = (listify(lr, layer_groups), opt_func)
return opt
|
@classmethod
def create(cls, opt_func, lr, layer_groups, **kwargs):
"""Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."""
split_groups = []
for l in layer_groups:
(l1, l2) = ([], [])
for c in l.children():
if isinstance(c, bn_types):
l2.append(c)
else:
l1.append(c)
split_groups += [nn.Sequential(*l1), nn.Sequential(*l2)]
split_groups = split_groups
opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
opt = cls(opt, **kwargs)
(opt.lr, opt.opt_func) = (listify(lr, layer_groups), opt_func)
return opt
|
End-to-End-Multi-View-Fusion-for-3D-Object-Detection-in-LiDAR-Point-Clouds
|
positive
|
def __init__(self, label, fn_callback, icon=None, tooltip=None, color=(1, 1, 1, 1), align=0, bgcolor=None, bordercolor=(0, 0, 0, 0.4), hovercolor=(1, 1, 1, 0.1), presscolor=(0, 0, 0, 0.2), margin=0, padding=4):
super().__init__(vertical=False, margin=margin)
self.defer_recalc = True
if icon:
<DeepExtract>
self.ui_items.append(icon)
icon.register_dirty_callback(self)
self.dirty()
return icon
</DeepExtract>
<DeepExtract>
self.ui_items.append(UI_Spacer(width=4))
UI_Spacer(width=4).register_dirty_callback(self)
self.dirty()
return UI_Spacer(width=4)
</DeepExtract>
self.tooltip = tooltip
<DeepExtract>
self.ui_items.append(UI_Label(label, color=color, align=align, margin=padding))
UI_Label(label, color=color, align=align, margin=padding).register_dirty_callback(self)
self.dirty()
self.label = UI_Label(label, color=color, align=align, margin=padding)
</DeepExtract>
self.fn_callback = fn_callback
self.pressed = False
self.bgcolor = bgcolor
self.bordercolor = bordercolor
self.presscolor = presscolor
self.hovercolor = hovercolor
self.mouse = None
self.hovering = False
self.defer_recalc = False
|
def __init__(self, label, fn_callback, icon=None, tooltip=None, color=(1, 1, 1, 1), align=0, bgcolor=None, bordercolor=(0, 0, 0, 0.4), hovercolor=(1, 1, 1, 0.1), presscolor=(0, 0, 0, 0.2), margin=0, padding=4):
super().__init__(vertical=False, margin=margin)
self.defer_recalc = True
if icon:
self.ui_items.append(icon)
icon.register_dirty_callback(self)
self.dirty()
return icon
self.ui_items.append(UI_Spacer(width=4))
UI_Spacer(width=4).register_dirty_callback(self)
self.dirty()
return UI_Spacer(width=4)
self.tooltip = tooltip
self.ui_items.append(UI_Label(label, color=color, align=align, margin=padding))
UI_Label(label, color=color, align=align, margin=padding).register_dirty_callback(self)
self.dirty()
self.label = UI_Label(label, color=color, align=align, margin=padding)
self.fn_callback = fn_callback
self.pressed = False
self.bgcolor = bgcolor
self.bordercolor = bordercolor
self.presscolor = presscolor
self.hovercolor = hovercolor
self.mouse = None
self.hovering = False
self.defer_recalc = False
|
addon_common
|
positive
|
def detect_object(inference, camera, classes, threshold, out_dir, range_x=[0, 1], range_y=[0, 1]):
"""Detects objects belonging to given classes in camera stream."""
stream = io.BytesIO()
camera.capture(stream, format='jpeg')
stream.seek(0)
image = Image.open(stream)
rgb_histogram = np.array(image.histogram()).reshape((3, 256))
green_peak = np.argmax(rgb_histogram[1, :])
if green_peak < 3:
time.sleep(1.0)
return (False, None, None)
debug_data = []
detection = False
max_accumulator = 0.0
print('Inferring...')
for p in crop_parameters(image, range_x, range_y):
im_crop = image.crop(p)
accumulator = 0.0
infer_classes = image_classification.get_classes(inference.run(im_crop), top_k=5, threshold=0.05)
corner = [p[0], p[1]]
print(corner)
for (idx, (label, score)) in enumerate(infer_classes):
debug_data.append((corner, im_crop.size, idx, label, score))
if label in classes:
accumulator += score
if accumulator > max_accumulator:
max_accumulator = accumulator
if accumulator >= threshold:
detection = True
break
if out_dir:
<DeepExtract>
global debug_idx
if debug_idx == 0:
for filepath in [f for f in os.listdir(out_dir) if f.startswith('image_')]:
try:
path_idx = int(filepath[6:12]) + 1
debug_idx = max(debug_idx, path_idx)
except BaseException:
pass
print('debug_idx:', debug_idx)
if filename is None:
output_path = os.path.join(out_dir, 'image_%06d.jpg' % debug_idx)
debug_idx += 1
else:
output_path = os.path.join(out_dir, filename)
image.save(output_path)
with open(output_path + '_classes.txt', 'w') as f:
for debug_tuple in debug_data:
f.write('%s + %s Result %d: %s (prob=%f)\n' % debug_tuple)
with open(output_path + '_classes.pkl', 'wb') as f:
pickle.dump(debug_data, f, protocol=0)
</DeepExtract>
print('Accumulator: %f' % max_accumulator)
print('Detection!' if detection else 'Non Detection')
return (detection, image, debug_data)
|
def detect_object(inference, camera, classes, threshold, out_dir, range_x=[0, 1], range_y=[0, 1]):
"""Detects objects belonging to given classes in camera stream."""
stream = io.BytesIO()
camera.capture(stream, format='jpeg')
stream.seek(0)
image = Image.open(stream)
rgb_histogram = np.array(image.histogram()).reshape((3, 256))
green_peak = np.argmax(rgb_histogram[1, :])
if green_peak < 3:
time.sleep(1.0)
return (False, None, None)
debug_data = []
detection = False
max_accumulator = 0.0
print('Inferring...')
for p in crop_parameters(image, range_x, range_y):
im_crop = image.crop(p)
accumulator = 0.0
infer_classes = image_classification.get_classes(inference.run(im_crop), top_k=5, threshold=0.05)
corner = [p[0], p[1]]
print(corner)
for (idx, (label, score)) in enumerate(infer_classes):
debug_data.append((corner, im_crop.size, idx, label, score))
if label in classes:
accumulator += score
if accumulator > max_accumulator:
max_accumulator = accumulator
if accumulator >= threshold:
detection = True
break
if out_dir:
global debug_idx
if debug_idx == 0:
for filepath in [f for f in os.listdir(out_dir) if f.startswith('image_')]:
try:
path_idx = int(filepath[6:12]) + 1
debug_idx = max(debug_idx, path_idx)
except BaseException:
pass
print('debug_idx:', debug_idx)
if filename is None:
output_path = os.path.join(out_dir, 'image_%06d.jpg' % debug_idx)
debug_idx += 1
else:
output_path = os.path.join(out_dir, filename)
image.save(output_path)
with open(output_path + '_classes.txt', 'w') as f:
for debug_tuple in debug_data:
f.write('%s + %s Result %d: %s (prob=%f)\n' % debug_tuple)
with open(output_path + '_classes.pkl', 'wb') as f:
pickle.dump(debug_data, f, protocol=0)
print('Accumulator: %f' % max_accumulator)
print('Detection!' if detection else 'Non Detection')
return (detection, image, debug_data)
|
aiyprojects-raspbian
|
positive
|
def collect_stats_rows(rows, response, path):
if isinstance(response, dict):
for (k, v) in response.iteritems():
<DeepExtract>
if isinstance(v, dict):
for (k, v) in v.iteritems():
collect_stats_rows(rows, v, path + [k] + [k])
elif isinstance(v, (tuple, list)):
for e in v:
collect_stats_rows(rows, e, path + [k])
else:
rows.append({'_source': {'_metric_name': '.'.join(path + [k]), 'value': v}})
</DeepExtract>
elif isinstance(response, (tuple, list)):
for e in response:
<DeepExtract>
if isinstance(e, dict):
for (k, v) in e.iteritems():
collect_stats_rows(rows, v, path + [k])
elif isinstance(e, (tuple, list)):
for e in e:
collect_stats_rows(rows, e, path)
else:
rows.append({'_source': {'_metric_name': '.'.join(path), 'value': e}})
</DeepExtract>
else:
rows.append({'_source': {'_metric_name': '.'.join(path), 'value': response}})
|
def collect_stats_rows(rows, response, path):
if isinstance(response, dict):
for (k, v) in response.iteritems():
if isinstance(v, dict):
for (k, v) in v.iteritems():
collect_stats_rows(rows, v, path + [k] + [k])
elif isinstance(v, (tuple, list)):
for e in v:
collect_stats_rows(rows, e, path + [k])
else:
rows.append({'_source': {'_metric_name': '.'.join(path + [k]), 'value': v}})
elif isinstance(response, (tuple, list)):
for e in response:
if isinstance(e, dict):
for (k, v) in e.iteritems():
collect_stats_rows(rows, v, path + [k])
elif isinstance(e, (tuple, list)):
for e in e:
collect_stats_rows(rows, e, path)
else:
rows.append({'_source': {'_metric_name': '.'.join(path), 'value': e}})
else:
rows.append({'_source': {'_metric_name': '.'.join(path), 'value': response}})
|
es-monitor
|
positive
|
def _create_table(df: pd.DataFrame, cursor: pg8000.Cursor, table: str, schema: str, mode: str, index: bool, dtype: Optional[Dict[str, str]], varchar_lengths: Optional[Dict[str, int]]) -> None:
if mode == 'overwrite':
<DeepExtract>
schema_str = f'"{schema}".' if schema else ''
sql = f'DROP TABLE IF EXISTS {schema_str}"{table}"'
_logger.debug('Drop table query:\n%s', sql)
cursor.execute(sql)
</DeepExtract>
elif _does_table_exist(cursor=cursor, schema=schema, table=table):
return
postgresql_types: Dict[str, str] = _data_types.database_types_from_pandas(df=df, index=index, dtype=dtype, varchar_lengths_default='TEXT', varchar_lengths=varchar_lengths, converter_func=_data_types.pyarrow2postgresql)
cols_str: str = ''.join([f'"{k}" {v},\n' for (k, v) in postgresql_types.items()])[:-2]
sql = f'CREATE TABLE IF NOT EXISTS "{schema}"."{table}" (\n{cols_str})'
_logger.debug('Create table query:\n%s', sql)
cursor.execute(sql)
|
def _create_table(df: pd.DataFrame, cursor: pg8000.Cursor, table: str, schema: str, mode: str, index: bool, dtype: Optional[Dict[str, str]], varchar_lengths: Optional[Dict[str, int]]) -> None:
if mode == 'overwrite':
schema_str = f'"{schema}".' if schema else ''
sql = f'DROP TABLE IF EXISTS {schema_str}"{table}"'
_logger.debug('Drop table query:\n%s', sql)
cursor.execute(sql)
elif _does_table_exist(cursor=cursor, schema=schema, table=table):
return
postgresql_types: Dict[str, str] = _data_types.database_types_from_pandas(df=df, index=index, dtype=dtype, varchar_lengths_default='TEXT', varchar_lengths=varchar_lengths, converter_func=_data_types.pyarrow2postgresql)
cols_str: str = ''.join([f'"{k}" {v},\n' for (k, v) in postgresql_types.items()])[:-2]
sql = f'CREATE TABLE IF NOT EXISTS "{schema}"."{table}" (\n{cols_str})'
_logger.debug('Create table query:\n%s', sql)
cursor.execute(sql)
|
aws-data-wrangler
|
positive
|
def F(x, CDFVAL):
<DeepExtract>
_complementary = False
order = parameter.order
if int(x) == 1:
F = 1
value = F
if int(x) == -1:
F = 0
value = F
if order > 0:
median = (beta ** 2 - alpha ** 2) / (2 * order + alpha + beta) ** 2
else:
median = 2.0 / (1.0 + (alpha + 1.0) / (beta + 1.0)) - 1.0
if x > median:
x = -x
_complementary = True
(alpha, beta) = (beta, alpha)
(parameter.shape_parameter_A, parameter.shape_parameter_B) = (parameter.shape_parameter_B, parameter.shape_parameter_A)
logfactor = 0.0
for i in range(0, int(order)):
quadratic_root = 2.0 / (x + 1.0) * (zeroes[i] + 1.0) - 1.0
recurrence_ab = self._quadratic_modification(recurrence_ab, quadratic_root)
logfactor += np.log(recurrence_ab[0, 1] * ((x + 1.0) / 2.0) ** 2 * scaling_kn_factor)
recurrence_ab[0, 1] = 1
linear_root = (3 - x) / (1 + x)
for j in range(0, int(A)):
recurrence_ab = self._linear_modification(recurrence_ab, linear_root)
logfactor += logfactor + np.log(recurrence_ab[0, 1] * 1.0 / 2.0 * (x + 1.0))
recurrence_ab[0, 1] = 1
(u, w) = parameter._get_local_quadrature(M - 1, recurrence_ab)
integral = np.dot(w, (2.0 - 1.0 / 2.0 * (u + 1.0) * (x + 1.0)) ** (alpha - A))
F = np.exp(logfactor - alpha * np.log(2.0) - betaln(beta + 1.0, alpha + 1.0) - np.log(beta + 1.0) + (beta + 1) * np.log((x + 1.0) / 2.0)) * integral
F = np.asscalar(F)
if _complementary:
F = 1 - F
value = F
</DeepExtract>
value = value - CDFVAL
return value
|
def F(x, CDFVAL):
_complementary = False
order = parameter.order
if int(x) == 1:
F = 1
value = F
if int(x) == -1:
F = 0
value = F
if order > 0:
median = (beta ** 2 - alpha ** 2) / (2 * order + alpha + beta) ** 2
else:
median = 2.0 / (1.0 + (alpha + 1.0) / (beta + 1.0)) - 1.0
if x > median:
x = -x
_complementary = True
(alpha, beta) = (beta, alpha)
(parameter.shape_parameter_A, parameter.shape_parameter_B) = (parameter.shape_parameter_B, parameter.shape_parameter_A)
logfactor = 0.0
for i in range(0, int(order)):
quadratic_root = 2.0 / (x + 1.0) * (zeroes[i] + 1.0) - 1.0
recurrence_ab = self._quadratic_modification(recurrence_ab, quadratic_root)
logfactor += np.log(recurrence_ab[0, 1] * ((x + 1.0) / 2.0) ** 2 * scaling_kn_factor)
recurrence_ab[0, 1] = 1
linear_root = (3 - x) / (1 + x)
for j in range(0, int(A)):
recurrence_ab = self._linear_modification(recurrence_ab, linear_root)
logfactor += logfactor + np.log(recurrence_ab[0, 1] * 1.0 / 2.0 * (x + 1.0))
recurrence_ab[0, 1] = 1
(u, w) = parameter._get_local_quadrature(M - 1, recurrence_ab)
integral = np.dot(w, (2.0 - 1.0 / 2.0 * (u + 1.0) * (x + 1.0)) ** (alpha - A))
F = np.exp(logfactor - alpha * np.log(2.0) - betaln(beta + 1.0, alpha + 1.0) - np.log(beta + 1.0) + (beta + 1) * np.log((x + 1.0) / 2.0)) * integral
F = np.asscalar(F)
if _complementary:
F = 1 - F
value = F
value = value - CDFVAL
return value
|
equadratures
|
positive
|
def _build_cluster_args(**pars: Any) -> Dict[str, Any]:
account_id: str = sts.get_account_id(boto3_session=pars['boto3_session'])
region: str = _utils.get_region_from_session(boto3_session=pars['boto3_session'])
if pars.get('logging_s3_path') is None:
<DeepExtract>
if account_id is None:
_account_id: str = sts.get_account_id(boto3_session=pars['boto3_session'])
else:
_account_id = account_id
if region is None and None is not None:
_region: str = _utils.get_region_from_session(boto3_session=pars['boto3_session'])
elif region is None and None is None:
raise exceptions.InvalidArgumentCombination('You must pass region or subnet_id or both.')
else:
_region = region
pars['logging_s3_path'] = f's3://aws-logs-{_account_id}-{_region}/elasticmapreduce/'
</DeepExtract>
spark_env: Optional[Dict[str, str]] = None
yarn_env: Optional[Dict[str, str]] = None
livy_env: Optional[Dict[str, str]] = None
if pars['spark_pyarrow'] is True:
if pars['spark_defaults'] is None:
pars['spark_defaults'] = {'spark.sql.execution.arrow.enabled': 'true'}
else:
pars['spark_defaults']['spark.sql.execution.arrow.enabled'] = 'true'
spark_env = {'ARROW_PRE_0_15_IPC_FORMAT': '1'}
yarn_env = {'ARROW_PRE_0_15_IPC_FORMAT': '1'}
livy_env = {'ARROW_PRE_0_15_IPC_FORMAT': '1'}
if pars['python3'] is True:
if spark_env is None:
spark_env = {'PYSPARK_PYTHON': '/usr/bin/python3'}
else:
spark_env['PYSPARK_PYTHON'] = '/usr/bin/python3'
if pars['spark_jars_path'] is not None:
paths: str = ','.join(pars['spark_jars_path'])
if pars['spark_defaults'] is None:
pars['spark_defaults'] = {'spark.jars': paths}
else:
pars['spark_defaults']['spark.jars'] = paths
args: Dict[str, Any] = {'Name': pars['cluster_name'], 'LogUri': pars['logging_s3_path'], 'ReleaseLabel': pars['emr_release'], 'VisibleToAllUsers': pars['visible_to_all_users'], 'JobFlowRole': pars['emr_ec2_role'], 'ServiceRole': pars['emr_role'], 'Instances': {'KeepJobFlowAliveWhenNoSteps': pars['keep_cluster_alive_when_no_steps'], 'TerminationProtected': pars['termination_protected'], 'Ec2SubnetId': pars['subnet_id'], 'InstanceFleets': []}, 'StepConcurrencyLevel': pars['step_concurrency_level']}
if pars['auto_termination_policy'] is not None:
args['AutoTerminationPolicy'] = pars['auto_termination_policy']
if pars['custom_ami_id'] is not None:
args['CustomAmiId'] = pars['custom_ami_id']
if pars['key_pair_name'] is not None:
args['Instances']['Ec2KeyName'] = pars['key_pair_name']
if pars['security_group_master'] is not None:
args['Instances']['EmrManagedMasterSecurityGroup'] = pars['security_group_master']
if pars['security_groups_master_additional'] is not None:
args['Instances']['AdditionalMasterSecurityGroups'] = pars['security_groups_master_additional']
if pars['security_group_slave'] is not None:
args['Instances']['EmrManagedSlaveSecurityGroup'] = pars['security_group_slave']
if pars['security_groups_slave_additional'] is not None:
args['Instances']['AdditionalSlaveSecurityGroups'] = pars['security_groups_slave_additional']
if pars['security_group_service_access'] is not None:
args['Instances']['ServiceAccessSecurityGroup'] = pars['security_group_service_access']
args['Configurations'] = [{'Classification': 'spark-log4j', 'Properties': {'log4j.rootCategory': f"{pars['spark_log_level']}, console"}}] if not pars['configurations'] else pars['configurations']
if pars['docker'] is True:
if pars.get('extra_public_registries') is None:
extra_public_registries: List[str] = []
else:
extra_public_registries = pars['extra_public_registries']
registries: str = f"local,centos,{account_id}.dkr.ecr.{region}.amazonaws.com,{','.join(extra_public_registries)}"
registries = registries[:-1] if registries.endswith(',') else registries
args['Configurations'].append({'Classification': 'container-executor', 'Properties': {}, 'Configurations': [{'Classification': 'docker', 'Properties': {'docker.privileged-containers.registries': registries, 'docker.trusted.registries': registries}, 'Configurations': []}]})
if spark_env is not None:
args['Configurations'].append({'Classification': 'spark-env', 'Properties': {}, 'Configurations': [{'Classification': 'export', 'Properties': spark_env, 'Configurations': []}]})
if yarn_env is not None:
args['Configurations'].append({'Classification': 'yarn-env', 'Properties': {}, 'Configurations': [{'Classification': 'export', 'Properties': yarn_env, 'Configurations': []}]})
if livy_env is not None:
args['Configurations'].append({'Classification': 'livy-env', 'Properties': {}, 'Configurations': [{'Classification': 'export', 'Properties': livy_env, 'Configurations': []}]})
if pars['spark_glue_catalog'] is True:
args['Configurations'].append({'Classification': 'spark-hive-site', 'Properties': {'hive.metastore.client.factory.class': 'com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory'}, 'Configurations': []})
if pars['hive_glue_catalog'] is True:
hive_conf: Dict[str, Any] = {'Classification': 'hive-site', 'Properties': {}, 'Configurations': []}
hive_conf['Properties']['hive.metastore.client.factory.class'] = 'com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory'
args['Configurations'].append(hive_conf)
if pars['presto_glue_catalog'] is True:
args['Configurations'].append({'Classification': 'presto-connector-hive', 'Properties': {'hive.metastore.glue.datacatalog.enabled': 'true'}, 'Configurations': []})
if pars['consistent_view'] is True:
args['Configurations'].append({'Classification': 'emrfs-site', 'Properties': {'fs.s3.consistent.retryPeriodSeconds': str(pars.get('consistent_view_retry_seconds', '10')), 'fs.s3.consistent': 'true', 'fs.s3.consistent.retryCount': str(pars.get('consistent_view_retry_count', '5')), 'fs.s3.consistent.metadata.tableName': pars.get('consistent_view_table_name', 'EmrFSMetadata')}})
if pars['maximize_resource_allocation'] is True:
args['Configurations'].append({'Classification': 'spark', 'Properties': {'maximizeResourceAllocation': 'true'}})
if pars['spark_defaults'] is not None:
spark_defaults: Dict[str, Union[str, Dict[str, str]]] = {'Classification': 'spark-defaults', 'Properties': pars['spark_defaults']}
args['Configurations'].append(spark_defaults)
if pars.get('custom_classifications') is not None:
for c in pars['custom_classifications']:
args['Configurations'].append(c)
if pars['applications']:
args['Applications'] = [{'Name': x} for x in pars['applications']]
if pars['bootstraps_paths']:
args['BootstrapActions'] = [{'Name': x, 'ScriptBootstrapAction': {'Path': x}} for x in pars['bootstraps_paths']]
if pars['debugging'] is True or pars['steps'] is not None:
args['Steps'] = []
if pars['debugging'] is True:
args['Steps'].append({'Name': 'Setup Hadoop Debugging', 'ActionOnFailure': 'TERMINATE_CLUSTER', 'HadoopJarStep': {'Jar': 'command-runner.jar', 'Args': ['state-pusher-script']}})
if pars['steps'] is not None:
args['Steps'] += pars['steps']
timeout_action_master: str = 'SWITCH_TO_ON_DEMAND' if pars['spot_timeout_to_on_demand_master'] else 'TERMINATE_CLUSTER'
fleet_master: Dict[str, Any] = {'Name': 'MASTER', 'InstanceFleetType': 'MASTER', 'TargetOnDemandCapacity': pars['instance_num_on_demand_master'], 'TargetSpotCapacity': pars['instance_num_spot_master'], 'InstanceTypeConfigs': [{'InstanceType': pars['instance_type_master'], 'WeightedCapacity': 1, 'BidPriceAsPercentageOfOnDemandPrice': pars['spot_bid_percentage_of_on_demand_master'], 'EbsConfiguration': {'EbsBlockDeviceConfigs': [{'VolumeSpecification': {'SizeInGB': pars['instance_ebs_size_master'], 'VolumeType': 'gp2'}, 'VolumesPerInstance': 1}], 'EbsOptimized': True}}]}
if pars['instance_num_spot_master'] > 0:
fleet_master['LaunchSpecifications'] = {'SpotSpecification': {'TimeoutDurationMinutes': pars['spot_provisioning_timeout_master'], 'TimeoutAction': timeout_action_master}}
args['Instances']['InstanceFleets'].append(fleet_master)
if pars['instance_num_spot_core'] > 0 or pars['instance_num_on_demand_core'] > 0:
timeout_action_core = 'SWITCH_TO_ON_DEMAND' if pars['spot_timeout_to_on_demand_core'] else 'TERMINATE_CLUSTER'
fleet_core: Dict[str, Any] = {'Name': 'CORE', 'InstanceFleetType': 'CORE', 'TargetOnDemandCapacity': pars['instance_num_on_demand_core'], 'TargetSpotCapacity': pars['instance_num_spot_core'], 'InstanceTypeConfigs': [{'InstanceType': pars['instance_type_core'], 'WeightedCapacity': 1, 'BidPriceAsPercentageOfOnDemandPrice': pars['spot_bid_percentage_of_on_demand_core'], 'EbsConfiguration': {'EbsBlockDeviceConfigs': [{'VolumeSpecification': {'SizeInGB': pars['instance_ebs_size_core'], 'VolumeType': 'gp2'}, 'VolumesPerInstance': 1}], 'EbsOptimized': True}}]}
if pars['instance_num_spot_core'] > 0:
fleet_core['LaunchSpecifications'] = {'SpotSpecification': {'TimeoutDurationMinutes': pars['spot_provisioning_timeout_core'], 'TimeoutAction': timeout_action_core}}
args['Instances']['InstanceFleets'].append(fleet_core)
if pars['instance_num_spot_task'] > 0 or pars['instance_num_on_demand_task'] > 0:
timeout_action_task: str = 'SWITCH_TO_ON_DEMAND' if pars['spot_timeout_to_on_demand_task'] else 'TERMINATE_CLUSTER'
fleet_task: Dict[str, Any] = {'Name': 'TASK', 'InstanceFleetType': 'TASK', 'TargetOnDemandCapacity': pars['instance_num_on_demand_task'], 'TargetSpotCapacity': pars['instance_num_spot_task'], 'InstanceTypeConfigs': [{'InstanceType': pars['instance_type_task'], 'WeightedCapacity': 1, 'BidPriceAsPercentageOfOnDemandPrice': pars['spot_bid_percentage_of_on_demand_task'], 'EbsConfiguration': {'EbsBlockDeviceConfigs': [{'VolumeSpecification': {'SizeInGB': pars['instance_ebs_size_task'], 'VolumeType': 'gp2'}, 'VolumesPerInstance': 1}], 'EbsOptimized': True}}]}
if pars['instance_num_spot_task'] > 0:
fleet_task['LaunchSpecifications'] = {'SpotSpecification': {'TimeoutDurationMinutes': pars['spot_provisioning_timeout_task'], 'TimeoutAction': timeout_action_task}}
args['Instances']['InstanceFleets'].append(fleet_task)
if pars['tags'] is not None:
args['Tags'] = [{'Key': k, 'Value': v} for (k, v) in pars['tags'].items()]
_logger.debug('args: \n%s', pprint.pformat(args))
return args
|
def _build_cluster_args(**pars: Any) -> Dict[str, Any]:
account_id: str = sts.get_account_id(boto3_session=pars['boto3_session'])
region: str = _utils.get_region_from_session(boto3_session=pars['boto3_session'])
if pars.get('logging_s3_path') is None:
if account_id is None:
_account_id: str = sts.get_account_id(boto3_session=pars['boto3_session'])
else:
_account_id = account_id
if region is None and None is not None:
_region: str = _utils.get_region_from_session(boto3_session=pars['boto3_session'])
elif region is None and None is None:
raise exceptions.InvalidArgumentCombination('You must pass region or subnet_id or both.')
else:
_region = region
pars['logging_s3_path'] = f's3://aws-logs-{_account_id}-{_region}/elasticmapreduce/'
spark_env: Optional[Dict[str, str]] = None
yarn_env: Optional[Dict[str, str]] = None
livy_env: Optional[Dict[str, str]] = None
if pars['spark_pyarrow'] is True:
if pars['spark_defaults'] is None:
pars['spark_defaults'] = {'spark.sql.execution.arrow.enabled': 'true'}
else:
pars['spark_defaults']['spark.sql.execution.arrow.enabled'] = 'true'
spark_env = {'ARROW_PRE_0_15_IPC_FORMAT': '1'}
yarn_env = {'ARROW_PRE_0_15_IPC_FORMAT': '1'}
livy_env = {'ARROW_PRE_0_15_IPC_FORMAT': '1'}
if pars['python3'] is True:
if spark_env is None:
spark_env = {'PYSPARK_PYTHON': '/usr/bin/python3'}
else:
spark_env['PYSPARK_PYTHON'] = '/usr/bin/python3'
if pars['spark_jars_path'] is not None:
paths: str = ','.join(pars['spark_jars_path'])
if pars['spark_defaults'] is None:
pars['spark_defaults'] = {'spark.jars': paths}
else:
pars['spark_defaults']['spark.jars'] = paths
args: Dict[str, Any] = {'Name': pars['cluster_name'], 'LogUri': pars['logging_s3_path'], 'ReleaseLabel': pars['emr_release'], 'VisibleToAllUsers': pars['visible_to_all_users'], 'JobFlowRole': pars['emr_ec2_role'], 'ServiceRole': pars['emr_role'], 'Instances': {'KeepJobFlowAliveWhenNoSteps': pars['keep_cluster_alive_when_no_steps'], 'TerminationProtected': pars['termination_protected'], 'Ec2SubnetId': pars['subnet_id'], 'InstanceFleets': []}, 'StepConcurrencyLevel': pars['step_concurrency_level']}
if pars['auto_termination_policy'] is not None:
args['AutoTerminationPolicy'] = pars['auto_termination_policy']
if pars['custom_ami_id'] is not None:
args['CustomAmiId'] = pars['custom_ami_id']
if pars['key_pair_name'] is not None:
args['Instances']['Ec2KeyName'] = pars['key_pair_name']
if pars['security_group_master'] is not None:
args['Instances']['EmrManagedMasterSecurityGroup'] = pars['security_group_master']
if pars['security_groups_master_additional'] is not None:
args['Instances']['AdditionalMasterSecurityGroups'] = pars['security_groups_master_additional']
if pars['security_group_slave'] is not None:
args['Instances']['EmrManagedSlaveSecurityGroup'] = pars['security_group_slave']
if pars['security_groups_slave_additional'] is not None:
args['Instances']['AdditionalSlaveSecurityGroups'] = pars['security_groups_slave_additional']
if pars['security_group_service_access'] is not None:
args['Instances']['ServiceAccessSecurityGroup'] = pars['security_group_service_access']
args['Configurations'] = [{'Classification': 'spark-log4j', 'Properties': {'log4j.rootCategory': f"{pars['spark_log_level']}, console"}}] if not pars['configurations'] else pars['configurations']
if pars['docker'] is True:
if pars.get('extra_public_registries') is None:
extra_public_registries: List[str] = []
else:
extra_public_registries = pars['extra_public_registries']
registries: str = f"local,centos,{account_id}.dkr.ecr.{region}.amazonaws.com,{','.join(extra_public_registries)}"
registries = registries[:-1] if registries.endswith(',') else registries
args['Configurations'].append({'Classification': 'container-executor', 'Properties': {}, 'Configurations': [{'Classification': 'docker', 'Properties': {'docker.privileged-containers.registries': registries, 'docker.trusted.registries': registries}, 'Configurations': []}]})
if spark_env is not None:
args['Configurations'].append({'Classification': 'spark-env', 'Properties': {}, 'Configurations': [{'Classification': 'export', 'Properties': spark_env, 'Configurations': []}]})
if yarn_env is not None:
args['Configurations'].append({'Classification': 'yarn-env', 'Properties': {}, 'Configurations': [{'Classification': 'export', 'Properties': yarn_env, 'Configurations': []}]})
if livy_env is not None:
args['Configurations'].append({'Classification': 'livy-env', 'Properties': {}, 'Configurations': [{'Classification': 'export', 'Properties': livy_env, 'Configurations': []}]})
if pars['spark_glue_catalog'] is True:
args['Configurations'].append({'Classification': 'spark-hive-site', 'Properties': {'hive.metastore.client.factory.class': 'com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory'}, 'Configurations': []})
if pars['hive_glue_catalog'] is True:
hive_conf: Dict[str, Any] = {'Classification': 'hive-site', 'Properties': {}, 'Configurations': []}
hive_conf['Properties']['hive.metastore.client.factory.class'] = 'com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory'
args['Configurations'].append(hive_conf)
if pars['presto_glue_catalog'] is True:
args['Configurations'].append({'Classification': 'presto-connector-hive', 'Properties': {'hive.metastore.glue.datacatalog.enabled': 'true'}, 'Configurations': []})
if pars['consistent_view'] is True:
args['Configurations'].append({'Classification': 'emrfs-site', 'Properties': {'fs.s3.consistent.retryPeriodSeconds': str(pars.get('consistent_view_retry_seconds', '10')), 'fs.s3.consistent': 'true', 'fs.s3.consistent.retryCount': str(pars.get('consistent_view_retry_count', '5')), 'fs.s3.consistent.metadata.tableName': pars.get('consistent_view_table_name', 'EmrFSMetadata')}})
if pars['maximize_resource_allocation'] is True:
args['Configurations'].append({'Classification': 'spark', 'Properties': {'maximizeResourceAllocation': 'true'}})
if pars['spark_defaults'] is not None:
spark_defaults: Dict[str, Union[str, Dict[str, str]]] = {'Classification': 'spark-defaults', 'Properties': pars['spark_defaults']}
args['Configurations'].append(spark_defaults)
if pars.get('custom_classifications') is not None:
for c in pars['custom_classifications']:
args['Configurations'].append(c)
if pars['applications']:
args['Applications'] = [{'Name': x} for x in pars['applications']]
if pars['bootstraps_paths']:
args['BootstrapActions'] = [{'Name': x, 'ScriptBootstrapAction': {'Path': x}} for x in pars['bootstraps_paths']]
if pars['debugging'] is True or pars['steps'] is not None:
args['Steps'] = []
if pars['debugging'] is True:
args['Steps'].append({'Name': 'Setup Hadoop Debugging', 'ActionOnFailure': 'TERMINATE_CLUSTER', 'HadoopJarStep': {'Jar': 'command-runner.jar', 'Args': ['state-pusher-script']}})
if pars['steps'] is not None:
args['Steps'] += pars['steps']
timeout_action_master: str = 'SWITCH_TO_ON_DEMAND' if pars['spot_timeout_to_on_demand_master'] else 'TERMINATE_CLUSTER'
fleet_master: Dict[str, Any] = {'Name': 'MASTER', 'InstanceFleetType': 'MASTER', 'TargetOnDemandCapacity': pars['instance_num_on_demand_master'], 'TargetSpotCapacity': pars['instance_num_spot_master'], 'InstanceTypeConfigs': [{'InstanceType': pars['instance_type_master'], 'WeightedCapacity': 1, 'BidPriceAsPercentageOfOnDemandPrice': pars['spot_bid_percentage_of_on_demand_master'], 'EbsConfiguration': {'EbsBlockDeviceConfigs': [{'VolumeSpecification': {'SizeInGB': pars['instance_ebs_size_master'], 'VolumeType': 'gp2'}, 'VolumesPerInstance': 1}], 'EbsOptimized': True}}]}
if pars['instance_num_spot_master'] > 0:
fleet_master['LaunchSpecifications'] = {'SpotSpecification': {'TimeoutDurationMinutes': pars['spot_provisioning_timeout_master'], 'TimeoutAction': timeout_action_master}}
args['Instances']['InstanceFleets'].append(fleet_master)
if pars['instance_num_spot_core'] > 0 or pars['instance_num_on_demand_core'] > 0:
timeout_action_core = 'SWITCH_TO_ON_DEMAND' if pars['spot_timeout_to_on_demand_core'] else 'TERMINATE_CLUSTER'
fleet_core: Dict[str, Any] = {'Name': 'CORE', 'InstanceFleetType': 'CORE', 'TargetOnDemandCapacity': pars['instance_num_on_demand_core'], 'TargetSpotCapacity': pars['instance_num_spot_core'], 'InstanceTypeConfigs': [{'InstanceType': pars['instance_type_core'], 'WeightedCapacity': 1, 'BidPriceAsPercentageOfOnDemandPrice': pars['spot_bid_percentage_of_on_demand_core'], 'EbsConfiguration': {'EbsBlockDeviceConfigs': [{'VolumeSpecification': {'SizeInGB': pars['instance_ebs_size_core'], 'VolumeType': 'gp2'}, 'VolumesPerInstance': 1}], 'EbsOptimized': True}}]}
if pars['instance_num_spot_core'] > 0:
fleet_core['LaunchSpecifications'] = {'SpotSpecification': {'TimeoutDurationMinutes': pars['spot_provisioning_timeout_core'], 'TimeoutAction': timeout_action_core}}
args['Instances']['InstanceFleets'].append(fleet_core)
if pars['instance_num_spot_task'] > 0 or pars['instance_num_on_demand_task'] > 0:
timeout_action_task: str = 'SWITCH_TO_ON_DEMAND' if pars['spot_timeout_to_on_demand_task'] else 'TERMINATE_CLUSTER'
fleet_task: Dict[str, Any] = {'Name': 'TASK', 'InstanceFleetType': 'TASK', 'TargetOnDemandCapacity': pars['instance_num_on_demand_task'], 'TargetSpotCapacity': pars['instance_num_spot_task'], 'InstanceTypeConfigs': [{'InstanceType': pars['instance_type_task'], 'WeightedCapacity': 1, 'BidPriceAsPercentageOfOnDemandPrice': pars['spot_bid_percentage_of_on_demand_task'], 'EbsConfiguration': {'EbsBlockDeviceConfigs': [{'VolumeSpecification': {'SizeInGB': pars['instance_ebs_size_task'], 'VolumeType': 'gp2'}, 'VolumesPerInstance': 1}], 'EbsOptimized': True}}]}
if pars['instance_num_spot_task'] > 0:
fleet_task['LaunchSpecifications'] = {'SpotSpecification': {'TimeoutDurationMinutes': pars['spot_provisioning_timeout_task'], 'TimeoutAction': timeout_action_task}}
args['Instances']['InstanceFleets'].append(fleet_task)
if pars['tags'] is not None:
args['Tags'] = [{'Key': k, 'Value': v} for (k, v) in pars['tags'].items()]
_logger.debug('args: \n%s', pprint.pformat(args))
return args
|
aws-data-wrangler
|
positive
|
def designator(self):
localctx = BraketPragmasParser.DesignatorContext(self, self._ctx, self.state)
<DeepExtract>
if hasattr(localctx, 'enterBraketPragma'):
localctx.enterBraketPragma(self)
</DeepExtract>
try:
self.enterOuterAlt(localctx, 1)
self.state = 891
self.match(BraketPragmasParser.LBRACKET)
self.state = 892
<DeepExtract>
_parentctx = self._ctx
_parentState = self.state
localctx = BraketPragmasParser.ExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 124
self.enterRecursionRule(localctx, 124, self.RULE_expression, 0)
self._la = 0
try:
self.enterOuterAlt(localctx, 1)
self.state = 682
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input, 60, self._ctx)
if la_ == 1:
localctx = BraketPragmasParser.ParenthesisExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 656
self.match(BraketPragmasParser.LPAREN)
self.state = 657
self.expression(0)
self.state = 658
self.match(BraketPragmasParser.RPAREN)
pass
elif la_ == 2:
localctx = BraketPragmasParser.UnaryExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 660
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not (_la - 94 & ~63 == 0 and 1 << _la - 94 & (1 << BraketPragmasParser.MINUS - 94 | 1 << BraketPragmasParser.TILDE - 94 | 1 << BraketPragmasParser.EXCLAMATION_POINT - 94) != 0):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 661
self.expression(15)
pass
elif la_ == 3:
localctx = BraketPragmasParser.CastExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 664
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [BraketPragmasParser.BOOL, BraketPragmasParser.BIT, BraketPragmasParser.INT, BraketPragmasParser.UINT, BraketPragmasParser.FLOAT, BraketPragmasParser.ANGLE, BraketPragmasParser.COMPLEX, BraketPragmasParser.DURATION, BraketPragmasParser.STRETCH]:
self.state = 662
self.scalarType()
pass
elif token in [BraketPragmasParser.ARRAY]:
self.state = 663
self.arrayType()
pass
else:
raise NoViableAltException(self)
self.state = 666
self.match(BraketPragmasParser.LPAREN)
self.state = 667
self.expression(0)
self.state = 668
self.match(BraketPragmasParser.RPAREN)
pass
elif la_ == 4:
localctx = BraketPragmasParser.DurationofExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 670
self.match(BraketPragmasParser.DURATIONOF)
self.state = 671
self.match(BraketPragmasParser.LPAREN)
self.state = 672
self.scope()
self.state = 673
self.match(BraketPragmasParser.RPAREN)
pass
elif la_ == 5:
localctx = BraketPragmasParser.CallExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 675
self.match(BraketPragmasParser.Identifier)
self.state = 676
self.match(BraketPragmasParser.LPAREN)
self.state = 678
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la - 58 & ~63 == 0 and 1 << _la - 58 & (1 << BraketPragmasParser.BOOL - 58 | 1 << BraketPragmasParser.BIT - 58 | 1 << BraketPragmasParser.INT - 58 | 1 << BraketPragmasParser.UINT - 58 | 1 << BraketPragmasParser.FLOAT - 58 | 1 << BraketPragmasParser.ANGLE - 58 | 1 << BraketPragmasParser.COMPLEX - 58 | 1 << BraketPragmasParser.ARRAY - 58 | 1 << BraketPragmasParser.DURATION - 58 | 1 << BraketPragmasParser.STRETCH - 58 | 1 << BraketPragmasParser.DURATIONOF - 58 | 1 << BraketPragmasParser.BooleanLiteral - 58 | 1 << BraketPragmasParser.LPAREN - 58 | 1 << BraketPragmasParser.MINUS - 58 | 1 << BraketPragmasParser.TILDE - 58 | 1 << BraketPragmasParser.EXCLAMATION_POINT - 58 | 1 << BraketPragmasParser.ImaginaryLiteral - 58 | 1 << BraketPragmasParser.BinaryIntegerLiteral - 58 | 1 << BraketPragmasParser.OctalIntegerLiteral - 58 | 1 << BraketPragmasParser.DecimalIntegerLiteral - 58 | 1 << BraketPragmasParser.HexIntegerLiteral - 58 | 1 << BraketPragmasParser.Identifier - 58 | 1 << BraketPragmasParser.HardwareQubit - 58 | 1 << BraketPragmasParser.FloatLiteral - 58 | 1 << BraketPragmasParser.TimingLiteral - 58 | 1 << BraketPragmasParser.BitstringLiteral - 58) != 0:
self.state = 677
self.expressionList()
self.state = 680
self.match(BraketPragmasParser.RPAREN)
pass
elif la_ == 6:
localctx = BraketPragmasParser.LiteralExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 681
_la = self._input.LA(1)
if not (_la - 79 & ~63 == 0 and 1 << _la - 79 & (1 << BraketPragmasParser.BooleanLiteral - 79 | 1 << BraketPragmasParser.ImaginaryLiteral - 79 | 1 << BraketPragmasParser.BinaryIntegerLiteral - 79 | 1 << BraketPragmasParser.OctalIntegerLiteral - 79 | 1 << BraketPragmasParser.DecimalIntegerLiteral - 79 | 1 << BraketPragmasParser.HexIntegerLiteral - 79 | 1 << BraketPragmasParser.Identifier - 79 | 1 << BraketPragmasParser.HardwareQubit - 79 | 1 << BraketPragmasParser.FloatLiteral - 79 | 1 << BraketPragmasParser.TimingLiteral - 79 | 1 << BraketPragmasParser.BitstringLiteral - 79) != 0):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
pass
self._ctx.stop = self._input.LT(-1)
self.state = 721
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 62, self._ctx)
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 719
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input, 61, self._ctx)
if la_ == 1:
localctx = BraketPragmasParser.PowerExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 684
if not self.precpred(self._ctx, 16):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 16)')
self.state = 685
localctx.op = self.match(BraketPragmasParser.DOUBLE_ASTERISK)
self.state = 686
self.expression(16)
pass
elif la_ == 2:
localctx = BraketPragmasParser.MultiplicativeExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 687
if not self.precpred(self._ctx, 14):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 14)')
self.state = 688
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not (_la - 95 & ~63 == 0 and 1 << _la - 95 & (1 << BraketPragmasParser.ASTERISK - 95 | 1 << BraketPragmasParser.SLASH - 95 | 1 << BraketPragmasParser.PERCENT - 95) != 0):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 689
self.expression(15)
pass
elif la_ == 3:
localctx = BraketPragmasParser.AdditiveExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 690
if not self.precpred(self._ctx, 13):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 13)')
self.state = 691
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not (_la == BraketPragmasParser.PLUS or _la == BraketPragmasParser.MINUS):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 692
self.expression(14)
pass
elif la_ == 4:
localctx = BraketPragmasParser.BitshiftExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 693
if not self.precpred(self._ctx, 12):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 12)')
self.state = 694
localctx.op = self.match(BraketPragmasParser.BitshiftOperator)
self.state = 695
self.expression(13)
pass
elif la_ == 5:
localctx = BraketPragmasParser.ComparisonExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 696
if not self.precpred(self._ctx, 11):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 11)')
self.state = 697
localctx.op = self.match(BraketPragmasParser.ComparisonOperator)
self.state = 698
self.expression(12)
pass
elif la_ == 6:
localctx = BraketPragmasParser.EqualityExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 699
if not self.precpred(self._ctx, 10):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 10)')
self.state = 700
localctx.op = self.match(BraketPragmasParser.EqualityOperator)
self.state = 701
self.expression(11)
pass
elif la_ == 7:
localctx = BraketPragmasParser.BitwiseAndExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 702
if not self.precpred(self._ctx, 9):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 9)')
self.state = 703
localctx.op = self.match(BraketPragmasParser.AMPERSAND)
self.state = 704
self.expression(10)
pass
elif la_ == 8:
localctx = BraketPragmasParser.BitwiseXorExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 705
if not self.precpred(self._ctx, 8):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 8)')
self.state = 706
localctx.op = self.match(BraketPragmasParser.CARET)
self.state = 707
self.expression(9)
pass
elif la_ == 9:
localctx = BraketPragmasParser.BitwiseOrExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 708
if not self.precpred(self._ctx, 7):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 7)')
self.state = 709
localctx.op = self.match(BraketPragmasParser.PIPE)
self.state = 710
self.expression(8)
pass
elif la_ == 10:
localctx = BraketPragmasParser.LogicalAndExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 711
if not self.precpred(self._ctx, 6):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 6)')
self.state = 712
localctx.op = self.match(BraketPragmasParser.DOUBLE_AMPERSAND)
self.state = 713
self.expression(7)
pass
elif la_ == 11:
localctx = BraketPragmasParser.LogicalOrExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 714
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 5)')
self.state = 715
localctx.op = self.match(BraketPragmasParser.DOUBLE_PIPE)
self.state = 716
self.expression(6)
pass
elif la_ == 12:
localctx = BraketPragmasParser.IndexExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 717
if not self.precpred(self._ctx, 17):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 17)')
self.state = 718
self.indexOperator()
pass
self.state = 723
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 62, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
</DeepExtract>
self.state = 893
self.match(BraketPragmasParser.RBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
<DeepExtract>
if hasattr(listener, 'exitBraketPragma'):
listener.exitBraketPragma(self)
</DeepExtract>
return localctx
|
def designator(self):
localctx = BraketPragmasParser.DesignatorContext(self, self._ctx, self.state)
if hasattr(localctx, 'enterBraketPragma'):
localctx.enterBraketPragma(self)
try:
self.enterOuterAlt(localctx, 1)
self.state = 891
self.match(BraketPragmasParser.LBRACKET)
self.state = 892
_parentctx = self._ctx
_parentState = self.state
localctx = BraketPragmasParser.ExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 124
self.enterRecursionRule(localctx, 124, self.RULE_expression, 0)
self._la = 0
try:
self.enterOuterAlt(localctx, 1)
self.state = 682
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input, 60, self._ctx)
if la_ == 1:
localctx = BraketPragmasParser.ParenthesisExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 656
self.match(BraketPragmasParser.LPAREN)
self.state = 657
self.expression(0)
self.state = 658
self.match(BraketPragmasParser.RPAREN)
pass
elif la_ == 2:
localctx = BraketPragmasParser.UnaryExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 660
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not (_la - 94 & ~63 == 0 and 1 << _la - 94 & (1 << BraketPragmasParser.MINUS - 94 | 1 << BraketPragmasParser.TILDE - 94 | 1 << BraketPragmasParser.EXCLAMATION_POINT - 94) != 0):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 661
self.expression(15)
pass
elif la_ == 3:
localctx = BraketPragmasParser.CastExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 664
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [BraketPragmasParser.BOOL, BraketPragmasParser.BIT, BraketPragmasParser.INT, BraketPragmasParser.UINT, BraketPragmasParser.FLOAT, BraketPragmasParser.ANGLE, BraketPragmasParser.COMPLEX, BraketPragmasParser.DURATION, BraketPragmasParser.STRETCH]:
self.state = 662
self.scalarType()
pass
elif token in [BraketPragmasParser.ARRAY]:
self.state = 663
self.arrayType()
pass
else:
raise NoViableAltException(self)
self.state = 666
self.match(BraketPragmasParser.LPAREN)
self.state = 667
self.expression(0)
self.state = 668
self.match(BraketPragmasParser.RPAREN)
pass
elif la_ == 4:
localctx = BraketPragmasParser.DurationofExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 670
self.match(BraketPragmasParser.DURATIONOF)
self.state = 671
self.match(BraketPragmasParser.LPAREN)
self.state = 672
self.scope()
self.state = 673
self.match(BraketPragmasParser.RPAREN)
pass
elif la_ == 5:
localctx = BraketPragmasParser.CallExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 675
self.match(BraketPragmasParser.Identifier)
self.state = 676
self.match(BraketPragmasParser.LPAREN)
self.state = 678
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la - 58 & ~63 == 0 and 1 << _la - 58 & (1 << BraketPragmasParser.BOOL - 58 | 1 << BraketPragmasParser.BIT - 58 | 1 << BraketPragmasParser.INT - 58 | 1 << BraketPragmasParser.UINT - 58 | 1 << BraketPragmasParser.FLOAT - 58 | 1 << BraketPragmasParser.ANGLE - 58 | 1 << BraketPragmasParser.COMPLEX - 58 | 1 << BraketPragmasParser.ARRAY - 58 | 1 << BraketPragmasParser.DURATION - 58 | 1 << BraketPragmasParser.STRETCH - 58 | 1 << BraketPragmasParser.DURATIONOF - 58 | 1 << BraketPragmasParser.BooleanLiteral - 58 | 1 << BraketPragmasParser.LPAREN - 58 | 1 << BraketPragmasParser.MINUS - 58 | 1 << BraketPragmasParser.TILDE - 58 | 1 << BraketPragmasParser.EXCLAMATION_POINT - 58 | 1 << BraketPragmasParser.ImaginaryLiteral - 58 | 1 << BraketPragmasParser.BinaryIntegerLiteral - 58 | 1 << BraketPragmasParser.OctalIntegerLiteral - 58 | 1 << BraketPragmasParser.DecimalIntegerLiteral - 58 | 1 << BraketPragmasParser.HexIntegerLiteral - 58 | 1 << BraketPragmasParser.Identifier - 58 | 1 << BraketPragmasParser.HardwareQubit - 58 | 1 << BraketPragmasParser.FloatLiteral - 58 | 1 << BraketPragmasParser.TimingLiteral - 58 | 1 << BraketPragmasParser.BitstringLiteral - 58) != 0:
self.state = 677
self.expressionList()
self.state = 680
self.match(BraketPragmasParser.RPAREN)
pass
elif la_ == 6:
localctx = BraketPragmasParser.LiteralExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 681
_la = self._input.LA(1)
if not (_la - 79 & ~63 == 0 and 1 << _la - 79 & (1 << BraketPragmasParser.BooleanLiteral - 79 | 1 << BraketPragmasParser.ImaginaryLiteral - 79 | 1 << BraketPragmasParser.BinaryIntegerLiteral - 79 | 1 << BraketPragmasParser.OctalIntegerLiteral - 79 | 1 << BraketPragmasParser.DecimalIntegerLiteral - 79 | 1 << BraketPragmasParser.HexIntegerLiteral - 79 | 1 << BraketPragmasParser.Identifier - 79 | 1 << BraketPragmasParser.HardwareQubit - 79 | 1 << BraketPragmasParser.FloatLiteral - 79 | 1 << BraketPragmasParser.TimingLiteral - 79 | 1 << BraketPragmasParser.BitstringLiteral - 79) != 0):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
pass
self._ctx.stop = self._input.LT(-1)
self.state = 721
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 62, self._ctx)
while _alt != 2 and _alt != ATN.INVALID_ALT_NUMBER:
if _alt == 1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 719
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input, 61, self._ctx)
if la_ == 1:
localctx = BraketPragmasParser.PowerExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 684
if not self.precpred(self._ctx, 16):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 16)')
self.state = 685
localctx.op = self.match(BraketPragmasParser.DOUBLE_ASTERISK)
self.state = 686
self.expression(16)
pass
elif la_ == 2:
localctx = BraketPragmasParser.MultiplicativeExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 687
if not self.precpred(self._ctx, 14):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 14)')
self.state = 688
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not (_la - 95 & ~63 == 0 and 1 << _la - 95 & (1 << BraketPragmasParser.ASTERISK - 95 | 1 << BraketPragmasParser.SLASH - 95 | 1 << BraketPragmasParser.PERCENT - 95) != 0):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 689
self.expression(15)
pass
elif la_ == 3:
localctx = BraketPragmasParser.AdditiveExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 690
if not self.precpred(self._ctx, 13):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 13)')
self.state = 691
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not (_la == BraketPragmasParser.PLUS or _la == BraketPragmasParser.MINUS):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 692
self.expression(14)
pass
elif la_ == 4:
localctx = BraketPragmasParser.BitshiftExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 693
if not self.precpred(self._ctx, 12):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 12)')
self.state = 694
localctx.op = self.match(BraketPragmasParser.BitshiftOperator)
self.state = 695
self.expression(13)
pass
elif la_ == 5:
localctx = BraketPragmasParser.ComparisonExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 696
if not self.precpred(self._ctx, 11):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 11)')
self.state = 697
localctx.op = self.match(BraketPragmasParser.ComparisonOperator)
self.state = 698
self.expression(12)
pass
elif la_ == 6:
localctx = BraketPragmasParser.EqualityExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 699
if not self.precpred(self._ctx, 10):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 10)')
self.state = 700
localctx.op = self.match(BraketPragmasParser.EqualityOperator)
self.state = 701
self.expression(11)
pass
elif la_ == 7:
localctx = BraketPragmasParser.BitwiseAndExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 702
if not self.precpred(self._ctx, 9):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 9)')
self.state = 703
localctx.op = self.match(BraketPragmasParser.AMPERSAND)
self.state = 704
self.expression(10)
pass
elif la_ == 8:
localctx = BraketPragmasParser.BitwiseXorExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 705
if not self.precpred(self._ctx, 8):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 8)')
self.state = 706
localctx.op = self.match(BraketPragmasParser.CARET)
self.state = 707
self.expression(9)
pass
elif la_ == 9:
localctx = BraketPragmasParser.BitwiseOrExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 708
if not self.precpred(self._ctx, 7):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 7)')
self.state = 709
localctx.op = self.match(BraketPragmasParser.PIPE)
self.state = 710
self.expression(8)
pass
elif la_ == 10:
localctx = BraketPragmasParser.LogicalAndExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 711
if not self.precpred(self._ctx, 6):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 6)')
self.state = 712
localctx.op = self.match(BraketPragmasParser.DOUBLE_AMPERSAND)
self.state = 713
self.expression(7)
pass
elif la_ == 11:
localctx = BraketPragmasParser.LogicalOrExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 714
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 5)')
self.state = 715
localctx.op = self.match(BraketPragmasParser.DOUBLE_PIPE)
self.state = 716
self.expression(6)
pass
elif la_ == 12:
localctx = BraketPragmasParser.IndexExpressionContext(self, BraketPragmasParser.ExpressionContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 717
if not self.precpred(self._ctx, 17):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, 'self.precpred(self._ctx, 17)')
self.state = 718
self.indexOperator()
pass
self.state = 723
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input, 62, self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
self.state = 893
self.match(BraketPragmasParser.RBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
if hasattr(listener, 'exitBraketPragma'):
listener.exitBraketPragma(self)
return localctx
|
amazon-braket-default-simulator-python
|
positive
|
def compose_vis_ddd(self, img_path, flipped, dets, calib, vis_thresh, pred, bev, img_id='out'):
self.imgs[img_id] = cv2.imread(img_path)
if flipped:
self.imgs[img_id] = self.imgs[img_id][:, ::-1].copy()
(h, w) = pred.shape[:2]
(hs, ws) = (self.imgs[img_id].shape[0] / h, self.imgs[img_id].shape[1] / w)
self.imgs[img_id] = cv2.resize(self.imgs[img_id], (w, h))
<DeepExtract>
if self.theme == 'white':
pred = 255 - pred
if pred.shape[0] != self.imgs[img_id].shape[0] or pred.shape[0] != self.imgs[img_id].shape[1]:
pred = cv2.resize(pred, (self.imgs[img_id].shape[1], self.imgs[img_id].shape[0]))
if len(pred.shape) == 2:
pred = pred.reshape(pred.shape[0], pred.shape[1], 1)
self.imgs[img_id] = self.imgs[img_id] * (1.0 - self.opt.hm_transparency) + pred * self.opt.hm_transparency
self.imgs[img_id][self.imgs[img_id] > 255] = 255
self.imgs[img_id][self.imgs[img_id] < 0] = 0
self.imgs[img_id] = self.imgs[img_id].astype(np.uint8).copy()
</DeepExtract>
for item in dets:
if item['score'] > vis_thresh:
dim = item['dim']
loc = item['loc']
rot_y = item['rot_y']
cl = self.colors[int(item['class']) - 1, 0, 0].tolist()
if loc[2] > 1:
box_3d = compute_box_3d(dim, loc, rot_y)
box_2d = project_to_image(box_3d, calib)
box_2d[:, 0] /= hs
box_2d[:, 1] /= ws
self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl)
self.imgs[img_id] = np.concatenate([self.imgs[img_id], self.imgs[bev]], axis=1)
|
def compose_vis_ddd(self, img_path, flipped, dets, calib, vis_thresh, pred, bev, img_id='out'):
self.imgs[img_id] = cv2.imread(img_path)
if flipped:
self.imgs[img_id] = self.imgs[img_id][:, ::-1].copy()
(h, w) = pred.shape[:2]
(hs, ws) = (self.imgs[img_id].shape[0] / h, self.imgs[img_id].shape[1] / w)
self.imgs[img_id] = cv2.resize(self.imgs[img_id], (w, h))
if self.theme == 'white':
pred = 255 - pred
if pred.shape[0] != self.imgs[img_id].shape[0] or pred.shape[0] != self.imgs[img_id].shape[1]:
pred = cv2.resize(pred, (self.imgs[img_id].shape[1], self.imgs[img_id].shape[0]))
if len(pred.shape) == 2:
pred = pred.reshape(pred.shape[0], pred.shape[1], 1)
self.imgs[img_id] = self.imgs[img_id] * (1.0 - self.opt.hm_transparency) + pred * self.opt.hm_transparency
self.imgs[img_id][self.imgs[img_id] > 255] = 255
self.imgs[img_id][self.imgs[img_id] < 0] = 0
self.imgs[img_id] = self.imgs[img_id].astype(np.uint8).copy()
for item in dets:
if item['score'] > vis_thresh:
dim = item['dim']
loc = item['loc']
rot_y = item['rot_y']
cl = self.colors[int(item['class']) - 1, 0, 0].tolist()
if loc[2] > 1:
box_3d = compute_box_3d(dim, loc, rot_y)
box_2d = project_to_image(box_3d, calib)
box_2d[:, 0] /= hs
box_2d[:, 1] /= ws
self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl)
self.imgs[img_id] = np.concatenate([self.imgs[img_id], self.imgs[bev]], axis=1)
|
CenterFusion
|
positive
|
def __call__(self, *args, **kwargs):
assert len(args) <= len(self.inputs), 'Too many arguments provided'
feed_dict = {}
for (inpt, value) in zip(self.inputs, args):
<DeepExtract>
if issubclass(type(inpt), TfInput):
feed_dict.update(inpt.make_feed_dict(value))
elif is_placeholder(inpt):
feed_dict[inpt] = value
</DeepExtract>
kwargs_passed_inpt_names = set()
for inpt in self.inputs[len(args):]:
inpt_name = inpt.name.split(':')[0]
inpt_name = inpt_name.split('/')[-1]
assert inpt_name not in kwargs_passed_inpt_names, 'this function has two arguments with the same name "{}", so kwargs cannot be used.'.format(inpt_name)
if inpt_name in kwargs:
kwargs_passed_inpt_names.add(inpt_name)
<DeepExtract>
if issubclass(type(inpt), TfInput):
feed_dict.update(inpt.make_feed_dict(kwargs.pop(inpt_name)))
elif is_placeholder(inpt):
feed_dict[inpt] = kwargs.pop(inpt_name)
</DeepExtract>
else:
assert inpt in self.givens, 'Missing argument ' + inpt_name
assert len(kwargs) == 0, 'Function got extra arguments ' + str(list(kwargs.keys()))
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
if self.check_nan:
if any((np.isnan(r).any() for r in results)):
raise RuntimeError('Nan detected')
return results
|
def __call__(self, *args, **kwargs):
assert len(args) <= len(self.inputs), 'Too many arguments provided'
feed_dict = {}
for (inpt, value) in zip(self.inputs, args):
if issubclass(type(inpt), TfInput):
feed_dict.update(inpt.make_feed_dict(value))
elif is_placeholder(inpt):
feed_dict[inpt] = value
kwargs_passed_inpt_names = set()
for inpt in self.inputs[len(args):]:
inpt_name = inpt.name.split(':')[0]
inpt_name = inpt_name.split('/')[-1]
assert inpt_name not in kwargs_passed_inpt_names, 'this function has two arguments with the same name "{}", so kwargs cannot be used.'.format(inpt_name)
if inpt_name in kwargs:
kwargs_passed_inpt_names.add(inpt_name)
if issubclass(type(inpt), TfInput):
feed_dict.update(inpt.make_feed_dict(kwargs.pop(inpt_name)))
elif is_placeholder(inpt):
feed_dict[inpt] = kwargs.pop(inpt_name)
else:
assert inpt in self.givens, 'Missing argument ' + inpt_name
assert len(kwargs) == 0, 'Function got extra arguments ' + str(list(kwargs.keys()))
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
if self.check_nan:
if any((np.isnan(r).any() for r in results)):
raise RuntimeError('Nan detected')
return results
|
deeprl-baselines
|
positive
|
def list_lc_collections(lcc_server):
"""This lists all light curve collections made available on the LCC-Server.
If you have an LCC-Server API key present in `~/.astrobase/lccs/` that is
associated with an LCC-Server user account, light curve collections visible
to this user will be returned as well, even if they are not visible to the
public.
Parameters
----------
lcc_server : str
The base URL of the LCC-Server to talk to.
Returns
-------
dict
Returns a dict containing lists of info items per collection. This
includes collection_ids, lists of columns, lists of indexed columns,
lists of full-text indexed columns, detailed column descriptions, number
of objects in each collection, collection sky coverage, etc.
"""
url = '%s/api/collections' % lcc_server
try:
LOGINFO('getting list of recent publicly visible and owned LC collections from %s' % (lcc_server,))
<DeepExtract>
USERHOME = os.path.expanduser('~')
APIKEYFILE = os.path.join(USERHOME, '.astrobase', 'lccs', 'apikey-%s' % lcc_server.replace('https://', 'https-').replace('http://', 'http-'))
if os.path.exists(APIKEYFILE):
fileperm = oct(os.stat(APIKEYFILE)[stat.ST_MODE])
if fileperm == '0100600' or fileperm == '0o100600':
with open(APIKEYFILE) as infd:
(apikey, expires) = infd.read().strip('\n').split()
now = datetime.now(utc)
if sys.version_info[:2] < (3, 7):
expdt = datetime.strptime(expires.replace('Z', ''), '%Y-%m-%dT%H:%M:%S.%f').replace(tzinfo=utc)
else:
expdt = datetime.fromisoformat(expires.replace('Z', '+00:00'))
if now > expdt:
LOGERROR('API key has expired. expiry was on: %s' % expires)
(have_apikey, apikey, expires) = (False, apikey, expires)
else:
(have_apikey, apikey, expires) = (True, apikey, expires)
else:
LOGWARNING('The API key file %s has bad permissions and is insecure, not reading it.\n(you need to chmod 600 this file)' % APIKEYFILE)
(have_apikey, apikey, expires) = (False, None, None)
else:
LOGWARNING('No LCC-Server API key found in: {apikeyfile}'.format(apikeyfile=APIKEYFILE))
(have_apikey, apikey, expires) = (False, None, None)
</DeepExtract>
if not have_apikey:
<DeepExtract>
USERHOME = os.path.expanduser('~')
APIKEYFILE = os.path.join(USERHOME, '.astrobase', 'lccs', 'apikey-%s' % lcc_server.replace('https://', 'https-').replace('http://', 'http-'))
url = '%s/api/key' % lcc_server
resp = urlopen(url)
if resp.code == 200:
respdict = json.loads(resp.read())
else:
LOGERROR('could not fetch the API key from LCC-Server at: %s' % lcc_server)
LOGERROR('the HTTP status code was: %s' % resp.status_code)
(apikey, expires) = None
apikey = respdict['result']['apikey']
expires = respdict['result']['expires']
if not os.path.exists(os.path.dirname(APIKEYFILE)):
os.makedirs(os.path.dirname(APIKEYFILE))
with open(APIKEYFILE, 'w') as outfd:
outfd.write('%s %s\n' % (apikey, expires))
os.chmod(APIKEYFILE, 33152)
LOGINFO('key fetched successfully from: %s. expires on: %s' % (lcc_server, expires))
LOGINFO('written to: %s' % APIKEYFILE)
(apikey, expires) = (apikey, expires)
</DeepExtract>
if apikey:
headers = {'Authorization': 'Bearer: %s' % apikey}
else:
headers = {}
req = Request(url, data=None, headers=headers)
resp = urlopen(req)
lcc_list = json.loads(resp.read())['result']['collections']
return lcc_list
except HTTPError as e:
LOGERROR('could not retrieve list of collections, URL used: %s, error code: %s, reason: %s' % (url, e.code, e.reason))
return None
|
def list_lc_collections(lcc_server):
"""This lists all light curve collections made available on the LCC-Server.
If you have an LCC-Server API key present in `~/.astrobase/lccs/` that is
associated with an LCC-Server user account, light curve collections visible
to this user will be returned as well, even if they are not visible to the
public.
Parameters
----------
lcc_server : str
The base URL of the LCC-Server to talk to.
Returns
-------
dict
Returns a dict containing lists of info items per collection. This
includes collection_ids, lists of columns, lists of indexed columns,
lists of full-text indexed columns, detailed column descriptions, number
of objects in each collection, collection sky coverage, etc.
"""
url = '%s/api/collections' % lcc_server
try:
LOGINFO('getting list of recent publicly visible and owned LC collections from %s' % (lcc_server,))
USERHOME = os.path.expanduser('~')
APIKEYFILE = os.path.join(USERHOME, '.astrobase', 'lccs', 'apikey-%s' % lcc_server.replace('https://', 'https-').replace('http://', 'http-'))
if os.path.exists(APIKEYFILE):
fileperm = oct(os.stat(APIKEYFILE)[stat.ST_MODE])
if fileperm == '0100600' or fileperm == '0o100600':
with open(APIKEYFILE) as infd:
(apikey, expires) = infd.read().strip('\n').split()
now = datetime.now(utc)
if sys.version_info[:2] < (3, 7):
expdt = datetime.strptime(expires.replace('Z', ''), '%Y-%m-%dT%H:%M:%S.%f').replace(tzinfo=utc)
else:
expdt = datetime.fromisoformat(expires.replace('Z', '+00:00'))
if now > expdt:
LOGERROR('API key has expired. expiry was on: %s' % expires)
(have_apikey, apikey, expires) = (False, apikey, expires)
else:
(have_apikey, apikey, expires) = (True, apikey, expires)
else:
LOGWARNING('The API key file %s has bad permissions and is insecure, not reading it.\n(you need to chmod 600 this file)' % APIKEYFILE)
(have_apikey, apikey, expires) = (False, None, None)
else:
LOGWARNING('No LCC-Server API key found in: {apikeyfile}'.format(apikeyfile=APIKEYFILE))
(have_apikey, apikey, expires) = (False, None, None)
if not have_apikey:
USERHOME = os.path.expanduser('~')
APIKEYFILE = os.path.join(USERHOME, '.astrobase', 'lccs', 'apikey-%s' % lcc_server.replace('https://', 'https-').replace('http://', 'http-'))
url = '%s/api/key' % lcc_server
resp = urlopen(url)
if resp.code == 200:
respdict = json.loads(resp.read())
else:
LOGERROR('could not fetch the API key from LCC-Server at: %s' % lcc_server)
LOGERROR('the HTTP status code was: %s' % resp.status_code)
(apikey, expires) = None
apikey = respdict['result']['apikey']
expires = respdict['result']['expires']
if not os.path.exists(os.path.dirname(APIKEYFILE)):
os.makedirs(os.path.dirname(APIKEYFILE))
with open(APIKEYFILE, 'w') as outfd:
outfd.write('%s %s\n' % (apikey, expires))
os.chmod(APIKEYFILE, 33152)
LOGINFO('key fetched successfully from: %s. expires on: %s' % (lcc_server, expires))
LOGINFO('written to: %s' % APIKEYFILE)
(apikey, expires) = (apikey, expires)
if apikey:
headers = {'Authorization': 'Bearer: %s' % apikey}
else:
headers = {}
req = Request(url, data=None, headers=headers)
resp = urlopen(req)
lcc_list = json.loads(resp.read())['result']['collections']
return lcc_list
except HTTPError as e:
LOGERROR('could not retrieve list of collections, URL used: %s, error code: %s, reason: %s' % (url, e.code, e.reason))
return None
|
astrobase
|
positive
|
def validate_expression_arguments(self, function, operator, quantity, units, name=None):
"""
Where a date expression contains a reference to another column we can't
evaluate it here, but we can check that the rest of the expression is
valid
"""
if function:
<DeepExtract>
prefix = f"date_{'function'}_"
try:
return getattr(self, f'{prefix}{function}')
except AttributeError:
methods = [n[len(prefix):] for n in dir(self) if n.startswith(prefix)]
raise InvalidExpressionError(f"Unknown date {'function'} '{function}' (allowed are {', '.join(methods)})")
</DeepExtract>
if operator:
int(quantity)
<DeepExtract>
prefix = f"date_{'unit'}_"
try:
return getattr(self, f'{prefix}{units}')
except AttributeError:
methods = [n[len(prefix):] for n in dir(self) if n.startswith(prefix)]
raise InvalidExpressionError(f"Unknown date {'unit'} '{units}' (allowed are {', '.join(methods)})")
</DeepExtract>
|
def validate_expression_arguments(self, function, operator, quantity, units, name=None):
"""
Where a date expression contains a reference to another column we can't
evaluate it here, but we can check that the rest of the expression is
valid
"""
if function:
prefix = f"date_{'function'}_"
try:
return getattr(self, f'{prefix}{function}')
except AttributeError:
methods = [n[len(prefix):] for n in dir(self) if n.startswith(prefix)]
raise InvalidExpressionError(f"Unknown date {'function'} '{function}' (allowed are {', '.join(methods)})")
if operator:
int(quantity)
prefix = f"date_{'unit'}_"
try:
return getattr(self, f'{prefix}{units}')
except AttributeError:
methods = [n[len(prefix):] for n in dir(self) if n.startswith(prefix)]
raise InvalidExpressionError(f"Unknown date {'unit'} '{units}' (allowed are {', '.join(methods)})")
|
cohort-extractor
|
positive
|
def __init__(self, timeout_secs, message=None, log_level=logging.WARNING, interrupt='thread', sig=signal.SIGINT, id=None, interruptions: Union[Dict, List[Dict]]=None, wait_retry_secs=1, before_interrupt=None):
def interruption():
inter_iter = iter(self._interruptions)
while not self._interrupt_event.is_set():
inter = self._last_attempt = next(inter_iter, self._last_attempt)
log.log(self._log_level, inter.message)
if inter.before_interrupt is not None:
try:
inter.before_interrupt()
except Exception:
log.warning('Swallowing the error raised by `before_interrupt` hook: %s', inter.before_interrupt, exc_info=True)
try:
if inter.interrupt == 'thread':
if isinstance(inter.sig, (type(None), BaseException)):
exc = TimeoutError(inter.message) if inter.sig is None else inter.sig
<DeepExtract>
import ctypes
tid = ctypes.c_long(inter.id)
exc_class = exc if inspect.isclass(exc) else type(exc.__class__.__name__, (exc.__class__,), dict(__init__=lambda s: super(s.__class__, s).__init__(str(exc))))
exc_class = ctypes.py_object(exc_class)
ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, exc_class)
if ret == 0:
raise ValueError(f'Nonexistent thread {inter.id}')
elif ret > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError(f'Failed raising exception in thread {inter.id}')
</DeepExtract>
else:
signal.pthread_kill(inter.id, inter.sig)
elif inter.interrupt == 'process':
os.kill(inter.id, inter.sig)
except Exception:
raise
finally:
self._interrupt_event.wait(inter.wait)
super().__init__(timeout_secs, on_timeout=interruption)
self._timeout_secs = timeout_secs
self._message = message
self._log_level = log_level
self._interrupt = interrupt
self._sig = sig
self._id = id
self._wait_retry_secs = wait_retry_secs
self._before_interrupt = before_interrupt
self._interruptions = [self._make_interruption(i) for i in (interruptions if isinstance(interruptions, list) else [interruptions] if isinstance(interruptions, dict) else [dict()])]
self._interrupt_event = threading.Event()
self._last_attempt = None
|
def __init__(self, timeout_secs, message=None, log_level=logging.WARNING, interrupt='thread', sig=signal.SIGINT, id=None, interruptions: Union[Dict, List[Dict]]=None, wait_retry_secs=1, before_interrupt=None):
def interruption():
inter_iter = iter(self._interruptions)
while not self._interrupt_event.is_set():
inter = self._last_attempt = next(inter_iter, self._last_attempt)
log.log(self._log_level, inter.message)
if inter.before_interrupt is not None:
try:
inter.before_interrupt()
except Exception:
log.warning('Swallowing the error raised by `before_interrupt` hook: %s', inter.before_interrupt, exc_info=True)
try:
if inter.interrupt == 'thread':
if isinstance(inter.sig, (type(None), BaseException)):
exc = TimeoutError(inter.message) if inter.sig is None else inter.sig
import ctypes
tid = ctypes.c_long(inter.id)
exc_class = exc if inspect.isclass(exc) else type(exc.__class__.__name__, (exc.__class__,), dict(__init__=lambda s: super(s.__class__, s).__init__(str(exc))))
exc_class = ctypes.py_object(exc_class)
ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, exc_class)
if ret == 0:
raise ValueError(f'Nonexistent thread {inter.id}')
elif ret > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError(f'Failed raising exception in thread {inter.id}')
else:
signal.pthread_kill(inter.id, inter.sig)
elif inter.interrupt == 'process':
os.kill(inter.id, inter.sig)
except Exception:
raise
finally:
self._interrupt_event.wait(inter.wait)
super().__init__(timeout_secs, on_timeout=interruption)
self._timeout_secs = timeout_secs
self._message = message
self._log_level = log_level
self._interrupt = interrupt
self._sig = sig
self._id = id
self._wait_retry_secs = wait_retry_secs
self._before_interrupt = before_interrupt
self._interruptions = [self._make_interruption(i) for i in (interruptions if isinstance(interruptions, list) else [interruptions] if isinstance(interruptions, dict) else [dict()])]
self._interrupt_event = threading.Event()
self._last_attempt = None
|
automlbenchmark
|
positive
|
@pytest.mark.hps_slow_test
def test_parallel_cv():
"""
Test whether parallel jobs work
"""
<DeepExtract>
(X, y) = make_classification(n_samples=1000, n_features=20, n_redundant=0, n_informative=18, random_state=1, n_clusters_per_class=1)
opt = BayesSearchCV(SVC(), {'C': Real(0.001, 1000.0, prior='log-uniform'), 'gamma': Real(0.001, 10.0, prior='log-uniform'), 'degree': Integer(1, 3)}, n_jobs=1, n_iter=11, n_points=n_points, cv=5, random_state=42)
opt.fit(X, y)
assert opt.score(X, y) > 0.9
opt2 = BayesSearchCV(SVC(), {'C': Real(0.001, 1000.0, prior='log-uniform'), 'gamma': Real(0.001, 10.0, prior='log-uniform'), 'degree': Integer(1, 3)}, n_jobs=1, n_iter=11, n_points=n_points, cv=5, random_state=42)
opt2.fit(X, y)
assert opt.score(X, y) == opt2.score(X, y)
</DeepExtract>
<DeepExtract>
(X, y) = make_classification(n_samples=1000, n_features=20, n_redundant=0, n_informative=18, random_state=1, n_clusters_per_class=1)
opt = BayesSearchCV(SVC(), {'C': Real(0.001, 1000.0, prior='log-uniform'), 'gamma': Real(0.001, 10.0, prior='log-uniform'), 'degree': Integer(1, 3)}, n_jobs=2, n_iter=11, n_points=n_points, cv=5, random_state=42)
opt.fit(X, y)
assert opt.score(X, y) > 0.9
opt2 = BayesSearchCV(SVC(), {'C': Real(0.001, 1000.0, prior='log-uniform'), 'gamma': Real(0.001, 10.0, prior='log-uniform'), 'degree': Integer(1, 3)}, n_jobs=2, n_iter=11, n_points=n_points, cv=5, random_state=42)
opt2.fit(X, y)
assert opt.score(X, y) == opt2.score(X, y)
</DeepExtract>
|
@pytest.mark.hps_slow_test
def test_parallel_cv():
"""
Test whether parallel jobs work
"""
(X, y) = make_classification(n_samples=1000, n_features=20, n_redundant=0, n_informative=18, random_state=1, n_clusters_per_class=1)
opt = BayesSearchCV(SVC(), {'C': Real(0.001, 1000.0, prior='log-uniform'), 'gamma': Real(0.001, 10.0, prior='log-uniform'), 'degree': Integer(1, 3)}, n_jobs=1, n_iter=11, n_points=n_points, cv=5, random_state=42)
opt.fit(X, y)
assert opt.score(X, y) > 0.9
opt2 = BayesSearchCV(SVC(), {'C': Real(0.001, 1000.0, prior='log-uniform'), 'gamma': Real(0.001, 10.0, prior='log-uniform'), 'degree': Integer(1, 3)}, n_jobs=1, n_iter=11, n_points=n_points, cv=5, random_state=42)
opt2.fit(X, y)
assert opt.score(X, y) == opt2.score(X, y)
(X, y) = make_classification(n_samples=1000, n_features=20, n_redundant=0, n_informative=18, random_state=1, n_clusters_per_class=1)
opt = BayesSearchCV(SVC(), {'C': Real(0.001, 1000.0, prior='log-uniform'), 'gamma': Real(0.001, 10.0, prior='log-uniform'), 'degree': Integer(1, 3)}, n_jobs=2, n_iter=11, n_points=n_points, cv=5, random_state=42)
opt.fit(X, y)
assert opt.score(X, y) > 0.9
opt2 = BayesSearchCV(SVC(), {'C': Real(0.001, 1000.0, prior='log-uniform'), 'gamma': Real(0.001, 10.0, prior='log-uniform'), 'degree': Integer(1, 3)}, n_jobs=2, n_iter=11, n_points=n_points, cv=5, random_state=42)
opt2.fit(X, y)
assert opt.score(X, y) == opt2.score(X, y)
|
deephyper
|
positive
|
def test_dict_output():
vsim = elfi.tools.vectorize(simulator)
vsum = elfi.tools.vectorize(summary)
<DeepExtract>
n = 30
rs = random_state or np.random.RandomState()
data = rs.multinomial(n, [0.2, 0.8])
obs = dict(zip(range(n), data))
</DeepExtract>
elfi.new_model()
p = elfi.Prior('dirichlet', [2, 2])
sim = elfi.Simulator(vsim, p, observed=obs)
S = elfi.Summary(vsum, sim)
d = elfi.Distance('euclidean', S)
pool = elfi.OutputPool(['sim'])
rej = elfi.Rejection(d, batch_size=100, pool=pool, output_names=['sim'])
sample = rej.sample(100, n_sim=1000)
mean = np.mean(sample.samples['p'], axis=0)
assert mean[1] > mean[0]
|
def test_dict_output():
vsim = elfi.tools.vectorize(simulator)
vsum = elfi.tools.vectorize(summary)
n = 30
rs = random_state or np.random.RandomState()
data = rs.multinomial(n, [0.2, 0.8])
obs = dict(zip(range(n), data))
elfi.new_model()
p = elfi.Prior('dirichlet', [2, 2])
sim = elfi.Simulator(vsim, p, observed=obs)
S = elfi.Summary(vsum, sim)
d = elfi.Distance('euclidean', S)
pool = elfi.OutputPool(['sim'])
rej = elfi.Rejection(d, batch_size=100, pool=pool, output_names=['sim'])
sample = rej.sample(100, n_sim=1000)
mean = np.mean(sample.samples['p'], axis=0)
assert mean[1] > mean[0]
|
elfi
|
positive
|
@auto_fp16()
def forward(self, feats):
""" Forward computation in multiple levels
Args:
feats (list(Tensor)): input feature maps, in shape of [B, C, H, W]
Returns:
list(Tensor): predict feature map, in shape of [B, 2, H, W]
"""
preds = []
for i in range(len(self.featmap_indices)):
<DeepExtract>
x_4 = feats[i]
x_p4 = self.P4_conv(x_4)
x_4_1x7 = self.channel4_1x7_conv(x_4)
x_p4_1x7 = self.P4_1x7_conv(x_p4)
x_4 = x_p4_1x7 + x_p4 + x_4_1x7
x_4 = self.rpn4(x_4)
score_pred_text_4 = self.conv_logits_text(x_4)
score_pred_text_4 = torch.sigmoid(score_pred_text_4)
pred = score_pred_text_4
</DeepExtract>
preds.append(pred)
return preds
|
@auto_fp16()
def forward(self, feats):
""" Forward computation in multiple levels
Args:
feats (list(Tensor)): input feature maps, in shape of [B, C, H, W]
Returns:
list(Tensor): predict feature map, in shape of [B, 2, H, W]
"""
preds = []
for i in range(len(self.featmap_indices)):
x_4 = feats[i]
x_p4 = self.P4_conv(x_4)
x_4_1x7 = self.channel4_1x7_conv(x_4)
x_p4_1x7 = self.P4_1x7_conv(x_p4)
x_4 = x_p4_1x7 + x_p4 + x_4_1x7
x_4 = self.rpn4(x_4)
score_pred_text_4 = self.conv_logits_text(x_4)
score_pred_text_4 = torch.sigmoid(score_pred_text_4)
pred = score_pred_text_4
preds.append(pred)
return preds
|
DAVAR-Lab-OCR
|
positive
|
def test_actual_backend_noproxy(self):
<DeepExtract>
reg = CacheRegion(**init_args)
reg.configure(backend, **config_args)
reg = reg
</DeepExtract>
assert isinstance(reg.backend, CacheBackend)
assert isinstance(reg.actual_backend, CacheBackend)
|
def test_actual_backend_noproxy(self):
reg = CacheRegion(**init_args)
reg.configure(backend, **config_args)
reg = reg
assert isinstance(reg.backend, CacheBackend)
assert isinstance(reg.actual_backend, CacheBackend)
|
dogpile.cache
|
positive
|
def test_var_undefined(self):
unparser = Unparser()
<DeepExtract>
ast = es5.parse(textwrap.dedent('\n var x, y;\n ').strip())
</DeepExtract>
self.assertEqual(dict(unparser(ast)), {'x': None, 'y': None})
|
def test_var_undefined(self):
unparser = Unparser()
ast = es5.parse(textwrap.dedent('\n var x, y;\n ').strip())
self.assertEqual(dict(unparser(ast)), {'x': None, 'y': None})
|
calmjs.parse
|
positive
|
def doTest():
<DeepExtract>
equal(doExtraDetect('@media screen and (max-device-width: 480px)'.replace(' ', '')), WEBKIT, 'webkit mobile hack is ok')
equal(doExtraDetect('@media screen and (-webkit-min-device-pixel-ratio:0)'.replace(' ', '')), WEBKIT, 'webkit hack is ok')
equal(doExtraDetect('@media all and (-webkit-min-device-pixel-ratio:10000), not all and (-webkit-min-device-pixel-ratio:0)'.replace(' ', '')), OPERA, 'opera hack is ok')
</DeepExtract>
<DeepExtract>
equal(doExtraDetect('@keyframes fda'), NONEIE | IE9PLUS, '@keyframes')
equal(doExtraDetect('@-webkit-keyframes fda'), WEBKIT, '@-webkit-keyframes')
equal(doExtraDetect('@-moz-keyframes fda'), FIREFOX, '@-moz-keyframes')
equal(doExtraDetect('@-ms-keyframes fda'), IE9PLUS, '@-ms-keyframes')
equal(doExtraDetect('@-o-keyframes fda'), OPERA, '@-o-keyframes')
</DeepExtract>
|
def doTest():
equal(doExtraDetect('@media screen and (max-device-width: 480px)'.replace(' ', '')), WEBKIT, 'webkit mobile hack is ok')
equal(doExtraDetect('@media screen and (-webkit-min-device-pixel-ratio:0)'.replace(' ', '')), WEBKIT, 'webkit hack is ok')
equal(doExtraDetect('@media all and (-webkit-min-device-pixel-ratio:10000), not all and (-webkit-min-device-pixel-ratio:0)'.replace(' ', '')), OPERA, 'opera hack is ok')
equal(doExtraDetect('@keyframes fda'), NONEIE | IE9PLUS, '@keyframes')
equal(doExtraDetect('@-webkit-keyframes fda'), WEBKIT, '@-webkit-keyframes')
equal(doExtraDetect('@-moz-keyframes fda'), FIREFOX, '@-moz-keyframes')
equal(doExtraDetect('@-ms-keyframes fda'), IE9PLUS, '@-ms-keyframes')
equal(doExtraDetect('@-o-keyframes fda'), OPERA, '@-o-keyframes')
|
CSSCheckStyle
|
positive
|
def mlp_model_adv_q(input, num_outputs, scope, index, n_adv=3, n_good=5, n_land=6, share_weights=False, num_units=64, reuse=None):
if reuse is None:
reuse = tf.AUTO_REUSE if share_weights else False
with tf.variable_scope(scope, reuse=reuse):
basic = 0
self_dim = n_land * 3 + 5
shorton = 1
num_test = num_units // 2
input_action = input[:, -5 * (n_adv + n_good):]
self_action = input_action[:, index * 5:(index + 1) * 5]
other_good_action = input_action[:, 5 * n_adv:]
adv_action = input_action[:, :5 * n_adv]
other_adv_action = tf.concat([input_action[:, :5 * index], input_action[:, 5 * (index + 1):5 * n_adv]], 1)
length_wolf = n_land * 3 + (n_good + n_adv) * 5
length_sheep = length_wolf
self_start = index * length_wolf
input_obs_self = input[:, self_start:self_start + length_wolf]
batch_size = input.shape[0].value
self_in = tf.concat([input_obs_self, self_action], 1)
with tf.variable_scope('self', reuse=reuse):
<DeepExtract>
if reuse is None:
reuse = tf.AUTO_REUSE if share_weights else False
with tf.variable_scope('mlp', reuse=reuse):
num_test = num_units // 2
batch_size = self_in.shape[0].value
self_dim = 5 + 3 * n_land
self_land = self_in[:, 5:5 + 3 * n_land]
if True:
self_action = self_in[:, -5:]
else:
self_action = None
self_in = self_in[:, :5]
if True:
self_in = tf.concat([self_in, self_action], axis=1)
with tf.variable_scope('self', reuse=reuse):
self_out = FULLY_CONNECTED(self_in, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
self_out = FULLY_CONNECTED(self_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_mark_input = self_in[:, 5:5 + 3 * n_land]
land_mark_input = tf.split(land_mark_input, n_land, axis=1)
land_info = []
land_outs = []
with tf.variable_scope('landmark', reuse=reuse):
fc1_out = FULLY_CONNECTED(land_mark_input, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
land_outs = FULLY_CONNECTED(fc1_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_out = tf.transpose(land_outs, [1, 2, 0])
land_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), land_out) / math.sqrt(num_test))
land_out = tf.squeeze(tf.matmul(land_out_attn, tf.transpose(land_out, [0, 2, 1])), 1)
land_out = tf.contrib.layers.layer_norm(land_out)
land_out = tf.nn.relu(land_out)
other_good_in = self_in[:, self_dim:]
other_good_ins = []
for i in range(n_good):
pos = other_good_in[:, 2 * (n_adv - 1) + i * 2:2 * (n_adv - 1) + (i + 1) * 2]
vel = other_good_in[:, 4 * (n_adv - 1) + 2 * n_good + i * 2:4 * (n_adv - 1) + 2 * n_good + (i + 1) * 2]
is_live = other_good_in[:, 5 * (n_adv - 1) + 4 * n_good + i:5 * (n_adv - 1) + 4 * n_good + i + 1]
if True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_good_ins.append(tmp)
with tf.variable_scope('good', reuse=reuse):
fc1_good = FULLY_CONNECTED(other_good_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_good_outs = FULLY_CONNECTED(fc1_good, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
other_good_out = tf.transpose(other_good_outs, [1, 2, 0])
other_good_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_good_out) / math.sqrt(num_test))
other_good_out = tf.squeeze(tf.matmul(other_good_out_attn, tf.transpose(other_good_out, [0, 2, 1])), 1)
other_good_out = tf.contrib.layers.layer_norm(other_good_out)
other_good_out = tf.nn.relu(other_good_out)
if n_adv == 1:
input_merge = tf.concat([self_out, other_good_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, activation_fn=tf.nn.relu)
out = FULLY_CONNECTED(out, num_outputs=num_test, activation_fn=None)
self_out = out
other_adv_beg = self_dim
other_adv_in = self_in[:, self_dim:]
other_adv_ins = []
for i in range(n_adv - 1):
pos = other_adv_in[:, i * 2:(i + 1) * 2]
vel = other_adv_in[:, 2 * (n_adv - 1) + 2 * n_good + i * 2:2 * (n_adv - 1) + 2 * n_good + (i + 1) * 2]
is_live = other_adv_in[:, 4 * (n_adv - 1) + 4 * n_good + i:4 * (n_adv - 1) + 4 * n_good + i + 1]
if not True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_adv_ins.append(tmp)
other_adv_outs = []
with tf.variable_scope('adv', reuse=reuse):
fc1_adv = FULLY_CONNECTED(other_adv_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_adv_outs = FULLY_CONNECTED(fc1_adv, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
other_adv_out = tf.transpose(other_adv_outs, [1, 2, 0])
if n_adv > 0:
other_adv_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_adv_out) / math.sqrt(num_test))
other_adv_out = tf.squeeze(tf.matmul(other_adv_out_attn, tf.transpose(other_adv_out, [0, 2, 1])), 1)
other_adv_out = tf.contrib.layers.layer_norm(other_adv_out)
other_adv_out = tf.nn.relu(other_adv_out)
else:
other_adv_out = None
if n_adv <= 0:
input_merge = tf.concat([self_out, land_out, other_good_out], 1)
else:
input_merge = tf.concat([self_out, land_out, other_good_out, other_adv_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, scope='last_1', activation_fn=tf.nn.relu if True else tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_test, scope='last_2', activation_fn=None)
self_out = out
</DeepExtract>
other_good_ins = []
for i in range(n_good):
other_good_beg = n_adv * length_wolf + i * length_sheep
other_good_in = input[:, other_good_beg:other_good_beg + length_sheep]
tmp = tf.concat([other_good_in, other_good_action[:, i * 5:(i + 1) * 5]], axis=1)
other_good_ins.append(tmp)
other_good_outs = []
if basic:
other_good_out = tf.concat([i for i in other_good_ins], 1)
else:
for i in range(n_good):
with tf.variable_scope('good{}'.format('' if share_weights else i), reuse=reuse):
<DeepExtract>
if reuse is None:
reuse = tf.AUTO_REUSE if share_weights else False
with tf.variable_scope('mlp', reuse=reuse):
num_test = num_units // 2
batch_size = other_good_ins[i].shape[0].value
self_land = other_good_ins[i][:, 5:5 + 3 * n_land]
if True:
self_action = other_good_ins[i][:, -5:]
else:
self_action = None
self_dim = 5 + 3 * n_land
self_in = other_good_ins[i][:, :5]
if True:
self_in = tf.concat([self_in, self_action], axis=1)
with tf.variable_scope('self', reuse=reuse):
self_out = FULLY_CONNECTED(self_in, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
self_out = FULLY_CONNECTED(self_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_mark_input = other_good_ins[i][:, 5:5 + 3 * n_land]
land_mark_input = tf.split(land_mark_input, n_land, axis=1)
with tf.variable_scope('landmark', reuse=reuse):
fc1_out = FULLY_CONNECTED(land_mark_input, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
land_outs = FULLY_CONNECTED(fc1_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_out = tf.transpose(land_outs, [1, 2, 0])
land_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), land_out) / math.sqrt(num_test))
land_out = tf.squeeze(tf.matmul(land_out_attn, tf.transpose(land_out, [0, 2, 1])), 1)
land_out = tf.contrib.layers.layer_norm(land_out)
land_out = tf.nn.relu(land_out)
if n_good != 1:
other_good_dim = (2 + 2 + 1) * (n_good - 1)
other_good_in = other_good_ins[i][:, self_dim:]
other_good_ins = []
for i in range(n_good - 1):
pos = other_good_in[:, 2 * n_adv + i * 2:2 * n_adv + (i + 1) * 2]
vel = other_good_in[:, 2 * n_adv + 2 * (n_good - 1) + 2 * n_adv + i * 2:2 * n_adv + 2 * (n_good - 1) + 2 * n_adv + (i + 1) * 2]
is_live = other_good_in[:, 5 * n_adv + 4 * (n_good - 1) + i:5 * n_adv + 4 * (n_good - 1) + i + 1]
if True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_good_ins.append(tmp)
with tf.variable_scope('good', reuse=reuse):
fc1_good = FULLY_CONNECTED(other_good_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_good_outs = FULLY_CONNECTED(fc1_good, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
other_good_out = tf.transpose(other_good_outs, [1, 2, 0])
other_good_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_good_out) / math.sqrt(num_test))
other_good_out = tf.squeeze(tf.matmul(other_good_out_attn, tf.transpose(other_good_out, [0, 2, 1])), 1)
other_good_out = tf.contrib.layers.layer_norm(other_good_out)
other_good_out = tf.nn.relu(other_good_out)
other_adv_dim = 5 * n_adv
other_adv_beg = self_dim
other_adv_in = other_good_ins[i][:, other_adv_beg:]
other_adv_ins = []
for i in range(n_adv):
pos = other_adv_in[:, i * 2:(i + 1) * 2]
vel = other_adv_in[:, 2 * n_adv + 2 * (n_good - 1) + i * 2:2 * n_adv + 2 * (n_good - 1) + (i + 1) * 2]
is_live = other_adv_in[:, 4 * n_adv + 4 * (n_good - 1) + i:4 * n_adv + 4 * (n_good - 1) + i + 1]
if not True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_adv_ins.append(tmp)
with tf.variable_scope('adv', reuse=reuse):
fc1_adv = FULLY_CONNECTED(other_adv_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_adv_outs = FULLY_CONNECTED(fc1_adv, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
if n_adv > 0:
other_adv_out = tf.transpose(other_adv_outs, [1, 2, 0])
other_adv_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_adv_out) / math.sqrt(num_test))
other_adv_out = tf.squeeze(tf.matmul(other_adv_out_attn, tf.transpose(other_adv_out, [0, 2, 1])), 1)
other_adv_out = tf.contrib.layers.layer_norm(other_adv_out)
other_adv_out = tf.nn.relu(other_adv_out)
else:
other_adv_out = None
if n_good == 1:
input_merge = tf.concat([self_out, land_out, other_adv_out], 1)
elif n_adv <= 0:
input_merge = tf.concat([self_out, land_out, other_good_out], 1)
else:
input_merge = tf.concat([self_out, land_out, other_good_out, other_adv_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, scope='last_1', activation_fn=tf.nn.relu if True else tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_units, scope='last_11', activation_fn=tf.nn.relu if True else tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_test, scope='last_2', activation_fn=None)
out = out
</DeepExtract>
other_good_outs.append(out)
if n_adv != 1:
other_adv_ins = []
for i in range(n_adv):
if i == index:
continue
other_adv_beg = length_wolf * i
other_adv_in = input[:, other_adv_beg:other_adv_beg + length_wolf]
tmp = tf.concat([other_adv_in, adv_action[:, i * 5:(i + 1) * 5]], 1)
other_adv_ins.append(tmp)
other_adv_outs = []
if basic:
other_adv_out = tf.concat([i for i in other_adv_ins], 1)
else:
for i in range(n_adv - 1):
true_id = i if i < index else i + 1
with tf.variable_scope('adv{}'.format('' if share_weights else true_id), reuse=reuse):
<DeepExtract>
if reuse is None:
reuse = tf.AUTO_REUSE if share_weights else False
with tf.variable_scope('mlp', reuse=reuse):
num_test = num_units // 2
batch_size = other_adv_ins[i].shape[0].value
self_dim = 5 + 3 * n_land
self_land = other_adv_ins[i][:, 5:5 + 3 * n_land]
if True:
self_action = other_adv_ins[i][:, -5:]
else:
self_action = None
self_in = other_adv_ins[i][:, :5]
if True:
self_in = tf.concat([self_in, self_action], axis=1)
with tf.variable_scope('self', reuse=reuse):
self_out = FULLY_CONNECTED(self_in, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
self_out = FULLY_CONNECTED(self_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_mark_input = other_adv_ins[i][:, 5:5 + 3 * n_land]
land_mark_input = tf.split(land_mark_input, n_land, axis=1)
land_info = []
land_outs = []
with tf.variable_scope('landmark', reuse=reuse):
fc1_out = FULLY_CONNECTED(land_mark_input, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
land_outs = FULLY_CONNECTED(fc1_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_out = tf.transpose(land_outs, [1, 2, 0])
land_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), land_out) / math.sqrt(num_test))
land_out = tf.squeeze(tf.matmul(land_out_attn, tf.transpose(land_out, [0, 2, 1])), 1)
land_out = tf.contrib.layers.layer_norm(land_out)
land_out = tf.nn.relu(land_out)
other_good_in = other_adv_ins[i][:, self_dim:]
other_good_ins = []
for i in range(n_good):
pos = other_good_in[:, 2 * (n_adv - 1) + i * 2:2 * (n_adv - 1) + (i + 1) * 2]
vel = other_good_in[:, 4 * (n_adv - 1) + 2 * n_good + i * 2:4 * (n_adv - 1) + 2 * n_good + (i + 1) * 2]
is_live = other_good_in[:, 5 * (n_adv - 1) + 4 * n_good + i:5 * (n_adv - 1) + 4 * n_good + i + 1]
if True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_good_ins.append(tmp)
with tf.variable_scope('good', reuse=reuse):
fc1_good = FULLY_CONNECTED(other_good_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_good_outs = FULLY_CONNECTED(fc1_good, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
other_good_out = tf.transpose(other_good_outs, [1, 2, 0])
other_good_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_good_out) / math.sqrt(num_test))
other_good_out = tf.squeeze(tf.matmul(other_good_out_attn, tf.transpose(other_good_out, [0, 2, 1])), 1)
other_good_out = tf.contrib.layers.layer_norm(other_good_out)
other_good_out = tf.nn.relu(other_good_out)
if n_adv == 1:
input_merge = tf.concat([self_out, other_good_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, activation_fn=tf.nn.relu)
out = FULLY_CONNECTED(out, num_outputs=num_test, activation_fn=None)
out = out
other_adv_beg = self_dim
other_adv_in = other_adv_ins[i][:, self_dim:]
other_adv_ins = []
for i in range(n_adv - 1):
pos = other_adv_in[:, i * 2:(i + 1) * 2]
vel = other_adv_in[:, 2 * (n_adv - 1) + 2 * n_good + i * 2:2 * (n_adv - 1) + 2 * n_good + (i + 1) * 2]
is_live = other_adv_in[:, 4 * (n_adv - 1) + 4 * n_good + i:4 * (n_adv - 1) + 4 * n_good + i + 1]
if not True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_adv_ins.append(tmp)
other_adv_outs = []
with tf.variable_scope('adv', reuse=reuse):
fc1_adv = FULLY_CONNECTED(other_adv_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_adv_outs = FULLY_CONNECTED(fc1_adv, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
other_adv_out = tf.transpose(other_adv_outs, [1, 2, 0])
if n_adv > 0:
other_adv_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_adv_out) / math.sqrt(num_test))
other_adv_out = tf.squeeze(tf.matmul(other_adv_out_attn, tf.transpose(other_adv_out, [0, 2, 1])), 1)
other_adv_out = tf.contrib.layers.layer_norm(other_adv_out)
other_adv_out = tf.nn.relu(other_adv_out)
else:
other_adv_out = None
if n_adv <= 0:
input_merge = tf.concat([self_out, land_out, other_good_out], 1)
else:
input_merge = tf.concat([self_out, land_out, other_good_out, other_adv_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, scope='last_1', activation_fn=tf.nn.relu if True else tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_test, scope='last_2', activation_fn=None)
out = out
</DeepExtract>
other_adv_outs.append(out)
else:
other_adv_outs = []
theta_out = []
phi_out = []
g_out = []
theta_out.append(FULLY_CONNECTED(self_out, num_outputs=num_test, scope='theta_f', reuse=tf.AUTO_REUSE, activation_fn=None))
phi_out.append(FULLY_CONNECTED(self_out, num_outputs=num_test, scope='phi_f', reuse=tf.AUTO_REUSE, activation_fn=None))
g_out.append(FULLY_CONNECTED(self_out, num_outputs=num_test, scope='g_f', reuse=tf.AUTO_REUSE, activation_fn=None))
for (i, out) in enumerate(other_good_outs):
theta_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='theta_f', reuse=tf.AUTO_REUSE, activation_fn=None))
phi_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='phi_f', reuse=tf.AUTO_REUSE, activation_fn=None))
g_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='g_f', reuse=tf.AUTO_REUSE, activation_fn=None))
for (i, out) in enumerate(other_adv_outs):
theta_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='theta_f', reuse=tf.AUTO_REUSE, activation_fn=None))
phi_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='phi_f', reuse=tf.AUTO_REUSE, activation_fn=None))
g_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='g_f', reuse=tf.AUTO_REUSE, activation_fn=None))
theta_outs = tf.stack(theta_out, 2)
phi_outs = tf.stack(phi_out, 2)
g_outs = tf.stack(g_out, 2)
self_attention = tf.nn.softmax(tf.matmul(theta_outs, tf.transpose(phi_outs, [0, 2, 1])) / math.sqrt(num_test))
input_all = tf.matmul(self_attention, g_outs)
input_all_new = []
for i in range(n_adv + n_good):
input_all_new.append(tf.contrib.layers.layer_norm(input_all[:, :, i], scope='qlayernorm1', reuse=tf.AUTO_REUSE))
input_all = tf.stack(input_all_new, 2)
'\n input_all = tf.contrib.layers.layer_norm(input_all)\n '
input_all = tf.nn.relu(input_all)
self_out_new = input_all[:, :, 0]
good_out_new = input_all[:, :, 1:1 + n_good]
adv_out_new = input_all[:, :, 1 + n_good:]
if n_adv > 0:
other_adv_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out_new, 1), adv_out_new) / math.sqrt(num_test))
other_adv_out = tf.squeeze(tf.matmul(other_adv_out_attn, tf.transpose(adv_out_new, [0, 2, 1])), 1)
other_adv_out = tf.contrib.layers.layer_norm(other_adv_out)
other_adv_out = tf.nn.relu(other_adv_out)
other_good_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out_new, 1), good_out_new) / math.sqrt(num_test))
other_good_out = tf.squeeze(tf.matmul(other_good_out_attn, tf.transpose(good_out_new, [0, 2, 1])), 1)
other_good_out = tf.contrib.layers.layer_norm(other_good_out)
other_good_out = tf.nn.relu(other_good_out)
if n_adv == 1:
input_merge = tf.concat([self_out, other_good_out], 1)
elif n_adv <= 0:
input_merge = tf.concat([self_out, other_good_out], 1)
else:
input_merge = tf.concat([self_out, other_good_out, other_adv_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, scope='last_1', activation_fn=tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_units, scope='last_11', activation_fn=tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_outputs, scope='last_2', activation_fn=None)
return out
|
def mlp_model_adv_q(input, num_outputs, scope, index, n_adv=3, n_good=5, n_land=6, share_weights=False, num_units=64, reuse=None):
if reuse is None:
reuse = tf.AUTO_REUSE if share_weights else False
with tf.variable_scope(scope, reuse=reuse):
basic = 0
self_dim = n_land * 3 + 5
shorton = 1
num_test = num_units // 2
input_action = input[:, -5 * (n_adv + n_good):]
self_action = input_action[:, index * 5:(index + 1) * 5]
other_good_action = input_action[:, 5 * n_adv:]
adv_action = input_action[:, :5 * n_adv]
other_adv_action = tf.concat([input_action[:, :5 * index], input_action[:, 5 * (index + 1):5 * n_adv]], 1)
length_wolf = n_land * 3 + (n_good + n_adv) * 5
length_sheep = length_wolf
self_start = index * length_wolf
input_obs_self = input[:, self_start:self_start + length_wolf]
batch_size = input.shape[0].value
self_in = tf.concat([input_obs_self, self_action], 1)
with tf.variable_scope('self', reuse=reuse):
if reuse is None:
reuse = tf.AUTO_REUSE if share_weights else False
with tf.variable_scope('mlp', reuse=reuse):
num_test = num_units // 2
batch_size = self_in.shape[0].value
self_dim = 5 + 3 * n_land
self_land = self_in[:, 5:5 + 3 * n_land]
if True:
self_action = self_in[:, -5:]
else:
self_action = None
self_in = self_in[:, :5]
if True:
self_in = tf.concat([self_in, self_action], axis=1)
with tf.variable_scope('self', reuse=reuse):
self_out = FULLY_CONNECTED(self_in, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
self_out = FULLY_CONNECTED(self_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_mark_input = self_in[:, 5:5 + 3 * n_land]
land_mark_input = tf.split(land_mark_input, n_land, axis=1)
land_info = []
land_outs = []
with tf.variable_scope('landmark', reuse=reuse):
fc1_out = FULLY_CONNECTED(land_mark_input, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
land_outs = FULLY_CONNECTED(fc1_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_out = tf.transpose(land_outs, [1, 2, 0])
land_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), land_out) / math.sqrt(num_test))
land_out = tf.squeeze(tf.matmul(land_out_attn, tf.transpose(land_out, [0, 2, 1])), 1)
land_out = tf.contrib.layers.layer_norm(land_out)
land_out = tf.nn.relu(land_out)
other_good_in = self_in[:, self_dim:]
other_good_ins = []
for i in range(n_good):
pos = other_good_in[:, 2 * (n_adv - 1) + i * 2:2 * (n_adv - 1) + (i + 1) * 2]
vel = other_good_in[:, 4 * (n_adv - 1) + 2 * n_good + i * 2:4 * (n_adv - 1) + 2 * n_good + (i + 1) * 2]
is_live = other_good_in[:, 5 * (n_adv - 1) + 4 * n_good + i:5 * (n_adv - 1) + 4 * n_good + i + 1]
if True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_good_ins.append(tmp)
with tf.variable_scope('good', reuse=reuse):
fc1_good = FULLY_CONNECTED(other_good_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_good_outs = FULLY_CONNECTED(fc1_good, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
other_good_out = tf.transpose(other_good_outs, [1, 2, 0])
other_good_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_good_out) / math.sqrt(num_test))
other_good_out = tf.squeeze(tf.matmul(other_good_out_attn, tf.transpose(other_good_out, [0, 2, 1])), 1)
other_good_out = tf.contrib.layers.layer_norm(other_good_out)
other_good_out = tf.nn.relu(other_good_out)
if n_adv == 1:
input_merge = tf.concat([self_out, other_good_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, activation_fn=tf.nn.relu)
out = FULLY_CONNECTED(out, num_outputs=num_test, activation_fn=None)
self_out = out
other_adv_beg = self_dim
other_adv_in = self_in[:, self_dim:]
other_adv_ins = []
for i in range(n_adv - 1):
pos = other_adv_in[:, i * 2:(i + 1) * 2]
vel = other_adv_in[:, 2 * (n_adv - 1) + 2 * n_good + i * 2:2 * (n_adv - 1) + 2 * n_good + (i + 1) * 2]
is_live = other_adv_in[:, 4 * (n_adv - 1) + 4 * n_good + i:4 * (n_adv - 1) + 4 * n_good + i + 1]
if not True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_adv_ins.append(tmp)
other_adv_outs = []
with tf.variable_scope('adv', reuse=reuse):
fc1_adv = FULLY_CONNECTED(other_adv_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_adv_outs = FULLY_CONNECTED(fc1_adv, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
other_adv_out = tf.transpose(other_adv_outs, [1, 2, 0])
if n_adv > 0:
other_adv_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_adv_out) / math.sqrt(num_test))
other_adv_out = tf.squeeze(tf.matmul(other_adv_out_attn, tf.transpose(other_adv_out, [0, 2, 1])), 1)
other_adv_out = tf.contrib.layers.layer_norm(other_adv_out)
other_adv_out = tf.nn.relu(other_adv_out)
else:
other_adv_out = None
if n_adv <= 0:
input_merge = tf.concat([self_out, land_out, other_good_out], 1)
else:
input_merge = tf.concat([self_out, land_out, other_good_out, other_adv_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, scope='last_1', activation_fn=tf.nn.relu if True else tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_test, scope='last_2', activation_fn=None)
self_out = out
other_good_ins = []
for i in range(n_good):
other_good_beg = n_adv * length_wolf + i * length_sheep
other_good_in = input[:, other_good_beg:other_good_beg + length_sheep]
tmp = tf.concat([other_good_in, other_good_action[:, i * 5:(i + 1) * 5]], axis=1)
other_good_ins.append(tmp)
other_good_outs = []
if basic:
other_good_out = tf.concat([i for i in other_good_ins], 1)
else:
for i in range(n_good):
with tf.variable_scope('good{}'.format('' if share_weights else i), reuse=reuse):
if reuse is None:
reuse = tf.AUTO_REUSE if share_weights else False
with tf.variable_scope('mlp', reuse=reuse):
num_test = num_units // 2
batch_size = other_good_ins[i].shape[0].value
self_land = other_good_ins[i][:, 5:5 + 3 * n_land]
if True:
self_action = other_good_ins[i][:, -5:]
else:
self_action = None
self_dim = 5 + 3 * n_land
self_in = other_good_ins[i][:, :5]
if True:
self_in = tf.concat([self_in, self_action], axis=1)
with tf.variable_scope('self', reuse=reuse):
self_out = FULLY_CONNECTED(self_in, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
self_out = FULLY_CONNECTED(self_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_mark_input = other_good_ins[i][:, 5:5 + 3 * n_land]
land_mark_input = tf.split(land_mark_input, n_land, axis=1)
with tf.variable_scope('landmark', reuse=reuse):
fc1_out = FULLY_CONNECTED(land_mark_input, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
land_outs = FULLY_CONNECTED(fc1_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_out = tf.transpose(land_outs, [1, 2, 0])
land_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), land_out) / math.sqrt(num_test))
land_out = tf.squeeze(tf.matmul(land_out_attn, tf.transpose(land_out, [0, 2, 1])), 1)
land_out = tf.contrib.layers.layer_norm(land_out)
land_out = tf.nn.relu(land_out)
if n_good != 1:
other_good_dim = (2 + 2 + 1) * (n_good - 1)
other_good_in = other_good_ins[i][:, self_dim:]
other_good_ins = []
for i in range(n_good - 1):
pos = other_good_in[:, 2 * n_adv + i * 2:2 * n_adv + (i + 1) * 2]
vel = other_good_in[:, 2 * n_adv + 2 * (n_good - 1) + 2 * n_adv + i * 2:2 * n_adv + 2 * (n_good - 1) + 2 * n_adv + (i + 1) * 2]
is_live = other_good_in[:, 5 * n_adv + 4 * (n_good - 1) + i:5 * n_adv + 4 * (n_good - 1) + i + 1]
if True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_good_ins.append(tmp)
with tf.variable_scope('good', reuse=reuse):
fc1_good = FULLY_CONNECTED(other_good_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_good_outs = FULLY_CONNECTED(fc1_good, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
other_good_out = tf.transpose(other_good_outs, [1, 2, 0])
other_good_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_good_out) / math.sqrt(num_test))
other_good_out = tf.squeeze(tf.matmul(other_good_out_attn, tf.transpose(other_good_out, [0, 2, 1])), 1)
other_good_out = tf.contrib.layers.layer_norm(other_good_out)
other_good_out = tf.nn.relu(other_good_out)
other_adv_dim = 5 * n_adv
other_adv_beg = self_dim
other_adv_in = other_good_ins[i][:, other_adv_beg:]
other_adv_ins = []
for i in range(n_adv):
pos = other_adv_in[:, i * 2:(i + 1) * 2]
vel = other_adv_in[:, 2 * n_adv + 2 * (n_good - 1) + i * 2:2 * n_adv + 2 * (n_good - 1) + (i + 1) * 2]
is_live = other_adv_in[:, 4 * n_adv + 4 * (n_good - 1) + i:4 * n_adv + 4 * (n_good - 1) + i + 1]
if not True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_adv_ins.append(tmp)
with tf.variable_scope('adv', reuse=reuse):
fc1_adv = FULLY_CONNECTED(other_adv_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_adv_outs = FULLY_CONNECTED(fc1_adv, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
if n_adv > 0:
other_adv_out = tf.transpose(other_adv_outs, [1, 2, 0])
other_adv_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_adv_out) / math.sqrt(num_test))
other_adv_out = tf.squeeze(tf.matmul(other_adv_out_attn, tf.transpose(other_adv_out, [0, 2, 1])), 1)
other_adv_out = tf.contrib.layers.layer_norm(other_adv_out)
other_adv_out = tf.nn.relu(other_adv_out)
else:
other_adv_out = None
if n_good == 1:
input_merge = tf.concat([self_out, land_out, other_adv_out], 1)
elif n_adv <= 0:
input_merge = tf.concat([self_out, land_out, other_good_out], 1)
else:
input_merge = tf.concat([self_out, land_out, other_good_out, other_adv_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, scope='last_1', activation_fn=tf.nn.relu if True else tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_units, scope='last_11', activation_fn=tf.nn.relu if True else tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_test, scope='last_2', activation_fn=None)
out = out
other_good_outs.append(out)
if n_adv != 1:
other_adv_ins = []
for i in range(n_adv):
if i == index:
continue
other_adv_beg = length_wolf * i
other_adv_in = input[:, other_adv_beg:other_adv_beg + length_wolf]
tmp = tf.concat([other_adv_in, adv_action[:, i * 5:(i + 1) * 5]], 1)
other_adv_ins.append(tmp)
other_adv_outs = []
if basic:
other_adv_out = tf.concat([i for i in other_adv_ins], 1)
else:
for i in range(n_adv - 1):
true_id = i if i < index else i + 1
with tf.variable_scope('adv{}'.format('' if share_weights else true_id), reuse=reuse):
if reuse is None:
reuse = tf.AUTO_REUSE if share_weights else False
with tf.variable_scope('mlp', reuse=reuse):
num_test = num_units // 2
batch_size = other_adv_ins[i].shape[0].value
self_dim = 5 + 3 * n_land
self_land = other_adv_ins[i][:, 5:5 + 3 * n_land]
if True:
self_action = other_adv_ins[i][:, -5:]
else:
self_action = None
self_in = other_adv_ins[i][:, :5]
if True:
self_in = tf.concat([self_in, self_action], axis=1)
with tf.variable_scope('self', reuse=reuse):
self_out = FULLY_CONNECTED(self_in, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
self_out = FULLY_CONNECTED(self_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_mark_input = other_adv_ins[i][:, 5:5 + 3 * n_land]
land_mark_input = tf.split(land_mark_input, n_land, axis=1)
land_info = []
land_outs = []
with tf.variable_scope('landmark', reuse=reuse):
fc1_out = FULLY_CONNECTED(land_mark_input, num_outputs=num_units, scope='l1', activation_fn=tf.nn.relu)
land_outs = FULLY_CONNECTED(fc1_out, num_outputs=num_test, scope='l2', activation_fn=tf.nn.relu)
land_out = tf.transpose(land_outs, [1, 2, 0])
land_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), land_out) / math.sqrt(num_test))
land_out = tf.squeeze(tf.matmul(land_out_attn, tf.transpose(land_out, [0, 2, 1])), 1)
land_out = tf.contrib.layers.layer_norm(land_out)
land_out = tf.nn.relu(land_out)
other_good_in = other_adv_ins[i][:, self_dim:]
other_good_ins = []
for i in range(n_good):
pos = other_good_in[:, 2 * (n_adv - 1) + i * 2:2 * (n_adv - 1) + (i + 1) * 2]
vel = other_good_in[:, 4 * (n_adv - 1) + 2 * n_good + i * 2:4 * (n_adv - 1) + 2 * n_good + (i + 1) * 2]
is_live = other_good_in[:, 5 * (n_adv - 1) + 4 * n_good + i:5 * (n_adv - 1) + 4 * n_good + i + 1]
if True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_good_ins.append(tmp)
with tf.variable_scope('good', reuse=reuse):
fc1_good = FULLY_CONNECTED(other_good_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_good_outs = FULLY_CONNECTED(fc1_good, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
other_good_out = tf.transpose(other_good_outs, [1, 2, 0])
other_good_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_good_out) / math.sqrt(num_test))
other_good_out = tf.squeeze(tf.matmul(other_good_out_attn, tf.transpose(other_good_out, [0, 2, 1])), 1)
other_good_out = tf.contrib.layers.layer_norm(other_good_out)
other_good_out = tf.nn.relu(other_good_out)
if n_adv == 1:
input_merge = tf.concat([self_out, other_good_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, activation_fn=tf.nn.relu)
out = FULLY_CONNECTED(out, num_outputs=num_test, activation_fn=None)
out = out
other_adv_beg = self_dim
other_adv_in = other_adv_ins[i][:, self_dim:]
other_adv_ins = []
for i in range(n_adv - 1):
pos = other_adv_in[:, i * 2:(i + 1) * 2]
vel = other_adv_in[:, 2 * (n_adv - 1) + 2 * n_good + i * 2:2 * (n_adv - 1) + 2 * n_good + (i + 1) * 2]
is_live = other_adv_in[:, 4 * (n_adv - 1) + 4 * n_good + i:4 * (n_adv - 1) + 4 * n_good + i + 1]
if not True:
tmp = tf.concat([pos, vel, is_live], axis=1)
else:
tmp = tf.concat([pos, vel, is_live], axis=1)
other_adv_ins.append(tmp)
other_adv_outs = []
with tf.variable_scope('adv', reuse=reuse):
fc1_adv = FULLY_CONNECTED(other_adv_ins, num_outputs=num_units, activation_fn=tf.nn.relu, scope='l1', reuse=reuse)
other_adv_outs = FULLY_CONNECTED(fc1_adv, num_outputs=num_test, activation_fn=tf.nn.relu, scope='l2', reuse=reuse)
other_adv_out = tf.transpose(other_adv_outs, [1, 2, 0])
if n_adv > 0:
other_adv_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out, 1), other_adv_out) / math.sqrt(num_test))
other_adv_out = tf.squeeze(tf.matmul(other_adv_out_attn, tf.transpose(other_adv_out, [0, 2, 1])), 1)
other_adv_out = tf.contrib.layers.layer_norm(other_adv_out)
other_adv_out = tf.nn.relu(other_adv_out)
else:
other_adv_out = None
if n_adv <= 0:
input_merge = tf.concat([self_out, land_out, other_good_out], 1)
else:
input_merge = tf.concat([self_out, land_out, other_good_out, other_adv_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, scope='last_1', activation_fn=tf.nn.relu if True else tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_test, scope='last_2', activation_fn=None)
out = out
other_adv_outs.append(out)
else:
other_adv_outs = []
theta_out = []
phi_out = []
g_out = []
theta_out.append(FULLY_CONNECTED(self_out, num_outputs=num_test, scope='theta_f', reuse=tf.AUTO_REUSE, activation_fn=None))
phi_out.append(FULLY_CONNECTED(self_out, num_outputs=num_test, scope='phi_f', reuse=tf.AUTO_REUSE, activation_fn=None))
g_out.append(FULLY_CONNECTED(self_out, num_outputs=num_test, scope='g_f', reuse=tf.AUTO_REUSE, activation_fn=None))
for (i, out) in enumerate(other_good_outs):
theta_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='theta_f', reuse=tf.AUTO_REUSE, activation_fn=None))
phi_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='phi_f', reuse=tf.AUTO_REUSE, activation_fn=None))
g_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='g_f', reuse=tf.AUTO_REUSE, activation_fn=None))
for (i, out) in enumerate(other_adv_outs):
theta_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='theta_f', reuse=tf.AUTO_REUSE, activation_fn=None))
phi_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='phi_f', reuse=tf.AUTO_REUSE, activation_fn=None))
g_out.append(FULLY_CONNECTED(out, num_outputs=num_test, scope='g_f', reuse=tf.AUTO_REUSE, activation_fn=None))
theta_outs = tf.stack(theta_out, 2)
phi_outs = tf.stack(phi_out, 2)
g_outs = tf.stack(g_out, 2)
self_attention = tf.nn.softmax(tf.matmul(theta_outs, tf.transpose(phi_outs, [0, 2, 1])) / math.sqrt(num_test))
input_all = tf.matmul(self_attention, g_outs)
input_all_new = []
for i in range(n_adv + n_good):
input_all_new.append(tf.contrib.layers.layer_norm(input_all[:, :, i], scope='qlayernorm1', reuse=tf.AUTO_REUSE))
input_all = tf.stack(input_all_new, 2)
'\n input_all = tf.contrib.layers.layer_norm(input_all)\n '
input_all = tf.nn.relu(input_all)
self_out_new = input_all[:, :, 0]
good_out_new = input_all[:, :, 1:1 + n_good]
adv_out_new = input_all[:, :, 1 + n_good:]
if n_adv > 0:
other_adv_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out_new, 1), adv_out_new) / math.sqrt(num_test))
other_adv_out = tf.squeeze(tf.matmul(other_adv_out_attn, tf.transpose(adv_out_new, [0, 2, 1])), 1)
other_adv_out = tf.contrib.layers.layer_norm(other_adv_out)
other_adv_out = tf.nn.relu(other_adv_out)
other_good_out_attn = tf.nn.softmax(tf.matmul(tf.expand_dims(self_out_new, 1), good_out_new) / math.sqrt(num_test))
other_good_out = tf.squeeze(tf.matmul(other_good_out_attn, tf.transpose(good_out_new, [0, 2, 1])), 1)
other_good_out = tf.contrib.layers.layer_norm(other_good_out)
other_good_out = tf.nn.relu(other_good_out)
if n_adv == 1:
input_merge = tf.concat([self_out, other_good_out], 1)
elif n_adv <= 0:
input_merge = tf.concat([self_out, other_good_out], 1)
else:
input_merge = tf.concat([self_out, other_good_out, other_adv_out], 1)
out = FULLY_CONNECTED(input_merge, num_outputs=num_units, scope='last_1', activation_fn=tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_units, scope='last_11', activation_fn=tf.nn.leaky_relu)
out = FULLY_CONNECTED(out, num_outputs=num_outputs, scope='last_2', activation_fn=None)
return out
|
epciclr2020
|
positive
|
def _reapplyEvents(newEvents=None):
""" reapply all uncertain events on top of the baseline.
This should be the last thing which is done as part of adding an event as it may recurse into addEvent at the end.
"""
if newEvents is None:
newEvents = []
self.pauseListeners = True
oldGameState = self.currGameState
self.currGameState = deepcopy(self.baselineGameState)
for event in self.uncertainEvents:
newEvent = event.apply(self)
if newEvent:
newEvents.append(newEvent)
self.pauseListeners = False
for oldPlayer in oldGameState.players.itervalues():
<DeepExtract>
with self.stateLock:
if not (oldPlayer.team_id, oldPlayer.player_id) in self.currGameState.players:
self.currGameState.players[oldPlayer.team_id, oldPlayer.player_id] = Player(team_id=oldPlayer.team_id, player_id=oldPlayer.player_id)
if oldPlayer.team_id > self.currGameState.teamCount:
self.currGameState.teamCount = oldPlayer.team_id
if oldPlayer.player_id > self.currGameState.largestTeam:
self.currGameState.largestTeam = oldPlayer.player_id
newPlayer = self.currGameState.players[oldPlayer.team_id, oldPlayer.player_id]
</DeepExtract>
if oldPlayer != newPlayer:
print('Detected a Player in need of adjusting: ', oldPlayer, '->', newPlayer)
parametersSnapshot = deepcopy(self.currGameState.parameters)
<DeepExtract>
if not self.pauseListeners:
for listener in self.playerAdjustedListeners:
listener(oldPlayer.team_id, oldPlayer.player_id, newPlayer, parametersSnapshot)
</DeepExtract>
for newEvent in newEvents:
<DeepExtract>
with self.stateLock:
currentServerTime = time.time()
if newEvent.serverTime > currentServerTime:
self._addFutureEvent(newEvent)
else:
self._addPastEvent(newEvent)
</DeepExtract>
|
def _reapplyEvents(newEvents=None):
""" reapply all uncertain events on top of the baseline.
This should be the last thing which is done as part of adding an event as it may recurse into addEvent at the end.
"""
if newEvents is None:
newEvents = []
self.pauseListeners = True
oldGameState = self.currGameState
self.currGameState = deepcopy(self.baselineGameState)
for event in self.uncertainEvents:
newEvent = event.apply(self)
if newEvent:
newEvents.append(newEvent)
self.pauseListeners = False
for oldPlayer in oldGameState.players.itervalues():
with self.stateLock:
if not (oldPlayer.team_id, oldPlayer.player_id) in self.currGameState.players:
self.currGameState.players[oldPlayer.team_id, oldPlayer.player_id] = Player(team_id=oldPlayer.team_id, player_id=oldPlayer.player_id)
if oldPlayer.team_id > self.currGameState.teamCount:
self.currGameState.teamCount = oldPlayer.team_id
if oldPlayer.player_id > self.currGameState.largestTeam:
self.currGameState.largestTeam = oldPlayer.player_id
newPlayer = self.currGameState.players[oldPlayer.team_id, oldPlayer.player_id]
if oldPlayer != newPlayer:
print('Detected a Player in need of adjusting: ', oldPlayer, '->', newPlayer)
parametersSnapshot = deepcopy(self.currGameState.parameters)
if not self.pauseListeners:
for listener in self.playerAdjustedListeners:
listener(oldPlayer.team_id, oldPlayer.player_id, newPlayer, parametersSnapshot)
for newEvent in newEvents:
with self.stateLock:
currentServerTime = time.time()
if newEvent.serverTime > currentServerTime:
self._addFutureEvent(newEvent)
else:
self._addPastEvent(newEvent)
|
arduino-milestag
|
positive
|
def message_with_print(url, chat_id, text):
print('HUMAN: {}'.format(text))
<DeepExtract>
url = url + '/message'
r = requests.post(url, json={'text': text, 'chat_id': chat_id})
res = r.json()
</DeepExtract>
print('BOT: {}'.format(res['text']))
|
def message_with_print(url, chat_id, text):
print('HUMAN: {}'.format(text))
url = url + '/message'
r = requests.post(url, json={'text': text, 'chat_id': chat_id})
res = r.json()
print('BOT: {}'.format(res['text']))
|
convai-bot-1337
|
positive
|
def adapt_theory_for_maps(cls, data_params):
if self.aberration_coeff:
<DeepExtract>
ells = np.arange(self.pcl_lmin, self.pcl_lmax + 1)
cl_norm = ells * (ells + 1)
for i in range(self.nmaps_required):
for j in range(i + 1):
CL = cls[i, j]
if CL is not None:
if CL.theory_ij[0] <= 2 and CL.theory_ij[1] <= 2:
cl_deriv = CL.CL / cl_norm
cl_deriv[1:-1] = (cl_deriv[2:] - cl_deriv[:-2]) / 2
cl_deriv[0] = cl_deriv[1]
cl_deriv[-1] = cl_deriv[-2]
cl_deriv *= cl_norm
cl_deriv *= ells
CL.CL += self.aberration_coeff * cl_deriv
</DeepExtract>
<DeepExtract>
pass
</DeepExtract>
if self.calibration_param is not None and self.calibration_param in data_params:
for i in range(self.nmaps_required):
for j in range(i + 1):
CL = cls[i, j]
if CL is not None:
if CL.theory_ij[0] <= 2 and CL.theory_ij[1] <= 2:
CL.CL /= data_params[self.calibration_param] ** 2
|
def adapt_theory_for_maps(cls, data_params):
if self.aberration_coeff:
ells = np.arange(self.pcl_lmin, self.pcl_lmax + 1)
cl_norm = ells * (ells + 1)
for i in range(self.nmaps_required):
for j in range(i + 1):
CL = cls[i, j]
if CL is not None:
if CL.theory_ij[0] <= 2 and CL.theory_ij[1] <= 2:
cl_deriv = CL.CL / cl_norm
cl_deriv[1:-1] = (cl_deriv[2:] - cl_deriv[:-2]) / 2
cl_deriv[0] = cl_deriv[1]
cl_deriv[-1] = cl_deriv[-2]
cl_deriv *= cl_norm
cl_deriv *= ells
CL.CL += self.aberration_coeff * cl_deriv
pass
if self.calibration_param is not None and self.calibration_param in data_params:
for i in range(self.nmaps_required):
for j in range(i + 1):
CL = cls[i, j]
if CL is not None:
if CL.theory_ij[0] <= 2 and CL.theory_ij[1] <= 2:
CL.CL /= data_params[self.calibration_param] ** 2
|
cobaya
|
positive
|
def draw_3d_bbox_meshes_in_pyqt(widget, bboxes, colors=GLColor.Gray, alpha=1.0, edgecolors=None):
<DeepExtract>
bbox_faces = np.array([[0, 1, 2], [0, 2, 3], [4, 5, 6], [4, 6, 7], [0, 4, 7], [0, 7, 3], [1, 5, 6], [1, 6, 2], [3, 2, 6], [3, 6, 7], [0, 1, 5], [0, 5, 4]])
verts_list = []
faces_list = []
for (i, bbox) in enumerate(bboxes):
verts_list.append(bbox)
faces_list.append(bbox_faces + 8 * i)
verts = np.concatenate(verts_list, axis=0)
faces = np.concatenate(faces_list, axis=0)
(verts, faces) = (verts, faces)
</DeepExtract>
if not isinstance(colors, list):
if isinstance(colors, GLColor):
<DeepExtract>
colors = (*colors.value, alpha)
</DeepExtract>
colors = np.array([colors for i in range(len(verts))])
m1 = gl.GLMeshItem(vertexes=verts, faces=faces, faceColors=colors, smooth=False)
m1.setGLOptions('additive')
widget.addItem(m1)
return widget
|
def draw_3d_bbox_meshes_in_pyqt(widget, bboxes, colors=GLColor.Gray, alpha=1.0, edgecolors=None):
bbox_faces = np.array([[0, 1, 2], [0, 2, 3], [4, 5, 6], [4, 6, 7], [0, 4, 7], [0, 7, 3], [1, 5, 6], [1, 6, 2], [3, 2, 6], [3, 6, 7], [0, 1, 5], [0, 5, 4]])
verts_list = []
faces_list = []
for (i, bbox) in enumerate(bboxes):
verts_list.append(bbox)
faces_list.append(bbox_faces + 8 * i)
verts = np.concatenate(verts_list, axis=0)
faces = np.concatenate(faces_list, axis=0)
(verts, faces) = (verts, faces)
if not isinstance(colors, list):
if isinstance(colors, GLColor):
colors = (*colors.value, alpha)
colors = np.array([colors for i in range(len(verts))])
m1 = gl.GLMeshItem(vertexes=verts, faces=faces, faceColors=colors, smooth=False)
m1.setGLOptions('additive')
widget.addItem(m1)
return widget
|
3D-CVF
|
positive
|
def pio_subtract(ref: Union[ParameterIO, ParameterList], mod: Union[ParameterIO, ParameterList]) -> Union[ParameterIO, ParameterList]:
if isinstance(ref, ParameterIO):
merged = deepcopy(ref)
else:
merged = ref
for (key, plist) in mod.lists.items():
if key in merged.lists:
<DeepExtract>
if isinstance(merged.lists[key], ParameterIO):
merged = deepcopy(merged.lists[key])
else:
merged = merged.lists[key]
for (key, plist) in plist.lists.items():
if key in merged.lists:
pio_subtract(merged.lists[key], plist)
if len(merged.lists[key].objects) == 0 and len(merged.lists[key].lists) == 0:
del merged.lists[key]
for (key, pobj) in plist.objects.items():
if key in merged.objects:
merged_pobj = merged.objects[key]
for pkey in pobj.params:
if pkey in merged_pobj.params:
del merged_pobj.params[pkey]
if len(merged_pobj.params) == 0:
del merged.objects[key]
return merged
</DeepExtract>
if len(merged.lists[key].objects) == 0 and len(merged.lists[key].lists) == 0:
del merged.lists[key]
for (key, pobj) in mod.objects.items():
if key in merged.objects:
merged_pobj = merged.objects[key]
for pkey in pobj.params:
if pkey in merged_pobj.params:
del merged_pobj.params[pkey]
if len(merged_pobj.params) == 0:
del merged.objects[key]
return merged
|
def pio_subtract(ref: Union[ParameterIO, ParameterList], mod: Union[ParameterIO, ParameterList]) -> Union[ParameterIO, ParameterList]:
if isinstance(ref, ParameterIO):
merged = deepcopy(ref)
else:
merged = ref
for (key, plist) in mod.lists.items():
if key in merged.lists:
if isinstance(merged.lists[key], ParameterIO):
merged = deepcopy(merged.lists[key])
else:
merged = merged.lists[key]
for (key, plist) in plist.lists.items():
if key in merged.lists:
pio_subtract(merged.lists[key], plist)
if len(merged.lists[key].objects) == 0 and len(merged.lists[key].lists) == 0:
del merged.lists[key]
for (key, pobj) in plist.objects.items():
if key in merged.objects:
merged_pobj = merged.objects[key]
for pkey in pobj.params:
if pkey in merged_pobj.params:
del merged_pobj.params[pkey]
if len(merged_pobj.params) == 0:
del merged.objects[key]
return merged
if len(merged.lists[key].objects) == 0 and len(merged.lists[key].lists) == 0:
del merged.lists[key]
for (key, pobj) in mod.objects.items():
if key in merged.objects:
merged_pobj = merged.objects[key]
for pkey in pobj.params:
if pkey in merged_pobj.params:
del merged_pobj.params[pkey]
if len(merged_pobj.params) == 0:
del merged.objects[key]
return merged
|
BCML
|
positive
|
def minmax(self):
if self.firstfree[0]:
<DeepExtract>
self._update_extremes(torch.min(self.data[0][:, :self.firstfree[0]].t(), dim=0)[0], torch.max(self.data[0][:, :self.firstfree[0]].t(), dim=0)[0])
</DeepExtract>
return self.extremes.clone()
|
def minmax(self):
if self.firstfree[0]:
self._update_extremes(torch.min(self.data[0][:, :self.firstfree[0]].t(), dim=0)[0], torch.max(self.data[0][:, :self.firstfree[0]].t(), dim=0)[0])
return self.extremes.clone()
|
dissect
|
positive
|
def __init__(self, id, question_text, ground_truth, model_names):
self.id = id
<DeepExtract>
def remove_articles(text):
self.question_text = re.sub('\\b(a|an|the)\\b', ' ', text)
def white_space_fix(text):
self.question_text = ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
self.question_text = ''.join((ch for ch in text if ch not in exclude))
def lower(text):
self.question_text = text.lower()
self.question_text = white_space_fix(remove_articles(remove_punc(lower(question_text))))
</DeepExtract>
self.question_head_ngram = []
self.question_tokens = nltk.word_tokenize(self.question_text)
for nc in range(3):
self.question_head_ngram.append(' '.join(self.question_tokens[0:nc]))
self.ground_truth = ground_truth
self.model_names = model_names
self.em = np.zeros(2)
self.f1 = np.zeros(2)
self.answer_text = []
|
def __init__(self, id, question_text, ground_truth, model_names):
self.id = id
def remove_articles(text):
self.question_text = re.sub('\\b(a|an|the)\\b', ' ', text)
def white_space_fix(text):
self.question_text = ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
self.question_text = ''.join((ch for ch in text if ch not in exclude))
def lower(text):
self.question_text = text.lower()
self.question_text = white_space_fix(remove_articles(remove_punc(lower(question_text))))
self.question_head_ngram = []
self.question_tokens = nltk.word_tokenize(self.question_text)
for nc in range(3):
self.question_head_ngram.append(' '.join(self.question_tokens[0:nc]))
self.ground_truth = ground_truth
self.model_names = model_names
self.em = np.zeros(2)
self.f1 = np.zeros(2)
self.answer_text = []
|
bi-att-flow
|
positive
|
def get_inverse_ohe_min_max_normalized_data(self, transformed_data):
"""Transforms one-hot-encoded and min-max normalized data into raw user-fed data format. transformed_data
should be a dataframe or an array"""
<DeepExtract>
out = transformed_data.copy()
for feat in self.categorical_feature_names:
cat_col_values = []
for val in list(self.data_df[feat].unique()):
cat_col_values.append(feat + prefix_sep + str(val))
match_cols = [c for c in transformed_data.columns if c in cat_col_values]
(cols, labs) = [[c.replace(x, '') for c in match_cols] for x in ['', feat + prefix_sep]]
out[feat] = pd.Categorical(np.array(labs)[np.argmax(transformed_data[cols].values, axis=1)])
out.drop(cols, axis=1, inplace=True)
raw_data = out
</DeepExtract>
<DeepExtract>
if len(raw_data) == 0:
raw_data = raw_data
result = raw_data.copy()
for feature_name in self.continuous_feature_names:
max_value = self.data_df[feature_name].max()
min_value = self.data_df[feature_name].min()
result[feature_name] = raw_data[feature_name] * (max_value - min_value) + min_value
raw_data = result
</DeepExtract>
<DeepExtract>
precisions_dict = defaultdict(int)
precisions = [0] * len(self.feature_names)
for (ix, col) in enumerate(self.continuous_feature_names):
if self.continuous_features_precision is not None and col in self.continuous_features_precision:
precisions[ix] = self.continuous_features_precision[col]
precisions_dict[col] = self.continuous_features_precision[col]
elif self.data_df[col].dtype == np.float32 or self.data_df[col].dtype == np.float64:
modes = self.data_df[col].mode()
maxp = len(str(modes[0]).split('.')[1])
for mx in range(len(modes)):
prec = len(str(modes[mx]).split('.')[1])
if prec > maxp:
maxp = prec
precisions[ix] = maxp
precisions_dict[col] = maxp
if output_type == 'list':
precisions = precisions
elif output_type == 'dict':
precisions = precisions_dict
</DeepExtract>
for (ix, feature) in enumerate(self.continuous_feature_names):
raw_data[feature] = raw_data[feature].astype(float).round(precisions[ix])
raw_data = raw_data[self.feature_names]
return raw_data
|
def get_inverse_ohe_min_max_normalized_data(self, transformed_data):
"""Transforms one-hot-encoded and min-max normalized data into raw user-fed data format. transformed_data
should be a dataframe or an array"""
out = transformed_data.copy()
for feat in self.categorical_feature_names:
cat_col_values = []
for val in list(self.data_df[feat].unique()):
cat_col_values.append(feat + prefix_sep + str(val))
match_cols = [c for c in transformed_data.columns if c in cat_col_values]
(cols, labs) = [[c.replace(x, '') for c in match_cols] for x in ['', feat + prefix_sep]]
out[feat] = pd.Categorical(np.array(labs)[np.argmax(transformed_data[cols].values, axis=1)])
out.drop(cols, axis=1, inplace=True)
raw_data = out
if len(raw_data) == 0:
raw_data = raw_data
result = raw_data.copy()
for feature_name in self.continuous_feature_names:
max_value = self.data_df[feature_name].max()
min_value = self.data_df[feature_name].min()
result[feature_name] = raw_data[feature_name] * (max_value - min_value) + min_value
raw_data = result
precisions_dict = defaultdict(int)
precisions = [0] * len(self.feature_names)
for (ix, col) in enumerate(self.continuous_feature_names):
if self.continuous_features_precision is not None and col in self.continuous_features_precision:
precisions[ix] = self.continuous_features_precision[col]
precisions_dict[col] = self.continuous_features_precision[col]
elif self.data_df[col].dtype == np.float32 or self.data_df[col].dtype == np.float64:
modes = self.data_df[col].mode()
maxp = len(str(modes[0]).split('.')[1])
for mx in range(len(modes)):
prec = len(str(modes[mx]).split('.')[1])
if prec > maxp:
maxp = prec
precisions[ix] = maxp
precisions_dict[col] = maxp
if output_type == 'list':
precisions = precisions
elif output_type == 'dict':
precisions = precisions_dict
for (ix, feature) in enumerate(self.continuous_feature_names):
raw_data[feature] = raw_data[feature].astype(float).round(precisions[ix])
raw_data = raw_data[self.feature_names]
return raw_data
|
DiCE
|
positive
|
def test_datagram_frame(self):
with client_and_server(client_options={'max_datagram_frame_size': 65536}, server_options={'max_datagram_frame_size': 65536}) as (client, server):
<DeepExtract>
event = client.next_event()
self.assertEqual(type(event), events.ProtocolNegotiated)
self.assertEqual(event.alpn_protocol, None)
event = client.next_event()
self.assertEqual(type(event), events.HandshakeCompleted)
self.assertEqual(event.alpn_protocol, None)
self.assertEqual(event.early_data_accepted, False)
self.assertEqual(event.session_resumed, False)
for i in range(7):
self.assertEqual(type(client.next_event()), events.ConnectionIdIssued)
self.assertIsNone(client.next_event())
event = server.next_event()
self.assertEqual(type(event), events.ProtocolNegotiated)
self.assertEqual(event.alpn_protocol, None)
event = server.next_event()
self.assertEqual(type(event), events.HandshakeCompleted)
self.assertEqual(event.alpn_protocol, None)
for i in range(7):
self.assertEqual(type(server.next_event()), events.ConnectionIdIssued)
self.assertIsNone(server.next_event())
</DeepExtract>
client.send_datagram_frame(b'hello')
self.assertEqual(transfer(client, server), 1)
event = server.next_event()
self.assertEqual(type(event), events.DatagramFrameReceived)
self.assertEqual(event.data, b'hello')
|
def test_datagram_frame(self):
with client_and_server(client_options={'max_datagram_frame_size': 65536}, server_options={'max_datagram_frame_size': 65536}) as (client, server):
event = client.next_event()
self.assertEqual(type(event), events.ProtocolNegotiated)
self.assertEqual(event.alpn_protocol, None)
event = client.next_event()
self.assertEqual(type(event), events.HandshakeCompleted)
self.assertEqual(event.alpn_protocol, None)
self.assertEqual(event.early_data_accepted, False)
self.assertEqual(event.session_resumed, False)
for i in range(7):
self.assertEqual(type(client.next_event()), events.ConnectionIdIssued)
self.assertIsNone(client.next_event())
event = server.next_event()
self.assertEqual(type(event), events.ProtocolNegotiated)
self.assertEqual(event.alpn_protocol, None)
event = server.next_event()
self.assertEqual(type(event), events.HandshakeCompleted)
self.assertEqual(event.alpn_protocol, None)
for i in range(7):
self.assertEqual(type(server.next_event()), events.ConnectionIdIssued)
self.assertIsNone(server.next_event())
client.send_datagram_frame(b'hello')
self.assertEqual(transfer(client, server), 1)
event = server.next_event()
self.assertEqual(type(event), events.DatagramFrameReceived)
self.assertEqual(event.data, b'hello')
|
aioquic
|
positive
|
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
if isinstance(state_dict, dict):
cpu_dict = OrderedDict()
for (k, v) in state_dict.items():
<DeepExtract>
if isinstance(v, dict):
cpu_dict = OrderedDict()
for (k, v) in v.items():
cpu_dict[k] = convert_state_dict_type(v)
cpu_dict[k] = cpu_dict
elif isinstance(v, list):
cpu_dict[k] = [convert_state_dict_type(v) for v in v]
elif torch.is_tensor(v):
cpu_dict[k] = v.type(ttype)
else:
cpu_dict[k] = v
</DeepExtract>
return cpu_dict
elif isinstance(state_dict, list):
return [convert_state_dict_type(v) for v in state_dict]
elif torch.is_tensor(state_dict):
return state_dict.type(ttype)
else:
return state_dict
|
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
if isinstance(state_dict, dict):
cpu_dict = OrderedDict()
for (k, v) in state_dict.items():
if isinstance(v, dict):
cpu_dict = OrderedDict()
for (k, v) in v.items():
cpu_dict[k] = convert_state_dict_type(v)
cpu_dict[k] = cpu_dict
elif isinstance(v, list):
cpu_dict[k] = [convert_state_dict_type(v) for v in v]
elif torch.is_tensor(v):
cpu_dict[k] = v.type(ttype)
else:
cpu_dict[k] = v
return cpu_dict
elif isinstance(state_dict, list):
return [convert_state_dict_type(v) for v in state_dict]
elif torch.is_tensor(state_dict):
return state_dict.type(ttype)
else:
return state_dict
|
Abstractive-Text-Summarization
|
positive
|
def paths(root):
res = []
<DeepExtract>
if not root.left and (not root.right):
res.append(str(root.val))
if root.left:
dfs(res, root.left, str(root.val) + '->' + str(root.left.val))
if root.right:
dfs(res, root.right, str(root.val) + '->' + str(root.right.val))
</DeepExtract>
return res
|
def paths(root):
res = []
if not root.left and (not root.right):
res.append(str(root.val))
if root.left:
dfs(res, root.left, str(root.val) + '->' + str(root.left.val))
if root.right:
dfs(res, root.right, str(root.val) + '->' + str(root.right.val))
return res
|
algorithms
|
positive
|
def source_to_records(mappings, source, source_wrapper=None, records_wrapper=None):
if source_wrapper:
source = source_wrapper(source)
<DeepExtract>
callback = partial(_row_cb, mappings)
</DeepExtract>
records = (callback(row) for row in source)
if records_wrapper:
records = records_wrapper(records)
return records
|
def source_to_records(mappings, source, source_wrapper=None, records_wrapper=None):
if source_wrapper:
source = source_wrapper(source)
callback = partial(_row_cb, mappings)
records = (callback(row) for row in source)
if records_wrapper:
records = records_wrapper(records)
return records
|
AlephNull
|
positive
|
def _get_base_counts(ref, alts, alphabet, illegal=('S', '*', '[', ']', '>', '<')):
"""Return a list of A,C,G and T frequencies in given position of the alignment.
******CCCCCcccccCccCCCCcccCcCccCcc^SC^]c
* - deletion
+INT / -INT - insertion
S - ??
[ or ] - ??
"""
dels = alts.count('*')
alts = alts.upper()
<DeepExtract>
for symbol in ('-', '+'):
baseNo = 0
while symbol in alts:
i = alts.index(symbol)
j = 1
digits = []
while alts[i + j].isdigit():
digits.append(alts[i + j])
j += 1
if digits:
baseNo = int(''.join(digits))
alts = alts[:i] + alts[i + baseNo + len(digits) + 1:]
alts = alts
</DeepExtract>
base_counts = []
for base in alphabet:
if base != ref:
base_counts.append(alts.count(base))
else:
base_counts.append(alts.count('.') + alts.count(','))
return (base_counts, dels)
|
def _get_base_counts(ref, alts, alphabet, illegal=('S', '*', '[', ']', '>', '<')):
"""Return a list of A,C,G and T frequencies in given position of the alignment.
******CCCCCcccccCccCCCCcccCcCccCcc^SC^]c
* - deletion
+INT / -INT - insertion
S - ??
[ or ] - ??
"""
dels = alts.count('*')
alts = alts.upper()
for symbol in ('-', '+'):
baseNo = 0
while symbol in alts:
i = alts.index(symbol)
j = 1
digits = []
while alts[i + j].isdigit():
digits.append(alts[i + j])
j += 1
if digits:
baseNo = int(''.join(digits))
alts = alts[:i] + alts[i + baseNo + len(digits) + 1:]
alts = alts
base_counts = []
for base in alphabet:
if base != ref:
base_counts.append(alts.count(base))
else:
base_counts.append(alts.count('.') + alts.count(','))
return (base_counts, dels)
|
bin
|
positive
|
def test_factory_sets_identifiers(self):
model = {'identifiers': [{'name': 'QueueUrl'}, {'name': 'ReceiptHandle'}]}
<DeepExtract>
if model is None:
model = {}
if resource_json_definitions is None:
resource_json_definitions = {}
service_context = ServiceContext(service_name='test', resource_json_definitions=resource_json_definitions, service_model=service_model, service_waiter_model=None)
MessageResource = self.factory.load_from_definition(resource_name='Message', single_resource_json_definition=model, service_context=service_context)
</DeepExtract>
assert 'queue_url' in MessageResource.meta.identifiers
assert 'receipt_handle' in MessageResource.meta.identifiers
|
def test_factory_sets_identifiers(self):
model = {'identifiers': [{'name': 'QueueUrl'}, {'name': 'ReceiptHandle'}]}
if model is None:
model = {}
if resource_json_definitions is None:
resource_json_definitions = {}
service_context = ServiceContext(service_name='test', resource_json_definitions=resource_json_definitions, service_model=service_model, service_waiter_model=None)
MessageResource = self.factory.load_from_definition(resource_name='Message', single_resource_json_definition=model, service_context=service_context)
assert 'queue_url' in MessageResource.meta.identifiers
assert 'receipt_handle' in MessageResource.meta.identifiers
|
boto3
|
positive
|
def parents(ds, tips):
if not isinstance(ds, tuple):
ds = (ds,)
for p in ds:
pds = Dataset(p)
if pds.parent:
<DeepExtract>
if not isinstance(pds.parent, tuple):
pds.parent = (pds.parent,)
for p in pds.parent:
pds = Dataset(p)
if pds.parent:
parents(pds.parent, tips)
else:
tips.add(pds)
return tips
</DeepExtract>
else:
tips.add(pds)
return tips
|
def parents(ds, tips):
if not isinstance(ds, tuple):
ds = (ds,)
for p in ds:
pds = Dataset(p)
if pds.parent:
if not isinstance(pds.parent, tuple):
pds.parent = (pds.parent,)
for p in pds.parent:
pds = Dataset(p)
if pds.parent:
parents(pds.parent, tips)
else:
tips.add(pds)
return tips
else:
tips.add(pds)
return tips
|
accelerator
|
positive
|
def execute(self, context):
obj = bpy.context.active_object
bm = bmesh.from_edit_mesh(obj.data)
<DeepExtract>
context = bpy.context
wm = context.window_manager
wm.progress_begin(0, 100)
def edge_is_intersecting(e2, bm):
for e1 in bm.edges:
edge_visible = not e1.hide
edge_not_same = e1.verts[0] not in e2 and e1.verts[1] not in e2
if edge_visible and edge_not_same:
i = geometry.intersect_line_line_2d(e1.verts[0].co.xz, e1.verts[1].co.xz, e2[0].co.xz, e2[1].co.xz)
if i != None:
return True
return False
def get_linked_verts(vert):
linked_verts = [vert]
outer_verts = [vert]
while len(outer_verts) > 0:
new_verts = []
for outer_vert in outer_verts:
for edge in outer_vert.link_edges:
other_vert = edge.other_vert(outer_vert)
if other_vert not in linked_verts:
linked_verts.append(other_vert)
new_verts.append(other_vert)
outer_verts = new_verts
return linked_verts
faces = []
for face in bm.faces:
if not face.hide:
face_editable = True
for vert in face.verts:
if vert.hide:
face_editable = False
break
if face_editable:
faces.append(face)
bmesh.ops.delete(bm, geom=faces, context='FACES_ONLY')
wm.progress_update(30)
(edges_len_average, shortest_edge) = get_average_edge_length(bm, context.active_object)
verts = []
for edge in bm.edges:
if not edge.hide:
if edge.calc_length() < edges_len_average * 0.01:
if edge.verts[0] not in verts:
verts.append(edge.verts[0])
if edge.verts[1] not in verts:
verts.append(edge.verts[1])
bmesh.ops.remove_doubles(bm, verts=verts, dist=0.01)
intersection_points = []
to_be_deleted = []
for e1 in bm.edges:
for e2 in bm.edges:
edges_not_visible = not e1.hide and (not e2.hide)
edges_share_points = e1.verts[0].co.xz in [e2.verts[0].co.xz, e2.verts[1].co.xz] or e1.verts[1].co.xz in [e2.verts[0].co.xz, e2.verts[1].co.xz]
if e1 != e2 and (not edges_share_points) and edges_not_visible:
i = geometry.intersect_line_line_2d(e1.verts[0].co.xz, e1.verts[1].co.xz, e2.verts[0].co.xz, e2.verts[1].co.xz)
if i != None:
i_3d = Vector((i[0], e1.verts[0].co[1], i[1]))
if e1 not in to_be_deleted:
to_be_deleted.append(e1)
if e2 not in to_be_deleted:
to_be_deleted.append(e2)
if i_3d not in intersection_points:
intersection_points.append(i_3d)
for edge in to_be_deleted:
bm.edges.remove(edge)
for p in intersection_points:
bm.verts.new(p)
def get_vertex_loops(bm):
all_verts = []
vert_loops = []
for vert in bm.verts:
if not vert.hide:
if vert not in all_verts:
loop_list = get_linked_verts(vert)
vert_loops.append(loop_list)
all_verts += loop_list
return vert_loops
connected_edges = []
for vert in bm.verts:
if not vert.hide and len(vert.link_edges) == 0:
distance = 1000000000000000
vert_a = None
vert_b = None
for vert2 in bm.verts:
edge_center = (vert.co + vert2.co) * 0.5
if vert != vert2 and vert2.co != vert.co:
if (vert.co - vert2.co).magnitude < distance:
distance = (vert.co - vert2.co).magnitude
vert_a = vert
vert_b = vert2
if vert_a != None and vert_b != None:
edge_center = (vert_a.co + vert_b.co) * 0.5
if edge_center not in connected_edges:
bm.edges.new([vert_a, vert_b])
connected_edges.append(edge_center)
vert_loops = get_vertex_loops(bm)
connected_edges = []
if len(vert_loops) > 0:
for (i, loop) in enumerate(vert_loops):
for vert in loop:
exclude_verts = [vert.link_edges[0].other_vert(vert), vert] if len(vert.link_edges) > 0 else [vert]
distance = 1000000000000000
vert_a = None
vert_b = None
if len(vert.link_edges) == 1:
for (j, loop_next) in enumerate(vert_loops):
for vert2 in loop_next:
if vert2 != vert:
edge1 = (vert.co - vert.link_edges[0].other_vert(vert).co).normalized()
edge2 = (vert.co - vert2.co).normalized()
if edge1.length > 0 and edge2.length > 0:
angle = degrees(edge1.angle(edge2))
if (vert.co - vert2.co).magnitude < distance and vert2 not in exclude_verts and (abs(angle) > 30) and (not edge_is_intersecting([vert, vert2], bm)):
distance = (vert.co - vert2.co).magnitude
vert_a = vert
vert_b = vert2
if vert_a != None and vert_b != None:
edge_center = (vert_a.co + vert_b.co) * 0.5
if edge_center not in connected_edges:
bm.edges.new([vert_a, vert_b])
connected_edges.append(edge_center)
wm.progress_update(100)
vert_loops = get_vertex_loops(bm)
connected_edges = []
if len(vert_loops) > 1:
for (i, loop) in enumerate(vert_loops):
distance = 1000000000000000
vert_a = None
vert_b = None
for (j, loop_next) in enumerate(vert_loops):
if j != i:
for vert in loop:
for vert2 in loop_next:
if (vert.co - vert2.co).magnitude < distance:
distance = (vert.co - vert2.co).magnitude
vert_a = vert
vert_b = vert2
if vert_a != None and vert_b != None:
edge_center = (vert_a.co + vert_b.co) * 0.5
if edge_center not in connected_edges:
bm.edges.new([vert_a, vert_b])
connected_edges.append(edge_center)
edges_center = []
for edge in bm.edges:
edges_center.append((edge.verts[0].co + edge.verts[1].co) * 0.5)
edges = []
for edge in bm.edges:
if not edge.hide:
edges.append(edge)
bmesh.ops.triangle_fill(bm, use_beauty=True, use_dissolve=True, edges=edges)
delete_edges = []
for edge in bm.edges:
if not edge.hide and (not edge.verts[0].hide) and (not edge.verts[1].hide):
edge_center = (edge.verts[0].co + edge.verts[1].co) * 0.5
if edge_center not in edges_center and edge.is_boundary:
delete_edges.append(edge)
bmesh.ops.delete(bm, geom=delete_edges, context='EDGES')
delete_edges = []
for edge in bm.edges:
if not edge.hide and edge.is_wire and (not edge.verts[0].hide) and (not edge.verts[1].hide):
delete_edges.append(edge)
bmesh.ops.delete(bm, geom=delete_edges, context='EDGES')
faces = []
for face in bm.faces:
if not face.hide:
face_editable = True
for vert in face.verts:
if vert.hide:
face_editable = False
break
if face_editable:
faces.append(face)
bmesh.ops.triangulate(bm, faces=faces)
bmesh.update_edit_mesh(obj.data)
wm.progress_end()
</DeepExtract>
bpy.ops.coa_tools.reproject_sprite_texture()
return {'FINISHED'}
|
def execute(self, context):
obj = bpy.context.active_object
bm = bmesh.from_edit_mesh(obj.data)
context = bpy.context
wm = context.window_manager
wm.progress_begin(0, 100)
def edge_is_intersecting(e2, bm):
for e1 in bm.edges:
edge_visible = not e1.hide
edge_not_same = e1.verts[0] not in e2 and e1.verts[1] not in e2
if edge_visible and edge_not_same:
i = geometry.intersect_line_line_2d(e1.verts[0].co.xz, e1.verts[1].co.xz, e2[0].co.xz, e2[1].co.xz)
if i != None:
return True
return False
def get_linked_verts(vert):
linked_verts = [vert]
outer_verts = [vert]
while len(outer_verts) > 0:
new_verts = []
for outer_vert in outer_verts:
for edge in outer_vert.link_edges:
other_vert = edge.other_vert(outer_vert)
if other_vert not in linked_verts:
linked_verts.append(other_vert)
new_verts.append(other_vert)
outer_verts = new_verts
return linked_verts
faces = []
for face in bm.faces:
if not face.hide:
face_editable = True
for vert in face.verts:
if vert.hide:
face_editable = False
break
if face_editable:
faces.append(face)
bmesh.ops.delete(bm, geom=faces, context='FACES_ONLY')
wm.progress_update(30)
(edges_len_average, shortest_edge) = get_average_edge_length(bm, context.active_object)
verts = []
for edge in bm.edges:
if not edge.hide:
if edge.calc_length() < edges_len_average * 0.01:
if edge.verts[0] not in verts:
verts.append(edge.verts[0])
if edge.verts[1] not in verts:
verts.append(edge.verts[1])
bmesh.ops.remove_doubles(bm, verts=verts, dist=0.01)
intersection_points = []
to_be_deleted = []
for e1 in bm.edges:
for e2 in bm.edges:
edges_not_visible = not e1.hide and (not e2.hide)
edges_share_points = e1.verts[0].co.xz in [e2.verts[0].co.xz, e2.verts[1].co.xz] or e1.verts[1].co.xz in [e2.verts[0].co.xz, e2.verts[1].co.xz]
if e1 != e2 and (not edges_share_points) and edges_not_visible:
i = geometry.intersect_line_line_2d(e1.verts[0].co.xz, e1.verts[1].co.xz, e2.verts[0].co.xz, e2.verts[1].co.xz)
if i != None:
i_3d = Vector((i[0], e1.verts[0].co[1], i[1]))
if e1 not in to_be_deleted:
to_be_deleted.append(e1)
if e2 not in to_be_deleted:
to_be_deleted.append(e2)
if i_3d not in intersection_points:
intersection_points.append(i_3d)
for edge in to_be_deleted:
bm.edges.remove(edge)
for p in intersection_points:
bm.verts.new(p)
def get_vertex_loops(bm):
all_verts = []
vert_loops = []
for vert in bm.verts:
if not vert.hide:
if vert not in all_verts:
loop_list = get_linked_verts(vert)
vert_loops.append(loop_list)
all_verts += loop_list
return vert_loops
connected_edges = []
for vert in bm.verts:
if not vert.hide and len(vert.link_edges) == 0:
distance = 1000000000000000
vert_a = None
vert_b = None
for vert2 in bm.verts:
edge_center = (vert.co + vert2.co) * 0.5
if vert != vert2 and vert2.co != vert.co:
if (vert.co - vert2.co).magnitude < distance:
distance = (vert.co - vert2.co).magnitude
vert_a = vert
vert_b = vert2
if vert_a != None and vert_b != None:
edge_center = (vert_a.co + vert_b.co) * 0.5
if edge_center not in connected_edges:
bm.edges.new([vert_a, vert_b])
connected_edges.append(edge_center)
vert_loops = get_vertex_loops(bm)
connected_edges = []
if len(vert_loops) > 0:
for (i, loop) in enumerate(vert_loops):
for vert in loop:
exclude_verts = [vert.link_edges[0].other_vert(vert), vert] if len(vert.link_edges) > 0 else [vert]
distance = 1000000000000000
vert_a = None
vert_b = None
if len(vert.link_edges) == 1:
for (j, loop_next) in enumerate(vert_loops):
for vert2 in loop_next:
if vert2 != vert:
edge1 = (vert.co - vert.link_edges[0].other_vert(vert).co).normalized()
edge2 = (vert.co - vert2.co).normalized()
if edge1.length > 0 and edge2.length > 0:
angle = degrees(edge1.angle(edge2))
if (vert.co - vert2.co).magnitude < distance and vert2 not in exclude_verts and (abs(angle) > 30) and (not edge_is_intersecting([vert, vert2], bm)):
distance = (vert.co - vert2.co).magnitude
vert_a = vert
vert_b = vert2
if vert_a != None and vert_b != None:
edge_center = (vert_a.co + vert_b.co) * 0.5
if edge_center not in connected_edges:
bm.edges.new([vert_a, vert_b])
connected_edges.append(edge_center)
wm.progress_update(100)
vert_loops = get_vertex_loops(bm)
connected_edges = []
if len(vert_loops) > 1:
for (i, loop) in enumerate(vert_loops):
distance = 1000000000000000
vert_a = None
vert_b = None
for (j, loop_next) in enumerate(vert_loops):
if j != i:
for vert in loop:
for vert2 in loop_next:
if (vert.co - vert2.co).magnitude < distance:
distance = (vert.co - vert2.co).magnitude
vert_a = vert
vert_b = vert2
if vert_a != None and vert_b != None:
edge_center = (vert_a.co + vert_b.co) * 0.5
if edge_center not in connected_edges:
bm.edges.new([vert_a, vert_b])
connected_edges.append(edge_center)
edges_center = []
for edge in bm.edges:
edges_center.append((edge.verts[0].co + edge.verts[1].co) * 0.5)
edges = []
for edge in bm.edges:
if not edge.hide:
edges.append(edge)
bmesh.ops.triangle_fill(bm, use_beauty=True, use_dissolve=True, edges=edges)
delete_edges = []
for edge in bm.edges:
if not edge.hide and (not edge.verts[0].hide) and (not edge.verts[1].hide):
edge_center = (edge.verts[0].co + edge.verts[1].co) * 0.5
if edge_center not in edges_center and edge.is_boundary:
delete_edges.append(edge)
bmesh.ops.delete(bm, geom=delete_edges, context='EDGES')
delete_edges = []
for edge in bm.edges:
if not edge.hide and edge.is_wire and (not edge.verts[0].hide) and (not edge.verts[1].hide):
delete_edges.append(edge)
bmesh.ops.delete(bm, geom=delete_edges, context='EDGES')
faces = []
for face in bm.faces:
if not face.hide:
face_editable = True
for vert in face.verts:
if vert.hide:
face_editable = False
break
if face_editable:
faces.append(face)
bmesh.ops.triangulate(bm, faces=faces)
bmesh.update_edit_mesh(obj.data)
wm.progress_end()
bpy.ops.coa_tools.reproject_sprite_texture()
return {'FINISHED'}
|
coa_tools
|
positive
|
def test_break_lock_with_log_info_calls_borg_with_info_parameter():
flexmock(module.flags).should_receive('make_repository_flags').and_return(('repo',))
<DeepExtract>
flexmock(module.environment).should_receive('make_environment')
flexmock(module).should_receive('execute_command').with_args(('borg', 'break-lock', '--info', 'repo'), borg_local_path='borg', extra_environment=None).once()
</DeepExtract>
insert_logging_mock(logging.INFO)
module.break_lock(repository_path='repo', storage_config={}, local_borg_version='1.2.3')
|
def test_break_lock_with_log_info_calls_borg_with_info_parameter():
flexmock(module.flags).should_receive('make_repository_flags').and_return(('repo',))
flexmock(module.environment).should_receive('make_environment')
flexmock(module).should_receive('execute_command').with_args(('borg', 'break-lock', '--info', 'repo'), borg_local_path='borg', extra_environment=None).once()
insert_logging_mock(logging.INFO)
module.break_lock(repository_path='repo', storage_config={}, local_borg_version='1.2.3')
|
borgmatic
|
positive
|
def execute_FuncCall(self, f, mem):
name = f.args[0].name
try:
<DeepExtract>
fnc = self.prog.getfnc(name)
</DeepExtract>
except KeyError:
raise RuntimeErr("Unknown function: '%s'" % (name,))
args = [self.execute(x, mem) for x in f.args[1:]]
newmem = {VAR_IN: mem.get(VAR_IN, UndefValue()), VAR_OUT: mem.get(VAR_OUT, UndefValue())}
if len(fnc.params) != len(args):
raise RuntimeErr('Wrong number of args: expected %s, got %s' % (len(fnc.params), len(args)))
for ((var, _), arg) in zip(fnc.params, args):
newmem[var] = deepcopy(arg)
oldfnc = self.fnc
oldloc = self.loc
<DeepExtract>
if self.timeout and self.starttime and (time.time() - self.starttime > self.timeout):
raise RuntimeErr('Timeout (%.3f)' % (round(time.time() - self.starttime, 3),))
name = fnc.__class__.__name__
meth = getattr(self, 'execute_%s' % (name,))
try:
trace = meth(fnc, newmem)
except (OverflowError, ZeroDivisionError, AttributeError, TypeError, IndexError, RuntimeError, ValueError, KeyError) as ex:
raise RuntimeErr("Exception '%s' on execution of '%s'" % (ex, fnc))
</DeepExtract>
self.fnc = oldfnc
self.loc = oldloc
return trace[-1][2].get(prime(VAR_RET), self.DEFAULT_RETURN)
|
def execute_FuncCall(self, f, mem):
name = f.args[0].name
try:
fnc = self.prog.getfnc(name)
except KeyError:
raise RuntimeErr("Unknown function: '%s'" % (name,))
args = [self.execute(x, mem) for x in f.args[1:]]
newmem = {VAR_IN: mem.get(VAR_IN, UndefValue()), VAR_OUT: mem.get(VAR_OUT, UndefValue())}
if len(fnc.params) != len(args):
raise RuntimeErr('Wrong number of args: expected %s, got %s' % (len(fnc.params), len(args)))
for ((var, _), arg) in zip(fnc.params, args):
newmem[var] = deepcopy(arg)
oldfnc = self.fnc
oldloc = self.loc
if self.timeout and self.starttime and (time.time() - self.starttime > self.timeout):
raise RuntimeErr('Timeout (%.3f)' % (round(time.time() - self.starttime, 3),))
name = fnc.__class__.__name__
meth = getattr(self, 'execute_%s' % (name,))
try:
trace = meth(fnc, newmem)
except (OverflowError, ZeroDivisionError, AttributeError, TypeError, IndexError, RuntimeError, ValueError, KeyError) as ex:
raise RuntimeErr("Exception '%s' on execution of '%s'" % (ex, fnc))
self.fnc = oldfnc
self.loc = oldloc
return trace[-1][2].get(prime(VAR_RET), self.DEFAULT_RETURN)
|
clara
|
positive
|
def _teardown_features(self):
super(QtGraphicsView, self)._teardown_features()
features = self._extra_features
if features & GraphicFeature.MouseEvent:
<DeepExtract>
widget = self.widget
del widget.mousePressEvent
del widget.mouseMoveEvent
del widget.mouseReleaseEvent
</DeepExtract>
if features & GraphicFeature.WheelEvent:
<DeepExtract>
widget = self.widget
del widget.wheelEvent
</DeepExtract>
<DeepExtract>
widget = self.widget
del widget.resizeEvent
</DeepExtract>
|
def _teardown_features(self):
super(QtGraphicsView, self)._teardown_features()
features = self._extra_features
if features & GraphicFeature.MouseEvent:
widget = self.widget
del widget.mousePressEvent
del widget.mouseMoveEvent
del widget.mouseReleaseEvent
if features & GraphicFeature.WheelEvent:
widget = self.widget
del widget.wheelEvent
widget = self.widget
del widget.resizeEvent
|
enamlx
|
positive
|
def create_cluster_from_parameters(cluster, template):
"""
Function to create the Cluster from the MacroParameters
:param ecs_composex.elasticache.elasticache_stack.CacheCluster cluster:
:param template:
:return:
"""
required_keys = ['Engine', 'EngineVersion']
if not cluster.properties and (not all((key in required_keys for key in cluster.parameters))):
raise KeyError('When using MacroParameters only, you must specify at least', required_keys)
props = {'CacheNodeType': 'cache.t3.small' if not keyisset('CacheNodeType', cluster.parameters) else cluster.parameters['CacheNodeType'], 'Engine': cluster.parameters['Engine'], 'EngineVersion': cluster.parameters['EngineVersion'], 'NumCacheNodes': 1, 'VpcSecurityGroupIds': [GetAtt(cluster.db_sg, 'GroupId')], 'CacheSubnetGroupName': Ref(cluster.db_subnet_group), 'Tags': Tags(Name=cluster.logical_name, ComposeName=cluster.name)}
if keyisset('ParameterGroup', cluster.parameters):
<DeepExtract>
props = import_record_properties(cluster.parameters['ParameterGroup'], ParameterGroup)
cluster.parameter_group = ParameterGroup(f'{cluster.logical_name}ParameterGroup', **props)
</DeepExtract>
template.add_resource(cluster.parameter_group)
cluster.cfn_resource = CacheCluster(cluster.logical_name, **props)
template.add_resource(cluster.cfn_resource)
|
def create_cluster_from_parameters(cluster, template):
"""
Function to create the Cluster from the MacroParameters
:param ecs_composex.elasticache.elasticache_stack.CacheCluster cluster:
:param template:
:return:
"""
required_keys = ['Engine', 'EngineVersion']
if not cluster.properties and (not all((key in required_keys for key in cluster.parameters))):
raise KeyError('When using MacroParameters only, you must specify at least', required_keys)
props = {'CacheNodeType': 'cache.t3.small' if not keyisset('CacheNodeType', cluster.parameters) else cluster.parameters['CacheNodeType'], 'Engine': cluster.parameters['Engine'], 'EngineVersion': cluster.parameters['EngineVersion'], 'NumCacheNodes': 1, 'VpcSecurityGroupIds': [GetAtt(cluster.db_sg, 'GroupId')], 'CacheSubnetGroupName': Ref(cluster.db_subnet_group), 'Tags': Tags(Name=cluster.logical_name, ComposeName=cluster.name)}
if keyisset('ParameterGroup', cluster.parameters):
props = import_record_properties(cluster.parameters['ParameterGroup'], ParameterGroup)
cluster.parameter_group = ParameterGroup(f'{cluster.logical_name}ParameterGroup', **props)
template.add_resource(cluster.parameter_group)
cluster.cfn_resource = CacheCluster(cluster.logical_name, **props)
template.add_resource(cluster.cfn_resource)
|
ecs_composex
|
positive
|
def compileAlias(self, sym):
""" Compiles the given symbol as an alias. """
<DeepExtract>
if sym in self.aliases:
alias = self.aliases[sym]
alias = None
</DeepExtract>
if alias is None:
raise CompilerException('Unknown Local ' + str(sym))
return alias.compile(self)
|
def compileAlias(self, sym):
""" Compiles the given symbol as an alias. """
if sym in self.aliases:
alias = self.aliases[sym]
alias = None
if alias is None:
raise CompilerException('Unknown Local ' + str(sym))
return alias.compile(self)
|
clojure-py
|
positive
|
def get_conn_parent_category_not_linked_Ctx(parse_dict, DocID, sent_index, conn_indices):
<DeepExtract>
C_String = ' '.join([parse_dict[DocID]['sentences'][sent_index]['words'][word_token][0] for word_token in conn_indices])
conn_name = C_String
</DeepExtract>
parse_tree = parse_dict[DocID]['sentences'][sent_index]['parsetree'].strip()
syntax_tree = Syntax_tree(parse_tree)
if syntax_tree.tree == None:
parent_categoryCtx = 'NONE_TREE'
else:
parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices)
<DeepExtract>
if parent_category_node == None:
parent_categoryCtx = 'None'
Ctx = []
Ctx.append(parent_category_node.name)
if parent_category_node.up == None:
Ctx.append('NULL')
else:
Ctx.append(parent_category_node.up.name)
left_siblings = syntax_tree.get_left_siblings(parent_category_node)
if left_siblings == []:
Ctx.append('NULL')
else:
Ctx.append(left_siblings[-1].name)
right_siblings = syntax_tree.get_right_siblings(parent_category_node)
if right_siblings == []:
Ctx.append('NULL')
else:
Ctx.append(right_siblings[0].name)
nodeCtx = '-'.join(Ctx)
parent_categoryCtx = nodeCtx
</DeepExtract>
conn_parent_categoryCtx = '%s|%s' % (conn_name, parent_categoryCtx)
return conn_parent_categoryCtx
|
def get_conn_parent_category_not_linked_Ctx(parse_dict, DocID, sent_index, conn_indices):
C_String = ' '.join([parse_dict[DocID]['sentences'][sent_index]['words'][word_token][0] for word_token in conn_indices])
conn_name = C_String
parse_tree = parse_dict[DocID]['sentences'][sent_index]['parsetree'].strip()
syntax_tree = Syntax_tree(parse_tree)
if syntax_tree.tree == None:
parent_categoryCtx = 'NONE_TREE'
else:
parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices)
if parent_category_node == None:
parent_categoryCtx = 'None'
Ctx = []
Ctx.append(parent_category_node.name)
if parent_category_node.up == None:
Ctx.append('NULL')
else:
Ctx.append(parent_category_node.up.name)
left_siblings = syntax_tree.get_left_siblings(parent_category_node)
if left_siblings == []:
Ctx.append('NULL')
else:
Ctx.append(left_siblings[-1].name)
right_siblings = syntax_tree.get_right_siblings(parent_category_node)
if right_siblings == []:
Ctx.append('NULL')
else:
Ctx.append(right_siblings[0].name)
nodeCtx = '-'.join(Ctx)
parent_categoryCtx = nodeCtx
conn_parent_categoryCtx = '%s|%s' % (conn_name, parent_categoryCtx)
return conn_parent_categoryCtx
|
conll2015_discourse
|
positive
|
def xml2dict(string):
"""Convert and XML string into nested dicts
The lowest element will be a Field object
"""
root = ET.fromstring(string)
data = {}
def _recurse(elem):
data = {}
for subelem in elem:
if hasattr(subelem, 'text') and subelem.text.strip():
field = Field(subelem.text, subelem.attrib)
if subelem.tag not in data:
data[subelem.tag] = field
elif not isinstance(data[subelem.tag], list):
data[subelem.tag] = [data[subelem.tag], field]
else:
data[subelem.tag].append(field)
elif subelem.tag in data:
if not isinstance(data[subelem.tag], list):
data[subelem.tag] = [data[subelem.tag], _recurse(subelem)]
else:
data[subelem.tag].append(_recurse(subelem))
else:
<DeepExtract>
data = {}
for subelem in subelem:
if hasattr(subelem, 'text') and subelem.text.strip():
field = Field(subelem.text, subelem.attrib)
if subelem.tag not in data:
data[subelem.tag] = field
elif not isinstance(data[subelem.tag], list):
data[subelem.tag] = [data[subelem.tag], field]
else:
data[subelem.tag].append(field)
elif subelem.tag in data:
if not isinstance(data[subelem.tag], list):
data[subelem.tag] = [data[subelem.tag], _recurse(subelem)]
else:
data[subelem.tag].append(_recurse(subelem))
else:
data[subelem.tag] = _recurse(subelem)
data[subelem.tag] = data
</DeepExtract>
return data
return _recurse(root)
|
def xml2dict(string):
"""Convert and XML string into nested dicts
The lowest element will be a Field object
"""
root = ET.fromstring(string)
data = {}
def _recurse(elem):
data = {}
for subelem in elem:
if hasattr(subelem, 'text') and subelem.text.strip():
field = Field(subelem.text, subelem.attrib)
if subelem.tag not in data:
data[subelem.tag] = field
elif not isinstance(data[subelem.tag], list):
data[subelem.tag] = [data[subelem.tag], field]
else:
data[subelem.tag].append(field)
elif subelem.tag in data:
if not isinstance(data[subelem.tag], list):
data[subelem.tag] = [data[subelem.tag], _recurse(subelem)]
else:
data[subelem.tag].append(_recurse(subelem))
else:
data = {}
for subelem in subelem:
if hasattr(subelem, 'text') and subelem.text.strip():
field = Field(subelem.text, subelem.attrib)
if subelem.tag not in data:
data[subelem.tag] = field
elif not isinstance(data[subelem.tag], list):
data[subelem.tag] = [data[subelem.tag], field]
else:
data[subelem.tag].append(field)
elif subelem.tag in data:
if not isinstance(data[subelem.tag], list):
data[subelem.tag] = [data[subelem.tag], _recurse(subelem)]
else:
data[subelem.tag].append(_recurse(subelem))
else:
data[subelem.tag] = _recurse(subelem)
data[subelem.tag] = data
return data
return _recurse(root)
|
beyond
|
positive
|
def cond_log_prob(self, inputs: Union[str, List[str]], targets: Union[List[str], List[List[str]]], batch_size: int=64, absolute_normalization: Optional[bool]=False) -> Union[List[float], List[List[float]]]:
"""Computes conditional log probabilities of targets given inputs.
Args:
`inputs`: A single string input or a list of string inputs.
`targets`: Possible string outputs for each input. If input is a
string, this is a list `[t_1, t_2, ..., t_n]` of possible string
outputs. If input is a list of strings, then this is a nested
list `[[t_1, t_2, ..., t_n], ...]` with length equal to `len(inputs)`.
`absolute_normalization`: When True, the function returns the log
probability of unconstrained generation or the target sequence. When
False (default), log probabilities are normalized so that the probabilities
of generating `targets` sum to 1. Note that setting `absolute_normalization`
to True restricts the class of models that can be evaluated to those that
can assign absolute probabilities to sequences.
Returns:
If a single string input is provided, returns a list of
log-probabilities `[lp_1, lp_2, ..., lp_n]` predicted by the model,
where `lp_i = log(prob(t_i | input)` is the conditional log-prob
to generate target `t_i` given input. If a list of string inputs
was provided, returns a list of such elements of the form
`[[lp_1, lp_2, ..., lp_n], ...]`, where each element contains the
log-probabilities for the corresponding input and targets.
In this case, the length of the returned list is `len(input)`.
"""
if isinstance(inputs, str):
input_list = [inputs]
target_list = [targets]
else:
input_list = inputs
target_list = targets
<DeepExtract>
flat_idx = []
flat_inputs = []
flat_choices = []
for (example_id, (example_input, choices)) in enumerate(zip(input_list, target_list)):
for (choice_id, choice) in enumerate(choices):
flat_idx.append((example_id, choice_id))
flat_inputs.append(example_input)
flat_choices.append(choice)
(flat_idx, flat_inputs, flat_choices) = (flat_idx, flat_inputs, flat_choices)
</DeepExtract>
num_examples = len(flat_idx)
flat_scores = []
for idx in range(0, num_examples, batch_size):
batch_idx = flat_idx[idx:min(idx + batch_size, num_examples)]
batch_inputs = flat_inputs[idx:min(idx + batch_size, num_examples)]
batch_choices = flat_choices[idx:min(idx + batch_size, num_examples)]
batch_scores = self._model.score(batch_inputs, batch_choices)
flat_scores += batch_scores
scores = [[] for _ in range(len(input_list))]
for (idx, score) in zip(flat_idx, flat_scores):
if score == 0:
logging.warning('Found score identical to zero. Probably from empty target. Setting score to -inf.')
scores[idx[0]].append(-np.inf)
else:
scores[idx[0]].append(score)
if not absolute_normalization:
scores = [list(score_row - scipy.special.logsumexp(score_row)) for score_row in scores]
if isinstance(inputs, str):
scores = scores[0]
return scores
|
def cond_log_prob(self, inputs: Union[str, List[str]], targets: Union[List[str], List[List[str]]], batch_size: int=64, absolute_normalization: Optional[bool]=False) -> Union[List[float], List[List[float]]]:
"""Computes conditional log probabilities of targets given inputs.
Args:
`inputs`: A single string input or a list of string inputs.
`targets`: Possible string outputs for each input. If input is a
string, this is a list `[t_1, t_2, ..., t_n]` of possible string
outputs. If input is a list of strings, then this is a nested
list `[[t_1, t_2, ..., t_n], ...]` with length equal to `len(inputs)`.
`absolute_normalization`: When True, the function returns the log
probability of unconstrained generation or the target sequence. When
False (default), log probabilities are normalized so that the probabilities
of generating `targets` sum to 1. Note that setting `absolute_normalization`
to True restricts the class of models that can be evaluated to those that
can assign absolute probabilities to sequences.
Returns:
If a single string input is provided, returns a list of
log-probabilities `[lp_1, lp_2, ..., lp_n]` predicted by the model,
where `lp_i = log(prob(t_i | input)` is the conditional log-prob
to generate target `t_i` given input. If a list of string inputs
was provided, returns a list of such elements of the form
`[[lp_1, lp_2, ..., lp_n], ...]`, where each element contains the
log-probabilities for the corresponding input and targets.
In this case, the length of the returned list is `len(input)`.
"""
if isinstance(inputs, str):
input_list = [inputs]
target_list = [targets]
else:
input_list = inputs
target_list = targets
flat_idx = []
flat_inputs = []
flat_choices = []
for (example_id, (example_input, choices)) in enumerate(zip(input_list, target_list)):
for (choice_id, choice) in enumerate(choices):
flat_idx.append((example_id, choice_id))
flat_inputs.append(example_input)
flat_choices.append(choice)
(flat_idx, flat_inputs, flat_choices) = (flat_idx, flat_inputs, flat_choices)
num_examples = len(flat_idx)
flat_scores = []
for idx in range(0, num_examples, batch_size):
batch_idx = flat_idx[idx:min(idx + batch_size, num_examples)]
batch_inputs = flat_inputs[idx:min(idx + batch_size, num_examples)]
batch_choices = flat_choices[idx:min(idx + batch_size, num_examples)]
batch_scores = self._model.score(batch_inputs, batch_choices)
flat_scores += batch_scores
scores = [[] for _ in range(len(input_list))]
for (idx, score) in zip(flat_idx, flat_scores):
if score == 0:
logging.warning('Found score identical to zero. Probably from empty target. Setting score to -inf.')
scores[idx[0]].append(-np.inf)
else:
scores[idx[0]].append(score)
if not absolute_normalization:
scores = [list(score_row - scipy.special.logsumexp(score_row)) for score_row in scores]
if isinstance(inputs, str):
scores = scores[0]
return scores
|
BIG-bench
|
positive
|
def from_fmt_obj(self, fmtobj, directory, labeled=True, **kwargs):
if not isinstance(fmtobj, dpdata.plugins.deepmd.DeePMDMixedFormat):
for dd in fmtobj.from_multi_systems(directory, **kwargs):
if labeled:
system = LabeledSystem().from_fmt_obj(fmtobj, dd, **kwargs)
else:
system = System().from_fmt_obj(fmtobj, dd, **kwargs)
system.sort_atom_names()
<DeepExtract>
if not len(system.data['atom_numbs']):
return False
elif not len(self.data['atom_numbs']):
self.data = system.data
return False
if system.uniq_formula != self.uniq_formula:
raise RuntimeError('systems with inconsistent formula could not be append: %s v.s. %s' % (self.uniq_formula, system.uniq_formula))
if system.data['atom_names'] != self.data['atom_names']:
system.sort_atom_names()
self.sort_atom_names()
if (system.data['atom_types'] != self.data['atom_types']).any():
system.sort_atom_types()
self.sort_atom_types()
for ii in ['atom_numbs', 'atom_names']:
assert system.data[ii] == self.data[ii]
for ii in ['atom_types', 'orig']:
eq = [v1 == v2 for (v1, v2) in zip(system.data[ii], self.data[ii])]
assert all(eq)
for tt in self.DTYPES:
if tt.shape is not None and Axis.NFRAMES in tt.shape:
if tt.name not in self.data and tt.name in system.data:
raise RuntimeError('system has %s, but this does not' % tt.name)
elif tt.name in self.data and tt.name not in system.data:
raise RuntimeError('this has %s, but system does not' % tt.name)
elif tt.name not in self.data and tt.name not in system.data:
continue
axis_nframes = tt.shape.index(Axis.NFRAMES)
self.data[tt.name] = np.concatenate((self.data[tt.name], system[tt.name]), axis=axis_nframes)
if self.nopbc and (not system.nopbc):
self.data['nopbc'] = False
return True
</DeepExtract>
return self
else:
system_list = []
for dd in fmtobj.from_multi_systems(directory, **kwargs):
if labeled:
data_list = fmtobj.from_labeled_system_mix(dd, **kwargs)
for data_item in data_list:
system_list.append(LabeledSystem(data=data_item, **kwargs))
else:
data_list = fmtobj.from_system_mix(dd, **kwargs)
for data_item in data_list:
system_list.append(System(data=data_item, **kwargs))
<DeepExtract>
if not len(*system_list.data['atom_numbs']):
return False
elif not len(self.data['atom_numbs']):
self.data = *system_list.data
return False
if *system_list.uniq_formula != self.uniq_formula:
raise RuntimeError('systems with inconsistent formula could not be append: %s v.s. %s' % (self.uniq_formula, *system_list.uniq_formula))
if *system_list.data['atom_names'] != self.data['atom_names']:
*system_list.sort_atom_names()
self.sort_atom_names()
if (*system_list.data['atom_types'] != self.data['atom_types']).any():
*system_list.sort_atom_types()
self.sort_atom_types()
for ii in ['atom_numbs', 'atom_names']:
assert *system_list.data[ii] == self.data[ii]
for ii in ['atom_types', 'orig']:
eq = [v1 == v2 for (v1, v2) in zip(*system_list.data[ii], self.data[ii])]
assert all(eq)
for tt in self.DTYPES:
if tt.shape is not None and Axis.NFRAMES in tt.shape:
if tt.name not in self.data and tt.name in *system_list.data:
raise RuntimeError('system has %s, but this does not' % tt.name)
elif tt.name in self.data and tt.name not in *system_list.data:
raise RuntimeError('this has %s, but system does not' % tt.name)
elif tt.name not in self.data and tt.name not in *system_list.data:
continue
axis_nframes = tt.shape.index(Axis.NFRAMES)
self.data[tt.name] = np.concatenate((self.data[tt.name], *system_list[tt.name]), axis=axis_nframes)
if self.nopbc and (not *system_list.nopbc):
self.data['nopbc'] = False
return True
</DeepExtract>
return self
|
def from_fmt_obj(self, fmtobj, directory, labeled=True, **kwargs):
if not isinstance(fmtobj, dpdata.plugins.deepmd.DeePMDMixedFormat):
for dd in fmtobj.from_multi_systems(directory, **kwargs):
if labeled:
system = LabeledSystem().from_fmt_obj(fmtobj, dd, **kwargs)
else:
system = System().from_fmt_obj(fmtobj, dd, **kwargs)
system.sort_atom_names()
if not len(system.data['atom_numbs']):
return False
elif not len(self.data['atom_numbs']):
self.data = system.data
return False
if system.uniq_formula != self.uniq_formula:
raise RuntimeError('systems with inconsistent formula could not be append: %s v.s. %s' % (self.uniq_formula, system.uniq_formula))
if system.data['atom_names'] != self.data['atom_names']:
system.sort_atom_names()
self.sort_atom_names()
if (system.data['atom_types'] != self.data['atom_types']).any():
system.sort_atom_types()
self.sort_atom_types()
for ii in ['atom_numbs', 'atom_names']:
assert system.data[ii] == self.data[ii]
for ii in ['atom_types', 'orig']:
eq = [v1 == v2 for (v1, v2) in zip(system.data[ii], self.data[ii])]
assert all(eq)
for tt in self.DTYPES:
if tt.shape is not None and Axis.NFRAMES in tt.shape:
if tt.name not in self.data and tt.name in system.data:
raise RuntimeError('system has %s, but this does not' % tt.name)
elif tt.name in self.data and tt.name not in system.data:
raise RuntimeError('this has %s, but system does not' % tt.name)
elif tt.name not in self.data and tt.name not in system.data:
continue
axis_nframes = tt.shape.index(Axis.NFRAMES)
self.data[tt.name] = np.concatenate((self.data[tt.name], system[tt.name]), axis=axis_nframes)
if self.nopbc and (not system.nopbc):
self.data['nopbc'] = False
return True
return self
else:
system_list = []
for dd in fmtobj.from_multi_systems(directory, **kwargs):
if labeled:
data_list = fmtobj.from_labeled_system_mix(dd, **kwargs)
for data_item in data_list:
system_list.append(LabeledSystem(data=data_item, **kwargs))
else:
data_list = fmtobj.from_system_mix(dd, **kwargs)
for data_item in data_list:
system_list.append(System(data=data_item, **kwargs))
if not len(*system_list.data['atom_numbs']):
return False
elif not len(self.data['atom_numbs']):
self.data = *system_list.data
return False
if *system_list.uniq_formula != self.uniq_formula:
raise RuntimeError('systems with inconsistent formula could not be append: %s v.s. %s' % (self.uniq_formula, *system_list.uniq_formula))
if *system_list.data['atom_names'] != self.data['atom_names']:
*system_list.sort_atom_names()
self.sort_atom_names()
if (*system_list.data['atom_types'] != self.data['atom_types']).any():
*system_list.sort_atom_types()
self.sort_atom_types()
for ii in ['atom_numbs', 'atom_names']:
assert *system_list.data[ii] == self.data[ii]
for ii in ['atom_types', 'orig']:
eq = [v1 == v2 for (v1, v2) in zip(*system_list.data[ii], self.data[ii])]
assert all(eq)
for tt in self.DTYPES:
if tt.shape is not None and Axis.NFRAMES in tt.shape:
if tt.name not in self.data and tt.name in *system_list.data:
raise RuntimeError('system has %s, but this does not' % tt.name)
elif tt.name in self.data and tt.name not in *system_list.data:
raise RuntimeError('this has %s, but system does not' % tt.name)
elif tt.name not in self.data and tt.name not in *system_list.data:
continue
axis_nframes = tt.shape.index(Axis.NFRAMES)
self.data[tt.name] = np.concatenate((self.data[tt.name], *system_list[tt.name]), axis=axis_nframes)
if self.nopbc and (not *system_list.nopbc):
self.data['nopbc'] = False
return True
return self
|
dpdata
|
positive
|
def contains(self, elem):
"""Test if the filter contains an element
elem may be a COutPoint or bytes
"""
if isinstance(elem, bitcoin.core.COutPoint):
elem = elem.serialize()
if len(self.vData) == 1 and self.vData[0] == 255:
return True
for i in range(0, self.nHashFuncs):
<DeepExtract>
nIndex = MurmurHash3(i * 4221880213 + self.nTweak & 4294967295, elem) % (len(self.vData) * 8)
</DeepExtract>
if not self.vData[nIndex >> 3] & self.__bit_mask[7 & nIndex]:
return False
return True
|
def contains(self, elem):
"""Test if the filter contains an element
elem may be a COutPoint or bytes
"""
if isinstance(elem, bitcoin.core.COutPoint):
elem = elem.serialize()
if len(self.vData) == 1 and self.vData[0] == 255:
return True
for i in range(0, self.nHashFuncs):
nIndex = MurmurHash3(i * 4221880213 + self.nTweak & 4294967295, elem) % (len(self.vData) * 8)
if not self.vData[nIndex >> 3] & self.__bit_mask[7 & nIndex]:
return False
return True
|
dust-b-gone
|
positive
|
@property
def route53(self):
"""
A boto Route53 connection.
"""
if not getattr(self, '_route53', None):
<DeepExtract>
self._route53 = boto.route53.connect_to_region(self._region, aws_access_key_id=self._access_key, aws_secret_access_key=self._secret_key)
</DeepExtract>
return self._route53
|
@property
def route53(self):
"""
A boto Route53 connection.
"""
if not getattr(self, '_route53', None):
self._route53 = boto.route53.connect_to_region(self._region, aws_access_key_id=self._access_key, aws_secret_access_key=self._secret_key)
return self._route53
|
aurproxy
|
positive
|
def test_get_dns_name_and_fall_back_ip_address_success(mocker):
"""
When the dns name cannot be resolved, and the fallback to mount target ip address is retrieved
"""
<DeepExtract>
try:
config = ConfigParser.SafeConfigParser()
except AttributeError:
config = ConfigParser()
config.add_section(mount_efs.CONFIG_SECTION)
if mount_efs.CONFIG_SECTION != config_section:
config.add_section(config_section)
config.set(mount_efs.CONFIG_SECTION, 'dns_name_format', dns_name_format)
config.set(config_section, 'dns_name_suffix', dns_name_suffix)
if has_fallback_to_mount_target_ip_address_item:
config.set(mount_efs.CONFIG_SECTION, mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM, str(fallback_to_mount_target_ip_address))
config = config
</DeepExtract>
dns_mock = mocker.patch('socket.gethostbyname', side_effect=socket.gaierror)
get_fallback_mount_target_ip_mock = mocker.patch('mount_efs.get_fallback_mount_target_ip_address', return_value=IP_ADDRESS)
(dns_name, ip_address) = mount_efs.get_dns_name_and_fallback_mount_target_ip_address(config, FS_ID, DEFAULT_NFS_OPTIONS)
assert '%s.efs.%s.amazonaws.com' % (FS_ID, DEFAULT_REGION) == dns_name
assert IP_ADDRESS == ip_address
utils.assert_called(dns_mock)
utils.assert_called(get_fallback_mount_target_ip_mock)
|
def test_get_dns_name_and_fall_back_ip_address_success(mocker):
"""
When the dns name cannot be resolved, and the fallback to mount target ip address is retrieved
"""
try:
config = ConfigParser.SafeConfigParser()
except AttributeError:
config = ConfigParser()
config.add_section(mount_efs.CONFIG_SECTION)
if mount_efs.CONFIG_SECTION != config_section:
config.add_section(config_section)
config.set(mount_efs.CONFIG_SECTION, 'dns_name_format', dns_name_format)
config.set(config_section, 'dns_name_suffix', dns_name_suffix)
if has_fallback_to_mount_target_ip_address_item:
config.set(mount_efs.CONFIG_SECTION, mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM, str(fallback_to_mount_target_ip_address))
config = config
dns_mock = mocker.patch('socket.gethostbyname', side_effect=socket.gaierror)
get_fallback_mount_target_ip_mock = mocker.patch('mount_efs.get_fallback_mount_target_ip_address', return_value=IP_ADDRESS)
(dns_name, ip_address) = mount_efs.get_dns_name_and_fallback_mount_target_ip_address(config, FS_ID, DEFAULT_NFS_OPTIONS)
assert '%s.efs.%s.amazonaws.com' % (FS_ID, DEFAULT_REGION) == dns_name
assert IP_ADDRESS == ip_address
utils.assert_called(dns_mock)
utils.assert_called(get_fallback_mount_target_ip_mock)
|
efs-utils
|
positive
|
def sample_factors(self, num, random_state):
"""Sample a batch of factors Y."""
factors = np.zeros(shape=(num, self.num_factors), dtype=np.int64)
for i in range(self.num_factors):
<DeepExtract>
factors[:, i] = random_state.randint(self.factor_sizes[i], size=num)
</DeepExtract>
return factors
|
def sample_factors(self, num, random_state):
"""Sample a batch of factors Y."""
factors = np.zeros(shape=(num, self.num_factors), dtype=np.int64)
for i in range(self.num_factors):
factors[:, i] = random_state.randint(self.factor_sizes[i], size=num)
return factors
|
disentanglement_lib
|
positive
|
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info('Writing example %d of %d' % (ex_index, len(examples)))
<DeepExtract>
if isinstance(example, PaddingInputExample):
feature = InputFeatures(input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
elif len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:max_seq_length - 2]
tokens = []
segment_ids = []
tokens.append('[CLS]')
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append('[SEP]')
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append('[SEP]')
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info('*** Example ***')
tf.logging.info('guid: %s' % example.guid)
tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in tokens]))
tf.logging.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
tf.logging.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
tf.logging.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
tf.logging.info('label: %s (id = %d)' % (example.label, label_id))
feature = InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True)
feature = feature
</DeepExtract>
features.append(feature)
return features
|
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info('Writing example %d of %d' % (ex_index, len(examples)))
if isinstance(example, PaddingInputExample):
feature = InputFeatures(input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
elif len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:max_seq_length - 2]
tokens = []
segment_ids = []
tokens.append('[CLS]')
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append('[SEP]')
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append('[SEP]')
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info('*** Example ***')
tf.logging.info('guid: %s' % example.guid)
tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in tokens]))
tf.logging.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
tf.logging.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
tf.logging.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
tf.logging.info('label: %s (id = %d)' % (example.label, label_id))
feature = InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True)
feature = feature
features.append(feature)
return features
|
arabert
|
positive
|
def execute(self, operation, parameters=None):
"""
Prepare and execute a database operation (query or command).
:param operation: query or command to execute.
:param parameters: sequence or mapping that will be bound to
variables in the operation.
:return: None
"""
<DeepExtract>
if self._state == self._states.CURSOR_CLOSED:
raise InterfaceError('cursor already closed')
</DeepExtract>
<DeepExtract>
self._state = self._states.RUNNING
</DeepExtract>
try:
<DeepExtract>
external_tables = [{'name': name, 'structure': structure, 'data': data} for (name, (structure, data)) in self._external_tables.items()] or None
execute = self._client.execute
if self._stream_results:
execute = self._client.execute_iter
self._settings = self._settings or {}
self._settings['max_block_size'] = self._max_row_buffer
execute_kwargs = {'settings': self._settings, 'external_tables': external_tables, 'types_check': self._types_check, 'query_id': self._query_id}
(execute, execute_kwargs) = (execute, execute_kwargs)
</DeepExtract>
<DeepExtract>
self._check_cursor_closed()
self._begin_query()
try:
(execute, execute_kwargs) = self._prepare()
response = execute(operation, params=parameters, with_column_types=True, **execute_kwargs)
except DriverError as orig:
raise OperationalError(orig)
self._process_response(response)
self._end_query()
</DeepExtract>
except DriverError as orig:
raise OperationalError(orig)
<DeepExtract>
if executemany:
self._rowcount = response
response = None
if not response or isinstance(response, int):
self._columns = self._types = self._rows = []
if isinstance(response, int):
self._rowcount = response
return
if self._stream_results:
columns_with_types = next(response)
rows = response
else:
(rows, columns_with_types) = response
self._columns_with_types = columns_with_types
if columns_with_types:
(self._columns, self._types) = zip(*columns_with_types)
if not self._stream_results:
self._rowcount = len(rows)
else:
self._columns = self._types = []
self._rows = rows
</DeepExtract>
<DeepExtract>
self._state = self._states.FINISHED
</DeepExtract>
|
def execute(self, operation, parameters=None):
"""
Prepare and execute a database operation (query or command).
:param operation: query or command to execute.
:param parameters: sequence or mapping that will be bound to
variables in the operation.
:return: None
"""
if self._state == self._states.CURSOR_CLOSED:
raise InterfaceError('cursor already closed')
self._state = self._states.RUNNING
try:
external_tables = [{'name': name, 'structure': structure, 'data': data} for (name, (structure, data)) in self._external_tables.items()] or None
execute = self._client.execute
if self._stream_results:
execute = self._client.execute_iter
self._settings = self._settings or {}
self._settings['max_block_size'] = self._max_row_buffer
execute_kwargs = {'settings': self._settings, 'external_tables': external_tables, 'types_check': self._types_check, 'query_id': self._query_id}
(execute, execute_kwargs) = (execute, execute_kwargs)
self._check_cursor_closed()
self._begin_query()
try:
(execute, execute_kwargs) = self._prepare()
response = execute(operation, params=parameters, with_column_types=True, **execute_kwargs)
except DriverError as orig:
raise OperationalError(orig)
self._process_response(response)
self._end_query()
except DriverError as orig:
raise OperationalError(orig)
if executemany:
self._rowcount = response
response = None
if not response or isinstance(response, int):
self._columns = self._types = self._rows = []
if isinstance(response, int):
self._rowcount = response
return
if self._stream_results:
columns_with_types = next(response)
rows = response
else:
(rows, columns_with_types) = response
self._columns_with_types = columns_with_types
if columns_with_types:
(self._columns, self._types) = zip(*columns_with_types)
if not self._stream_results:
self._rowcount = len(rows)
else:
self._columns = self._types = []
self._rows = rows
self._state = self._states.FINISHED
|
clickhouse-driver
|
positive
|
def iter_explain(self, instances_df, nh_size):
[Xs, Ys, isSparse] = self.preprocessor.generate_samples(nh_size)
[Xe, Ye, isSparse] = self.preprocessor.preprocess(instances_df)
<DeepExtract>
distances = self.compute_distances_to_neighborhood(Xe, Xs)
weights = self.kernel.transform(distances)
sample_weights = weights
</DeepExtract>
classes = self.preprocessor.get_classes()
predictor_features = self.preprocessor.get_predictor_features()
coefs_cols = ['coef_{}'.format(c) for c in classes]
predictor_features_df = pd.DataFrame(predictor_features, columns=['feature'])
samples_cols = ['sample_{}'.format(s) for s in range(nh_size)]
for (row_idx, [to_exp, to_proba, w]) in enumerate(izip(Xe, Ye, sample_weights)):
Xs[0, :] = to_exp
Ys[0, :] = to_proba
model_regressor = Ridge(alpha=self.ridge_alpha, fit_intercept=True, random_state=self.random_state)
model_regressor.fit(Xs, Ys, sample_weight=w)
local_r2_score = model_regressor.score(Xs, Ys, sample_weight=None)
intercept_np = model_regressor.intercept_
model_coefs = model_regressor.coef_
kernel_distance_avg = np.mean(w)
kernel_distance_std = np.std(w)
coefs_df = pd.DataFrame(model_coefs.T, columns=coefs_cols)
explanation_df = pd.concat((predictor_features_df, coefs_df), axis=1)
explanation_df.insert(0, '_exp_id', row_idx)
instance_df = pd.DataFrame(to_exp.reshape(-1, len(to_exp)), columns=predictor_features)
instance_df['r2_score'] = local_r2_score
instance_df['kernel_distance_avg'] = kernel_distance_avg
instance_df['kernel_distance_std'] = kernel_distance_std
instance_df.insert(0, '_exp_id', row_idx)
yield (explanation_df, instance_df)
|
def iter_explain(self, instances_df, nh_size):
[Xs, Ys, isSparse] = self.preprocessor.generate_samples(nh_size)
[Xe, Ye, isSparse] = self.preprocessor.preprocess(instances_df)
distances = self.compute_distances_to_neighborhood(Xe, Xs)
weights = self.kernel.transform(distances)
sample_weights = weights
classes = self.preprocessor.get_classes()
predictor_features = self.preprocessor.get_predictor_features()
coefs_cols = ['coef_{}'.format(c) for c in classes]
predictor_features_df = pd.DataFrame(predictor_features, columns=['feature'])
samples_cols = ['sample_{}'.format(s) for s in range(nh_size)]
for (row_idx, [to_exp, to_proba, w]) in enumerate(izip(Xe, Ye, sample_weights)):
Xs[0, :] = to_exp
Ys[0, :] = to_proba
model_regressor = Ridge(alpha=self.ridge_alpha, fit_intercept=True, random_state=self.random_state)
model_regressor.fit(Xs, Ys, sample_weight=w)
local_r2_score = model_regressor.score(Xs, Ys, sample_weight=None)
intercept_np = model_regressor.intercept_
model_coefs = model_regressor.coef_
kernel_distance_avg = np.mean(w)
kernel_distance_std = np.std(w)
coefs_df = pd.DataFrame(model_coefs.T, columns=coefs_cols)
explanation_df = pd.concat((predictor_features_df, coefs_df), axis=1)
explanation_df.insert(0, '_exp_id', row_idx)
instance_df = pd.DataFrame(to_exp.reshape(-1, len(to_exp)), columns=predictor_features)
instance_df['r2_score'] = local_r2_score
instance_df['kernel_distance_avg'] = kernel_distance_avg
instance_df['kernel_distance_std'] = kernel_distance_std
instance_df.insert(0, '_exp_id', row_idx)
yield (explanation_df, instance_df)
|
dataiku-contrib
|
positive
|
def _sample(n_samples):
"""
Not for end use; please use `sample`.
Samples model parameters from the prior distribution. This is an helper function called by the main `sample` one
in order to split drawing from the prior in chunks to avoid parallelization issues with MPI.
Parameters
----------
n_samples: integer
Number of samples to generate
Returns
-------
list
List containing sampled parameter values.
"""
<DeepExtract>
seed_arr = self.rng.randint(0, np.iinfo(np.uint32).max, size=n_samples, dtype=np.uint32)
sorted_seed_arr = np.sort(seed_arr)
indices = sorted_seed_arr[:-1] == sorted_seed_arr[1:]
if np.sum(indices) > 0:
sorted_seed_arr[:-1][indices] = sorted_seed_arr[:-1][indices] + 1
rng_arr = np.array([np.random.RandomState(seed) for seed in sorted_seed_arr])
rng_pds = self.backend.parallelize(rng_arr)
rng_pds = rng_pds
</DeepExtract>
parameters_pds = self.backend.map(self._sample_parameter_only, rng_pds)
parameters = self.backend.collect(parameters_pds)
return parameters
|
def _sample(n_samples):
"""
Not for end use; please use `sample`.
Samples model parameters from the prior distribution. This is an helper function called by the main `sample` one
in order to split drawing from the prior in chunks to avoid parallelization issues with MPI.
Parameters
----------
n_samples: integer
Number of samples to generate
Returns
-------
list
List containing sampled parameter values.
"""
seed_arr = self.rng.randint(0, np.iinfo(np.uint32).max, size=n_samples, dtype=np.uint32)
sorted_seed_arr = np.sort(seed_arr)
indices = sorted_seed_arr[:-1] == sorted_seed_arr[1:]
if np.sum(indices) > 0:
sorted_seed_arr[:-1][indices] = sorted_seed_arr[:-1][indices] + 1
rng_arr = np.array([np.random.RandomState(seed) for seed in sorted_seed_arr])
rng_pds = self.backend.parallelize(rng_arr)
rng_pds = rng_pds
parameters_pds = self.backend.map(self._sample_parameter_only, rng_pds)
parameters = self.backend.collect(parameters_pds)
return parameters
|
abcpy
|
positive
|
def addAnnotation(self, cert, udct, box, page0ref, obj13, obj13ref, new_13):
from endesive.pdf.PyPDF2_annotate.annotations.signature import Signature
from endesive.pdf.PyPDF2_annotate.config.appearance import Appearance
from endesive.pdf.PyPDF2_annotate.config.location import Location
from endesive.pdf.PyPDF2_annotate.util.geometry import identity
(x1, y1, x2, y2) = box
annotation = Signature(Location(x1=x1, y1=y1, x2=x2, y2=y2, page=0), Appearance())
if 'signature' in udct:
annotationtext = udct['signature']
wrap_text = udct.get('text', {}).get('wraptext', True)
font_size = udct.get('text', {}).get('fontsize', 12)
text_align = udct.get('text', {}).get('textalign', 'left')
line_spacing = udct.get('text', {}).get('linespacing', 1.2)
annotation.add_default_font()
annotation.set_signature_appearance(['fill_colour', 0, 0, 0], ['font', 'default', font_size], ['text_box', annotationtext, 'default', 0, 0, x2 - x1, y2 - y1, font_size, wrap_text, text_align, 'middle', line_spacing])
elif 'signature_img' in udct:
annotation.add_image(udct['signature_img'], 'Image')
annotation.set_signature_appearance(['image', 'Image', 0, 0, x2 - x1, y2 - y1, udct.get('signature_img_distort', True), udct.get('signature_img_centred', False)])
elif 'signature_appearance' in udct:
sig = {}
for f in ('background', 'icon', 'labels', 'border', 'outline'):
if f in udct['signature_appearance']:
sig[f] = udct['signature_appearance'][f]
toggles = udct['signature_appearance'].get('display', [])
for f in ('contact', 'reason', 'location', 'contact', 'signingdate'):
if f in toggles:
sig[f] = udct.get(f, '{} unknown'.format(f))
if 'date' in toggles:
sig['date'] = udct['signingdate']
if 'CN' in toggles:
from cryptography.x509 import ObjectIdentifier
sig['CN'] = cert.subject.get_attributes_for_oid(ObjectIdentifier('2.5.4.3'))[0].value
if 'DN' in toggles:
sig['DN'] = cert.subject.rfc4514_string()
annotation.simple_signature(sig)
else:
if 'manual_images' in udct:
for (name, img) in udct['manual_images'].items():
annotation.add_image(img, name=name)
if 'manual_fonts' in udct:
for (name, path) in udct['manual_fonts'].items():
annotation.add_ttf_font(path, name=name)
annotation.add_default_font()
annotation.set_signature_appearance(*udct['signature_manual'])
pdfa = annotation.as_pdf_object(identity(), page=page0ref)
<DeepExtract>
stream = getattr(pdfa['/AP']['/N'], 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(pdfa['/AP']['/N'])
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in pdfa['/AP']['/N'] and pdfa['/AP']['/N']['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in pdfa['/AP']['/N'].items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
v = self._extend(v)
v = self._addObject(v)
else:
v = self._extend(v)
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
va = self._extend(va)
va = self._addObject(va)
else:
va = self._extend(va)
result.append(va)
v = result
dct[k] = v
objapn = dct
</DeepExtract>
objapnref = self._addObject(objapn)
objap = po.DictionaryObject()
objap[po.NameObject('/N')] = objapnref
obj13.update({po.NameObject('/Rect'): po.ArrayObject([po.FloatObject(x1), po.FloatObject(y1), po.FloatObject(x2), po.FloatObject(y2)]), po.NameObject('/AP'): objap})
page0 = page0ref.getObject()
if new_13:
annots = po.ArrayObject([obj13ref])
if '/Annots' in page0:
page0annots = page0['/Annots']
if isinstance(page0annots, po.IndirectObject):
annots.insert(0, page0annots)
elif isinstance(page0annots, po.ArrayObject):
annots = page0annots
annots.append(obj13ref)
else:
annots = page0['/Annots']
page0.update({po.NameObject('/Annots'): annots})
self._objects[page0ref.idnum - 1] = page0
|
def addAnnotation(self, cert, udct, box, page0ref, obj13, obj13ref, new_13):
from endesive.pdf.PyPDF2_annotate.annotations.signature import Signature
from endesive.pdf.PyPDF2_annotate.config.appearance import Appearance
from endesive.pdf.PyPDF2_annotate.config.location import Location
from endesive.pdf.PyPDF2_annotate.util.geometry import identity
(x1, y1, x2, y2) = box
annotation = Signature(Location(x1=x1, y1=y1, x2=x2, y2=y2, page=0), Appearance())
if 'signature' in udct:
annotationtext = udct['signature']
wrap_text = udct.get('text', {}).get('wraptext', True)
font_size = udct.get('text', {}).get('fontsize', 12)
text_align = udct.get('text', {}).get('textalign', 'left')
line_spacing = udct.get('text', {}).get('linespacing', 1.2)
annotation.add_default_font()
annotation.set_signature_appearance(['fill_colour', 0, 0, 0], ['font', 'default', font_size], ['text_box', annotationtext, 'default', 0, 0, x2 - x1, y2 - y1, font_size, wrap_text, text_align, 'middle', line_spacing])
elif 'signature_img' in udct:
annotation.add_image(udct['signature_img'], 'Image')
annotation.set_signature_appearance(['image', 'Image', 0, 0, x2 - x1, y2 - y1, udct.get('signature_img_distort', True), udct.get('signature_img_centred', False)])
elif 'signature_appearance' in udct:
sig = {}
for f in ('background', 'icon', 'labels', 'border', 'outline'):
if f in udct['signature_appearance']:
sig[f] = udct['signature_appearance'][f]
toggles = udct['signature_appearance'].get('display', [])
for f in ('contact', 'reason', 'location', 'contact', 'signingdate'):
if f in toggles:
sig[f] = udct.get(f, '{} unknown'.format(f))
if 'date' in toggles:
sig['date'] = udct['signingdate']
if 'CN' in toggles:
from cryptography.x509 import ObjectIdentifier
sig['CN'] = cert.subject.get_attributes_for_oid(ObjectIdentifier('2.5.4.3'))[0].value
if 'DN' in toggles:
sig['DN'] = cert.subject.rfc4514_string()
annotation.simple_signature(sig)
else:
if 'manual_images' in udct:
for (name, img) in udct['manual_images'].items():
annotation.add_image(img, name=name)
if 'manual_fonts' in udct:
for (name, path) in udct['manual_fonts'].items():
annotation.add_ttf_font(path, name=name)
annotation.add_default_font()
annotation.set_signature_appearance(*udct['signature_manual'])
pdfa = annotation.as_pdf_object(identity(), page=page0ref)
stream = getattr(pdfa['/AP']['/N'], 'stream', None)
if stream is not None:
d = {'__streamdata__': stream, '/Length': len(stream)}
d.update(pdfa['/AP']['/N'])
dct = pdf.StreamObject.initializeFromDictionary(d)
if '/Filter' in pdfa['/AP']['/N'] and pdfa['/AP']['/N']['/Filter'] == '/FlatDecode':
del dct['/Filter']
dct = dct.flateEncode()
else:
dct = pdf.DictionaryObject()
for (k, v) in pdfa['/AP']['/N'].items():
if isinstance(v, pdf.DictionaryObject):
if v.indirect:
v = self._extend(v)
v = self._addObject(v)
else:
v = self._extend(v)
elif isinstance(v, list):
result = pdf.ArrayObject()
for va in v:
if isinstance(va, pdf.DictionaryObject):
if va.indirect:
va = self._extend(va)
va = self._addObject(va)
else:
va = self._extend(va)
result.append(va)
v = result
dct[k] = v
objapn = dct
objapnref = self._addObject(objapn)
objap = po.DictionaryObject()
objap[po.NameObject('/N')] = objapnref
obj13.update({po.NameObject('/Rect'): po.ArrayObject([po.FloatObject(x1), po.FloatObject(y1), po.FloatObject(x2), po.FloatObject(y2)]), po.NameObject('/AP'): objap})
page0 = page0ref.getObject()
if new_13:
annots = po.ArrayObject([obj13ref])
if '/Annots' in page0:
page0annots = page0['/Annots']
if isinstance(page0annots, po.IndirectObject):
annots.insert(0, page0annots)
elif isinstance(page0annots, po.ArrayObject):
annots = page0annots
annots.append(obj13ref)
else:
annots = page0['/Annots']
page0.update({po.NameObject('/Annots'): annots})
self._objects[page0ref.idnum - 1] = page0
|
endesive
|
positive
|
def api_request(url, method='get', params=None, ret_key=None):
params = params or {}
resp = None
try:
method = method.lower()
<DeepExtract>
g.user is not None or abort(403, 'You have to login to do this')
key = g.user.key
secret = g.user.secret
values = ''.join([str(params[k]) for k in sorted(params.keys()) if params[k] is not None]) if params.keys() else ''
_secret = ''.join([urlparse(url).path, secret, values]).encode('utf-8')
params['_secret'] = hashlib.sha1(_secret).hexdigest()
params['_key'] = key
params = params
</DeepExtract>
if method == 'get':
resp = getattr(requests, method)(url, params=params)
else:
resp = getattr(requests, method)(url, data=params)
if resp.status_code != 200:
return abort(resp.status_code, resp.json().get('message'))
resp = resp.json()
if ret_key is not None:
return resp.get(ret_key)
return resp
except Exception as e:
code = e.code if hasattr(e, 'code') else None
if isinstance(code, int) and resp is not None:
return abort(code, resp.json().get('message'))
current_app.logger.warning(url)
current_app.logger.warning(params)
current_app.logger.error(str(e))
return abort(500, 'server unknown error')
|
def api_request(url, method='get', params=None, ret_key=None):
params = params or {}
resp = None
try:
method = method.lower()
g.user is not None or abort(403, 'You have to login to do this')
key = g.user.key
secret = g.user.secret
values = ''.join([str(params[k]) for k in sorted(params.keys()) if params[k] is not None]) if params.keys() else ''
_secret = ''.join([urlparse(url).path, secret, values]).encode('utf-8')
params['_secret'] = hashlib.sha1(_secret).hexdigest()
params['_key'] = key
params = params
if method == 'get':
resp = getattr(requests, method)(url, params=params)
else:
resp = getattr(requests, method)(url, data=params)
if resp.status_code != 200:
return abort(resp.status_code, resp.json().get('message'))
resp = resp.json()
if ret_key is not None:
return resp.get(ret_key)
return resp
except Exception as e:
code = e.code if hasattr(e, 'code') else None
if isinstance(code, int) and resp is not None:
return abort(code, resp.json().get('message'))
current_app.logger.warning(url)
current_app.logger.warning(params)
current_app.logger.error(str(e))
return abort(500, 'server unknown error')
|
cmdb
|
positive
|
@ignoreLinted('H_DP_GENERAL_MISPLACED', 'H_DP_GLOBALS_MISPLACED', 'I_NOTIMPL_HINT')
def test_issue_54(self):
"""
Subject: W_ARROW wasn't implemented and basic-pbx samples used it
Source: https://github.com/ossobv/asterisklint/issues/54
"""
<DeepExtract>
dp = NamedTemporaryFile()
dp.write("[Hints]\n; Don't die even though there is no arrow. Source: asterisk/asterisk\n; 2cfb3df35df7930541177eb32d71afa52cd38899 configs/basic-pbx/extensions.conf\nexten => _10XX,hint,PJSIP/${EXTEN}\nexten = _11XX,hint,PJSIP/${EXTEN}\n".encode('utf-8'))
dp.flush()
mainmod = import_module('asterisklint.commands.dialplan-check')
mainmod.main([dp.name], {})
</DeepExtract>
self.assertLinted({'H_CONF_NO_ARROW': 1, 'W_WSH_VARSET': 1})
|
@ignoreLinted('H_DP_GENERAL_MISPLACED', 'H_DP_GLOBALS_MISPLACED', 'I_NOTIMPL_HINT')
def test_issue_54(self):
"""
Subject: W_ARROW wasn't implemented and basic-pbx samples used it
Source: https://github.com/ossobv/asterisklint/issues/54
"""
dp = NamedTemporaryFile()
dp.write("[Hints]\n; Don't die even though there is no arrow. Source: asterisk/asterisk\n; 2cfb3df35df7930541177eb32d71afa52cd38899 configs/basic-pbx/extensions.conf\nexten => _10XX,hint,PJSIP/${EXTEN}\nexten = _11XX,hint,PJSIP/${EXTEN}\n".encode('utf-8'))
dp.flush()
mainmod = import_module('asterisklint.commands.dialplan-check')
mainmod.main([dp.name], {})
self.assertLinted({'H_CONF_NO_ARROW': 1, 'W_WSH_VARSET': 1})
|
asterisklint
|
positive
|