before
stringlengths 0
955k
| after
stringlengths 0
877k
| repo
stringlengths 1
74
| type
stringclasses 1
value |
|---|---|---|---|
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces['error']:
return {'version': 'unknown', 'full-revisionid': pieces.get('long'), 'dirty': None, 'error': pieces['error'], 'date': None}
if not style or style == 'default':
style = 'pep440'
if style == 'pep440':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += plus_or_dot(pieces)
rendered += '%d.g%s' % (pieces['distance'], pieces['short'])
if pieces['dirty']:
rendered += '.dirty'
else:
rendered = '0+untagged.%d.g%s' % (pieces['distance'], pieces['short'])
if pieces['dirty']:
rendered += '.dirty'
rendered = rendered
</DeepExtract>
elif style == 'pep440-branch':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
if pieces['branch'] != 'master':
rendered += '.dev0'
rendered += plus_or_dot(pieces)
rendered += '%d.g%s' % (pieces['distance'], pieces['short'])
if pieces['dirty']:
rendered += '.dirty'
else:
rendered = '0'
if pieces['branch'] != 'master':
rendered += '.dev0'
rendered += '+untagged.%d.g%s' % (pieces['distance'], pieces['short'])
if pieces['dirty']:
rendered += '.dirty'
rendered = rendered
</DeepExtract>
elif style == 'pep440-pre':
<DeepExtract>
if pieces['closest-tag']:
if pieces['distance']:
(tag_version, post_version) = pep440_split_post(pieces['closest-tag'])
rendered = tag_version
if post_version is not None:
rendered += '.post%d.dev%d' % (post_version + 1, pieces['distance'])
else:
rendered += '.post0.dev%d' % pieces['distance']
else:
rendered = pieces['closest-tag']
else:
rendered = '0.post0.dev%d' % pieces['distance']
rendered = rendered
</DeepExtract>
elif style == 'pep440-post':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += '.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
rendered += plus_or_dot(pieces)
rendered += 'g%s' % pieces['short']
else:
rendered = '0.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
rendered += '+g%s' % pieces['short']
rendered = rendered
</DeepExtract>
elif style == 'pep440-post-branch':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += '.post%d' % pieces['distance']
if pieces['branch'] != 'master':
rendered += '.dev0'
rendered += plus_or_dot(pieces)
rendered += 'g%s' % pieces['short']
if pieces['dirty']:
rendered += '.dirty'
else:
rendered = '0.post%d' % pieces['distance']
if pieces['branch'] != 'master':
rendered += '.dev0'
rendered += '+g%s' % pieces['short']
if pieces['dirty']:
rendered += '.dirty'
rendered = rendered
</DeepExtract>
elif style == 'pep440-old':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance'] or pieces['dirty']:
rendered += '.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
else:
rendered = '0.post%d' % pieces['distance']
if pieces['dirty']:
rendered += '.dev0'
rendered = rendered
</DeepExtract>
elif style == 'git-describe':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
if pieces['distance']:
rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
else:
rendered = pieces['short']
if pieces['dirty']:
rendered += '-dirty'
rendered = rendered
</DeepExtract>
elif style == 'git-describe-long':
<DeepExtract>
if pieces['closest-tag']:
rendered = pieces['closest-tag']
rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
else:
rendered = pieces['short']
if pieces['dirty']:
rendered += '-dirty'
rendered = rendered
</DeepExtract>
else:
raise ValueError("unknown style '%s'" % style)
return {'version': rendered, 'full-revisionid': pieces['long'], 'dirty': pieces['dirty'], 'error': None, 'date': pieces.get('date')}
|
def _render_pep440(pieces):
    """TAG[+DISTANCE.gHEX[.dirty]] -- a PEP 440 local version."""
    if pieces['closest-tag']:
        rendered = pieces['closest-tag']
        if pieces['distance'] or pieces['dirty']:
            rendered += plus_or_dot(pieces)
            rendered += '%d.g%s' % (pieces['distance'], pieces['short'])
            if pieces['dirty']:
                rendered += '.dirty'
    else:
        # No tag at all: everything goes into the local version segment.
        rendered = '0+untagged.%d.g%s' % (pieces['distance'], pieces['short'])
        if pieces['dirty']:
            rendered += '.dirty'
    return rendered


def _render_pep440_branch(pieces):
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]]; '.dev0' marks non-master branches."""
    if pieces['closest-tag']:
        rendered = pieces['closest-tag']
        if pieces['distance'] or pieces['dirty']:
            if pieces['branch'] != 'master':
                rendered += '.dev0'
            rendered += plus_or_dot(pieces)
            rendered += '%d.g%s' % (pieces['distance'], pieces['short'])
            if pieces['dirty']:
                rendered += '.dirty'
    else:
        rendered = '0'
        if pieces['branch'] != 'master':
            rendered += '.dev0'
        rendered += '+untagged.%d.g%s' % (pieces['distance'], pieces['short'])
        if pieces['dirty']:
            rendered += '.dirty'
    return rendered


def _render_pep440_pre(pieces):
    """TAG[.postN.devDISTANCE] -- no -dirty marker in this style."""
    if not pieces['closest-tag']:
        return '0.post0.dev%d' % pieces['distance']
    if not pieces['distance']:
        return pieces['closest-tag']
    # An existing .postN suffix on the tag is bumped to .post(N+1).
    (tag_version, post_version) = pep440_split_post(pieces['closest-tag'])
    rendered = tag_version
    if post_version is not None:
        rendered += '.post%d.dev%d' % (post_version + 1, pieces['distance'])
    else:
        rendered += '.post0.dev%d' % pieces['distance']
    return rendered


def _render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX]; '.dev0' marks a dirty tree."""
    if pieces['closest-tag']:
        rendered = pieces['closest-tag']
        if pieces['distance'] or pieces['dirty']:
            rendered += '.post%d' % pieces['distance']
            if pieces['dirty']:
                rendered += '.dev0'
            rendered += plus_or_dot(pieces)
            rendered += 'g%s' % pieces['short']
    else:
        rendered = '0.post%d' % pieces['distance']
        if pieces['dirty']:
            rendered += '.dev0'
        rendered += '+g%s' % pieces['short']
    return rendered


def _render_pep440_post_branch(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]]; '.dev0' marks non-master branches."""
    if pieces['closest-tag']:
        rendered = pieces['closest-tag']
        if pieces['distance'] or pieces['dirty']:
            rendered += '.post%d' % pieces['distance']
            if pieces['branch'] != 'master':
                rendered += '.dev0'
            rendered += plus_or_dot(pieces)
            rendered += 'g%s' % pieces['short']
            if pieces['dirty']:
                rendered += '.dirty'
    else:
        rendered = '0.post%d' % pieces['distance']
        if pieces['branch'] != 'master':
            rendered += '.dev0'
        rendered += '+g%s' % pieces['short']
        if pieces['dirty']:
            rendered += '.dirty'
    return rendered


def _render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] -- legacy style, no commit hash."""
    if pieces['closest-tag']:
        rendered = pieces['closest-tag']
        if pieces['distance'] or pieces['dirty']:
            rendered += '.post%d' % pieces['distance']
            if pieces['dirty']:
                rendered += '.dev0'
    else:
        rendered = '0.post%d' % pieces['distance']
        if pieces['dirty']:
            rendered += '.dev0'
    return rendered


def _render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty --always'."""
    if pieces['closest-tag']:
        rendered = pieces['closest-tag']
        if pieces['distance']:
            rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
    else:
        rendered = pieces['short']
    if pieces['dirty']:
        rendered += '-dirty'
    return rendered


def _render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty]; distance/hash shown even when on a tag."""
    if pieces['closest-tag']:
        rendered = pieces['closest-tag']
        rendered += '-%d-g%s' % (pieces['distance'], pieces['short'])
    else:
        rendered = pieces['short']
    if pieces['dirty']:
        rendered += '-dirty'
    return rendered


# Dispatch table: style name -> renderer.  'default' and '' are normalized
# to 'pep440' in render() before the lookup.
_STYLE_RENDERERS = {
    'pep440': _render_pep440,
    'pep440-branch': _render_pep440_branch,
    'pep440-pre': _render_pep440_pre,
    'pep440-post': _render_pep440_post,
    'pep440-post-branch': _render_pep440_post_branch,
    'pep440-old': _render_pep440_old,
    'git-describe': _render_git_describe,
    'git-describe-long': _render_git_describe_long,
}


def render(pieces, style):
    """Render the given version pieces into the requested style.

    ``pieces`` is the dict produced by the VCS scraper ('closest-tag',
    'distance', 'short', 'long', 'dirty', 'branch', 'error', 'date').
    Returns a dict with 'version', 'full-revisionid', 'dirty', 'error'
    and 'date' keys.  Raises ValueError for an unknown style.
    """
    if pieces['error']:
        # Scraping failed upstream; propagate the error verbatim.
        return {'version': 'unknown', 'full-revisionid': pieces.get('long'), 'dirty': None, 'error': pieces['error'], 'date': None}
    if not style or style == 'default':
        style = 'pep440'
    if style not in _STYLE_RENDERERS:
        raise ValueError("unknown style '%s'" % style)
    rendered = _STYLE_RENDERERS[style](pieces)
    return {'version': rendered, 'full-revisionid': pieces['long'], 'dirty': pieces['dirty'], 'error': None, 'date': pieces.get('date')}
|
andes
|
positive
|
def __array__(self, dtype=None):
<DeepExtract>
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
out = self._out
</DeepExtract>
if dtype is not None:
out = out.astype(dtype)
return out
|
def __array__(self, dtype=None):
    """Materialize the buffered frames as one ndarray.

    On first call the per-frame list is concatenated along axis 2 and
    cached on ``self._out``; ``self._frames`` is released so the frame
    data is only held once.  An optional ``dtype`` converts the result.
    """
    if self._out is None:
        # Lazy concatenation: build the array once, then drop the frames.
        self._out = np.concatenate(self._frames, axis=2)
        self._frames = None
    result = self._out
    if dtype is not None:
        result = result.astype(dtype)
    return result
|
CHER
|
positive
|
def split_args(self, data, where):
args = super().split_args(data, where)
if len(args) >= 2:
if len(args) > 2:
E_APP_ARG_MANY(where, app=self.name, data=data, max_args=2)
cond = args[0]
<DeepExtract>
brackets = 0
parens = 0
quotes = False
skipnext = False
ret = [[]]
start = 0
for (i, char) in enumerate(args[1]):
if isinstance(char, Var):
pass
elif skipnext:
skipnext = False
elif char == '[':
brackets += 1
elif char == ']':
if brackets:
brackets -= 1
elif char == '(':
parens += 1
elif char == ')':
if parens:
parens -= 1
elif char == '"' and ':' != '"':
quotes = not quotes
if False:
ret[-1].extend(args[1][start:i])
start = i + 1
elif char == '\\':
if False:
ret[-1].extend(args[1][start:i])
start = i + 1
skipnext = True
elif char == ':' and (not (brackets or parens or quotes)):
ret[-1].extend(args[1][start:i])
start = i + 1
ret.append([])
ret[-1].extend(args[1][start:])
squashed = []
for letters in ret:
letters = list(strjoin(letters))
if len(letters) == 1:
squashed.append(letters[0])
else:
squashed.append(Var.join(letters))
actions = squashed
</DeepExtract>
if len(actions) == 1:
(iftrue, iffalse) = (actions[0], None)
else:
assert len(actions) >= 2, actions
(iftrue, iffalse) = (actions[0], actions[1])
else:
assert len(args) == 1, args
<DeepExtract>
brackets = 0
parens = 0
quotes = False
skipnext = False
ret = [[]]
start = 0
for (i, char) in enumerate(data):
if isinstance(char, Var):
pass
elif skipnext:
skipnext = False
elif char == '[':
brackets += 1
elif char == ']':
if brackets:
brackets -= 1
elif char == '(':
parens += 1
elif char == ')':
if parens:
parens -= 1
elif char == '"' and ',' != '"':
quotes = not quotes
if False:
ret[-1].extend(data[start:i])
start = i + 1
elif char == '\\':
if False:
ret[-1].extend(data[start:i])
start = i + 1
skipnext = True
elif char == ',' and (not (brackets or parens or quotes)):
ret[-1].extend(data[start:i])
start = i + 1
ret.append([])
ret[-1].extend(data[start:])
squashed = []
for letters in ret:
letters = list(strjoin(letters))
if len(letters) == 1:
squashed.append(letters[0])
else:
squashed.append(Var.join(letters))
args = squashed
</DeepExtract>
cond = args[0]
iftrue = args[1:]
if len(iftrue):
E_APP_ARG_IFSTYLE(where, app=self.name, data=data, cond=args[0], args=args[1:])
else:
E_APP_ARG_FEW(where, app=self.name, data=data, min_args=2)
(cond, iftrue, iffalse) = (args[0], args[1:], None)
cond = cond.strip()
if cond == '':
E_APP_ARG_IFEMPTY(where, app=self.name, data=data)
return (cond, iftrue, iffalse)
|
def split_args(self, data, where):
    """Split an If-application's arguments into (cond, iftrue, iffalse).

    ``data`` is the raw application data; ``where`` locates it for error
    reporting.  Emits E_APP_ARG_* diagnostics for malformed input but
    still returns a best-effort triple.

    The two inlined copies of the delimiter scanner contained dead
    ``if False:`` branches and tautological ``':' != '"'`` guards left
    over from inlining; they are folded into one local helper with the
    delimiter as a parameter.  Behavior is unchanged.
    """

    def _split_unquoted(seq, delim):
        # Split ``seq`` on ``delim``, ignoring delimiters inside [...] and
        # (...), inside double quotes, and right after a backslash escape.
        # Var instances are opaque and never affect quoting state.
        brackets = 0
        parens = 0
        quotes = False
        skipnext = False
        parts = [[]]
        start = 0
        for (i, char) in enumerate(seq):
            if isinstance(char, Var):
                pass
            elif skipnext:
                skipnext = False
            elif char == '[':
                brackets += 1
            elif char == ']':
                if brackets:
                    brackets -= 1
            elif char == '(':
                parens += 1
            elif char == ')':
                if parens:
                    parens -= 1
            elif char == '"':
                quotes = not quotes
            elif char == '\\':
                skipnext = True
            elif char == delim and (not (brackets or parens or quotes)):
                parts[-1].extend(seq[start:i])
                start = i + 1
                parts.append([])
        parts[-1].extend(seq[start:])
        # Re-join each part; single-element parts stay as-is, mixed
        # str/Var parts are joined through Var.join.
        squashed = []
        for letters in parts:
            letters = list(strjoin(letters))
            if len(letters) == 1:
                squashed.append(letters[0])
            else:
                squashed.append(Var.join(letters))
        return squashed

    args = super().split_args(data, where)
    if len(args) >= 2:
        if len(args) > 2:
            E_APP_ARG_MANY(where, app=self.name, data=data, max_args=2)
        cond = args[0]
        # Second argument holds 'iftrue[:iffalse]'.
        actions = _split_unquoted(args[1], ':')
        if len(actions) == 1:
            (iftrue, iffalse) = (actions[0], None)
        else:
            assert len(actions) >= 2, actions
            (iftrue, iffalse) = (actions[0], actions[1])
    else:
        assert len(args) == 1, args
        # Single argument: the caller may have used commas instead of the
        # If-style '?' separator; re-split on ',' and warn accordingly.
        args = _split_unquoted(data, ',')
        cond = args[0]
        iftrue = args[1:]
        if len(iftrue):
            E_APP_ARG_IFSTYLE(where, app=self.name, data=data, cond=args[0], args=args[1:])
        else:
            E_APP_ARG_FEW(where, app=self.name, data=data, min_args=2)
        (cond, iftrue, iffalse) = (args[0], args[1:], None)
    cond = cond.strip()
    if cond == '':
        E_APP_ARG_IFEMPTY(where, app=self.name, data=data)
    return (cond, iftrue, iffalse)
|
asterisklint
|
positive
|
def get_extended_header(self):
"""Get, decode and return the extended header as a named tuple (ExtendedHeader).
The volume object ID returned is expected to be random (not based on a MAC address and time)."""
extended_header_buf = self.buf[40:132]
(machine_id_raw, volume_object_id_raw, unknown_32, unknown_timestamp_int_40, unknown_timestamp_int_48, unknown_flags_56, unknown_state_60, unknown_log_entry_index_64, unknown_log_entry_index_68, unknown_log_entry_index_72, unknown_log_entry_index_76, unknown_log_entry_index_80, unknown_log_entry_index_84, unknown_log_entry_index_88) = struct.unpack('<16s16sQQQLLLLLLLLL', extended_header_buf)
null_pos = machine_id_raw.find(b'\x00')
if null_pos != -1:
machine_id_raw = machine_id_raw[:null_pos]
<DeepExtract>
try:
s1 = machine_id_raw.decode('cp866')
except Exception:
s1 = None
try:
s2 = machine_id_raw.decode('windows-1252')
except Exception:
s2 = None
if s2 is None and s1 is not None:
machine_id = s1
if s1 is None and s2 is not None:
machine_id = s2
if s1 is None and s2 is None:
machine_id = '(unknown encoding)'
if s1 == s2:
machine_id = s1
machine_id = '"{}" (cp-866), "{}" (windows-1252)'.format(s1, s2)
</DeepExtract>
volume_object_id = uuid.UUID(bytes_le=volume_object_id_raw)
return ExtendedHeader(machine_id=machine_id, volume_object_id=volume_object_id, unknown_32=unknown_32, unknown_timestamp_int_40=unknown_timestamp_int_40, unknown_timestamp_int_48=unknown_timestamp_int_48, unknown_flags_56=unknown_flags_56, unknown_state_60=unknown_state_60, unknown_log_entry_index_64=unknown_log_entry_index_64, unknown_log_entry_index_68=unknown_log_entry_index_68, unknown_log_entry_index_72=unknown_log_entry_index_72, unknown_log_entry_index_76=unknown_log_entry_index_76, unknown_log_entry_index_80=unknown_log_entry_index_80, unknown_log_entry_index_84=unknown_log_entry_index_84, unknown_log_entry_index_88=unknown_log_entry_index_88)
|
def get_extended_header(self):
    """Get, decode and return the extended header as a named tuple (ExtendedHeader).

    The machine ID is a NUL-padded 16-byte field of unknown encoding: it
    is decoded as both cp866 and windows-1252 and both readings are
    reported when they disagree.  The volume object ID returned is
    expected to be random (not based on a MAC address and time).
    """
    extended_header_buf = self.buf[40:132]
    (machine_id_raw, volume_object_id_raw, unknown_32, unknown_timestamp_int_40, unknown_timestamp_int_48, unknown_flags_56, unknown_state_60, unknown_log_entry_index_64, unknown_log_entry_index_68, unknown_log_entry_index_72, unknown_log_entry_index_76, unknown_log_entry_index_80, unknown_log_entry_index_84, unknown_log_entry_index_88) = struct.unpack('<16s16sQQQLLLLLLLLL', extended_header_buf)
    # Trim the machine ID at the first NUL byte (the field is NUL-padded).
    null_pos = machine_id_raw.find(b'\x00')
    if null_pos != -1:
        machine_id_raw = machine_id_raw[:null_pos]
    try:
        s1 = machine_id_raw.decode('cp866')
    except Exception:
        s1 = None
    try:
        s2 = machine_id_raw.decode('windows-1252')
    except Exception:
        s2 = None
    # Bug fix: the previous inlined version fell through to the
    # two-encoding format string unconditionally, clobbering every earlier
    # single-encoding result.  Use a proper if/elif chain instead.
    if s1 is None and s2 is None:
        machine_id = '(unknown encoding)'
    elif s2 is None:
        machine_id = s1
    elif s1 is None:
        machine_id = s2
    elif s1 == s2:
        machine_id = s1
    else:
        machine_id = '"{}" (cp-866), "{}" (windows-1252)'.format(s1, s2)
    volume_object_id = uuid.UUID(bytes_le=volume_object_id_raw)
    return ExtendedHeader(machine_id=machine_id, volume_object_id=volume_object_id, unknown_32=unknown_32, unknown_timestamp_int_40=unknown_timestamp_int_40, unknown_timestamp_int_48=unknown_timestamp_int_48, unknown_flags_56=unknown_flags_56, unknown_state_60=unknown_state_60, unknown_log_entry_index_64=unknown_log_entry_index_64, unknown_log_entry_index_68=unknown_log_entry_index_68, unknown_log_entry_index_72=unknown_log_entry_index_72, unknown_log_entry_index_76=unknown_log_entry_index_76, unknown_log_entry_index_80=unknown_log_entry_index_80, unknown_log_entry_index_84=unknown_log_entry_index_84, unknown_log_entry_index_88=unknown_log_entry_index_88)
|
dfir_ntfs
|
positive
|
def stop_before_fixture(self, uuid=None):
<DeepExtract>
uuid = uuid or self._last_item_uuid(item_type=TestBeforeResult)
fixture = self._items.pop(uuid, None)
</DeepExtract>
if fixture and (not fixture.stop):
fixture.stop = now()
|
def stop_before_fixture(self, uuid=None):
    """Remove a before-fixture result and stamp its stop time.

    When ``uuid`` is omitted, the most recently registered
    TestBeforeResult item is used.  The stop time is only set if the
    fixture exists and has not already been stopped.
    """
    key = uuid or self._last_item_uuid(item_type=TestBeforeResult)
    fixture = self._items.pop(key, None)
    if fixture and not fixture.stop:
        fixture.stop = now()
|
allure-python
|
positive
|
def mon_create_initial(args):
<DeepExtract>
if _cfg:
cfg = _cfg
else:
cfg = conf.ceph.load(args)
mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
if not mon_initial_members:
if True:
raise exc.NeedHostError('could not find `mon initial members` defined in ceph.conf')
else:
mon_initial_members = re.split('[,\\s]+', mon_initial_members)
mon_initial_members = mon_initial_members
</DeepExtract>
args.mon = mon_initial_members
<DeepExtract>
cfg = conf.ceph.load(args)
if not args.mon:
args.mon = get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)
if args.keyrings:
monitor_keyring = concatenate_keyrings(args)
else:
keyring_path = '{cluster}.mon.keyring'.format(cluster=args.cluster)
try:
monitor_keyring = files.read_file(keyring_path)
except IOError:
LOG.warning('keyring (%s) not found, creating a new one' % keyring_path)
new_mon_keyring(args)
monitor_keyring = files.read_file(keyring_path)
LOG.debug('Deploying mon, cluster %s hosts %s', args.cluster, ' '.join(args.mon))
errors = 0
for (name, host) in mon_hosts(args.mon):
try:
LOG.debug('detecting platform for host %s ...', name)
distro = hosts.get(host, username=args.username, callbacks=[packages.ceph_is_installed])
LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
rlogger = logging.getLogger(name)
hostname_is_compatible(distro.conn, rlogger, name)
rlogger.debug('deploying mon to %s', name)
distro.mon.create(distro, args, monitor_keyring)
time.sleep(2)
mon_status(distro.conn, rlogger, name, args)
catch_mon_errors(distro.conn, rlogger, name, cfg, args)
distro.conn.exit()
except RuntimeError as e:
LOG.error(e)
errors += 1
if errors:
raise exc.GenericError('Failed to create %d monitors' % errors)
</DeepExtract>
mon_in_quorum = set([])
mon_members = set([host for host in mon_initial_members])
for host in mon_initial_members:
mon_name = 'mon.%s' % host
LOG.info('processing monitor %s', mon_name)
sleeps = [20, 20, 15, 10, 10, 5]
tries = 5
rlogger = logging.getLogger(host)
distro = hosts.get(host, username=args.username, callbacks=[packages.ceph_is_installed])
while tries:
<DeepExtract>
asok_path = paths.mon.asok(args.cluster, host)
(out, err, code) = remoto.process.check(distro.conn, ['ceph', '--cluster={cluster}'.format(cluster=args.cluster), '--admin-daemon', asok_path, 'mon_status'])
for line in err:
rlogger.error(line)
try:
status = json.loads(''.join(out))
except ValueError:
status = {}
</DeepExtract>
has_reached_quorum = status.get('state', '') in ['peon', 'leader']
if not has_reached_quorum:
LOG.warning('%s monitor is not yet in quorum, tries left: %s' % (mon_name, tries))
tries -= 1
sleep_seconds = sleeps.pop()
LOG.warning('waiting %s seconds before retrying', sleep_seconds)
time.sleep(sleep_seconds)
else:
mon_in_quorum.add(host)
LOG.info('%s monitor has reached quorum!', mon_name)
break
distro.conn.exit()
if mon_in_quorum == mon_members:
LOG.info('all initial monitors are running and have formed quorum')
LOG.info('Running gatherkeys...')
gatherkeys.gatherkeys(args)
else:
LOG.error('Some monitors have still not reached quorum:')
for host in mon_members - mon_in_quorum:
LOG.error('%s', host)
raise SystemExit('cluster may not be in a healthy state')
|
def mon_create_initial(args):
    """Deploy a monitor to every host in ceph.conf's `mon initial members`,
    poll each one until it joins quorum (with back-off), and run
    gatherkeys once all of them are in quorum.

    Raises exc.NeedHostError when `mon initial members` is missing,
    exc.GenericError when any monitor fails to deploy, and SystemExit
    when quorum is never reached by every initial member.

    Fixes over the previous inlined version: the undefined ``_cfg``
    reference (a NameError on entry), a duplicate ``conf.ceph.load``, a
    dead ``if not args.mon:`` recheck right after args.mon was assigned,
    and ``if True:`` / self-assignment scaffolding.
    """
    cfg = conf.ceph.load(args)
    mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
    if not mon_initial_members:
        # Nothing to deploy to without `mon initial members`.
        raise exc.NeedHostError('could not find `mon initial members` defined in ceph.conf')
    # The option is a comma- and/or whitespace-separated list of hostnames.
    mon_initial_members = re.split('[,\\s]+', mon_initial_members)
    args.mon = mon_initial_members
    # Build or collect the monitor keyring used for the initial deploy.
    if args.keyrings:
        monitor_keyring = concatenate_keyrings(args)
    else:
        keyring_path = '{cluster}.mon.keyring'.format(cluster=args.cluster)
        try:
            monitor_keyring = files.read_file(keyring_path)
        except IOError:
            LOG.warning('keyring (%s) not found, creating a new one' % keyring_path)
            new_mon_keyring(args)
            monitor_keyring = files.read_file(keyring_path)
    LOG.debug('Deploying mon, cluster %s hosts %s', args.cluster, ' '.join(args.mon))
    errors = 0
    for (name, host) in mon_hosts(args.mon):
        try:
            LOG.debug('detecting platform for host %s ...', name)
            distro = hosts.get(host, username=args.username, callbacks=[packages.ceph_is_installed])
            LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
            rlogger = logging.getLogger(name)
            hostname_is_compatible(distro.conn, rlogger, name)
            rlogger.debug('deploying mon to %s', name)
            distro.mon.create(distro, args, monitor_keyring)
            # Give the daemon a moment to come up before querying status.
            time.sleep(2)
            mon_status(distro.conn, rlogger, name, args)
            catch_mon_errors(distro.conn, rlogger, name, cfg, args)
            distro.conn.exit()
        except RuntimeError as e:
            # Keep deploying to the remaining hosts; report the total below.
            LOG.error(e)
            errors += 1
    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)
    mon_in_quorum = set()
    mon_members = set(mon_initial_members)
    for host in mon_initial_members:
        mon_name = 'mon.%s' % host
        LOG.info('processing monitor %s', mon_name)
        # Back-off schedule, popped from the end: 5, 10, 10, 15, 20 seconds.
        sleeps = [20, 20, 15, 10, 10, 5]
        tries = 5
        rlogger = logging.getLogger(host)
        distro = hosts.get(host, username=args.username, callbacks=[packages.ceph_is_installed])
        while tries:
            # Ask the monitor's admin socket for its current state.
            asok_path = paths.mon.asok(args.cluster, host)
            (out, err, code) = remoto.process.check(distro.conn, ['ceph', '--cluster={cluster}'.format(cluster=args.cluster), '--admin-daemon', asok_path, 'mon_status'])
            for line in err:
                rlogger.error(line)
            try:
                status = json.loads(''.join(out))
            except ValueError:
                # Admin socket not ready yet / non-JSON output: treat as no state.
                status = {}
            has_reached_quorum = status.get('state', '') in ['peon', 'leader']
            if not has_reached_quorum:
                LOG.warning('%s monitor is not yet in quorum, tries left: %s' % (mon_name, tries))
                tries -= 1
                sleep_seconds = sleeps.pop()
                LOG.warning('waiting %s seconds before retrying', sleep_seconds)
                time.sleep(sleep_seconds)
            else:
                mon_in_quorum.add(host)
                LOG.info('%s monitor has reached quorum!', mon_name)
                break
        distro.conn.exit()
    if mon_in_quorum == mon_members:
        LOG.info('all initial monitors are running and have formed quorum')
        LOG.info('Running gatherkeys...')
        gatherkeys.gatherkeys(args)
    else:
        LOG.error('Some monitors have still not reached quorum:')
        for host in mon_members - mon_in_quorum:
            LOG.error('%s', host)
        raise SystemExit('cluster may not be in a healthy state')
|
ceph-deploy
|
positive
|
def test_plugins_init(cli_parser, fake_home):
<DeepExtract>
options = parse_args(cli_parser, ['install', 'helm'])
pluginscmd = PluginsCmd(options)
</DeepExtract>
assert pluginscmd.plugin == 'helm'
assert PluginsCmd.name == 'plugins'
|
def test_plugins_init(cli_parser, fake_home):
    """The 'install helm' CLI invocation builds a PluginsCmd targeting helm."""
    opts = parse_args(cli_parser, ['install', 'helm'])
    cmd = PluginsCmd(opts)
    # The parsed plugin name is stored on the instance; the command name
    # is a class-level constant.
    assert cmd.plugin == 'helm'
    assert PluginsCmd.name == 'plugins'
|
appr
|
positive
|
def test_confirm_invalid(self):
<DeepExtract>
bob = User.objects.create_user('bob', 'bob@example.com', 'abc123')
EmailAddress.objects.create(user=bob, email='bob@example.com', verified=True, primary=True)
data = {'email': 'bob@example.com'}
response = self.client.post('/account/password_reset/', data)
self.assertEquals(response.status_code, 302)
self.assertEquals(len(mail.outbox), 1)
(url, path) = self._read_reset_email(mail.outbox[0])
</DeepExtract>
path = path[:-5] + '0' * 4 + path[-1]
response = self.client.get(path)
self.assertEquals(response.status_code, 200)
self.assertContains(response, 'The password reset link was invalid')
|
def test_confirm_invalid(self):
    """A password-reset confirm link with a corrupted token shows the
    'invalid link' page instead of the reset form."""
    user = User.objects.create_user('bob', 'bob@example.com', 'abc123')
    EmailAddress.objects.create(user=user, email='bob@example.com', verified=True, primary=True)
    response = self.client.post('/account/password_reset/', {'email': 'bob@example.com'})
    self.assertEquals(response.status_code, 302)
    self.assertEquals(len(mail.outbox), 1)
    (url, path) = self._read_reset_email(mail.outbox[0])
    # Corrupt the token: zero out the four characters just before the
    # trailing character of the path.
    path = path[:-5] + '0000' + path[-1]
    response = self.client.get(path)
    self.assertEquals(response.status_code, 200)
    self.assertContains(response, 'The password reset link was invalid')
|
colab
|
positive
|
def on_focus(self, *args):
if self.focus:
Animation.cancel_all(self, '_line_width', '_hint_y', '_hint_lbl_font_size')
if len(self.text) == 0:
self.hint_anim_in.start(self)
if self.error:
Animation(duration=0.2, _current_hint_text_color=self.error_color).start(self)
if self.mode == 'on_error':
Animation(duration=0.2, _current_error_color=self.error_color).start(self)
elif self.mode == 'persistent':
Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
elif self.mode == 'on_focus':
Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
else:
pass
elif not self.error:
<DeepExtract>
if self.focus and None is not None or (self.error and None is not None):
self._line_width = self.width
self.anim = Animation(_line_width=self.width, duration=0.2, t='out_quad')
self._msg_lbl.width = self.width
self._hint_lbl.width = self.width
</DeepExtract>
self.anim.start(self)
Animation(duration=0.2, _current_hint_text_color=self.line_color_focus).start(self)
if self.mode == 'on_error':
Animation(duration=0.2, _current_error_color=(0, 0, 0, 0)).start(self)
if self.mode == 'persistent':
Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
elif self.mode == 'on_focus':
Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
else:
pass
else:
Animation.cancel_all(self, '_line_width', '_hint_y', '_hint_lbl_font_size')
if len(self.text) == 0:
self.hint_anim_out.start(self)
if not self.error:
self.line_color_focus = self.base_line_color_focus
Animation(duration=0.2, _current_line_color=self.line_color_focus, _current_hint_text_color=self.theme_cls.disabled_hint_text_color).start(self)
if self.mode == 'on_error':
Animation(duration=0.2, _current_error_color=(0, 0, 0, 0)).start(self)
elif self.mode == 'persistent':
Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
elif self.mode == 'on_focus':
Animation(duration=0.2, _current_error_color=(0, 0, 0, 0)).start(self)
<DeepExtract>
if self.focus and None is not None or (self.error and None is not None):
self._line_width = 0
self.anim = Animation(_line_width=0, duration=0.2, t='out_quad')
self._msg_lbl.width = self.width
self._hint_lbl.width = self.width
</DeepExtract>
self.anim.start(self)
elif self.error:
Animation(duration=0.2, _current_line_color=self.error_color, _current_hint_text_color=self.error_color).start(self)
if self.mode == 'on_error':
Animation(duration=0.2, _current_error_color=self.error_color).start(self)
elif self.mode == 'persistent':
Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
elif self.mode == 'on_focus':
Animation(duration=0.2, _current_error_color=(0, 0, 0, 0)).start(self)
|
def on_focus(self, *args):
    """Kivy ``focus`` handler: animates the hint label, the underline and
    the helper/error message colors as the field gains or loses focus,
    honoring the helper-text ``mode`` ('on_error', 'persistent',
    'on_focus').

    NOTE(review): the two ``None is not None`` guards below are always
    False, so their bodies (which would resize the line/labels and
    rebuild ``self.anim``) never execute, and ``self.anim.start(self)``
    reuses whatever animation was built previously.  This looks like an
    inlining artifact of a width-update helper whose ``width=None``
    argument was substituted in -- confirm against the original helper.
    """
    if self.focus:
        # Gaining focus: stop any in-flight hint/underline animations.
        Animation.cancel_all(self, '_line_width', '_hint_y', '_hint_lbl_font_size')
        if len(self.text) == 0:
            # Empty field: float the hint label up out of the input area.
            self.hint_anim_in.start(self)
        if self.error:
            # Focused while in an error state: tint hint and message.
            Animation(duration=0.2, _current_hint_text_color=self.error_color).start(self)
            if self.mode == 'on_error':
                Animation(duration=0.2, _current_error_color=self.error_color).start(self)
            elif self.mode == 'persistent':
                Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
            elif self.mode == 'on_focus':
                Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
            else:
                pass
        elif not self.error:
            # NOTE(review): always-False guard -- body never runs (see docstring).
            if self.focus and None is not None or (self.error and None is not None):
                self._line_width = self.width
                self.anim = Animation(_line_width=self.width, duration=0.2, t='out_quad')
                self._msg_lbl.width = self.width
                self._hint_lbl.width = self.width
            self.anim.start(self)
            Animation(duration=0.2, _current_hint_text_color=self.line_color_focus).start(self)
            if self.mode == 'on_error':
                Animation(duration=0.2, _current_error_color=(0, 0, 0, 0)).start(self)
            if self.mode == 'persistent':
                Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
            elif self.mode == 'on_focus':
                Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
            else:
                pass
    else:
        # Losing focus: stop animations, drop the hint back if empty.
        Animation.cancel_all(self, '_line_width', '_hint_y', '_hint_lbl_font_size')
        if len(self.text) == 0:
            self.hint_anim_out.start(self)
        if not self.error:
            # Restore the normal line/hint colors.
            self.line_color_focus = self.base_line_color_focus
            Animation(duration=0.2, _current_line_color=self.line_color_focus, _current_hint_text_color=self.theme_cls.disabled_hint_text_color).start(self)
            if self.mode == 'on_error':
                Animation(duration=0.2, _current_error_color=(0, 0, 0, 0)).start(self)
            elif self.mode == 'persistent':
                Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
            elif self.mode == 'on_focus':
                Animation(duration=0.2, _current_error_color=(0, 0, 0, 0)).start(self)
            # NOTE(review): always-False guard -- body never runs (see docstring).
            if self.focus and None is not None or (self.error and None is not None):
                self._line_width = 0
                self.anim = Animation(_line_width=0, duration=0.2, t='out_quad')
                self._msg_lbl.width = self.width
                self._hint_lbl.width = self.width
            self.anim.start(self)
        elif self.error:
            # Unfocused while erroneous: keep the error tint on line/hint.
            Animation(duration=0.2, _current_line_color=self.error_color, _current_hint_text_color=self.error_color).start(self)
            if self.mode == 'on_error':
                Animation(duration=0.2, _current_error_color=self.error_color).start(self)
            elif self.mode == 'persistent':
                Animation(duration=0.2, _current_error_color=self.theme_cls.disabled_hint_text_color).start(self)
            elif self.mode == 'on_focus':
                Animation(duration=0.2, _current_error_color=(0, 0, 0, 0)).start(self)
|
CoPilot
|
positive
|
def prepare_value(self, value):
<DeepExtract>
if value in self.empty_values:
value = None
value = json.loads(value)
</DeepExtract>
if value is not None:
value = json.dumps(value, indent=4)
return value
|
def prepare_value(self, value):
    """Normalize ``value`` for display in the form widget.

    Empty values become None; anything else is parsed as JSON and then
    re-serialized with a 4-space indent so the textarea shows
    pretty-printed JSON.

    Bug fix: the previous inlined version called ``json.loads`` even
    after mapping an empty value to None, raising TypeError on every
    empty submission; parsing now only happens for non-empty values.
    """
    if value in self.empty_values:
        value = None
    else:
        value = json.loads(value)
    if value is not None:
        value = json.dumps(value, indent=4)
    return value
|
django_restframework_apiview
|
positive
|
def get_fastbin_targets(proc):
memory_map = open('/proc/{}/maps'.format(proc.pid), 'rb').readlines()
libc = ELF('./libc.so.6')
syms = libc.symbols
writable = []
got_libc_base = False
for x in memory_map:
if 'libc.so.6' in x:
l = x.split(' ')
mem_start = int(l[0].split('-')[0], 16)
mem_end = int(l[0].split('-')[1], 16)
if not got_libc_base:
LIBC = mem_start
got_libc_base = True
prot = l[1]
if 'rw' in prot:
writable.append((mem_start, mem_end))
addrs = []
for (s, e) in writable:
size = e - s
data = proc.leak(s, size)
for i in range(size - 8):
if data[i + 1:i + 8] == '\x00' * 7 and data[i] != '\x00':
addr = i + s
fastbin_size = ord(data[i])
<DeepExtract>
names = []
trimmed_size = fastbin_size & ~7
for x in syms:
if addr <= LIBC + syms[x] <= trimmed_size + addr:
names.append(x)
overwritable_syms = names
</DeepExtract>
addrs.append((addr - LIBC, fastbin_size, overwritable_syms))
return addrs
|
def get_fastbin_targets(proc):
    """Scan libc's writable mappings of *proc* for fake-fastbin candidates.

    A candidate is a non-zero byte followed by seven NUL bytes, so that on a
    little-endian target it reads as a plausible fastbin chunk size field.
    Returns a list of ``(libc_offset, fastbin_size, overwritable_symbols)``
    tuples, where ``overwritable_symbols`` are libc symbols falling inside
    the would-be chunk.

    NOTE(review): the code compares str literals against lines read in 'rb'
    mode and relies on 1-char string ord() -- it appears to target Python 2;
    confirm before porting.
    """
    # Fix: close the maps file deterministically instead of leaking the handle.
    with open('/proc/{}/maps'.format(proc.pid), 'rb') as maps_file:
        memory_map = maps_file.readlines()
    libc = ELF('./libc.so.6')
    syms = libc.symbols
    writable = []
    got_libc_base = False
    for x in memory_map:
        if 'libc.so.6' in x:
            l = x.split(' ')
            mem_start = int(l[0].split('-')[0], 16)
            mem_end = int(l[0].split('-')[1], 16)
            # The first libc mapping has the lowest address: the library base.
            if not got_libc_base:
                LIBC = mem_start
                got_libc_base = True
            prot = l[1]
            if 'rw' in prot:
                writable.append((mem_start, mem_end))
    addrs = []
    for (s, e) in writable:
        size = e - s
        data = proc.leak(s, size)
        for i in range(size - 8):
            if data[i + 1:i + 8] == '\x00' * 7 and data[i] != '\x00':
                addr = i + s
                fastbin_size = ord(data[i])
                names = []
                # Mask off the low flag bits (PREV_INUSE etc.) to get the
                # usable chunk span.
                trimmed_size = fastbin_size & ~7
                for x in syms:
                    if addr <= LIBC + syms[x] <= trimmed_size + addr:
                        names.append(x)
                overwritable_syms = names
                addrs.append((addr - LIBC, fastbin_size, overwritable_syms))
    return addrs
|
CTF-writeups
|
positive
|
def test(this_fetcher):
<DeepExtract>
assert isinstance(this_fetcher.to_dataframe(), pd.core.frame.DataFrame)
assert this_fetcher.N_RECORDS >= 1
assert this_fetcher.N_FILES >= 1
if True:
assert is_list_of_strings(this_fetcher.cachepath)
</DeepExtract>
this_fetcher.clear_cache()
with pytest.raises(CacheFileNotFound):
this_fetcher.cachepath
|
def test(this_fetcher):
    """Fetcher smoke test: data loads, metadata is sane, cache clears."""
    frame = this_fetcher.to_dataframe()
    assert isinstance(frame, pd.core.frame.DataFrame)
    # At least one record and one file must have been fetched.
    assert this_fetcher.N_RECORDS >= 1
    assert this_fetcher.N_FILES >= 1
    assert is_list_of_strings(this_fetcher.cachepath)
    # After clearing, the cache path must no longer resolve.
    this_fetcher.clear_cache()
    with pytest.raises(CacheFileNotFound):
        this_fetcher.cachepath
|
argopy
|
positive
|
def _tree_widget_sublist(node, root=False, expand=False):
import ipytree
result = ipytree.Node()
<DeepExtract>
if node.get_type() in {'Dataset', 'Array'}:
result.icon = 'table'
elif node.get_type() in {'Group', 'File'}:
result.icon = 'folder'
else:
raise ValueError('Unknown type: %s' % node.get_type())
</DeepExtract>
if root or expand is True or (isinstance(expand, int) and node.depth < expand):
result.opened = True
else:
result.opened = False
result.name = node.get_text()
result.nodes = [_tree_widget_sublist(c, expand=expand) for c in node.get_children()]
result.disabled = True
return result
|
def _tree_widget_sublist(node, root=False, expand=False):
    """Recursively build an ipytree.Node widget mirroring *node*'s subtree."""
    import ipytree
    widget = ipytree.Node()
    # Choose an icon from the node's reported type.
    if node.get_type() in {'Dataset', 'Array'}:
        widget.icon = 'table'
    elif node.get_type() in {'Group', 'File'}:
        widget.icon = 'folder'
    else:
        raise ValueError('Unknown type: %s' % node.get_type())
    # Open the root, anything when expand=True, or nodes shallower than an
    # integer expansion depth.
    widget.opened = True if root or expand is True or (isinstance(expand, int) and node.depth < expand) else False
    widget.name = node.get_text()
    widget.nodes = [_tree_widget_sublist(child, expand=expand) for child in node.get_children()]
    widget.disabled = True
    return widget
|
cooler
|
positive
|
@classmethod
def list_from_aws(cls: Type['IAMGroupResourceSpec'], client: BaseClient, account_id: str, region: str) -> ListFromAWSResult:
"""Return a dict of dicts of the format:
{'group_1_arn': {group_1_dict},
'group_2_arn': {group_2_dict},
...}
Where the dicts represent results from list_groups."""
groups = {}
paginator = client.get_paginator('list_groups')
for resp in paginator.paginate():
for group in resp.get('Groups', []):
resource_arn = group['Arn']
group_name = group['GroupName']
try:
group['Users'] = cls.get_group_users(client=client, group_name=group_name)
groups[resource_arn] = group
<DeepExtract>
policies = []
paginator = client.get_paginator('list_attached_group_policies')
for resp in paginator.paginate(GroupName=group_name):
for policy in resp.get('AttachedPolicies', []):
policies.append(policy)
attached_policies = policies
</DeepExtract>
group['PolicyAttachments'] = attached_policies
<DeepExtract>
policies = []
paginator = client.get_paginator('list_group_policies')
for resp in paginator.paginate(GroupName=group_name):
for policy_name in resp.get('PolicyNames', []):
policy = get_embedded_group_policy(client, group_name, policy_name)
policies.append(policy)
embedded_policies = policies
</DeepExtract>
group['EmbeddedPolicy'] = embedded_policies
except ClientError as c_e:
error_code = getattr(c_e, 'response', {}).get('Error', {}).get('Code', {})
if error_code != 'NoSuchEntity':
raise c_e
return ListFromAWSResult(resources=groups)
|
@classmethod
def list_from_aws(cls: Type['IAMGroupResourceSpec'], client: BaseClient, account_id: str, region: str) -> ListFromAWSResult:
    """Return a dict of dicts of the format:
    {'group_1_arn': {group_1_dict},
    'group_2_arn': {group_2_dict},
    ...}
    Where the dicts represent results from list_groups."""
    groups = {}
    group_paginator = client.get_paginator('list_groups')
    for page in group_paginator.paginate():
        for group in page.get('Groups', []):
            arn = group['Arn']
            name = group['GroupName']
            try:
                group['Users'] = cls.get_group_users(client=client, group_name=name)
                groups[arn] = group
                # Managed policies attached to the group.
                attached = []
                for attach_page in client.get_paginator('list_attached_group_policies').paginate(GroupName=name):
                    attached.extend(attach_page.get('AttachedPolicies', []))
                group['PolicyAttachments'] = attached
                # Inline (embedded) policies defined directly on the group.
                embedded = []
                for inline_page in client.get_paginator('list_group_policies').paginate(GroupName=name):
                    for policy_name in inline_page.get('PolicyNames', []):
                        embedded.append(get_embedded_group_policy(client, name, policy_name))
                group['EmbeddedPolicy'] = embedded
            except ClientError as c_e:
                # A group deleted mid-scan raises NoSuchEntity; skip it,
                # re-raise anything else.
                error_code = getattr(c_e, 'response', {}).get('Error', {}).get('Code', {})
                if error_code != 'NoSuchEntity':
                    raise c_e
    return ListFromAWSResult(resources=groups)
|
altimeter
|
positive
|
def validate_against_cached_collection_results(queryRanges):
<DeepExtract>
overlapping_partition_key_ranges = self.smart_routing_map_provider.get_overlapping_ranges('sample collection id', queryRanges)
</DeepExtract>
self.assertEqual(overlapping_partition_key_ranges, self.cached_collection_routing_map.get_overlapping_ranges(queryRanges))
|
def validate_against_cached_collection_results(queryRanges):
    """Assert the smart routing map provider matches the cached routing map.

    NOTE(review): this reads ``self`` from an enclosing scope, so it must be
    a closure defined inside a test method -- confirm against the full file.
    """
    overlapping_partition_key_ranges = self.smart_routing_map_provider.get_overlapping_ranges('sample collection id', queryRanges)
    self.assertEqual(overlapping_partition_key_ranges, self.cached_collection_routing_map.get_overlapping_ranges(queryRanges))
|
azure-cosmos-python
|
positive
|
def binary_search_max_batch(network, alg, low, high):
ret = 0
(low, high) = (round_up(low), round_down(high))
while low <= high:
<DeepExtract>
mid = int(low + (high - low) // 2 // 4 * 4)
</DeepExtract>
success = run_benchmark(network, alg, mid, debug_speed=True) == 0
if success:
ret = mid
<DeepExtract>
low = int((mid + 1 + 3) // 4 * 4)
</DeepExtract>
else:
<DeepExtract>
high = int(mid - 1 // 4 * 4)
</DeepExtract>
return ret
|
def binary_search_max_batch(network, alg, low, high):
    """Binary-search the largest 4-aligned batch size in [low, high] for
    which ``run_benchmark`` succeeds (returns 0).

    Returns 0 if no batch size in the range succeeds.
    """
    best = 0
    (low, high) = (round_up(low), round_down(high))
    while low <= high:
        # Midpoint with the half-distance rounded down to a multiple of 4.
        mid = int(low + (high - low) // 2 // 4 * 4)
        success = run_benchmark(network, alg, mid, debug_speed=True) == 0
        if success:
            best = mid
            # Next candidate above mid, rounded up to a multiple of 4.
            low = int((mid + 1 + 3) // 4 * 4)
        else:
            # Bug fix: the original computed `mid - 1 // 4 * 4`, which is
            # `mid - 0 == mid`, so `high` never decreased and the loop hung
            # once the lowest candidate failed. Round (mid - 1) down to a
            # multiple of 4 instead.
            high = int((mid - 1) // 4 * 4)
    return best
|
actnn
|
positive
|
@_make_expr_internal.register(instrs.MAKE_FUNCTION)
@_make_expr_internal.register(instrs.MAKE_CLOSURE)
def _make_lambda(toplevel, stack_builders):
load_name = stack_builders.pop()
load_code = stack_builders.pop()
<DeepExtract>
if not isinstance(load_code, instrs.LOAD_CONST):
raise TypeError("make_function expected 'load_code_instr` to be a LOAD_CONST, but got %s" % load_code)
if not isinstance(load_code.arg, types.CodeType):
raise TypeError('make_function expected load_code_instr to load a code object, but got %s' % load_code.arg)
if not isinstance(load_name, instrs.LOAD_CONST):
raise TypeError("make_function expected 'load_name_instr` to be a LOAD_CONST, but got %s" % load_code)
if not isinstance(load_name.arg, str):
raise TypeError('make_function expected load_name_instr to load a string, but got %r instead' % load_name.arg)
is_lambda = is_lambda_name(load_name.arg)
if True and (not is_lambda):
raise ValueError('Expected to make a function named <lambda>, but got %r instead.' % load_name.arg)
if not True and is_lambda:
raise ValueError('Unexpectedly received lambda function.')
if not isinstance(toplevel, (instrs.MAKE_FUNCTION, instrs.MAKE_CLOSURE)):
raise TypeError('make_function expected a MAKE_FUNCTION or MAKE_CLOSUREinstruction, but got %s instead.' % toplevel)
</DeepExtract>
co = load_code.arg
(args, kwonly, varargs, varkwargs) = paramnames(co)
<DeepExtract>
(n_defaults, n_kwonlydefaults, n_annotations) = unpack_make_function_arg(toplevel.arg)
if n_annotations:
load_annotation_names = stack_builders.pop()
annotations = dict(zip(reversed(load_annotation_names.arg), (make_expr(stack_builders) for _ in range(n_annotations - 1))))
else:
annotations = {}
kwonlys = {}
while n_kwonlydefaults:
default_expr = make_expr(stack_builders)
key_instr = stack_builders.pop()
if not isinstance(key_instr, instrs.LOAD_CONST):
raise DecompilationError('kwonlydefault key is not a LOAD_CONST: %s' % key_instr)
if not isinstance(key_instr.arg, str):
raise DecompilationError("kwonlydefault key builder is not a 'LOAD_CONST of a string: %s" % key_instr)
kwonlys[key_instr.arg] = default_expr
n_kwonlydefaults -= 1
defaults = make_exprs(stack_builders, n_defaults)
(defaults, kw_defaults, annotations) = (defaults, kwonlys, annotations)
</DeepExtract>
if annotations:
raise DecompilationError('Unexpected annotations while building lambda: %s' % annotations)
if isinstance(toplevel, instrs.MAKE_CLOSURE):
<DeepExtract>
cells = make_expr(stack_builders)
if not isinstance(cells, ast.Tuple):
raise DecompilationError('Expected an ast.Tuple of closure cells, but got %s' % cells)
_closure_cells = cells
</DeepExtract>
<DeepExtract>
code = Code.from_pycode(co)
for (a, b) in sliding_window(2, code.instrs):
a._next_target_of = b._target_of
b._next_target_of = set()
try:
body = instrs_to_body(deque(code.instrs), DecompilationContext(in_lambda=True))
if DecompilationContext(in_lambda=True).in_function_block:
body = make_global_and_nonlocal_decls(code.instrs) + body
body = body
finally:
for i in code.instrs:
del i._next_target_of
</DeepExtract>
if len(body) != 1:
raise DecompilationError('Got multiple expresssions for lambda: %s' % body)
body = body[0]
return ast.Lambda(args=make_function_arguments(args, kwonly, varargs, varkwargs, defaults, kw_defaults, annotations), body=body)
|
@_make_expr_internal.register(instrs.MAKE_FUNCTION)
@_make_expr_internal.register(instrs.MAKE_CLOSURE)
def _make_lambda(toplevel, stack_builders):
    """Decompile a MAKE_FUNCTION / MAKE_CLOSURE instruction into ast.Lambda.

    Pops the function-name and code-object LOAD_CONSTs from
    *stack_builders*, validates that they describe a ``<lambda>``, rebuilds
    defaults / keyword-only defaults / annotations from the stack, and
    decompiles the code object into the lambda's single body expression.
    """
    load_name = stack_builders.pop()
    load_code = stack_builders.pop()
    # Validation (inlined from a shared helper): both operands must be
    # LOAD_CONSTs of the expected types, the loaded name must be a lambda
    # name, and `toplevel` must be MAKE_FUNCTION or MAKE_CLOSURE.
    if not isinstance(load_code, instrs.LOAD_CONST):
        raise TypeError("make_function expected 'load_code_instr` to be a LOAD_CONST, but got %s" % load_code)
    if not isinstance(load_code.arg, types.CodeType):
        raise TypeError('make_function expected load_code_instr to load a code object, but got %s' % load_code.arg)
    if not isinstance(load_name, instrs.LOAD_CONST):
        raise TypeError("make_function expected 'load_name_instr` to be a LOAD_CONST, but got %s" % load_code)
    if not isinstance(load_name.arg, str):
        raise TypeError('make_function expected load_name_instr to load a string, but got %r instead' % load_name.arg)
    is_lambda = is_lambda_name(load_name.arg)
    # `True` below is the inlined `expect_lambda` flag of the original helper.
    if True and (not is_lambda):
        raise ValueError('Expected to make a function named <lambda>, but got %r instead.' % load_name.arg)
    if not True and is_lambda:
        raise ValueError('Unexpectedly received lambda function.')
    if not isinstance(toplevel, (instrs.MAKE_FUNCTION, instrs.MAKE_CLOSURE)):
        raise TypeError('make_function expected a MAKE_FUNCTION or MAKE_CLOSUREinstruction, but got %s instead.' % toplevel)
    co = load_code.arg
    (args, kwonly, varargs, varkwargs) = paramnames(co)
    # Rebuild default values, kw-only defaults, and annotations from the
    # builders remaining on the stack (counts are packed in toplevel.arg).
    (n_defaults, n_kwonlydefaults, n_annotations) = unpack_make_function_arg(toplevel.arg)
    if n_annotations:
        # The last annotation slot is the tuple of annotated names itself.
        load_annotation_names = stack_builders.pop()
        annotations = dict(zip(reversed(load_annotation_names.arg), (make_expr(stack_builders) for _ in range(n_annotations - 1))))
    else:
        annotations = {}
    kwonlys = {}
    while n_kwonlydefaults:
        # Each kw-only default is a (value expr, LOAD_CONST name) pair.
        default_expr = make_expr(stack_builders)
        key_instr = stack_builders.pop()
        if not isinstance(key_instr, instrs.LOAD_CONST):
            raise DecompilationError('kwonlydefault key is not a LOAD_CONST: %s' % key_instr)
        if not isinstance(key_instr.arg, str):
            raise DecompilationError("kwonlydefault key builder is not a 'LOAD_CONST of a string: %s" % key_instr)
        kwonlys[key_instr.arg] = default_expr
        n_kwonlydefaults -= 1
    defaults = make_exprs(stack_builders, n_defaults)
    (defaults, kw_defaults, annotations) = (defaults, kwonlys, annotations)
    # Lambdas cannot carry annotations; anything here means a decode error.
    if annotations:
        raise DecompilationError('Unexpected annotations while building lambda: %s' % annotations)
    if isinstance(toplevel, instrs.MAKE_CLOSURE):
        # Closures additionally carry a tuple of closure cell references.
        cells = make_expr(stack_builders)
        if not isinstance(cells, ast.Tuple):
            raise DecompilationError('Expected an ast.Tuple of closure cells, but got %s' % cells)
        _closure_cells = cells
    # Decompile the code object's instructions into a statement body,
    # temporarily threading jump-target info between adjacent instructions.
    code = Code.from_pycode(co)
    for (a, b) in sliding_window(2, code.instrs):
        a._next_target_of = b._target_of
        b._next_target_of = set()
    try:
        body = instrs_to_body(deque(code.instrs), DecompilationContext(in_lambda=True))
        if DecompilationContext(in_lambda=True).in_function_block:
            body = make_global_and_nonlocal_decls(code.instrs) + body
        body = body
    finally:
        # Strip the temporary attribute whether or not decompilation worked.
        for i in code.instrs:
            del i._next_target_of
    # A lambda decompiles to exactly one expression statement.
    if len(body) != 1:
        raise DecompilationError('Got multiple expresssions for lambda: %s' % body)
    body = body[0]
    return ast.Lambda(args=make_function_arguments(args, kwonly, varargs, varkwargs, defaults, kw_defaults, annotations), body=body)
|
codetransformer
|
positive
|
def updateInfo(self, group):
"""
plots info for the currently selected group
"""
t1 = time.time()
<DeepExtract>
objects = [self.meanAx.lines, self.isiAx.patches, self.overTimeAx.lines, self.meanDensAx.images, self.cumSpikeAx.lines, self.meanDensLogAx.images, self.maxDistrAx.patches, self.maxDistrAx.lines]
map(delfunc, objects)
</DeepExtract>
self.additionalIsiPatches = []
data = group.meandata
<DeepExtract>
while 1:
l = len(self.meanAx.lines)
if not l:
break
self.meanAx.lines[0].remove()
if len(self.meanAx.lines) == l:
del self.meanAx.lines[0]
</DeepExtract>
if len(data):
x = range(data[0].shape[0])
ylim_mean = 1.5 * np.max(np.abs(data))
self.meanAx.set_ylim(-ylim_mean, ylim_mean)
for row in data:
line = mpl.Line2D(x, row)
self.meanAx.add_line(line)
data = group.isidata
if len(data) > 1:
nBins = options['isi_n_bins']
self.isiAx.cla()
(n, _, _) = self.isiAx.hist(data, nBins, color=options['histcolor'], histtype=options['histtype'])
self.isiAx.set_ylim((0, np.max(n) + 5))
too_short = (data <= options['isi_too_short_ms']).sum() / group.times.shape[0]
titlestr = '{:.1%} < {} ms'.format(too_short, options['isi_too_short_ms'])
else:
self.isiAx.cla()
titlestr = ''
self.isiAx.set_title(titlestr)
if self.sign == 'pos':
data = [c.spikes.max(1) for c in group.clusters]
elif self.sign == 'neg':
data = [c.spikes.min(1) for c in group.clusters]
times = [(c.times - self.startTime) / 60000.0 for c in group.clusters]
if len(times):
self.overTimeAx.cla()
for (x, y) in zip(times, data):
self.overTimeAx.plot(x, y, 'b.', markersize=options['smallmarker'])
tdata = np.hstack(times)
tdata.sort()
for il in self.cumSpikeAx.lines:
il.remove()
self.cumSpikeAx.plot(tdata, np.arange(len(tdata)), 'b')
self.cumSpikeAx.set_xlim(0, tdata.max())
self.cumSpikeAx.set_ylim(0, len(tdata))
tstr = '{} spikes'.format(len(tdata))
mdata = np.hstack(data)
if self.sign == 'pos':
self.overTimeAx.set_ylim((0, mdata.max() * 1.1))
else:
self.overTimeAx.set_ylim((mdata.min() * 1.1, 0))
self.maxDistrAx.cla()
(ns, _, _) = self.maxDistrAx.hist(mdata, 100, color=options['histcolor'], histtype=options['histtype'])
self.maxDistrAx.set_xlim(min(0, mdata.min()), max(0, mdata.max()))
self.maxDistrAx.set_ylim((0, max(ns) * 1.15))
else:
tstr = ''
self.overTimeAx.cla()
self.cumSpikeAx.set_title(tstr)
if self.thresholds is not None:
thr_times = self.thresholds[:, :2].ravel() - self.thresholds[0, 0]
thr_times /= 60000.0
tthr = (self.thresholds[:, 2], self.thresholds[:, 2])
thrs = np.vstack(tthr).T.ravel()
if self.sign == 'neg':
thrs *= -1
self.overTimeAx.plot(thr_times, thrs, 'm', lw=2)
self.overTimeAx.set_xlim((thr_times[0], thr_times[-1]))
if len(thrs) > 1:
self.maxDistrAx.axvline(np.median(thrs), color='m', lw=2)
self.maxDistrAx.axvline(thrs.min(), color='m')
self.maxDistrAx.axvline(thrs.max(), color='m')
else:
self.maxDistrAx.axvline(thrs[0], color='m')
data = group.densitydata
if len(data):
self.meanDensAx.imshow(data, cmap=options['cmap'], aspect='auto', origin='lower')
self.meanDensLogAx.imshow(np.log(1 + data), cmap=options['cmap'], aspect='auto', origin='lower')
self.draw()
t2 = time.time()
print('Update time: {:.0f} ms'.format((t2 - t1) * 1000))
|
def updateInfo(self, group):
    """
    plots info for the currently selected group

    Redraws mean waveforms, the ISI histogram, spike amplitudes over time,
    the cumulative spike count, amplitude distribution (with thresholds,
    if present), and the density images, then reports the elapsed time.
    """
    t1 = time.time()
    # Clear previous artists from all axes.
    # NOTE(review): under Python 3 this map() is lazy and delfunc never
    # runs -- confirm which interpreter this targets.
    objects = [self.meanAx.lines, self.isiAx.patches, self.overTimeAx.lines, self.meanDensAx.images, self.cumSpikeAx.lines, self.meanDensLogAx.images, self.maxDistrAx.patches, self.maxDistrAx.lines]
    map(delfunc, objects)
    self.additionalIsiPatches = []
    data = group.meandata
    # Remove remaining mean-axis lines; the fallback `del` guards against
    # Line2D.remove() not detaching the line from the axes.
    while 1:
        l = len(self.meanAx.lines)
        if not l:
            break
        self.meanAx.lines[0].remove()
        if len(self.meanAx.lines) == l:
            del self.meanAx.lines[0]
    if len(data):
        # Plot each cluster's mean waveform with symmetric y-limits.
        x = range(data[0].shape[0])
        ylim_mean = 1.5 * np.max(np.abs(data))
        self.meanAx.set_ylim(-ylim_mean, ylim_mean)
        for row in data:
            line = mpl.Line2D(x, row)
            self.meanAx.add_line(line)
    data = group.isidata
    if len(data) > 1:
        # Inter-spike-interval histogram; title reports the fraction of
        # suspiciously short ISIs.
        nBins = options['isi_n_bins']
        self.isiAx.cla()
        (n, _, _) = self.isiAx.hist(data, nBins, color=options['histcolor'], histtype=options['histtype'])
        self.isiAx.set_ylim((0, np.max(n) + 5))
        too_short = (data <= options['isi_too_short_ms']).sum() / group.times.shape[0]
        titlestr = '{:.1%} < {} ms'.format(too_short, options['isi_too_short_ms'])
    else:
        self.isiAx.cla()
        titlestr = ''
    self.isiAx.set_title(titlestr)
    # Per-cluster spike extrema (peak for positive, trough for negative).
    if self.sign == 'pos':
        data = [c.spikes.max(1) for c in group.clusters]
    elif self.sign == 'neg':
        data = [c.spikes.min(1) for c in group.clusters]
    # Spike times in minutes relative to recording start.
    times = [(c.times - self.startTime) / 60000.0 for c in group.clusters]
    if len(times):
        self.overTimeAx.cla()
        for (x, y) in zip(times, data):
            self.overTimeAx.plot(x, y, 'b.', markersize=options['smallmarker'])
        tdata = np.hstack(times)
        tdata.sort()
        for il in self.cumSpikeAx.lines:
            il.remove()
        # Cumulative spike count over time.
        self.cumSpikeAx.plot(tdata, np.arange(len(tdata)), 'b')
        self.cumSpikeAx.set_xlim(0, tdata.max())
        self.cumSpikeAx.set_ylim(0, len(tdata))
        tstr = '{} spikes'.format(len(tdata))
        mdata = np.hstack(data)
        if self.sign == 'pos':
            self.overTimeAx.set_ylim((0, mdata.max() * 1.1))
        else:
            self.overTimeAx.set_ylim((mdata.min() * 1.1, 0))
        # Amplitude distribution histogram.
        self.maxDistrAx.cla()
        (ns, _, _) = self.maxDistrAx.hist(mdata, 100, color=options['histcolor'], histtype=options['histtype'])
        self.maxDistrAx.set_xlim(min(0, mdata.min()), max(0, mdata.max()))
        self.maxDistrAx.set_ylim((0, max(ns) * 1.15))
    else:
        tstr = ''
        self.overTimeAx.cla()
    self.cumSpikeAx.set_title(tstr)
    if self.thresholds is not None:
        # Overlay detection thresholds over time and on the distribution.
        thr_times = self.thresholds[:, :2].ravel() - self.thresholds[0, 0]
        thr_times /= 60000.0
        tthr = (self.thresholds[:, 2], self.thresholds[:, 2])
        thrs = np.vstack(tthr).T.ravel()
        if self.sign == 'neg':
            thrs *= -1
        self.overTimeAx.plot(thr_times, thrs, 'm', lw=2)
        self.overTimeAx.set_xlim((thr_times[0], thr_times[-1]))
        if len(thrs) > 1:
            self.maxDistrAx.axvline(np.median(thrs), color='m', lw=2)
            self.maxDistrAx.axvline(thrs.min(), color='m')
            self.maxDistrAx.axvline(thrs.max(), color='m')
        else:
            self.maxDistrAx.axvline(thrs[0], color='m')
    data = group.densitydata
    if len(data):
        # Density images in linear and log(1 + x) scale.
        self.meanDensAx.imshow(data, cmap=options['cmap'], aspect='auto', origin='lower')
        self.meanDensLogAx.imshow(np.log(1 + data), cmap=options['cmap'], aspect='auto', origin='lower')
    self.draw()
    t2 = time.time()
    print('Update time: {:.0f} ms'.format((t2 - t1) * 1000))
|
combinato
|
positive
|
def notify_popo(users, message):
<DeepExtract>
print('>>> ' + 'Notify popo users')
</DeepExtract>
print('Skip, todo')
for user in users:
pass
|
def notify_popo(users, message):
    """Notify *users* via popo with *message* (delivery not implemented)."""
    print('>>> ' + 'Notify popo users')
    print('Skip, todo')
    # Placeholder: per-user delivery still to be written.
    for _user in users:
        pass
|
ATX
|
positive
|
def __setitem__(self, key, value):
"""define the square bracket operator to refer to the object's __dict__
for setting values."""
if '.' in key:
<DeepExtract>
key_split = key.split('.')
cur_dict = self
for k in key_split[:-1]:
try:
cur_dict = cur_dict[k]
except KeyError:
cur_dict[k] = self.__class__()
cur_dict = cur_dict[k]
cur_dict[key_split[-1]] = value
</DeepExtract>
else:
setattr(self, key, value)
|
def __setitem__(self, key, value):
    """Set *key* to *value*, treating dotted keys as nested paths.

    A key such as ``'a.b.c'`` walks (creating missing levels as instances
    of this same class) down to the leaf; a plain key is stored as an
    attribute on this object.
    """
    if '.' not in key:
        setattr(self, key, value)
        return
    parts = key.split('.')
    node = self
    for part in parts[:-1]:
        try:
            node = node[part]
        except KeyError:
            # Intermediate level missing: create it and descend into it.
            node[part] = self.__class__()
            node = node[part]
    node[parts[-1]] = value
|
configman
|
positive
|
def distribute_and_over_or(s):
"""Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
>>> distribute_and_over_or((A & B) | C)
((A | C) & (B | C))
"""
s = expr(s)
if s.op == '|':
<DeepExtract>
s.args = dissociate('|', s.args)
if len(s.args) == 0:
s = _op_identity['|']
elif len(s.args) == 1:
s = s.args[0]
else:
s = Expr('|', *s.args)
</DeepExtract>
if s.op != '|':
return distribute_and_over_or(s)
if len(s.args) == 0:
return False
if len(s.args) == 1:
return distribute_and_over_or(s.args[0])
conj = first((arg for arg in s.args if arg.op == '&'))
if not conj:
return s
others = [a for a in s.args if a is not conj]
<DeepExtract>
others = dissociate('|', others)
if len(others) == 0:
rest = _op_identity['|']
elif len(others) == 1:
rest = others[0]
else:
rest = Expr('|', *others)
</DeepExtract>
return associate('&', [distribute_and_over_or(c | rest) for c in conj.args])
elif s.op == '&':
return associate('&', list(map(distribute_and_over_or, s.args)))
else:
return s
|
def distribute_and_over_or(s):
    """Given a sentence s consisting of conjunctions and disjunctions
    of literals, return an equivalent sentence in CNF.
    >>> distribute_and_over_or((A & B) | C)
    ((A | C) & (B | C))
    """
    s = expr(s)
    if s.op == '|':
        # Flatten nested '|' args (inlined associate('|', s.args)); note
        # this mutates s.args in place before rebuilding s.
        s.args = dissociate('|', s.args)
        if len(s.args) == 0:
            # Empty disjunction collapses to the '|' identity (False).
            s = _op_identity['|']
        elif len(s.args) == 1:
            s = s.args[0]
        else:
            s = Expr('|', *s.args)
        # Flattening may have changed the top operator; restart if so.
        if s.op != '|':
            return distribute_and_over_or(s)
        if len(s.args) == 0:
            return False
        if len(s.args) == 1:
            return distribute_and_over_or(s.args[0])
        # Pick one conjunction to distribute over the rest.
        conj = first((arg for arg in s.args if arg.op == '&'))
        if not conj:
            # Pure disjunction of literals: already CNF.
            return s
        others = [a for a in s.args if a is not conj]
        # Rebuild the remaining disjuncts (inlined associate('|', others)).
        others = dissociate('|', others)
        if len(others) == 0:
            rest = _op_identity['|']
        elif len(others) == 1:
            rest = others[0]
        else:
            rest = Expr('|', *others)
        # Distribute: (A & B) | rest  ->  (A | rest) & (B | rest).
        return associate('&', [distribute_and_over_or(c | rest) for c in conj.args])
    elif s.op == '&':
        return associate('&', list(map(distribute_and_over_or, s.args)))
    else:
        # Literal or other atom: nothing to distribute.
        return s
|
artificial-intelligence
|
positive
|
def batch_gen_train(self, batch_triples):
""" generate data for GFAW candidates
:param batch_size: batch_size of this batch
:param current_node: eg. 'C0086418', dtype: str
:return: batches of data
"""
def function(row):
return row[0] + '#' + row[1]
etypairs = np.apply_along_axis(function, 1, batch_triples)
rlts = np.array([self.rel2id[rel] for rel in batch_triples[:, 2]])
orders = []
for i in etypairs:
if i in self.entpair2scope:
orders.append(self.entpair2scope[i])
else:
print('Attention, ', i, ' is not in entpair2scope!')
<DeepExtract>
batch_data = {}
_word = []
_pos1 = []
_pos2 = []
_mask = []
_rel = []
_ins_rel = []
_multi_rel = []
_entpair = []
_length = []
_scope = []
cur_pos = 0
for (index, o) in enumerate(orders):
bag_size = o[1] - o[0]
_word.append(self.data_word[o[0]:o[1]])
_pos1.append(self.data_pos1[o[0]:o[1]])
_pos2.append(self.data_pos2[o[0]:o[1]])
_mask.append(self.data_mask[o[0]:o[1]])
_rel.append(rlts[index])
_ins_rel.append(np.repeat(rlts[index], bag_size))
_length.append(self.data_length[o[0]:o[1]])
_scope.append([cur_pos, cur_pos + bag_size])
cur_pos = cur_pos + bag_size
if self.mode == self.MODE_ENTPAIR_BAG:
_one_multi_rel = np.zeros(self.rel_tot, dtype=np.int32)
for j in range(o[0], o[1]):
_one_multi_rel[self.data_rel[j]] = 1
_multi_rel.append(_one_multi_rel)
_entpair.append(self.scope2entpair[tuple(o)])
for i in range(self.batch_size - len(orders)):
_word.append(np.zeros((1, self.data_word.shape[-1]), dtype=np.int32))
_pos1.append(np.zeros((1, self.data_pos1.shape[-1]), dtype=np.int32))
_pos2.append(np.zeros((1, self.data_pos2.shape[-1]), dtype=np.int32))
_mask.append(np.zeros((1, self.data_mask.shape[-1]), dtype=np.int32))
_rel.append(0)
_ins_rel.append(np.zeros(1, dtype=np.int32))
_length.append(np.zeros(1, dtype=np.int32))
_scope.append([cur_pos, cur_pos + 1])
cur_pos += 1
if self.mode == self.MODE_ENTPAIR_BAG:
_multi_rel.append(np.zeros(self.rel_tot, dtype=np.int32))
_entpair.append('None#None')
batch_data['word'] = np.concatenate(_word)
batch_data['pos1'] = np.concatenate(_pos1)
batch_data['pos2'] = np.concatenate(_pos2)
batch_data['mask'] = np.concatenate(_mask)
batch_data['rel'] = np.stack(_rel)
batch_data['ins_rel'] = np.concatenate(_ins_rel)
if self.mode == self.MODE_ENTPAIR_BAG:
batch_data['multi_rel'] = np.stack(_multi_rel)
batch_data['entpair'] = _entpair
batch_data['length'] = np.concatenate(_length)
batch_data['scope'] = np.stack(_scope)
batch = batch_data
</DeepExtract>
return batch
|
def batch_gen_train(self, batch_triples):
    """Build one training batch from (head, tail, relation) triples.

    :param batch_triples: array of rows ``[head, tail, relation]``
        (string-typed; relations are mapped through ``self.rel2id``)
    :return: dict with stacked 'word'/'pos1'/'pos2'/'mask'/'rel'/'ins_rel'/
        'length'/'scope' arrays (plus 'multi_rel'/'entpair' in
        entpair-bag mode), padded with dummy bags up to ``self.batch_size``
    """

    def function(row):
        # Entity-pair key used to look up sentence scopes.
        return row[0] + '#' + row[1]
    etypairs = np.apply_along_axis(function, 1, batch_triples)
    rlts = np.array([self.rel2id[rel] for rel in batch_triples[:, 2]])
    # Collect the [start, end) sentence scope for each known entity pair.
    orders = []
    for i in etypairs:
        if i in self.entpair2scope:
            orders.append(self.entpair2scope[i])
        else:
            print('Attention, ', i, ' is not in entpair2scope!')
    batch_data = {}
    _word = []
    _pos1 = []
    _pos2 = []
    _mask = []
    _rel = []
    _ins_rel = []
    _multi_rel = []
    _entpair = []
    _length = []
    _scope = []
    cur_pos = 0
    # One bag per entity pair: slice the per-sentence feature arrays.
    for (index, o) in enumerate(orders):
        bag_size = o[1] - o[0]
        _word.append(self.data_word[o[0]:o[1]])
        _pos1.append(self.data_pos1[o[0]:o[1]])
        _pos2.append(self.data_pos2[o[0]:o[1]])
        _mask.append(self.data_mask[o[0]:o[1]])
        _rel.append(rlts[index])
        _ins_rel.append(np.repeat(rlts[index], bag_size))
        _length.append(self.data_length[o[0]:o[1]])
        _scope.append([cur_pos, cur_pos + bag_size])
        cur_pos = cur_pos + bag_size
        if self.mode == self.MODE_ENTPAIR_BAG:
            # Multi-hot label over every relation observed for this bag.
            _one_multi_rel = np.zeros(self.rel_tot, dtype=np.int32)
            for j in range(o[0], o[1]):
                _one_multi_rel[self.data_rel[j]] = 1
            _multi_rel.append(_one_multi_rel)
            _entpair.append(self.scope2entpair[tuple(o)])
    # Pad with single-sentence dummy bags so every batch has batch_size bags.
    for i in range(self.batch_size - len(orders)):
        _word.append(np.zeros((1, self.data_word.shape[-1]), dtype=np.int32))
        _pos1.append(np.zeros((1, self.data_pos1.shape[-1]), dtype=np.int32))
        _pos2.append(np.zeros((1, self.data_pos2.shape[-1]), dtype=np.int32))
        _mask.append(np.zeros((1, self.data_mask.shape[-1]), dtype=np.int32))
        _rel.append(0)
        _ins_rel.append(np.zeros(1, dtype=np.int32))
        _length.append(np.zeros(1, dtype=np.int32))
        _scope.append([cur_pos, cur_pos + 1])
        cur_pos += 1
        if self.mode == self.MODE_ENTPAIR_BAG:
            _multi_rel.append(np.zeros(self.rel_tot, dtype=np.int32))
            _entpair.append('None#None')
    # Concatenate sentence-level arrays; stack bag-level labels and scopes.
    batch_data['word'] = np.concatenate(_word)
    batch_data['pos1'] = np.concatenate(_pos1)
    batch_data['pos2'] = np.concatenate(_pos2)
    batch_data['mask'] = np.concatenate(_mask)
    batch_data['rel'] = np.stack(_rel)
    batch_data['ins_rel'] = np.concatenate(_ins_rel)
    if self.mode == self.MODE_ENTPAIR_BAG:
        batch_data['multi_rel'] = np.stack(_multi_rel)
        batch_data['entpair'] = _entpair
    batch_data['length'] = np.concatenate(_length)
    batch_data['scope'] = np.stack(_scope)
    batch = batch_data
    return batch
|
CPL
|
positive
|
@patch('chaosaws.emr.actions.aws_client', autospec=True)
def test_modify_cluster_invalid_cluster(self, aws_client):
cluster_id = 'j-123456789AAZ'
<DeepExtract>
mocked_response = ClientError(operation_name=kwargs['op'], error_response={'Error': {'Code': kwargs['Code'], 'Message': kwargs['Message']}})
</DeepExtract>
client = MagicMock()
aws_client.return_value = client
client.modify_cluster.side_effect = mocked_response
with pytest.raises(FailedActivity) as e:
modify_cluster(cluster_id=self.cluster_id, concurrency=10)
assert "Cluster id '%s' is not valid" % cluster_id in str(e.value)
|
@patch('chaosaws.emr.actions.aws_client', autospec=True)
def test_modify_cluster_invalid_cluster(self, aws_client):
    """modify_cluster must wrap an AWS invalid-cluster error in a
    FailedActivity that names the offending cluster id."""
    cluster_id = 'j-123456789AAZ'
    # Bug fix: the original built the ClientError from an undefined
    # `kwargs` mapping (a broken inline-expansion) and would raise
    # NameError before the test body ran; use explicit values instead.
    mocked_response = ClientError(operation_name='ModifyCluster', error_response={'Error': {'Code': 'InvalidRequestException', 'Message': 'Cluster id is not valid'}})
    client = MagicMock()
    aws_client.return_value = client
    client.modify_cluster.side_effect = mocked_response
    with pytest.raises(FailedActivity) as e:
        # Use the local cluster_id so the call matches the assertion below.
        modify_cluster(cluster_id=cluster_id, concurrency=10)
    assert "Cluster id '%s' is not valid" % cluster_id in str(e.value)
|
chaostoolkit-aws
|
positive
|
def render(self, data, parser_name, negate=False):
"""render"""
if negate:
tmplt = self.get_parser(parser_name).get('remval') or self.get_parser(parser_name)['setval']
else:
tmplt = self.get_parser(parser_name)['setval']
<DeepExtract>
try:
if callable(tmplt):
res = tmplt(data)
else:
res = self._template(value=tmplt, variables=data, fail_on_undefined=False)
except KeyError:
command = None
if res:
if negate:
rem = '{0} '.format(self._prefix.get('remove', 'no'))
if isinstance(res, list):
cmd = [rem + each for each in res]
command = cmd
command = rem + res
elif self._prefix.get('set'):
set_cmd = '{0} '.format(self._prefix.get('set', ''))
if isinstance(res, list):
cmd = [set_cmd + each for each in res]
command = cmd
command = set_cmd + res
command = res
</DeepExtract>
return command
|
def render(self, data, parser_name, negate=False):
    """Render the parser template *parser_name* with *data*.

    When *negate* is True the rendered command is prefixed with the
    platform's removal keyword (``self._prefix['remove']``, default 'no');
    otherwise an optional ``self._prefix['set']`` prefix is applied.
    Returns a string, a list of strings, or None when nothing rendered.
    """
    if negate:
        tmplt = self.get_parser(parser_name).get('remval') or self.get_parser(parser_name)['setval']
    else:
        tmplt = self.get_parser(parser_name)['setval']
    try:
        if callable(tmplt):
            res = tmplt(data)
        else:
            res = self._template(value=tmplt, variables=data, fail_on_undefined=False)
    except KeyError:
        # A required template variable is missing: nothing to render.
        return None
    if not res:
        return None
    # Bug fix: the original fell through its prefix branches and always
    # overwrote the result (raising TypeError on list results and
    # NameError on empty ones); each branch now returns its own command.
    if negate:
        rem = '{0} '.format(self._prefix.get('remove', 'no'))
        if isinstance(res, list):
            return [rem + each for each in res]
        return rem + res
    if self._prefix.get('set'):
        set_cmd = '{0} '.format(self._prefix.get('set', ''))
        if isinstance(res, list):
            return [set_cmd + each for each in res]
        return set_cmd + res
    return res
|
ansible.netcommon
|
positive
|
def always_branch(self, data, addr):
<DeepExtract>
ins = instructions.parse_instruction(AVR.chip, addr, data)
</DeepExtract>
dst = ins._operands[0]
v = (dst.immediate_value - 2) / 2
v = v & 4095 | 49152
return struct.pack('<H', v)
|
def always_branch(self, data, addr):
    """Patch the AVR instruction in *data* at *addr* into an unconditional
    RJMP to the same target, returning the 2-byte little-endian encoding.
    """
    ins = instructions.parse_instruction(AVR.chip, addr, data)
    dst = ins._operands[0]
    # Word offset of the target relative to the following instruction.
    # Bug fix: use floor division -- on Python 3 `/` yields a float and the
    # bitwise ops / struct.pack below then raise TypeError.
    offset = (dst.immediate_value - 2) // 2
    # RJMP encoding: 0b1100 opcode in the top nibble, 12-bit offset field.
    word = offset & 4095 | 49152
    return struct.pack('<H', word)
|
binaryninja_avr
|
positive
|
@swagger_auto_schema(method='post', responses=with_common_response({status.HTTP_201_CREATED: ChainCodeIDSerializer}))
@action(detail=False, methods=['post'])
def approve_for_my_org(self, request):
    # Approve a chaincode definition for the caller's organization via the
    # Fabric lifecycle CLI wrapper (PeerChainCode).
    # NOTE(review): the <DeepExtract>...</DeepExtract> markers below are
    # dataset annotations, not Python syntax; this variant is data, not
    # runnable code.
    serializer = ChainCodeApproveForMyOrgBody(data=request.data)
    if serializer.is_valid(raise_exception=True):
        try:
            channel_name = serializer.validated_data.get('channel_name')
            chaincode_name = serializer.validated_data.get('chaincode_name')
            chaincode_version = serializer.validated_data.get('chaincode_version')
            policy = serializer.validated_data.get('policy')
            orderer_url = serializer.validated_data.get('orderer_url')
            sequence = serializer.validated_data.get('sequence')
            org = request.user.organization
            # The org must have an orderer node: its TLS CA cert is passed
            # to the lifecycle call below.
            qs = Node.objects.filter(type='orderer', organization=org)
            if not qs.exists():
                raise ResourceNotFound
            orderer_node = qs.first()
            orderer_tls_dir = '{}/{}/crypto-config/ordererOrganizations/{}/orderers/{}/msp/tlscacerts'.format(CELLO_HOME, org.name, org.name.split('.', 1)[1], orderer_node.name + '.' + org.name.split('.', 1)[1])
            orderer_tls_root_cert = ''
            # Take the first file of the tlscacerts directory; the break
            # stops os.walk after the top level.
            for (_, _, files) in os.walk(orderer_tls_dir):
                orderer_tls_root_cert = orderer_tls_dir + '/' + files[0]
                break
            qs = Node.objects.filter(type='peer', organization=org)
            if not qs.exists():
                raise ResourceNotFound
            peer_node = qs.first()
            <DeepExtract>
            # Inlined helper: build the peer CLI environment for this
            # org's peer node. The trailing `envs = envs` is a no-op left
            # over from the extraction.
            org_name = org.name
            org_domain = org_name.split('.', 1)[1]
            dir_certificate = '{}/{}/crypto-config/ordererOrganizations/{}'.format(CELLO_HOME, org_name, org_domain)
            dir_node = '{}/{}/crypto-config/peerOrganizations'.format(CELLO_HOME, org_name)
            envs = {'CORE_PEER_TLS_ENABLED': 'true', 'CORE_PEER_LOCALMSPID': '{}MSP'.format(org_name.capitalize()), 'CORE_PEER_TLS_ROOTCERT_FILE': '{}/{}/peers/{}/tls/ca.crt'.format(dir_node, org_name, peer_node.name + '.' + org_name), 'CORE_PEER_ADDRESS': '{}:{}'.format(peer_node.name + '.' + org_name, str(7051)), 'CORE_PEER_MSPCONFIGPATH': '{}/{}/users/Admin@{}/msp'.format(dir_node, org_name, org_name), 'FABRIC_CFG_PATH': '{}/{}/peers/{}/'.format(dir_node, org_name, peer_node.name + '.' + org_name), 'ORDERER_CA': '{}/msp/tlscacerts/tlsca.{}-cert.pem'.format(dir_certificate, org_domain)}
            envs = envs
            </DeepExtract>
            peer_channel_cli = PeerChainCode('v2.2.0', **envs)
            (code, content) = peer_channel_cli.lifecycle_approve_for_my_org(orderer_url, orderer_tls_root_cert, channel_name, chaincode_name, chaincode_version, policy, sequence)
            if code != 0:
                return Response(err(' lifecycle_approve_for_my_org failed. err: ' + content), status=status.HTTP_400_BAD_REQUEST)
        except Exception as e:
            # Broad catch at the API boundary: any failure becomes a 400.
            return Response(err(e.args), status=status.HTTP_400_BAD_REQUEST)
    return Response(ok('success'), status=status.HTTP_200_OK)
|
@swagger_auto_schema(method='post', responses=with_common_response({status.HTTP_201_CREATED: ChainCodeIDSerializer}))
@action(detail=False, methods=['post'])
def approve_for_my_org(self, request):
    """Approve a chaincode definition for the requesting user's organization.

    Drives the Fabric v2.x lifecycle step through the PeerChainCode CLI
    wrapper, using the org's first orderer node (for the TLS CA cert) and
    first peer node (for the CLI environment).

    Expects channel_name, chaincode_name, chaincode_version, policy,
    orderer_url and sequence in the request body
    (ChainCodeApproveForMyOrgBody).

    Returns HTTP 200 with ok('success') on success; HTTP 400 with the
    error detail when the CLI step or any lookup fails.
    """
    serializer = ChainCodeApproveForMyOrgBody(data=request.data)
    if serializer.is_valid(raise_exception=True):
        try:
            channel_name = serializer.validated_data.get('channel_name')
            chaincode_name = serializer.validated_data.get('chaincode_name')
            chaincode_version = serializer.validated_data.get('chaincode_version')
            policy = serializer.validated_data.get('policy')
            orderer_url = serializer.validated_data.get('orderer_url')
            sequence = serializer.validated_data.get('sequence')
            org = request.user.organization
            # The org must have an orderer node: its TLS CA cert is passed
            # to the lifecycle call below.
            qs = Node.objects.filter(type='orderer', organization=org)
            if not qs.exists():
                raise ResourceNotFound
            orderer_node = qs.first()
            orderer_tls_dir = '{}/{}/crypto-config/ordererOrganizations/{}/orderers/{}/msp/tlscacerts'.format(CELLO_HOME, org.name, org.name.split('.', 1)[1], orderer_node.name + '.' + org.name.split('.', 1)[1])
            orderer_tls_root_cert = ''
            # Take the first file of the tlscacerts directory; the break
            # stops os.walk after the top level.
            for (_, _, files) in os.walk(orderer_tls_dir):
                orderer_tls_root_cert = orderer_tls_dir + '/' + files[0]
                break
            qs = Node.objects.filter(type='peer', organization=org)
            if not qs.exists():
                raise ResourceNotFound
            peer_node = qs.first()
            # Build the peer CLI environment for this org's peer node
            # (a dead `envs = envs` no-op left over from inlining was
            # removed here).
            org_name = org.name
            org_domain = org_name.split('.', 1)[1]
            dir_certificate = '{}/{}/crypto-config/ordererOrganizations/{}'.format(CELLO_HOME, org_name, org_domain)
            dir_node = '{}/{}/crypto-config/peerOrganizations'.format(CELLO_HOME, org_name)
            envs = {'CORE_PEER_TLS_ENABLED': 'true', 'CORE_PEER_LOCALMSPID': '{}MSP'.format(org_name.capitalize()), 'CORE_PEER_TLS_ROOTCERT_FILE': '{}/{}/peers/{}/tls/ca.crt'.format(dir_node, org_name, peer_node.name + '.' + org_name), 'CORE_PEER_ADDRESS': '{}:{}'.format(peer_node.name + '.' + org_name, str(7051)), 'CORE_PEER_MSPCONFIGPATH': '{}/{}/users/Admin@{}/msp'.format(dir_node, org_name, org_name), 'FABRIC_CFG_PATH': '{}/{}/peers/{}/'.format(dir_node, org_name, peer_node.name + '.' + org_name), 'ORDERER_CA': '{}/msp/tlscacerts/tlsca.{}-cert.pem'.format(dir_certificate, org_domain)}
            peer_channel_cli = PeerChainCode('v2.2.0', **envs)
            (code, content) = peer_channel_cli.lifecycle_approve_for_my_org(orderer_url, orderer_tls_root_cert, channel_name, chaincode_name, chaincode_version, policy, sequence)
            if code != 0:
                return Response(err(' lifecycle_approve_for_my_org failed. err: ' + content), status=status.HTTP_400_BAD_REQUEST)
        except Exception as e:
            # Broad catch is deliberate at this API boundary: any failure
            # is reported as a 400 with the exception args.
            return Response(err(e.args), status=status.HTTP_400_BAD_REQUEST)
    return Response(ok('success'), status=status.HTTP_200_OK)
|
cello
|
positive
|
def add(self, node, is_seed, data_center=None):
    # Register *node* with the cluster, rewrite cluster.conf, propagate log
    # levels and topology, then save the node.
    # NOTE(review): the <DeepExtract>...</DeepExtract> markers below are
    # dataset annotations, not Python syntax; this variant is data, not
    # runnable code.
    if node.name in self.nodes:
        raise common.ArgumentError('Cannot create existing node %s' % node.name)
    self.nodes[node.name] = node
    if is_seed:
        self.seeds.append(node)
    <DeepExtract>
    # Inlined helper: persist the cluster layout to
    # <path>/<name>/cluster.conf as YAML.
    node_list = [node.name for node in list(self.nodes.values())]
    seed_list = self.get_seeds()
    filename = os.path.join(self.__path, self.name, 'cluster.conf')
    config_map = {'name': self.name, 'nodes': node_list, 'seeds': seed_list, 'partitioner': self.partitioner, 'install_dir': self.__install_dir, 'config_options': self._config_options, 'dse_config_options': self._dse_config_options, 'misc_config_options': self._misc_config_options, 'log_level': self.__log_level, 'use_vnodes': self.use_vnodes, 'datadirs': self.data_dir_count, 'environment_variables': self._environment_variables, 'cassandra_version': str(self.cassandra_version())}
    extension.append_to_cluster_config(self, config_map)
    with open(filename, 'w') as f:
        yaml.safe_dump(config_map, f)
    </DeepExtract>
    node.data_center = data_center
    if data_center is None:
        # Once any existing node has a DC, new nodes must name theirs too.
        for existing_node in self.nodelist():
            if existing_node.data_center is not None:
                raise common.ArgumentError('Please specify the DC this node should be added to')
    node.set_log_level(self.__log_level)
    for debug_class in self._debug:
        node.set_log_level('DEBUG', debug_class)
    for trace_class in self._trace:
        node.set_log_level('TRACE', trace_class)
    if data_center is not None:
        <DeepExtract>
        # Inlined topology refresh. NOTE(review): these loops rebind
        # `node`, so the node._save() below saves the last-iterated node
        # rather than the one being added — a clobbering bug introduced by
        # the extraction.
        dcs = [('default', 'dc1')]
        for node in self.nodelist():
            if node.data_center is not None:
                dcs.append((node.address(), node.data_center))
        for node in self.nodelist():
            node.update_topology(dcs)
        </DeepExtract>
    node._save()
    return self
|
def add(self, node, is_seed, data_center=None):
    """Add *node* to the cluster, persist cluster.conf, and save the node.

    :param node: the Node object to register (must not already exist).
    :param is_seed: when True, the node is also recorded as a seed.
    :param data_center: optional DC name; required once any existing node
        already has a data center assigned.
    :returns: self, so calls can be chained.
    :raises common.ArgumentError: if the node name already exists, or a DC
        is required but not given.
    """
    if node.name in self.nodes:
        raise common.ArgumentError('Cannot create existing node %s' % node.name)
    self.nodes[node.name] = node
    if is_seed:
        self.seeds.append(node)
    # Persist the cluster layout to <path>/<name>/cluster.conf as YAML.
    node_list = [n.name for n in list(self.nodes.values())]
    seed_list = self.get_seeds()
    filename = os.path.join(self.__path, self.name, 'cluster.conf')
    config_map = {'name': self.name, 'nodes': node_list, 'seeds': seed_list, 'partitioner': self.partitioner, 'install_dir': self.__install_dir, 'config_options': self._config_options, 'dse_config_options': self._dse_config_options, 'misc_config_options': self._misc_config_options, 'log_level': self.__log_level, 'use_vnodes': self.use_vnodes, 'datadirs': self.data_dir_count, 'environment_variables': self._environment_variables, 'cassandra_version': str(self.cassandra_version())}
    extension.append_to_cluster_config(self, config_map)
    with open(filename, 'w') as f:
        yaml.safe_dump(config_map, f)
    node.data_center = data_center
    if data_center is None:
        # Once any existing node has a DC, new nodes must name theirs too.
        for existing_node in self.nodelist():
            if existing_node.data_center is not None:
                raise common.ArgumentError('Please specify the DC this node should be added to')
    node.set_log_level(self.__log_level)
    for debug_class in self._debug:
        node.set_log_level('DEBUG', debug_class)
    for trace_class in self._trace:
        node.set_log_level('TRACE', trace_class)
    if data_center is not None:
        # Refresh every node's topology view. Use a distinct loop variable:
        # the inlined original rebound `node`, so the node._save() below
        # persisted the last node in nodelist() instead of the added one.
        dcs = [('default', 'dc1')]
        for cluster_node in self.nodelist():
            if cluster_node.data_center is not None:
                dcs.append((cluster_node.address(), cluster_node.data_center))
        for cluster_node in self.nodelist():
            cluster_node.update_topology(dcs)
    node._save()
    return self
|
ccm
|
positive
|
def compare_folders_structures(path_folder: PathLike, list_hashes: PathLike):
    """Compares the structure of a folder against a reference.
    Parameters
    ----------
    path_folder: Starting point for the tree listing.
    list_hashes: Path to the pickled hash dictionary.
    The dictionary is assumed to be of the form
    {/path/to/file.extension: hash(file.extension)}
    See Also
    --------
    compare_folders_with_hashes
    """
    import pickle
    hashes_check = pickle.load(open(list_hashes, 'rb'))
    <DeepExtract>
    # NOTE(review): the <DeepExtract>...</DeepExtract> markers are dataset
    # annotations, not Python syntax, and this inlined helper is mangled:
    # `file_as_bytes` assigns to a dead local instead of returning the
    # bytes, the dict comprehension ended up inside the helper's body, and
    # `extensions_to_keep` is an undefined free name.
    import hashlib
    def file_as_bytes(file):
        with file:
            hashes_new = file.read()
            hashes_new = {fname[len(str(path_folder)):]: str(hashlib.md5(file_as_bytes(open(fname, 'rb'))).digest()) for fname in list_files_with_extensions(path_folder, extensions_to_keep)}
    </DeepExtract>
    if set(hashes_check.keys()) != set(hashes_new.keys()):
        error_message1 = ''
        error_message2 = ''
        for key in hashes_check:
            if key not in hashes_new:
                error_message1 += f'{key} not found !\n'
        for key in hashes_new:
            if key not in hashes_check:
                error_message2 += f"{key}'s creation was not expected !\n"
        raise ValueError(error_message1 + error_message2)
|
def compare_folders_structures(path_folder: PathLike, list_hashes: PathLike):
    """Compares the structure of a folder against a reference.

    Parameters
    ----------
    path_folder: Starting point for the tree listing.

    list_hashes: Path to the pickled hash dictionary.
        The dictionary is assumed to be of the form
        {/path/to/file.extension: hash(file.extension)}

    Raises
    ------
    ValueError
        If the set of files under ``path_folder`` differs from the set of
        keys recorded in ``list_hashes``.

    See Also
    --------
    compare_folders_with_hashes
    """
    import hashlib
    import pickle

    # Close the reference file once loaded instead of leaking the handle.
    with open(list_hashes, 'rb') as fp:
        hashes_check = pickle.load(fp)

    def file_as_bytes(file):
        # Return the content: the inlined original assigned the bytes to a
        # dead local, so the helper returned None and md5() raised below.
        with file:
            return file.read()

    # NOTE(review): `extensions_to_keep` was an undefined free name in the
    # inlined original (NameError at runtime). This default mirrors the
    # helper it was extracted from — TODO confirm against the project's
    # create_list_hashes signature.
    extensions_to_keep = ('.nii.gz', '.tsv', '.json')
    hashes_new = {fname[len(str(path_folder)):]: str(hashlib.md5(file_as_bytes(open(fname, 'rb'))).digest()) for fname in list_files_with_extensions(path_folder, extensions_to_keep)}
    if set(hashes_check.keys()) != set(hashes_new.keys()):
        error_message1 = ''
        error_message2 = ''
        for key in hashes_check:
            if key not in hashes_new:
                error_message1 += f'{key} not found !\n'
        for key in hashes_new:
            if key not in hashes_check:
                error_message2 += f"{key}'s creation was not expected !\n"
        raise ValueError(error_message1 + error_message2)
|
clinica
|
positive
|
def setupUi(self, Dialog):
Dialog.setObjectName('Dialog')
Dialog.resize(1032, 736)
font = QtGui.QFont()
font.setPointSize(8)
Dialog.setFont(font)
self.verticalLayout_3 = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout_3.setObjectName('verticalLayout_3')
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout.setObjectName('verticalLayout')
self.label = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setUnderline(False)
font.setWeight(75)
self.label.setFont(font)
self.label.setScaledContents(False)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName('label')
self.verticalLayout.addWidget(self.label)
self.horizontalLayout_32 = QtWidgets.QHBoxLayout()
self.horizontalLayout_32.setObjectName('horizontalLayout_32')
self.credits = QtWidgets.QPushButton(Dialog)
self.credits.setObjectName('credits')
self.horizontalLayout_32.addWidget(self.credits)
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setObjectName('pushButton')
self.horizontalLayout_32.addWidget(self.pushButton)
self.verticalLayout.addLayout(self.horizontalLayout_32)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName('horizontalLayout_5')
self.patreonBtn = QtWidgets.QPushButton(Dialog)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(':/newPrefix/patreon.jpg'), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.patreonBtn.setIcon(icon)
self.patreonBtn.setIconSize(QtCore.QSize(32, 32))
self.patreonBtn.setObjectName('patreonBtn')
self.horizontalLayout_5.addWidget(self.patreonBtn)
self.discBtn = QtWidgets.QPushButton(Dialog)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(':/newPrefix/disc.jpg'), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.discBtn.setIcon(icon1)
self.discBtn.setIconSize(QtCore.QSize(32, 32))
self.discBtn.setObjectName('discBtn')
self.horizontalLayout_5.addWidget(self.discBtn)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setObjectName('horizontalLayout_15')
self.radioInputVideos = QtWidgets.QRadioButton(Dialog)
self.radioInputVideos.setChecked(True)
self.radioInputVideos.setObjectName('radioInputVideos')
self.horizontalLayout_15.addWidget(self.radioInputVideos)
self.radioInputPNG = QtWidgets.QRadioButton(Dialog)
self.radioInputPNG.setEnabled(True)
self.radioInputPNG.setObjectName('radioInputPNG')
self.horizontalLayout_15.addWidget(self.radioInputPNG)
self.radioResumeRender = QtWidgets.QRadioButton(Dialog)
self.radioResumeRender.setEnabled(True)
self.radioResumeRender.setObjectName('radioResumeRender')
self.horizontalLayout_15.addWidget(self.radioResumeRender)
self.verticalLayout.addLayout(self.horizontalLayout_15)
self._2 = QtWidgets.QHBoxLayout()
self._2.setObjectName('_2')
self.inputVideosLayout = QtWidgets.QFrame(Dialog)
self.inputVideosLayout.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.inputVideosLayout.setFrameShadow(QtWidgets.QFrame.Raised)
self.inputVideosLayout.setObjectName('inputVideosLayout')
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.inputVideosLayout)
self.horizontalLayout_2.setObjectName('horizontalLayout_2')
self.inputFileBtn = QtWidgets.QPushButton(self.inputVideosLayout)
self.inputFileBtn.setObjectName('inputFileBtn')
self.horizontalLayout_2.addWidget(self.inputFileBtn)
self.inputFileLabel = QtWidgets.QLabel(self.inputVideosLayout)
self.inputFileLabel.setScaledContents(False)
self.inputFileLabel.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.inputFileLabel.setObjectName('inputFileLabel')
self.horizontalLayout_2.addWidget(self.inputFileLabel)
self._2.addWidget(self.inputVideosLayout)
self.inputSequenceLayout_2 = QtWidgets.QFrame(Dialog)
self.inputSequenceLayout_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.inputSequenceLayout_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.inputSequenceLayout_2.setObjectName('inputSequenceLayout_2')
self.horizontalLayout_13 = QtWidgets.QHBoxLayout(self.inputSequenceLayout_2)
self.horizontalLayout_13.setObjectName('horizontalLayout_13')
self.inputFolderBtn = QtWidgets.QPushButton(self.inputSequenceLayout_2)
self.inputFolderBtn.setObjectName('inputFolderBtn')
self.horizontalLayout_13.addWidget(self.inputFolderBtn)
self.inputFolderLabel = QtWidgets.QLabel(self.inputSequenceLayout_2)
self.inputFolderLabel.setObjectName('inputFolderLabel')
self.horizontalLayout_13.addWidget(self.inputFolderLabel)
self._2.addWidget(self.inputSequenceLayout_2)
self.inputResumeLayout_2 = QtWidgets.QFrame(Dialog)
self.inputResumeLayout_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.inputResumeLayout_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.inputResumeLayout_2.setObjectName('inputResumeLayout_2')
self.horizontalLayout_16 = QtWidgets.QHBoxLayout(self.inputResumeLayout_2)
self.horizontalLayout_16.setObjectName('horizontalLayout_16')
self.inputResumeFolder = QtWidgets.QPushButton(self.inputResumeLayout_2)
self.inputResumeFolder.setObjectName('inputResumeFolder')
self.horizontalLayout_16.addWidget(self.inputResumeFolder)
self.inputResumeLabel = QtWidgets.QLabel(self.inputResumeLayout_2)
self.inputResumeLabel.setObjectName('inputResumeLabel')
self.horizontalLayout_16.addWidget(self.inputResumeLabel)
self._2.addWidget(self.inputResumeLayout_2)
self.verticalLayout.addLayout(self._2)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName('horizontalLayout_6')
self.exportType = QtWidgets.QComboBox(Dialog)
self.exportType.setEnabled(True)
self.exportType.setObjectName('exportType')
self.exportType.addItem('')
self.exportType.addItem('')
self.exportType.addItem('')
self.exportType.addItem('')
self.horizontalLayout_6.addWidget(self.exportType)
self.verticalLayout.addLayout(self.horizontalLayout_6)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName('horizontalLayout_3')
self.outputFolderBtn = QtWidgets.QPushButton(Dialog)
self.outputFolderBtn.setObjectName('outputFolderBtn')
self.horizontalLayout_3.addWidget(self.outputFolderBtn)
self.outputFolderLabel = QtWidgets.QLabel(Dialog)
self.outputFolderLabel.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.outputFolderLabel.setObjectName('outputFolderLabel')
self.horizontalLayout_3.addWidget(self.outputFolderLabel)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.tabWidget = QtWidgets.QTabWidget(Dialog)
self.tabWidget.setObjectName('tabWidget')
self.tab = QtWidgets.QWidget()
self.tab.setObjectName('tab')
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.tab)
self.verticalLayout_7.setObjectName('verticalLayout_7')
self.horizontalLayout_31 = QtWidgets.QHBoxLayout()
self.horizontalLayout_31.setObjectName('horizontalLayout_31')
self.label_27 = QtWidgets.QLabel(self.tab)
self.label_27.setObjectName('label_27')
self.horizontalLayout_31.addWidget(self.label_27)
self.flowModel = QtWidgets.QComboBox(self.tab)
self.flowModel.setObjectName('flowModel')
self.flowModel.addItem('')
self.horizontalLayout_31.addWidget(self.flowModel)
self.verticalLayout_7.addLayout(self.horizontalLayout_31)
self.label_18 = QtWidgets.QLabel(self.tab)
self.label_18.setObjectName('label_18')
self.verticalLayout_7.addWidget(self.label_18)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName('horizontalLayout_9')
self.label_6 = QtWidgets.QLabel(self.tab)
self.label_6.setObjectName('label_6')
self.horizontalLayout_9.addWidget(self.label_6)
self.interpolMethod = QtWidgets.QComboBox(self.tab)
self.interpolMethod.setModelColumn(0)
self.interpolMethod.setObjectName('interpolMethod')
self.interpolMethod.addItem('')
self.interpolMethod.addItem('')
self.interpolMethod.addItem('')
self.interpolMethod.addItem('')
self.horizontalLayout_9.addWidget(self.interpolMethod)
self.verticalLayout_7.addLayout(self.horizontalLayout_9)
self.modeDesc = QtWidgets.QLabel(self.tab)
self.modeDesc.setObjectName('modeDesc')
self.verticalLayout_7.addWidget(self.modeDesc)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_7.addItem(spacerItem)
self.tabWidget.addTab(self.tab, '')
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName('tab_2')
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.tab_2)
self.verticalLayout_6.setObjectName('verticalLayout_6')
self.useHalf = QtWidgets.QCheckBox(self.tab_2)
self.useHalf.setEnabled(True)
self.useHalf.setChecked(True)
self.useHalf.setObjectName('useHalf')
self.verticalLayout_6.addWidget(self.useHalf)
self.doBenchmark = QtWidgets.QCheckBox(self.tab_2)
self.doBenchmark.setChecked(True)
self.doBenchmark.setObjectName('doBenchmark')
self.verticalLayout_6.addWidget(self.doBenchmark)
self.horizontalLayout_30 = QtWidgets.QHBoxLayout()
self.horizontalLayout_30.setObjectName('horizontalLayout_30')
self.label_32 = QtWidgets.QLabel(self.tab_2)
self.label_32.setObjectName('label_32')
self.horizontalLayout_30.addWidget(self.label_32)
self.batchSize = QtWidgets.QSpinBox(self.tab_2)
self.batchSize.setMinimum(1)
self.batchSize.setObjectName('batchSize')
self.horizontalLayout_30.addWidget(self.batchSize)
self.verticalLayout_6.addLayout(self.horizontalLayout_30)
self.horizontalLayout_18 = QtWidgets.QHBoxLayout()
self.horizontalLayout_18.setObjectName('horizontalLayout_18')
self.label_16 = QtWidgets.QLabel(self.tab_2)
self.label_16.setObjectName('label_16')
self.horizontalLayout_18.addWidget(self.label_16)
self.deviceList = QtWidgets.QComboBox(self.tab_2)
self.deviceList.setObjectName('deviceList')
self.horizontalLayout_18.addWidget(self.deviceList)
self.verticalLayout_6.addLayout(self.horizontalLayout_18)
self.horizontalLayout_17 = QtWidgets.QHBoxLayout()
self.horizontalLayout_17.setObjectName('horizontalLayout_17')
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName('verticalLayout_2')
self.label_7 = QtWidgets.QLabel(self.tab_2)
self.label_7.setObjectName('label_7')
self.verticalLayout_2.addWidget(self.label_7)
self.animMethod = QtWidgets.QComboBox(self.tab_2)
self.animMethod.setObjectName('animMethod')
self.animMethod.addItem('')
self.animMethod.addItem('')
self.verticalLayout_2.addWidget(self.animMethod)
self.horizontalLayout_17.addLayout(self.verticalLayout_2)
self.horizontalLayout_20 = QtWidgets.QHBoxLayout()
self.horizontalLayout_20.setObjectName('horizontalLayout_20')
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName('verticalLayout_4')
self.label_12 = QtWidgets.QLabel(self.tab_2)
self.label_12.setObjectName('label_12')
self.verticalLayout_4.addWidget(self.label_12)
self.intAlgo = QtWidgets.QComboBox(self.tab_2)
self.intAlgo.setObjectName('intAlgo')
self.intAlgo.addItem('')
self.intAlgo.addItem('')
self.verticalLayout_4.addWidget(self.intAlgo)
self.horizontalLayout_20.addLayout(self.verticalLayout_4)
self.horizontalLayout_17.addLayout(self.horizontalLayout_20)
self.verticalLayout_6.addLayout(self.horizontalLayout_17)
self.line_3 = QtWidgets.QFrame(self.tab_2)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName('line_3')
self.verticalLayout_6.addWidget(self.line_3)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName('horizontalLayout')
self.label_2 = QtWidgets.QLabel(self.tab_2)
self.label_2.setObjectName('label_2')
self.horizontalLayout.addWidget(self.label_2)
self.fpsInput = QtWidgets.QLineEdit(self.tab_2)
self.fpsInput.setObjectName('fpsInput')
self.horizontalLayout.addWidget(self.fpsInput)
self.interpolationLevel = QtWidgets.QComboBox(self.tab_2)
self.interpolationLevel.setMinimumSize(QtCore.QSize(100, 0))
self.interpolationLevel.setModelColumn(0)
self.interpolationLevel.setObjectName('interpolationLevel')
self.interpolationLevel.addItem('')
self.interpolationLevel.addItem('')
self.interpolationLevel.addItem('')
self.horizontalLayout.addWidget(self.interpolationLevel)
self.label_5 = QtWidgets.QLabel(self.tab_2)
self.label_5.setObjectName('label_5')
self.horizontalLayout.addWidget(self.label_5)
self.outputFps = QtWidgets.QLineEdit(self.tab_2)
self.outputFps.setReadOnly(True)
self.outputFps.setObjectName('outputFps')
self.horizontalLayout.addWidget(self.outputFps)
self.verticalLayout_6.addLayout(self.horizontalLayout)
self.label_17 = QtWidgets.QLabel(self.tab_2)
self.label_17.setObjectName('label_17')
self.verticalLayout_6.addWidget(self.label_17)
self.line_6 = QtWidgets.QFrame(self.tab_2)
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName('line_6')
self.verticalLayout_6.addWidget(self.line_6)
self.horizontalLayout_26 = QtWidgets.QHBoxLayout()
self.horizontalLayout_26.setObjectName('horizontalLayout_26')
self.label_23 = QtWidgets.QLabel(self.tab_2)
self.label_23.setObjectName('label_23')
self.horizontalLayout_26.addWidget(self.label_23)
self.pngCompress = QtWidgets.QLineEdit(self.tab_2)
self.pngCompress.setObjectName('pngCompress')
self.horizontalLayout_26.addWidget(self.pngCompress)
self.label_24 = QtWidgets.QLabel(self.tab_2)
self.label_24.setObjectName('label_24')
self.horizontalLayout_26.addWidget(self.label_24)
self.verticalLayout_6.addLayout(self.horizontalLayout_26)
self.label_26 = QtWidgets.QLabel(self.tab_2)
self.label_26.setObjectName('label_26')
self.verticalLayout_6.addWidget(self.label_26)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_6.addItem(spacerItem1)
self.tabWidget.addTab(self.tab_2, '')
self.tab_8 = QtWidgets.QWidget()
self.tab_8.setObjectName('tab_8')
self.verticalLayout_15 = QtWidgets.QVBoxLayout(self.tab_8)
self.verticalLayout_15.setObjectName('verticalLayout_15')
self.horizontalLayout_25 = QtWidgets.QHBoxLayout()
self.horizontalLayout_25.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.horizontalLayout_25.setObjectName('horizontalLayout_25')
self.label_22 = QtWidgets.QLabel(self.tab_8)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_22.sizePolicy().hasHeightForWidth())
self.label_22.setSizePolicy(sizePolicy)
self.label_22.setObjectName('label_22')
self.horizontalLayout_25.addWidget(self.label_22)
self.crfVal = QtWidgets.QLineEdit(self.tab_8)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.crfVal.sizePolicy().hasHeightForWidth())
self.crfVal.setSizePolicy(sizePolicy)
self.crfVal.setObjectName('crfVal')
self.horizontalLayout_25.addWidget(self.crfVal)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_25.addItem(spacerItem2)
self.verticalLayout_15.addLayout(self.horizontalLayout_25)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_15.addItem(spacerItem3)
self.tabWidget.addTab(self.tab_8, '')
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName('tab_3')
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.tab_3)
self.verticalLayout_8.setObjectName('verticalLayout_8')
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setObjectName('horizontalLayout_14')
self.dontInterpolateScenes = QtWidgets.QCheckBox(self.tab_3)
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.dontInterpolateScenes.setFont(font)
self.dontInterpolateScenes.setObjectName('dontInterpolateScenes')
self.horizontalLayout_14.addWidget(self.dontInterpolateScenes)
self.label_11 = QtWidgets.QLabel(self.tab_3)
self.label_11.setObjectName('label_11')
self.horizontalLayout_14.addWidget(self.label_11)
self.lineEdit_2 = QtWidgets.QLineEdit(self.tab_3)
self.lineEdit_2.setObjectName('lineEdit_2')
self.horizontalLayout_14.addWidget(self.lineEdit_2)
self.verifyScenes = QtWidgets.QPushButton(self.tab_3)
self.verifyScenes.setObjectName('verifyScenes')
self.horizontalLayout_14.addWidget(self.verifyScenes)
self.verticalLayout_8.addLayout(self.horizontalLayout_14)
self.line_5 = QtWidgets.QFrame(self.tab_3)
self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName('line_5')
self.verticalLayout_8.addWidget(self.line_5)
self.horizontalLayout_19 = QtWidgets.QHBoxLayout()
self.horizontalLayout_19.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.horizontalLayout_19.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_19.setObjectName('horizontalLayout_19')
self.cleanInterpol = QtWidgets.QCheckBox(self.tab_3)
self.cleanInterpol.setObjectName('cleanInterpol')
self.horizontalLayout_19.addWidget(self.cleanInterpol)
self.verticalLayout_8.addLayout(self.horizontalLayout_19)
self.horizontalLayout_21 = QtWidgets.QHBoxLayout()
self.horizontalLayout_21.setObjectName('horizontalLayout_21')
self.perfectLoop = QtWidgets.QCheckBox(self.tab_3)
self.perfectLoop.setObjectName('perfectLoop')
self.horizontalLayout_21.addWidget(self.perfectLoop)
self.audioVersion = QtWidgets.QCheckBox(self.tab_3)
self.audioVersion.setObjectName('audioVersion')
self.horizontalLayout_21.addWidget(self.audioVersion)
self.verticalLayout_8.addLayout(self.horizontalLayout_21)
self.line_4 = QtWidgets.QFrame(self.tab_3)
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName('line_4')
self.verticalLayout_8.addWidget(self.line_4)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setSpacing(6)
self.horizontalLayout_12.setObjectName('horizontalLayout_12')
self.label_10 = QtWidgets.QLabel(self.tab_3)
self.label_10.setObjectName('label_10')
self.horizontalLayout_12.addWidget(self.label_10)
self.fpsLimit = QtWidgets.QLineEdit(self.tab_3)
self.fpsLimit.setMaximumSize(QtCore.QSize(100000, 16777215))
self.fpsLimit.setLayoutDirection(QtCore.Qt.LeftToRight)
self.fpsLimit.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.fpsLimit.setObjectName('fpsLimit')
self.horizontalLayout_12.addWidget(self.fpsLimit)
self.verticalLayout_8.addLayout(self.horizontalLayout_12)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName('horizontalLayout_10')
self.to60 = QtWidgets.QCheckBox(self.tab_3)
self.to60.setObjectName('to60')
self.horizontalLayout_10.addWidget(self.to60)
self.to60C1 = QtWidgets.QCheckBox(self.tab_3)
self.to60C1.setObjectName('to60C1')
self.horizontalLayout_10.addWidget(self.to60C1)
self.to60C2 = QtWidgets.QCheckBox(self.tab_3)
self.to60C2.setObjectName('to60C2')
self.horizontalLayout_10.addWidget(self.to60C2)
self.verticalLayout_8.addLayout(self.horizontalLayout_10)
spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_8.addItem(spacerItem4)
self.tabWidget.addTab(self.tab_3, '')
self.PixelArt = QtWidgets.QWidget()
self.PixelArt.setObjectName('PixelArt')
self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.PixelArt)
self.verticalLayout_14.setObjectName('verticalLayout_14')
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName('verticalLayout_5')
self.Alpha = QtWidgets.QLabel(self.PixelArt)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Alpha.sizePolicy().hasHeightForWidth())
self.Alpha.setSizePolicy(sizePolicy)
self.Alpha.setObjectName('Alpha')
self.verticalLayout_5.addWidget(self.Alpha)
self.alphaOpt = QtWidgets.QComboBox(self.PixelArt)
self.alphaOpt.setObjectName('alphaOpt')
self.alphaOpt.addItem('')
self.alphaOpt.addItem('')
self.verticalLayout_5.addWidget(self.alphaOpt)
self.verticalLayout_14.addLayout(self.verticalLayout_5)
self.limitPalette = QtWidgets.QCheckBox(self.PixelArt)
self.limitPalette.setObjectName('limitPalette')
self.verticalLayout_14.addWidget(self.limitPalette)
self.checkBox = QtWidgets.QCheckBox(self.PixelArt)
self.checkBox.setEnabled(False)
self.checkBox.setObjectName('checkBox')
self.verticalLayout_14.addWidget(self.checkBox)
self.checkBox_2 = QtWidgets.QCheckBox(self.PixelArt)
self.checkBox_2.setEnabled(False)
self.checkBox_2.setObjectName('checkBox_2')
self.verticalLayout_14.addWidget(self.checkBox_2)
self.horizontalLayout_22 = QtWidgets.QHBoxLayout()
self.horizontalLayout_22.setObjectName('horizontalLayout_22')
self.pixelUpscaleDowscaleBefore = QtWidgets.QComboBox(self.PixelArt)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pixelUpscaleDowscaleBefore.sizePolicy().hasHeightForWidth())
self.pixelUpscaleDowscaleBefore.setSizePolicy(sizePolicy)
self.pixelUpscaleDowscaleBefore.setObjectName('pixelUpscaleDowscaleBefore')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.horizontalLayout_22.addWidget(self.pixelUpscaleDowscaleBefore)
self.label_19 = QtWidgets.QLabel(self.PixelArt)
self.label_19.setObjectName('label_19')
self.horizontalLayout_22.addWidget(self.label_19)
self.verticalLayout_14.addLayout(self.horizontalLayout_22)
self.horizontalLayout_23 = QtWidgets.QHBoxLayout()
self.horizontalLayout_23.setObjectName('horizontalLayout_23')
self.pixelDownscaleUpscaleAfter = QtWidgets.QComboBox(self.PixelArt)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pixelDownscaleUpscaleAfter.sizePolicy().hasHeightForWidth())
self.pixelDownscaleUpscaleAfter.setSizePolicy(sizePolicy)
self.pixelDownscaleUpscaleAfter.setObjectName('pixelDownscaleUpscaleAfter')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.horizontalLayout_23.addWidget(self.pixelDownscaleUpscaleAfter)
self.label_20 = QtWidgets.QLabel(self.PixelArt)
self.label_20.setObjectName('label_20')
self.horizontalLayout_23.addWidget(self.label_20)
self.verticalLayout_14.addLayout(self.horizontalLayout_23)
self.horizontalLayout_24 = QtWidgets.QHBoxLayout()
self.horizontalLayout_24.setObjectName('horizontalLayout_24')
self.pixelUpscaleAfter = QtWidgets.QComboBox(self.PixelArt)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pixelUpscaleAfter.sizePolicy().hasHeightForWidth())
self.pixelUpscaleAfter.setSizePolicy(sizePolicy)
self.pixelUpscaleAfter.setObjectName('pixelUpscaleAfter')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.horizontalLayout_24.addWidget(self.pixelUpscaleAfter)
self.label_21 = QtWidgets.QLabel(self.PixelArt)
self.label_21.setObjectName('label_21')
self.horizontalLayout_24.addWidget(self.label_21)
self.verticalLayout_14.addLayout(self.horizontalLayout_24)
self.tabWidget.addTab(self.PixelArt, '')
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName('tab_4')
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.tab_4)
self.verticalLayout_10.setObjectName('verticalLayout_10')
self.label_15 = QtWidgets.QLabel(self.tab_4)
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.label_15.setFont(font)
self.label_15.setObjectName('label_15')
self.verticalLayout_10.addWidget(self.label_15)
self.label_14 = QtWidgets.QLabel(self.tab_4)
self.label_14.setObjectName('label_14')
self.verticalLayout_10.addWidget(self.label_14)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName('horizontalLayout_4')
self.useResize = QtWidgets.QCheckBox(self.tab_4)
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.useResize.setFont(font)
self.useResize.setToolTip('')
self.useResize.setToolTipDuration(-1)
self.useResize.setStatusTip('')
self.useResize.setInputMethodHints(QtCore.Qt.ImhNone)
self.useResize.setObjectName('useResize')
self.horizontalLayout_4.addWidget(self.useResize)
self.label_3 = QtWidgets.QLabel(self.tab_4)
self.label_3.setObjectName('label_3')
self.horizontalLayout_4.addWidget(self.label_3)
self.widthValue = QtWidgets.QLineEdit(self.tab_4)
self.widthValue.setInputMask('')
self.widthValue.setText('')
self.widthValue.setObjectName('widthValue')
self.horizontalLayout_4.addWidget(self.widthValue)
self.verticalLayout_10.addLayout(self.horizontalLayout_4)
self.label_13 = QtWidgets.QLabel(self.tab_4)
self.label_13.setObjectName('label_13')
self.verticalLayout_10.addWidget(self.label_13)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName('horizontalLayout_7')
self.useSplit = QtWidgets.QCheckBox(self.tab_4)
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.useSplit.setFont(font)
self.useSplit.setObjectName('useSplit')
self.horizontalLayout_7.addWidget(self.useSplit)
self.label_8 = QtWidgets.QLabel(self.tab_4)
self.label_8.setObjectName('label_8')
self.horizontalLayout_7.addWidget(self.label_8)
self.splitSizeX = QtWidgets.QLineEdit(self.tab_4)
self.splitSizeX.setToolTip('')
self.splitSizeX.setWhatsThis('')
self.splitSizeX.setAutoFillBackground(False)
self.splitSizeX.setInputMask('')
self.splitSizeX.setObjectName('splitSizeX')
self.horizontalLayout_7.addWidget(self.splitSizeX)
self.label_25 = QtWidgets.QLabel(self.tab_4)
self.label_25.setObjectName('label_25')
self.horizontalLayout_7.addWidget(self.label_25)
self.splitSizeY = QtWidgets.QLineEdit(self.tab_4)
self.splitSizeY.setObjectName('splitSizeY')
self.horizontalLayout_7.addWidget(self.splitSizeY)
self.label_9 = QtWidgets.QLabel(self.tab_4)
self.label_9.setObjectName('label_9')
self.horizontalLayout_7.addWidget(self.label_9)
self.splitPad = QtWidgets.QLineEdit(self.tab_4)
self.splitPad.setObjectName('splitPad')
self.horizontalLayout_7.addWidget(self.splitPad)
self.verticalLayout_10.addLayout(self.horizontalLayout_7)
spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_10.addItem(spacerItem5)
self.tabWidget.addTab(self.tab_4, '')
self.tab_6 = QtWidgets.QWidget()
self.tab_6.setObjectName('tab_6')
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.tab_6)
self.verticalLayout_11.setObjectName('verticalLayout_11')
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName('horizontalLayout_8')
self.label_4 = QtWidgets.QLabel(self.tab_6)
self.label_4.setObjectName('label_4')
self.horizontalLayout_8.addWidget(self.label_4)
self.brightBtn = QtWidgets.QPushButton(self.tab_6)
self.brightBtn.setObjectName('brightBtn')
self.horizontalLayout_8.addWidget(self.brightBtn)
self.darkBtn = QtWidgets.QPushButton(self.tab_6)
self.darkBtn.setObjectName('darkBtn')
self.horizontalLayout_8.addWidget(self.darkBtn)
self.verticalLayout_11.addLayout(self.horizontalLayout_8)
self.horizontalLayout_28 = QtWidgets.QHBoxLayout()
self.horizontalLayout_28.setObjectName('horizontalLayout_28')
self.verticalLayout_17 = QtWidgets.QVBoxLayout()
self.verticalLayout_17.setObjectName('verticalLayout_17')
self.placeholderList = QtWidgets.QComboBox(self.tab_6)
self.placeholderList.setObjectName('placeholderList')
self.verticalLayout_17.addWidget(self.placeholderList)
self.horizontalLayout_29 = QtWidgets.QHBoxLayout()
self.horizontalLayout_29.setObjectName('horizontalLayout_29')
self.loadPlace = QtWidgets.QPushButton(self.tab_6)
self.loadPlace.setObjectName('loadPlace')
self.horizontalLayout_29.addWidget(self.loadPlace)
self.deletePlace = QtWidgets.QPushButton(self.tab_6)
self.deletePlace.setObjectName('deletePlace')
self.horizontalLayout_29.addWidget(self.deletePlace)
self.verticalLayout_17.addLayout(self.horizontalLayout_29)
self.horizontalLayout_28.addLayout(self.verticalLayout_17)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_28.addItem(spacerItem6)
self.verticalLayout_16 = QtWidgets.QVBoxLayout()
self.verticalLayout_16.setObjectName('verticalLayout_16')
self.placeholderName = QtWidgets.QLineEdit(self.tab_6)
self.placeholderName.setText('')
self.placeholderName.setObjectName('placeholderName')
self.verticalLayout_16.addWidget(self.placeholderName)
self.savePlace = QtWidgets.QPushButton(self.tab_6)
self.savePlace.setObjectName('savePlace')
self.verticalLayout_16.addWidget(self.savePlace)
self.horizontalLayout_28.addLayout(self.verticalLayout_16)
self.verticalLayout_11.addLayout(self.horizontalLayout_28)
spacerItem7 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_11.addItem(spacerItem7)
self.tabWidget.addTab(self.tab_6, '')
self.tab_5 = QtWidgets.QWidget()
self.tab_5.setObjectName('tab_5')
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.tab_5)
self.verticalLayout_9.setObjectName('verticalLayout_9')
self.textEdit = QtWidgets.QTextEdit(self.tab_5)
self.textEdit.setEnabled(True)
self.textEdit.setReadOnly(True)
self.textEdit.setObjectName('textEdit')
self.verticalLayout_9.addWidget(self.textEdit)
self.tabWidget.addTab(self.tab_5, '')
self.tab_7 = QtWidgets.QWidget()
self.tab_7.setObjectName('tab_7')
self.verticalLayout_13 = QtWidgets.QVBoxLayout(self.tab_7)
self.verticalLayout_13.setObjectName('verticalLayout_13')
self.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.verticalLayout_12.setObjectName('verticalLayout_12')
self.verticalLayout_13.addLayout(self.verticalLayout_12)
self.fastMode = QtWidgets.QCheckBox(self.tab_7)
self.fastMode.setObjectName('fastMode')
self.verticalLayout_13.addWidget(self.fastMode)
self.dontCleanCache = QtWidgets.QCheckBox(self.tab_7)
self.dontCleanCache.setObjectName('dontCleanCache')
self.verticalLayout_13.addWidget(self.dontCleanCache)
self.ffmpegPrint = QtWidgets.QCheckBox(self.tab_7)
self.ffmpegPrint.setObjectName('ffmpegPrint')
self.verticalLayout_13.addWidget(self.ffmpegPrint)
self.onlyInterpolateMissing = QtWidgets.QCheckBox(self.tab_7)
self.onlyInterpolateMissing.setObjectName('onlyInterpolateMissing')
self.verticalLayout_13.addWidget(self.onlyInterpolateMissing)
self.debugKeepDuplicates = QtWidgets.QCheckBox(self.tab_7)
self.debugKeepDuplicates.setObjectName('debugKeepDuplicates')
self.verticalLayout_13.addWidget(self.debugKeepDuplicates)
self.line_8 = QtWidgets.QFrame(self.tab_7)
self.line_8.setFrameShape(QtWidgets.QFrame.HLine)
self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_8.setObjectName('line_8')
self.verticalLayout_13.addWidget(self.line_8)
self.line_7 = QtWidgets.QFrame(self.tab_7)
self.line_7.setFrameShape(QtWidgets.QFrame.HLine)
self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_7.setObjectName('line_7')
self.verticalLayout_13.addWidget(self.line_7)
self.label_31 = QtWidgets.QLabel(self.tab_7)
self.label_31.setObjectName('label_31')
self.verticalLayout_13.addWidget(self.label_31)
self.horizontalLayout_27 = QtWidgets.QHBoxLayout()
self.horizontalLayout_27.setObjectName('horizontalLayout_27')
self.label_30 = QtWidgets.QLabel(self.tab_7)
self.label_30.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label_30.setObjectName('label_30')
self.horizontalLayout_27.addWidget(self.label_30)
self.smoothFlow = QtWidgets.QSpinBox(self.tab_7)
self.smoothFlow.setObjectName('smoothFlow')
self.horizontalLayout_27.addWidget(self.smoothFlow)
self.label_29 = QtWidgets.QLabel(self.tab_7)
self.label_29.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label_29.setObjectName('label_29')
self.horizontalLayout_27.addWidget(self.label_29)
self.flowForce = QtWidgets.QSpinBox(self.tab_7)
self.flowForce.setMinimum(0)
self.flowForce.setMaximum(40)
self.flowForce.setProperty('value', 5)
self.flowForce.setObjectName('flowForce')
self.horizontalLayout_27.addWidget(self.flowForce)
self.verticalLayout_13.addLayout(self.horizontalLayout_27)
spacerItem8 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_13.addItem(spacerItem8)
self.tabWidget.addTab(self.tab_7, '')
self.verticalLayout.addWidget(self.tabWidget)
self.progressBar = QtWidgets.QProgressBar(Dialog)
self.progressBar.setProperty('value', 0)
self.progressBar.setObjectName('progressBar')
self.verticalLayout.addWidget(self.progressBar)
self.line_2 = QtWidgets.QFrame(Dialog)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName('line_2')
self.verticalLayout.addWidget(self.line_2)
self.renderBtn = QtWidgets.QPushButton(Dialog)
self.renderBtn.setObjectName('renderBtn')
self.verticalLayout.addWidget(self.renderBtn)
self.verticalLayout.setStretch(10, 1)
self.verticalLayout_3.addLayout(self.verticalLayout)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setObjectName('horizontalLayout_11')
self.renderFrames = QtWidgets.QPushButton(Dialog)
self.renderFrames.setObjectName('renderFrames')
self.horizontalLayout_11.addWidget(self.renderFrames)
self.renderInterpolation = QtWidgets.QPushButton(Dialog)
self.renderInterpolation.setObjectName('renderInterpolation')
self.horizontalLayout_11.addWidget(self.renderInterpolation)
self.renderVideo = QtWidgets.QPushButton(Dialog)
self.renderVideo.setObjectName('renderVideo')
self.horizontalLayout_11.addWidget(self.renderVideo)
self.verticalLayout_3.addLayout(self.horizontalLayout_11)
self.line = QtWidgets.QFrame(Dialog)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName('line')
self.verticalLayout_3.addWidget(self.line)
        # --- begin inlined retranslateUi(Dialog) body ---
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate('Dialog', 'Dialog'))
self.label.setText(_translate('Dialog', 'DAIN-APP _ #DAINAPP'))
self.credits.setText(_translate('Dialog', 'Credits'))
self.pushButton.setText(_translate('Dialog', 'Patrons that made this app possible.'))
self.patreonBtn.setText(_translate('Dialog', 'Support the App on Patreon'))
self.discBtn.setText(_translate('Dialog', 'Share results on Discord'))
self.radioInputVideos.setText(_translate('Dialog', 'Input Video(s)'))
self.radioInputPNG.setText(_translate('Dialog', 'Input PNG Sequence'))
self.radioResumeRender.setText(_translate('Dialog', 'Resume Render'))
self.inputFileBtn.setText(_translate('Dialog', 'Input File(s)'))
self.inputFileLabel.setText(_translate('Dialog', 'Input File(s) Path'))
self.inputFolderBtn.setText(_translate('Dialog', 'Input PNG Sequence Folder'))
self.inputFolderLabel.setText(_translate('Dialog', 'Input Folder Path'))
self.inputResumeFolder.setText(_translate('Dialog', 'Render Folder'))
self.inputResumeLabel.setText(_translate('Dialog', 'Resume Render Folder'))
self.exportType.setItemText(0, _translate('Dialog', 'Export as Mp4'))
self.exportType.setItemText(1, _translate('Dialog', 'Export as WebM'))
self.exportType.setItemText(2, _translate('Dialog', 'Export as GIF'))
self.exportType.setItemText(3, _translate('Dialog', 'Export as APNG'))
self.outputFolderBtn.setText(_translate('Dialog', 'Output Folder'))
self.outputFolderLabel.setText(_translate('Dialog', 'Selected Output Folder'))
self.label_27.setText(_translate('Dialog', 'Model:'))
self.flowModel.setItemText(0, _translate('Dialog', 'Default Flow Model'))
self.label_18.setText(_translate('Dialog', "This variable don't affect the input if it's a PNG sequence."))
self.label_6.setToolTip(_translate('Dialog', 'Select how the app will select and use the original frames from the video.'))
self.label_6.setText(_translate('Dialog', 'Frames Handling Mode:'))
self.interpolMethod.setToolTip(_translate('Dialog', 'Select how the app will select and use the original frames from the video.'))
self.interpolMethod.setCurrentText(_translate('Dialog', 'Mode 1: Default (all frames treated the same)'))
self.interpolMethod.setItemText(0, _translate('Dialog', 'Mode 1: Default (all frames treated the same)'))
self.interpolMethod.setItemText(1, _translate('Dialog', 'Mode 2: Remove duplicate frames (may alter animation speed)'))
self.interpolMethod.setItemText(2, _translate('Dialog', "Mode 3: Adaptative Record timestamps then remove duplicate frames (won't alter animation speed)"))
self.interpolMethod.setItemText(3, _translate('Dialog', "Mode 4: Static Record timestamps then remove duplicate frames (won't alter animation speed)"))
self.modeDesc.setText(_translate('Dialog', 'Mode description: asdasda \nsdadasdadsa'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate('Dialog', 'Frame Handling Options'))
self.useHalf.setToolTip(_translate('Dialog', 'This option convert float32 to float16 during calculations, this can save some memory and also speed up some calculations.'))
self.useHalf.setText(_translate('Dialog', 'Use Half-Precision float'))
self.doBenchmark.setToolTip(_translate('Dialog', 'Pytorch will try to optimize the algorithm with your configurations, it take a little longer to start but it can boost perfomance as well.'))
self.doBenchmark.setText(_translate('Dialog', 'Use Pytorch benchmark'))
self.label_32.setToolTip(_translate('Dialog', 'How much frames to process in parallel.'))
self.label_32.setText(_translate('Dialog', 'Batch Size:'))
self.batchSize.setToolTip(_translate('Dialog', 'How much frames to process in parallel.'))
self.label_16.setToolTip(_translate('Dialog', 'Select what card you want to use to process the video.'))
self.label_16.setText(_translate('Dialog', 'Device to use:'))
self.deviceList.setToolTip(_translate('Dialog', 'Select what card you want to use to process the video.'))
self.label_7.setText(_translate('Dialog', 'Depth Awareness Mode:'))
self.animMethod.setItemText(0, _translate('Dialog', 'Real life or 3D: Media with clear depth perception.'))
self.animMethod.setItemText(1, _translate('Dialog', 'Cartoon or anime: Media with little or no depth.'))
self.label_12.setText(_translate('Dialog', 'Interpolation Algorithm:'))
self.intAlgo.setItemText(0, _translate('Dialog', 'Default: 2X/4X/8X; Less Memory; Slower; Cleaner Results'))
self.intAlgo.setItemText(1, _translate('Dialog', '[Not Working] Experimental: 2X/3X/4X/5X/6X/7X/8X; More Memory; Faster; May generate more artifacts;'))
self.label_2.setToolTip(_translate('Dialog', 'Show what the fps is of the selected file.'))
self.label_2.setText(_translate('Dialog', 'Input FPS:'))
self.fpsInput.setToolTip(_translate('Dialog', 'Show what the fps is of the selected file.'))
self.interpolationLevel.setItemText(0, _translate('Dialog', 'Interpolate 2X'))
self.interpolationLevel.setItemText(1, _translate('Dialog', 'Interpolate 4X'))
self.interpolationLevel.setItemText(2, _translate('Dialog', 'Interpolate 8X'))
self.label_5.setText(_translate('Dialog', 'Output FPS: '))
self.label_17.setText(_translate('Dialog', 'Note: GIFS above 50FPS will slow down. (This is a gif limitation)'))
self.label_23.setText(_translate('Dialog', 'PNG files compression:'))
self.pngCompress.setText(_translate('Dialog', '6'))
self.label_24.setText(_translate('Dialog', '1 gives best speed, 9 gives best compression, 0 gives no compression at all'))
self.label_26.setText(_translate('Dialog', 'Not applied when extracting original_frames from mp4'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate('Dialog', 'Interpolation Options'))
self.label_22.setText(_translate('Dialog', '[MP4 ] CRF value:'))
self.crfVal.setToolTip(_translate('Dialog', 'Change the quality of the output video. 16 usually is fine.'))
self.crfVal.setText(_translate('Dialog', '16'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_8), _translate('Dialog', 'Output Options'))
self.dontInterpolateScenes.setToolTip(_translate('Dialog', 'In case of a scene change, interpolation will not occur. Make this value bigger to detect more scenes as a scene change.'))
self.dontInterpolateScenes.setText(_translate('Dialog', "Don't interpolate scenes changes"))
self.label_11.setToolTip(_translate('Dialog', 'In case of a scene change, interpolation will not occur. Make this value bigger to detect more scenes as a scene change.'))
self.label_11.setText(_translate('Dialog', 'Detection sensitivity:'))
self.lineEdit_2.setToolTip(_translate('Dialog', 'In case of a scene change, interpolation will not occur. Make this value bigger to detect more scenes as a scene change.'))
self.lineEdit_2.setInputMask(_translate('Dialog', '000'))
self.lineEdit_2.setText(_translate('Dialog', '10'))
self.verifyScenes.setText(_translate('Dialog', 'Verify Scenes Changes'))
self.cleanInterpol.setToolTip(_translate('Dialog', 'If there is files on the interpolation folder, it will remove before starting the process'))
self.cleanInterpol.setText(_translate('Dialog', 'Delete interpolation Folder At Start'))
self.perfectLoop.setToolTip(_translate('Dialog', 'Interpolate the last frame with the first frame of the movie'))
self.perfectLoop.setText(_translate('Dialog', 'Perfect loop animation [The animation repeat in a perfect loop]'))
self.audioVersion.setText(_translate('Dialog', 'Create a output with audio.'))
self.label_10.setToolTip(_translate('Dialog', 'Create another output video limiting the fps to X'))
self.label_10.setText(_translate('Dialog', 'If FPS exceeds this value. Create another version with this fps. [FPS] = '))
self.fpsLimit.setToolTip(_translate('Dialog', 'Create another output video limiting the fps to X'))
self.fpsLimit.setInputMask(_translate('Dialog', '000'))
self.fpsLimit.setText(_translate('Dialog', '60'))
self.to60.setText(_translate('Dialog', '(If FPS exceeds [FPS]) Create a [FPS] version of movie.'))
self.to60C1.setText(_translate('Dialog', '(If FPS exceeds [FPS]) Intepolate down to [FPS] [Conf 1: Smooth]'))
self.to60C2.setText(_translate('Dialog', '(If FPS exceeds [FPS]) Intepolate down to [FPS] [Conf 2: Sharp]'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate('Dialog', 'Misc Options'))
self.Alpha.setText(_translate('Dialog', ' [Broken] Alpha Transparency\n[GIF need Limit Color pallet to show alpha]:'))
self.alphaOpt.setItemText(0, _translate('Dialog', 'No Alpha'))
self.alphaOpt.setItemText(1, _translate('Dialog', 'With Alpha'))
self.limitPalette.setToolTip(_translate('Dialog', 'If you are outputing to gif, you want to turn on this option.'))
self.limitPalette.setText(_translate('Dialog', 'Limit Color Palette to only use original colors. [Sometimes work better with pixelart]'))
self.checkBox.setText(_translate('Dialog', 'Generate Palette from original frames. [Not working]'))
self.checkBox_2.setText(_translate('Dialog', 'Generate Palette from interpolated frames. [Not working]'))
self.pixelUpscaleDowscaleBefore.setItemText(0, _translate('Dialog', 'Disabled'))
self.pixelUpscaleDowscaleBefore.setItemText(1, _translate('Dialog', '2X'))
self.pixelUpscaleDowscaleBefore.setItemText(2, _translate('Dialog', '3X'))
self.pixelUpscaleDowscaleBefore.setItemText(3, _translate('Dialog', '4X'))
self.pixelUpscaleDowscaleBefore.setItemText(4, _translate('Dialog', '5X'))
self.pixelUpscaleDowscaleBefore.setItemText(5, _translate('Dialog', '6X'))
self.pixelUpscaleDowscaleBefore.setItemText(6, _translate('Dialog', '7X'))
self.pixelUpscaleDowscaleBefore.setItemText(7, _translate('Dialog', '8X'))
self.pixelUpscaleDowscaleBefore.setItemText(8, _translate('Dialog', '9X'))
self.pixelUpscaleDowscaleBefore.setItemText(9, _translate('Dialog', '10X'))
self.label_19.setText(_translate('Dialog', 'Upscale [Nearest neighbor] by X; Do Interpolation; Downscale[Nearest neighbor] by X'))
self.pixelDownscaleUpscaleAfter.setItemText(0, _translate('Dialog', 'Disabled'))
self.pixelDownscaleUpscaleAfter.setItemText(1, _translate('Dialog', '2X'))
self.pixelDownscaleUpscaleAfter.setItemText(2, _translate('Dialog', '3X'))
self.pixelDownscaleUpscaleAfter.setItemText(3, _translate('Dialog', '4X'))
self.pixelDownscaleUpscaleAfter.setItemText(4, _translate('Dialog', '5X'))
self.pixelDownscaleUpscaleAfter.setItemText(5, _translate('Dialog', '6X'))
self.pixelDownscaleUpscaleAfter.setItemText(6, _translate('Dialog', '7X'))
self.pixelDownscaleUpscaleAfter.setItemText(7, _translate('Dialog', '8X'))
self.pixelDownscaleUpscaleAfter.setItemText(8, _translate('Dialog', '9X'))
self.pixelDownscaleUpscaleAfter.setItemText(9, _translate('Dialog', '10X'))
self.label_20.setText(_translate('Dialog', 'Do Interpolation; Downscale [Nearest neighbor] by X; Upscale [Nearest neighbor] by X'))
self.pixelUpscaleAfter.setItemText(0, _translate('Dialog', 'Disabled'))
self.pixelUpscaleAfter.setItemText(1, _translate('Dialog', '2X'))
self.pixelUpscaleAfter.setItemText(2, _translate('Dialog', '3X'))
self.pixelUpscaleAfter.setItemText(3, _translate('Dialog', '4X'))
self.pixelUpscaleAfter.setItemText(4, _translate('Dialog', '5X'))
self.pixelUpscaleAfter.setItemText(5, _translate('Dialog', '6X'))
self.pixelUpscaleAfter.setItemText(6, _translate('Dialog', '7X'))
self.pixelUpscaleAfter.setItemText(7, _translate('Dialog', '8X'))
self.pixelUpscaleAfter.setItemText(8, _translate('Dialog', '9X'))
self.pixelUpscaleAfter.setItemText(9, _translate('Dialog', '10X'))
self.label_21.setText(_translate('Dialog', 'Do Interpolation; Upscale [Nearest neighbor] by X'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.PixelArt), _translate('Dialog', 'PixelArt'))
self.label_15.setText(_translate('Dialog', "Keep in mind that other applications also share the VRAM, it's almost impossible that Dain-App have all the available VRAM only to itself."))
self.label_14.setText(_translate('Dialog', "This application runs exclusively on Video memory and requires a LOT of it. For example, rendering at 720p takes between 10 and 11 GB of Vram. \nThere are two options to address the issue of a video file being too high resolution to handle with your GPU's available Vram capacity (CUDA out of memory error message). \nThe faster and easier of the options is to downscale the video to a resolution your GPU can handle. Simply check the downscale box and enter the new height,\nthe width will be calculated automatically based on the aspect ratio."))
self.useResize.setText(_translate('Dialog', 'Downscale Video:'))
self.label_3.setText(_translate('Dialog', 'New Height:'))
self.label_13.setText(_translate('Dialog', 'The slower more complicated solution is to use the Frame Splitter, but it will let you render the full resolution of your video.\nThis will lower the Vram load by splitting each frame being into pieces (Number of divisions), rendering each piece one at a time, then stitching them back together.\nHowever, this method causes lines crossing the borders of the pieces to not line up properly, creating a visible grid pattern where the borders of the pieces are. Padding is used to counteract this.\nPadding is additional space beyond the section size that overlaps with adjacent pieces and is used to ensure the lines between the frames pieces line up properly.\nKeep inscreasing the division size until the Out of Memory error is fixed.'))
self.useSplit.setText(_translate('Dialog', 'Split frames into sections:'))
self.label_8.setText(_translate('Dialog', 'Number of divisions X:'))
self.splitSizeX.setText(_translate('Dialog', '2'))
self.splitSizeX.setPlaceholderText(_translate('Dialog', 'Section Size'))
self.label_25.setText(_translate('Dialog', 'Number of divisions Y:'))
self.splitSizeY.setText(_translate('Dialog', '2'))
self.label_9.setText(_translate('Dialog', 'Section Padding[px]:'))
self.splitPad.setText(_translate('Dialog', '150'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate('Dialog', 'Fix OutOfMemory Options'))
self.label_4.setText(_translate('Dialog', 'Color Scheme: '))
self.brightBtn.setText(_translate('Dialog', 'Bright Mode'))
self.darkBtn.setText(_translate('Dialog', 'Dark Mode'))
self.loadPlace.setText(_translate('Dialog', 'Load Preset'))
self.deletePlace.setText(_translate('Dialog', 'Delete Preset'))
self.placeholderName.setPlaceholderText(_translate('Dialog', 'Placeholder name'))
self.savePlace.setText(_translate('Dialog', 'Save New Preset'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_6), _translate('Dialog', 'Application Style'))
self.textEdit.setHtml(_translate('Dialog', '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">\n<html><head><meta name="qrichtext" content="1" /><style type="text/css">\np, li { white-space: pre-wrap; }\n</style></head><body style=" font-family:\'MS Shell Dlg 2\'; font-size:8pt; font-weight:400; font-style:normal;">\n<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">How do i start?</p>\n<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Dain-App is slow to render long videos or videos with great quality,if you are new to it start with something small like a gif or a 3 second video before starting to use the application.</p>\n<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">The simplest test you can do is just select a "input file"e and "output Folder" and press "Perform all steps", this should create your first interpolation in the selected folder.</p>\n<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>\n<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>\n<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">I Get Out Of memory Error even with the split option turned on?</p>\n<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">You need to add even more splits.</p></body></html>'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate('Dialog', 'FAQ'))
self.fastMode.setToolTip(_translate('Dialog', "<html><head/><body><p>Will make interpolation a lot faster and use less Vram. But it also produce worse results. Good if you don't care much for the final quality or is testing configurations.</p><p>Try it with bigger batch sizes to gain even more speed.</p></body></html>"))
self.fastMode.setText(_translate('Dialog', 'Fast Mode'))
self.dontCleanCache.setText(_translate('Dialog', "Don't clean CUDA cache between frames"))
self.ffmpegPrint.setText(_translate('Dialog', 'Print on console FFMPEG messages.'))
self.onlyInterpolateMissing.setText(_translate('Dialog', '[For internal debugging] Only interpolate deleted frames.'))
self.debugKeepDuplicates.setText(_translate('Dialog', "[For internal debugging] Don't use mpdecimate"))
self.label_31.setText(_translate('Dialog', 'Motion Blur {WIP}:'))
self.label_30.setToolTip(_translate('Dialog', '<html><head/><body><p>[Not work with fast mode enabled]</p><p>[Still WIP]</p><p>0 = Disabled</p><p>2 = Subtle blur</p><p>5 = Small blur</p><p>10 = A lot of blur</p><p>Above 10 = Absurd blur</p></body></html>'))
self.label_30.setText(_translate('Dialog', 'Motion Blur Force [0 = Disable]:'))
self.smoothFlow.setToolTip(_translate('Dialog', '<html><head/><body><p>[Not work with fast mode enabled]</p><p>[Still WIP]</p><p>0 = Disabled</p><p>2 = Subtle blur</p><p>5 = Small blur</p><p>10 = A lot of blur</p><p>Above 10 = Absurd blur</p></body></html>'))
self.label_29.setToolTip(_translate('Dialog', '<html><head/><body><p>0 = All motion create motion blur</p><p>5 = Almost all motion create blur</p><p>15 = Faster motions create blur</p><p>Above 20 = Only real fast motions create blur</p></body></html>'))
self.label_29.setText(_translate('Dialog', 'Motion Blur Threshold'))
self.flowForce.setToolTip(_translate('Dialog', '<html><head/><body><p>0 = All motion create motion blur</p><p>5 = Almost all motion create blur</p><p>15 = Faster motions create blur</p><p>Above 20 = Only real fast motions create blur</p></body></html>'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_7), _translate('Dialog', 'Beta Options'))
self.renderBtn.setText(_translate('Dialog', 'Perform all steps: Render'))
self.renderFrames.setText(_translate('Dialog', 'Step 1: Split source video into frames'))
self.renderInterpolation.setText(_translate('Dialog', 'Step 2: Feed source frames to DAIN'))
self.renderVideo.setText(_translate('Dialog', 'Step 3: Convert DAIN frames to video'))
        # --- end inlined retranslateUi(Dialog) body ---
self.tabWidget.setCurrentIndex(4)
QtCore.QMetaObject.connectSlotsByName(Dialog)
Dialog.setTabOrder(self.renderFrames, self.patreonBtn)
Dialog.setTabOrder(self.patreonBtn, self.renderVideo)
Dialog.setTabOrder(self.renderVideo, self.renderBtn)
Dialog.setTabOrder(self.renderBtn, self.discBtn)
Dialog.setTabOrder(self.discBtn, self.radioInputVideos)
Dialog.setTabOrder(self.radioInputVideos, self.radioInputPNG)
Dialog.setTabOrder(self.radioInputPNG, self.radioResumeRender)
Dialog.setTabOrder(self.radioResumeRender, self.inputFileBtn)
Dialog.setTabOrder(self.inputFileBtn, self.inputFolderBtn)
Dialog.setTabOrder(self.inputFolderBtn, self.inputResumeFolder)
Dialog.setTabOrder(self.inputResumeFolder, self.renderInterpolation)
Dialog.setTabOrder(self.renderInterpolation, self.exportType)
Dialog.setTabOrder(self.exportType, self.outputFolderBtn)
Dialog.setTabOrder(self.outputFolderBtn, self.animMethod)
Dialog.setTabOrder(self.animMethod, self.alphaOpt)
Dialog.setTabOrder(self.alphaOpt, self.intAlgo)
Dialog.setTabOrder(self.intAlgo, self.fpsInput)
Dialog.setTabOrder(self.fpsInput, self.interpolationLevel)
Dialog.setTabOrder(self.interpolationLevel, self.outputFps)
Dialog.setTabOrder(self.outputFps, self.useSplit)
Dialog.setTabOrder(self.useSplit, self.splitSizeX)
Dialog.setTabOrder(self.splitSizeX, self.splitPad)
Dialog.setTabOrder(self.splitPad, self.useResize)
Dialog.setTabOrder(self.useResize, self.widthValue)
Dialog.setTabOrder(self.widthValue, self.dontInterpolateScenes)
Dialog.setTabOrder(self.dontInterpolateScenes, self.lineEdit_2)
Dialog.setTabOrder(self.lineEdit_2, self.verifyScenes)
Dialog.setTabOrder(self.verifyScenes, self.cleanInterpol)
Dialog.setTabOrder(self.cleanInterpol, self.fpsLimit)
Dialog.setTabOrder(self.fpsLimit, self.to60)
Dialog.setTabOrder(self.to60, self.to60C1)
Dialog.setTabOrder(self.to60C1, self.to60C2)
Dialog.setTabOrder(self.to60C2, self.tabWidget)
Dialog.setTabOrder(self.tabWidget, self.interpolMethod)
Dialog.setTabOrder(self.interpolMethod, self.textEdit)
|
def setupUi(self, Dialog):
Dialog.setObjectName('Dialog')
Dialog.resize(1032, 736)
font = QtGui.QFont()
font.setPointSize(8)
Dialog.setFont(font)
self.verticalLayout_3 = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout_3.setObjectName('verticalLayout_3')
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.verticalLayout.setObjectName('verticalLayout')
self.label = QtWidgets.QLabel(Dialog)
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setUnderline(False)
font.setWeight(75)
self.label.setFont(font)
self.label.setScaledContents(False)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName('label')
self.verticalLayout.addWidget(self.label)
self.horizontalLayout_32 = QtWidgets.QHBoxLayout()
self.horizontalLayout_32.setObjectName('horizontalLayout_32')
self.credits = QtWidgets.QPushButton(Dialog)
self.credits.setObjectName('credits')
self.horizontalLayout_32.addWidget(self.credits)
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setObjectName('pushButton')
self.horizontalLayout_32.addWidget(self.pushButton)
self.verticalLayout.addLayout(self.horizontalLayout_32)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName('horizontalLayout_5')
self.patreonBtn = QtWidgets.QPushButton(Dialog)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(':/newPrefix/patreon.jpg'), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.patreonBtn.setIcon(icon)
self.patreonBtn.setIconSize(QtCore.QSize(32, 32))
self.patreonBtn.setObjectName('patreonBtn')
self.horizontalLayout_5.addWidget(self.patreonBtn)
self.discBtn = QtWidgets.QPushButton(Dialog)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(':/newPrefix/disc.jpg'), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.discBtn.setIcon(icon1)
self.discBtn.setIconSize(QtCore.QSize(32, 32))
self.discBtn.setObjectName('discBtn')
self.horizontalLayout_5.addWidget(self.discBtn)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setObjectName('horizontalLayout_15')
self.radioInputVideos = QtWidgets.QRadioButton(Dialog)
self.radioInputVideos.setChecked(True)
self.radioInputVideos.setObjectName('radioInputVideos')
self.horizontalLayout_15.addWidget(self.radioInputVideos)
self.radioInputPNG = QtWidgets.QRadioButton(Dialog)
self.radioInputPNG.setEnabled(True)
self.radioInputPNG.setObjectName('radioInputPNG')
self.horizontalLayout_15.addWidget(self.radioInputPNG)
self.radioResumeRender = QtWidgets.QRadioButton(Dialog)
self.radioResumeRender.setEnabled(True)
self.radioResumeRender.setObjectName('radioResumeRender')
self.horizontalLayout_15.addWidget(self.radioResumeRender)
self.verticalLayout.addLayout(self.horizontalLayout_15)
self._2 = QtWidgets.QHBoxLayout()
self._2.setObjectName('_2')
self.inputVideosLayout = QtWidgets.QFrame(Dialog)
self.inputVideosLayout.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.inputVideosLayout.setFrameShadow(QtWidgets.QFrame.Raised)
self.inputVideosLayout.setObjectName('inputVideosLayout')
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.inputVideosLayout)
self.horizontalLayout_2.setObjectName('horizontalLayout_2')
self.inputFileBtn = QtWidgets.QPushButton(self.inputVideosLayout)
self.inputFileBtn.setObjectName('inputFileBtn')
self.horizontalLayout_2.addWidget(self.inputFileBtn)
self.inputFileLabel = QtWidgets.QLabel(self.inputVideosLayout)
self.inputFileLabel.setScaledContents(False)
self.inputFileLabel.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.inputFileLabel.setObjectName('inputFileLabel')
self.horizontalLayout_2.addWidget(self.inputFileLabel)
self._2.addWidget(self.inputVideosLayout)
self.inputSequenceLayout_2 = QtWidgets.QFrame(Dialog)
self.inputSequenceLayout_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.inputSequenceLayout_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.inputSequenceLayout_2.setObjectName('inputSequenceLayout_2')
self.horizontalLayout_13 = QtWidgets.QHBoxLayout(self.inputSequenceLayout_2)
self.horizontalLayout_13.setObjectName('horizontalLayout_13')
self.inputFolderBtn = QtWidgets.QPushButton(self.inputSequenceLayout_2)
self.inputFolderBtn.setObjectName('inputFolderBtn')
self.horizontalLayout_13.addWidget(self.inputFolderBtn)
self.inputFolderLabel = QtWidgets.QLabel(self.inputSequenceLayout_2)
self.inputFolderLabel.setObjectName('inputFolderLabel')
self.horizontalLayout_13.addWidget(self.inputFolderLabel)
self._2.addWidget(self.inputSequenceLayout_2)
self.inputResumeLayout_2 = QtWidgets.QFrame(Dialog)
self.inputResumeLayout_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.inputResumeLayout_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.inputResumeLayout_2.setObjectName('inputResumeLayout_2')
self.horizontalLayout_16 = QtWidgets.QHBoxLayout(self.inputResumeLayout_2)
self.horizontalLayout_16.setObjectName('horizontalLayout_16')
self.inputResumeFolder = QtWidgets.QPushButton(self.inputResumeLayout_2)
self.inputResumeFolder.setObjectName('inputResumeFolder')
self.horizontalLayout_16.addWidget(self.inputResumeFolder)
self.inputResumeLabel = QtWidgets.QLabel(self.inputResumeLayout_2)
self.inputResumeLabel.setObjectName('inputResumeLabel')
self.horizontalLayout_16.addWidget(self.inputResumeLabel)
self._2.addWidget(self.inputResumeLayout_2)
self.verticalLayout.addLayout(self._2)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName('horizontalLayout_6')
self.exportType = QtWidgets.QComboBox(Dialog)
self.exportType.setEnabled(True)
self.exportType.setObjectName('exportType')
self.exportType.addItem('')
self.exportType.addItem('')
self.exportType.addItem('')
self.exportType.addItem('')
self.horizontalLayout_6.addWidget(self.exportType)
self.verticalLayout.addLayout(self.horizontalLayout_6)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName('horizontalLayout_3')
self.outputFolderBtn = QtWidgets.QPushButton(Dialog)
self.outputFolderBtn.setObjectName('outputFolderBtn')
self.horizontalLayout_3.addWidget(self.outputFolderBtn)
self.outputFolderLabel = QtWidgets.QLabel(Dialog)
self.outputFolderLabel.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.outputFolderLabel.setObjectName('outputFolderLabel')
self.horizontalLayout_3.addWidget(self.outputFolderLabel)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.tabWidget = QtWidgets.QTabWidget(Dialog)
self.tabWidget.setObjectName('tabWidget')
self.tab = QtWidgets.QWidget()
self.tab.setObjectName('tab')
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.tab)
self.verticalLayout_7.setObjectName('verticalLayout_7')
self.horizontalLayout_31 = QtWidgets.QHBoxLayout()
self.horizontalLayout_31.setObjectName('horizontalLayout_31')
self.label_27 = QtWidgets.QLabel(self.tab)
self.label_27.setObjectName('label_27')
self.horizontalLayout_31.addWidget(self.label_27)
self.flowModel = QtWidgets.QComboBox(self.tab)
self.flowModel.setObjectName('flowModel')
self.flowModel.addItem('')
self.horizontalLayout_31.addWidget(self.flowModel)
self.verticalLayout_7.addLayout(self.horizontalLayout_31)
self.label_18 = QtWidgets.QLabel(self.tab)
self.label_18.setObjectName('label_18')
self.verticalLayout_7.addWidget(self.label_18)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName('horizontalLayout_9')
self.label_6 = QtWidgets.QLabel(self.tab)
self.label_6.setObjectName('label_6')
self.horizontalLayout_9.addWidget(self.label_6)
self.interpolMethod = QtWidgets.QComboBox(self.tab)
self.interpolMethod.setModelColumn(0)
self.interpolMethod.setObjectName('interpolMethod')
self.interpolMethod.addItem('')
self.interpolMethod.addItem('')
self.interpolMethod.addItem('')
self.interpolMethod.addItem('')
self.horizontalLayout_9.addWidget(self.interpolMethod)
self.verticalLayout_7.addLayout(self.horizontalLayout_9)
self.modeDesc = QtWidgets.QLabel(self.tab)
self.modeDesc.setObjectName('modeDesc')
self.verticalLayout_7.addWidget(self.modeDesc)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_7.addItem(spacerItem)
self.tabWidget.addTab(self.tab, '')
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName('tab_2')
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.tab_2)
self.verticalLayout_6.setObjectName('verticalLayout_6')
self.useHalf = QtWidgets.QCheckBox(self.tab_2)
self.useHalf.setEnabled(True)
self.useHalf.setChecked(True)
self.useHalf.setObjectName('useHalf')
self.verticalLayout_6.addWidget(self.useHalf)
self.doBenchmark = QtWidgets.QCheckBox(self.tab_2)
self.doBenchmark.setChecked(True)
self.doBenchmark.setObjectName('doBenchmark')
self.verticalLayout_6.addWidget(self.doBenchmark)
self.horizontalLayout_30 = QtWidgets.QHBoxLayout()
self.horizontalLayout_30.setObjectName('horizontalLayout_30')
self.label_32 = QtWidgets.QLabel(self.tab_2)
self.label_32.setObjectName('label_32')
self.horizontalLayout_30.addWidget(self.label_32)
self.batchSize = QtWidgets.QSpinBox(self.tab_2)
self.batchSize.setMinimum(1)
self.batchSize.setObjectName('batchSize')
self.horizontalLayout_30.addWidget(self.batchSize)
self.verticalLayout_6.addLayout(self.horizontalLayout_30)
self.horizontalLayout_18 = QtWidgets.QHBoxLayout()
self.horizontalLayout_18.setObjectName('horizontalLayout_18')
self.label_16 = QtWidgets.QLabel(self.tab_2)
self.label_16.setObjectName('label_16')
self.horizontalLayout_18.addWidget(self.label_16)
self.deviceList = QtWidgets.QComboBox(self.tab_2)
self.deviceList.setObjectName('deviceList')
self.horizontalLayout_18.addWidget(self.deviceList)
self.verticalLayout_6.addLayout(self.horizontalLayout_18)
self.horizontalLayout_17 = QtWidgets.QHBoxLayout()
self.horizontalLayout_17.setObjectName('horizontalLayout_17')
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName('verticalLayout_2')
self.label_7 = QtWidgets.QLabel(self.tab_2)
self.label_7.setObjectName('label_7')
self.verticalLayout_2.addWidget(self.label_7)
self.animMethod = QtWidgets.QComboBox(self.tab_2)
self.animMethod.setObjectName('animMethod')
self.animMethod.addItem('')
self.animMethod.addItem('')
self.verticalLayout_2.addWidget(self.animMethod)
self.horizontalLayout_17.addLayout(self.verticalLayout_2)
self.horizontalLayout_20 = QtWidgets.QHBoxLayout()
self.horizontalLayout_20.setObjectName('horizontalLayout_20')
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName('verticalLayout_4')
self.label_12 = QtWidgets.QLabel(self.tab_2)
self.label_12.setObjectName('label_12')
self.verticalLayout_4.addWidget(self.label_12)
self.intAlgo = QtWidgets.QComboBox(self.tab_2)
self.intAlgo.setObjectName('intAlgo')
self.intAlgo.addItem('')
self.intAlgo.addItem('')
self.verticalLayout_4.addWidget(self.intAlgo)
self.horizontalLayout_20.addLayout(self.verticalLayout_4)
self.horizontalLayout_17.addLayout(self.horizontalLayout_20)
self.verticalLayout_6.addLayout(self.horizontalLayout_17)
self.line_3 = QtWidgets.QFrame(self.tab_2)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName('line_3')
self.verticalLayout_6.addWidget(self.line_3)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName('horizontalLayout')
self.label_2 = QtWidgets.QLabel(self.tab_2)
self.label_2.setObjectName('label_2')
self.horizontalLayout.addWidget(self.label_2)
self.fpsInput = QtWidgets.QLineEdit(self.tab_2)
self.fpsInput.setObjectName('fpsInput')
self.horizontalLayout.addWidget(self.fpsInput)
self.interpolationLevel = QtWidgets.QComboBox(self.tab_2)
self.interpolationLevel.setMinimumSize(QtCore.QSize(100, 0))
self.interpolationLevel.setModelColumn(0)
self.interpolationLevel.setObjectName('interpolationLevel')
self.interpolationLevel.addItem('')
self.interpolationLevel.addItem('')
self.interpolationLevel.addItem('')
self.horizontalLayout.addWidget(self.interpolationLevel)
self.label_5 = QtWidgets.QLabel(self.tab_2)
self.label_5.setObjectName('label_5')
self.horizontalLayout.addWidget(self.label_5)
self.outputFps = QtWidgets.QLineEdit(self.tab_2)
self.outputFps.setReadOnly(True)
self.outputFps.setObjectName('outputFps')
self.horizontalLayout.addWidget(self.outputFps)
self.verticalLayout_6.addLayout(self.horizontalLayout)
self.label_17 = QtWidgets.QLabel(self.tab_2)
self.label_17.setObjectName('label_17')
self.verticalLayout_6.addWidget(self.label_17)
self.line_6 = QtWidgets.QFrame(self.tab_2)
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName('line_6')
self.verticalLayout_6.addWidget(self.line_6)
self.horizontalLayout_26 = QtWidgets.QHBoxLayout()
self.horizontalLayout_26.setObjectName('horizontalLayout_26')
self.label_23 = QtWidgets.QLabel(self.tab_2)
self.label_23.setObjectName('label_23')
self.horizontalLayout_26.addWidget(self.label_23)
self.pngCompress = QtWidgets.QLineEdit(self.tab_2)
self.pngCompress.setObjectName('pngCompress')
self.horizontalLayout_26.addWidget(self.pngCompress)
self.label_24 = QtWidgets.QLabel(self.tab_2)
self.label_24.setObjectName('label_24')
self.horizontalLayout_26.addWidget(self.label_24)
self.verticalLayout_6.addLayout(self.horizontalLayout_26)
self.label_26 = QtWidgets.QLabel(self.tab_2)
self.label_26.setObjectName('label_26')
self.verticalLayout_6.addWidget(self.label_26)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_6.addItem(spacerItem1)
self.tabWidget.addTab(self.tab_2, '')
self.tab_8 = QtWidgets.QWidget()
self.tab_8.setObjectName('tab_8')
self.verticalLayout_15 = QtWidgets.QVBoxLayout(self.tab_8)
self.verticalLayout_15.setObjectName('verticalLayout_15')
self.horizontalLayout_25 = QtWidgets.QHBoxLayout()
self.horizontalLayout_25.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.horizontalLayout_25.setObjectName('horizontalLayout_25')
self.label_22 = QtWidgets.QLabel(self.tab_8)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_22.sizePolicy().hasHeightForWidth())
self.label_22.setSizePolicy(sizePolicy)
self.label_22.setObjectName('label_22')
self.horizontalLayout_25.addWidget(self.label_22)
self.crfVal = QtWidgets.QLineEdit(self.tab_8)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.crfVal.sizePolicy().hasHeightForWidth())
self.crfVal.setSizePolicy(sizePolicy)
self.crfVal.setObjectName('crfVal')
self.horizontalLayout_25.addWidget(self.crfVal)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_25.addItem(spacerItem2)
self.verticalLayout_15.addLayout(self.horizontalLayout_25)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_15.addItem(spacerItem3)
self.tabWidget.addTab(self.tab_8, '')
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName('tab_3')
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.tab_3)
self.verticalLayout_8.setObjectName('verticalLayout_8')
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setObjectName('horizontalLayout_14')
self.dontInterpolateScenes = QtWidgets.QCheckBox(self.tab_3)
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.dontInterpolateScenes.setFont(font)
self.dontInterpolateScenes.setObjectName('dontInterpolateScenes')
self.horizontalLayout_14.addWidget(self.dontInterpolateScenes)
self.label_11 = QtWidgets.QLabel(self.tab_3)
self.label_11.setObjectName('label_11')
self.horizontalLayout_14.addWidget(self.label_11)
self.lineEdit_2 = QtWidgets.QLineEdit(self.tab_3)
self.lineEdit_2.setObjectName('lineEdit_2')
self.horizontalLayout_14.addWidget(self.lineEdit_2)
self.verifyScenes = QtWidgets.QPushButton(self.tab_3)
self.verifyScenes.setObjectName('verifyScenes')
self.horizontalLayout_14.addWidget(self.verifyScenes)
self.verticalLayout_8.addLayout(self.horizontalLayout_14)
self.line_5 = QtWidgets.QFrame(self.tab_3)
self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName('line_5')
self.verticalLayout_8.addWidget(self.line_5)
self.horizontalLayout_19 = QtWidgets.QHBoxLayout()
self.horizontalLayout_19.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.horizontalLayout_19.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_19.setObjectName('horizontalLayout_19')
self.cleanInterpol = QtWidgets.QCheckBox(self.tab_3)
self.cleanInterpol.setObjectName('cleanInterpol')
self.horizontalLayout_19.addWidget(self.cleanInterpol)
self.verticalLayout_8.addLayout(self.horizontalLayout_19)
self.horizontalLayout_21 = QtWidgets.QHBoxLayout()
self.horizontalLayout_21.setObjectName('horizontalLayout_21')
self.perfectLoop = QtWidgets.QCheckBox(self.tab_3)
self.perfectLoop.setObjectName('perfectLoop')
self.horizontalLayout_21.addWidget(self.perfectLoop)
self.audioVersion = QtWidgets.QCheckBox(self.tab_3)
self.audioVersion.setObjectName('audioVersion')
self.horizontalLayout_21.addWidget(self.audioVersion)
self.verticalLayout_8.addLayout(self.horizontalLayout_21)
self.line_4 = QtWidgets.QFrame(self.tab_3)
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName('line_4')
self.verticalLayout_8.addWidget(self.line_4)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setSpacing(6)
self.horizontalLayout_12.setObjectName('horizontalLayout_12')
self.label_10 = QtWidgets.QLabel(self.tab_3)
self.label_10.setObjectName('label_10')
self.horizontalLayout_12.addWidget(self.label_10)
self.fpsLimit = QtWidgets.QLineEdit(self.tab_3)
self.fpsLimit.setMaximumSize(QtCore.QSize(100000, 16777215))
self.fpsLimit.setLayoutDirection(QtCore.Qt.LeftToRight)
self.fpsLimit.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.fpsLimit.setObjectName('fpsLimit')
self.horizontalLayout_12.addWidget(self.fpsLimit)
self.verticalLayout_8.addLayout(self.horizontalLayout_12)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setObjectName('horizontalLayout_10')
self.to60 = QtWidgets.QCheckBox(self.tab_3)
self.to60.setObjectName('to60')
self.horizontalLayout_10.addWidget(self.to60)
self.to60C1 = QtWidgets.QCheckBox(self.tab_3)
self.to60C1.setObjectName('to60C1')
self.horizontalLayout_10.addWidget(self.to60C1)
self.to60C2 = QtWidgets.QCheckBox(self.tab_3)
self.to60C2.setObjectName('to60C2')
self.horizontalLayout_10.addWidget(self.to60C2)
self.verticalLayout_8.addLayout(self.horizontalLayout_10)
spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_8.addItem(spacerItem4)
self.tabWidget.addTab(self.tab_3, '')
self.PixelArt = QtWidgets.QWidget()
self.PixelArt.setObjectName('PixelArt')
self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.PixelArt)
self.verticalLayout_14.setObjectName('verticalLayout_14')
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName('verticalLayout_5')
self.Alpha = QtWidgets.QLabel(self.PixelArt)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Alpha.sizePolicy().hasHeightForWidth())
self.Alpha.setSizePolicy(sizePolicy)
self.Alpha.setObjectName('Alpha')
self.verticalLayout_5.addWidget(self.Alpha)
self.alphaOpt = QtWidgets.QComboBox(self.PixelArt)
self.alphaOpt.setObjectName('alphaOpt')
self.alphaOpt.addItem('')
self.alphaOpt.addItem('')
self.verticalLayout_5.addWidget(self.alphaOpt)
self.verticalLayout_14.addLayout(self.verticalLayout_5)
self.limitPalette = QtWidgets.QCheckBox(self.PixelArt)
self.limitPalette.setObjectName('limitPalette')
self.verticalLayout_14.addWidget(self.limitPalette)
self.checkBox = QtWidgets.QCheckBox(self.PixelArt)
self.checkBox.setEnabled(False)
self.checkBox.setObjectName('checkBox')
self.verticalLayout_14.addWidget(self.checkBox)
self.checkBox_2 = QtWidgets.QCheckBox(self.PixelArt)
self.checkBox_2.setEnabled(False)
self.checkBox_2.setObjectName('checkBox_2')
self.verticalLayout_14.addWidget(self.checkBox_2)
self.horizontalLayout_22 = QtWidgets.QHBoxLayout()
self.horizontalLayout_22.setObjectName('horizontalLayout_22')
self.pixelUpscaleDowscaleBefore = QtWidgets.QComboBox(self.PixelArt)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pixelUpscaleDowscaleBefore.sizePolicy().hasHeightForWidth())
self.pixelUpscaleDowscaleBefore.setSizePolicy(sizePolicy)
self.pixelUpscaleDowscaleBefore.setObjectName('pixelUpscaleDowscaleBefore')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.pixelUpscaleDowscaleBefore.addItem('')
self.horizontalLayout_22.addWidget(self.pixelUpscaleDowscaleBefore)
self.label_19 = QtWidgets.QLabel(self.PixelArt)
self.label_19.setObjectName('label_19')
self.horizontalLayout_22.addWidget(self.label_19)
self.verticalLayout_14.addLayout(self.horizontalLayout_22)
self.horizontalLayout_23 = QtWidgets.QHBoxLayout()
self.horizontalLayout_23.setObjectName('horizontalLayout_23')
self.pixelDownscaleUpscaleAfter = QtWidgets.QComboBox(self.PixelArt)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pixelDownscaleUpscaleAfter.sizePolicy().hasHeightForWidth())
self.pixelDownscaleUpscaleAfter.setSizePolicy(sizePolicy)
self.pixelDownscaleUpscaleAfter.setObjectName('pixelDownscaleUpscaleAfter')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.pixelDownscaleUpscaleAfter.addItem('')
self.horizontalLayout_23.addWidget(self.pixelDownscaleUpscaleAfter)
self.label_20 = QtWidgets.QLabel(self.PixelArt)
self.label_20.setObjectName('label_20')
self.horizontalLayout_23.addWidget(self.label_20)
self.verticalLayout_14.addLayout(self.horizontalLayout_23)
self.horizontalLayout_24 = QtWidgets.QHBoxLayout()
self.horizontalLayout_24.setObjectName('horizontalLayout_24')
self.pixelUpscaleAfter = QtWidgets.QComboBox(self.PixelArt)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pixelUpscaleAfter.sizePolicy().hasHeightForWidth())
self.pixelUpscaleAfter.setSizePolicy(sizePolicy)
self.pixelUpscaleAfter.setObjectName('pixelUpscaleAfter')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.pixelUpscaleAfter.addItem('')
self.horizontalLayout_24.addWidget(self.pixelUpscaleAfter)
self.label_21 = QtWidgets.QLabel(self.PixelArt)
self.label_21.setObjectName('label_21')
self.horizontalLayout_24.addWidget(self.label_21)
self.verticalLayout_14.addLayout(self.horizontalLayout_24)
self.tabWidget.addTab(self.PixelArt, '')
self.tab_4 = QtWidgets.QWidget()
self.tab_4.setObjectName('tab_4')
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.tab_4)
self.verticalLayout_10.setObjectName('verticalLayout_10')
self.label_15 = QtWidgets.QLabel(self.tab_4)
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.label_15.setFont(font)
self.label_15.setObjectName('label_15')
self.verticalLayout_10.addWidget(self.label_15)
self.label_14 = QtWidgets.QLabel(self.tab_4)
self.label_14.setObjectName('label_14')
self.verticalLayout_10.addWidget(self.label_14)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName('horizontalLayout_4')
self.useResize = QtWidgets.QCheckBox(self.tab_4)
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.useResize.setFont(font)
self.useResize.setToolTip('')
self.useResize.setToolTipDuration(-1)
self.useResize.setStatusTip('')
self.useResize.setInputMethodHints(QtCore.Qt.ImhNone)
self.useResize.setObjectName('useResize')
self.horizontalLayout_4.addWidget(self.useResize)
self.label_3 = QtWidgets.QLabel(self.tab_4)
self.label_3.setObjectName('label_3')
self.horizontalLayout_4.addWidget(self.label_3)
self.widthValue = QtWidgets.QLineEdit(self.tab_4)
self.widthValue.setInputMask('')
self.widthValue.setText('')
self.widthValue.setObjectName('widthValue')
self.horizontalLayout_4.addWidget(self.widthValue)
self.verticalLayout_10.addLayout(self.horizontalLayout_4)
self.label_13 = QtWidgets.QLabel(self.tab_4)
self.label_13.setObjectName('label_13')
self.verticalLayout_10.addWidget(self.label_13)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName('horizontalLayout_7')
self.useSplit = QtWidgets.QCheckBox(self.tab_4)
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.useSplit.setFont(font)
self.useSplit.setObjectName('useSplit')
self.horizontalLayout_7.addWidget(self.useSplit)
self.label_8 = QtWidgets.QLabel(self.tab_4)
self.label_8.setObjectName('label_8')
self.horizontalLayout_7.addWidget(self.label_8)
self.splitSizeX = QtWidgets.QLineEdit(self.tab_4)
self.splitSizeX.setToolTip('')
self.splitSizeX.setWhatsThis('')
self.splitSizeX.setAutoFillBackground(False)
self.splitSizeX.setInputMask('')
self.splitSizeX.setObjectName('splitSizeX')
self.horizontalLayout_7.addWidget(self.splitSizeX)
self.label_25 = QtWidgets.QLabel(self.tab_4)
self.label_25.setObjectName('label_25')
self.horizontalLayout_7.addWidget(self.label_25)
self.splitSizeY = QtWidgets.QLineEdit(self.tab_4)
self.splitSizeY.setObjectName('splitSizeY')
self.horizontalLayout_7.addWidget(self.splitSizeY)
self.label_9 = QtWidgets.QLabel(self.tab_4)
self.label_9.setObjectName('label_9')
self.horizontalLayout_7.addWidget(self.label_9)
self.splitPad = QtWidgets.QLineEdit(self.tab_4)
self.splitPad.setObjectName('splitPad')
self.horizontalLayout_7.addWidget(self.splitPad)
self.verticalLayout_10.addLayout(self.horizontalLayout_7)
spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_10.addItem(spacerItem5)
self.tabWidget.addTab(self.tab_4, '')
self.tab_6 = QtWidgets.QWidget()
self.tab_6.setObjectName('tab_6')
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.tab_6)
self.verticalLayout_11.setObjectName('verticalLayout_11')
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName('horizontalLayout_8')
self.label_4 = QtWidgets.QLabel(self.tab_6)
self.label_4.setObjectName('label_4')
self.horizontalLayout_8.addWidget(self.label_4)
self.brightBtn = QtWidgets.QPushButton(self.tab_6)
self.brightBtn.setObjectName('brightBtn')
self.horizontalLayout_8.addWidget(self.brightBtn)
self.darkBtn = QtWidgets.QPushButton(self.tab_6)
self.darkBtn.setObjectName('darkBtn')
self.horizontalLayout_8.addWidget(self.darkBtn)
self.verticalLayout_11.addLayout(self.horizontalLayout_8)
self.horizontalLayout_28 = QtWidgets.QHBoxLayout()
self.horizontalLayout_28.setObjectName('horizontalLayout_28')
self.verticalLayout_17 = QtWidgets.QVBoxLayout()
self.verticalLayout_17.setObjectName('verticalLayout_17')
self.placeholderList = QtWidgets.QComboBox(self.tab_6)
self.placeholderList.setObjectName('placeholderList')
self.verticalLayout_17.addWidget(self.placeholderList)
self.horizontalLayout_29 = QtWidgets.QHBoxLayout()
self.horizontalLayout_29.setObjectName('horizontalLayout_29')
self.loadPlace = QtWidgets.QPushButton(self.tab_6)
self.loadPlace.setObjectName('loadPlace')
self.horizontalLayout_29.addWidget(self.loadPlace)
self.deletePlace = QtWidgets.QPushButton(self.tab_6)
self.deletePlace.setObjectName('deletePlace')
self.horizontalLayout_29.addWidget(self.deletePlace)
self.verticalLayout_17.addLayout(self.horizontalLayout_29)
self.horizontalLayout_28.addLayout(self.verticalLayout_17)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_28.addItem(spacerItem6)
self.verticalLayout_16 = QtWidgets.QVBoxLayout()
self.verticalLayout_16.setObjectName('verticalLayout_16')
self.placeholderName = QtWidgets.QLineEdit(self.tab_6)
self.placeholderName.setText('')
self.placeholderName.setObjectName('placeholderName')
self.verticalLayout_16.addWidget(self.placeholderName)
self.savePlace = QtWidgets.QPushButton(self.tab_6)
self.savePlace.setObjectName('savePlace')
self.verticalLayout_16.addWidget(self.savePlace)
self.horizontalLayout_28.addLayout(self.verticalLayout_16)
self.verticalLayout_11.addLayout(self.horizontalLayout_28)
spacerItem7 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_11.addItem(spacerItem7)
self.tabWidget.addTab(self.tab_6, '')
self.tab_5 = QtWidgets.QWidget()
self.tab_5.setObjectName('tab_5')
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.tab_5)
self.verticalLayout_9.setObjectName('verticalLayout_9')
self.textEdit = QtWidgets.QTextEdit(self.tab_5)
self.textEdit.setEnabled(True)
self.textEdit.setReadOnly(True)
self.textEdit.setObjectName('textEdit')
self.verticalLayout_9.addWidget(self.textEdit)
self.tabWidget.addTab(self.tab_5, '')
self.tab_7 = QtWidgets.QWidget()
self.tab_7.setObjectName('tab_7')
self.verticalLayout_13 = QtWidgets.QVBoxLayout(self.tab_7)
self.verticalLayout_13.setObjectName('verticalLayout_13')
self.verticalLayout_12 = QtWidgets.QVBoxLayout()
self.verticalLayout_12.setObjectName('verticalLayout_12')
self.verticalLayout_13.addLayout(self.verticalLayout_12)
self.fastMode = QtWidgets.QCheckBox(self.tab_7)
self.fastMode.setObjectName('fastMode')
self.verticalLayout_13.addWidget(self.fastMode)
self.dontCleanCache = QtWidgets.QCheckBox(self.tab_7)
self.dontCleanCache.setObjectName('dontCleanCache')
self.verticalLayout_13.addWidget(self.dontCleanCache)
self.ffmpegPrint = QtWidgets.QCheckBox(self.tab_7)
self.ffmpegPrint.setObjectName('ffmpegPrint')
self.verticalLayout_13.addWidget(self.ffmpegPrint)
self.onlyInterpolateMissing = QtWidgets.QCheckBox(self.tab_7)
self.onlyInterpolateMissing.setObjectName('onlyInterpolateMissing')
self.verticalLayout_13.addWidget(self.onlyInterpolateMissing)
self.debugKeepDuplicates = QtWidgets.QCheckBox(self.tab_7)
self.debugKeepDuplicates.setObjectName('debugKeepDuplicates')
self.verticalLayout_13.addWidget(self.debugKeepDuplicates)
self.line_8 = QtWidgets.QFrame(self.tab_7)
self.line_8.setFrameShape(QtWidgets.QFrame.HLine)
self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_8.setObjectName('line_8')
self.verticalLayout_13.addWidget(self.line_8)
self.line_7 = QtWidgets.QFrame(self.tab_7)
self.line_7.setFrameShape(QtWidgets.QFrame.HLine)
self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_7.setObjectName('line_7')
self.verticalLayout_13.addWidget(self.line_7)
self.label_31 = QtWidgets.QLabel(self.tab_7)
self.label_31.setObjectName('label_31')
self.verticalLayout_13.addWidget(self.label_31)
self.horizontalLayout_27 = QtWidgets.QHBoxLayout()
self.horizontalLayout_27.setObjectName('horizontalLayout_27')
self.label_30 = QtWidgets.QLabel(self.tab_7)
self.label_30.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label_30.setObjectName('label_30')
self.horizontalLayout_27.addWidget(self.label_30)
self.smoothFlow = QtWidgets.QSpinBox(self.tab_7)
self.smoothFlow.setObjectName('smoothFlow')
self.horizontalLayout_27.addWidget(self.smoothFlow)
self.label_29 = QtWidgets.QLabel(self.tab_7)
self.label_29.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.label_29.setObjectName('label_29')
self.horizontalLayout_27.addWidget(self.label_29)
self.flowForce = QtWidgets.QSpinBox(self.tab_7)
self.flowForce.setMinimum(0)
self.flowForce.setMaximum(40)
self.flowForce.setProperty('value', 5)
self.flowForce.setObjectName('flowForce')
self.horizontalLayout_27.addWidget(self.flowForce)
self.verticalLayout_13.addLayout(self.horizontalLayout_27)
spacerItem8 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_13.addItem(spacerItem8)
self.tabWidget.addTab(self.tab_7, '')
self.verticalLayout.addWidget(self.tabWidget)
self.progressBar = QtWidgets.QProgressBar(Dialog)
self.progressBar.setProperty('value', 0)
self.progressBar.setObjectName('progressBar')
self.verticalLayout.addWidget(self.progressBar)
self.line_2 = QtWidgets.QFrame(Dialog)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName('line_2')
self.verticalLayout.addWidget(self.line_2)
self.renderBtn = QtWidgets.QPushButton(Dialog)
self.renderBtn.setObjectName('renderBtn')
self.verticalLayout.addWidget(self.renderBtn)
self.verticalLayout.setStretch(10, 1)
self.verticalLayout_3.addLayout(self.verticalLayout)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setObjectName('horizontalLayout_11')
self.renderFrames = QtWidgets.QPushButton(Dialog)
self.renderFrames.setObjectName('renderFrames')
self.horizontalLayout_11.addWidget(self.renderFrames)
self.renderInterpolation = QtWidgets.QPushButton(Dialog)
self.renderInterpolation.setObjectName('renderInterpolation')
self.horizontalLayout_11.addWidget(self.renderInterpolation)
self.renderVideo = QtWidgets.QPushButton(Dialog)
self.renderVideo.setObjectName('renderVideo')
self.horizontalLayout_11.addWidget(self.renderVideo)
self.verticalLayout_3.addLayout(self.horizontalLayout_11)
self.line = QtWidgets.QFrame(Dialog)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName('line')
self.verticalLayout_3.addWidget(self.line)
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate('Dialog', 'Dialog'))
self.label.setText(_translate('Dialog', 'DAIN-APP _ #DAINAPP'))
self.credits.setText(_translate('Dialog', 'Credits'))
self.pushButton.setText(_translate('Dialog', 'Patrons that made this app possible.'))
self.patreonBtn.setText(_translate('Dialog', 'Support the App on Patreon'))
self.discBtn.setText(_translate('Dialog', 'Share results on Discord'))
self.radioInputVideos.setText(_translate('Dialog', 'Input Video(s)'))
self.radioInputPNG.setText(_translate('Dialog', 'Input PNG Sequence'))
self.radioResumeRender.setText(_translate('Dialog', 'Resume Render'))
self.inputFileBtn.setText(_translate('Dialog', 'Input File(s)'))
self.inputFileLabel.setText(_translate('Dialog', 'Input File(s) Path'))
self.inputFolderBtn.setText(_translate('Dialog', 'Input PNG Sequence Folder'))
self.inputFolderLabel.setText(_translate('Dialog', 'Input Folder Path'))
self.inputResumeFolder.setText(_translate('Dialog', 'Render Folder'))
self.inputResumeLabel.setText(_translate('Dialog', 'Resume Render Folder'))
self.exportType.setItemText(0, _translate('Dialog', 'Export as Mp4'))
self.exportType.setItemText(1, _translate('Dialog', 'Export as WebM'))
self.exportType.setItemText(2, _translate('Dialog', 'Export as GIF'))
self.exportType.setItemText(3, _translate('Dialog', 'Export as APNG'))
self.outputFolderBtn.setText(_translate('Dialog', 'Output Folder'))
self.outputFolderLabel.setText(_translate('Dialog', 'Selected Output Folder'))
self.label_27.setText(_translate('Dialog', 'Model:'))
self.flowModel.setItemText(0, _translate('Dialog', 'Default Flow Model'))
self.label_18.setText(_translate('Dialog', "This variable don't affect the input if it's a PNG sequence."))
self.label_6.setToolTip(_translate('Dialog', 'Select how the app will select and use the original frames from the video.'))
self.label_6.setText(_translate('Dialog', 'Frames Handling Mode:'))
self.interpolMethod.setToolTip(_translate('Dialog', 'Select how the app will select and use the original frames from the video.'))
self.interpolMethod.setCurrentText(_translate('Dialog', 'Mode 1: Default (all frames treated the same)'))
self.interpolMethod.setItemText(0, _translate('Dialog', 'Mode 1: Default (all frames treated the same)'))
self.interpolMethod.setItemText(1, _translate('Dialog', 'Mode 2: Remove duplicate frames (may alter animation speed)'))
self.interpolMethod.setItemText(2, _translate('Dialog', "Mode 3: Adaptative Record timestamps then remove duplicate frames (won't alter animation speed)"))
self.interpolMethod.setItemText(3, _translate('Dialog', "Mode 4: Static Record timestamps then remove duplicate frames (won't alter animation speed)"))
self.modeDesc.setText(_translate('Dialog', 'Mode description: asdasda \nsdadasdadsa'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate('Dialog', 'Frame Handling Options'))
self.useHalf.setToolTip(_translate('Dialog', 'This option convert float32 to float16 during calculations, this can save some memory and also speed up some calculations.'))
self.useHalf.setText(_translate('Dialog', 'Use Half-Precision float'))
self.doBenchmark.setToolTip(_translate('Dialog', 'Pytorch will try to optimize the algorithm with your configurations, it take a little longer to start but it can boost perfomance as well.'))
self.doBenchmark.setText(_translate('Dialog', 'Use Pytorch benchmark'))
self.label_32.setToolTip(_translate('Dialog', 'How much frames to process in parallel.'))
self.label_32.setText(_translate('Dialog', 'Batch Size:'))
self.batchSize.setToolTip(_translate('Dialog', 'How much frames to process in parallel.'))
self.label_16.setToolTip(_translate('Dialog', 'Select what card you want to use to process the video.'))
self.label_16.setText(_translate('Dialog', 'Device to use:'))
self.deviceList.setToolTip(_translate('Dialog', 'Select what card you want to use to process the video.'))
self.label_7.setText(_translate('Dialog', 'Depth Awareness Mode:'))
self.animMethod.setItemText(0, _translate('Dialog', 'Real life or 3D: Media with clear depth perception.'))
self.animMethod.setItemText(1, _translate('Dialog', 'Cartoon or anime: Media with little or no depth.'))
self.label_12.setText(_translate('Dialog', 'Interpolation Algorithm:'))
self.intAlgo.setItemText(0, _translate('Dialog', 'Default: 2X/4X/8X; Less Memory; Slower; Cleaner Results'))
self.intAlgo.setItemText(1, _translate('Dialog', '[Not Working] Experimental: 2X/3X/4X/5X/6X/7X/8X; More Memory; Faster; May generate more artifacts;'))
self.label_2.setToolTip(_translate('Dialog', 'Show what the fps is of the selected file.'))
self.label_2.setText(_translate('Dialog', 'Input FPS:'))
self.fpsInput.setToolTip(_translate('Dialog', 'Show what the fps is of the selected file.'))
self.interpolationLevel.setItemText(0, _translate('Dialog', 'Interpolate 2X'))
self.interpolationLevel.setItemText(1, _translate('Dialog', 'Interpolate 4X'))
self.interpolationLevel.setItemText(2, _translate('Dialog', 'Interpolate 8X'))
self.label_5.setText(_translate('Dialog', 'Output FPS: '))
self.label_17.setText(_translate('Dialog', 'Note: GIFS above 50FPS will slow down. (This is a gif limitation)'))
self.label_23.setText(_translate('Dialog', 'PNG files compression:'))
self.pngCompress.setText(_translate('Dialog', '6'))
self.label_24.setText(_translate('Dialog', '1 gives best speed, 9 gives best compression, 0 gives no compression at all'))
self.label_26.setText(_translate('Dialog', 'Not applied when extracting original_frames from mp4'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate('Dialog', 'Interpolation Options'))
self.label_22.setText(_translate('Dialog', '[MP4 ] CRF value:'))
self.crfVal.setToolTip(_translate('Dialog', 'Change the quality of the output video. 16 usually is fine.'))
self.crfVal.setText(_translate('Dialog', '16'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_8), _translate('Dialog', 'Output Options'))
self.dontInterpolateScenes.setToolTip(_translate('Dialog', 'In case of a scene change, interpolation will not occur. Make this value bigger to detect more scenes as a scene change.'))
self.dontInterpolateScenes.setText(_translate('Dialog', "Don't interpolate scenes changes"))
self.label_11.setToolTip(_translate('Dialog', 'In case of a scene change, interpolation will not occur. Make this value bigger to detect more scenes as a scene change.'))
self.label_11.setText(_translate('Dialog', 'Detection sensitivity:'))
self.lineEdit_2.setToolTip(_translate('Dialog', 'In case of a scene change, interpolation will not occur. Make this value bigger to detect more scenes as a scene change.'))
self.lineEdit_2.setInputMask(_translate('Dialog', '000'))
self.lineEdit_2.setText(_translate('Dialog', '10'))
self.verifyScenes.setText(_translate('Dialog', 'Verify Scenes Changes'))
self.cleanInterpol.setToolTip(_translate('Dialog', 'If there is files on the interpolation folder, it will remove before starting the process'))
self.cleanInterpol.setText(_translate('Dialog', 'Delete interpolation Folder At Start'))
self.perfectLoop.setToolTip(_translate('Dialog', 'Interpolate the last frame with the first frame of the movie'))
self.perfectLoop.setText(_translate('Dialog', 'Perfect loop animation [The animation repeat in a perfect loop]'))
self.audioVersion.setText(_translate('Dialog', 'Create a output with audio.'))
self.label_10.setToolTip(_translate('Dialog', 'Create another output video limiting the fps to X'))
self.label_10.setText(_translate('Dialog', 'If FPS exceeds this value. Create another version with this fps. [FPS] = '))
self.fpsLimit.setToolTip(_translate('Dialog', 'Create another output video limiting the fps to X'))
self.fpsLimit.setInputMask(_translate('Dialog', '000'))
self.fpsLimit.setText(_translate('Dialog', '60'))
self.to60.setText(_translate('Dialog', '(If FPS exceeds [FPS]) Create a [FPS] version of movie.'))
self.to60C1.setText(_translate('Dialog', '(If FPS exceeds [FPS]) Intepolate down to [FPS] [Conf 1: Smooth]'))
self.to60C2.setText(_translate('Dialog', '(If FPS exceeds [FPS]) Intepolate down to [FPS] [Conf 2: Sharp]'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate('Dialog', 'Misc Options'))
self.Alpha.setText(_translate('Dialog', ' [Broken] Alpha Transparency\n[GIF need Limit Color pallet to show alpha]:'))
self.alphaOpt.setItemText(0, _translate('Dialog', 'No Alpha'))
self.alphaOpt.setItemText(1, _translate('Dialog', 'With Alpha'))
self.limitPalette.setToolTip(_translate('Dialog', 'If you are outputing to gif, you want to turn on this option.'))
self.limitPalette.setText(_translate('Dialog', 'Limit Color Palette to only use original colors. [Sometimes work better with pixelart]'))
self.checkBox.setText(_translate('Dialog', 'Generate Palette from original frames. [Not working]'))
self.checkBox_2.setText(_translate('Dialog', 'Generate Palette from interpolated frames. [Not working]'))
self.pixelUpscaleDowscaleBefore.setItemText(0, _translate('Dialog', 'Disabled'))
self.pixelUpscaleDowscaleBefore.setItemText(1, _translate('Dialog', '2X'))
self.pixelUpscaleDowscaleBefore.setItemText(2, _translate('Dialog', '3X'))
self.pixelUpscaleDowscaleBefore.setItemText(3, _translate('Dialog', '4X'))
self.pixelUpscaleDowscaleBefore.setItemText(4, _translate('Dialog', '5X'))
self.pixelUpscaleDowscaleBefore.setItemText(5, _translate('Dialog', '6X'))
self.pixelUpscaleDowscaleBefore.setItemText(6, _translate('Dialog', '7X'))
self.pixelUpscaleDowscaleBefore.setItemText(7, _translate('Dialog', '8X'))
self.pixelUpscaleDowscaleBefore.setItemText(8, _translate('Dialog', '9X'))
self.pixelUpscaleDowscaleBefore.setItemText(9, _translate('Dialog', '10X'))
self.label_19.setText(_translate('Dialog', 'Upscale [Nearest neighbor] by X; Do Interpolation; Downscale[Nearest neighbor] by X'))
self.pixelDownscaleUpscaleAfter.setItemText(0, _translate('Dialog', 'Disabled'))
self.pixelDownscaleUpscaleAfter.setItemText(1, _translate('Dialog', '2X'))
self.pixelDownscaleUpscaleAfter.setItemText(2, _translate('Dialog', '3X'))
self.pixelDownscaleUpscaleAfter.setItemText(3, _translate('Dialog', '4X'))
self.pixelDownscaleUpscaleAfter.setItemText(4, _translate('Dialog', '5X'))
self.pixelDownscaleUpscaleAfter.setItemText(5, _translate('Dialog', '6X'))
self.pixelDownscaleUpscaleAfter.setItemText(6, _translate('Dialog', '7X'))
self.pixelDownscaleUpscaleAfter.setItemText(7, _translate('Dialog', '8X'))
self.pixelDownscaleUpscaleAfter.setItemText(8, _translate('Dialog', '9X'))
self.pixelDownscaleUpscaleAfter.setItemText(9, _translate('Dialog', '10X'))
self.label_20.setText(_translate('Dialog', 'Do Interpolation; Downscale [Nearest neighbor] by X; Upscale [Nearest neighbor] by X'))
self.pixelUpscaleAfter.setItemText(0, _translate('Dialog', 'Disabled'))
self.pixelUpscaleAfter.setItemText(1, _translate('Dialog', '2X'))
self.pixelUpscaleAfter.setItemText(2, _translate('Dialog', '3X'))
self.pixelUpscaleAfter.setItemText(3, _translate('Dialog', '4X'))
self.pixelUpscaleAfter.setItemText(4, _translate('Dialog', '5X'))
self.pixelUpscaleAfter.setItemText(5, _translate('Dialog', '6X'))
self.pixelUpscaleAfter.setItemText(6, _translate('Dialog', '7X'))
self.pixelUpscaleAfter.setItemText(7, _translate('Dialog', '8X'))
self.pixelUpscaleAfter.setItemText(8, _translate('Dialog', '9X'))
self.pixelUpscaleAfter.setItemText(9, _translate('Dialog', '10X'))
self.label_21.setText(_translate('Dialog', 'Do Interpolation; Upscale [Nearest neighbor] by X'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.PixelArt), _translate('Dialog', 'PixelArt'))
self.label_15.setText(_translate('Dialog', "Keep in mind that other applications also share the VRAM, it's almost impossible that Dain-App have all the available VRAM only to itself."))
self.label_14.setText(_translate('Dialog', "This application runs exclusively on Video memory and requires a LOT of it. For example, rendering at 720p takes between 10 and 11 GB of Vram. \nThere are two options to address the issue of a video file being too high resolution to handle with your GPU's available Vram capacity (CUDA out of memory error message). \nThe faster and easier of the options is to downscale the video to a resolution your GPU can handle. Simply check the downscale box and enter the new height,\nthe width will be calculated automatically based on the aspect ratio."))
self.useResize.setText(_translate('Dialog', 'Downscale Video:'))
self.label_3.setText(_translate('Dialog', 'New Height:'))
self.label_13.setText(_translate('Dialog', 'The slower more complicated solution is to use the Frame Splitter, but it will let you render the full resolution of your video.\nThis will lower the Vram load by splitting each frame being into pieces (Number of divisions), rendering each piece one at a time, then stitching them back together.\nHowever, this method causes lines crossing the borders of the pieces to not line up properly, creating a visible grid pattern where the borders of the pieces are. Padding is used to counteract this.\nPadding is additional space beyond the section size that overlaps with adjacent pieces and is used to ensure the lines between the frames pieces line up properly.\nKeep inscreasing the division size until the Out of Memory error is fixed.'))
self.useSplit.setText(_translate('Dialog', 'Split frames into sections:'))
self.label_8.setText(_translate('Dialog', 'Number of divisions X:'))
self.splitSizeX.setText(_translate('Dialog', '2'))
self.splitSizeX.setPlaceholderText(_translate('Dialog', 'Section Size'))
self.label_25.setText(_translate('Dialog', 'Number of divisions Y:'))
self.splitSizeY.setText(_translate('Dialog', '2'))
self.label_9.setText(_translate('Dialog', 'Section Padding[px]:'))
self.splitPad.setText(_translate('Dialog', '150'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate('Dialog', 'Fix OutOfMemory Options'))
self.label_4.setText(_translate('Dialog', 'Color Scheme: '))
self.brightBtn.setText(_translate('Dialog', 'Bright Mode'))
self.darkBtn.setText(_translate('Dialog', 'Dark Mode'))
self.loadPlace.setText(_translate('Dialog', 'Load Preset'))
self.deletePlace.setText(_translate('Dialog', 'Delete Preset'))
self.placeholderName.setPlaceholderText(_translate('Dialog', 'Placeholder name'))
self.savePlace.setText(_translate('Dialog', 'Save New Preset'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_6), _translate('Dialog', 'Application Style'))
self.textEdit.setHtml(_translate('Dialog', '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">\n<html><head><meta name="qrichtext" content="1" /><style type="text/css">\np, li { white-space: pre-wrap; }\n</style></head><body style=" font-family:\'MS Shell Dlg 2\'; font-size:8pt; font-weight:400; font-style:normal;">\n<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">How do i start?</p>\n<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Dain-App is slow to render long videos or videos with great quality,if you are new to it start with something small like a gif or a 3 second video before starting to use the application.</p>\n<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">The simplest test you can do is just select a "input file"e and "output Folder" and press "Perform all steps", this should create your first interpolation in the selected folder.</p>\n<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>\n<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>\n<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">I Get Out Of memory Error even with the split option turned on?</p>\n<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">You need to add even more splits.</p></body></html>'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate('Dialog', 'FAQ'))
self.fastMode.setToolTip(_translate('Dialog', "<html><head/><body><p>Will make interpolation a lot faster and use less Vram. But it also produce worse results. Good if you don't care much for the final quality or is testing configurations.</p><p>Try it with bigger batch sizes to gain even more speed.</p></body></html>"))
self.fastMode.setText(_translate('Dialog', 'Fast Mode'))
self.dontCleanCache.setText(_translate('Dialog', "Don't clean CUDA cache between frames"))
self.ffmpegPrint.setText(_translate('Dialog', 'Print on console FFMPEG messages.'))
self.onlyInterpolateMissing.setText(_translate('Dialog', '[For internal debugging] Only interpolate deleted frames.'))
self.debugKeepDuplicates.setText(_translate('Dialog', "[For internal debugging] Don't use mpdecimate"))
self.label_31.setText(_translate('Dialog', 'Motion Blur {WIP}:'))
self.label_30.setToolTip(_translate('Dialog', '<html><head/><body><p>[Not work with fast mode enabled]</p><p>[Still WIP]</p><p>0 = Disabled</p><p>2 = Subtle blur</p><p>5 = Small blur</p><p>10 = A lot of blur</p><p>Above 10 = Absurd blur</p></body></html>'))
self.label_30.setText(_translate('Dialog', 'Motion Blur Force [0 = Disable]:'))
self.smoothFlow.setToolTip(_translate('Dialog', '<html><head/><body><p>[Not work with fast mode enabled]</p><p>[Still WIP]</p><p>0 = Disabled</p><p>2 = Subtle blur</p><p>5 = Small blur</p><p>10 = A lot of blur</p><p>Above 10 = Absurd blur</p></body></html>'))
self.label_29.setToolTip(_translate('Dialog', '<html><head/><body><p>0 = All motion create motion blur</p><p>5 = Almost all motion create blur</p><p>15 = Faster motions create blur</p><p>Above 20 = Only real fast motions create blur</p></body></html>'))
self.label_29.setText(_translate('Dialog', 'Motion Blur Threshold'))
self.flowForce.setToolTip(_translate('Dialog', '<html><head/><body><p>0 = All motion create motion blur</p><p>5 = Almost all motion create blur</p><p>15 = Faster motions create blur</p><p>Above 20 = Only real fast motions create blur</p></body></html>'))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_7), _translate('Dialog', 'Beta Options'))
self.renderBtn.setText(_translate('Dialog', 'Perform all steps: Render'))
self.renderFrames.setText(_translate('Dialog', 'Step 1: Split source video into frames'))
self.renderInterpolation.setText(_translate('Dialog', 'Step 2: Feed source frames to DAIN'))
self.renderVideo.setText(_translate('Dialog', 'Step 3: Convert DAIN frames to video'))
self.tabWidget.setCurrentIndex(4)
QtCore.QMetaObject.connectSlotsByName(Dialog)
Dialog.setTabOrder(self.renderFrames, self.patreonBtn)
Dialog.setTabOrder(self.patreonBtn, self.renderVideo)
Dialog.setTabOrder(self.renderVideo, self.renderBtn)
Dialog.setTabOrder(self.renderBtn, self.discBtn)
Dialog.setTabOrder(self.discBtn, self.radioInputVideos)
Dialog.setTabOrder(self.radioInputVideos, self.radioInputPNG)
Dialog.setTabOrder(self.radioInputPNG, self.radioResumeRender)
Dialog.setTabOrder(self.radioResumeRender, self.inputFileBtn)
Dialog.setTabOrder(self.inputFileBtn, self.inputFolderBtn)
Dialog.setTabOrder(self.inputFolderBtn, self.inputResumeFolder)
Dialog.setTabOrder(self.inputResumeFolder, self.renderInterpolation)
Dialog.setTabOrder(self.renderInterpolation, self.exportType)
Dialog.setTabOrder(self.exportType, self.outputFolderBtn)
Dialog.setTabOrder(self.outputFolderBtn, self.animMethod)
Dialog.setTabOrder(self.animMethod, self.alphaOpt)
Dialog.setTabOrder(self.alphaOpt, self.intAlgo)
Dialog.setTabOrder(self.intAlgo, self.fpsInput)
Dialog.setTabOrder(self.fpsInput, self.interpolationLevel)
Dialog.setTabOrder(self.interpolationLevel, self.outputFps)
Dialog.setTabOrder(self.outputFps, self.useSplit)
Dialog.setTabOrder(self.useSplit, self.splitSizeX)
Dialog.setTabOrder(self.splitSizeX, self.splitPad)
Dialog.setTabOrder(self.splitPad, self.useResize)
Dialog.setTabOrder(self.useResize, self.widthValue)
Dialog.setTabOrder(self.widthValue, self.dontInterpolateScenes)
Dialog.setTabOrder(self.dontInterpolateScenes, self.lineEdit_2)
Dialog.setTabOrder(self.lineEdit_2, self.verifyScenes)
Dialog.setTabOrder(self.verifyScenes, self.cleanInterpol)
Dialog.setTabOrder(self.cleanInterpol, self.fpsLimit)
Dialog.setTabOrder(self.fpsLimit, self.to60)
Dialog.setTabOrder(self.to60, self.to60C1)
Dialog.setTabOrder(self.to60C1, self.to60C2)
Dialog.setTabOrder(self.to60C2, self.tabWidget)
Dialog.setTabOrder(self.tabWidget, self.interpolMethod)
Dialog.setTabOrder(self.interpolMethod, self.textEdit)
|
Dain-App
|
positive
|
def __getitem__(self, idx):
    """Return one dataset sample as (input_tensor, target_tensor, filename)."""
    # NOTE(review): the <DeepExtract> markers below are dataset annotations
    # delimiting an inlined helper body; they are not executable Python.
    <DeepExtract>
    idx = self._get_index(idx)
    # Load the input/target pair; the strategy depends on how the dataset is
    # stored (in-memory benchmark arrays, image files, or .npy files).
    if self.benchmark:
        img_input = self.images_input[idx]
        img_tar = self.images_tar[idx]
        filename = str(idx + 1)
    elif self.args.ext == 'img':
        # Image files on disk, decoded on demand.
        img_input = imageio.imread(self.images_input[idx])
        img_tar = imageio.imread(self.images_tar[idx])
        filename = self.images_tar[idx]
    elif self.args.ext.find('sep') >= 0:
        # Pre-decoded arrays saved as separate .npy files.
        img_input = np.load(self.images_input[idx])
        img_tar = np.load(self.images_tar[idx])
        filename = self.images_tar[idx]
    else:
        img_input = self.images_input[idx]
        img_tar = self.images_tar[idx]
        filename = str(idx + 1)
    # No-op self-assignment left over from automated inlining.
    (img_input, img_tar, filename) = (img_input, img_tar, filename)
    </DeepExtract>
    # Normalise channel count to the configured number of colors.
    (img_input, img_tar) = common.set_channel([img_input, img_tar], self.args.n_colors)
    <DeepExtract>
    patch_size = self.args.patch_size
    scale = self.scale
    if self.train:
        # Training: random patch crop, augmentation, then noise injection
        # controlled by self.sigma (per helper name) — TODO confirm semantics.
        (img_input, img_tar) = common.get_patch(img_input, img_tar, patch_size, scale)
        (img_input, img_tar) = common.augment([img_input, img_tar])
        img_input = common.add_noise_shuhang(img_input, self.sigma)
    else:
        # Evaluation: crop the target to align with the scaled input size.
        (ih, iw) = img_input.shape[0:2]
        img_tar = img_tar[0:ih * scale, 0:iw * scale]
        (img_input, img_tar) = (img_input, img_tar)
    </DeepExtract>
    (input_tensor, tar_tensor) = common.np2Tensor([img_input, img_tar], self.args.rgb_range)
    return (input_tensor, tar_tensor, filename)
|
def __getitem__(self, idx):
    """Return one dataset sample as (input_tensor, target_tensor, filename).

    The input/target pair is loaded according to the dataset's storage
    format, channel-normalised, then either patch-cropped/augmented/noised
    (training) or size-aligned (evaluation) before tensor conversion.
    """
    idx = self._get_index(idx)
    # Load the input/target pair; the strategy depends on how the dataset is
    # stored (in-memory benchmark arrays, image files, or .npy files).
    if self.benchmark:
        img_input = self.images_input[idx]
        img_tar = self.images_tar[idx]
        filename = str(idx + 1)
    elif self.args.ext == 'img':
        # Image files on disk, decoded on demand.
        img_input = imageio.imread(self.images_input[idx])
        img_tar = imageio.imread(self.images_tar[idx])
        filename = self.images_tar[idx]
    elif self.args.ext.find('sep') >= 0:
        # Pre-decoded arrays saved as separate .npy files.
        img_input = np.load(self.images_input[idx])
        img_tar = np.load(self.images_tar[idx])
        filename = self.images_tar[idx]
    else:
        img_input = self.images_input[idx]
        img_tar = self.images_tar[idx]
        filename = str(idx + 1)
    # Normalise channel count to the configured number of colors.
    (img_input, img_tar) = common.set_channel([img_input, img_tar], self.args.n_colors)
    patch_size = self.args.patch_size
    scale = self.scale
    if self.train:
        # Training: random patch crop, augmentation, then noise injection
        # controlled by self.sigma (per helper name) — TODO confirm semantics.
        (img_input, img_tar) = common.get_patch(img_input, img_tar, patch_size, scale)
        (img_input, img_tar) = common.augment([img_input, img_tar])
        img_input = common.add_noise_shuhang(img_input, self.sigma)
    else:
        # Evaluation: crop the target to align with the scaled input size.
        (ih, iw) = img_input.shape[0:2]
        img_tar = img_tar[0:ih * scale, 0:iw * scale]
    (input_tensor, tar_tensor) = common.np2Tensor([img_input, img_tar], self.args.rgb_range)
    return (input_tensor, tar_tensor, filename)
|
dhp
|
positive
|
def get_rules(self):
    """Collect and classify every t_* lexer rule found in self.ldict.

    Populates per-state tables: funcsym (function rules, sorted by source
    line), strsym (string rules, sorted longest-first), ignore, errorf,
    eoff, and the toknames map.  Logs and sets self.error on bad rules.
    """
    # NOTE(review): the <DeepExtract> markers below are dataset annotations
    # delimiting an inlined helper body; they are not executable Python.
    tsymbols = [f for f in self.ldict if f[:2] == 't_']
    self.toknames = {}
    self.funcsym = {}
    self.strsym = {}
    self.ignore = {}
    self.errorf = {}
    self.eoff = {}
    for s in self.stateinfo:
        self.funcsym[s] = []
        self.strsym[s] = []
    if len(tsymbols) == 0:
        self.log.error('No rules of the form t_rulename are defined')
        self.error = True
        return
    for f in tsymbols:
        t = self.ldict[f]
        <DeepExtract>
        nonstate = 1
        # Split 't_<state>..._NAME' into target states and the token name.
        parts = f.split('_')
        for (i, part) in enumerate(parts[1:], 1):
            if part not in self.stateinfo and part != 'ANY':
                break
        if i > 1:
            states = tuple(parts[1:i])
        else:
            states = ('INITIAL',)
        if 'ANY' in states:
            # 'ANY' expands to every declared lexer state.
            states = tuple(self.stateinfo)
        tokenname = '_'.join(parts[i:])
        (states, tokname) = (states, tokenname)
        </DeepExtract>
        self.toknames[f] = tokname
        if hasattr(t, '__call__'):
            # Function rules: special names route to dedicated tables.
            if tokname == 'error':
                for s in states:
                    self.errorf[s] = t
            elif tokname == 'eof':
                for s in states:
                    self.eoff[s] = t
            elif tokname == 'ignore':
                # t_ignore must be a string, not a function.
                line = t.__code__.co_firstlineno
                file = t.__code__.co_filename
                self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                self.error = True
            else:
                for s in states:
                    self.funcsym[s].append((f, t))
        elif isinstance(t, StringTypes):
            if tokname == 'ignore':
                for s in states:
                    self.ignore[s] = t
                if '\\' in t:
                    self.log.warning("%s contains a literal backslash '\\'", f)
            elif tokname == 'error':
                # t_error must be a function, not a string.
                self.log.error("Rule '%s' must be defined as a function", f)
                self.error = True
            else:
                for s in states:
                    self.strsym[s].append((f, t))
        else:
            self.log.error('%s not defined as a function or string', f)
            self.error = True
    # Function rules fire in definition order; string rules longest-first.
    for f in self.funcsym.values():
        f.sort(key=lambda x: x[1].__code__.co_firstlineno)
    for s in self.strsym.values():
        s.sort(key=lambda x: len(x[1]), reverse=True)
|
def get_rules(self):
    """Collect and classify every t_* lexer rule found in self.ldict.

    Populates per-state tables: funcsym (function rules, sorted by source
    line), strsym (string rules, sorted longest-first), ignore, errorf,
    eoff, and the toknames map (rule name -> token name).  Logs and sets
    self.error on malformed rules.
    """
    tsymbols = [f for f in self.ldict if f[:2] == 't_']
    self.toknames = {}
    self.funcsym = {}
    self.strsym = {}
    self.ignore = {}
    self.errorf = {}
    self.eoff = {}
    for s in self.stateinfo:
        self.funcsym[s] = []
        self.strsym[s] = []
    if len(tsymbols) == 0:
        self.log.error('No rules of the form t_rulename are defined')
        self.error = True
        return
    for f in tsymbols:
        t = self.ldict[f]
        # Split 't_<state>..._NAME' into its target states and token name.
        parts = f.split('_')
        for (i, part) in enumerate(parts[1:], 1):
            if part not in self.stateinfo and part != 'ANY':
                break
        if i > 1:
            states = tuple(parts[1:i])
        else:
            states = ('INITIAL',)
        if 'ANY' in states:
            # 'ANY' expands to every declared lexer state.
            states = tuple(self.stateinfo)
        tokname = '_'.join(parts[i:])
        self.toknames[f] = tokname
        if hasattr(t, '__call__'):
            # Function rules: special token names route to dedicated tables.
            if tokname == 'error':
                for s in states:
                    self.errorf[s] = t
            elif tokname == 'eof':
                for s in states:
                    self.eoff[s] = t
            elif tokname == 'ignore':
                # t_ignore must be a string, not a function.
                line = t.__code__.co_firstlineno
                file = t.__code__.co_filename
                self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                self.error = True
            else:
                for s in states:
                    self.funcsym[s].append((f, t))
        elif isinstance(t, StringTypes):
            if tokname == 'ignore':
                for s in states:
                    self.ignore[s] = t
                if '\\' in t:
                    self.log.warning("%s contains a literal backslash '\\'", f)
            elif tokname == 'error':
                # t_error must be a function, not a string.
                self.log.error("Rule '%s' must be defined as a function", f)
                self.error = True
            else:
                for s in states:
                    self.strsym[s].append((f, t))
        else:
            self.log.error('%s not defined as a function or string', f)
            self.error = True
    # Function rules fire in definition order; string rules longest-first.
    for f in self.funcsym.values():
        f.sort(key=lambda x: x[1].__code__.co_firstlineno)
    for s in self.strsym.values():
        s.sort(key=lambda x: len(x[1]), reverse=True)
|
booleannet
|
positive
|
def fibonacci_support(px):
    """Percentage distance from the last price to the nearest Fibonacci
    retracement levels computed over the prior history px[:-1].

    Returns (lower_dist, upper_dist); -1 marks a side with no level
    (last price outside the retracement band).
    """
    # NOTE(review): the <DeepExtract> markers below are dataset annotations
    # delimiting inlined copies of these helpers; they are not executable
    # Python (the helpers themselves are left unused by the inlined code).
    def fibonacci_levels(px):
        # Standard retracement ratios over the min/max price range.
        return [min(px) + l * (max(px) - min(px)) for l in [0, 0.236, 0.382, 0.5, 0.618, 1]]
    def find_interval(x, val):
        # Index of the interval of sorted x containing val;
        # -1 below the band, 99 above it.
        return (-1 if val < x[0] else 99) if val < x[0] or val > x[-1] else max(bisect.bisect_left(x, val) - 1, 0)
    last_price = px[-1]
    lower_dist = upper_dist = 0
    <DeepExtract>
    # Levels are built from the history excluding the latest price.
    sups = [min(px[:-1]) + l * (max(px[:-1]) - min(px[:-1])) for l in [0, 0.236, 0.382, 0.5, 0.618, 1]]
    </DeepExtract>
    <DeepExtract>
    idx = (-1 if last_price < sups[0] else 99) if last_price < sups[0] or last_price > sups[-1] else max(bisect.bisect_left(sups, last_price) - 1, 0)
    </DeepExtract>
    if idx == -1:
        # Price below the band: only an upper level exists.
        lower_dist = -1
        upper_dist = round(100.0 * (sups[0] / last_price - 1), 2)
    elif idx == 99:
        # Price above the band: only a lower level exists.
        lower_dist = round(100.0 * (last_price / sups[-1] - 1), 2)
        upper_dist = -1
    else:
        lower_dist = round(100.0 * (last_price / sups[idx] - 1), 2)
        upper_dist = round(100.0 * (sups[idx + 1] / last_price - 1), 2)
    return (lower_dist, upper_dist)
|
def fibonacci_support(px):
    """Percentage distance from the last price to the nearest Fibonacci
    retracement levels computed over the prior history px[:-1].

    Returns (lower_dist, upper_dist); -1 marks a side with no level
    (last price outside the retracement band).
    """
    def fibonacci_levels(px):
        # Standard retracement ratios over the min/max price range.
        return [min(px) + l * (max(px) - min(px)) for l in [0, 0.236, 0.382, 0.5, 0.618, 1]]

    def find_interval(x, val):
        # Index of the interval of sorted x containing val;
        # -1 below the band, 99 above it.
        return (-1 if val < x[0] else 99) if val < x[0] or val > x[-1] else max(bisect.bisect_left(x, val) - 1, 0)

    last_price = px[-1]
    # Levels are built from the history excluding the latest price.
    sups = fibonacci_levels(px[:-1])
    idx = find_interval(sups, last_price)
    if idx == -1:
        # Price below the band: only an upper level exists.
        lower_dist = -1
        upper_dist = round(100.0 * (sups[0] / last_price - 1), 2)
    elif idx == 99:
        # Price above the band: only a lower level exists.
        lower_dist = round(100.0 * (last_price / sups[-1] - 1), 2)
        upper_dist = -1
    else:
        lower_dist = round(100.0 * (last_price / sups[idx] - 1), 2)
        upper_dist = round(100.0 * (sups[idx + 1] / last_price - 1), 2)
    return (lower_dist, upper_dist)
|
blueshift-demo-strategies
|
positive
|
def _setup(self):
context = aq_inner(self.context)
self._ts = getToolByName(context, 'translation_service')
self.url_quote_plus = quote_plus
self.first_weekday = api.portal.get_registry_record('plone.first_weekday')
self.now = localtime()
<DeepExtract>
session = None
request = self.request
year = request.get('year', None)
month = request.get('month', None)
use_session = False
if use_session:
session = request.get('SESSION', None)
if session:
if not year:
year = session.get('calendar_year', None)
if not month:
month = session.get('calendar_month', None)
if not year:
year = self.now[0]
if not month:
month = self.now[1]
try:
(year, month) = (int(year), int(month))
except (TypeError, ValueError):
(year, month) = self.now[:2]
if session:
session.set('calendar_year', year)
session.set('calendar_month', month)
self.yearmonth = yearmonth = (year, month)
</DeepExtract>
self.year = year = yearmonth[0]
self.month = month = yearmonth[1]
self.showPrevMonth = yearmonth > (self.now[0] - 1, self.now[1])
self.showNextMonth = yearmonth < (self.now[0] + 1, self.now[1])
<DeepExtract>
if month == 0 or month == 1:
(month, year) = (12, year - 1)
else:
month -= 1
(self.prevMonthYear, self.prevMonthMonth) = (year, month)
</DeepExtract>
<DeepExtract>
if month == 12:
(month, year) = (1, year + 1)
else:
month += 1
(self.nextMonthYear, self.nextMonthMonth) = (year, month)
</DeepExtract>
self.monthName = PLMF(self._ts.month_msgid(month), default=self._ts.month_english(month))
|
def _setup(self):
    """Resolve and cache the calendar state (displayed year/month, prev/next
    month, navigation flags and localized month name) on the view."""
    context = aq_inner(self.context)
    self._ts = getToolByName(context, 'translation_service')
    self.url_quote_plus = quote_plus
    # First weekday as configured in the Plone registry.
    self.first_weekday = api.portal.get_registry_record('plone.first_weekday')
    self.now = localtime()
    # --- resolve the (year, month) to display ---------------------------
    session = None
    request = self.request
    year = request.get('year', None)
    month = request.get('month', None)
    # Session persistence is disabled; flip use_session to True to remember
    # the last viewed month across requests.
    use_session = False
    if use_session:
        session = request.get('SESSION', None)
    if session:
        if not year:
            year = session.get('calendar_year', None)
        if not month:
            month = session.get('calendar_month', None)
    # Fall back to today's date, then coerce to ints defensively -- the
    # request values may be arbitrary strings.
    if not year:
        year = self.now[0]
    if not month:
        month = self.now[1]
    try:
        (year, month) = (int(year), int(month))
    except (TypeError, ValueError):
        (year, month) = self.now[:2]
    if session:
        session.set('calendar_year', year)
        session.set('calendar_month', month)
    self.yearmonth = yearmonth = (year, month)
    self.year = year = yearmonth[0]
    self.month = month = yearmonth[1]
    # Allow navigating at most roughly one year back/forward from today.
    self.showPrevMonth = yearmonth > (self.now[0] - 1, self.now[1])
    self.showNextMonth = yearmonth < (self.now[0] + 1, self.now[1])
    # Previous month, wrapping into December of the previous year.  month
    # can be 0 here when the request passed the string "0" (truthy, so it
    # survives the fallback above), hence the explicit == 0 check.
    if month == 0 or month == 1:
        (month, year) = (12, year - 1)
    else:
        month -= 1
    (self.prevMonthYear, self.prevMonthMonth) = (year, month)
    # Next month, computed from the already-decremented month/year; the two
    # adjustments cancel out, leaving month/year at the displayed values.
    if month == 12:
        (month, year) = (1, year + 1)
    else:
        month += 1
    (self.nextMonthYear, self.nextMonthMonth) = (year, month)
    # Localized month name for the displayed month.
    self.monthName = PLMF(self._ts.month_msgid(month), default=self._ts.month_english(month))
|
collective.cover
|
positive
|
def __init__(self, input_depth, output_depth, expansion, stride, bn_type='bn', kernel=3, width_divisor=1, shuffle_type=None, pw_group=1, se=False, cdw=False, dw_skip_bn=False, dw_skip_relu=False):
super(IRFBlock, self).__init__()
assert kernel in [1, 3, 5, 7], kernel
self.use_res_connect = stride == 1 and input_depth == output_depth
self.output_depth = output_depth
mid_depth = int(input_depth * expansion)
<DeepExtract>
ret = int(mid_depth)
if width_divisor > 0 and mid_depth % width_divisor != 0:
ret = int((_py2_round(mid_depth / width_divisor) or width_divisor) * width_divisor)
mid_depth = ret
</DeepExtract>
self.pw = ConvBNRelu(input_depth, mid_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu='relu', bn_type=bn_type, group=pw_group)
<DeepExtract>
assert stride in [1, 2, 4] or stride in [-1, -2, -4] or (isinstance(stride, tuple) and all((x in [-1, -2, -4] for x in stride)))
scales = stride
ret = None
if isinstance(stride, tuple) or stride < 0:
scales = [-x for x in stride] if isinstance(stride, tuple) else -stride
stride = 1
ret = Upsample(scale_factor=scales, mode='nearest', align_corners=None)
(self.upscale, stride) = (ret, stride)
</DeepExtract>
if kernel == 1:
self.dw = nn.Sequential()
elif cdw:
dw1 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu', bn_type=bn_type)
dw2 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=1, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None)
self.dw = nn.Sequential(OrderedDict([('dw1', dw1), ('dw2', dw2)]))
else:
self.dw = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None)
self.pwl = ConvBNRelu(mid_depth, output_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu=None, bn_type=bn_type, group=pw_group)
self.shuffle_type = shuffle_type
if shuffle_type is not None:
self.shuffle = ChannelShuffle(pw_group)
self.se4 = SEModule(output_depth) if se else nn.Sequential()
self.output_depth = output_depth
|
def __init__(self, input_depth, output_depth, expansion, stride, bn_type='bn', kernel=3, width_divisor=1, shuffle_type=None, pw_group=1, se=False, cdw=False, dw_skip_bn=False, dw_skip_relu=False):
    """Inverted residual block: 1x1 expand -> depthwise -> 1x1 project.

    Args:
        input_depth, output_depth: channel counts in and out.
        expansion: channel multiplier for the intermediate depthwise stage.
        stride: spatial stride; a negative value (or tuple of negatives)
            requests nearest-neighbor upsampling by that factor instead.
        bn_type: batch-norm flavor forwarded to ConvBNRelu.
        kernel: depthwise kernel size (1, 3, 5 or 7).
        width_divisor: round the expanded width to a multiple of this.
        shuffle_type: if not None, a ChannelShuffle over pw_group groups is built.
        pw_group: group count for the pointwise convolutions.
        se: append a squeeze-and-excitation module on the output.
        cdw: use two cascaded depthwise convs instead of one.
        dw_skip_bn, dw_skip_relu: drop BN / ReLU after the (last) depthwise conv.
    """
    super(IRFBlock, self).__init__()
    assert kernel in [1, 3, 5, 7], kernel
    # Residual connection only when the spatial size and depth are preserved.
    self.use_res_connect = stride == 1 and input_depth == output_depth
    self.output_depth = output_depth
    # Expanded width, rounded to a multiple of width_divisor (never zero:
    # `or width_divisor` guards against rounding down to 0).
    mid_depth = int(input_depth * expansion)
    if width_divisor > 0 and mid_depth % width_divisor != 0:
        mid_depth = int((_py2_round(mid_depth / width_divisor) or width_divisor) * width_divisor)
    # Pointwise expansion.
    self.pw = ConvBNRelu(input_depth, mid_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu='relu', bn_type=bn_type, group=pw_group)
    # Negative strides request upsampling: build the Upsample op and run the
    # convolutions themselves with stride 1.
    assert stride in [1, 2, 4] or stride in [-1, -2, -4] or (isinstance(stride, tuple) and all((x in [-1, -2, -4] for x in stride)))
    self.upscale = None
    if isinstance(stride, tuple) or stride < 0:
        scales = [-x for x in stride] if isinstance(stride, tuple) else -stride
        stride = 1
        self.upscale = Upsample(scale_factor=scales, mode='nearest', align_corners=None)
    # Depthwise stage.
    if kernel == 1:
        self.dw = nn.Sequential()
    elif cdw:
        # Cascaded depthwise: a strided conv followed by a stride-1 conv.
        dw1 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu', bn_type=bn_type)
        dw2 = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=1, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None)
        self.dw = nn.Sequential(OrderedDict([('dw1', dw1), ('dw2', dw2)]))
    else:
        self.dw = ConvBNRelu(mid_depth, mid_depth, kernel=kernel, stride=stride, pad=kernel // 2, group=mid_depth, no_bias=1, use_relu='relu' if not dw_skip_relu else None, bn_type=bn_type if not dw_skip_bn else None)
    # Pointwise linear projection (no activation).
    self.pwl = ConvBNRelu(mid_depth, output_depth, kernel=1, stride=1, pad=0, no_bias=1, use_relu=None, bn_type=bn_type, group=pw_group)
    self.shuffle_type = shuffle_type
    if shuffle_type is not None:
        self.shuffle = ChannelShuffle(pw_group)
    self.se4 = SEModule(output_depth) if se else nn.Sequential()
|
EveryPixelMatters
|
positive
|
def nextTree(self):
"""
Return the next element in the stream. If out of elements, throw
an exception unless size()==1. If size is 1, then return elements[0].
Return a duplicate node/subtree if stream is out of elements and
size==1. If we've already used the element, dup (dirty bit set).
"""
if self.dirty or (self.cursor >= len(self) and len(self) == 1):
<DeepExtract>
if len(self) == 0:
raise RewriteEmptyStreamException(self.elementDescription)
if self.cursor >= len(self):
if len(self) == 1:
el = self.toTree(self.singleElement)
raise RewriteCardinalityException(self.elementDescription)
if self.singleElement is not None:
self.cursor += 1
el = self.toTree(self.singleElement)
o = self.toTree(self.elements[self.cursor])
self.cursor += 1
el = o
</DeepExtract>
return self.dup(el)
<DeepExtract>
if len(self) == 0:
raise RewriteEmptyStreamException(self.elementDescription)
if self.cursor >= len(self):
if len(self) == 1:
el = self.toTree(self.singleElement)
raise RewriteCardinalityException(self.elementDescription)
if self.singleElement is not None:
self.cursor += 1
el = self.toTree(self.singleElement)
o = self.toTree(self.elements[self.cursor])
self.cursor += 1
el = o
</DeepExtract>
return el
|
def nextTree(self):
    """
    Return the next element in the stream. If out of elements, throw
    an exception unless size()==1. If size is 1, then return elements[0].
    Return a duplicate node/subtree if stream is out of elements and
    size==1. If we've already used the element, dup (dirty bit set).
    """
    def take_next():
        # Equivalent of the runtime's _next(): advance the cursor and
        # return the raw next element, raising on empty / over-read streams.
        if len(self) == 0:
            raise RewriteEmptyStreamException(self.elementDescription)
        if self.cursor >= len(self):
            if len(self) == 1:
                # Sticky single element: keep handing it back.
                return self.toTree(self.singleElement)
            raise RewriteCardinalityException(self.elementDescription)
        if self.singleElement is not None:
            self.cursor += 1
            return self.toTree(self.singleElement)
        o = self.toTree(self.elements[self.cursor])
        self.cursor += 1
        return o

    if self.dirty or (self.cursor >= len(self) and len(self) == 1):
        # Out of elements, or the single element was already used:
        # hand out a duplicate so the same subtree is never reused in place.
        return self.dup(take_next())
    return take_next()
|
cpy
|
positive
|
def __repr__(self):
<DeepExtract>
if not self.value.fields:
sfields = '[]'
sfields = '[\n%s]' % ''.join((f' {_repr_indent(v)}\n' for v in self.value.fields))
</DeepExtract>
return f'<BinPtchEntry {self.path!r} {sfields}>'
|
def __repr__(self):
    """Debug representation: the entry path plus one line per patch field."""
    if not self.value.fields:
        # Without the else below, the '[]' form was unconditionally
        # overwritten and an empty field list rendered as '[\n]'.
        sfields = '[]'
    else:
        sfields = '[\n%s]' % ''.join((f'    {_repr_indent(v)}\n' for v in self.value.fields))
    return f'<BinPtchEntry {self.path!r} {sfields}>'
|
CDTB
|
positive
|
def test_step(self, data):
"""One test step
Arguments:
data {dict of data} -- required keys and values:
'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of context sentences
'X_floor' {LongTensor [batch_size, history_len]} -- floors of context sentences
'Y_floor' {LongTensor [batch_size]} -- floor of response sentence
Returns:
dict of data -- returned keys and values
'symbols' {LongTensor [batch_size, max_decode_len]} -- token ids of response hypothesis
dict of statistics -- returned keys and values
"""
(X, Y) = (data['X'], data['Y'])
(X_floor, Y_floor) = (data['X_floor'], data['Y_floor'])
with torch.no_grad():
<DeepExtract>
data_dict = self._construct_input_output(inputs=X, input_floors=X_floor, outputs=None, output_floors=Y_floor, sample_mode=True)
ctx_embeddings = data_dict['embeddings']
seq_lens = data_dict['seq_lens']
batch_size = ctx_embeddings.size(0)
max_seq_len = max(seq_lens)
min_seq_len = min(seq_lens)
input_embeddings = ctx_embeddings[:, :min_seq_len, :].contiguous()
past = None
symbols = [[] for _ in range(batch_size)]
symbol_logprobs = [[] for _ in range(batch_size)]
early_stop_flags = torch.BoolTensor([False] * batch_size).to(DEVICE)
for step in range(0, self.decode_max_len + max_seq_len - min_seq_len):
early_stop = early_stop_flags.all().item()
if early_stop:
break
new_word_position_id = step + min_seq_len
(step_logits, past) = self._forward(input_embeddings, past)
step_last_logits = step_logits[:, -1, :]
decode_dict = self._step_decode(logits=step_last_logits, gen_type=self.gen_type, top_p=self.top_p, top_k=self.top_k, temp=self.temp)
step_symbol = decode_dict['symbol']
step_last_logprobs = step_last_logits.log_softmax(dim=1)
step_symbol_logprobs = step_last_logprobs.gather(1, step_symbol)
input_embeddings = self._new_step_input_embeddings(new_symbol=step_symbol, position_id=new_word_position_id)
if new_word_position_id < max_seq_len:
for batch_idx in range(batch_size):
seq_len = seq_lens[batch_idx]
if new_word_position_id < seq_len:
input_embeddings.data[batch_idx] = ctx_embeddings.data[batch_idx][new_word_position_id]
for batch_idx in range(batch_size):
seq_len = seq_lens[batch_idx]
if new_word_position_id >= seq_len:
symbols[batch_idx].append(step_symbol[batch_idx].item())
symbol_logprobs[batch_idx].append(step_symbol_logprobs[batch_idx].item())
position_in_response = new_word_position_id >= torch.LongTensor(seq_lens).to(DEVICE)
step_stop_flags = step_symbol.squeeze(1) == self.eos_token_id
step_stop_flags &= position_in_response
early_stop_flags |= step_stop_flags
output_lens = [len(seq) for seq in symbols]
max_output_len = max(output_lens)
symbols = [seq + [self.pad_token_id] * (max_output_len - len(seq)) for seq in symbols]
symbols = torch.LongTensor(symbols).to(DEVICE)
symbol_logprobs = [seq + [-float('inf')] * (max_output_len - len(seq)) for seq in symbol_logprobs]
symbol_logprobs = torch.FloatTensor(symbol_logprobs).to(DEVICE)
sample_dict = {'symbols': symbols, 'symbol_logprobs': symbol_logprobs}
</DeepExtract>
ret_data = {'symbols': sample_dict['symbols'], 'symbol_logprobs': sample_dict['symbol_logprobs']}
ret_stat = {}
return (ret_data, ret_stat)
|
def test_step(self, data):
    """One test step
    Arguments:
        data {dict of data} -- required keys and values:
            'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of context sentences
            'X_floor' {LongTensor [batch_size, history_len]} -- floors of context sentences
            'Y_floor' {LongTensor [batch_size]} -- floor of response sentence
    Returns:
        dict of data -- returned keys and values
            'symbols' {LongTensor [batch_size, max_decode_len]} -- token ids of response hypothesis
        dict of statistics -- returned keys and values
    """
    (X, Y) = (data['X'], data['Y'])
    (X_floor, Y_floor) = (data['X_floor'], data['Y_floor'])
    with torch.no_grad():
        # Build per-sequence context embeddings; outputs=None / sample_mode=True
        # because the response part is generated, not teacher-forced.
        data_dict = self._construct_input_output(inputs=X, input_floors=X_floor, outputs=None, output_floors=Y_floor, sample_mode=True)
        ctx_embeddings = data_dict['embeddings']
        seq_lens = data_dict['seq_lens']
        batch_size = ctx_embeddings.size(0)
        max_seq_len = max(seq_lens)
        min_seq_len = min(seq_lens)
        # Feed the longest common context prefix in one shot; the remainder
        # is processed step by step since contexts have different lengths.
        input_embeddings = ctx_embeddings[:, :min_seq_len, :].contiguous()
        past = None
        symbols = [[] for _ in range(batch_size)]
        symbol_logprobs = [[] for _ in range(batch_size)]
        early_stop_flags = torch.BoolTensor([False] * batch_size).to(DEVICE)
        for step in range(0, self.decode_max_len + max_seq_len - min_seq_len):
            # Stop once every batch item has emitted EOS in its response.
            early_stop = early_stop_flags.all().item()
            if early_stop:
                break
            new_word_position_id = step + min_seq_len
            (step_logits, past) = self._forward(input_embeddings, past)
            step_last_logits = step_logits[:, -1, :]
            # Pick the next token according to the decoding configuration
            # (greedy / top-k / top-p sampling with temperature).
            decode_dict = self._step_decode(logits=step_last_logits, gen_type=self.gen_type, top_p=self.top_p, top_k=self.top_k, temp=self.temp)
            step_symbol = decode_dict['symbol']
            step_last_logprobs = step_last_logits.log_softmax(dim=1)
            step_symbol_logprobs = step_last_logprobs.gather(1, step_symbol)
            input_embeddings = self._new_step_input_embeddings(new_symbol=step_symbol, position_id=new_word_position_id)
            if new_word_position_id < max_seq_len:
                # Sequences whose context extends past this position keep
                # their true context embedding instead of the sampled token.
                for batch_idx in range(batch_size):
                    seq_len = seq_lens[batch_idx]
                    if new_word_position_id < seq_len:
                        input_embeddings.data[batch_idx] = ctx_embeddings.data[batch_idx][new_word_position_id]
            # Record only tokens generated past the end of each context.
            for batch_idx in range(batch_size):
                seq_len = seq_lens[batch_idx]
                if new_word_position_id >= seq_len:
                    symbols[batch_idx].append(step_symbol[batch_idx].item())
                    symbol_logprobs[batch_idx].append(step_symbol_logprobs[batch_idx].item())
            # EOS only counts as a stop signal inside the response region.
            position_in_response = new_word_position_id >= torch.LongTensor(seq_lens).to(DEVICE)
            step_stop_flags = step_symbol.squeeze(1) == self.eos_token_id
            step_stop_flags &= position_in_response
            early_stop_flags |= step_stop_flags
        # Right-pad hypotheses (and logprobs, with -inf) to a rectangular batch.
        output_lens = [len(seq) for seq in symbols]
        max_output_len = max(output_lens)
        symbols = [seq + [self.pad_token_id] * (max_output_len - len(seq)) for seq in symbols]
        symbols = torch.LongTensor(symbols).to(DEVICE)
        symbol_logprobs = [seq + [-float('inf')] * (max_output_len - len(seq)) for seq in symbol_logprobs]
        symbol_logprobs = torch.FloatTensor(symbol_logprobs).to(DEVICE)
        sample_dict = {'symbols': symbols, 'symbol_logprobs': symbol_logprobs}
    ret_data = {'symbols': sample_dict['symbols'], 'symbol_logprobs': sample_dict['symbol_logprobs']}
    ret_stat = {}
    return (ret_data, ret_stat)
|
dialog-processing
|
positive
|
def finalize_hypos(step, bbsz_idx, eos_scores, unfinalized_scores=None):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
unfinalized_scores: A vector containing scores for all
unfinalized hypotheses
"""
assert bbsz_idx.numel() == eos_scores.numel()
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2]
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2]
pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
pos_scores[:, step] = eos_scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for (i, (idx, score)) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
def get_hypo():
nonpad_idxs = src_tokens[sent].ne(self.pad)
hypo_attn = attn_clone[i][nonpad_idxs]
(_, alignment) = hypo_attn.max(dim=0)
return {'tokens': tokens_clone[i], 'score': score, 'attention': hypo_attn, 'alignment': alignment, 'positional_scores': pos_scores[i]}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
elif not self.stop_early and score > worst_finalized[sent]['score']:
worst_idx = worst_finalized[sent]['idx']
if worst_idx is not None:
<DeepExtract>
nonpad_idxs = src_tokens[sent].ne(self.pad)
hypo_attn = attn_clone[i][nonpad_idxs]
(_, alignment) = hypo_attn.max(dim=0)
finalized[sent][worst_idx] = {'tokens': tokens_clone[i], 'score': score, 'attention': hypo_attn, 'alignment': alignment, 'positional_scores': pos_scores[i]}
</DeepExtract>
(idx, s) = min(enumerate(finalized[sent]), key=lambda r: r[1]['score'])
worst_finalized[sent] = {'score': s['score'], 'idx': idx}
newly_finished = []
for (sent, unfin_idx) in sents_seen:
if not finished[sent] and is_finished(sent, step, unfinalized_scores):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
|
def finalize_hypos(step, bbsz_idx, eos_scores, unfinalized_scores=None):
    """
    Finalize the given hypotheses at this step, while keeping the total
    number of finalized hypotheses per sentence <= beam_size.
    Note: the input must be in the desired finalization order, so that
    hypotheses that appear earlier in the input are preferred to those
    that appear later.
    Args:
        step: current time step
        bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
            indicating which hypotheses to finalize
        eos_scores: A vector of the same size as bbsz_idx containing
            scores for each hypothesis
        unfinalized_scores: A vector containing scores for all
            unfinalized hypotheses
    """
    assert bbsz_idx.numel() == eos_scores.numel()
    # Clone the selected token rows, drop the leading EOS column, and write
    # EOS at the finalizing position.
    tokens_clone = tokens.index_select(0, bbsz_idx)
    tokens_clone = tokens_clone[:, 1:step + 2]
    tokens_clone[:, step] = self.eos
    attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2]
    # Convert cumulative scores into per-position scores.
    pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
    pos_scores[:, step] = eos_scores
    pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
    if self.normalize_scores:
        eos_scores /= (step + 1) ** self.len_penalty
    # cum_unfin[i]: number of already-finished sentences before the i-th
    # unfinished one -- maps batch indices back to original sentence indices.
    cum_unfin = []
    prev = 0
    for f in finished:
        if f:
            prev += 1
        else:
            cum_unfin.append(prev)
    sents_seen = set()
    for (i, (idx, score)) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
        unfin_idx = idx // beam_size
        sent = unfin_idx + cum_unfin[unfin_idx]
        sents_seen.add((sent, unfin_idx))

        def get_hypo():
            # Attention restricted to non-pad source positions; the hard
            # alignment is the argmax over those positions.
            nonpad_idxs = src_tokens[sent].ne(self.pad)
            hypo_attn = attn_clone[i][nonpad_idxs]
            (_, alignment) = hypo_attn.max(dim=0)
            return {'tokens': tokens_clone[i], 'score': score, 'attention': hypo_attn, 'alignment': alignment, 'positional_scores': pos_scores[i]}
        if len(finalized[sent]) < beam_size:
            finalized[sent].append(get_hypo())
        elif not self.stop_early and score > worst_finalized[sent]['score']:
            # Replace the worst finalized hypothesis with this better one,
            # then recompute which remaining hypothesis is worst.
            worst_idx = worst_finalized[sent]['idx']
            if worst_idx is not None:
                finalized[sent][worst_idx] = get_hypo()
            (idx, s) = min(enumerate(finalized[sent]), key=lambda r: r[1]['score'])
            worst_finalized[sent] = {'score': s['score'], 'idx': idx}
    newly_finished = []
    for (sent, unfin_idx) in sents_seen:
        # A sentence is finished once it has enough finalized hypotheses.
        if not finished[sent] and is_finished(sent, step, unfinalized_scores):
            finished[sent] = True
            newly_finished.append(unfin_idx)
    return newly_finished
|
crosentgec
|
positive
|
def boot_plr(y, d, thetas, ses, all_l_hat, all_m_hat, all_g_hat, all_smpls, score, bootstrap, n_rep_boot, n_rep=1, apply_cross_fitting=True):
all_boot_theta = list()
all_boot_t_stat = list()
for i_rep in range(n_rep):
smpls = all_smpls[i_rep]
if apply_cross_fitting:
n_obs = len(y)
else:
test_index = smpls[0][1]
n_obs = len(test_index)
weights = draw_weights(bootstrap, n_rep_boot, n_obs)
<DeepExtract>
(y_minus_l_hat, d_minus_m_hat, y_minus_g_hat) = compute_plr_residuals(y, d, all_l_hat[i_rep], all_m_hat[i_rep], all_g_hat[i_rep], smpls)
if apply_cross_fitting:
if score == 'partialling out':
J = np.mean(-np.multiply(d_minus_m_hat, d_minus_m_hat))
else:
assert score == 'IV-type'
J = np.mean(-np.multiply(d_minus_m_hat, d))
else:
test_index = smpls[0][1]
if score == 'partialling out':
J = np.mean(-np.multiply(d_minus_m_hat[test_index], d_minus_m_hat[test_index]))
else:
assert score == 'IV-type'
J = np.mean(-np.multiply(d_minus_m_hat[test_index], d[test_index]))
if score == 'partialling out':
psi = np.multiply(y_minus_l_hat - d_minus_m_hat * thetas[i_rep], d_minus_m_hat)
else:
assert score == 'IV-type'
psi = np.multiply(y_minus_g_hat - d * thetas[i_rep], d_minus_m_hat)
(boot_theta, boot_t_stat) = boot_manual(psi, J, smpls, ses[i_rep], weights, n_rep_boot, apply_cross_fitting)
(boot_theta, boot_t_stat) = (boot_theta, boot_t_stat)
</DeepExtract>
all_boot_theta.append(boot_theta)
all_boot_t_stat.append(boot_t_stat)
boot_theta = np.hstack(all_boot_theta)
boot_t_stat = np.hstack(all_boot_t_stat)
return (boot_theta, boot_t_stat)
|
def boot_plr(y, d, thetas, ses, all_l_hat, all_m_hat, all_g_hat, all_smpls, score, bootstrap, n_rep_boot, n_rep=1, apply_cross_fitting=True):
    """Multiplier-bootstrap inference for a partially linear regression model.

    For each repetition the score residuals are recomputed, bootstrap weights
    drawn, and the per-repetition bootstrap handled by boot_manual; results
    of all repetitions are concatenated.

    Args:
        y, d: outcome and treatment arrays.
        thetas, ses: point estimate and standard error per repetition.
        all_l_hat, all_m_hat, all_g_hat: nuisance predictions per repetition.
        all_smpls: cross-fitting sample splits per repetition.
        score: 'partialling out' or 'IV-type'.
        bootstrap: weight distribution name passed to draw_weights.
        n_rep_boot: number of bootstrap replications.
        n_rep: number of repetitions (sample splits).
        apply_cross_fitting: whether estimation used all folds or only the
            first test fold.

    Returns:
        (boot_theta, boot_t_stat): arrays stacked over all repetitions.
    """
    all_boot_theta = list()
    all_boot_t_stat = list()
    for i_rep in range(n_rep):
        smpls = all_smpls[i_rep]
        # Without cross-fitting only the first test fold contributes.
        if apply_cross_fitting:
            n_obs = len(y)
        else:
            test_index = smpls[0][1]
            n_obs = len(test_index)
        weights = draw_weights(bootstrap, n_rep_boot, n_obs)
        (y_minus_l_hat, d_minus_m_hat, y_minus_g_hat) = compute_plr_residuals(y, d, all_l_hat[i_rep], all_m_hat[i_rep], all_g_hat[i_rep], smpls)
        # J is the derivative of the score wrt theta; psi is the score itself.
        if apply_cross_fitting:
            if score == 'partialling out':
                J = np.mean(-np.multiply(d_minus_m_hat, d_minus_m_hat))
            else:
                assert score == 'IV-type'
                J = np.mean(-np.multiply(d_minus_m_hat, d))
        else:
            test_index = smpls[0][1]
            if score == 'partialling out':
                J = np.mean(-np.multiply(d_minus_m_hat[test_index], d_minus_m_hat[test_index]))
            else:
                assert score == 'IV-type'
                J = np.mean(-np.multiply(d_minus_m_hat[test_index], d[test_index]))
        if score == 'partialling out':
            psi = np.multiply(y_minus_l_hat - d_minus_m_hat * thetas[i_rep], d_minus_m_hat)
        else:
            assert score == 'IV-type'
            psi = np.multiply(y_minus_g_hat - d * thetas[i_rep], d_minus_m_hat)
        (boot_theta, boot_t_stat) = boot_manual(psi, J, smpls, ses[i_rep], weights, n_rep_boot, apply_cross_fitting)
        all_boot_theta.append(boot_theta)
        all_boot_t_stat.append(boot_t_stat)
    boot_theta = np.hstack(all_boot_theta)
    boot_t_stat = np.hstack(all_boot_t_stat)
    return (boot_theta, boot_t_stat)
|
doubleml-for-py
|
positive
|
def dpCreateRivetFromUI(self, *args):
""" Just collect all information from UI and call the main function to create Rivet setup.
"""
geoToAttach = cmds.textField(self.geoToAttachTF, query=True, text=True)
uvSet = cmds.textField(self.uvSetTF, query=True, text=True)
itemList = cmds.textScrollList(self.itemScrollList, query=True, allItems=True)
attachTranslate = cmds.checkBox(self.attachTCB, query=True, value=True)
attachRotate = cmds.checkBox(self.attachRCB, query=True, value=True)
addFatherGrp = cmds.checkBox(self.fatherGrpCB, query=True, value=True)
addInvert = cmds.checkBox(self.addInvertCB, query=True, value=True)
invT = cmds.checkBox(self.invertTCB, query=True, value=True)
invR = cmds.checkBox(self.invertRCB, query=True, value=True)
<DeepExtract>
self.shapeToAttachList = None
self.shapeToAttach = None
self.cpNode = None
self.tempNoce = None
attrList = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz']
(self.rivetList, togetherList) = ([], [])
isComponent = None
self.masterGrp = None
self.masterCtrl = None
self.scalableGrp = None
allList = cmds.ls(selection=False, type='transform')
if allList:
for node in allList:
if cmds.objExists(node + '.' + MASTER_GRP) and cmds.getAttr(node + '.' + MASTER_GRP) == 1:
self.masterGrp = node
if self.masterGrp:
masterCtrlList = cmds.listConnections(self.masterGrp + '.masterCtrl')
scalableGrpList = cmds.listConnections(self.masterGrp + '.scalableGrp')
if masterCtrlList:
self.masterCtrl = masterCtrlList[0]
if scalableGrpList:
self.scalableGrp = scalableGrpList[0]
createdRivetGrp = False
self.rivetGrp = RIVET_GRP
if not cmds.objExists(RIVET_GRP):
createdRivetGrp = True
self.rivetGrp = cmds.group(name=RIVET_GRP, empty=True)
for attr in attrList:
cmds.setAttr(self.rivetGrp + '.' + attr, lock=True, keyable=False, channelBox=False)
cmds.addAttr(self.rivetGrp, longName='dpRivetGrp', attributeType='bool')
cmds.setAttr(self.rivetGrp + '.dpRivetGrp', 1)
if self.scalableGrp:
cmds.parent(self.rivetGrp, self.scalableGrp)
if cmds.objExists(geoToAttach):
self.shapeToAttachList = cmds.ls(geoToAttach, dag=True, shapes=True)
if self.shapeToAttachList:
self.shapeToAttach = self.shapeToAttachList[0]
self.shapeType = cmds.objectType(self.shapeToAttach)
if itemList:
asked = False
for (i, item) in enumerate(itemList):
if '.vtx' in item or '.cv' in item or '.pt' in item:
if True:
if not asked:
isComponent = cmds.confirmDialog(title='dpRivet on Components', message="How do you want attach vertices, cv's or lattice points?", button=('Individually', 'Together', 'Ignore'), defaultButton='Individually', dismissString='Ignore', cancelButton='Ignore')
asked = True
if isComponent == 'Individually':
cls = cmds.cluster(item, name=item[:item.rfind('.')] + '_' + str(i) + '_Cls')[0] + 'Handle'
clsToRivet = cmds.parent(cls, self.rivetGrp)[0]
self.rivetList.append(clsToRivet)
elif isComponent == 'Together':
togetherList.append(item)
elif isComponent == 'Ignore':
itemList.remove(item)
elif isComponent == 'Ignore':
itemList.remove(item)
elif isComponent == 'Together':
togetherList.append(item)
else:
cls = cmds.cluster(item, name=item[:item.rfind('.')] + '_' + str(i) + '_Cls')[0] + 'Handle'
clsToRivet = cmds.parent(cls, self.rivetGrp)[0]
self.rivetList.append(clsToRivet)
else:
cls = cmds.cluster(item, name=item[:item.rfind('.')] + '_' + str(i) + '_Cls')[0] + 'Handle'
clsToRivet = cmds.parent(cls, self.rivetGrp)[0]
self.rivetList.append(clsToRivet)
elif cmds.objExists(item):
self.rivetList.append(item)
else:
mel.eval('error "Select and add at least one item to be attached as a Rivet, please.";')
if isComponent == 'Together':
cls = cmds.cluster(togetherList, name='dpRivet_Cls')[0] + 'Handle'
clsToRivet = cmds.parent(cls, self.rivetGrp)[0]
self.rivetList.append(clsToRivet)
if not addFatherGrp:
cancelProcess = False
for rivet in self.rivetList:
if cmds.listAttr(rivet, locked=True):
cancelProcess = True
break
for attr in attrList:
if cmds.listConnections(rivet + '.' + attr, source=True, destination=False):
cancelProcess = True
break
if cancelProcess:
if createdRivetGrp:
cmds.delete(self.rivetGrp)
else:
for rivet in self.rivetList:
if not rivet in itemList:
cmds.delete(rivet)
mel.eval('error "Canceled process: items to be Rivet can\'t be animated or have locked attributes, sorry.";')
return
dupGeo = cmds.duplicate(geoToAttach, name=geoToAttach + '_dpRivet_TEMP_Geo')[0]
for attr in attrList:
cmds.setAttr(dupGeo + '.' + attr, lock=False)
if cmds.listRelatives(dupGeo, allParents=True):
cmds.parent(dupGeo, world=True)
cmds.makeIdentity(dupGeo, apply=True)
dupShape = cmds.ls(dupGeo, dag=True, shapes=True)[0]
self.tempNode = cmds.createNode('transform', name=geoToAttach + '_dpRivet_TEMP_Transf', skipSelect=True)
if self.shapeType == 'mesh':
uvSetList = cmds.polyUVSet(dupShape, query=True, allUVSets=True)
if len(uvSetList) > 1:
if not uvSetList[0] == uvSet:
try:
cmds.polyUVSet(dupShape, copy=True, uvSet=uvSet, newUVSet=uvSetList[0])
except:
uvSet = uvSetList[0]
self.cpNode = cmds.createNode('closestPointOnMesh', name=geoToAttach + '_dpRivet_TEMP_CP', skipSelect=True)
cmds.connectAttr(dupShape + '.outMesh', self.cpNode + '.inMesh', force=True)
cmds.connectAttr(self.tempNode + '.translate', self.cpNode + '.inPosition', force=True)
else:
uRange = cmds.getAttr(dupShape + '.minMaxRangeU')[0]
vRange = cmds.getAttr(dupShape + '.minMaxRangeV')[0]
self.cpNode = cmds.createNode('closestPointOnSurface', name=geoToAttach + '_dpRivet_TEMP_CP', skipSelect=True)
cmds.connectAttr(dupShape + '.local', self.cpNode + '.inputSurface', force=True)
for rivet in self.rivetList:
rivetPos = cmds.xform(rivet, query=True, worldSpace=True, rotatePivot=True)
if addFatherGrp:
rivet = cmds.group(rivet, name=rivet + '_Rivet_Grp')
cmds.xform(rivet, worldSpace=True, rotatePivot=(rivetPos[0], rivetPos[1], rivetPos[2]))
cmds.xform(self.tempNode, worldSpace=True, translation=(rivetPos[0], rivetPos[1], rivetPos[2]))
fu = cmds.getAttr(self.cpNode + '.u')
fv = cmds.getAttr(self.cpNode + '.v')
if self.shapeType == 'nurbsSurface':
fu = abs((fu - uRange[0]) / (uRange[1] - uRange[0]))
fv = abs((fv - vRange[0]) / (vRange[1] - vRange[0]))
folTransf = cmds.createNode('transform', name=rivet + '_Fol', parent=self.rivetGrp, skipSelect=True)
folShape = cmds.createNode('follicle', name=rivet + '_FolShape', parent=folTransf, skipSelect=True)
if self.shapeType == 'mesh':
cmds.connectAttr(self.shapeToAttach + '.worldMesh[0]', folShape + '.inputMesh', force=True)
cmds.setAttr(folShape + '.mapSetName', uvSet, type='string')
else:
cmds.connectAttr(self.shapeToAttach + '.local', folShape + '.inputSurface', force=True)
cmds.connectAttr(self.shapeToAttach + '.worldMatrix[0]', folShape + '.inputWorldMatrix', force=True)
cmds.connectAttr(folShape + '.outRotate', folTransf + '.rotate', force=True)
cmds.connectAttr(folShape + '.outTranslate', folTransf + '.translate', force=True)
cmds.setAttr(folShape + '.parameterU', fu)
cmds.setAttr(folShape + '.parameterV', fv)
if attachTranslate and attachRotate:
cmds.parentConstraint(folTransf, rivet, maintainOffset=useOffset, name=rivet + '_PaC')
elif attachTranslate:
cmds.parentConstraint(folTransf, rivet, maintainOffset=useOffset, name=rivet + '_PaC', skipRotate=('x', 'y', 'z'))
elif attachRotate:
cmds.parentConstraint(folTransf, rivet, maintainOffset=useOffset, name=rivet + '_PaC', skipTranslate=('x', 'y', 'z'))
if self.masterCtrl:
cmds.scaleConstraint(self.masterCtrl, folTransf, maintainOffset=True, name=folTransf + '_ScC')
if addInvert:
for rivet in self.rivetList:
self.dpInvertAttrTranformation(rivet, invT, invR)
cmds.delete(dupGeo, self.cpNode, self.tempNode)
else:
mel.eval('error "Load one geometry to attach Rivets on it, please.";')
cmds.select(clear=True)
return folTransf
</DeepExtract>
<DeepExtract>
if cmds.window('dpRivetWindow', query=True, exists=True):
cmds.deleteUI('dpRivetWindow', window=True)
</DeepExtract>
|
def dpCreateRivetFromUI(self, *args):
""" Just collect all information from UI and call the main function to create Rivet setup.
"""
geoToAttach = cmds.textField(self.geoToAttachTF, query=True, text=True)
uvSet = cmds.textField(self.uvSetTF, query=True, text=True)
itemList = cmds.textScrollList(self.itemScrollList, query=True, allItems=True)
attachTranslate = cmds.checkBox(self.attachTCB, query=True, value=True)
attachRotate = cmds.checkBox(self.attachRCB, query=True, value=True)
addFatherGrp = cmds.checkBox(self.fatherGrpCB, query=True, value=True)
addInvert = cmds.checkBox(self.addInvertCB, query=True, value=True)
invT = cmds.checkBox(self.invertTCB, query=True, value=True)
invR = cmds.checkBox(self.invertRCB, query=True, value=True)
self.shapeToAttachList = None
self.shapeToAttach = None
self.cpNode = None
self.tempNoce = None
attrList = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz']
(self.rivetList, togetherList) = ([], [])
isComponent = None
self.masterGrp = None
self.masterCtrl = None
self.scalableGrp = None
allList = cmds.ls(selection=False, type='transform')
if allList:
for node in allList:
if cmds.objExists(node + '.' + MASTER_GRP) and cmds.getAttr(node + '.' + MASTER_GRP) == 1:
self.masterGrp = node
if self.masterGrp:
masterCtrlList = cmds.listConnections(self.masterGrp + '.masterCtrl')
scalableGrpList = cmds.listConnections(self.masterGrp + '.scalableGrp')
if masterCtrlList:
self.masterCtrl = masterCtrlList[0]
if scalableGrpList:
self.scalableGrp = scalableGrpList[0]
createdRivetGrp = False
self.rivetGrp = RIVET_GRP
if not cmds.objExists(RIVET_GRP):
createdRivetGrp = True
self.rivetGrp = cmds.group(name=RIVET_GRP, empty=True)
for attr in attrList:
cmds.setAttr(self.rivetGrp + '.' + attr, lock=True, keyable=False, channelBox=False)
cmds.addAttr(self.rivetGrp, longName='dpRivetGrp', attributeType='bool')
cmds.setAttr(self.rivetGrp + '.dpRivetGrp', 1)
if self.scalableGrp:
cmds.parent(self.rivetGrp, self.scalableGrp)
if cmds.objExists(geoToAttach):
self.shapeToAttachList = cmds.ls(geoToAttach, dag=True, shapes=True)
if self.shapeToAttachList:
self.shapeToAttach = self.shapeToAttachList[0]
self.shapeType = cmds.objectType(self.shapeToAttach)
if itemList:
asked = False
for (i, item) in enumerate(itemList):
if '.vtx' in item or '.cv' in item or '.pt' in item:
if True:
if not asked:
isComponent = cmds.confirmDialog(title='dpRivet on Components', message="How do you want attach vertices, cv's or lattice points?", button=('Individually', 'Together', 'Ignore'), defaultButton='Individually', dismissString='Ignore', cancelButton='Ignore')
asked = True
if isComponent == 'Individually':
cls = cmds.cluster(item, name=item[:item.rfind('.')] + '_' + str(i) + '_Cls')[0] + 'Handle'
clsToRivet = cmds.parent(cls, self.rivetGrp)[0]
self.rivetList.append(clsToRivet)
elif isComponent == 'Together':
togetherList.append(item)
elif isComponent == 'Ignore':
itemList.remove(item)
elif isComponent == 'Ignore':
itemList.remove(item)
elif isComponent == 'Together':
togetherList.append(item)
else:
cls = cmds.cluster(item, name=item[:item.rfind('.')] + '_' + str(i) + '_Cls')[0] + 'Handle'
clsToRivet = cmds.parent(cls, self.rivetGrp)[0]
self.rivetList.append(clsToRivet)
else:
cls = cmds.cluster(item, name=item[:item.rfind('.')] + '_' + str(i) + '_Cls')[0] + 'Handle'
clsToRivet = cmds.parent(cls, self.rivetGrp)[0]
self.rivetList.append(clsToRivet)
elif cmds.objExists(item):
self.rivetList.append(item)
else:
mel.eval('error "Select and add at least one item to be attached as a Rivet, please.";')
if isComponent == 'Together':
cls = cmds.cluster(togetherList, name='dpRivet_Cls')[0] + 'Handle'
clsToRivet = cmds.parent(cls, self.rivetGrp)[0]
self.rivetList.append(clsToRivet)
if not addFatherGrp:
cancelProcess = False
for rivet in self.rivetList:
if cmds.listAttr(rivet, locked=True):
cancelProcess = True
break
for attr in attrList:
if cmds.listConnections(rivet + '.' + attr, source=True, destination=False):
cancelProcess = True
break
if cancelProcess:
if createdRivetGrp:
cmds.delete(self.rivetGrp)
else:
for rivet in self.rivetList:
if not rivet in itemList:
cmds.delete(rivet)
mel.eval('error "Canceled process: items to be Rivet can\'t be animated or have locked attributes, sorry.";')
return
dupGeo = cmds.duplicate(geoToAttach, name=geoToAttach + '_dpRivet_TEMP_Geo')[0]
for attr in attrList:
cmds.setAttr(dupGeo + '.' + attr, lock=False)
if cmds.listRelatives(dupGeo, allParents=True):
cmds.parent(dupGeo, world=True)
cmds.makeIdentity(dupGeo, apply=True)
dupShape = cmds.ls(dupGeo, dag=True, shapes=True)[0]
self.tempNode = cmds.createNode('transform', name=geoToAttach + '_dpRivet_TEMP_Transf', skipSelect=True)
if self.shapeType == 'mesh':
uvSetList = cmds.polyUVSet(dupShape, query=True, allUVSets=True)
if len(uvSetList) > 1:
if not uvSetList[0] == uvSet:
try:
cmds.polyUVSet(dupShape, copy=True, uvSet=uvSet, newUVSet=uvSetList[0])
except:
uvSet = uvSetList[0]
self.cpNode = cmds.createNode('closestPointOnMesh', name=geoToAttach + '_dpRivet_TEMP_CP', skipSelect=True)
cmds.connectAttr(dupShape + '.outMesh', self.cpNode + '.inMesh', force=True)
cmds.connectAttr(self.tempNode + '.translate', self.cpNode + '.inPosition', force=True)
else:
uRange = cmds.getAttr(dupShape + '.minMaxRangeU')[0]
vRange = cmds.getAttr(dupShape + '.minMaxRangeV')[0]
self.cpNode = cmds.createNode('closestPointOnSurface', name=geoToAttach + '_dpRivet_TEMP_CP', skipSelect=True)
cmds.connectAttr(dupShape + '.local', self.cpNode + '.inputSurface', force=True)
for rivet in self.rivetList:
rivetPos = cmds.xform(rivet, query=True, worldSpace=True, rotatePivot=True)
if addFatherGrp:
rivet = cmds.group(rivet, name=rivet + '_Rivet_Grp')
cmds.xform(rivet, worldSpace=True, rotatePivot=(rivetPos[0], rivetPos[1], rivetPos[2]))
cmds.xform(self.tempNode, worldSpace=True, translation=(rivetPos[0], rivetPos[1], rivetPos[2]))
fu = cmds.getAttr(self.cpNode + '.u')
fv = cmds.getAttr(self.cpNode + '.v')
if self.shapeType == 'nurbsSurface':
fu = abs((fu - uRange[0]) / (uRange[1] - uRange[0]))
fv = abs((fv - vRange[0]) / (vRange[1] - vRange[0]))
folTransf = cmds.createNode('transform', name=rivet + '_Fol', parent=self.rivetGrp, skipSelect=True)
folShape = cmds.createNode('follicle', name=rivet + '_FolShape', parent=folTransf, skipSelect=True)
if self.shapeType == 'mesh':
cmds.connectAttr(self.shapeToAttach + '.worldMesh[0]', folShape + '.inputMesh', force=True)
cmds.setAttr(folShape + '.mapSetName', uvSet, type='string')
else:
cmds.connectAttr(self.shapeToAttach + '.local', folShape + '.inputSurface', force=True)
cmds.connectAttr(self.shapeToAttach + '.worldMatrix[0]', folShape + '.inputWorldMatrix', force=True)
cmds.connectAttr(folShape + '.outRotate', folTransf + '.rotate', force=True)
cmds.connectAttr(folShape + '.outTranslate', folTransf + '.translate', force=True)
cmds.setAttr(folShape + '.parameterU', fu)
cmds.setAttr(folShape + '.parameterV', fv)
if attachTranslate and attachRotate:
cmds.parentConstraint(folTransf, rivet, maintainOffset=useOffset, name=rivet + '_PaC')
elif attachTranslate:
cmds.parentConstraint(folTransf, rivet, maintainOffset=useOffset, name=rivet + '_PaC', skipRotate=('x', 'y', 'z'))
elif attachRotate:
cmds.parentConstraint(folTransf, rivet, maintainOffset=useOffset, name=rivet + '_PaC', skipTranslate=('x', 'y', 'z'))
if self.masterCtrl:
cmds.scaleConstraint(self.masterCtrl, folTransf, maintainOffset=True, name=folTransf + '_ScC')
if addInvert:
for rivet in self.rivetList:
self.dpInvertAttrTranformation(rivet, invT, invR)
cmds.delete(dupGeo, self.cpNode, self.tempNode)
else:
mel.eval('error "Load one geometry to attach Rivets on it, please.";')
cmds.select(clear=True)
return folTransf
if cmds.window('dpRivetWindow', query=True, exists=True):
cmds.deleteUI('dpRivetWindow', window=True)
</DeepExtract>
|
dpAutoRigSystem
|
positive
|
def two_stream_rel_attn(h, g, r, mems, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed, attn_mask_h, attn_mask_g, target_mapping, d_model, n_head, d_head, dropout, dropatt, is_training, kernel_initializer, scope='rel_attn'):
"""Two-stream attention with relative positional encoding."""
scale = 1 / d_head ** 0.5
with tf.variable_scope(scope, reuse=False):
if mems is not None and mems.shape.ndims > 1:
cat = tf.concat([mems, h], 0)
else:
cat = h
<DeepExtract>
proj_weight = tf.get_variable('{}/kernel'.format('k'), [d_model, n_head, d_head], dtype=cat.dtype, initializer=kernel_initializer)
head = tf.einsum('ibh,hnd->ibnd', cat, proj_weight)
k_head_h = head
</DeepExtract>
<DeepExtract>
proj_weight = tf.get_variable('{}/kernel'.format('v'), [d_model, n_head, d_head], dtype=cat.dtype, initializer=kernel_initializer)
head = tf.einsum('ibh,hnd->ibnd', cat, proj_weight)
v_head_h = head
</DeepExtract>
<DeepExtract>
proj_weight = tf.get_variable('{}/kernel'.format('r'), [d_model, n_head, d_head], dtype=r.dtype, initializer=kernel_initializer)
head = tf.einsum('ibh,hnd->ibnd', r, proj_weight)
k_head_r = head
</DeepExtract>
<DeepExtract>
proj_weight = tf.get_variable('{}/kernel'.format('q'), [d_model, n_head, d_head], dtype=h.dtype, initializer=kernel_initializer)
head = tf.einsum('ibh,hnd->ibnd', h, proj_weight)
q_head_h = head
</DeepExtract>
<DeepExtract>
ac = tf.einsum('ibnd,jbnd->ijbn', q_head_h + r_w_bias, k_head_h)
bd = tf.einsum('ibnd,jbnd->ijbn', q_head_h + r_r_bias, k_head_r)
bd = rel_shift(bd, klen=tf.shape(ac)[1])
if seg_mat is None:
ef = 0
else:
ef = tf.einsum('ibnd,snd->ibns', q_head_h + r_s_bias, seg_embed)
ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef)
attn_score = (ac + bd + ef) * scale
if attn_mask_h is not None:
attn_score = attn_score - 1e+30 * attn_mask_h
attn_prob = tf.nn.softmax(attn_score, 1)
attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)
attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)
attn_vec_h = attn_vec
</DeepExtract>
<DeepExtract>
proj_o = tf.get_variable('o/kernel', [d_model, n_head, d_head], dtype=h.dtype, initializer=kernel_initializer)
attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec_h, proj_o)
attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)
if residual:
output = tf.contrib.layers.layer_norm(attn_out + h, begin_norm_axis=-1, scope='LayerNorm')
else:
output = tf.contrib.layers.layer_norm(attn_out, begin_norm_axis=-1, scope='LayerNorm')
output_h = output
</DeepExtract>
with tf.variable_scope(scope, reuse=True):
<DeepExtract>
proj_weight = tf.get_variable('{}/kernel'.format('q'), [d_model, n_head, d_head], dtype=g.dtype, initializer=kernel_initializer)
head = tf.einsum('ibh,hnd->ibnd', g, proj_weight)
q_head_g = head
</DeepExtract>
if target_mapping is not None:
q_head_g = tf.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)
<DeepExtract>
ac = tf.einsum('ibnd,jbnd->ijbn', q_head_g + r_w_bias, k_head_h)
bd = tf.einsum('ibnd,jbnd->ijbn', q_head_g + r_r_bias, k_head_r)
bd = rel_shift(bd, klen=tf.shape(ac)[1])
if seg_mat is None:
ef = 0
else:
ef = tf.einsum('ibnd,snd->ibns', q_head_g + r_s_bias, seg_embed)
ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef)
attn_score = (ac + bd + ef) * scale
if attn_mask_g is not None:
attn_score = attn_score - 1e+30 * attn_mask_g
attn_prob = tf.nn.softmax(attn_score, 1)
attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)
attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)
attn_vec_g = attn_vec
</DeepExtract>
attn_vec_g = tf.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)
else:
<DeepExtract>
ac = tf.einsum('ibnd,jbnd->ijbn', q_head_g + r_w_bias, k_head_h)
bd = tf.einsum('ibnd,jbnd->ijbn', q_head_g + r_r_bias, k_head_r)
bd = rel_shift(bd, klen=tf.shape(ac)[1])
if seg_mat is None:
ef = 0
else:
ef = tf.einsum('ibnd,snd->ibns', q_head_g + r_s_bias, seg_embed)
ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef)
attn_score = (ac + bd + ef) * scale
if attn_mask_g is not None:
attn_score = attn_score - 1e+30 * attn_mask_g
attn_prob = tf.nn.softmax(attn_score, 1)
attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)
attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)
attn_vec_g = attn_vec
</DeepExtract>
<DeepExtract>
proj_o = tf.get_variable('o/kernel', [d_model, n_head, d_head], dtype=g.dtype, initializer=kernel_initializer)
attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec_g, proj_o)
attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)
if residual:
output = tf.contrib.layers.layer_norm(attn_out + g, begin_norm_axis=-1, scope='LayerNorm')
else:
output = tf.contrib.layers.layer_norm(attn_out, begin_norm_axis=-1, scope='LayerNorm')
output_g = output
</DeepExtract>
return (output_h, output_g)
|
def two_stream_rel_attn(h, g, r, mems, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed, attn_mask_h, attn_mask_g, target_mapping, d_model, n_head, d_head, dropout, dropatt, is_training, kernel_initializer, scope='rel_attn'):
"""Two-stream attention with relative positional encoding."""
scale = 1 / d_head ** 0.5
with tf.variable_scope(scope, reuse=False):
if mems is not None and mems.shape.ndims > 1:
cat = tf.concat([mems, h], 0)
else:
cat = h
proj_weight = tf.get_variable('{}/kernel'.format('k'), [d_model, n_head, d_head], dtype=cat.dtype, initializer=kernel_initializer)
head = tf.einsum('ibh,hnd->ibnd', cat, proj_weight)
k_head_h = head
proj_weight = tf.get_variable('{}/kernel'.format('v'), [d_model, n_head, d_head], dtype=cat.dtype, initializer=kernel_initializer)
head = tf.einsum('ibh,hnd->ibnd', cat, proj_weight)
v_head_h = head
proj_weight = tf.get_variable('{}/kernel'.format('r'), [d_model, n_head, d_head], dtype=r.dtype, initializer=kernel_initializer)
head = tf.einsum('ibh,hnd->ibnd', r, proj_weight)
k_head_r = head
proj_weight = tf.get_variable('{}/kernel'.format('q'), [d_model, n_head, d_head], dtype=h.dtype, initializer=kernel_initializer)
head = tf.einsum('ibh,hnd->ibnd', h, proj_weight)
q_head_h = head
ac = tf.einsum('ibnd,jbnd->ijbn', q_head_h + r_w_bias, k_head_h)
bd = tf.einsum('ibnd,jbnd->ijbn', q_head_h + r_r_bias, k_head_r)
bd = rel_shift(bd, klen=tf.shape(ac)[1])
if seg_mat is None:
ef = 0
else:
ef = tf.einsum('ibnd,snd->ibns', q_head_h + r_s_bias, seg_embed)
ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef)
attn_score = (ac + bd + ef) * scale
if attn_mask_h is not None:
attn_score = attn_score - 1e+30 * attn_mask_h
attn_prob = tf.nn.softmax(attn_score, 1)
attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)
attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)
attn_vec_h = attn_vec
proj_o = tf.get_variable('o/kernel', [d_model, n_head, d_head], dtype=h.dtype, initializer=kernel_initializer)
attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec_h, proj_o)
attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)
if residual:
output = tf.contrib.layers.layer_norm(attn_out + h, begin_norm_axis=-1, scope='LayerNorm')
else:
output = tf.contrib.layers.layer_norm(attn_out, begin_norm_axis=-1, scope='LayerNorm')
output_h = output
with tf.variable_scope(scope, reuse=True):
proj_weight = tf.get_variable('{}/kernel'.format('q'), [d_model, n_head, d_head], dtype=g.dtype, initializer=kernel_initializer)
head = tf.einsum('ibh,hnd->ibnd', g, proj_weight)
q_head_g = head
if target_mapping is not None:
q_head_g = tf.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping)
ac = tf.einsum('ibnd,jbnd->ijbn', q_head_g + r_w_bias, k_head_h)
bd = tf.einsum('ibnd,jbnd->ijbn', q_head_g + r_r_bias, k_head_r)
bd = rel_shift(bd, klen=tf.shape(ac)[1])
if seg_mat is None:
ef = 0
else:
ef = tf.einsum('ibnd,snd->ibns', q_head_g + r_s_bias, seg_embed)
ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef)
attn_score = (ac + bd + ef) * scale
if attn_mask_g is not None:
attn_score = attn_score - 1e+30 * attn_mask_g
attn_prob = tf.nn.softmax(attn_score, 1)
attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)
attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)
attn_vec_g = attn_vec
attn_vec_g = tf.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping)
else:
ac = tf.einsum('ibnd,jbnd->ijbn', q_head_g + r_w_bias, k_head_h)
bd = tf.einsum('ibnd,jbnd->ijbn', q_head_g + r_r_bias, k_head_r)
bd = rel_shift(bd, klen=tf.shape(ac)[1])
if seg_mat is None:
ef = 0
else:
ef = tf.einsum('ibnd,snd->ibns', q_head_g + r_s_bias, seg_embed)
ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef)
attn_score = (ac + bd + ef) * scale
if attn_mask_g is not None:
attn_score = attn_score - 1e+30 * attn_mask_g
attn_prob = tf.nn.softmax(attn_score, 1)
attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training)
attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)
attn_vec_g = attn_vec
proj_o = tf.get_variable('o/kernel', [d_model, n_head, d_head], dtype=g.dtype, initializer=kernel_initializer)
attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec_g, proj_o)
attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)
if residual:
output = tf.contrib.layers.layer_norm(attn_out + g, begin_norm_axis=-1, scope='LayerNorm')
else:
output = tf.contrib.layers.layer_norm(attn_out, begin_norm_axis=-1, scope='LayerNorm')
output_g = output
return (output_h, output_g)
|
embedding-as-service
|
positive
|
def posterize(pil_img, level):
<DeepExtract>
sample_level(level) = int(sample_level(level) * 4 / 10)
</DeepExtract>
ret = ImageOps.posterize(pil_img, 4 - level)
return ret
|
def posterize(pil_img, level):
sample_level(level) = int(sample_level(level) * 4 / 10)
ret = ImageOps.posterize(pil_img, 4 - level)
return ret
|
AICity2020-VOC-ReID
|
positive
|
def run(self):
ShellHelper.execute_shell(self.flush_logcat_cmd, False, False)
with subprocess.Popen(self.monitor_logcat_cmd, shell=True, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True, encoding='utf-8', errors='ignore') as p:
self.logcat_process = p
current_log = None
current_process_pid = None
current_recording_name = None
for line in p.stdout:
line_cleaned = line.encode('utf-8', 'ignore').decode('utf-8').strip()
line_parts = line_cleaned.split()
if len(line_parts) <= 5:
continue
if self.TEST_STARTED in line and current_log is None and (current_process_pid is None):
current_log = TestLogCat()
current_process_pid = line_parts[self.PID_INDEX]
test_name = re.findall('TestRunner: started:(.+)\\(', line_cleaned)
current_log.test_name = test_name[0].strip()
full_test_package = re.findall('\\((.+)\\)', line_cleaned)
package_parts = full_test_package[0].split('.')
current_log.test_container = package_parts.pop().strip()
current_log.test_full_package = full_test_package[0].strip() + '.' + test_name[0].strip()
if self.should_record_screen:
<DeepExtract>
if self.screen_recording_thread is None or not self.screen_recording_thread.is_alive():
recording_name = current_log.test_name + '-' + str(int(round(time.time() * 1000))) + '.mp4'
self.screen_recording_thread = TestRecordingThread(self.record_screen_cmd, recording_name, self.device)
self.screen_recording_thread.start()
</DeepExtract>
if current_recording_name is not None:
self.recordings.append(current_recording_name)
current_recording_name = self.screen_recording_thread.recording_name
if current_log is not None:
if line_parts[self.PID_INDEX] == current_process_pid:
logcat_line = TestLogCatLine()
date = line_parts[self.DATE_INDEX]
logcat_line.date = date
time_hour = line_parts[self.TIME_INDEX]
logcat_line.time = time_hour
level = line_parts[self.LEVEL_INDEX]
logcat_line.level = level
tag = line_parts[self.TAG_INDEX]
if len(tag) > 0 and tag[len(tag) - 1] == ':':
tag = tag[:-1]
logcat_line.tag = tag
string_pos = line_cleaned.find(tag)
length_tag = len(tag)
text = line_cleaned[string_pos + length_tag:].strip()
if text.startswith(':'):
text = text[1:]
text = text.strip()
logcat_line.text = text
current_log.lines.append(logcat_line)
if self.TEST_FINISHED in line:
self.logs.append(current_log)
if self.should_record_screen:
<DeepExtract>
if self.screen_recording_thread is not None:
self.screen_recording_thread.kill_processes()
self.screen_recording_thread = None
</DeepExtract>
self.recordings.append(current_recording_name)
current_log = None
current_process_pid = None
current_recording_name = None
|
def run(self):
ShellHelper.execute_shell(self.flush_logcat_cmd, False, False)
with subprocess.Popen(self.monitor_logcat_cmd, shell=True, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True, encoding='utf-8', errors='ignore') as p:
self.logcat_process = p
current_log = None
current_process_pid = None
current_recording_name = None
for line in p.stdout:
line_cleaned = line.encode('utf-8', 'ignore').decode('utf-8').strip()
line_parts = line_cleaned.split()
if len(line_parts) <= 5:
continue
if self.TEST_STARTED in line and current_log is None and (current_process_pid is None):
current_log = TestLogCat()
current_process_pid = line_parts[self.PID_INDEX]
test_name = re.findall('TestRunner: started:(.+)\\(', line_cleaned)
current_log.test_name = test_name[0].strip()
full_test_package = re.findall('\\((.+)\\)', line_cleaned)
package_parts = full_test_package[0].split('.')
current_log.test_container = package_parts.pop().strip()
current_log.test_full_package = full_test_package[0].strip() + '.' + test_name[0].strip()
if self.should_record_screen:
if self.screen_recording_thread is None or not self.screen_recording_thread.is_alive():
recording_name = current_log.test_name + '-' + str(int(round(time.time() * 1000))) + '.mp4'
self.screen_recording_thread = TestRecordingThread(self.record_screen_cmd, recording_name, self.device)
self.screen_recording_thread.start()
if current_recording_name is not None:
self.recordings.append(current_recording_name)
current_recording_name = self.screen_recording_thread.recording_name
if current_log is not None:
if line_parts[self.PID_INDEX] == current_process_pid:
logcat_line = TestLogCatLine()
date = line_parts[self.DATE_INDEX]
logcat_line.date = date
time_hour = line_parts[self.TIME_INDEX]
logcat_line.time = time_hour
level = line_parts[self.LEVEL_INDEX]
logcat_line.level = level
tag = line_parts[self.TAG_INDEX]
if len(tag) > 0 and tag[len(tag) - 1] == ':':
tag = tag[:-1]
logcat_line.tag = tag
string_pos = line_cleaned.find(tag)
length_tag = len(tag)
text = line_cleaned[string_pos + length_tag:].strip()
if text.startswith(':'):
text = text[1:]
text = text.strip()
logcat_line.text = text
current_log.lines.append(logcat_line)
if self.TEST_FINISHED in line:
self.logs.append(current_log)
if self.should_record_screen:
if self.screen_recording_thread is not None:
self.screen_recording_thread.kill_processes()
self.screen_recording_thread = None
self.recordings.append(current_recording_name)
current_log = None
current_process_pid = None
current_recording_name = None
|
AutomationTestSupervisor
|
positive
|
def get_alias(self):
"""Returns the alias for this identifier or ``None``."""
<DeepExtract>
if not isinstance(0, int):
0 = self.token_index(0)
for n in range(0, len(self.tokens)):
token = self.tokens[n]
if token.match(T.Keyword, 'AS', regex):
kw = token
</DeepExtract>
if kw is not None:
return self._get_first_name(kw, keywords=True)
if len(self.tokens) > 2 and self.token_next_by_type(0, T.Whitespace) is not None:
return self._get_first_name(reverse=True)
return None
|
def get_alias(self):
"""Returns the alias for this identifier or ``None``."""
if not isinstance(0, int):
0 = self.token_index(0)
for n in range(0, len(self.tokens)):
token = self.tokens[n]
if token.match(T.Keyword, 'AS', regex):
kw = token
if kw is not None:
return self._get_first_name(kw, keywords=True)
if len(self.tokens) > 2 and self.token_next_by_type(0, T.Whitespace) is not None:
return self._get_first_name(reverse=True)
return None
|
es-monitor
|
positive
|
def allow_request(self, request, view):
"""
Modify throttling for service users.
Updates throttling rate if the request is coming from the service user, and
defaults to UserRateThrottle's configured setting otherwise.
Updated throttling rate comes from `DEFAULT_THROTTLE_RATES` key in `REST_FRAMEWORK`
setting. service user throttling is specified in `DEFAULT_THROTTLE_RATES` by `service_user` key
Example Setting::
REST_FRAMEWORK = {
...
'DEFAULT_THROTTLE_RATES': {
...
'service_user': '50/day'
}
}
"""
service_users = getattr(settings, 'ANALYTICS_API_SERVICE_USERNAMES', None)
if service_users and request.user.username in service_users:
<DeepExtract>
self.scope = SERVICE_USER_SCOPE
self.rate = self.get_rate()
(self.num_requests, self.duration) = self.parse_rate(self.rate)
</DeepExtract>
return super().allow_request(request, view)
|
def allow_request(self, request, view):
"""
Modify throttling for service users.
Updates throttling rate if the request is coming from the service user, and
defaults to UserRateThrottle's configured setting otherwise.
Updated throttling rate comes from `DEFAULT_THROTTLE_RATES` key in `REST_FRAMEWORK`
setting. service user throttling is specified in `DEFAULT_THROTTLE_RATES` by `service_user` key
Example Setting::
REST_FRAMEWORK = {
...
'DEFAULT_THROTTLE_RATES': {
...
'service_user': '50/day'
}
}
"""
service_users = getattr(settings, 'ANALYTICS_API_SERVICE_USERNAMES', None)
if service_users and request.user.username in service_users:
self.scope = SERVICE_USER_SCOPE
self.rate = self.get_rate()
(self.num_requests, self.duration) = self.parse_rate(self.rate)
return super().allow_request(request, view)
|
edx-analytics-data-api
|
positive
|
def get_logprobs_state(it, fc_feats, att_feats, p_att_feats, att_masks, state, trace_feats_to_decoder, trace_masks, output_logsoftmax=1):
xt = self.embed(it)
<DeepExtract>
is_first_step = (state[0] == 0).all(2).all(0)
if is_first_step.all():
(_, state) = self._core(fc_feats, state)
elif is_first_step.any():
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
(_, state) = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
(output, state) = self._core(xt, state)
</DeepExtract>
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return (logprobs, state)
|
def get_logprobs_state(it, fc_feats, att_feats, p_att_feats, att_masks, state, trace_feats_to_decoder, trace_masks, output_logsoftmax=1):
xt = self.embed(it)
is_first_step = (state[0] == 0).all(2).all(0)
if is_first_step.all():
(_, state) = self._core(fc_feats, state)
elif is_first_step.any():
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
(_, state) = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
(output, state) = self._core(xt, state)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return (logprobs, state)
|
connect-caption-and-trace
|
positive
|
def grep(S):
"""
GREP Genome Assembly with Perfect Coverage and Repeats
"""
<DeepExtract>
counts = {}
for s in S:
if s in counts:
counts[s] += 1
else:
counts[s] = 1
counts = counts
</DeepExtract>
(B, E) = dbru(S, include_revc=False)
<DeepExtract>
F = {}
for b in B:
F[b] = [f for (e, f) in E if e == b]
F = F
</DeepExtract>
Runs = [[S[0]]]
counts[S[0]] -= 1
<DeepExtract>
removes = []
for (key, value) in counts.items():
if value == 0:
removes.append(key)
for key in removes:
del counts[key]
counts = counts
</DeepExtract>
CountsForRuns = [counts]
for n in range(len(S) - 1):
NewRuns = []
for i in range(len(Runs)):
run = Runs[i]
counts = CountsForRuns[i]
last = run[-1][1:]
succ = F[last]
counts_old = copy.deepcopy(counts)
j = 0
added = False
while j < len(succ):
kmer = last + succ[j][-1]
if kmer in counts_old:
if added:
new_counts = copy.deepcopy(counts_old)
new_counts[kmer] -= 1
<DeepExtract>
removes = []
for (key, value) in new_counts.items():
if value == 0:
removes.append(key)
for key in removes:
del new_counts[key]
new_counts = new_counts
</DeepExtract>
new_run = copy.deepcopy(run[:-1])
new_run.append(kmer)
CountsForRuns.append(new_counts)
NewRuns.append(new_run)
else:
counts[kmer] -= 1
<DeepExtract>
removes = []
for (key, value) in counts.items():
if value == 0:
removes.append(key)
for key in removes:
del counts[key]
counts = counts
</DeepExtract>
run.append(kmer)
added = True
j += 1
Runs = Runs + NewRuns
return [format(r)[:-1] for r in Runs if len(r) == len(S)]
|
def grep(S):
"""
GREP Genome Assembly with Perfect Coverage and Repeats
"""
counts = {}
for s in S:
if s in counts:
counts[s] += 1
else:
counts[s] = 1
counts = counts
(B, E) = dbru(S, include_revc=False)
F = {}
for b in B:
F[b] = [f for (e, f) in E if e == b]
F = F
Runs = [[S[0]]]
counts[S[0]] -= 1
removes = []
for (key, value) in counts.items():
if value == 0:
removes.append(key)
for key in removes:
del counts[key]
counts = counts
CountsForRuns = [counts]
for n in range(len(S) - 1):
NewRuns = []
for i in range(len(Runs)):
run = Runs[i]
counts = CountsForRuns[i]
last = run[-1][1:]
succ = F[last]
counts_old = copy.deepcopy(counts)
j = 0
added = False
while j < len(succ):
kmer = last + succ[j][-1]
if kmer in counts_old:
if added:
new_counts = copy.deepcopy(counts_old)
new_counts[kmer] -= 1
removes = []
for (key, value) in new_counts.items():
if value == 0:
removes.append(key)
for key in removes:
del new_counts[key]
new_counts = new_counts
new_run = copy.deepcopy(run[:-1])
new_run.append(kmer)
CountsForRuns.append(new_counts)
NewRuns.append(new_run)
else:
counts[kmer] -= 1
removes = []
for (key, value) in counts.items():
if value == 0:
removes.append(key)
for key in removes:
del counts[key]
counts = counts
run.append(kmer)
added = True
j += 1
Runs = Runs + NewRuns
return [format(r)[:-1] for r in Runs if len(r) == len(S)]
|
bioinformatics
|
positive
|
def dot_lastdim(A, B, out=None):
"""Performs dot for multi-dimensional matrices A and B, where
A.shape[-1] = B.shape[0]. The returned matrix should have shape
A.shape[:-1] + B.shape[1:].
A and B should both be c-contiguous, otherwise the code will report
an error.
"""
if out == None:
out = np.empty(A.shape[:-1] + B.shape[1:], A.dtype)
lda = A.size / A.shape[-1]
dim = A.shape[-1]
ldb = B.size / B.shape[0]
Aview = A.view()
Bview = B.view()
outview = out.view()
Aview.shape = (lda, dim)
Bview.shape = (dim, ldb)
outview.shape = (lda, ldb)
<DeepExtract>
if outview == None:
outview = np.empty((Aview.shape[0], Bview.shape[1]), Aview.dtype, Bview.dtype)
if outview.size == 1:
outview[:] = np.dot(Aview.flat, Bview.flat)
elif outview.flags.f_contiguous:
outview = _gemm_f_contiguous(1.0, Aview, Bview, out=outview)
else:
outview = _gemm_c_contiguous(1.0, Aview, Bview, out=outview)
return outview
</DeepExtract>
return out
|
def dot_lastdim(A, B, out=None):
"""Performs dot for multi-dimensional matrices A and B, where
A.shape[-1] = B.shape[0]. The returned matrix should have shape
A.shape[:-1] + B.shape[1:].
A and B should both be c-contiguous, otherwise the code will report
an error.
"""
if out == None:
out = np.empty(A.shape[:-1] + B.shape[1:], A.dtype)
lda = A.size / A.shape[-1]
dim = A.shape[-1]
ldb = B.size / B.shape[0]
Aview = A.view()
Bview = B.view()
outview = out.view()
Aview.shape = (lda, dim)
Bview.shape = (dim, ldb)
outview.shape = (lda, ldb)
if outview == None:
outview = np.empty((Aview.shape[0], Bview.shape[1]), Aview.dtype, Bview.dtype)
if outview.size == 1:
outview[:] = np.dot(Aview.flat, Bview.flat)
elif outview.flags.f_contiguous:
outview = _gemm_f_contiguous(1.0, Aview, Bview, out=outview)
else:
outview = _gemm_c_contiguous(1.0, Aview, Bview, out=outview)
return outview
return out
|
decaf
|
positive
|
def dump(self, force=False):
"""
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
"""
if self._header is not None and self._header[-1:] == b'\x80':
force = True
if force:
<DeepExtract>
if self.children is None:
self._parse_children()
contents = BytesIO()
for (index, info) in enumerate(self._fields):
child = self.children[index]
if child is None:
child_dump = b''
elif child.__class__ == tuple:
if force:
child_dump = self._lazy_child(index).dump(force=force)
else:
child_dump = child[3] + child[4] + child[5]
else:
child_dump = child.dump(force=force)
if info[2] and 'default' in info[2]:
default_value = info[1](**info[2])
if default_value.dump() == child_dump:
continue
contents.write(child_dump)
self._contents = contents.getvalue()
self._header = None
if self._trailer != b'':
self._trailer = b''
</DeepExtract>
return Asn1Value.dump(self)
|
def dump(self, force=False):
"""
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
"""
if self._header is not None and self._header[-1:] == b'\x80':
force = True
if force:
if self.children is None:
self._parse_children()
contents = BytesIO()
for (index, info) in enumerate(self._fields):
child = self.children[index]
if child is None:
child_dump = b''
elif child.__class__ == tuple:
if force:
child_dump = self._lazy_child(index).dump(force=force)
else:
child_dump = child[3] + child[4] + child[5]
else:
child_dump = child.dump(force=force)
if info[2] and 'default' in info[2]:
default_value = info[1](**info[2])
if default_value.dump() == child_dump:
continue
contents.write(child_dump)
self._contents = contents.getvalue()
self._header = None
if self._trailer != b'':
self._trailer = b''
return Asn1Value.dump(self)
|
asn1crypto
|
positive
|
def encode(self, text, text_pair=None, add_special_tokens=False, max_length=None, stride=0, truncation_strategy='longest_first', return_tensors=None, **kwargs):
"""
Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary.
Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
Args:
text: The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method)
text_pair: Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the `tokenize` method) or a list of integers (tokenized string ids using the
`convert_tokens_to_ids` method)
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length: if set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
stride: if set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
or PyTorch torch.Tensor instead of a list of python integers.
**kwargs: passed to the `self.tokenize()` method
"""
<DeepExtract>
def get_input_ids(text):
if isinstance(text, six.string_types):
encoded_inputs = self.convert_tokens_to_ids(self.tokenize(text, **kwargs))
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], six.string_types):
encoded_inputs = self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
encoded_inputs = text
else:
raise ValueError('Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers.')
first_ids = get_input_ids(text)
second_ids = get_input_ids(text_pair) if text_pair is not None else None
encoded_inputs = self.prepare_for_model(first_ids, pair_ids=second_ids, max_length=max_length, add_special_tokens=add_special_tokens, stride=stride, truncation_strategy=truncation_strategy, return_tensors=return_tensors)
</DeepExtract>
return encoded_inputs['input_ids']
|
def encode(self, text, text_pair=None, add_special_tokens=False, max_length=None, stride=0, truncation_strategy='longest_first', return_tensors=None, **kwargs):
"""
Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary.
Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
Args:
text: The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method)
text_pair: Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the `tokenize` method) or a list of integers (tokenized string ids using the
`convert_tokens_to_ids` method)
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
max_length: if set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
stride: if set to a number along with max_length, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
or PyTorch torch.Tensor instead of a list of python integers.
**kwargs: passed to the `self.tokenize()` method
"""
def get_input_ids(text):
if isinstance(text, six.string_types):
encoded_inputs = self.convert_tokens_to_ids(self.tokenize(text, **kwargs))
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], six.string_types):
encoded_inputs = self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
encoded_inputs = text
else:
raise ValueError('Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers.')
first_ids = get_input_ids(text)
second_ids = get_input_ids(text_pair) if text_pair is not None else None
encoded_inputs = self.prepare_for_model(first_ids, pair_ids=second_ids, max_length=max_length, add_special_tokens=add_special_tokens, stride=stride, truncation_strategy=truncation_strategy, return_tensors=return_tensors)
return encoded_inputs['input_ids']
|
albert_pytorch
|
positive
|
@patch('backend.lambdas.tasks.work_query_queue.sf_client')
def test_it_loads_execution_from_state(sf_mock):
<DeepExtract>
sf_mock.describe_execution.return_value = {'executionArn': 'arn:aws:states:eu-west-1:123456789012:execution:S3F2-StateMachine:59923759-3016-82d8-bbc0', 'stateMachineArn': 'arn:aws:states:eu-west-1:123456789012:stateMachine:S3F2-StateMachine', 'name': '59923759-3016-82d8-bbc0', 'status': 'RUNNING', 'startDate': 1575900611.248, 'stopDate': 1575900756.716, 'input': '{}', **kwargs}
</DeepExtract>
resp = load_execution({'ExecutionArn': 'arn', 'ReceiptHandle': 'handle'})
assert {**execution_stub(), 'ReceiptHandle': 'handle'} == resp
|
@patch('backend.lambdas.tasks.work_query_queue.sf_client')
def test_it_loads_execution_from_state(sf_mock):
sf_mock.describe_execution.return_value = {'executionArn': 'arn:aws:states:eu-west-1:123456789012:execution:S3F2-StateMachine:59923759-3016-82d8-bbc0', 'stateMachineArn': 'arn:aws:states:eu-west-1:123456789012:stateMachine:S3F2-StateMachine', 'name': '59923759-3016-82d8-bbc0', 'status': 'RUNNING', 'startDate': 1575900611.248, 'stopDate': 1575900756.716, 'input': '{}', **kwargs}
resp = load_execution({'ExecutionArn': 'arn', 'ReceiptHandle': 'handle'})
assert {**execution_stub(), 'ReceiptHandle': 'handle'} == resp
|
amazon-s3-find-and-forget
|
positive
|
def info(text, render=1):
""" display a info
Args:
text (str): info message
display (bool, optional): Defaults to True. Display or return settings
Returns:
str: setting value if display=False, None otherwise
"""
color = 'blue'
s = '[Info] %s' % text
if render:
<DeepExtract>
s = colorize(s, color, bg_color, brightness)
display(s)
</DeepExtract>
else:
return colorize(s + '\n', color)
|
def info(text, render=1):
""" display a info
Args:
text (str): info message
display (bool, optional): Defaults to True. Display or return settings
Returns:
str: setting value if display=False, None otherwise
"""
color = 'blue'
s = '[Info] %s' % text
if render:
s = colorize(s, color, bg_color, brightness)
display(s)
else:
return colorize(s + '\n', color)
|
AutoRec
|
positive
|
def all_bispectra_polar(alist, polar, phase_type='resid_phas', snr_cut=0.0, debias_snr=False, match_by_scan=False, verbose=False):
"""
match_by_scan: option to only use scan_id rather than timestamp to find triplets of phases to form closure phases
should only be used for scan-averaged data
"""
if match_by_scan:
alist = ut.coh_avg_vis(alist, tavg='scan', phase_type=phase_type)
if 'snr' not in alist.columns:
alist.loc[:, 'snr'] = alist.loc[:, 'amp'] / alist.loc[:, 'sigma']
if debias_snr == True:
foo = np.maximum(np.asarray(alist['snr']) ** 2 - 1, 0)
alist['snr'] = np.sqrt(foo)
alist.drop(list(alist[alist.snr < snr_cut].index.values), inplace=True)
alist = alist[alist['polarization'] == polar]
alist = alist.loc[:, ~alist.columns.duplicated()]
if 'scan_id' not in alist.columns:
alist.loc[:, 'scan_id'] = alist.loc[:, 'scan_no_tot']
if 'band' not in alist.columns:
alist.loc[:, 'band'] = np.nan
alist['amps'] = alist['amp']
alist['snrs'] = alist['snr']
if 'fracpol' in alist.columns:
alist['fracpols'] = alist['fracpol']
else:
alist['fracpols'] = 0
<DeepExtract>
all_baselines = set(alist.baseline)
if all(['-' in x for x in all_baselines]):
all_stations = set([y for sub in [x.split('-') for x in all_baselines] for y in sub])
else:
all_stations = set(''.join(list(all_baselines)))
foo = list(itertools.combinations(all_stations, 3))
foo = [list(x) for x in foo if ('R' not in set(x)) | ('S' not in set(x))]
foo = [''.join(sorted(x)) for x in foo]
triL = foo
</DeepExtract>
<DeepExtract>
all_baselines = set(alist.baseline)
foo_base = []
signat = []
for cou in range(len(triL)):
b0 = triL[cou][0:2]
b1 = triL[cou][1:3]
b2 = triL[cou][2] + triL[cou][0]
if b0 in all_baselines:
base0 = b0
sign0 = 1
elif b0[1] + b0[0] in all_baselines:
base0 = b0[1] + b0[0]
sign0 = -1
else:
base0 = -1
sign0 = 0
if b1 in all_baselines:
base1 = b1
sign1 = 1
elif b1[1] + b1[0] in all_baselines:
base1 = b1[1] + b1[0]
sign1 = -1
else:
base1 = -1
sign1 = 0
if b2 in all_baselines:
base2 = b2
sign2 = 1
elif b2[1] + b2[0] in all_baselines:
base2 = b2[1] + b2[0]
sign2 = -1
else:
base2 = -1
sign2 = 0
baselines = [base0, base1, base2]
baselinesSTR = map(lambda x: type(x) == str, baselines)
if all(baselinesSTR):
foo_base.append(baselines)
signat.append([sign0, sign1, sign2])
(tri_baseL, sgnL) = (foo_base, signat)
</DeepExtract>
<DeepExtract>
tri = [''.join(sorted(list(set(''.join(x))))) for x in tri_baseL]
triL = tri
</DeepExtract>
<DeepExtract>
all_baselines = set(alist.baseline)
foo_base = []
signat = []
for cou in range(len(triL)):
b0 = triL[cou][0:2]
b1 = triL[cou][1:3]
b2 = triL[cou][2] + triL[cou][0]
if b0 in all_baselines:
base0 = b0
sign0 = 1
elif b0[1] + b0[0] in all_baselines:
base0 = b0[1] + b0[0]
sign0 = -1
else:
base0 = -1
sign0 = 0
if b1 in all_baselines:
base1 = b1
sign1 = 1
elif b1[1] + b1[0] in all_baselines:
base1 = b1[1] + b1[0]
sign1 = -1
else:
base1 = -1
sign1 = 0
if b2 in all_baselines:
base2 = b2
sign2 = 1
elif b2[1] + b2[0] in all_baselines:
base2 = b2[1] + b2[0]
sign2 = -1
else:
base2 = -1
sign2 = 0
baselines = [base0, base1, base2]
baselinesSTR = map(lambda x: type(x) == str, baselines)
if all(baselinesSTR):
foo_base.append(baselines)
signat.append([sign0, sign1, sign2])
(tri_baseL, sgnL) = (foo_base, signat)
</DeepExtract>
bsp_out = pd.DataFrame({})
for cou in range(len(triL)):
Tri = tri_baseL[cou]
if verbose:
print(Tri)
signat = sgnL[cou]
condB1 = alist['baseline'] == Tri[0]
condB2 = alist['baseline'] == Tri[1]
condB3 = alist['baseline'] == Tri[2]
condB = condB1 | condB2 | condB3
alist_Tri = alist.loc[condB, ['expt_no', 'scan_id', 'source', 'datetime', 'baseline', phase_type, 'amp', 'snr', 'gmst', 'band', 'amps', 'snrs', 'fracpols']]
if match_by_scan:
tlist = alist_Tri.groupby(['band', 'scan_id']).filter(lambda x: len(x) == 3)
else:
tlist = alist_Tri.groupby(['band', 'datetime']).filter(lambda x: len(x) == 3)
tlist.loc[:, 'sigma'] = tlist.loc[:, 'amp'] / tlist.loc[:, 'snr']
for cou2 in range(3):
tlist.loc[tlist.loc[:, 'baseline'] == Tri[cou2], phase_type] *= signat[cou2] * np.pi / 180.0
tlist.loc[:, 'sigma'] = 1.0 / tlist.loc[:, 'snr'] ** 2
if match_by_scan:
bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id']).agg({phase_type: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x), 'datetime': min})
else:
bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id', 'datetime']).agg({phase_type: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x)})
bsp.loc[:, 'bsp'] = bsp.loc[:, 'amp'] * np.exp(1j * bsp.loc[:, phase_type])
bsp.loc[:, 'snr'] = 1.0 / bsp.loc[:, 'sigma']
bsp.loc[:, 'sigma'] = bsp.loc[:, 'amp'] * bsp.loc[:, 'sigma']
bsp.loc[:, 'triangle'] = [triL[cou]] * np.shape(bsp)[0]
bsp.loc[:, 'polarization'] = [polar] * np.shape(bsp)[0]
bsp.loc[:, 'cphase'] = np.angle(bsp.loc[:, 'bsp']) * 180.0 / np.pi
bsp.loc[:, 'amp'] = np.abs(bsp.loc[:, 'bsp'])
bsp.loc[:, 'sigmaCP'] = 1.0 / bsp.loc[:, 'snr'] * 180.0 / np.pi
bsp_out = pd.concat([bsp_out, bsp])
if verbose:
print(triL[cou] + ': ' + str(np.shape(bsp)[0]) + ' closure phases')
bsp_out = bsp_out.reset_index()
bsp_out['rel_err'] = np.asarray(bsp_out['cphase']) / np.asarray(bsp_out['sigmaCP'])
return bsp_out
|
def all_bispectra_polar(alist, polar, phase_type='resid_phas', snr_cut=0.0, debias_snr=False, match_by_scan=False, verbose=False):
"""
match_by_scan: option to only use scan_id rather than timestamp to find triplets of phases to form closure phases
should only be used for scan-averaged data
"""
if match_by_scan:
alist = ut.coh_avg_vis(alist, tavg='scan', phase_type=phase_type)
if 'snr' not in alist.columns:
alist.loc[:, 'snr'] = alist.loc[:, 'amp'] / alist.loc[:, 'sigma']
if debias_snr == True:
foo = np.maximum(np.asarray(alist['snr']) ** 2 - 1, 0)
alist['snr'] = np.sqrt(foo)
alist.drop(list(alist[alist.snr < snr_cut].index.values), inplace=True)
alist = alist[alist['polarization'] == polar]
alist = alist.loc[:, ~alist.columns.duplicated()]
if 'scan_id' not in alist.columns:
alist.loc[:, 'scan_id'] = alist.loc[:, 'scan_no_tot']
if 'band' not in alist.columns:
alist.loc[:, 'band'] = np.nan
alist['amps'] = alist['amp']
alist['snrs'] = alist['snr']
if 'fracpol' in alist.columns:
alist['fracpols'] = alist['fracpol']
else:
alist['fracpols'] = 0
all_baselines = set(alist.baseline)
if all(['-' in x for x in all_baselines]):
all_stations = set([y for sub in [x.split('-') for x in all_baselines] for y in sub])
else:
all_stations = set(''.join(list(all_baselines)))
foo = list(itertools.combinations(all_stations, 3))
foo = [list(x) for x in foo if ('R' not in set(x)) | ('S' not in set(x))]
foo = [''.join(sorted(x)) for x in foo]
triL = foo
all_baselines = set(alist.baseline)
foo_base = []
signat = []
for cou in range(len(triL)):
b0 = triL[cou][0:2]
b1 = triL[cou][1:3]
b2 = triL[cou][2] + triL[cou][0]
if b0 in all_baselines:
base0 = b0
sign0 = 1
elif b0[1] + b0[0] in all_baselines:
base0 = b0[1] + b0[0]
sign0 = -1
else:
base0 = -1
sign0 = 0
if b1 in all_baselines:
base1 = b1
sign1 = 1
elif b1[1] + b1[0] in all_baselines:
base1 = b1[1] + b1[0]
sign1 = -1
else:
base1 = -1
sign1 = 0
if b2 in all_baselines:
base2 = b2
sign2 = 1
elif b2[1] + b2[0] in all_baselines:
base2 = b2[1] + b2[0]
sign2 = -1
else:
base2 = -1
sign2 = 0
baselines = [base0, base1, base2]
baselinesSTR = map(lambda x: type(x) == str, baselines)
if all(baselinesSTR):
foo_base.append(baselines)
signat.append([sign0, sign1, sign2])
(tri_baseL, sgnL) = (foo_base, signat)
tri = [''.join(sorted(list(set(''.join(x))))) for x in tri_baseL]
triL = tri
all_baselines = set(alist.baseline)
foo_base = []
signat = []
for cou in range(len(triL)):
b0 = triL[cou][0:2]
b1 = triL[cou][1:3]
b2 = triL[cou][2] + triL[cou][0]
if b0 in all_baselines:
base0 = b0
sign0 = 1
elif b0[1] + b0[0] in all_baselines:
base0 = b0[1] + b0[0]
sign0 = -1
else:
base0 = -1
sign0 = 0
if b1 in all_baselines:
base1 = b1
sign1 = 1
elif b1[1] + b1[0] in all_baselines:
base1 = b1[1] + b1[0]
sign1 = -1
else:
base1 = -1
sign1 = 0
if b2 in all_baselines:
base2 = b2
sign2 = 1
elif b2[1] + b2[0] in all_baselines:
base2 = b2[1] + b2[0]
sign2 = -1
else:
base2 = -1
sign2 = 0
baselines = [base0, base1, base2]
baselinesSTR = map(lambda x: type(x) == str, baselines)
if all(baselinesSTR):
foo_base.append(baselines)
signat.append([sign0, sign1, sign2])
(tri_baseL, sgnL) = (foo_base, signat)
bsp_out = pd.DataFrame({})
for cou in range(len(triL)):
Tri = tri_baseL[cou]
if verbose:
print(Tri)
signat = sgnL[cou]
condB1 = alist['baseline'] == Tri[0]
condB2 = alist['baseline'] == Tri[1]
condB3 = alist['baseline'] == Tri[2]
condB = condB1 | condB2 | condB3
alist_Tri = alist.loc[condB, ['expt_no', 'scan_id', 'source', 'datetime', 'baseline', phase_type, 'amp', 'snr', 'gmst', 'band', 'amps', 'snrs', 'fracpols']]
if match_by_scan:
tlist = alist_Tri.groupby(['band', 'scan_id']).filter(lambda x: len(x) == 3)
else:
tlist = alist_Tri.groupby(['band', 'datetime']).filter(lambda x: len(x) == 3)
tlist.loc[:, 'sigma'] = tlist.loc[:, 'amp'] / tlist.loc[:, 'snr']
for cou2 in range(3):
tlist.loc[tlist.loc[:, 'baseline'] == Tri[cou2], phase_type] *= signat[cou2] * np.pi / 180.0
tlist.loc[:, 'sigma'] = 1.0 / tlist.loc[:, 'snr'] ** 2
if match_by_scan:
bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id']).agg({phase_type: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x), 'datetime': min})
else:
bsp = tlist.groupby(['expt_no', 'source', 'band', 'scan_id', 'datetime']).agg({phase_type: lambda x: np.sum(x), 'amp': lambda x: np.prod(x), 'sigma': lambda x: np.sqrt(np.sum(x)), 'amps': lambda x: tuple(x), 'snrs': lambda x: tuple(x), 'fracpols': lambda x: tuple(x)})
bsp.loc[:, 'bsp'] = bsp.loc[:, 'amp'] * np.exp(1j * bsp.loc[:, phase_type])
bsp.loc[:, 'snr'] = 1.0 / bsp.loc[:, 'sigma']
bsp.loc[:, 'sigma'] = bsp.loc[:, 'amp'] * bsp.loc[:, 'sigma']
bsp.loc[:, 'triangle'] = [triL[cou]] * np.shape(bsp)[0]
bsp.loc[:, 'polarization'] = [polar] * np.shape(bsp)[0]
bsp.loc[:, 'cphase'] = np.angle(bsp.loc[:, 'bsp']) * 180.0 / np.pi
bsp.loc[:, 'amp'] = np.abs(bsp.loc[:, 'bsp'])
bsp.loc[:, 'sigmaCP'] = 1.0 / bsp.loc[:, 'snr'] * 180.0 / np.pi
bsp_out = pd.concat([bsp_out, bsp])
if verbose:
print(triL[cou] + ': ' + str(np.shape(bsp)[0]) + ' closure phases')
bsp_out = bsp_out.reset_index()
bsp_out['rel_err'] = np.asarray(bsp_out['cphase']) / np.asarray(bsp_out['sigmaCP'])
return bsp_out
|
eat
|
positive
|
def _construct_form(self, i, **kwargs):
if self._initial and (not kwargs.get('instance', None)):
<DeepExtract>
instance = None
try:
instance = self.get_queryset()[i]
except IndexError:
try:
queryset_count = self.get_queryset().count()
values = self.__initial_minus_queryset()[i - queryset_count]
values[self.ct_field.name] = ContentType.objects.get_for_model(self.instance)
values[self.ct_fk_field.name] = self.instance.pk
instance = self.model(**values)
except IndexError:
pass
instance = instance
</DeepExtract>
if instance:
kwargs['instance'] = instance
form = super()._construct_form(i, **kwargs)
return form
|
def _construct_form(self, i, **kwargs):
if self._initial and (not kwargs.get('instance', None)):
instance = None
try:
instance = self.get_queryset()[i]
except IndexError:
try:
queryset_count = self.get_queryset().count()
values = self.__initial_minus_queryset()[i - queryset_count]
values[self.ct_field.name] = ContentType.objects.get_for_model(self.instance)
values[self.ct_fk_field.name] = self.instance.pk
instance = self.model(**values)
except IndexError:
pass
instance = instance
if instance:
kwargs['instance'] = instance
form = super()._construct_form(i, **kwargs)
return form
|
django-fluent-contents
|
positive
|
def _write_reports(self):
f_out_all = pyfastaq.utils.open_file_write(self.outprefix + '.details.tsv')
print('gene\tallele\tcov\tpc\tctgs\tdepth\thetmin\thets', file=f_out_all)
out_simple = [str(self.sequence_type)]
for gene in self.mlst_profile.genes_list:
<DeepExtract>
allele_str = str(self.gene_results[gene]['allele'])
if self.gene_results[gene]['sure'] is not None and (not self.gene_results[gene]['sure']):
self.any_allele_unsure = True
allele_str += '*'
details_list = [str(self.gene_results[gene][x]) for x in ['cov', 'pc', 'ctgs', 'depth', 'hetmin', 'hets']]
(allele_str, detail_str) = (allele_str, '\t'.join(details_list))
</DeepExtract>
out_simple.append(allele_str)
print(gene, allele_str, detail_str, sep='\t', file=f_out_all)
pyfastaq.utils.close(f_out_all)
if self.sequence_type != 'ND' and self.any_allele_unsure:
out_simple[0] += '*'
f_out_simple = pyfastaq.utils.open_file_write(self.outprefix + '.tsv')
print('ST', *self.mlst_profile.genes_list, sep='\t', file=f_out_simple)
print(*out_simple, sep='\t', file=f_out_simple)
pyfastaq.utils.close(f_out_simple)
|
def _write_reports(self):
f_out_all = pyfastaq.utils.open_file_write(self.outprefix + '.details.tsv')
print('gene\tallele\tcov\tpc\tctgs\tdepth\thetmin\thets', file=f_out_all)
out_simple = [str(self.sequence_type)]
for gene in self.mlst_profile.genes_list:
allele_str = str(self.gene_results[gene]['allele'])
if self.gene_results[gene]['sure'] is not None and (not self.gene_results[gene]['sure']):
self.any_allele_unsure = True
allele_str += '*'
details_list = [str(self.gene_results[gene][x]) for x in ['cov', 'pc', 'ctgs', 'depth', 'hetmin', 'hets']]
(allele_str, detail_str) = (allele_str, '\t'.join(details_list))
out_simple.append(allele_str)
print(gene, allele_str, detail_str, sep='\t', file=f_out_all)
pyfastaq.utils.close(f_out_all)
if self.sequence_type != 'ND' and self.any_allele_unsure:
out_simple[0] += '*'
f_out_simple = pyfastaq.utils.open_file_write(self.outprefix + '.tsv')
print('ST', *self.mlst_profile.genes_list, sep='\t', file=f_out_simple)
print(*out_simple, sep='\t', file=f_out_simple)
pyfastaq.utils.close(f_out_simple)
|
ariba
|
positive
|
def main():
<DeepExtract>
parser = argparse.ArgumentParser(description='Azrael Demo Script', formatter_class=argparse.RawTextHelpFormatter)
padd = parser.add_argument
padd('--interval', metavar='N', type=int, default=2, help='Update interval in seconds')
param = parser.parse_args()
</DeepExtract>
db = pymongo.MongoClient()['timing']['timing']
pd.options.display.float_format = ' {:5,.0f}'.format
(cntEmpty, last) = (0, 0)
while True:
query = {'Timestamp': {'$gt': last}}
last = time.time()
data = list(db.find(query))
if len(data) > 0:
cntEmpty = 0
print('\r' + '*' * 78)
print('Updated: {}'.format(time.ctime(last)))
print('\r' + '*' * 78 + '\n')
s = '{}|{}|{}\n'
csv = [s.format(_['Metric'], _['Value'], _['Type']) for _ in data]
csv = 'Metric|Value|Type\n' + ''.join(csv)
csv = io.StringIO(csv)
del s
df = pd.read_csv(csv, sep='|')
<DeepExtract>
df = df[df.Type == 'Time'].copy()
df['Value'] = 1000000 * df['Value']
agg = ['count', 'min', 'max', 'mean', 'std', 'sum']
out = df[['Metric', 'Value']].groupby('Metric').agg(agg)
out = out.Value
printHeader('Timings (us)')
printDataframe(out)
</DeepExtract>
<DeepExtract>
df = df[df.Type == 'Quantity'].copy()
df['Value'] = df['Value'].astype(np.int64)
agg = ['count', 'min', 'max', 'mean', 'std', 'sum']
out = df[['Metric', 'Value']].groupby('Metric').agg(agg)
out = out.Value
printHeader('Quantity')
printDataframe(out)
</DeepExtract>
print('\r' + '*' * 70 + ' {}'.format(cntEmpty), end='', flush=True)
cntEmpty += 1
time.sleep(param.interval)
|
def main():
parser = argparse.ArgumentParser(description='Azrael Demo Script', formatter_class=argparse.RawTextHelpFormatter)
padd = parser.add_argument
padd('--interval', metavar='N', type=int, default=2, help='Update interval in seconds')
param = parser.parse_args()
db = pymongo.MongoClient()['timing']['timing']
pd.options.display.float_format = ' {:5,.0f}'.format
(cntEmpty, last) = (0, 0)
while True:
query = {'Timestamp': {'$gt': last}}
last = time.time()
data = list(db.find(query))
if len(data) > 0:
cntEmpty = 0
print('\r' + '*' * 78)
print('Updated: {}'.format(time.ctime(last)))
print('\r' + '*' * 78 + '\n')
s = '{}|{}|{}\n'
csv = [s.format(_['Metric'], _['Value'], _['Type']) for _ in data]
csv = 'Metric|Value|Type\n' + ''.join(csv)
csv = io.StringIO(csv)
del s
df = pd.read_csv(csv, sep='|')
df = df[df.Type == 'Time'].copy()
df['Value'] = 1000000 * df['Value']
agg = ['count', 'min', 'max', 'mean', 'std', 'sum']
out = df[['Metric', 'Value']].groupby('Metric').agg(agg)
out = out.Value
printHeader('Timings (us)')
printDataframe(out)
df = df[df.Type == 'Quantity'].copy()
df['Value'] = df['Value'].astype(np.int64)
agg = ['count', 'min', 'max', 'mean', 'std', 'sum']
out = df[['Metric', 'Value']].groupby('Metric').agg(agg)
out = out.Value
printHeader('Quantity')
printDataframe(out)
print('\r' + '*' * 70 + ' {}'.format(cntEmpty), end='', flush=True)
cntEmpty += 1
time.sleep(param.interval)
|
azrael
|
positive
|
def get_linear_logit(features, feature_columns, units=1, use_bias=False, seed=1024, prefix='linear', l2_reg=0, sparse_feat_refine_weight=None):
linear_feature_columns = copy(feature_columns)
for i in range(len(linear_feature_columns)):
if isinstance(linear_feature_columns[i], SparseFeat):
linear_feature_columns[i] = linear_feature_columns[i]._replace(embedding_dim=1, embeddings_initializer=Zeros())
if isinstance(linear_feature_columns[i], VarLenSparseFeat):
linear_feature_columns[i] = linear_feature_columns[i]._replace(sparsefeat=linear_feature_columns[i].sparsefeat._replace(embedding_dim=1, embeddings_initializer=Zeros()))
linear_emb_list = [input_from_feature_columns(features, linear_feature_columns, l2_reg, seed, prefix=prefix + str(i))[0] for i in range(units)]
<DeepExtract>
sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeat), linear_feature_columns)) if linear_feature_columns else []
varlen_sparse_feature_columns = list(filter(lambda x: isinstance(x, VarLenSparseFeat), linear_feature_columns)) if linear_feature_columns else []
embedding_matrix_dict = create_embedding_matrix(linear_feature_columns, l2_reg, seed, prefix=prefix, seq_mask_zero=seq_mask_zero)
group_sparse_embedding_dict = embedding_lookup(embedding_matrix_dict, features, sparse_feature_columns)
dense_value_list = get_dense_input(features, linear_feature_columns)
if not support_dense and len(dense_value_list) > 0:
raise ValueError('DenseFeat is not supported in dnn_feature_columns')
sequence_embed_dict = varlen_embedding_lookup(embedding_matrix_dict, features, varlen_sparse_feature_columns)
group_varlen_sparse_embedding_dict = get_varlen_pooling_list(sequence_embed_dict, features, varlen_sparse_feature_columns)
group_embedding_dict = mergeDict(group_sparse_embedding_dict, group_varlen_sparse_embedding_dict)
if not support_group:
group_embedding_dict = list(chain.from_iterable(group_embedding_dict.values()))
(_, dense_input_list) = (group_embedding_dict, dense_value_list)
</DeepExtract>
linear_logit_list = []
for i in range(units):
if len(linear_emb_list[i]) > 0 and len(dense_input_list) > 0:
sparse_input = concat_func(linear_emb_list[i])
dense_input = concat_func(dense_input_list)
if sparse_feat_refine_weight is not None:
sparse_input = Lambda(lambda x: x[0] * tf.expand_dims(x[1], axis=1))([sparse_input, sparse_feat_refine_weight])
linear_logit = Linear(l2_reg, mode=2, use_bias=use_bias, seed=seed)([sparse_input, dense_input])
elif len(linear_emb_list[i]) > 0:
sparse_input = concat_func(linear_emb_list[i])
if sparse_feat_refine_weight is not None:
sparse_input = Lambda(lambda x: x[0] * tf.expand_dims(x[1], axis=1))([sparse_input, sparse_feat_refine_weight])
linear_logit = Linear(l2_reg, mode=0, use_bias=use_bias, seed=seed)(sparse_input)
elif len(dense_input_list) > 0:
dense_input = concat_func(dense_input_list)
linear_logit = Linear(l2_reg, mode=1, use_bias=use_bias, seed=seed)(dense_input)
else:
return Lambda(lambda x: tf.constant([[0.0]]))(list(features.values())[0])
linear_logit_list.append(linear_logit)
return concat_func(linear_logit_list)
|
def get_linear_logit(features, feature_columns, units=1, use_bias=False, seed=1024, prefix='linear', l2_reg=0, sparse_feat_refine_weight=None):
linear_feature_columns = copy(feature_columns)
for i in range(len(linear_feature_columns)):
if isinstance(linear_feature_columns[i], SparseFeat):
linear_feature_columns[i] = linear_feature_columns[i]._replace(embedding_dim=1, embeddings_initializer=Zeros())
if isinstance(linear_feature_columns[i], VarLenSparseFeat):
linear_feature_columns[i] = linear_feature_columns[i]._replace(sparsefeat=linear_feature_columns[i].sparsefeat._replace(embedding_dim=1, embeddings_initializer=Zeros()))
linear_emb_list = [input_from_feature_columns(features, linear_feature_columns, l2_reg, seed, prefix=prefix + str(i))[0] for i in range(units)]
sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeat), linear_feature_columns)) if linear_feature_columns else []
varlen_sparse_feature_columns = list(filter(lambda x: isinstance(x, VarLenSparseFeat), linear_feature_columns)) if linear_feature_columns else []
embedding_matrix_dict = create_embedding_matrix(linear_feature_columns, l2_reg, seed, prefix=prefix, seq_mask_zero=seq_mask_zero)
group_sparse_embedding_dict = embedding_lookup(embedding_matrix_dict, features, sparse_feature_columns)
dense_value_list = get_dense_input(features, linear_feature_columns)
if not support_dense and len(dense_value_list) > 0:
raise ValueError('DenseFeat is not supported in dnn_feature_columns')
sequence_embed_dict = varlen_embedding_lookup(embedding_matrix_dict, features, varlen_sparse_feature_columns)
group_varlen_sparse_embedding_dict = get_varlen_pooling_list(sequence_embed_dict, features, varlen_sparse_feature_columns)
group_embedding_dict = mergeDict(group_sparse_embedding_dict, group_varlen_sparse_embedding_dict)
if not support_group:
group_embedding_dict = list(chain.from_iterable(group_embedding_dict.values()))
(_, dense_input_list) = (group_embedding_dict, dense_value_list)
linear_logit_list = []
for i in range(units):
if len(linear_emb_list[i]) > 0 and len(dense_input_list) > 0:
sparse_input = concat_func(linear_emb_list[i])
dense_input = concat_func(dense_input_list)
if sparse_feat_refine_weight is not None:
sparse_input = Lambda(lambda x: x[0] * tf.expand_dims(x[1], axis=1))([sparse_input, sparse_feat_refine_weight])
linear_logit = Linear(l2_reg, mode=2, use_bias=use_bias, seed=seed)([sparse_input, dense_input])
elif len(linear_emb_list[i]) > 0:
sparse_input = concat_func(linear_emb_list[i])
if sparse_feat_refine_weight is not None:
sparse_input = Lambda(lambda x: x[0] * tf.expand_dims(x[1], axis=1))([sparse_input, sparse_feat_refine_weight])
linear_logit = Linear(l2_reg, mode=0, use_bias=use_bias, seed=seed)(sparse_input)
elif len(dense_input_list) > 0:
dense_input = concat_func(dense_input_list)
linear_logit = Linear(l2_reg, mode=1, use_bias=use_bias, seed=seed)(dense_input)
else:
return Lambda(lambda x: tf.constant([[0.0]]))(list(features.values())[0])
linear_logit_list.append(linear_logit)
return concat_func(linear_logit_list)
|
DeepCTR
|
positive
|
def update_or_create(self):
log('ModuleExecutor.update_or_create()')
if not self.ssl_certkey_exists():
self.module_result['changed'] = True
if not self.module.check_mode:
log('ssl certkey does not exist. Will create.')
<DeepExtract>
log('ModuleExecutor.create_ssl_certkey()')
processed_data = copy.deepcopy(self.configured_ssl_certkey)
if 'nodomaincheck' in processed_data:
del processed_data['nodomaincheck']
if 'deletefromdevice' in processed_data:
del processed_data['deletefromdevice']
post_data = {'sslcertkey': processed_data}
result = self.fetcher.post(post_data=post_data, resource='sslcertkey')
log('post data: %s' % post_data)
log('result of post: %s' % result)
if result['http_response_data']['status'] == 201:
if result.get('nitro_errorcode') is not None:
if result['nitro_errorcode'] != 0:
raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity'))
elif 400 <= result['http_response_data']['status'] <= 599:
raise NitroException(errorcode=result.get('nitro_errorcode'), message=result.get('nitro_message'), severity=result.get('nitro_severity'))
else:
msg = 'Did not get nitro errorcode and http status was not 201 or 4xx (%s)' % result['http_response_data']['status']
self.module.fail_json(msg=msg, **self.module_result)
</DeepExtract>
elif not self.ssl_certkey_identical():
self.module_result['changed'] = True
if not self.module.check_mode:
log('ssl certkey not identical. Will update.')
<DeepExtract>
log('ModuleExecutor.update_ssl_certkey()')
changed_keys = list(frozenset(self.differing_keys) & frozenset(self.change_keys))
if len(changed_keys) > 0:
log('Keys that force change operation %s' % changed_keys)
self.do_change_operation()
updated_keys = list(frozenset(self.differing_keys) & frozenset(self.update_keys))
if len(updated_keys) > 0:
log('Keys that force update operations %s' % updated_keys)
self.do_update_operation()
</DeepExtract>
else:
self.module_result['changed'] = False
|
def update_or_create(self):
    """Create the sslcertkey if absent, otherwise update it when its
    configured attributes differ from the existing ones.

    Sets self.module_result['changed'] accordingly and honors Ansible
    check mode by skipping the actual NITRO API calls.
    """
    log('ModuleExecutor.update_or_create()')
    if not self.ssl_certkey_exists():
        self.module_result['changed'] = True
        if not self.module.check_mode:
            log('ssl certkey does not exist. Will create.')
            log('ModuleExecutor.create_ssl_certkey()')
            processed_data = copy.deepcopy(self.configured_ssl_certkey)
            # 'nodomaincheck' and 'deletefromdevice' are module-only flags,
            # not sslcertkey attributes, so strip them before the POST.
            if 'nodomaincheck' in processed_data:
                del processed_data['nodomaincheck']
            if 'deletefromdevice' in processed_data:
                del processed_data['deletefromdevice']
            post_data = {'sslcertkey': processed_data}
            result = self.fetcher.post(post_data=post_data, resource='sslcertkey')
            log('post data: %s' % post_data)
            log('result of post: %s' % result)
            # 201 Created is the expected success status; a nonzero NITRO
            # errorcode still signals failure even on 201.
            if result['http_response_data']['status'] == 201:
                if result.get('nitro_errorcode') is not None:
                    if result['nitro_errorcode'] != 0:
                        raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity'))
            elif 400 <= result['http_response_data']['status'] <= 599:
                raise NitroException(errorcode=result.get('nitro_errorcode'), message=result.get('nitro_message'), severity=result.get('nitro_severity'))
            else:
                msg = 'Did not get nitro errorcode and http status was not 201 or 4xx (%s)' % result['http_response_data']['status']
                self.module.fail_json(msg=msg, **self.module_result)
    elif not self.ssl_certkey_identical():
        self.module_result['changed'] = True
        if not self.module.check_mode:
            log('ssl certkey not identical. Will update.')
            log('ModuleExecutor.update_ssl_certkey()')
            # Some attribute diffs require the NITRO 'change' operation,
            # others a plain update; dispatch based on which keys differ.
            changed_keys = list(frozenset(self.differing_keys) & frozenset(self.change_keys))
            if len(changed_keys) > 0:
                log('Keys that force change operation %s' % changed_keys)
                self.do_change_operation()
            updated_keys = list(frozenset(self.differing_keys) & frozenset(self.update_keys))
            if len(updated_keys) > 0:
                log('Keys that force update operations %s' % updated_keys)
                self.do_update_operation()
    else:
        self.module_result['changed'] = False
|
citrix-adc-ansible-modules
|
positive
|
@cached
def sha256(self, include_version=False) -> str:
<DeepExtract>
</DeepExtract>
return utils.dict_hash(hashable_contents)
|
@cached
def sha256(self, include_version=False) -> str:
    """Return a stable hash of the object's contents.

    NOTE(review): `hashable_contents` is never defined in this function
    (and `include_version` is unused), so calling it raises NameError.
    Presumably the dict of hashable contents — optionally including the
    version — was meant to be computed here; TODO restore it.
    """
    return utils.dict_hash(hashable_contents)
|
detection-rules
|
positive
|
def mask_rcnn_fcn_head_v0upshare(model, blob_in, dim_in, spatial_scale):
"""Use a ResNet "conv5" / "stage5" head for mask prediction. Weights and
computation are shared with the conv5 box head. Computation can only be
shared during training, since inference is cascaded.
v0upshare design: conv5, convT 2x2.
"""
assert cfg.MRCNN.ROI_XFORM_RESOLUTION == cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
if model.train:
dim_conv5 = 2048
blob_conv5 = model.net.SampleAs(['res5_2_sum', 'roi_has_mask_int32'], ['_[mask]_res5_2_sum_sliced'])
else:
<DeepExtract>
model.RoIFeatureTransform(blob_in, blob_out='_[mask]_pool5', blob_rois='mask_rois', method=cfg.MRCNN.ROI_XFORM_METHOD, resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION, sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale)
dilation = cfg.MRCNN.DILATION
stride_init = int(cfg.MRCNN.ROI_XFORM_RESOLUTION / 7)
(s, dim_in) = ResNet.add_stage(model, '_[mask]_res5', '_[mask]_pool5', 3, dim_in, 2048, 512, dilation, stride_init=stride_init)
(blob_conv5, dim_conv5) = (s, 2048)
</DeepExtract>
dim_reduced = cfg.MRCNN.DIM_REDUCED
blob_mask = model.ConvTranspose(blob_conv5, 'conv5_mask', dim_conv5, dim_reduced, kernel=2, pad=0, stride=2, weight_init=(cfg.MRCNN.CONV_INIT, {'std': 0.001}), bias_init=const_fill(0.0))
model.Relu('conv5_mask', 'conv5_mask')
return (blob_mask, dim_reduced)
|
def mask_rcnn_fcn_head_v0upshare(model, blob_in, dim_in, spatial_scale):
    """Use a ResNet "conv5" / "stage5" head for mask prediction. Weights and
    computation are shared with the conv5 box head. Computation can only be
    shared during training, since inference is cascaded.
    v0upshare design: conv5, convT 2x2.
    """
    # Sharing requires the box and mask heads to pool RoIs at the same size.
    assert cfg.MRCNN.ROI_XFORM_RESOLUTION == cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
    if model.train:
        # Training: reuse the box head's conv5 output, slicing out only the
        # RoIs that carry a mask target.
        dim_conv5 = 2048
        blob_conv5 = model.net.SampleAs(['res5_2_sum', 'roi_has_mask_int32'], ['_[mask]_res5_2_sum_sliced'])
    else:
        # Inference: recompute conv5 on the mask RoIs (cascaded, not shared).
        model.RoIFeatureTransform(blob_in, blob_out='_[mask]_pool5', blob_rois='mask_rois', method=cfg.MRCNN.ROI_XFORM_METHOD, resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION, sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO, spatial_scale=spatial_scale)
        dilation = cfg.MRCNN.DILATION
        stride_init = int(cfg.MRCNN.ROI_XFORM_RESOLUTION / 7)
        (s, dim_in) = ResNet.add_stage(model, '_[mask]_res5', '_[mask]_pool5', 3, dim_in, 2048, 512, dilation, stride_init=stride_init)
        (blob_conv5, dim_conv5) = (s, 2048)
    dim_reduced = cfg.MRCNN.DIM_REDUCED
    # 2x upsampling via a transposed convolution, followed by ReLU.
    blob_mask = model.ConvTranspose(blob_conv5, 'conv5_mask', dim_conv5, dim_reduced, kernel=2, pad=0, stride=2, weight_init=(cfg.MRCNN.CONV_INIT, {'std': 0.001}), bias_init=const_fill(0.0))
    model.Relu('conv5_mask', 'conv5_mask')
    return (blob_mask, dim_reduced)
|
CBNet
|
positive
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
n = args[2]
result_tmp = d_a * (d_b & 65535) << n.value
e_d_0 = self.get('d{0}'.format(self.data['d']), Type.int_32)
e_d_1 = self.get('d{0}'.format(self.data['d'] + 1), Type.int_32)
result_w0 = e_d_0 - result_tmp
result_w1 = e_d_1
self.put(result_w0, 'd{0}'.format(self.data['c']))
self.put(result_w1, 'd{0}'.format(self.data['c'] + 1))
result = result_w1
result <<= 32
result |= result_w0
c = 0
v = overflow_64(result).cast_to(Type.int_32)
av = advanced_overflow_64(result).cast_to(Type.int_32)
<DeepExtract>
psw = self.get('psw', Type.int_32)
</DeepExtract>
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
|
def compute_result(self, *args):
    """Lift a multiply-subtract on a 64-bit register pair and update PSW.

    Computes E[d] - (d_a * (d_b & 0xffff) << n), writes the result into
    the register pair d[c]/d[c+1], then updates the PSW overflow and
    sticky-overflow status bits.

    NOTE(review): only the low word is affected by the subtraction
    (result_w1 is E[d+1] unchanged) — confirm against the ISA manual.
    """
    d_a = args[0]
    d_b = args[1]
    n = args[2]
    # Product of d_a and the low half-word of d_b, pre-shifted left by n.
    result_tmp = d_a * (d_b & 65535) << n.value
    e_d_0 = self.get('d{0}'.format(self.data['d']), Type.int_32)
    e_d_1 = self.get('d{0}'.format(self.data['d'] + 1), Type.int_32)
    result_w0 = e_d_0 - result_tmp
    result_w1 = e_d_1
    self.put(result_w0, 'd{0}'.format(self.data['c']))
    self.put(result_w1, 'd{0}'.format(self.data['c'] + 1))
    # Reassemble the 64-bit value (hi:result_w1, lo:result_w0) so the
    # overflow helpers can inspect the full result.
    result = result_w1
    result <<= 32
    result |= result_w0
    c = 0  # carry is always cleared for this operation
    v = overflow_64(result).cast_to(Type.int_32)
    av = advanced_overflow_64(result).cast_to(Type.int_32)
    psw = self.get('psw', Type.int_32)
    # Sticky flags keep their previous value unless a new overflow occurred.
    cond_sv = v == 0
    cond_sav = av == 0
    sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
    sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
    psw = set_usb(psw, c, v, sv, av, sav)
    self.put(psw, 'psw')
|
angr-platforms
|
positive
|
def read_field(self, name, bitstring=True):
for e in self.Fields.EFUSES:
field = self.Fields.get(e)
if field.name == name:
<DeepExtract>
block = None
for b in self.Blocks.BLOCKS:
blk = self.Blocks.get(b)
if blk.id == field.block:
blk_len_bits = self.get_bitlen_of_block(blk, wr=wr_regs)
addr = blk.wr_addr if wr_regs else blk.rd_addr
self.mem.pos = self.mem.length - ((addr - self.REGS.DR_REG_EFUSE_BASE) * 8 + blk_len_bits)
block = self.mem.read(blk_len_bits)
break
return block
</DeepExtract>
<DeepExtract>
block = None
for b in self.Blocks.BLOCKS:
blk = self.Blocks.get(b)
if blk.id == field.block:
blk_len_bits = self.get_bitlen_of_block(blk, wr=wr_regs)
addr = blk.wr_addr if wr_regs else blk.rd_addr
self.mem.pos = self.mem.length - ((addr - self.REGS.DR_REG_EFUSE_BASE) * 8 + blk_len_bits)
block = self.mem.read(blk_len_bits)
break
block = block
</DeepExtract>
if field.type.startswith('bool'):
field_len = 1
else:
field_len = int(re.search('\\d+', field.type).group())
if field.type.startswith('bytes'):
field_len *= 8
block.pos = block.length - (field.word * 32 + field.pos + field_len)
if bitstring:
return block.read(field_len)
else:
return block.read(field.type)
return None
|
def read_field(self, name, bitstring=True):
    """Locate the efuse field *name* and return its value.

    Returns a raw bitstring slice of the field's block when *bitstring*
    is true, otherwise the value decoded according to the field's
    declared type ('bool...', 'bytes:N', or a sized type). Returns None
    when no field is named *name*.
    """
    for e in self.Fields.EFUSES:
        field = self.Fields.get(e)
        if field.name == name:
            # Read the raw contents of the block that holds this field.
            # NOTE(review): `wr_regs` is a free name in this scope; it
            # appears to select write vs read register addresses and is
            # presumably False for a read — confirm against the caller.
            block = None
            for b in self.Blocks.BLOCKS:
                blk = self.Blocks.get(b)
                if blk.id == field.block:
                    blk_len_bits = self.get_bitlen_of_block(blk, wr=wr_regs)
                    addr = blk.wr_addr if wr_regs else blk.rd_addr
                    self.mem.pos = self.mem.length - ((addr - self.REGS.DR_REG_EFUSE_BASE) * 8 + blk_len_bits)
                    block = self.mem.read(blk_len_bits)
                    break
            # Bug fix: the original contained two inlined copies of the
            # block-lookup loop above, the first of which ended in
            # `return block`, making the field extraction below
            # unreachable. The duplicate has been removed.
            if field.type.startswith('bool'):
                field_len = 1
            else:
                field_len = int(re.search('\\d+', field.type).group())
                if field.type.startswith('bytes'):
                    # Byte-typed fields declare their length in bytes.
                    field_len *= 8
            # Seek to the field's bit offset within the block (fields are
            # addressed by 32-bit word plus bit position).
            block.pos = block.length - (field.word * 32 + field.pos + field_len)
            if bitstring:
                return block.read(field_len)
            return block.read(field.type)
    return None
|
esptool
|
positive
|
@njit(**params)
def f(bufn, pos, count):
new_pos = pos + ele_size * count
if new_pos > bufn[1]:
<DeepExtract>
raise FFError('ff_read: not enough data')
</DeepExtract>
v = np.frombuffer(bufn[0][pos:new_pos], dtype=dt)
return (list(v), new_pos)
|
@njit(**params)
def f(bufn, pos, count):
    """Read *count* elements of the closed-over dtype from the buffer
    starting at *pos*; return (values, new position)."""
    end = pos + ele_size * count
    # Refuse to read past the valid length stored in bufn[1].
    if end > bufn[1]:
        raise FFError('ff_read: not enough data')
    values = np.frombuffer(bufn[0][pos:end], dtype=dt)
    return (list(values), end)
|
deca
|
positive
|
@pwndbg.memoize.reset_on_cont
def get_one_instruction(address):
<DeepExtract>
if pwndbg.arch.current == 'armcm':
extra = CS_MODE_MCLASS | CS_MODE_THUMB if pwndbg.regs.xpsr & 1 << 24 else CS_MODE_MCLASS
elif pwndbg.arch.current in ('arm', 'aarch64'):
extra = CS_MODE_THUMB if pwndbg.regs.cpsr & 1 << 5 else CS_MODE_ARM
elif pwndbg.arch.current == 'sparc':
if 'v9' in gdb.newest_frame().architecture().name():
extra = CS_MODE_V9
else:
extra = 0
else:
extra = None
md = get_disassembler_cached(pwndbg.arch.current, pwndbg.arch.ptrsize, pwndbg.arch.endian, extra)
</DeepExtract>
size = VariableInstructionSizeMax.get(pwndbg.arch.current, 4)
data = pwndbg.memory.read(address, size, partial=True)
for ins in md.disasm(bytes(data), address, 1):
pwndbg.disasm.arch.DisassemblyAssistant.enhance(ins)
return ins
|
@pwndbg.memoize.reset_on_cont
def get_one_instruction(address):
    """Disassemble and return the single instruction at *address*.

    Results are memoized until the inferior continues. Returns None
    implicitly if capstone produces no instruction for the bytes read.
    """
    # Pick the capstone mode flags that match the current CPU state
    # (Cortex-M class, Thumb vs ARM via CPSR/xPSR bits, SPARC v9).
    if pwndbg.arch.current == 'armcm':
        extra = CS_MODE_MCLASS | CS_MODE_THUMB if pwndbg.regs.xpsr & 1 << 24 else CS_MODE_MCLASS
    elif pwndbg.arch.current in ('arm', 'aarch64'):
        extra = CS_MODE_THUMB if pwndbg.regs.cpsr & 1 << 5 else CS_MODE_ARM
    elif pwndbg.arch.current == 'sparc':
        if 'v9' in gdb.newest_frame().architecture().name():
            extra = CS_MODE_V9
        else:
            extra = 0
    else:
        extra = None
    md = get_disassembler_cached(pwndbg.arch.current, pwndbg.arch.ptrsize, pwndbg.arch.endian, extra)
    # Read up to the architecture's maximum instruction size; partial=True
    # tolerates reads that cross into unmapped memory.
    size = VariableInstructionSizeMax.get(pwndbg.arch.current, 4)
    data = pwndbg.memory.read(address, size, partial=True)
    for ins in md.disasm(bytes(data), address, 1):
        pwndbg.disasm.arch.DisassemblyAssistant.enhance(ins)
        return ins
|
217gdb
|
positive
|
def test_edit_someone_else_account_with_admin(self):
<DeepExtract>
if credentials is None:
credentials = self.credentials
self.client.login(username=credentials.get('email'), password=credentials.get('password'))
if login_2fa:
user = User.objects.get(email=credentials.get('email'))
self.login_2fa(user)
</DeepExtract>
<DeepExtract>
if is_superuser:
user2 = {'email': email, 'name': name, 'password': password, 'phone_number': '+12125552368'}
user2 = {'email': email, 'name': name, 'province': _get_province(province), 'is_admin': False, 'password': password, 'phone_number': '+12125552368'}
</DeepExtract>
user2 = User.objects.create_user(**user2)
response = self.client.get(reverse('user_edit_name', kwargs={'pk': user2.id}))
self.assertEqual(response.status_code, 403)
|
def test_edit_someone_else_account_with_admin(self):
    """A logged-in user must get 403 when editing another user's name.

    NOTE(review): this body references several unbound names
    (`credentials`, `login_2fa`, `is_superuser`, `email`, `name`,
    `password`, `province`) — it appears to have been flattened from
    parameterized helper functions and raises NameError as written.
    """
    if credentials is None:
        credentials = self.credentials
    self.client.login(username=credentials.get('email'), password=credentials.get('password'))
    if login_2fa:
        user = User.objects.get(email=credentials.get('email'))
        self.login_2fa(user)
    # NOTE(review): the first user2 dict is immediately overwritten by the
    # second — dead code left over from the inlined helper's two returns.
    if is_superuser:
        user2 = {'email': email, 'name': name, 'password': password, 'phone_number': '+12125552368'}
        user2 = {'email': email, 'name': name, 'province': _get_province(province), 'is_admin': False, 'password': password, 'phone_number': '+12125552368'}
    user2 = User.objects.create_user(**user2)
    response = self.client.get(reverse('user_edit_name', kwargs={'pk': user2.id}))
    self.assertEqual(response.status_code, 403)
|
covid-alert-portal
|
positive
|
def fetch_balance(self, public_key):
<DeepExtract>
utxos = [tx_out for tx_out in self.utxo_set.values() if tx_out.public_key == public_key]
</DeepExtract>
return sum([tx_out.amount for tx_out in utxos])
|
def fetch_balance(self, public_key):
    """Return the total unspent amount owned by *public_key*."""
    return sum(
        tx_out.amount
        for tx_out in self.utxo_set.values()
        if tx_out.public_key == public_key
    )
|
digital-cash
|
positive
|
def branches(self, num_branches):
"""Create num_branches child matchers, one of which must match for the parent match to succeed."""
child_group = []
for _ in range(num_branches):
<DeepExtract>
new_matcher = Matcher(self.comp, self.original, self.loc, self.check_var, self.style, self.name_list, self.names)
</DeepExtract>
child_group.append(new_matcher)
self.child_groups.append(child_group)
return child_group
|
def branches(self, num_branches):
    """Create num_branches child matchers, one of which must match for the parent match to succeed."""
    # Each child shares this matcher's compilation context and name bindings.
    child_group = [
        Matcher(self.comp, self.original, self.loc, self.check_var, self.style, self.name_list, self.names)
        for _ in range(num_branches)
    ]
    self.child_groups.append(child_group)
    return child_group
|
coconut
|
positive
|
def create_bootstrap_steps(resource_type):
"""Create the boostrap steps for installation on all machines
Args:
resource_type(enum of str): type of resource we're bootstraping
can be ec2 / emr
"""
step_params = self.bootstrap_definitions.get(resource_type, list())
<DeepExtract>
input_node = None
steps = []
step_params = process_steps(step_params)
for step_param in step_params:
if isinstance(input_node, S3Node) and 'input_node' not in step_param and ('input_path' not in step_param):
step_param['input_node'] = input_node
if is_teardown:
step_param['sns_object'] = self.sns
try:
step_class = step_param.pop('step_class')
step_args = step_class.arguments_processor(self, step_param)
except Exception:
logger.error('Error creating step with params: %s', step_param)
raise
try:
step = step_class(**step_args)
except Exception:
logger.error('Error creating step of class %s, step_param %s', str(step_class.__name__), str(step_args))
raise
self.add_step(step, True, is_teardown)
input_node = step.output
steps.append(step)
steps = steps
</DeepExtract>
self._bootstrap_steps.extend(steps)
return steps
|
def create_bootstrap_steps(resource_type):
    """Create the bootstrap steps for installation on all machines.

    Args:
        resource_type(enum of str): type of resource we're bootstrapping,
            can be ec2 / emr

    Returns:
        list: the created step objects (also appended to
        self._bootstrap_steps).
    """
    # NOTE(review): `self` and `is_teardown` are free names here — this
    # function appears to rely on an enclosing scope (or was flattened
    # from a method); confirm they resolve at runtime.
    step_params = self.bootstrap_definitions.get(resource_type, list())
    input_node = None
    steps = []
    step_params = process_steps(step_params)
    for step_param in step_params:
        # Chain the previous step's S3 output into this step unless the
        # step explicitly names its own input.
        if isinstance(input_node, S3Node) and 'input_node' not in step_param and ('input_path' not in step_param):
            step_param['input_node'] = input_node
        if is_teardown:
            step_param['sns_object'] = self.sns
        try:
            step_class = step_param.pop('step_class')
            step_args = step_class.arguments_processor(self, step_param)
        except Exception:
            logger.error('Error creating step with params: %s', step_param)
            raise
        try:
            step = step_class(**step_args)
        except Exception:
            logger.error('Error creating step of class %s, step_param %s', str(step_class.__name__), str(step_args))
            raise
        self.add_step(step, True, is_teardown)
        input_node = step.output
        steps.append(step)
    # (Removed the dead no-op `steps = steps` left by an inlined helper.)
    self._bootstrap_steps.extend(steps)
    return steps
|
dataduct
|
positive
|
def create_waymo_infos(root_path, split='train', nsweeps=1):
<DeepExtract>
dir_path = os.path.join(root_path, split, 'lidar')
available_frames = list(os.listdir(dir_path))
sorted_frames = sort_frame(available_frames)
print(split, ' split ', 'exist frame num:', len(available_frames))
frames = sorted_frames
</DeepExtract>
<DeepExtract>
infos = []
for frame_name in tqdm(frames):
lidar_path = os.path.join(root_path, split, 'lidar', frame_name)
ref_path = os.path.join(root_path, split, 'annos', frame_name)
ref_obj = get_obj(ref_path)
ref_time = 1e-06 * int(ref_obj['frame_name'].split('_')[-1])
ref_pose = np.reshape(ref_obj['veh_to_global'], [4, 4])
(_, ref_from_global) = veh_pos_to_transform(ref_pose)
info = {'path': lidar_path, 'anno_path': ref_path, 'token': frame_name, 'timestamp': ref_time, 'sweeps': []}
sequence_id = int(frame_name.split('_')[1])
frame_id = int(frame_name.split('_')[3][:-4])
prev_id = frame_id
sweeps = []
while len(sweeps) < nsweeps - 1:
if prev_id <= 0:
if len(sweeps) == 0:
sweep = {'path': lidar_path, 'token': frame_name, 'transform_matrix': None, 'time_lag': 0}
sweeps.append(sweep)
else:
sweeps.append(sweeps[-1])
else:
prev_id = prev_id - 1
curr_name = 'seq_{}_frame_{}.pkl'.format(sequence_id, prev_id)
curr_lidar_path = os.path.join(root_path, split, 'lidar', curr_name)
curr_label_path = os.path.join(root_path, split, 'annos', curr_name)
curr_obj = get_obj(curr_label_path)
curr_pose = np.reshape(curr_obj['veh_to_global'], [4, 4])
(global_from_car, _) = veh_pos_to_transform(curr_pose)
tm = reduce(np.dot, [ref_from_global, global_from_car])
curr_time = int(curr_obj['frame_name'].split('_')[-1])
time_lag = ref_time - 1e-06 * curr_time
sweep = {'path': curr_lidar_path, 'transform_matrix': tm, 'time_lag': time_lag}
sweeps.append(sweep)
info['sweeps'] = sweeps
if split != 'test':
TYPE_LIST = ['UNKNOWN', 'VEHICLE', 'PEDESTRIAN', 'SIGN', 'CYCLIST']
annos = ref_obj['objects']
num_points_in_gt = np.array([ann['num_points'] for ann in annos])
gt_boxes = np.array([ann['box'] for ann in annos]).reshape(-1, 9)
if len(gt_boxes) != 0:
gt_boxes[:, -1] = -np.pi / 2 - gt_boxes[:, -1]
gt_boxes[:, [3, 4]] = gt_boxes[:, [4, 3]]
gt_names = np.array([TYPE_LIST[ann['label']] for ann in annos])
mask_not_zero = (num_points_in_gt > 0).reshape(-1)
info['gt_boxes'] = gt_boxes[mask_not_zero, :].astype(np.float32)
info['gt_names'] = gt_names[mask_not_zero].astype(str)
infos.append(info)
waymo_infos = infos
</DeepExtract>
print(f'sample: {len(waymo_infos)}')
with open(os.path.join(root_path, 'infos_' + split + '_{:02d}sweeps_filter_zero_gt.pkl'.format(nsweeps)), 'wb') as f:
pickle.dump(waymo_infos, f)
|
def create_waymo_infos(root_path, split='train', nsweeps=1):
    """Build the info pickle for a Waymo split, including sweep metadata.

    Scans <root_path>/<split>/lidar for frames, pairs each with its
    annotation pkl, links up to nsweeps-1 preceding sweeps of the same
    sequence (with the transform into the reference vehicle frame),
    attaches ground-truth boxes for non-test splits (dropping boxes that
    contain zero lidar points), and dumps the result to
    infos_<split>_NNsweeps_filter_zero_gt.pkl under root_path.
    """
    dir_path = os.path.join(root_path, split, 'lidar')
    available_frames = list(os.listdir(dir_path))
    sorted_frames = sort_frame(available_frames)
    print(split, ' split ', 'exist frame num:', len(available_frames))
    frames = sorted_frames
    infos = []
    for frame_name in tqdm(frames):
        lidar_path = os.path.join(root_path, split, 'lidar', frame_name)
        ref_path = os.path.join(root_path, split, 'annos', frame_name)
        ref_obj = get_obj(ref_path)
        # The timestamp (microseconds) is the last '_'-separated token of
        # the stored frame name.
        ref_time = 1e-06 * int(ref_obj['frame_name'].split('_')[-1])
        ref_pose = np.reshape(ref_obj['veh_to_global'], [4, 4])
        (_, ref_from_global) = veh_pos_to_transform(ref_pose)
        info = {'path': lidar_path, 'anno_path': ref_path, 'token': frame_name, 'timestamp': ref_time, 'sweeps': []}
        # File names look like 'seq_<sid>_frame_<fid>.pkl'.
        sequence_id = int(frame_name.split('_')[1])
        frame_id = int(frame_name.split('_')[3][:-4])
        prev_id = frame_id
        sweeps = []
        # Collect up to nsweeps-1 previous frames of the same sequence.
        while len(sweeps) < nsweeps - 1:
            if prev_id <= 0:
                # No earlier frame exists: pad with the reference frame
                # itself, or repeat the last collected sweep.
                if len(sweeps) == 0:
                    sweep = {'path': lidar_path, 'token': frame_name, 'transform_matrix': None, 'time_lag': 0}
                    sweeps.append(sweep)
                else:
                    sweeps.append(sweeps[-1])
            else:
                prev_id = prev_id - 1
                curr_name = 'seq_{}_frame_{}.pkl'.format(sequence_id, prev_id)
                curr_lidar_path = os.path.join(root_path, split, 'lidar', curr_name)
                curr_label_path = os.path.join(root_path, split, 'annos', curr_name)
                curr_obj = get_obj(curr_label_path)
                curr_pose = np.reshape(curr_obj['veh_to_global'], [4, 4])
                (global_from_car, _) = veh_pos_to_transform(curr_pose)
                # Map the sweep's vehicle frame into the reference frame:
                # ref_from_global @ global_from_car.
                tm = reduce(np.dot, [ref_from_global, global_from_car])
                curr_time = int(curr_obj['frame_name'].split('_')[-1])
                time_lag = ref_time - 1e-06 * curr_time
                sweep = {'path': curr_lidar_path, 'transform_matrix': tm, 'time_lag': time_lag}
                sweeps.append(sweep)
        info['sweeps'] = sweeps
        if split != 'test':
            TYPE_LIST = ['UNKNOWN', 'VEHICLE', 'PEDESTRIAN', 'SIGN', 'CYCLIST']
            annos = ref_obj['objects']
            num_points_in_gt = np.array([ann['num_points'] for ann in annos])
            gt_boxes = np.array([ann['box'] for ann in annos]).reshape(-1, 9)
            if len(gt_boxes) != 0:
                # Convert heading to the box convention used downstream
                # and swap length/width to match.
                gt_boxes[:, -1] = -np.pi / 2 - gt_boxes[:, -1]
                gt_boxes[:, [3, 4]] = gt_boxes[:, [4, 3]]
            gt_names = np.array([TYPE_LIST[ann['label']] for ann in annos])
            # Keep only boxes that actually contain lidar points.
            mask_not_zero = (num_points_in_gt > 0).reshape(-1)
            info['gt_boxes'] = gt_boxes[mask_not_zero, :].astype(np.float32)
            info['gt_names'] = gt_names[mask_not_zero].astype(str)
        infos.append(info)
    waymo_infos = infos
    print(f'sample: {len(waymo_infos)}')
    with open(os.path.join(root_path, 'infos_' + split + '_{:02d}sweeps_filter_zero_gt.pkl'.format(nsweeps)), 'wb') as f:
        pickle.dump(waymo_infos, f)
|
CenterPoint
|
positive
|
def test_detail_delete_template(admin_client):
"""Tests for proper plan_list_detail_delete template."""
<DeepExtract>
plan_list = models.PlanList.objects.create(title=title)
</DeepExtract>
<DeepExtract>
if not plan:
plan = create_plan()
if not plan_list:
plan_list = create_plan_list()
detail = models.PlanListDetail.objects.create(plan=plan, plan_list=plan_list, order=order)
</DeepExtract>
response = admin_client.get(reverse('dfs_plan_list_detail_delete', kwargs={'plan_list_id': plan_list.id, 'plan_list_detail_id': detail.id}))
assert 'subscriptions/plan_list_detail_delete.html' in [t.name for t in response.templates]
|
def test_detail_delete_template(admin_client):
    """Tests for proper plan_list_detail_delete template.

    NOTE(review): `title`, `plan`, `create_plan`, `create_plan_list`
    and `order` are unbound names here — the body appears to have been
    flattened from fixture helpers and raises NameError as written.
    """
    plan_list = models.PlanList.objects.create(title=title)
    if not plan:
        plan = create_plan()
    if not plan_list:
        plan_list = create_plan_list()
    detail = models.PlanListDetail.objects.create(plan=plan, plan_list=plan_list, order=order)
    response = admin_client.get(reverse('dfs_plan_list_detail_delete', kwargs={'plan_list_id': plan_list.id, 'plan_list_detail_id': detail.id}))
    assert 'subscriptions/plan_list_detail_delete.html' in [t.name for t in response.templates]
|
django-flexible-subscriptions
|
positive
|
def test_build_start_with():
"""Test building all packages starting with a specific one."""
with redirected_stdio():
for build_type in BUILD_TYPES:
with workspace_factory() as wf:
<DeepExtract>
for i in range(4):
wf.create_package('pkg_{}'.format(i), depends=['pkg_{}'.format(i - 1)] if i > 0 else [])
</DeepExtract>
wf.build()
assert catkin_failure(BUILD + ['--start-with'])
assert catkin_failure(BUILD + ['--start-with', 'pkg_nil'])
assert catkin_success(BUILD + ['--start-with', 'pkg_0'])
for i in range(4):
assert os.path.exists(os.path.join('build', 'pkg_{}'.format(i)))
assert catkin_success(CLEAN)
assert catkin_success(BUILD + ['--start-with', 'pkg_2'])
assert not os.path.exists(os.path.join('build', 'pkg_0'))
assert not os.path.exists(os.path.join('build', 'pkg_1'))
assert os.path.exists(os.path.join('build', 'pkg_2'))
assert os.path.exists(os.path.join('build', 'pkg_3'))
assert catkin_success(CLEAN)
|
def test_build_start_with():
    """Test building all packages starting with a specific one."""
    with redirected_stdio():
        for build_type in BUILD_TYPES:
            with workspace_factory() as wf:
                # Linear dependency chain: pkg_0 <- pkg_1 <- pkg_2 <- pkg_3.
                for i in range(4):
                    wf.create_package('pkg_{}'.format(i), depends=['pkg_{}'.format(i - 1)] if i > 0 else [])
                wf.build()
                # --start-with needs an argument naming an existing package.
                assert catkin_failure(BUILD + ['--start-with'])
                assert catkin_failure(BUILD + ['--start-with', 'pkg_nil'])
                # Starting at the root builds everything.
                assert catkin_success(BUILD + ['--start-with', 'pkg_0'])
                for i in range(4):
                    assert os.path.exists(os.path.join('build', 'pkg_{}'.format(i)))
                assert catkin_success(CLEAN)
                # Starting mid-chain skips the packages that come before it.
                assert catkin_success(BUILD + ['--start-with', 'pkg_2'])
                for i in range(4):
                    built = os.path.exists(os.path.join('build', 'pkg_{}'.format(i)))
                    assert built == (i >= 2)
                assert catkin_success(CLEAN)
|
catkin_tools
|
positive
|
def test_add_tags(self):
<DeepExtract>
my_wf = Workflow.from_dict(self.bs_wf.to_dict())
</DeepExtract>
my_wf.metadata = {'tags': ['a']}
my_wf = add_tags(my_wf, ['b', 'c'])
found = 0
self.assertEqual(my_wf.metadata['tags'], ['a', 'b', 'c'])
for fw in my_wf.fws:
self.assertEqual(fw.spec['tags'], ['b', 'c'])
for t in fw.tasks:
if 'VaspToDb' in str(t):
self.assertEqual(t['additional_fields']['tags'], ['b', 'c'])
found += 1
self.assertEqual(found, 4)
<DeepExtract>
my_wf = Workflow.from_dict(self.bsboltz_wf.to_dict())
</DeepExtract>
my_wf = add_tags(my_wf, ['foo', 'bar'])
v_found = 0
b_found = 0
self.assertEqual(my_wf.metadata['tags'], ['foo', 'bar'])
for fw in my_wf.fws:
self.assertEqual(fw.spec['tags'], ['foo', 'bar'])
for t in fw.tasks:
if 'BoltztrapToDb' in str(t):
self.assertEqual(t['additional_fields']['tags'], ['foo', 'bar'])
b_found += 1
if 'VaspToDb' in str(t):
self.assertEqual(t['additional_fields']['tags'], ['foo', 'bar'])
v_found += 1
self.assertEqual(b_found, 1)
self.assertEqual(v_found, 4)
|
def test_add_tags(self):
    """add_tags must propagate tags to the workflow metadata, every
    firework's spec, and the additional_fields of each *ToDb task —
    appending to existing metadata tags but setting fw/task tags."""
    my_wf = Workflow.from_dict(self.bs_wf.to_dict())
    my_wf.metadata = {'tags': ['a']}
    my_wf = add_tags(my_wf, ['b', 'c'])
    found = 0
    # Workflow-level metadata appends; fireworks/tasks get the new tags.
    self.assertEqual(my_wf.metadata['tags'], ['a', 'b', 'c'])
    for fw in my_wf.fws:
        self.assertEqual(fw.spec['tags'], ['b', 'c'])
        for t in fw.tasks:
            if 'VaspToDb' in str(t):
                self.assertEqual(t['additional_fields']['tags'], ['b', 'c'])
                found += 1
    self.assertEqual(found, 4)
    # Second workflow exercises both Boltztrap and Vasp DB tasks.
    my_wf = Workflow.from_dict(self.bsboltz_wf.to_dict())
    my_wf = add_tags(my_wf, ['foo', 'bar'])
    v_found = 0
    b_found = 0
    self.assertEqual(my_wf.metadata['tags'], ['foo', 'bar'])
    for fw in my_wf.fws:
        self.assertEqual(fw.spec['tags'], ['foo', 'bar'])
        for t in fw.tasks:
            if 'BoltztrapToDb' in str(t):
                self.assertEqual(t['additional_fields']['tags'], ['foo', 'bar'])
                b_found += 1
            if 'VaspToDb' in str(t):
                self.assertEqual(t['additional_fields']['tags'], ['foo', 'bar'])
                v_found += 1
    self.assertEqual(b_found, 1)
    self.assertEqual(v_found, 4)
|
atomate
|
positive
|
@mock.patch.object(timeutils, 'utcnow')
def test_evaluation_keep_alarm_attributes_constant(self, utcnow):
utcnow.return_value = datetime.datetime(2015, 7, 26, 3, 33, 21, 876795)
self._set_all_alarms('ok')
original_alarms = copy.deepcopy(self.alarms)
<DeepExtract>
now = timeutils.utcnow_ts()
if aggregated:
avgs = {'measures': {'aggregated': [[str(now - len([self.alarms[0].rule['threshold'] + v for v in range(1, 6)]) * 60), 60, value] for value in [self.alarms[0].rule['threshold'] + v for v in range(1, 6)]]}}
avgs = [[str(now - len([self.alarms[0].rule['threshold'] + v for v in range(1, 6)]) * 60), 60, value] for value in [self.alarms[0].rule['threshold'] + v for v in range(1, 6)]]
</DeepExtract>
self.client.metric.get_measures.side_effect = [avgs]
self._evaluate_all_alarms()
self._assert_all_alarms('alarm')
primitive_alarms = [a.as_dict() for a in self.alarms]
for alarm in original_alarms:
alarm.state = 'alarm'
alarm.state_reason = mock.ANY
primitive_original_alarms = [a.as_dict() for a in original_alarms]
self.assertEqual(primitive_original_alarms, primitive_alarms)
|
@mock.patch.object(timeutils, 'utcnow')
def test_evaluation_keep_alarm_attributes_constant(self, utcnow):
    """Transitioning ok -> alarm must change only state/state_reason,
    leaving all other alarm attributes untouched.

    NOTE(review): `aggregated` is an unbound name here and the second
    `avgs` assignment overwrites the first — this body was flattened
    from a helper where the dict and list forms were alternative
    returns; it raises NameError as written.
    """
    utcnow.return_value = datetime.datetime(2015, 7, 26, 3, 33, 21, 876795)
    self._set_all_alarms('ok')
    original_alarms = copy.deepcopy(self.alarms)
    now = timeutils.utcnow_ts()
    if aggregated:
        avgs = {'measures': {'aggregated': [[str(now - len([self.alarms[0].rule['threshold'] + v for v in range(1, 6)]) * 60), 60, value] for value in [self.alarms[0].rule['threshold'] + v for v in range(1, 6)]]}}
        avgs = [[str(now - len([self.alarms[0].rule['threshold'] + v for v in range(1, 6)]) * 60), 60, value] for value in [self.alarms[0].rule['threshold'] + v for v in range(1, 6)]]
    self.client.metric.get_measures.side_effect = [avgs]
    self._evaluate_all_alarms()
    self._assert_all_alarms('alarm')
    primitive_alarms = [a.as_dict() for a in self.alarms]
    # The expected alarms are the originals with only state fields changed.
    for alarm in original_alarms:
        alarm.state = 'alarm'
        alarm.state_reason = mock.ANY
    primitive_original_alarms = [a.as_dict() for a in original_alarms]
    self.assertEqual(primitive_original_alarms, primitive_alarms)
|
aodh
|
positive
|
def run(self, current_actions) -> bool:
    """Advance the optimized-rebalance state machine by one step.

    :param current_actions: descriptions of other actions currently running.
    :return: True while the change still has work to do (or is paused),
        False when the rebalance must be aborted.
    """
    # Stand by while other blocking events are in flight.
    if self.should_be_paused(current_actions):
        _LOG.warning('Rebalance paused, because other blocking events running: {}'.format(current_actions))
        return True
    # Wait for any in-progress partition reassignment to complete.
    if self.zk.is_rebalancing():
        return True
    new_broker_ids = sorted((int(id_) for id_ in self.zk.get_broker_ids()))
    # Abort if the broker set changed since this rebalance was planned.
    if new_broker_ids != self.all_broker_ids:
        _LOG.warning('Rebalance stopped because of broker list change from {} to {}'.format(self.all_broker_ids, new_broker_ids))
        return False
    if self.state == OptimizedRebalanceChange._LOAD_STATE:
        # Load the current assignment and compute per-broker expectations.
        <DeepExtract>
        self.broker_distribution = {id_: BrokerDescription(id_, self.broker_racks[id_]) for id_ in self.broker_ids}
        self.source_distribution = {(topic, partition): replicas for (topic, partition, replicas) in self.zk.load_partition_assignment() if topic not in self.exclude_topics}
        for (topic_partition, replicas) in self.source_distribution.items():
            if not replicas:
                continue
            # First replica listed for a partition is its leader.
            leader = True
            for replica in replicas:
                if replica not in self.broker_distribution:
                    self.broker_distribution[replica] = BrokerDescription(replica, self.broker_racks[replica])
                if leader:
                    self.broker_distribution[replica].add_leader(topic_partition)
                    leader = False
                else:
                    self.broker_distribution[replica].add_replica(topic_partition)
        active_brokers = [broker for (id_, broker) in self.broker_distribution.items() if id_ in self.broker_ids]
        # Group brokers by rack so counts are balanced within each rack.
        rack_to_brokers = {}
        for (broker, rack) in self.broker_racks.items():
            rack_to_brokers.setdefault(rack, []).append(broker)
        for rack in rack_to_brokers.keys():
            active_brokers_in_rack = [active_broker for active_broker in active_brokers if active_broker._rack_id == rack]
            total_leaders = sum((b.get_leader_count() for b in self.broker_distribution.values() if b._rack_id == rack))
            new_leader_count = distribute(total_leaders, active_brokers_in_rack, BrokerDescription.get_leader_count)
            # NOTE(review): `b.rack_id` here vs `b._rack_id` above — confirm
            # both attributes exist on BrokerDescription and agree.
            total_replicas = sum((b.get_replica_count() for b in self.broker_distribution.values() if b.rack_id == rack)) + sum(new_leader_count)
            new_replica_count = distribute(total_replicas, active_brokers_in_rack, BrokerDescription.get_replica_count)
            for i in range(0, len(active_brokers_in_rack)):
                active_brokers_in_rack[i].set_leader_expectation(new_leader_count[i])
                # Replica expectation excludes the leader slots assigned above.
                active_brokers_in_rack[i].set_replica_expectation(new_replica_count[i] - new_leader_count[i])
        </DeepExtract>
        self.state = OptimizedRebalanceChange._COMPUTE_LEADERS
    elif self.state == OptimizedRebalanceChange._COMPUTE_LEADERS:
        # Move leadership until no broker exceeds its expected leader count.
        <DeepExtract>
        candidates = DistributionMap(self.broker_distribution.values())
        while any([broker.have_extra_leaders() for broker in self.broker_distribution.values()]):
            (source_broker, target_broker, topic) = candidates.take_move_pair()
            selected_partition = None
            source_partitions = source_broker.list_partitions(topic, False)
            target_partitions = target_broker.list_partitions(topic, True)
            # Prefer a partition the target already hosts as a replica.
            for partition in source_partitions:
                if partition in target_partitions:
                    selected_partition = partition
                    break
            selected_partition = source_partitions[0] if selected_partition is None else selected_partition
            topic_partition = (topic, selected_partition)
            target_broker.accept_leader(source_broker, topic_partition)
            self.action_queue.append((topic_partition, source_broker.broker_id, target_broker.broker_id))
            candidates.cleanup()
        </DeepExtract>
        self.state = OptimizedRebalanceChange._COMPUTE_REPLICAS
    elif self.state == OptimizedRebalanceChange._COMPUTE_REPLICAS:
        # Rebalance replicas; retry once in relaxed mode before giving up.
        <DeepExtract>
        self._remove_replica_copies()
        if not self._rebalance_replicas_template(False):
            self._rebalance_replicas_template(True)
            if not self._rebalance_replicas_template(False):
                _LOG.error('Failed to rebalance replicas. Probably because of replication factor problems. Will just stop the process')
                raise Exception('Failed to perform replica rebalance {}, {}, {}'.format(self.broker_distribution, self.broker_ids, self.action_queue))
        </DeepExtract>
        self.state = OptimizedRebalanceChange._SORT_ACTIONS
    elif self.state == OptimizedRebalanceChange._SORT_ACTIONS:
        # Convert queued moves into final replica lists per partition.
        <DeepExtract>
        result = {}
        for (topic_partition, source_broker_id, target_broker_id) in self.action_queue:
            if topic_partition not in result:
                result[topic_partition] = list(self.source_distribution[topic_partition])
            tmp_result = result[topic_partition]
            # Replace the last occurrence of the source broker with the target.
            for i in range(len(tmp_result) - 1, -1, -1):
                if tmp_result[i] == source_broker_id:
                    tmp_result[i] = target_broker_id
                    break
        self.action_queue = result
        </DeepExtract>
        self.state = OptimizedRebalanceChange._BALANCE
    elif self.state == OptimizedRebalanceChange._BALANCE:
        # Apply queued changes; finished when _balance reports no more work.
        return not self._balance()
    return True
|
def run(self, current_actions) -> bool:
    """Advance the optimized-rebalance state machine by one step.

    :param current_actions: descriptions of other actions currently running.
    :return: True while the change still has work to do (or is paused),
        False when the rebalance must be aborted.
    """
    # Stand by while other blocking events are in flight.
    if self.should_be_paused(current_actions):
        _LOG.warning('Rebalance paused, because other blocking events running: {}'.format(current_actions))
        return True
    # Wait for any in-progress partition reassignment to complete.
    if self.zk.is_rebalancing():
        return True
    new_broker_ids = sorted((int(id_) for id_ in self.zk.get_broker_ids()))
    # Abort if the broker set changed since this rebalance was planned.
    if new_broker_ids != self.all_broker_ids:
        _LOG.warning('Rebalance stopped because of broker list change from {} to {}'.format(self.all_broker_ids, new_broker_ids))
        return False
    if self.state == OptimizedRebalanceChange._LOAD_STATE:
        # --- load current assignment and compute per-broker expectations ---
        self.broker_distribution = {id_: BrokerDescription(id_, self.broker_racks[id_]) for id_ in self.broker_ids}
        self.source_distribution = {(topic, partition): replicas for (topic, partition, replicas) in self.zk.load_partition_assignment() if topic not in self.exclude_topics}
        for (topic_partition, replicas) in self.source_distribution.items():
            if not replicas:
                continue
            # First replica listed for a partition is its leader.
            leader = True
            for replica in replicas:
                if replica not in self.broker_distribution:
                    self.broker_distribution[replica] = BrokerDescription(replica, self.broker_racks[replica])
                if leader:
                    self.broker_distribution[replica].add_leader(topic_partition)
                    leader = False
                else:
                    self.broker_distribution[replica].add_replica(topic_partition)
        active_brokers = [broker for (id_, broker) in self.broker_distribution.items() if id_ in self.broker_ids]
        # Group brokers by rack so counts are balanced within each rack.
        rack_to_brokers = {}
        for (broker, rack) in self.broker_racks.items():
            rack_to_brokers.setdefault(rack, []).append(broker)
        for rack in rack_to_brokers.keys():
            active_brokers_in_rack = [active_broker for active_broker in active_brokers if active_broker._rack_id == rack]
            total_leaders = sum((b.get_leader_count() for b in self.broker_distribution.values() if b._rack_id == rack))
            new_leader_count = distribute(total_leaders, active_brokers_in_rack, BrokerDescription.get_leader_count)
            # NOTE(review): `b.rack_id` here vs `b._rack_id` above — confirm
            # both attributes exist on BrokerDescription and agree.
            total_replicas = sum((b.get_replica_count() for b in self.broker_distribution.values() if b.rack_id == rack)) + sum(new_leader_count)
            new_replica_count = distribute(total_replicas, active_brokers_in_rack, BrokerDescription.get_replica_count)
            for i in range(0, len(active_brokers_in_rack)):
                active_brokers_in_rack[i].set_leader_expectation(new_leader_count[i])
                # Replica expectation excludes the leader slots assigned above.
                active_brokers_in_rack[i].set_replica_expectation(new_replica_count[i] - new_leader_count[i])
        self.state = OptimizedRebalanceChange._COMPUTE_LEADERS
    elif self.state == OptimizedRebalanceChange._COMPUTE_LEADERS:
        # --- move leadership until no broker exceeds its expectation ---
        candidates = DistributionMap(self.broker_distribution.values())
        while any([broker.have_extra_leaders() for broker in self.broker_distribution.values()]):
            (source_broker, target_broker, topic) = candidates.take_move_pair()
            selected_partition = None
            source_partitions = source_broker.list_partitions(topic, False)
            target_partitions = target_broker.list_partitions(topic, True)
            # Prefer a partition the target already hosts as a replica.
            for partition in source_partitions:
                if partition in target_partitions:
                    selected_partition = partition
                    break
            selected_partition = source_partitions[0] if selected_partition is None else selected_partition
            topic_partition = (topic, selected_partition)
            target_broker.accept_leader(source_broker, topic_partition)
            self.action_queue.append((topic_partition, source_broker.broker_id, target_broker.broker_id))
            candidates.cleanup()
        self.state = OptimizedRebalanceChange._COMPUTE_REPLICAS
    elif self.state == OptimizedRebalanceChange._COMPUTE_REPLICAS:
        # --- rebalance replicas; retry once relaxed before giving up ---
        self._remove_replica_copies()
        if not self._rebalance_replicas_template(False):
            self._rebalance_replicas_template(True)
            if not self._rebalance_replicas_template(False):
                _LOG.error('Failed to rebalance replicas. Probably because of replication factor problems. Will just stop the process')
                raise Exception('Failed to perform replica rebalance {}, {}, {}'.format(self.broker_distribution, self.broker_ids, self.action_queue))
        self.state = OptimizedRebalanceChange._SORT_ACTIONS
    elif self.state == OptimizedRebalanceChange._SORT_ACTIONS:
        # --- convert queued moves into final replica lists per partition ---
        result = {}
        for (topic_partition, source_broker_id, target_broker_id) in self.action_queue:
            if topic_partition not in result:
                result[topic_partition] = list(self.source_distribution[topic_partition])
            tmp_result = result[topic_partition]
            # Replace the last occurrence of the source broker with the target.
            for i in range(len(tmp_result) - 1, -1, -1):
                if tmp_result[i] == source_broker_id:
                    tmp_result[i] = target_broker_id
                    break
        self.action_queue = result
        self.state = OptimizedRebalanceChange._BALANCE
    elif self.state == OptimizedRebalanceChange._BALANCE:
        # Apply queued changes; finished when _balance reports no more work.
        return not self._balance()
    return True
|
bubuku
|
positive
|
def _load_model(self, path):
    """Unpickle the model stored at *path*, cache it on self._model, return it."""
    <DeepExtract>
    with open(path, 'rb') as f:
        self._model = pickle.load(f)
    </DeepExtract>
    return self._model
|
def _load_model(self, path):
    """Deserialize the pickled model stored at *path*.

    The loaded object is cached on ``self._model`` and returned.
    """
    with open(path, 'rb') as model_file:
        self._model = pickle.load(model_file)
    return self._model
|
axcell
|
positive
|
def get_or_create_group(self, name, path=None, parent=None):
if not name:
return None
if not parent:
parent = self.root_group
elif not isinstance(parent, PBXGroup):
parent = self.objects.get(parent, self.root_group)
<DeepExtract>
if parent:
groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup' and g.get_name() == name and parent.has_child(g)]
else:
groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup' and g.get_name() == name]
groups = groups
</DeepExtract>
for grp in groups:
if parent.has_child(grp.id):
return grp
grp = PBXGroup.Create(name, path)
parent.add_child(grp)
self.objects[grp.id] = grp
self.modified = True
return grp
|
def get_or_create_group(self, name, path=None, parent=None):
    """Return the PBXGroup named *name* under *parent*, creating it if absent.

    :param name: group name; falsy -> returns None.
    :param path: optional path used only when a new group is created.
    :param parent: a PBXGroup, an object id, or None (defaults to root group).
    """
    if not name:
        return None
    if not parent:
        parent = self.root_group
    elif not isinstance(parent, PBXGroup):
        # Treat `parent` as an object id; fall back to the root group.
        parent = self.objects.get(parent, self.root_group)
    # Collect candidate groups with a matching name (and parent, if any).
    if parent:
        groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup' and g.get_name() == name and parent.has_child(g)]
    else:
        groups = [g for g in self.objects.values() if g.get('isa') == 'PBXGroup' and g.get_name() == name]
    for grp in groups:
        if parent.has_child(grp.id):
            return grp
    # Nothing matched: create, attach and register a new group.
    grp = PBXGroup.Create(name, path)
    parent.add_child(grp)
    self.objects[grp.id] = grp
    self.modified = True
    return grp
|
cocos2d-console
|
positive
|
def ensure_many_models(self, clip_min=None, clip_max=None):
    """Smoke-test the estimator under test across many learner combinations.

    Fits and predicts with every propensity classifier / outcome regressor
    pair; a subtest passes as long as no exception is raised.

    :param clip_min: optional lower clip for IPW propensity scores.
    :param clip_max: optional upper clip for IPW propensity scores.
    """
    from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
    from sklearn.neural_network import MLPRegressor
    from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.svm import SVR, LinearSVR
    from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
    from sklearn.neural_network import MLPClassifier
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.exceptions import ConvergenceWarning
    # Tiny data + small learners often fail to converge; irrelevant here.
    warnings.filterwarnings('ignore', category=ConvergenceWarning)
    <DeepExtract>
    # Inlined toy dataset: binary treatment with additive effect `beta`.
    n = 100
    beta = 0.4
    X = pd.DataFrame(np.random.normal(size=(n, 5)))
    a = pd.Series([0] * (n // 2) + [1] * (n // 2))
    y = a.mul(beta)
    data = {'X': X, 'a': a, 'y': y, 'beta': beta}
    </DeepExtract>
    for propensity_learner in [GradientBoostingClassifier(n_estimators=10), RandomForestClassifier(n_estimators=100), MLPClassifier(hidden_layer_sizes=(5,)), KNeighborsClassifier(n_neighbors=20)]:
        weight_model = IPW(propensity_learner, clip_min=clip_min, clip_max=clip_max)
        propensity_learner_name = str(propensity_learner).split('(', maxsplit=1)[0]
        for outcome_learner in [GradientBoostingRegressor(n_estimators=10), RandomForestRegressor(n_estimators=10), MLPRegressor(hidden_layer_sizes=(5,)), ElasticNet(), RANSACRegressor(), HuberRegressor(), PassiveAggressiveRegressor(), KNeighborsRegressor(), SVR(), LinearSVR()]:
            outcome_learner_name = str(outcome_learner).split('(', maxsplit=1)[0]
            outcome_model = Standardization(outcome_learner)
            with self.subTest('Test fit & predict using {} & {}'.format(propensity_learner_name, outcome_learner_name)):
                model = self.estimator.__class__(outcome_model, weight_model)
                # Reaching the assert without an exception is the pass condition.
                model.fit(data['X'], data['a'], data['y'], refit_weight_model=False)
                model.estimate_individual_outcome(data['X'], data['a'])
                self.assertTrue(True)
|
def ensure_many_models(self, clip_min=None, clip_max=None):
    """Smoke-test the estimator under test across many learner combinations.

    Fits and predicts with every propensity classifier / outcome regressor
    pair; a subtest passes as long as no exception is raised.

    :param clip_min: optional lower clip for IPW propensity scores.
    :param clip_max: optional upper clip for IPW propensity scores.
    """
    from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
    from sklearn.neural_network import MLPRegressor
    from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.svm import SVR, LinearSVR
    from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
    from sklearn.neural_network import MLPClassifier
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.exceptions import ConvergenceWarning
    # Tiny data + small learners often fail to converge; irrelevant here.
    warnings.filterwarnings('ignore', category=ConvergenceWarning)
    # Inlined toy dataset: binary treatment with additive effect `beta`.
    n = 100
    beta = 0.4
    X = pd.DataFrame(np.random.normal(size=(n, 5)))
    a = pd.Series([0] * (n // 2) + [1] * (n // 2))
    y = a.mul(beta)
    data = {'X': X, 'a': a, 'y': y, 'beta': beta}
    for propensity_learner in [GradientBoostingClassifier(n_estimators=10), RandomForestClassifier(n_estimators=100), MLPClassifier(hidden_layer_sizes=(5,)), KNeighborsClassifier(n_neighbors=20)]:
        weight_model = IPW(propensity_learner, clip_min=clip_min, clip_max=clip_max)
        propensity_learner_name = str(propensity_learner).split('(', maxsplit=1)[0]
        for outcome_learner in [GradientBoostingRegressor(n_estimators=10), RandomForestRegressor(n_estimators=10), MLPRegressor(hidden_layer_sizes=(5,)), ElasticNet(), RANSACRegressor(), HuberRegressor(), PassiveAggressiveRegressor(), KNeighborsRegressor(), SVR(), LinearSVR()]:
            outcome_learner_name = str(outcome_learner).split('(', maxsplit=1)[0]
            outcome_model = Standardization(outcome_learner)
            with self.subTest('Test fit & predict using {} & {}'.format(propensity_learner_name, outcome_learner_name)):
                model = self.estimator.__class__(outcome_model, weight_model)
                # Reaching the assert without an exception is the pass condition.
                model.fit(data['X'], data['a'], data['y'], refit_weight_model=False)
                model.estimate_individual_outcome(data['X'], data['a'])
                self.assertTrue(True)
|
causallib
|
positive
|
def test_create_docker_file_with_cpu_config(self):
    """A CPU chief config must produce a plain (non-GPU) TF base image."""
    <DeepExtract>
    # Inlined setup (originally a shared helper).
    self.entry_point = 'sample.py'
    self.chief_config = machine_config.COMMON_MACHINE_CONFIGS['K80_1X']
    self.worker_config = machine_config.COMMON_MACHINE_CONFIGS['K80_1X']
    self.entry_point_dir = '.'
    self.project_id = 'my-project'
    self._mock_request_get = mock.patch('requests.get').start()
    self._mock_request_get.return_value = mock.Mock()
    # NOTE(review): `requests_get_return_value` is undefined in this scope —
    # it was a parameter of the original setup helper; confirm intended value.
    self._mock_request_get.return_value.ok = requests_get_return_value
    mock.patch.object(gcp, 'get_project_name', autospec=True, return_value='my-project').start()
    mock.patch.object(uuid, 'uuid4', autospec=True, return_value='abcde').start()
    </DeepExtract>
    # Chief config here overrides setup: CPU, so no GPU image suffix expected.
    lcb = containerize.LocalContainerBuilder(self.entry_point, None, machine_config.COMMON_MACHINE_CONFIGS['CPU'], self.worker_config)
    lcb._create_docker_file()
    expected_docker_file_lines = ['FROM tensorflow/tensorflow:{}\n'.format(_TF_VERSION), 'WORKDIR /app/\n', 'COPY /app/ /app/\n', 'ENTRYPOINT ["python", "sample.py"]']
    <DeepExtract>
    # Compare the generated Dockerfile line by line.
    with open(lcb.docker_file_path, 'r') as f:
        actual_lines = f.readlines()
    self.assertListEqual(expected_docker_file_lines, actual_lines)
    </DeepExtract>
    <DeepExtract>
    # Teardown: stop all patches and remove the generated file.
    mock.patch.stopall()
    os.remove(lcb.docker_file_path)
    </DeepExtract>
|
def test_create_docker_file_with_cpu_config(self):
    """A CPU chief config must produce a plain (non-GPU) TF base image."""
    # --- inlined setup (originally a shared helper) ---
    self.entry_point = 'sample.py'
    self.chief_config = machine_config.COMMON_MACHINE_CONFIGS['K80_1X']
    self.worker_config = machine_config.COMMON_MACHINE_CONFIGS['K80_1X']
    self.entry_point_dir = '.'
    self.project_id = 'my-project'
    self._mock_request_get = mock.patch('requests.get').start()
    self._mock_request_get.return_value = mock.Mock()
    # NOTE(review): `requests_get_return_value` is undefined in this scope —
    # it was a parameter of the original setup helper; confirm intended value.
    self._mock_request_get.return_value.ok = requests_get_return_value
    mock.patch.object(gcp, 'get_project_name', autospec=True, return_value='my-project').start()
    mock.patch.object(uuid, 'uuid4', autospec=True, return_value='abcde').start()
    # --- exercise: chief config here overrides setup with a CPU config ---
    lcb = containerize.LocalContainerBuilder(self.entry_point, None, machine_config.COMMON_MACHINE_CONFIGS['CPU'], self.worker_config)
    lcb._create_docker_file()
    expected_docker_file_lines = ['FROM tensorflow/tensorflow:{}\n'.format(_TF_VERSION), 'WORKDIR /app/\n', 'COPY /app/ /app/\n', 'ENTRYPOINT ["python", "sample.py"]']
    # Compare the generated Dockerfile line by line.
    with open(lcb.docker_file_path, 'r') as f:
        actual_lines = f.readlines()
    self.assertListEqual(expected_docker_file_lines, actual_lines)
    # --- teardown: stop all patches and remove the generated file ---
    mock.patch.stopall()
    os.remove(lcb.docker_file_path)
|
cloud
|
positive
|
@test
def writelines_flush(self):
    """writelines() flushes by default and on flush=True, not on flush=False."""
    <DeepExtract>
    # Inlined setup. NOTE(review): FlushStream() is constructed twice and the
    # `is None` arm looks unreachable for a normal constructor — confirm the
    # original setup helper's intent.
    self.stream = StringIO() if FlushStream() is None else FlushStream()
    self.writer = writer(self.stream)
    </DeepExtract>
    lines = 'foo\nbar\n'
    self.writer.writelines(['foo', 'bar'])
    self.writer.writelines(['foo', 'bar'], flush=True)
    # Each flushed write appears as the written text followed by a True marker.
    Assert(self.stream.contents) == [lines, True, lines, True]
    self.writer.writelines(['foo', 'bar'], flush=False)
    # flush=False appends only the text, with no flush marker.
    Assert(self.stream.contents) == [lines, True, lines, True, lines]
|
@test
def writelines_flush(self):
    """writelines() flushes by default and on flush=True, not on flush=False."""
    # Inlined setup. NOTE(review): FlushStream() is constructed twice and the
    # `is None` arm looks unreachable for a normal constructor — confirm the
    # original setup helper's intent.
    self.stream = StringIO() if FlushStream() is None else FlushStream()
    self.writer = writer(self.stream)
    lines = 'foo\nbar\n'
    self.writer.writelines(['foo', 'bar'])
    self.writer.writelines(['foo', 'bar'], flush=True)
    # Each flushed write appears as the written text followed by a True marker.
    Assert(self.stream.contents) == [lines, True, lines, True]
    self.writer.writelines(['foo', 'bar'], flush=False)
    # flush=False appends only the text, with no flush marker.
    Assert(self.stream.contents) == [lines, True, lines, True, lines]
|
brownie
|
positive
|
@pytest.mark.django_db
@patch(f'{code_path}.rqi')
@patch('polaris.sep10.utils.check_auth', mock_check_auth_success)
def test_get_prices_success_no_optional_params(mock_rqi, client):
    """GET /prices with only sell_asset & sell_amount returns rounded prices."""
    <DeepExtract>
    # Inlined fixture: a SEP-38 Stellar asset, an off-chain BRL asset, their
    # exchange pair, and buy/sell delivery methods.
    usd_stellar = Asset.objects.create(code='usd', issuer=Keypair.random().public_key, sep38_enabled=True)
    brl_offchain = OffChainAsset.objects.create(scheme='iso4217', identifier='BRL', country_codes='BRA')
    delivery_methods = [DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.buy, name='cash_pickup', description='cash pick-up'), DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.sell, name='cash_dropoff', description='cash drop-off')]
    brl_offchain.delivery_methods.add(*delivery_methods)
    pair = ExchangePair.objects.create(buy_asset=brl_offchain.asset_identification_format, sell_asset=usd_stellar.asset_identification_format)
    data = {'stellar_assets': [usd_stellar], 'offchain_assets': [brl_offchain], 'exchange_pairs': [pair], 'delivery_methods': delivery_methods}
    </DeepExtract>
    # Anchor quotes 2.123; the response should round to the asset's decimals.
    mock_rqi.get_prices = Mock(return_value=[Decimal(2.123)])
    response = client.get(PRICES_ENDPOINT, {'sell_asset': data['stellar_assets'][0].asset_identification_format, 'sell_amount': 100})
    assert response.status_code == 200, response.content
    body = response.json()
    assert body == {'buy_assets': [{'asset': data['offchain_assets'][0].asset_identification_format, 'price': '2.12', 'decimals': data['offchain_assets'][0].significant_decimals}]}
    mock_rqi.get_prices.assert_called_once()
    kwargs = mock_rqi.get_prices.call_args[1]
    # token/request are request-scoped; drop them before comparing the rest.
    del kwargs['token']
    del kwargs['request']
    assert kwargs == {'sell_asset': data['stellar_assets'][0], 'sell_amount': Decimal(100), 'buy_assets': data['offchain_assets'], 'buy_delivery_method': None, 'sell_delivery_method': None, 'country_code': None}
|
@pytest.mark.django_db
@patch(f'{code_path}.rqi')
@patch('polaris.sep10.utils.check_auth', mock_check_auth_success)
def test_get_prices_success_no_optional_params(mock_rqi, client):
    """GET /prices with only sell_asset & sell_amount returns rounded prices."""
    # Inlined fixture: a SEP-38 Stellar asset, an off-chain BRL asset, their
    # exchange pair, and buy/sell delivery methods.
    usd_stellar = Asset.objects.create(code='usd', issuer=Keypair.random().public_key, sep38_enabled=True)
    brl_offchain = OffChainAsset.objects.create(scheme='iso4217', identifier='BRL', country_codes='BRA')
    delivery_methods = [DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.buy, name='cash_pickup', description='cash pick-up'), DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.sell, name='cash_dropoff', description='cash drop-off')]
    brl_offchain.delivery_methods.add(*delivery_methods)
    pair = ExchangePair.objects.create(buy_asset=brl_offchain.asset_identification_format, sell_asset=usd_stellar.asset_identification_format)
    data = {'stellar_assets': [usd_stellar], 'offchain_assets': [brl_offchain], 'exchange_pairs': [pair], 'delivery_methods': delivery_methods}
    # Anchor quotes 2.123; the response should round to the asset's decimals.
    mock_rqi.get_prices = Mock(return_value=[Decimal(2.123)])
    response = client.get(PRICES_ENDPOINT, {'sell_asset': data['stellar_assets'][0].asset_identification_format, 'sell_amount': 100})
    assert response.status_code == 200, response.content
    body = response.json()
    assert body == {'buy_assets': [{'asset': data['offchain_assets'][0].asset_identification_format, 'price': '2.12', 'decimals': data['offchain_assets'][0].significant_decimals}]}
    mock_rqi.get_prices.assert_called_once()
    kwargs = mock_rqi.get_prices.call_args[1]
    # token/request are request-scoped; drop them before comparing the rest.
    del kwargs['token']
    del kwargs['request']
    assert kwargs == {'sell_asset': data['stellar_assets'][0], 'sell_amount': Decimal(100), 'buy_assets': data['offchain_assets'], 'buy_delivery_method': None, 'sell_delivery_method': None, 'country_code': None}
|
django-polaris
|
positive
|
def test_can_parse_two_ordinary_steps():
    """
    It should correctly extract two ordinary steps into an array.
    """
    <DeepExtract>
    # Inlined helper: build a minimal feature with two plain steps and parse it.
    feature = '\n Feature: parse a step\n Scenario: parse a single step\n '
    feature += I_DIE_HAPPY + I_LIKE_VEGETABLES
    steps = Feature.from_string(feature).scenarios[0].steps
    </DeepExtract>
    assert_equal(len(steps), 2)
    assert isinstance(steps[0], Step)
    assert isinstance(steps[1], Step)
    # Each step's sentence is the first line of the corresponding snippet.
    assert_equal(steps[0].sentence, first_line_of(I_DIE_HAPPY))
    assert_equal(steps[1].sentence, first_line_of(I_LIKE_VEGETABLES))
|
def test_can_parse_two_ordinary_steps():
    """
    It should correctly extract two ordinary steps into an array.
    """
    # Build a minimal feature containing exactly two plain steps, then parse.
    feature_text = '\n Feature: parse a step\n Scenario: parse a single step\n ' + I_DIE_HAPPY + I_LIKE_VEGETABLES
    parsed_steps = Feature.from_string(feature_text).scenarios[0].steps
    assert_equal(len(parsed_steps), 2)
    for parsed in parsed_steps:
        assert isinstance(parsed, Step)
    # Each step's sentence is the first line of the corresponding snippet.
    assert_equal(parsed_steps[0].sentence, first_line_of(I_DIE_HAPPY))
    assert_equal(parsed_steps[1].sentence, first_line_of(I_LIKE_VEGETABLES))
|
aloe
|
positive
|
def nvmlDeviceGetInforomConfigurationChecksum(handle):
    """Return the device's inforom configuration checksum via NVML.

    :param handle: NVML device handle.
    :raises NVMLError: if NVML is uninitialized, the symbol is missing,
        or the underlying library call fails.
    """
    c_checksum = c_uint()
    <DeepExtract>
    # Inlined function-pointer lookup. NOTE(review): on a cache hit the code
    # still falls through to the locked lookup below — artifact of inlining an
    # early `return`; confirm against the original helper.
    global nvmlLib
    if 'nvmlDeviceGetInforomConfigurationChecksum' in _nvmlGetFunctionPointer_cache:
        fn = _nvmlGetFunctionPointer_cache['nvmlDeviceGetInforomConfigurationChecksum']
    libLoadLock.acquire()
    try:
        if nvmlLib == None:
            raise NVMLError(NVML_ERROR_UNINITIALIZED)
        try:
            _nvmlGetFunctionPointer_cache['nvmlDeviceGetInforomConfigurationChecksum'] = getattr(nvmlLib, 'nvmlDeviceGetInforomConfigurationChecksum')
            fn = _nvmlGetFunctionPointer_cache['nvmlDeviceGetInforomConfigurationChecksum']
        except AttributeError:
            raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)
    finally:
        libLoadLock.release()
    </DeepExtract>
    ret = fn(handle, byref(c_checksum))
    <DeepExtract>
    # Inlined status check. NOTE(review): the `return ret` makes the final
    # `return c_checksum.value` unreachable — inlining artifact of a helper's
    # return statement; confirm.
    if ret != NVML_SUCCESS:
        raise NVMLError(ret)
    return ret
    </DeepExtract>
    return c_checksum.value
|
def nvmlDeviceGetInforomConfigurationChecksum(handle):
    """Return the device's inforom configuration checksum via NVML.

    :param handle: NVML device handle.
    :raises NVMLError: if NVML is uninitialized, the symbol is missing,
        or the underlying library call fails.
    """
    c_checksum = c_uint()
    global nvmlLib
    # Resolve the C entry point, caching the pointer after the first lookup.
    if 'nvmlDeviceGetInforomConfigurationChecksum' in _nvmlGetFunctionPointer_cache:
        fn = _nvmlGetFunctionPointer_cache['nvmlDeviceGetInforomConfigurationChecksum']
    else:
        # Cache miss: resolve under the load lock so init races are safe.
        libLoadLock.acquire()
        try:
            if nvmlLib is None:
                raise NVMLError(NVML_ERROR_UNINITIALIZED)
            try:
                _nvmlGetFunctionPointer_cache['nvmlDeviceGetInforomConfigurationChecksum'] = getattr(nvmlLib, 'nvmlDeviceGetInforomConfigurationChecksum')
                fn = _nvmlGetFunctionPointer_cache['nvmlDeviceGetInforomConfigurationChecksum']
            except AttributeError:
                raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)
        finally:
            libLoadLock.release()
    ret = fn(handle, byref(c_checksum))
    if ret != NVML_SUCCESS:
        raise NVMLError(ret)
    # Fixed: previously `return ret` here returned the status code (0) and made
    # this line unreachable; the checksum value is what callers need.
    return c_checksum.value
|
DeepFaceLab_Linux
|
positive
|
def decrypt_get_item(decrypt_method, crypto_config_method, read_method, **kwargs):
    """Transparently decrypt an item after getting it from the table.
    :param callable decrypt_method: Method to use to decrypt item
    :param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
    :param callable read_method: Method that reads from the table
    :param **kwargs: Keyword arguments to pass to ``read_method``
    :return: DynamoDB response
    :rtype: dict
    """
    <DeepExtract>
    # Reject read options that are incompatible with client-side decryption.
    for arg in ('AttributesToGet', 'ProjectionExpression'):
        if arg in kwargs:
            raise InvalidArgumentError('"{}" is not supported for this operation'.format(arg))
    if kwargs.get('Select', None) in ('SPECIFIC_ATTRIBUTES', 'ALL_PROJECTED_ATTRIBUTES'):
        raise InvalidArgumentError('Scan "Select" value of "{}" is not supported'.format(kwargs['Select']))
    </DeepExtract>
    (crypto_config, ddb_kwargs) = crypto_config_method(**kwargs)
    response = read_method(**ddb_kwargs)
    # Decrypt in place only when the read actually returned an item.
    if 'Item' in response:
        response['Item'] = decrypt_method(item=response['Item'], crypto_config=crypto_config.with_item(_item_transformer(decrypt_method)(response['Item'])))
    return response
|
def decrypt_get_item(decrypt_method, crypto_config_method, read_method, **kwargs):
    """Transparently decrypt an item after getting it from the table.
    :param callable decrypt_method: Method to use to decrypt item
    :param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
    :param callable read_method: Method that reads from the table
    :param **kwargs: Keyword arguments to pass to ``read_method``
    :return: DynamoDB response
    :rtype: dict
    """
    # Reject read options that are incompatible with client-side decryption.
    for unsupported in ('AttributesToGet', 'ProjectionExpression'):
        if unsupported in kwargs:
            raise InvalidArgumentError('"{}" is not supported for this operation'.format(unsupported))
    if kwargs.get('Select', None) in ('SPECIFIC_ATTRIBUTES', 'ALL_PROJECTED_ATTRIBUTES'):
        raise InvalidArgumentError('Scan "Select" value of "{}" is not supported'.format(kwargs['Select']))
    (crypto_config, ddb_kwargs) = crypto_config_method(**kwargs)
    response = read_method(**ddb_kwargs)
    # Decrypt in place only when the read actually returned an item.
    if 'Item' in response:
        encrypted_item = response['Item']
        item_crypto_config = crypto_config.with_item(_item_transformer(decrypt_method)(encrypted_item))
        response['Item'] = decrypt_method(item=encrypted_item, crypto_config=item_crypto_config)
    return response
|
aws-dynamodb-encryption-python
|
positive
|
def check_path(path):
    # NOTE(review): this function uses `self` without taking it as a
    # parameter — presumably a closure defined inside a method; confirm.
    """Check *path* (file or directory tree) for duplicates."""
    if os.path.isfile(path):
        <DeepExtract>
        # Inlined per-file duplicate check (name/checksum/size/regex).
        log.debug("checking file path '%s'", path)
        if os.path.islink(path):
            log.debug("ignoring symlink '%s'", path)
            return False
        elif os.path.basename(path).lower() in self.ignore_list:
            log.debug("ignoring file '%s', basename '%s' is in ignore list", path, os.path.basename(path))
            return False
        is_dup = False
        if self.compare_by_name:
            if self.is_file_dup_by_name(path):
                if not self.no_short_circuit:
                    return True
                else:
                    is_dup = True
        # Checksum comparison takes precedence over size comparison.
        if self.compare_by_checksum:
            if self.is_file_dup_by_hash(path):
                if not self.no_short_circuit:
                    return True
                else:
                    is_dup = True
        elif self.compare_by_size:
            if self.is_file_dup_by_size(path):
                if not self.no_short_circuit:
                    return True
                else:
                    is_dup = True
        if self.regex:
            if self.is_file_dup_by_regex(path):
                if not self.no_short_circuit:
                    return True
                else:
                    is_dup = True
        if is_dup:
            return True
        return False
        </DeepExtract>
    elif os.path.isdir(path):
        for (root, dirs, files) in os.walk(path):
            if not self.include_dot_dirs:
                # Prune hidden directories in place so os.walk skips them.
                dirs[:] = [d for d in dirs if d[0] != '.']
            for filebasename in files:
                filepath = os.path.join(root, filebasename)
                try:
                    <DeepExtract>
                    # Inlined per-file check. NOTE(review): these `return`
                    # statements exit check_path itself, aborting the walk
                    # after the first file — artifact of inlining a helper's
                    # early returns; confirm against the original.
                    log.debug("checking file path '%s'", filepath)
                    if os.path.islink(filepath):
                        log.debug("ignoring symlink '%s'", filepath)
                        return False
                    elif os.path.basename(filepath).lower() in self.ignore_list:
                        log.debug("ignoring file '%s', basename '%s' is in ignore list", filepath, os.path.basename(filepath))
                        return False
                    is_dup = False
                    if self.compare_by_name:
                        if self.is_file_dup_by_name(filepath):
                            if not self.no_short_circuit:
                                return True
                            else:
                                is_dup = True
                    if self.compare_by_checksum:
                        if self.is_file_dup_by_hash(filepath):
                            if not self.no_short_circuit:
                                return True
                            else:
                                is_dup = True
                    elif self.compare_by_size:
                        if self.is_file_dup_by_size(filepath):
                            if not self.no_short_circuit:
                                return True
                            else:
                                is_dup = True
                    if self.regex:
                        if self.is_file_dup_by_regex(filepath):
                            if not self.no_short_circuit:
                                return True
                            else:
                                is_dup = True
                    if is_dup:
                        return True
                    return False
                    </DeepExtract>
                except OSError as exc:
                    log.error("error while checking file '{0}': {1}".format(filepath, exc))
                    self.failed = True
    else:
        # NOTE(review): the '%s' placeholder is never interpolated with `path`.
        die("'%s' is not a file or directory")
|
def check_path(path):
    # NOTE(review): this function uses `self` without taking it as a
    # parameter — presumably a closure defined inside a method; confirm.
    """Check *path* (file or directory tree) for duplicates."""
    if os.path.isfile(path):
        # Per-file duplicate check (name/checksum/size/regex).
        log.debug("checking file path '%s'", path)
        if os.path.islink(path):
            log.debug("ignoring symlink '%s'", path)
            return False
        elif os.path.basename(path).lower() in self.ignore_list:
            log.debug("ignoring file '%s', basename '%s' is in ignore list", path, os.path.basename(path))
            return False
        is_dup = False
        if self.compare_by_name:
            if self.is_file_dup_by_name(path):
                if not self.no_short_circuit:
                    return True
                else:
                    is_dup = True
        # Checksum comparison takes precedence over size comparison.
        if self.compare_by_checksum:
            if self.is_file_dup_by_hash(path):
                if not self.no_short_circuit:
                    return True
                else:
                    is_dup = True
        elif self.compare_by_size:
            if self.is_file_dup_by_size(path):
                if not self.no_short_circuit:
                    return True
                else:
                    is_dup = True
        if self.regex:
            if self.is_file_dup_by_regex(path):
                if not self.no_short_circuit:
                    return True
                else:
                    is_dup = True
        if is_dup:
            return True
        return False
    elif os.path.isdir(path):
        for (root, dirs, files) in os.walk(path):
            if not self.include_dot_dirs:
                # Prune hidden directories in place so os.walk skips them.
                dirs[:] = [d for d in dirs if d[0] != '.']
            for filebasename in files:
                filepath = os.path.join(root, filebasename)
                try:
                    # NOTE(review): the `return` statements below exit
                    # check_path itself, aborting the walk after the first
                    # file — artifact of inlining a helper's early returns;
                    # confirm against the original implementation.
                    log.debug("checking file path '%s'", filepath)
                    if os.path.islink(filepath):
                        log.debug("ignoring symlink '%s'", filepath)
                        return False
                    elif os.path.basename(filepath).lower() in self.ignore_list:
                        log.debug("ignoring file '%s', basename '%s' is in ignore list", filepath, os.path.basename(filepath))
                        return False
                    is_dup = False
                    if self.compare_by_name:
                        if self.is_file_dup_by_name(filepath):
                            if not self.no_short_circuit:
                                return True
                            else:
                                is_dup = True
                    if self.compare_by_checksum:
                        if self.is_file_dup_by_hash(filepath):
                            if not self.no_short_circuit:
                                return True
                            else:
                                is_dup = True
                    elif self.compare_by_size:
                        if self.is_file_dup_by_size(filepath):
                            if not self.no_short_circuit:
                                return True
                            else:
                                is_dup = True
                    if self.regex:
                        if self.is_file_dup_by_regex(filepath):
                            if not self.no_short_circuit:
                                return True
                            else:
                                is_dup = True
                    if is_dup:
                        return True
                    return False
                except OSError as exc:
                    log.error("error while checking file '{0}': {1}".format(filepath, exc))
                    self.failed = True
    else:
        # NOTE(review): the '%s' placeholder is never interpolated with `path`.
        die("'%s' is not a file or directory")
|
DevOps-Python-tools
|
positive
|
def sys_check(*args):
    """Join *args* into one shell command, run it, and raise on failure.

    :return: 0 on success.
    :raises RuntimeError: if the command exits with a non-zero status.
    """
    cmd = ' '.join(args)
    if 0 != os.system(cmd):
        <DeepExtract>
        raise RuntimeError('Command failed! %s' % cmd)
        </DeepExtract>
    return 0
|
def sys_check(*args):
    """Join *args* into one shell command, run it, and raise on failure.

    :return: 0 on success.
    :raises RuntimeError: if the command exits with a non-zero status.
    """
    command = ' '.join(args)
    status = os.system(command)
    if status != 0:
        raise RuntimeError('Command failed! %s' % command)
    return 0
|
avobjects
|
positive
|
def _check_histogram(samples, name):
group = None
timestamp = None
def do_checks():
if bucket != float('+Inf'):
raise ValueError('+Inf bucket missing: ' + name)
if count is not None and value != count:
raise ValueError('Count does not match +Inf value: ' + name)
if has_sum and count is None:
raise ValueError('_count must be present if _sum is present: ' + name)
if has_gsum and count is None:
raise ValueError('_gcount must be present if _gsum is present: ' + name)
if not (has_sum or has_gsum) and count is not None:
raise ValueError('_sum/_gsum must be present if _count is present: ' + name)
if has_negative_buckets and has_sum:
raise ValueError('Cannot have _sum with negative buckets: ' + name)
if not has_negative_buckets and has_negative_gsum:
raise ValueError('Cannot have negative _gsum with non-negative buckets: ' + name)
for s in samples:
suffix = s.name[len(name):]
<DeepExtract>
if 'histogram' == 'info':
g = {}
if 'histogram' == 'summary' and s.name == name:
d = s.labels.copy()
del d['quantile']
g = d
if 'histogram' == 'stateset':
d = s.labels.copy()
del d[name]
g = d
if 'histogram' in ['histogram', 'gaugehistogram'] and s.name == name + '_bucket':
d = s.labels.copy()
del d['le']
g = d
g = s.labels
</DeepExtract>
if g != group or s.timestamp != timestamp:
if group is not None:
<DeepExtract>
if bucket != float('+Inf'):
raise ValueError('+Inf bucket missing: ' + name)
if count is not None and value != count:
raise ValueError('Count does not match +Inf value: ' + name)
if has_sum and count is None:
raise ValueError('_count must be present if _sum is present: ' + name)
if has_gsum and count is None:
raise ValueError('_gcount must be present if _gsum is present: ' + name)
if not (has_sum or has_gsum) and count is not None:
raise ValueError('_sum/_gsum must be present if _count is present: ' + name)
if has_negative_buckets and has_sum:
raise ValueError('Cannot have _sum with negative buckets: ' + name)
if not has_negative_buckets and has_negative_gsum:
raise ValueError('Cannot have negative _gsum with non-negative buckets: ' + name)
</DeepExtract>
count = None
bucket = None
has_negative_buckets = False
has_sum = False
has_gsum = False
has_negative_gsum = False
value = 0
group = g
timestamp = s.timestamp
if suffix == '_bucket':
b = float(s.labels['le'])
if b < 0:
has_negative_buckets = True
if bucket is not None and b <= bucket:
raise ValueError('Buckets out of order: ' + name)
if s.value < value:
raise ValueError('Bucket values out of order: ' + name)
bucket = b
value = s.value
elif suffix in ['_count', '_gcount']:
count = s.value
elif suffix in ['_sum']:
has_sum = True
elif suffix in ['_gsum']:
has_gsum = True
if s.value < 0:
has_negative_gsum = True
if group is not None:
<DeepExtract>
if bucket != float('+Inf'):
raise ValueError('+Inf bucket missing: ' + name)
if count is not None and value != count:
raise ValueError('Count does not match +Inf value: ' + name)
if has_sum and count is None:
raise ValueError('_count must be present if _sum is present: ' + name)
if has_gsum and count is None:
raise ValueError('_gcount must be present if _gsum is present: ' + name)
if not (has_sum or has_gsum) and count is not None:
raise ValueError('_sum/_gsum must be present if _count is present: ' + name)
if has_negative_buckets and has_sum:
raise ValueError('Cannot have _sum with negative buckets: ' + name)
if not has_negative_buckets and has_negative_gsum:
raise ValueError('Cannot have negative _gsum with non-negative buckets: ' + name)
</DeepExtract>
|
def _check_histogram(samples, name):
group = None
timestamp = None
def do_checks():
if bucket != float('+Inf'):
raise ValueError('+Inf bucket missing: ' + name)
if count is not None and value != count:
raise ValueError('Count does not match +Inf value: ' + name)
if has_sum and count is None:
raise ValueError('_count must be present if _sum is present: ' + name)
if has_gsum and count is None:
raise ValueError('_gcount must be present if _gsum is present: ' + name)
if not (has_sum or has_gsum) and count is not None:
raise ValueError('_sum/_gsum must be present if _count is present: ' + name)
if has_negative_buckets and has_sum:
raise ValueError('Cannot have _sum with negative buckets: ' + name)
if not has_negative_buckets and has_negative_gsum:
raise ValueError('Cannot have negative _gsum with non-negative buckets: ' + name)
for s in samples:
suffix = s.name[len(name):]
if 'histogram' == 'info':
g = {}
if 'histogram' == 'summary' and s.name == name:
d = s.labels.copy()
del d['quantile']
g = d
if 'histogram' == 'stateset':
d = s.labels.copy()
del d[name]
g = d
if 'histogram' in ['histogram', 'gaugehistogram'] and s.name == name + '_bucket':
d = s.labels.copy()
del d['le']
g = d
g = s.labels
if g != group or s.timestamp != timestamp:
if group is not None:
if bucket != float('+Inf'):
raise ValueError('+Inf bucket missing: ' + name)
if count is not None and value != count:
raise ValueError('Count does not match +Inf value: ' + name)
if has_sum and count is None:
raise ValueError('_count must be present if _sum is present: ' + name)
if has_gsum and count is None:
raise ValueError('_gcount must be present if _gsum is present: ' + name)
if not (has_sum or has_gsum) and count is not None:
raise ValueError('_sum/_gsum must be present if _count is present: ' + name)
if has_negative_buckets and has_sum:
raise ValueError('Cannot have _sum with negative buckets: ' + name)
if not has_negative_buckets and has_negative_gsum:
raise ValueError('Cannot have negative _gsum with non-negative buckets: ' + name)
count = None
bucket = None
has_negative_buckets = False
has_sum = False
has_gsum = False
has_negative_gsum = False
value = 0
group = g
timestamp = s.timestamp
if suffix == '_bucket':
b = float(s.labels['le'])
if b < 0:
has_negative_buckets = True
if bucket is not None and b <= bucket:
raise ValueError('Buckets out of order: ' + name)
if s.value < value:
raise ValueError('Bucket values out of order: ' + name)
bucket = b
value = s.value
elif suffix in ['_count', '_gcount']:
count = s.value
elif suffix in ['_sum']:
has_sum = True
elif suffix in ['_gsum']:
has_gsum = True
if s.value < 0:
has_negative_gsum = True
if group is not None:
if bucket != float('+Inf'):
raise ValueError('+Inf bucket missing: ' + name)
if count is not None and value != count:
raise ValueError('Count does not match +Inf value: ' + name)
if has_sum and count is None:
raise ValueError('_count must be present if _sum is present: ' + name)
if has_gsum and count is None:
raise ValueError('_gcount must be present if _gsum is present: ' + name)
if not (has_sum or has_gsum) and count is not None:
raise ValueError('_sum/_gsum must be present if _count is present: ' + name)
if has_negative_buckets and has_sum:
raise ValueError('Cannot have _sum with negative buckets: ' + name)
if not has_negative_buckets and has_negative_gsum:
raise ValueError('Cannot have negative _gsum with non-negative buckets: ' + name)
</DeepExtract>
|
client_python
|
positive
|
def _resize_im(im, target_size, max_size, stride=0, min_size=0):
    """Resize `im` so its shorter side equals `target_size`, capping the longer
    side at `max_size` (and enforcing `min_size` when > 0), then zero-pad the
    result to a multiple of `stride` if `stride` > 0.

    Returns:
        (image, im_scale): resized (possibly padded) image and the scale used.
    """
    im_size_min = np.min(im.shape[0:2])
    im_size_max = np.max(im.shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the longer side from exceeding max_size.
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    if min_size > 0 and np.round(im_scale * im_size_min) < min_size:
        im_scale = float(min_size) / float(im_size_min)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    if stride == 0:
        return im, im_scale
    # Zero-pad so both spatial dims are multiples of stride.
    im_height = int(np.ceil(im.shape[0] / float(stride)) * stride)
    im_width = int(np.ceil(im.shape[1] / float(stride)) * stride)
    padded_im = np.zeros((im_height, im_width, im.shape[2]))
    padded_im[:im.shape[0], :im.shape[1], :] = im
    return padded_im, im_scale


def _im_to_tensor(im):
    """Convert an HWC BGR image to a 1x3xHxW RGB tensor with the configured
    per-channel pixel means subtracted."""
    im_tensor = np.zeros((1, 3, im.shape[0], im.shape[1]))
    for c in range(3):
        # BGR -> RGB channel swap while subtracting the channel mean.
        im_tensor[0, c, :, :] = im[:, :, 2 - c] - config.PIXEL_MEANS[2 - c]
    return im_tensor


def get_image(roidb, scale=False):
    """
    preprocess image and return processed roidb
    :param roidb: a list of roidb
    :return: list of img as in mxnet format
    roidb add new item['im_info']
    0 --- x (width, second dim of im)
    |
    y (height, first dim of im)
    """
    num_images = len(roidb)
    processed_ims = []
    processed_roidb = []
    for i in range(num_images):
        roi_rec = roidb[i]
        # Decode from an in-memory stream when available, else read from disk.
        if 'stream' in roi_rec:
            im = cv2.imdecode(roi_rec['stream'], cv2.IMREAD_COLOR)
        else:
            assert os.path.exists(roi_rec['image']), '{} does not exist'.format(roi_rec['image'])
            im = cv2.imread(roi_rec['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        new_rec = roi_rec.copy()
        if scale:
            # Random uniform rescale within the configured range.
            scale_range = config.TRAIN.SCALE_RANGE
            im_scale = np.random.uniform(scale_range[0], scale_range[1])
            im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
        elif not config.ORIGIN_SCALE:
            # Resize to a randomly chosen (target, max) pair from config.SCALES.
            scale_ind = random.randrange(len(config.SCALES))
            target_size = config.SCALES[scale_ind][0]
            max_size = config.SCALES[scale_ind][1]
            im, im_scale = _resize_im(im, target_size, max_size, stride=config.IMAGE_STRIDE)
        else:
            im_scale = 1.0
        im_tensor = _im_to_tensor(im)
        if 'boxes_mask' in roi_rec:
            # Zero out the masked box regions of the tensor.
            im = im.astype(np.float32)
            boxes_mask = roi_rec['boxes_mask'].copy() * im_scale
            boxes_mask = boxes_mask.astype(int)
            for j in range(boxes_mask.shape[0]):
                m = boxes_mask[j]
                im_tensor[:, :, m[1]:m[3], m[0]:m[2]] = 0.0
        processed_ims.append(im_tensor)
        new_rec['boxes'] = roi_rec['boxes'].copy() * im_scale
        if config.TRAIN.IMAGE_ALIGN > 0:
            # Pad the tensor so spatial dims are multiples of IMAGE_ALIGN.
            if im_tensor.shape[2] % config.TRAIN.IMAGE_ALIGN != 0 or im_tensor.shape[3] % config.TRAIN.IMAGE_ALIGN != 0:
                new_height = math.ceil(float(im_tensor.shape[2]) / config.TRAIN.IMAGE_ALIGN) * config.TRAIN.IMAGE_ALIGN
                new_width = math.ceil(float(im_tensor.shape[3]) / config.TRAIN.IMAGE_ALIGN) * config.TRAIN.IMAGE_ALIGN
                new_im_tensor = np.zeros((1, 3, int(new_height), int(new_width)))
                new_im_tensor[:, :, 0:im_tensor.shape[2], 0:im_tensor.shape[3]] = im_tensor
                print(im_tensor.shape, new_im_tensor.shape, file=sys.stderr)
                im_tensor = new_im_tensor
        im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
        new_rec['im_info'] = im_info
        processed_roidb.append(new_rec)
    return (processed_ims, processed_roidb)
|
def _resize_im(im, target_size, max_size, stride=0, min_size=0):
    """Resize `im` so its shorter side equals `target_size`, capping the longer
    side at `max_size` (and enforcing `min_size` when > 0), then zero-pad the
    result to a multiple of `stride` if `stride` > 0.

    Returns:
        (image, im_scale): resized (possibly padded) image and the scale used.
    """
    im_size_min = np.min(im.shape[0:2])
    im_size_max = np.max(im.shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the longer side from exceeding max_size.
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    if min_size > 0 and np.round(im_scale * im_size_min) < min_size:
        im_scale = float(min_size) / float(im_size_min)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    if stride == 0:
        return im, im_scale
    # Zero-pad so both spatial dims are multiples of stride.
    im_height = int(np.ceil(im.shape[0] / float(stride)) * stride)
    im_width = int(np.ceil(im.shape[1] / float(stride)) * stride)
    padded_im = np.zeros((im_height, im_width, im.shape[2]))
    padded_im[:im.shape[0], :im.shape[1], :] = im
    return padded_im, im_scale


def _im_to_tensor(im):
    """Convert an HWC BGR image to a 1x3xHxW RGB tensor with the configured
    per-channel pixel means subtracted."""
    im_tensor = np.zeros((1, 3, im.shape[0], im.shape[1]))
    for c in range(3):
        # BGR -> RGB channel swap while subtracting the channel mean.
        im_tensor[0, c, :, :] = im[:, :, 2 - c] - config.PIXEL_MEANS[2 - c]
    return im_tensor


def get_image(roidb, scale=False):
    """
    preprocess image and return processed roidb
    :param roidb: a list of roidb
    :return: list of img as in mxnet format
    roidb add new item['im_info']
    0 --- x (width, second dim of im)
    |
    y (height, first dim of im)
    """
    num_images = len(roidb)
    processed_ims = []
    processed_roidb = []
    for i in range(num_images):
        roi_rec = roidb[i]
        # Decode from an in-memory stream when available, else read from disk.
        if 'stream' in roi_rec:
            im = cv2.imdecode(roi_rec['stream'], cv2.IMREAD_COLOR)
        else:
            assert os.path.exists(roi_rec['image']), '{} does not exist'.format(roi_rec['image'])
            im = cv2.imread(roi_rec['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        new_rec = roi_rec.copy()
        if scale:
            # Random uniform rescale within the configured range.
            scale_range = config.TRAIN.SCALE_RANGE
            im_scale = np.random.uniform(scale_range[0], scale_range[1])
            im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
        elif not config.ORIGIN_SCALE:
            # Resize to a randomly chosen (target, max) pair from config.SCALES.
            scale_ind = random.randrange(len(config.SCALES))
            target_size = config.SCALES[scale_ind][0]
            max_size = config.SCALES[scale_ind][1]
            im, im_scale = _resize_im(im, target_size, max_size, stride=config.IMAGE_STRIDE)
        else:
            im_scale = 1.0
        im_tensor = _im_to_tensor(im)
        if 'boxes_mask' in roi_rec:
            # Zero out the masked box regions of the tensor.
            im = im.astype(np.float32)
            boxes_mask = roi_rec['boxes_mask'].copy() * im_scale
            boxes_mask = boxes_mask.astype(int)
            for j in range(boxes_mask.shape[0]):
                m = boxes_mask[j]
                im_tensor[:, :, m[1]:m[3], m[0]:m[2]] = 0.0
        processed_ims.append(im_tensor)
        new_rec['boxes'] = roi_rec['boxes'].copy() * im_scale
        if config.TRAIN.IMAGE_ALIGN > 0:
            # Pad the tensor so spatial dims are multiples of IMAGE_ALIGN.
            if im_tensor.shape[2] % config.TRAIN.IMAGE_ALIGN != 0 or im_tensor.shape[3] % config.TRAIN.IMAGE_ALIGN != 0:
                new_height = math.ceil(float(im_tensor.shape[2]) / config.TRAIN.IMAGE_ALIGN) * config.TRAIN.IMAGE_ALIGN
                new_width = math.ceil(float(im_tensor.shape[3]) / config.TRAIN.IMAGE_ALIGN) * config.TRAIN.IMAGE_ALIGN
                new_im_tensor = np.zeros((1, 3, int(new_height), int(new_width)))
                new_im_tensor[:, :, 0:im_tensor.shape[2], 0:im_tensor.shape[3]] = im_tensor
                print(im_tensor.shape, new_im_tensor.shape, file=sys.stderr)
                im_tensor = new_im_tensor
        im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
        new_rec['im_info'] = im_info
        processed_roidb.append(new_rec)
    return (processed_ims, processed_roidb)
|
enhanced-ssh-mxnet
|
positive
|
def aaa_vehicle_group(self, country, name, _type: unittype.VehicleType, position: mapping.Point, heading=0, group_size=1, formation=unitgroup.VehicleGroup.Formation.Line, move_formation: PointAction=PointAction.OffRoad):
    """
    Override the default vehicle group so that our group can contain a mix of units (which is required for advanced
    SAM sites)
    For further docstrings, see the built-in function
    """
    vg = unitgroup.VehicleGroup(self.next_group_id(), self.string(name))

    def _ring_positions(num_launchers, center_heading, radius, coverage=360):
        # Spread num_launchers (x, y, heading) slots over `coverage` degrees
        # of a circle of `radius` around `position`, centred on `center_heading`.
        if coverage == 360:
            outer_offset = coverage / num_launchers
        else:
            outer_offset = coverage / (num_launchers - 1)
        slots = []
        if num_launchers % 2 == 0:
            current_offset = center_heading - coverage / (num_launchers - 1) / 2
        else:
            current_offset = center_heading
        current_offset -= outer_offset * (math.ceil(num_launchers / 2) - 1)
        for _ in range(num_launchers):
            slots.append((position.x + radius * math.cos(math.radians(current_offset)),
                          position.y + radius * math.sin(math.radians(current_offset)),
                          current_offset))
            current_offset += outer_offset
        return slots

    def _add_unit(v, x, y, hdg):
        # Place a unit and register it with the group.
        v.position.x = x
        v.position.y = y
        v.heading = hdg
        vg.add_unit(v)

    for i in range(1, group_size + 1):
        heading = randint(0, 359)
        row_y = position.y + (i - 1) * 20
        if _type == AirDefence.SAM_SA_3_S_125_LN_5P73:
            # SA-3 site: search radar + track radar + 4 launchers on a 180-degree arc.
            num_launchers = 4
            _add_unit(self.vehicle(name + ' Unit #{nr}-sr'.format(nr=i), AirDefence.SAM_SR_P_19),
                      position.x, row_y, heading)
            _add_unit(self.vehicle(name + ' Unit #{nr}-tr'.format(nr=i), AirDefence.SAM_SA_3_S_125_TR_SNR),
                      position.x + randint(20, 40), row_y, heading)
            for x, slot in enumerate(_ring_positions(num_launchers, heading, 100, coverage=180)):
                _add_unit(self.vehicle(name + ' Unit #{nr}-{x}'.format(nr=i, x=x), AirDefence.SAM_SA_3_S_125_LN_5P73),
                          slot[0], slot[1], slot[2])
        elif _type == AirDefence.SAM_SA_6_Kub_LN_2P25:
            # SA-6 site: combined search/track radar + 6 launchers in a full circle.
            num_launchers = 6
            _add_unit(self.vehicle(name + ' Unit #{nr}-str'.format(nr=i), AirDefence.SAM_SA_6_Kub_STR_9S91),
                      position.x, row_y, heading)
            for x, slot in enumerate(_ring_positions(num_launchers, heading, 100)):
                _add_unit(self.vehicle(name + ' Unit #{nr}-{x}'.format(nr=i, x=x), AirDefence.SAM_SA_6_Kub_LN_2P25),
                          slot[0], slot[1], slot[2])
        elif _type == AirDefence.SAM_SA_10_S_300PS_LN_5P85C:
            # SA-10 (5P85C) site: SR + TR + CP + 8 launchers in a full circle.
            num_launchers = 8
            _add_unit(self.vehicle(name + ' Unit #{nr}-sr'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_SR_5N66M),
                      position.x, row_y, heading)
            _add_unit(self.vehicle(name + ' Unit #{nr}-tr'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_TR_30N6),
                      position.x + randint(20, 40), row_y, heading)
            _add_unit(self.vehicle(name + ' Unit #{nr}-c'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_CP_54K6),
                      position.x + randint(40, 60), row_y, heading)
            for x, slot in enumerate(_ring_positions(num_launchers, heading, 150)):
                _add_unit(self.vehicle(name + ' Unit #{nr}-{x}'.format(nr=i, x=x), AirDefence.SAM_SA_10_S_300PS_LN_5P85C),
                          slot[0], slot[1], slot[2])
        elif _type == AirDefence.SAM_SA_10_S_300PS_CP_54K6:
            # SA-10 (5P85D) site: big SR + TR + CP + 8 launchers in a full circle.
            num_launchers = 8
            _add_unit(self.vehicle(name + ' Unit #{nr}-sr'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_SR_64H6E),
                      position.x, row_y, heading)
            _add_unit(self.vehicle(name + ' Unit #{nr}-tr'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_TR_30N6),
                      position.x + randint(20, 40), row_y, heading)
            _add_unit(self.vehicle(name + ' Unit #{nr}-c'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_CP_54K6),
                      position.x + randint(40, 60), row_y, heading)
            for x, slot in enumerate(_ring_positions(num_launchers, heading, 150)):
                _add_unit(self.vehicle(name + ' Unit #{nr}-{x}'.format(nr=i, x=x), AirDefence.SAM_SA_10_S_300PS_LN_5P85D),
                          slot[0], slot[1], slot[2])
        else:
            # Any other type: a simple line of identical units.
            _add_unit(self.vehicle(name + ' Unit #{nr}-sam'.format(nr=i), _type),
                      position.x, row_y, heading)
    wp = vg.add_waypoint(vg.units[0].position, move_formation, 0)
    wp.ETA_locked = True
    if _type.eplrs:
        wp.tasks.append(task.EPLRS(self.next_eplrs('vehicle')))
    country.add_vehicle_group(vg)
    return vg
|
def aaa_vehicle_group(self, country, name, _type: unittype.VehicleType, position: mapping.Point, heading=0, group_size=1, formation=unitgroup.VehicleGroup.Formation.Line, move_formation: PointAction=PointAction.OffRoad):
    """
    Override the default vehicle group so that our group can contain a mix of units (which is required for advanced
    SAM sites)
    For further docstrings, see the built-in function
    """
    vg = unitgroup.VehicleGroup(self.next_group_id(), self.string(name))

    def _ring_positions(num_launchers, center_heading, radius, coverage=360):
        # Spread num_launchers (x, y, heading) slots over `coverage` degrees
        # of a circle of `radius` around `position`, centred on `center_heading`.
        if coverage == 360:
            outer_offset = coverage / num_launchers
        else:
            outer_offset = coverage / (num_launchers - 1)
        slots = []
        if num_launchers % 2 == 0:
            current_offset = center_heading - coverage / (num_launchers - 1) / 2
        else:
            current_offset = center_heading
        current_offset -= outer_offset * (math.ceil(num_launchers / 2) - 1)
        for _ in range(num_launchers):
            slots.append((position.x + radius * math.cos(math.radians(current_offset)),
                          position.y + radius * math.sin(math.radians(current_offset)),
                          current_offset))
            current_offset += outer_offset
        return slots

    def _add_unit(v, x, y, hdg):
        # Place a unit and register it with the group.
        v.position.x = x
        v.position.y = y
        v.heading = hdg
        vg.add_unit(v)

    for i in range(1, group_size + 1):
        heading = randint(0, 359)
        row_y = position.y + (i - 1) * 20
        if _type == AirDefence.SAM_SA_3_S_125_LN_5P73:
            # SA-3 site: search radar + track radar + 4 launchers on a 180-degree arc.
            num_launchers = 4
            _add_unit(self.vehicle(name + ' Unit #{nr}-sr'.format(nr=i), AirDefence.SAM_SR_P_19),
                      position.x, row_y, heading)
            _add_unit(self.vehicle(name + ' Unit #{nr}-tr'.format(nr=i), AirDefence.SAM_SA_3_S_125_TR_SNR),
                      position.x + randint(20, 40), row_y, heading)
            for x, slot in enumerate(_ring_positions(num_launchers, heading, 100, coverage=180)):
                _add_unit(self.vehicle(name + ' Unit #{nr}-{x}'.format(nr=i, x=x), AirDefence.SAM_SA_3_S_125_LN_5P73),
                          slot[0], slot[1], slot[2])
        elif _type == AirDefence.SAM_SA_6_Kub_LN_2P25:
            # SA-6 site: combined search/track radar + 6 launchers in a full circle.
            num_launchers = 6
            _add_unit(self.vehicle(name + ' Unit #{nr}-str'.format(nr=i), AirDefence.SAM_SA_6_Kub_STR_9S91),
                      position.x, row_y, heading)
            for x, slot in enumerate(_ring_positions(num_launchers, heading, 100)):
                _add_unit(self.vehicle(name + ' Unit #{nr}-{x}'.format(nr=i, x=x), AirDefence.SAM_SA_6_Kub_LN_2P25),
                          slot[0], slot[1], slot[2])
        elif _type == AirDefence.SAM_SA_10_S_300PS_LN_5P85C:
            # SA-10 (5P85C) site: SR + TR + CP + 8 launchers in a full circle.
            num_launchers = 8
            _add_unit(self.vehicle(name + ' Unit #{nr}-sr'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_SR_5N66M),
                      position.x, row_y, heading)
            _add_unit(self.vehicle(name + ' Unit #{nr}-tr'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_TR_30N6),
                      position.x + randint(20, 40), row_y, heading)
            _add_unit(self.vehicle(name + ' Unit #{nr}-c'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_CP_54K6),
                      position.x + randint(40, 60), row_y, heading)
            for x, slot in enumerate(_ring_positions(num_launchers, heading, 150)):
                _add_unit(self.vehicle(name + ' Unit #{nr}-{x}'.format(nr=i, x=x), AirDefence.SAM_SA_10_S_300PS_LN_5P85C),
                          slot[0], slot[1], slot[2])
        elif _type == AirDefence.SAM_SA_10_S_300PS_CP_54K6:
            # SA-10 (5P85D) site: big SR + TR + CP + 8 launchers in a full circle.
            num_launchers = 8
            _add_unit(self.vehicle(name + ' Unit #{nr}-sr'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_SR_64H6E),
                      position.x, row_y, heading)
            _add_unit(self.vehicle(name + ' Unit #{nr}-tr'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_TR_30N6),
                      position.x + randint(20, 40), row_y, heading)
            _add_unit(self.vehicle(name + ' Unit #{nr}-c'.format(nr=i), AirDefence.SAM_SA_10_S_300PS_CP_54K6),
                      position.x + randint(40, 60), row_y, heading)
            for x, slot in enumerate(_ring_positions(num_launchers, heading, 150)):
                _add_unit(self.vehicle(name + ' Unit #{nr}-{x}'.format(nr=i, x=x), AirDefence.SAM_SA_10_S_300PS_LN_5P85D),
                          slot[0], slot[1], slot[2])
        else:
            # Any other type: a simple line of identical units.
            _add_unit(self.vehicle(name + ' Unit #{nr}-sam'.format(nr=i), _type),
                      position.x, row_y, heading)
    wp = vg.add_waypoint(vg.units[0].position, move_formation, 0)
    wp.ETA_locked = True
    if _type.eplrs:
        wp.tasks.append(task.EPLRS(self.next_eplrs('vehicle')))
    country.add_vehicle_group(vg)
    return vg
|
dcs_liberation
|
positive
|
def main():
    """Exercise sorted_product against brute-force enumeration.

    Runs several finite inputs through sorted_product with different
    "product" operators and compares against sorting the full Cartesian
    product, then demonstrates lazy enumeration over infinite streams.
    """
    import numpy as np
    import itertools
    from arsenal.iterextras import take
    from itertools import count

    class WeightedTuple:
        # A weight paired with a key tuple; ordered by (weight, key).
        def __init__(self, w, *key):
            self.key = key
            self.w = w
        def __lt__(self, other):
            return (self.w, self.key) < (other.w, other.key)
        def __eq__(self, other):
            return (self.w, self.key) == (other.w, other.key)
        def __mul__(self, other):
            return LWeightedTuple(self.w * other.w, self, other)
        def __add__(self, other):
            return LWeightedTuple(self.w + other.w, self, other)
        def __iter__(self):
            return iter((self.w, self.key))
        def __repr__(self):
            return repr((self.w, self.key))

    class LWeightedTuple(WeightedTuple):
        """WeightedTuple with lazy concatenation of keys."""
        def __init__(self, w, a, b):
            self.w = w
            self.a = a
            self.b = b
        @property
        def key(self):
            return self.a.key + self.b.key

    def wprod(xs):
        # np.prod replaces np.product, which was removed in NumPy 2.0.
        return np.prod([WeightedTuple(x, x) for x in xs])

    def wsum(xs):
        return np.sum([WeightedTuple(x, x) for x in xs])

    def check(iters):
        # Compare sorted_product against brute-force sort of the full product.
        for p in [np.prod, np.sum, tuple, wprod]:
            want = list(sorted(p(x) for x in itertools.product(*iters)))
            have = list(sorted_product(p, *iters))
            print()
            print('product operator:', p.__name__)
            print('HAVE:', have)
            print('WANT:', want)
            assert have == want
            print('pass.')

    print('===========')
    check([(0.1, 0.4, 0.5), (0.09, 0.11, 0.8), (0.111, 0.3, 0.6)])
    print('===========')
    check([(1, 2, 3), (4, 7, 11)])
    print('===========')
    check([(0.01, 0.4, 0.5), (0.11, 0.8), (0.6,)])
    print('===========')
    check([(1, 2, 3, 100), (4, 7, 9), (14, 17, 19), (24, 27, 29)])
    print('===========')
    # Infinite weighted streams: take only the 20 smallest sums lazily.
    a = (3 ** i for i in count(1))
    b = (4 ** i for i in count(1))
    c = (5 ** i for i in count(1))
    for (s, x) in take(20, sorted_product(wsum, a, b, c)):
        print(s, x)
|
def main():
    """Exercise sorted_product against brute-force enumeration.

    Runs several finite inputs through sorted_product with different
    "product" operators and compares against sorting the full Cartesian
    product, then demonstrates lazy enumeration over infinite streams.
    """
    import numpy as np
    import itertools
    from arsenal.iterextras import take
    from itertools import count

    class WeightedTuple:
        # A weight paired with a key tuple; ordered by (weight, key).
        def __init__(self, w, *key):
            self.key = key
            self.w = w
        def __lt__(self, other):
            return (self.w, self.key) < (other.w, other.key)
        def __eq__(self, other):
            return (self.w, self.key) == (other.w, other.key)
        def __mul__(self, other):
            return LWeightedTuple(self.w * other.w, self, other)
        def __add__(self, other):
            return LWeightedTuple(self.w + other.w, self, other)
        def __iter__(self):
            return iter((self.w, self.key))
        def __repr__(self):
            return repr((self.w, self.key))

    class LWeightedTuple(WeightedTuple):
        """WeightedTuple with lazy concatenation of keys."""
        def __init__(self, w, a, b):
            self.w = w
            self.a = a
            self.b = b
        @property
        def key(self):
            return self.a.key + self.b.key

    def wprod(xs):
        # np.prod replaces np.product, which was removed in NumPy 2.0.
        return np.prod([WeightedTuple(x, x) for x in xs])

    def wsum(xs):
        return np.sum([WeightedTuple(x, x) for x in xs])

    def check(iters):
        # Compare sorted_product against brute-force sort of the full product.
        for p in [np.prod, np.sum, tuple, wprod]:
            want = list(sorted(p(x) for x in itertools.product(*iters)))
            have = list(sorted_product(p, *iters))
            print()
            print('product operator:', p.__name__)
            print('HAVE:', have)
            print('WANT:', want)
            assert have == want
            print('pass.')

    print('===========')
    check([(0.1, 0.4, 0.5), (0.09, 0.11, 0.8), (0.111, 0.3, 0.6)])
    print('===========')
    check([(1, 2, 3), (4, 7, 11)])
    print('===========')
    check([(0.01, 0.4, 0.5), (0.11, 0.8), (0.6,)])
    print('===========')
    check([(1, 2, 3, 100), (4, 7, 9), (14, 17, 19), (24, 27, 29)])
    print('===========')
    # Infinite weighted streams: take only the 20 smallest sums lazily.
    a = (3 ** i for i in count(1))
    b = (4 ** i for i in count(1))
    c = (5 ** i for i in count(1))
    for (s, x) in take(20, sorted_product(wsum, a, b, c)):
        print(s, x)
|
arsenal
|
positive
|
def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
if err_name is None:
stderr = []
else:
<DeepExtract>
output = []
with open(err_name, 'r') as fd:
for line in fd:
new_line = re.sub('\\x1b\\[.+m', '', line)
output.append(new_line)
os.remove(err_name)
stderr = output
</DeepExtract>
<DeepExtract>
output = []
with open(out_name, 'r') as fd:
for line in fd:
new_line = re.sub('\\x1b\\[.+m', '', line)
output.append(new_line)
os.remove(out_name)
stdout = output
</DeepExtract>
<DeepExtract>
errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')]
errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')])
warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')]
warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')])
if str(exc).strip():
msg = str(exc).strip()
elif errors:
msg = errors[-1].encode('utf-8')
else:
msg = 'unknown cause'
reason = {'warnings': [to_native(w) for w in warnings], 'errors': [to_native(e) for e in errors], 'msg': msg, 'module_stderr': ''.join(stderr), 'module_stdout': ''.join(stdout)}
</DeepExtract>
reason['msg'] = msg_format % reason['msg']
return reason
|
def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
if err_name is None:
stderr = []
else:
output = []
with open(err_name, 'r') as fd:
for line in fd:
new_line = re.sub('\\x1b\\[.+m', '', line)
output.append(new_line)
os.remove(err_name)
stderr = output
output = []
with open(out_name, 'r') as fd:
for line in fd:
new_line = re.sub('\\x1b\\[.+m', '', line)
output.append(new_line)
os.remove(out_name)
stdout = output
errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')]
errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')])
warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')]
warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')])
if str(exc).strip():
msg = str(exc).strip()
elif errors:
msg = errors[-1].encode('utf-8')
else:
msg = 'unknown cause'
reason = {'warnings': [to_native(w) for w in warnings], 'errors': [to_native(e) for e in errors], 'msg': msg, 'module_stderr': ''.join(stderr), 'module_stdout': ''.join(stdout)}
reason['msg'] = msg_format % reason['msg']
return reason
|
community.docker
|
positive
|
@patch('chaosaws.s3.probes.aws_client', autospec=True)
def test_bucket_versioning_suspended_false(test_client: aws_client):
client = MagicMock()
test_client.return_value = client
<DeepExtract>
config = os.path.join(data_path, 'list_buckets_1.json')
with open(config, 'r') as fh:
client.list_buckets.return_value = json.loads(fh.read())
</DeepExtract>
<DeepExtract>
config = os.path.join(data_path, 'get_bucket_versioning_1.json')
with open(config, 'r') as fh:
client.get_bucket_versioning.return_value = json.loads(fh.read())
</DeepExtract>
response = versioning_status(bucket_name='Test-Bucket-1', status='Enabled')
assert not response
|
@patch('chaosaws.s3.probes.aws_client', autospec=True)
def test_bucket_versioning_suspended_false(test_client: aws_client):
client = MagicMock()
test_client.return_value = client
config = os.path.join(data_path, 'list_buckets_1.json')
with open(config, 'r') as fh:
client.list_buckets.return_value = json.loads(fh.read())
config = os.path.join(data_path, 'get_bucket_versioning_1.json')
with open(config, 'r') as fh:
client.get_bucket_versioning.return_value = json.loads(fh.read())
response = versioning_status(bucket_name='Test-Bucket-1', status='Enabled')
assert not response
|
chaostoolkit-aws
|
positive
|
def setMainUsuario(self, ct_Usuario):
ct_Usuario.setObjectName('ct_Usuario')
ct_Usuario.resize(1000, 500)
self.fr_Usurio = QtWidgets.QFrame(ct_Usuario)
self.fr_Usurio.setGeometry(QtCore.QRect(0, 0, 1000, 5000))
self.fr_Usurio.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.fr_Usurio.setFrameShadow(QtWidgets.QFrame.Raised)
self.fr_Usurio.setObjectName('fr_Usurio')
self.tb_Usuario = QtWidgets.QTableWidget(self.fr_Usurio)
self.tb_Usuario.setGeometry(QtCore.QRect(0, 40, 1000, 455))
self.tb_Usuario.setProperty('cursor', QtCore.Qt.PointingHandCursor)
self.tb_Usuario.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tb_Usuario.setStyleSheet('QTableView{\ncolor: #797979;\nfont-weight: bold;\nfont-size: 13px;\nbackground: #FFF;\npadding: 0 0 0 5px;\n}\nQHeaderView:section{\nbackground: #FFF;\npadding: 5px 0 ;\nfont-size: 13px;\nfont-family: "Arial";\nfont-weight: bold;\ncolor: #797979;\nborder: none;\nborder-bottom: 2px solid #CCC;\n}\nQTableView::item {\nborder-bottom: 2px solid #CCC;\npadding: 2px;\n}\n\n')
self.tb_Usuario.setFrameShape(QtWidgets.QFrame.NoFrame)
self.tb_Usuario.setFrameShadow(QtWidgets.QFrame.Plain)
self.tb_Usuario.setAutoScrollMargin(20)
self.tb_Usuario.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tb_Usuario.setTabKeyNavigation(False)
self.tb_Usuario.setProperty('showDropIndicator', False)
self.tb_Usuario.setDragDropOverwriteMode(False)
self.tb_Usuario.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.tb_Usuario.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tb_Usuario.setTextElideMode(QtCore.Qt.ElideMiddle)
self.tb_Usuario.setShowGrid(False)
self.tb_Usuario.setCornerButtonEnabled(False)
self.tb_Usuario.setRowCount(0)
self.tb_Usuario.setObjectName('tb_Usuario')
self.tb_Usuario.setColumnCount(6)
self.tb_Usuario.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tb_Usuario.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.tb_Usuario.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.tb_Usuario.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.tb_Usuario.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
self.tb_Usuario.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tb_Usuario.setHorizontalHeaderItem(5, item)
self.tb_Usuario.horizontalHeader().setDefaultSectionSize(120)
self.tb_Usuario.horizontalHeader().setStretchLastSection(True)
self.tb_Usuario.verticalHeader().setVisible(False)
self.tb_Usuario.verticalHeader().setCascadingSectionResizes(True)
self.tb_Usuario.verticalHeader().setDefaultSectionSize(50)
self.fr_TopoUsuarios = QtWidgets.QFrame(self.fr_Usurio)
self.fr_TopoUsuarios.setGeometry(QtCore.QRect(0, 0, 1000, 40))
self.fr_TopoUsuarios.setStyleSheet('background:#E1DFE0;\nborder: none;')
self.fr_TopoUsuarios.setObjectName('fr_TopoUsuarios')
self.bt_BuscaUsurio = QtWidgets.QPushButton(self.fr_TopoUsuarios)
self.bt_BuscaUsurio.setGeometry(QtCore.QRect(830, 5, 30, 30))
font = QtGui.QFont()
font.setFamily('Arial')
self.bt_BuscaUsurio.setFont(font)
self.bt_BuscaUsurio.setCursor(QtCore.Qt.PointingHandCursor)
self.bt_BuscaUsurio.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_BuscaUsurio.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.bt_BuscaUsurio.setText('')
self.bt_BuscaUsurio.setObjectName('bt_BuscaUsurio')
self.bt_AddNovoUsuario = QtWidgets.QPushButton(self.fr_TopoUsuarios)
self.bt_AddNovoUsuario.setGeometry(QtCore.QRect(900, 0, 100, 40))
self.bt_AddNovoUsuario.setCursor(QtCore.Qt.PointingHandCursor)
self.bt_AddNovoUsuario.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_AddNovoUsuario.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_AddNovoUsuario.setStyleSheet('QPushButton {\nbackground-color: #7AB32E;\n }\nQPushButton:hover{\nbackground-color: #40a286\n}')
self.bt_AddNovoUsuario.setText('')
self.bt_AddNovoUsuario.setIconSize(QtCore.QSize(75, 35))
self.bt_AddNovoUsuario.setObjectName('bt_AddNovoUsuario')
self.tx_BuscarUsuario = QtWidgets.QLineEdit(self.fr_TopoUsuarios)
self.tx_BuscarUsuario.setGeometry(QtCore.QRect(0, 5, 830, 30))
font = QtGui.QFont()
font.setFamily('Arial')
self.tx_BuscarUsuario.setFont(font)
self.tx_BuscarUsuario.setFocusPolicy(QtCore.Qt.ClickFocus)
self.tx_BuscarUsuario.setStyleSheet('QLineEdit {\ncolor: #000\n}\n')
self.tx_BuscarUsuario.setObjectName('tx_BuscarUsuario')
self.bt_PrintRelatUsuario = QtWidgets.QPushButton(self.fr_TopoUsuarios)
self.bt_PrintRelatUsuario.setGeometry(QtCore.QRect(870, 5, 30, 30))
font = QtGui.QFont()
font.setFamily('Arial')
self.bt_PrintRelatUsuario.setFont(font)
self.bt_PrintRelatUsuario.setCursor(QtCore.Qt.PointingHandCursor)
self.bt_PrintRelatUsuario.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_PrintRelatUsuario.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.bt_PrintRelatUsuario.setText('')
self.bt_PrintRelatUsuario.setObjectName('bt_PrintRelatUsuario')
<DeepExtract>
ct_Usuario.setWindowTitle(QtWidgets.QApplication.translate('ct_Usuario', 'Frame', None, -1))
self.tb_Usuario.horizontalHeaderItem(0).setText(QtWidgets.QApplication.translate('ct_Usuario', 'ID', None, -1))
self.tb_Usuario.horizontalHeaderItem(1).setText(QtWidgets.QApplication.translate('ct_Usuario', 'NOME', None, -1))
self.tb_Usuario.horizontalHeaderItem(2).setText(QtWidgets.QApplication.translate('ct_Usuario', 'TELEFONE', None, -1))
self.tb_Usuario.horizontalHeaderItem(3).setText(QtWidgets.QApplication.translate('ct_Usuario', 'E-MAIL', None, -1))
self.tb_Usuario.horizontalHeaderItem(4).setText(QtWidgets.QApplication.translate('ct_Usuario', 'NIVEL / STATUS', None, -1))
self.tb_Usuario.horizontalHeaderItem(5).setText(QtWidgets.QApplication.translate('ct_Usuario', 'EDITAR', None, -1))
self.bt_BuscaUsurio.setToolTip(QtWidgets.QApplication.translate('ct_Usuario', 'BUSCAR', None, -1))
self.tx_BuscarUsuario.setPlaceholderText(QtWidgets.QApplication.translate('ct_Usuario', 'PROCURAR POR...', None, -1))
self.bt_PrintRelatUsuario.setToolTip(QtWidgets.QApplication.translate('ct_Usuario', 'IMPRIMIR', None, -1))
</DeepExtract>
QtCore.QMetaObject.connectSlotsByName(ct_Usuario)
|
def setMainUsuario(self, ct_Usuario):
ct_Usuario.setObjectName('ct_Usuario')
ct_Usuario.resize(1000, 500)
self.fr_Usurio = QtWidgets.QFrame(ct_Usuario)
self.fr_Usurio.setGeometry(QtCore.QRect(0, 0, 1000, 5000))
self.fr_Usurio.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.fr_Usurio.setFrameShadow(QtWidgets.QFrame.Raised)
self.fr_Usurio.setObjectName('fr_Usurio')
self.tb_Usuario = QtWidgets.QTableWidget(self.fr_Usurio)
self.tb_Usuario.setGeometry(QtCore.QRect(0, 40, 1000, 455))
self.tb_Usuario.setProperty('cursor', QtCore.Qt.PointingHandCursor)
self.tb_Usuario.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tb_Usuario.setStyleSheet('QTableView{\ncolor: #797979;\nfont-weight: bold;\nfont-size: 13px;\nbackground: #FFF;\npadding: 0 0 0 5px;\n}\nQHeaderView:section{\nbackground: #FFF;\npadding: 5px 0 ;\nfont-size: 13px;\nfont-family: "Arial";\nfont-weight: bold;\ncolor: #797979;\nborder: none;\nborder-bottom: 2px solid #CCC;\n}\nQTableView::item {\nborder-bottom: 2px solid #CCC;\npadding: 2px;\n}\n\n')
self.tb_Usuario.setFrameShape(QtWidgets.QFrame.NoFrame)
self.tb_Usuario.setFrameShadow(QtWidgets.QFrame.Plain)
self.tb_Usuario.setAutoScrollMargin(20)
self.tb_Usuario.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.tb_Usuario.setTabKeyNavigation(False)
self.tb_Usuario.setProperty('showDropIndicator', False)
self.tb_Usuario.setDragDropOverwriteMode(False)
self.tb_Usuario.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.tb_Usuario.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tb_Usuario.setTextElideMode(QtCore.Qt.ElideMiddle)
self.tb_Usuario.setShowGrid(False)
self.tb_Usuario.setCornerButtonEnabled(False)
self.tb_Usuario.setRowCount(0)
self.tb_Usuario.setObjectName('tb_Usuario')
self.tb_Usuario.setColumnCount(6)
self.tb_Usuario.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tb_Usuario.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.tb_Usuario.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.tb_Usuario.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.tb_Usuario.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
self.tb_Usuario.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tb_Usuario.setHorizontalHeaderItem(5, item)
self.tb_Usuario.horizontalHeader().setDefaultSectionSize(120)
self.tb_Usuario.horizontalHeader().setStretchLastSection(True)
self.tb_Usuario.verticalHeader().setVisible(False)
self.tb_Usuario.verticalHeader().setCascadingSectionResizes(True)
self.tb_Usuario.verticalHeader().setDefaultSectionSize(50)
self.fr_TopoUsuarios = QtWidgets.QFrame(self.fr_Usurio)
self.fr_TopoUsuarios.setGeometry(QtCore.QRect(0, 0, 1000, 40))
self.fr_TopoUsuarios.setStyleSheet('background:#E1DFE0;\nborder: none;')
self.fr_TopoUsuarios.setObjectName('fr_TopoUsuarios')
self.bt_BuscaUsurio = QtWidgets.QPushButton(self.fr_TopoUsuarios)
self.bt_BuscaUsurio.setGeometry(QtCore.QRect(830, 5, 30, 30))
font = QtGui.QFont()
font.setFamily('Arial')
self.bt_BuscaUsurio.setFont(font)
self.bt_BuscaUsurio.setCursor(QtCore.Qt.PointingHandCursor)
self.bt_BuscaUsurio.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_BuscaUsurio.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.bt_BuscaUsurio.setText('')
self.bt_BuscaUsurio.setObjectName('bt_BuscaUsurio')
self.bt_AddNovoUsuario = QtWidgets.QPushButton(self.fr_TopoUsuarios)
self.bt_AddNovoUsuario.setGeometry(QtCore.QRect(900, 0, 100, 40))
self.bt_AddNovoUsuario.setCursor(QtCore.Qt.PointingHandCursor)
self.bt_AddNovoUsuario.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_AddNovoUsuario.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_AddNovoUsuario.setStyleSheet('QPushButton {\nbackground-color: #7AB32E;\n }\nQPushButton:hover{\nbackground-color: #40a286\n}')
self.bt_AddNovoUsuario.setText('')
self.bt_AddNovoUsuario.setIconSize(QtCore.QSize(75, 35))
self.bt_AddNovoUsuario.setObjectName('bt_AddNovoUsuario')
self.tx_BuscarUsuario = QtWidgets.QLineEdit(self.fr_TopoUsuarios)
self.tx_BuscarUsuario.setGeometry(QtCore.QRect(0, 5, 830, 30))
font = QtGui.QFont()
font.setFamily('Arial')
self.tx_BuscarUsuario.setFont(font)
self.tx_BuscarUsuario.setFocusPolicy(QtCore.Qt.ClickFocus)
self.tx_BuscarUsuario.setStyleSheet('QLineEdit {\ncolor: #000\n}\n')
self.tx_BuscarUsuario.setObjectName('tx_BuscarUsuario')
self.bt_PrintRelatUsuario = QtWidgets.QPushButton(self.fr_TopoUsuarios)
self.bt_PrintRelatUsuario.setGeometry(QtCore.QRect(870, 5, 30, 30))
font = QtGui.QFont()
font.setFamily('Arial')
self.bt_PrintRelatUsuario.setFont(font)
self.bt_PrintRelatUsuario.setCursor(QtCore.Qt.PointingHandCursor)
self.bt_PrintRelatUsuario.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_PrintRelatUsuario.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.bt_PrintRelatUsuario.setText('')
self.bt_PrintRelatUsuario.setObjectName('bt_PrintRelatUsuario')
ct_Usuario.setWindowTitle(QtWidgets.QApplication.translate('ct_Usuario', 'Frame', None, -1))
self.tb_Usuario.horizontalHeaderItem(0).setText(QtWidgets.QApplication.translate('ct_Usuario', 'ID', None, -1))
self.tb_Usuario.horizontalHeaderItem(1).setText(QtWidgets.QApplication.translate('ct_Usuario', 'NOME', None, -1))
self.tb_Usuario.horizontalHeaderItem(2).setText(QtWidgets.QApplication.translate('ct_Usuario', 'TELEFONE', None, -1))
self.tb_Usuario.horizontalHeaderItem(3).setText(QtWidgets.QApplication.translate('ct_Usuario', 'E-MAIL', None, -1))
self.tb_Usuario.horizontalHeaderItem(4).setText(QtWidgets.QApplication.translate('ct_Usuario', 'NIVEL / STATUS', None, -1))
self.tb_Usuario.horizontalHeaderItem(5).setText(QtWidgets.QApplication.translate('ct_Usuario', 'EDITAR', None, -1))
self.bt_BuscaUsurio.setToolTip(QtWidgets.QApplication.translate('ct_Usuario', 'BUSCAR', None, -1))
self.tx_BuscarUsuario.setPlaceholderText(QtWidgets.QApplication.translate('ct_Usuario', 'PROCURAR POR...', None, -1))
self.bt_PrintRelatUsuario.setToolTip(QtWidgets.QApplication.translate('ct_Usuario', 'IMPRIMIR', None, -1))
QtCore.QMetaObject.connectSlotsByName(ct_Usuario)
|
controleEstoque
|
positive
|
def test_schema_generation_param_type_aws_specific(template_fragment):
<DeepExtract>
create_fragments_folder_with_template('aws-specific-parameter.json', template_fragment)
schema = template_fragment.generate_schema()
schema = schema
</DeepExtract>
assert len(schema) == 4
assert len(schema['properties']) == 2
assert schema['properties']['Parameters']['properties']['VpcId']['properties']['Type']['type'] == 'string'
<DeepExtract>
__make_resource_validator().validate(schema)
</DeepExtract>
|
def test_schema_generation_param_type_aws_specific(template_fragment):
create_fragments_folder_with_template('aws-specific-parameter.json', template_fragment)
schema = template_fragment.generate_schema()
schema = schema
assert len(schema) == 4
assert len(schema['properties']) == 2
assert schema['properties']['Parameters']['properties']['VpcId']['properties']['Type']['type'] == 'string'
__make_resource_validator().validate(schema)
</DeepExtract>
|
cloudformation-cli
|
positive
|
@cache
def search(query, results=10, suggestion=False):
"""
Do a Wikipedia search for `query`.
Keyword arguments:
* results - the maxmimum number of results returned
* suggestion - if True, return results and suggestion (if any) in a tuple
"""
search_params = {'list': 'search', 'srprop': '', 'srlimit': results, 'limit': results, 'srsearch': query}
if suggestion:
search_params['srinfo'] = 'suggestion'
<DeepExtract>
global RATE_LIMIT_LAST_CALL
global USER_AGENT
search_params['format'] = 'json'
if not 'action' in search_params:
search_params['action'] = 'query'
headers = {'User-Agent': USER_AGENT}
if RATE_LIMIT and RATE_LIMIT_LAST_CALL and (RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT > datetime.now()):
wait_time = RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT - datetime.now()
time.sleep(int(wait_time.total_seconds()))
r = requests.get(API_URL, params=search_params, headers=headers)
if RATE_LIMIT:
RATE_LIMIT_LAST_CALL = datetime.now()
raw_results = r.json()
</DeepExtract>
if 'error' in raw_results:
if raw_results['error']['info'] in ('HTTP request timed out.', 'Pool queue is full'):
raise HTTPTimeoutError(query)
else:
raise WikipediaException(raw_results['error']['info'])
search_results = (d['title'] for d in raw_results['query']['search'])
if suggestion:
if raw_results['query'].get('searchinfo'):
return (list(search_results), raw_results['query']['searchinfo']['suggestion'])
else:
return (list(search_results), None)
return list(search_results)
|
@cache
def search(query, results=10, suggestion=False):
"""
Do a Wikipedia search for `query`.
Keyword arguments:
* results - the maxmimum number of results returned
* suggestion - if True, return results and suggestion (if any) in a tuple
"""
search_params = {'list': 'search', 'srprop': '', 'srlimit': results, 'limit': results, 'srsearch': query}
if suggestion:
search_params['srinfo'] = 'suggestion'
global RATE_LIMIT_LAST_CALL
global USER_AGENT
search_params['format'] = 'json'
if not 'action' in search_params:
search_params['action'] = 'query'
headers = {'User-Agent': USER_AGENT}
if RATE_LIMIT and RATE_LIMIT_LAST_CALL and (RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT > datetime.now()):
wait_time = RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT - datetime.now()
time.sleep(int(wait_time.total_seconds()))
r = requests.get(API_URL, params=search_params, headers=headers)
if RATE_LIMIT:
RATE_LIMIT_LAST_CALL = datetime.now()
raw_results = r.json()
if 'error' in raw_results:
if raw_results['error']['info'] in ('HTTP request timed out.', 'Pool queue is full'):
raise HTTPTimeoutError(query)
else:
raise WikipediaException(raw_results['error']['info'])
search_results = (d['title'] for d in raw_results['query']['search'])
if suggestion:
if raw_results['query'].get('searchinfo'):
return (list(search_results), raw_results['query']['searchinfo']['suggestion'])
else:
return (list(search_results), None)
return list(search_results)
|
BenchmarkingZeroShot
|
positive
|
def __init__(self, conv_channels, embed_dim, normalization_constant=0.5, bmm=None):
super().__init__()
self.normalization_constant = normalization_constant
<DeepExtract>
m = nn.Linear(conv_channels, embed_dim)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / conv_channels))
nn.init.constant_(m.bias, 0)
self.in_projection = nn.utils.weight_norm(m)
</DeepExtract>
<DeepExtract>
m = nn.Linear(embed_dim, conv_channels)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / embed_dim))
nn.init.constant_(m.bias, 0)
self.out_projection = nn.utils.weight_norm(m)
</DeepExtract>
self.bmm = bmm if bmm is not None else torch.bmm
|
def __init__(self, conv_channels, embed_dim, normalization_constant=0.5, bmm=None):
super().__init__()
self.normalization_constant = normalization_constant
m = nn.Linear(conv_channels, embed_dim)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / conv_channels))
nn.init.constant_(m.bias, 0)
self.in_projection = nn.utils.weight_norm(m)
m = nn.Linear(embed_dim, conv_channels)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / embed_dim))
nn.init.constant_(m.bias, 0)
self.out_projection = nn.utils.weight_norm(m)
self.bmm = bmm if bmm is not None else torch.bmm
|
crosentgec
|
positive
|
def match_oligo(seq, oligo, mismatches=0):
"""Function for searching for a certain oligo"""
<DeepExtract>
oligo = oligo.upper()
re_out = []
for char in oligo:
if char == 'G':
re_out.append('[GRSKVDBXN]')
elif char == 'A':
re_out.append('[AMRWVHDXN]')
elif char == 'T':
re_out.append('[TWYKHDBXN]')
elif char == 'C':
re_out.append('[CMSVHBXN]')
elif char == 'M':
re_out.append('[MACVHXN]')
elif char == 'R':
re_out.append('[RAGVDXN]')
elif char == 'W':
re_out.append('[WATHDXN]')
elif char == 'S':
re_out.append('[SCGVBXN]')
elif char == 'Y':
re_out.append('[YCTHBXN]')
elif char == 'K':
re_out.append('[KGTDBXN]')
elif char == 'V':
re_out.append('[VACGXNMRS]')
elif char == 'H':
re_out.append('[HACTXNMWY]')
elif char == 'D':
re_out.append('[DAGTXNRWK]')
elif char == 'B':
re_out.append('[BCGTXNSYK]')
elif char == 'X':
re_out.append('[XNMRWSYKVHDBGATC]')
elif char == 'N':
re_out.append('[NXMRWSYKVHDBGATC]')
re_out = string.join(re_out, '')
re_out = '(?=(%s))' % re_out
re_out_comp = re.compile(re_out, re.IGNORECASE)
re_oligo = re_out_comp
</DeepExtract>
L_out = []
for match in re_oligo.finditer(seq):
L_out.append([match.start() + 1, match.end() + len(match.group(1))])
return L_out
|
def match_oligo(seq, oligo, mismatches=0):
"""Function for searching for a certain oligo"""
oligo = oligo.upper()
re_out = []
for char in oligo:
if char == 'G':
re_out.append('[GRSKVDBXN]')
elif char == 'A':
re_out.append('[AMRWVHDXN]')
elif char == 'T':
re_out.append('[TWYKHDBXN]')
elif char == 'C':
re_out.append('[CMSVHBXN]')
elif char == 'M':
re_out.append('[MACVHXN]')
elif char == 'R':
re_out.append('[RAGVDXN]')
elif char == 'W':
re_out.append('[WATHDXN]')
elif char == 'S':
re_out.append('[SCGVBXN]')
elif char == 'Y':
re_out.append('[YCTHBXN]')
elif char == 'K':
re_out.append('[KGTDBXN]')
elif char == 'V':
re_out.append('[VACGXNMRS]')
elif char == 'H':
re_out.append('[HACTXNMWY]')
elif char == 'D':
re_out.append('[DAGTXNRWK]')
elif char == 'B':
re_out.append('[BCGTXNSYK]')
elif char == 'X':
re_out.append('[XNMRWSYKVHDBGATC]')
elif char == 'N':
re_out.append('[NXMRWSYKVHDBGATC]')
re_out = string.join(re_out, '')
re_out = '(?=(%s))' % re_out
re_out_comp = re.compile(re_out, re.IGNORECASE)
re_oligo = re_out_comp
L_out = []
for match in re_oligo.finditer(seq):
L_out.append([match.start() + 1, match.end() + len(match.group(1))])
return L_out
|
DNApy
|
positive
|
def test_serialize_with_naive_datetime(self):
<DeepExtract>
from colander import Invalid
exc = Invalid(node, msg, val)
typ = exc
</DeepExtract>
node = DummySchemaNode(None)
<DeepExtract>
import datetime
dt = datetime.datetime(2010, 4, 26, 10, 48)
</DeepExtract>
result = typ.serialize(node, dt)
expected = dt.replace(tzinfo=typ.default_tzinfo).isoformat()
self.assertEqual(result, expected)
|
def test_serialize_with_naive_datetime(self):
from colander import Invalid
exc = Invalid(node, msg, val)
typ = exc
node = DummySchemaNode(None)
import datetime
dt = datetime.datetime(2010, 4, 26, 10, 48)
result = typ.serialize(node, dt)
expected = dt.replace(tzinfo=typ.default_tzinfo).isoformat()
self.assertEqual(result, expected)
|
colander
|
positive
|
def single_uniform_segment_matching(self, dense_targets, sampled_pts, edge_idx):
ext_idx = edge_idx[::3]
aug_ext_idx = torch.cat([ext_idx, torch.tensor([self.num_sampling - 1], device=ext_idx.device)], dim=0)
ch_pts = sampled_pts[ext_idx]
diff = (ch_pts[:, None, :] - dense_targets[None, :, :]).pow(2).sum(2)
min_idx = torch.argmin(diff, dim=1)
aug_min_idx = torch.cat([min_idx, torch.tensor([self.num_sampling * 3 - 1], device=min_idx.device)], dim=0)
before_i = 0
after_i = 1
segments = []
for i in range(4):
original_len = aug_min_idx[after_i] - aug_min_idx[before_i]
assert original_len >= 0
if original_len == 0:
after_i += 1
continue
desired_num_seg = aug_ext_idx[after_i] - aug_ext_idx[before_i]
assert desired_num_seg >= 0
if desired_num_seg == 0:
before_i += 1
after_i += 1
continue
<DeepExtract>
n = dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]].shape[0]
if n == desired_num_seg:
re_sampled_pts = dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]]
segment_len = np.sqrt(np.sum((dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][1:] - dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][:-1]) ** 2, axis=1))
start_node = np.cumsum(np.concatenate([np.array([0]), segment_len]))
total_len = np.sum(segment_len)
new_per_len = total_len / desired_num_seg
mark_1d = ((np.arange(desired_num_seg - 1) + 1) * new_per_len).reshape(-1, 1)
locate = start_node.reshape(1, -1) - mark_1d
(iss, jss) = np.where(locate > 0)
cut_idx = np.cumsum(np.unique(iss, return_counts=True)[1])
cut_idx = np.concatenate([np.array([0]), cut_idx[:-1]])
after_idx = jss[cut_idx]
before_idx = after_idx - 1
after_idx[after_idx < 0] = 0
before = locate[np.arange(desired_num_seg - 1), before_idx]
after = locate[np.arange(desired_num_seg - 1), after_idx]
w = (-before / (after - before)).reshape(-1, 1)
sampled_pts = (1 - w) * dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][before_idx] + w * dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][after_idx]
re_sampled_pts = np.concatenate([dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][:1], sampled_pts, dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][-1:]], axis=0)
</DeepExtract>
segments.append(re_sampled_pts)
segments = np.concatenate(segments, axis=0)
assert len(segments) == self.num_sampling
return segments
|
def single_uniform_segment_matching(self, dense_targets, sampled_pts, edge_idx):
ext_idx = edge_idx[::3]
aug_ext_idx = torch.cat([ext_idx, torch.tensor([self.num_sampling - 1], device=ext_idx.device)], dim=0)
ch_pts = sampled_pts[ext_idx]
diff = (ch_pts[:, None, :] - dense_targets[None, :, :]).pow(2).sum(2)
min_idx = torch.argmin(diff, dim=1)
aug_min_idx = torch.cat([min_idx, torch.tensor([self.num_sampling * 3 - 1], device=min_idx.device)], dim=0)
before_i = 0
after_i = 1
segments = []
for i in range(4):
original_len = aug_min_idx[after_i] - aug_min_idx[before_i]
assert original_len >= 0
if original_len == 0:
after_i += 1
continue
desired_num_seg = aug_ext_idx[after_i] - aug_ext_idx[before_i]
assert desired_num_seg >= 0
if desired_num_seg == 0:
before_i += 1
after_i += 1
continue
n = dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]].shape[0]
if n == desired_num_seg:
re_sampled_pts = dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]]
segment_len = np.sqrt(np.sum((dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][1:] - dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][:-1]) ** 2, axis=1))
start_node = np.cumsum(np.concatenate([np.array([0]), segment_len]))
total_len = np.sum(segment_len)
new_per_len = total_len / desired_num_seg
mark_1d = ((np.arange(desired_num_seg - 1) + 1) * new_per_len).reshape(-1, 1)
locate = start_node.reshape(1, -1) - mark_1d
(iss, jss) = np.where(locate > 0)
cut_idx = np.cumsum(np.unique(iss, return_counts=True)[1])
cut_idx = np.concatenate([np.array([0]), cut_idx[:-1]])
after_idx = jss[cut_idx]
before_idx = after_idx - 1
after_idx[after_idx < 0] = 0
before = locate[np.arange(desired_num_seg - 1), before_idx]
after = locate[np.arange(desired_num_seg - 1), after_idx]
w = (-before / (after - before)).reshape(-1, 1)
sampled_pts = (1 - w) * dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][before_idx] + w * dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][after_idx]
re_sampled_pts = np.concatenate([dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][:1], sampled_pts, dense_targets[aug_min_idx[before_i]:aug_min_idx[after_i]][-1:]], axis=0)
segments.append(re_sampled_pts)
segments = np.concatenate(segments, axis=0)
assert len(segments) == self.num_sampling
return segments
|
dance
|
positive
|
def test_dummy(self):
<DeepExtract>
header = realpath(join('config_02.h'.resource_dir, filename))
</DeepExtract>
with self.subTest('Confim failure'):
with self.assertRaises(ValueError):
checker = UBootHeaderChecker('2011.13', self.resource_dir)
_ = checker.load(header)
with self.subTest('Confim dummy_headers works'):
checker = UBootHeaderChecker('2011.13', self.resource_dir, dummy_headers=['test/induce_error.h'])
_ = checker.load(header)
with self.subTest('dummy_headers as string'):
checker = UBootHeaderChecker('2011.13', self.resource_dir, dummy_headers='test/induce_error.h')
_ = checker.load(header)
|
def test_dummy(self):
header = realpath(join('config_02.h'.resource_dir, filename))
with self.subTest('Confim failure'):
with self.assertRaises(ValueError):
checker = UBootHeaderChecker('2011.13', self.resource_dir)
_ = checker.load(header)
with self.subTest('Confim dummy_headers works'):
checker = UBootHeaderChecker('2011.13', self.resource_dir, dummy_headers=['test/induce_error.h'])
_ = checker.load(header)
with self.subTest('dummy_headers as string'):
checker = UBootHeaderChecker('2011.13', self.resource_dir, dummy_headers='test/induce_error.h')
_ = checker.load(header)
|
depthcharge
|
positive
|
@admin.display(description='Blind')
def desc(self, obj):
desc = str(obj)
<DeepExtract>
url = reverse('admin:example_app_blind_change', args=(obj.id,))
url = url
</DeepExtract>
return f'<a href="{url}">{desc}</a>'
|
@admin.display(description='Blind')
def desc(self, obj):
desc = str(obj)
url = reverse('admin:example_app_blind_change', args=(obj.id,))
url = url
return f'<a href="{url}">{desc}</a>'
|
django-silk
|
positive
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.