| before (string, lengths 0–955k) | after (string, lengths 0–877k) | repo (string, lengths 1–74) | type (stringclasses, 1 value: "positive") |
|---|---|---|---|
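Each row below pairs a `before` snippet, in which inlined code is wrapped in `<DeepExtract>`/`</DeepExtract>` markers, with an `after` snippet where those markers are stripped, followed by the source `repo` and a `type` label. A minimal sketch of how such a table could be iterated, assuming it is published as a Hugging Face dataset; the dataset path used here is a placeholder, not the real one:

```python
# Hedged sketch, not from the source: "someuser/deepextract-pairs" is a placeholder path.
from datasets import load_dataset

ds = load_dataset("someuser/deepextract-pairs", split="train")
for row in ds.select(range(3)):
    # "before" keeps the <DeepExtract> markers; "after" has them removed.
    print(row["repo"], row["type"], len(row["before"]), len(row["after"]))
```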
def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
self.output_stride = output_stride
current_stride = 1
if inverted_residual_setting is None:
inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting))
<DeepExtract>
if min_value is None:
min_value = round_nearest
new_v = max(min_value, int(input_channel * width_mult + round_nearest / 2) // round_nearest * round_nearest)
if new_v < 0.9 * input_channel * width_mult:
new_v += round_nearest
input_channel = new_v
</DeepExtract>
<DeepExtract>
if min_value is None:
min_value = round_nearest
new_v = max(min_value, int(last_channel * max(1.0, width_mult) + round_nearest / 2) // round_nearest * round_nearest)
if new_v < 0.9 * last_channel * max(1.0, width_mult):
new_v += round_nearest
self.last_channel = new_v
</DeepExtract>
features = [ConvBNReLU(3, input_channel, stride=2)]
current_stride *= 2
dilation = 1
previous_dilation = 1
for (t, c, n, s) in inverted_residual_setting:
<DeepExtract>
if min_value is None:
min_value = round_nearest
new_v = max(min_value, int(c * width_mult + round_nearest / 2) // round_nearest * round_nearest)
if new_v < 0.9 * c * width_mult:
new_v += round_nearest
output_channel = new_v
</DeepExtract>
previous_dilation = dilation
if current_stride == output_stride:
stride = 1
dilation *= s
else:
stride = s
current_stride *= s
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
features.append(block(input_channel, output_channel, stride, previous_dilation, expand_ratio=t))
else:
features.append(block(input_channel, output_channel, 1, dilation, expand_ratio=t))
input_channel = output_channel
features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
self.features = nn.Sequential(*features)
self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
|
def __init__(self, num_classes=1000, output_stride=8, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
self.output_stride = output_stride
current_stride = 1
if inverted_residual_setting is None:
inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting))
if min_value is None:
min_value = round_nearest
new_v = max(min_value, int(input_channel * width_mult + round_nearest / 2) // round_nearest * round_nearest)
if new_v < 0.9 * input_channel * width_mult:
new_v += round_nearest
input_channel = new_v
if min_value is None:
min_value = round_nearest
new_v = max(min_value, int(last_channel * max(1.0, width_mult) + round_nearest / 2) // round_nearest * round_nearest)
if new_v < 0.9 * last_channel * max(1.0, width_mult):
new_v += round_nearest
self.last_channel = new_v
features = [ConvBNReLU(3, input_channel, stride=2)]
current_stride *= 2
dilation = 1
previous_dilation = 1
for (t, c, n, s) in inverted_residual_setting:
if min_value is None:
min_value = round_nearest
new_v = max(min_value, int(c * width_mult + round_nearest / 2) // round_nearest * round_nearest)
if new_v < 0.9 * c * width_mult:
new_v += round_nearest
output_channel = new_v
previous_dilation = dilation
if current_stride == output_stride:
stride = 1
dilation *= s
else:
stride = s
current_stride *= s
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
features.append(block(input_channel, output_channel, stride, previous_dilation, expand_ratio=t))
else:
features.append(block(input_channel, output_channel, 1, dilation, expand_ratio=t))
input_channel = output_channel
features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
self.features = nn.Sequential(*features)
self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
|
CV_LTH_Pre-training
|
positive
|
def bulk_patch(self, url, list_data, check=status.HTTP_200_OK):
response = self._client.patch(url, json=jsonable_encoder(list_data))
<DeepExtract>
if check is None:
return
if isinstance(check, (list, tuple)):
assert response.status_code in check, response.text
else:
assert response.status_code == check, response.text
</DeepExtract>
return response.json()
|
def bulk_patch(self, url, list_data, check=status.HTTP_200_OK):
response = self._client.patch(url, json=jsonable_encoder(list_data))
if check is None:
return
if isinstance(check, (list, tuple)):
assert response.status_code in check, response.text
else:
assert response.status_code == check, response.text
return response.json()
|
balsam
|
positive
|
def _process_all_allele_view(limit):
"""
Add the allele as a variant locus (or reference locus if wild-type).
If the marker is specified, we add the link to the marker.
We assume that the MGI ids are available in the idhash,
added in all_summary_view.
We add the sequence alteration as a BNode here, if there is a marker.
Otherwise, the allele itself is a sequence alteration.
Triples:
<MGI:allele_id> a GENO:variant_locus
OR GENO:reference_locus
OR GENO:sequence_alteration IF no marker_id specified.
[GENO:has_variant_part OR GENO:has_reference_part] <MGI:marker_id>
GENO:derived_from <MGI:strain_id>
GENO:has_variant_part <_seq_alt_id>
<_seq_alt_id> a GENO:sequence_alteration
derives_from <strain_id>
:param limit:
:return:
"""
src_key = 'all_allele_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_num = 0
LOG.info('adding alleles, mapping to markers, extracting their sequence alterations from all_allele_view')
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
col_len = len(col)
with open(raw, 'r') as reader:
row = reader.readline().rstrip('\n').split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip('\n')
line_num += 1
row = line.split('\t')
if col_len != len(row):
LOG.warning('Expected %i columns.', col_len)
LOG.warning('Received %i columns.', len(row))
LOG.warning(line.format())
continue
allele_key = row[col.index('_allele_key')].strip()
marker_key = row[col.index('_marker_key')].strip()
strain_key = row[col.index('_strain_key')].strip()
symbol = row[col.index('symbol')].strip()
name = row[col.index('name')].strip()
iswildtype = row[col.index('iswildtype')].strip()
if self.test_mode is True and int(allele_key) not in self.test_keys.get('allele'):
continue
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is None:
LOG.error("what to do! can't find allele_id. skipping %s %s", allele_key, symbol)
continue
marker_id = None
if marker_key is not None and marker_key != '':
marker_id = self.idhash['marker'].get(marker_key)
if marker_id is None:
LOG.error("what to do! can't find marker_id. skipping %s %s", marker_key, symbol)
continue
<DeepExtract>
iid = self.make_id('mgi' + 'seqalt' + 'key' + allele_key, '_')
iseqalt_id = iid
</DeepExtract>
if iswildtype == '0':
locus_type = self.globaltt['variant_locus']
locus_rel = self.globaltt['is_allele_of']
elif iswildtype == '1':
locus_type = self.globaltt['reference_locus']
locus_rel = self.globaltt['is_reference_allele_of']
self.wildtype_alleles.add(allele_id)
else:
locus_rel = None
locus_type = None
model.addIndividualToGraph(allele_id, symbol, locus_type)
model.makeLeader(allele_id)
self.label_hash[allele_id] = symbol
self.idhash['seqalt'][allele_key] = iseqalt_id
allele_label = self.label_hash.get(allele_id)
marker_label = self.label_hash.get(marker_id)
if allele_label is not None and allele_label == marker_label:
self.idhash['seqalt'][allele_key] = allele_id
model.addComment(allele_id, self._make_internal_identifier('allele', allele_key))
if marker_id is not None:
geno.addAlleleOfGene(allele_id, marker_id, locus_rel)
if iswildtype == '0':
sa_label = symbol
sa_id = iseqalt_id
if marker_key is not None and allele_label != marker_label and (marker_key != ''):
if re.match('.*<.*>.*', symbol):
sa_label = re.sub('.*<', '<', symbol)
elif re.match('\\+', symbol):
sa_label = '<+>'
geno.addSequenceAlterationToVariantLocus(iseqalt_id, allele_id)
else:
sa_id = allele_id
sa_label = re.sub('[\\<\\>]', '', sa_label)
geno.addSequenceAlteration(sa_id, sa_label, None, name)
self.label_hash[sa_id] = sa_label
strain_id = self.idhash['strain'].get(strain_key)
if strain_id is not None and strain_id not in ['MGI:4867032', 'MGI:5649511']:
geno.addSequenceDerivesFrom(allele_id, strain_id)
if not self.test_mode and limit is not None and (line_num > limit):
break
|
def _process_all_allele_view(limit):
"""
Add the allele as a variant locus (or reference locus if wild-type).
If the marker is specified, we add the link to the marker.
We assume that the MGI ids are available in the idhash,
added in all_summary_view.
We add the sequence alteration as a BNode here, if there is a marker.
Otherwise, the allele itself is a sequence alteration.
Triples:
<MGI:allele_id> a GENO:variant_locus
OR GENO:reference_locus
OR GENO:sequence_alteration IF no marker_id specified.
[GENO:has_variant_part OR GENO:has_reference_part] <MGI:marker_id>
GENO:derived_from <MGI:strain_id>
GENO:has_variant_part <_seq_alt_id>
<_seq_alt_id> a GENO:sequence_alteration
derives_from <strain_id>
:param limit:
:return:
"""
src_key = 'all_allele_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_num = 0
LOG.info('adding alleles, mapping to markers, extracting their sequence alterations from all_allele_view')
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
col_len = len(col)
with open(raw, 'r') as reader:
row = reader.readline().rstrip('\n').split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip('\n')
line_num += 1
row = line.split('\t')
if col_len != len(row):
LOG.warning('Expected %i columns.', col_len)
LOG.warning('Received %i columns.', len(row))
LOG.warning(line.format())
continue
allele_key = row[col.index('_allele_key')].strip()
marker_key = row[col.index('_marker_key')].strip()
strain_key = row[col.index('_strain_key')].strip()
symbol = row[col.index('symbol')].strip()
name = row[col.index('name')].strip()
iswildtype = row[col.index('iswildtype')].strip()
if self.test_mode is True and int(allele_key) not in self.test_keys.get('allele'):
continue
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is None:
LOG.error("what to do! can't find allele_id. skipping %s %s", allele_key, symbol)
continue
marker_id = None
if marker_key is not None and marker_key != '':
marker_id = self.idhash['marker'].get(marker_key)
if marker_id is None:
LOG.error("what to do! can't find marker_id. skipping %s %s", marker_key, symbol)
continue
iid = self.make_id('mgi' + 'seqalt' + 'key' + allele_key, '_')
iseqalt_id = iid
if iswildtype == '0':
locus_type = self.globaltt['variant_locus']
locus_rel = self.globaltt['is_allele_of']
elif iswildtype == '1':
locus_type = self.globaltt['reference_locus']
locus_rel = self.globaltt['is_reference_allele_of']
self.wildtype_alleles.add(allele_id)
else:
locus_rel = None
locus_type = None
model.addIndividualToGraph(allele_id, symbol, locus_type)
model.makeLeader(allele_id)
self.label_hash[allele_id] = symbol
self.idhash['seqalt'][allele_key] = iseqalt_id
allele_label = self.label_hash.get(allele_id)
marker_label = self.label_hash.get(marker_id)
if allele_label is not None and allele_label == marker_label:
self.idhash['seqalt'][allele_key] = allele_id
model.addComment(allele_id, self._make_internal_identifier('allele', allele_key))
if marker_id is not None:
geno.addAlleleOfGene(allele_id, marker_id, locus_rel)
if iswildtype == '0':
sa_label = symbol
sa_id = iseqalt_id
if marker_key is not None and allele_label != marker_label and (marker_key != ''):
if re.match('.*<.*>.*', symbol):
sa_label = re.sub('.*<', '<', symbol)
elif re.match('\\+', symbol):
sa_label = '<+>'
geno.addSequenceAlterationToVariantLocus(iseqalt_id, allele_id)
else:
sa_id = allele_id
sa_label = re.sub('[\\<\\>]', '', sa_label)
geno.addSequenceAlteration(sa_id, sa_label, None, name)
self.label_hash[sa_id] = sa_label
strain_id = self.idhash['strain'].get(strain_key)
if strain_id is not None and strain_id not in ['MGI:4867032', 'MGI:5649511']:
geno.addSequenceDerivesFrom(allele_id, strain_id)
if not self.test_mode and limit is not None and (line_num > limit):
break
|
dipper
|
positive
|
def compute_time_features(index, hour_of_week=True, day_of_week=True, hour_of_day=True):
"""Compute hour of week, day of week, or hour of day features.
Parameters
----------
index : :any:`pandas.DatetimeIndex`
Datetime index with hourly frequency.
hour_of_week : :any:`bool`
Include the `hour_of_week` feature.
day_of_week : :any:`bool`
Include the `day_of_week` feature.
hour_of_day : :any:`bool`
Include the `hour_of_day` feature.
Returns
-------
time_features : :any:`pandas.DataFrame`
A dataframe with the input datetime index and up to three columns
- hour_of_week : Label for hour of week, 0-167, 0 is 12-1am Monday
- day_of_week : Label for day of week, 0-6, 0 is Monday.
- hour_of_day : Label for hour of day, 0-23, 0 is 12-1am.
"""
if index.freq != 'H':
raise ValueError("index must have hourly frequency (freq='H'). Found: {}".format(index.freq))
dow_feature = pd.Series(index.dayofweek, index=index, name='day_of_week')
hod_feature = pd.Series(index.hour, index=index, name='hour_of_day')
how_feature = (dow_feature * 24 + hod_feature).rename('hour_of_week')
features = []
warnings = []
if day_of_week:
features.append(dow_feature.astype('category'))
if hour_of_day:
features.append(hod_feature.astype('category'))
if hour_of_week:
how_feature = how_feature.astype('category')
features.append(how_feature)
<DeepExtract>
unique = set(how_feature.unique())
total = set(range(168))
missing = sorted(total - unique)
if len(missing) == 0:
warning = None
else:
warning = EEMeterWarning(qualified_name='eemeter.hour_of_week.missing', description='Missing some of the (zero-indexed) 168 hours of the week.', data={'missing_hours_of_week': missing})
</DeepExtract>
if warning is not None:
warnings.append(warning)
if len(features) == 0:
raise ValueError('No features selected.')
<DeepExtract>
def _to_frame_if_needed(df_or_series):
if isinstance(df_or_series, pd.Series):
time_features = df_or_series.to_frame()
time_features = df_or_series
df = pd.concat([_to_frame_if_needed(feature) for feature in features], axis=1)
if not keep_partial_nan_rows:
df = overwrite_partial_rows_with_nan(df)
time_features = df
</DeepExtract>
return time_features
|
def compute_time_features(index, hour_of_week=True, day_of_week=True, hour_of_day=True):
"""Compute hour of week, day of week, or hour of day features.
Parameters
----------
index : :any:`pandas.DatetimeIndex`
Datetime index with hourly frequency.
hour_of_week : :any:`bool`
Include the `hour_of_week` feature.
day_of_week : :any:`bool`
Include the `day_of_week` feature.
hour_of_day : :any:`bool`
Include the `hour_of_day` feature.
Returns
-------
time_features : :any:`pandas.DataFrame`
A dataframe with the input datetime index and up to three columns
- hour_of_week : Label for hour of week, 0-167, 0 is 12-1am Monday
- day_of_week : Label for day of week, 0-6, 0 is Monday.
- hour_of_day : Label for hour of day, 0-23, 0 is 12-1am.
"""
if index.freq != 'H':
raise ValueError("index must have hourly frequency (freq='H'). Found: {}".format(index.freq))
dow_feature = pd.Series(index.dayofweek, index=index, name='day_of_week')
hod_feature = pd.Series(index.hour, index=index, name='hour_of_day')
how_feature = (dow_feature * 24 + hod_feature).rename('hour_of_week')
features = []
warnings = []
if day_of_week:
features.append(dow_feature.astype('category'))
if hour_of_day:
features.append(hod_feature.astype('category'))
if hour_of_week:
how_feature = how_feature.astype('category')
features.append(how_feature)
unique = set(how_feature.unique())
total = set(range(168))
missing = sorted(total - unique)
if len(missing) == 0:
warning = None
else:
warning = EEMeterWarning(qualified_name='eemeter.hour_of_week.missing', description='Missing some of the (zero-indexed) 168 hours of the week.', data={'missing_hours_of_week': missing})
if warning is not None:
warnings.append(warning)
if len(features) == 0:
raise ValueError('No features selected.')
def _to_frame_if_needed(df_or_series):
if isinstance(df_or_series, pd.Series):
time_features = df_or_series.to_frame()
time_features = df_or_series
df = pd.concat([_to_frame_if_needed(feature) for feature in features], axis=1)
if not keep_partial_nan_rows:
df = overwrite_partial_rows_with_nan(df)
time_features = df
return time_features
|
eemeter
|
positive
|
def test_trie() -> None:
<DeepExtract>
with open(f'{ETHEREUM_TESTS_PATH}/TrieTests/' + 'trietest.json') as f:
tests = json.load(f)
tests = tests
</DeepExtract>
for (name, test) in tests.items():
st: Trie[Bytes, Bytes] = Trie(secured=False, default=b'')
for t in test.get('in'):
trie_set(st, to_bytes(t[0]), to_bytes(t[1]))
result = root(st)
expected = remove_hex_prefix(test.get('root'))
assert result.hex() == expected, f'test {name} failed'
|
def test_trie() -> None:
with open(f'{ETHEREUM_TESTS_PATH}/TrieTests/' + 'trietest.json') as f:
tests = json.load(f)
tests = tests
for (name, test) in tests.items():
st: Trie[Bytes, Bytes] = Trie(secured=False, default=b'')
for t in test.get('in'):
trie_set(st, to_bytes(t[0]), to_bytes(t[1]))
result = root(st)
expected = remove_hex_prefix(test.get('root'))
assert result.hex() == expected, f'test {name} failed'
|
eth1.0-specs
|
positive
|
def preprocess_test(out_dir, temp_dir=None):
if temp_dir is None:
temp_dir = os.path.join(out_dir, 'temp')
test_image_url = 'http://opencas.webarchiv.kit.edu/data/endovis15_ins/Segmentation_Rigid_Testing_Revision.zip'
test_image_zip = os.path.join(temp_dir, os.path.basename(test_image_url))
test_image_dir = os.path.join(temp_dir, 'test', 'image')
<DeepExtract>
os.makedirs(os.path.dirname(test_image_zip), exist_ok=True)
if not os.path.exists(test_image_zip):
with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, ncols=80) as t:
urllib.request.urlretrieve(test_image_url, test_image_zip, reporthook=my_hook(t))
</DeepExtract>
<DeepExtract>
os.makedirs(os.path.dirname(test_image_dir), exist_ok=True)
with zipfile.ZipFile(test_image_zip) as existing_zip:
existing_zip.extractall(test_image_dir)
</DeepExtract>
test_label_url = 'http://opencas.webarchiv.kit.edu/data/endovis15_ins/Segmentation_Rigid_Testing_GT.zip'
test_label_zip = os.path.join(temp_dir, os.path.basename(test_label_url))
test_label_dir = os.path.join(temp_dir, 'test', 'label')
<DeepExtract>
os.makedirs(os.path.dirname(test_label_zip), exist_ok=True)
if not os.path.exists(test_label_zip):
with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, ncols=80) as t:
urllib.request.urlretrieve(test_label_url, test_label_zip, reporthook=my_hook(t))
</DeepExtract>
<DeepExtract>
os.makedirs(os.path.dirname(test_label_dir), exist_ok=True)
with zipfile.ZipFile(test_label_zip) as existing_zip:
existing_zip.extractall(test_label_dir)
</DeepExtract>
test_image_files = glob.glob(os.path.join(test_image_dir, '**', '*_raw.png'), recursive=True)
print('# test images:', len(test_image_files))
<DeepExtract>
commonpath = os.path.commonpath(test_image_files)
for f in tqdm.tqdm(test_image_files):
out = os.path.join(os.path.join(out_dir, 'test'), os.path.relpath(f, commonpath))
os.makedirs(os.path.dirname(out), exist_ok=True)
copyfile(f, out)
</DeepExtract>
test_label_files = glob.glob(os.path.join(test_label_dir, '**', '*_class.png'), recursive=True)
print('# test labels:', len(test_label_files))
<DeepExtract>
commonpath = os.path.commonpath(test_label_files)
for f in tqdm.tqdm(test_label_files):
out = os.path.join(os.path.join(out_dir, 'test'), os.path.relpath(f, commonpath))
os.makedirs(os.path.dirname(out), exist_ok=True)
src = cv2.imread(f)
src = src[:, :, 0]
dst = np.zeros(src.shape, src.dtype)
if binary:
dst[src != 0] = 1
else:
dst[src == 70] = 1
dst[src == 160] = 2
cv2.imwrite(out, dst)
</DeepExtract>
|
def preprocess_test(out_dir, temp_dir=None):
if temp_dir is None:
temp_dir = os.path.join(out_dir, 'temp')
test_image_url = 'http://opencas.webarchiv.kit.edu/data/endovis15_ins/Segmentation_Rigid_Testing_Revision.zip'
test_image_zip = os.path.join(temp_dir, os.path.basename(test_image_url))
test_image_dir = os.path.join(temp_dir, 'test', 'image')
os.makedirs(os.path.dirname(test_image_zip), exist_ok=True)
if not os.path.exists(test_image_zip):
with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, ncols=80) as t:
urllib.request.urlretrieve(test_image_url, test_image_zip, reporthook=my_hook(t))
os.makedirs(os.path.dirname(test_image_dir), exist_ok=True)
with zipfile.ZipFile(test_image_zip) as existing_zip:
existing_zip.extractall(test_image_dir)
test_label_url = 'http://opencas.webarchiv.kit.edu/data/endovis15_ins/Segmentation_Rigid_Testing_GT.zip'
test_label_zip = os.path.join(temp_dir, os.path.basename(test_label_url))
test_label_dir = os.path.join(temp_dir, 'test', 'label')
os.makedirs(os.path.dirname(test_label_zip), exist_ok=True)
if not os.path.exists(test_label_zip):
with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, ncols=80) as t:
urllib.request.urlretrieve(test_label_url, test_label_zip, reporthook=my_hook(t))
os.makedirs(os.path.dirname(test_label_dir), exist_ok=True)
with zipfile.ZipFile(test_label_zip) as existing_zip:
existing_zip.extractall(test_label_dir)
test_image_files = glob.glob(os.path.join(test_image_dir, '**', '*_raw.png'), recursive=True)
print('# test images:', len(test_image_files))
commonpath = os.path.commonpath(test_image_files)
for f in tqdm.tqdm(test_image_files):
out = os.path.join(os.path.join(out_dir, 'test'), os.path.relpath(f, commonpath))
os.makedirs(os.path.dirname(out), exist_ok=True)
copyfile(f, out)
test_label_files = glob.glob(os.path.join(test_label_dir, '**', '*_class.png'), recursive=True)
print('# test labels:', len(test_label_files))
commonpath = os.path.commonpath(test_label_files)
for f in tqdm.tqdm(test_label_files):
out = os.path.join(os.path.join(out_dir, 'test'), os.path.relpath(f, commonpath))
os.makedirs(os.path.dirname(out), exist_ok=True)
src = cv2.imread(f)
src = src[:, :, 0]
dst = np.zeros(src.shape, src.dtype)
if binary:
dst[src != 0] = 1
else:
dst[src == 70] = 1
dst[src == 160] = 2
cv2.imwrite(out, dst)
|
bayesian_unet
|
positive
|
def twoDimMatrix(self):
localctx = BraketPragmasParser.TwoDimMatrixContext(self, self._ctx, self.state)
<DeepExtract>
if hasattr(localctx, 'enterBraketPragma'):
localctx.enterBraketPragma(self)
</DeepExtract>
self._la = 0
try:
self.enterOuterAlt(localctx, 1)
self.state = 192
self.match(BraketPragmasParser.LBRACKET)
self.state = 193
<DeepExtract>
localctx = BraketPragmasParser.RowContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_row)
self._la = 0
try:
self.enterOuterAlt(localctx, 1)
self.state = 203
self.match(BraketPragmasParser.LBRACKET)
self.state = 204
self.complexNumber()
self.state = 209
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la == BraketPragmasParser.COMMA:
self.state = 205
self.match(BraketPragmasParser.COMMA)
self.state = 206
self.complexNumber()
self.state = 211
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 212
self.match(BraketPragmasParser.RBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
</DeepExtract>
self.state = 198
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la == BraketPragmasParser.COMMA:
self.state = 194
self.match(BraketPragmasParser.COMMA)
self.state = 195
<DeepExtract>
localctx = BraketPragmasParser.RowContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_row)
self._la = 0
try:
self.enterOuterAlt(localctx, 1)
self.state = 203
self.match(BraketPragmasParser.LBRACKET)
self.state = 204
self.complexNumber()
self.state = 209
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la == BraketPragmasParser.COMMA:
self.state = 205
self.match(BraketPragmasParser.COMMA)
self.state = 206
self.complexNumber()
self.state = 211
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 212
self.match(BraketPragmasParser.RBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
</DeepExtract>
self.state = 200
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 201
self.match(BraketPragmasParser.RBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
<DeepExtract>
if hasattr(listener, 'exitBraketPragma'):
listener.exitBraketPragma(self)
</DeepExtract>
return localctx
|
def twoDimMatrix(self):
localctx = BraketPragmasParser.TwoDimMatrixContext(self, self._ctx, self.state)
if hasattr(localctx, 'enterBraketPragma'):
localctx.enterBraketPragma(self)
self._la = 0
try:
self.enterOuterAlt(localctx, 1)
self.state = 192
self.match(BraketPragmasParser.LBRACKET)
self.state = 193
localctx = BraketPragmasParser.RowContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_row)
self._la = 0
try:
self.enterOuterAlt(localctx, 1)
self.state = 203
self.match(BraketPragmasParser.LBRACKET)
self.state = 204
self.complexNumber()
self.state = 209
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la == BraketPragmasParser.COMMA:
self.state = 205
self.match(BraketPragmasParser.COMMA)
self.state = 206
self.complexNumber()
self.state = 211
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 212
self.match(BraketPragmasParser.RBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
self.state = 198
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la == BraketPragmasParser.COMMA:
self.state = 194
self.match(BraketPragmasParser.COMMA)
self.state = 195
localctx = BraketPragmasParser.RowContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_row)
self._la = 0
try:
self.enterOuterAlt(localctx, 1)
self.state = 203
self.match(BraketPragmasParser.LBRACKET)
self.state = 204
self.complexNumber()
self.state = 209
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la == BraketPragmasParser.COMMA:
self.state = 205
self.match(BraketPragmasParser.COMMA)
self.state = 206
self.complexNumber()
self.state = 211
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 212
self.match(BraketPragmasParser.RBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
self.state = 200
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 201
self.match(BraketPragmasParser.RBRACKET)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
if hasattr(listener, 'exitBraketPragma'):
listener.exitBraketPragma(self)
return localctx
|
amazon-braket-default-simulator-python
|
positive
|
def __cut_all(self, sentence):
<DeepExtract>
self.check_initialized()
DAG = {}
N = len(sentence)
for k in xrange(N):
tmplist = []
i = k
frag = sentence[k]
while i < N and frag in self.FREQ:
if self.FREQ[frag]:
tmplist.append(i)
i += 1
frag = sentence[k:i + 1]
if not tmplist:
tmplist.append(k)
DAG[k] = tmplist
dag = DAG
</DeepExtract>
old_j = -1
for (k, L) in iteritems(dag):
if len(L) == 1 and k > old_j:
yield sentence[k:L[0] + 1]
old_j = L[0]
else:
for j in L:
if j > k:
yield sentence[k:j + 1]
old_j = j
|
def __cut_all(self, sentence):
self.check_initialized()
DAG = {}
N = len(sentence)
for k in xrange(N):
tmplist = []
i = k
frag = sentence[k]
while i < N and frag in self.FREQ:
if self.FREQ[frag]:
tmplist.append(i)
i += 1
frag = sentence[k:i + 1]
if not tmplist:
tmplist.append(k)
DAG[k] = tmplist
dag = DAG
old_j = -1
for (k, L) in iteritems(dag):
if len(L) == 1 and k > old_j:
yield sentence[k:L[0] + 1]
old_j = L[0]
else:
for j in L:
if j > k:
yield sentence[k:j + 1]
old_j = j
|
Chinese-clinical-NER
|
positive
|
def test_optimisation_problem2():
ind = 1
nuisance = 1
parameter_names = ['x1', 'x2']
target_name = 'y'
dim = 2
n1 = 20
bounds = [(-10, 10), (-10, 10)]
def f(x):
y = np.array([x[0], x[1]])
return y
def objective(x):
<DeepExtract>
rv = ss.multivariate_normal(mean, hess)
y1 = -rv.pdf(x)
</DeepExtract>
return np.sqrt((y1[0] - 1) ** 2 + (y1[1] - 4) ** 2)
mean = np.array([0.0, 0.0])
hess = np.array([[1.0, 0.7], [0.7, 1.0]])
prior = ss.multivariate_normal(mean, hess)
opt_prob = OptimisationProblem(ind, nuisance, parameter_names, target_name, objective, dim, prior, n1, bounds)
x0 = np.array([-10, -10])
solved = opt_prob.solve_gradients(x0=x0)
assert solved
assert np.allclose(opt_prob.result.x_min, np.array([1, 4]), atol=0.1)
opt_prob.build_region(eps_region=0.2)
opt_prob.visualize_region()
|
def test_optimisation_problem2():
ind = 1
nuisance = 1
parameter_names = ['x1', 'x2']
target_name = 'y'
dim = 2
n1 = 20
bounds = [(-10, 10), (-10, 10)]
def f(x):
y = np.array([x[0], x[1]])
return y
def objective(x):
rv = ss.multivariate_normal(mean, hess)
y1 = -rv.pdf(x)
return np.sqrt((y1[0] - 1) ** 2 + (y1[1] - 4) ** 2)
mean = np.array([0.0, 0.0])
hess = np.array([[1.0, 0.7], [0.7, 1.0]])
prior = ss.multivariate_normal(mean, hess)
opt_prob = OptimisationProblem(ind, nuisance, parameter_names, target_name, objective, dim, prior, n1, bounds)
x0 = np.array([-10, -10])
solved = opt_prob.solve_gradients(x0=x0)
assert solved
assert np.allclose(opt_prob.result.x_min, np.array([1, 4]), atol=0.1)
opt_prob.build_region(eps_region=0.2)
opt_prob.visualize_region()
|
elfi
|
positive
|
def join_network(self, seeds=[]):
<DeepExtract>
thread = Thread(target=self._listen)
thread.daemon = True
thread.start()
</DeepExtract>
for seed in seeds:
<DeepExtract>
uri = {'uri': seed}['uri']
self.log('init peer %s' % {'uri': seed})
if not uri in self._peers:
self._peers[uri] = PeerConnection(uri, self)
</DeepExtract>
|
def join_network(self, seeds=[]):
thread = Thread(target=self._listen)
thread.daemon = True
thread.start()
for seed in seeds:
uri = {'uri': seed}['uri']
self.log('init peer %s' % {'uri': seed})
if not uri in self._peers:
self._peers[uri] = PeerConnection(uri, self)
|
DarkWallet
|
positive
|
def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h, gtboxes_batch_r, gt_encode_label, gpu_id=0):
if self.is_training:
gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [-1, 5])
gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)
gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [-1, 6])
gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)
gt_encode_label = tf.reshape(gt_encode_label, [-1, self.coding_len])
gt_encode_label = tf.cast(gt_encode_label, tf.float32)
<DeepExtract>
if self.base_network_name.startswith('resnet_v1'):
feature_pyramid = resnet.resnet_base(input_img_batch, scope_name=self.base_network_name, is_training=self.is_training)
elif self.base_network_name in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
feature_pyramid = resnet_gluoncv.resnet_base(input_img_batch, scope_name=self.base_network_name, is_training=self.is_training)
elif self.base_network_name.startswith('MobilenetV2'):
feature_pyramid = mobilenet_v2.mobilenetv2_base(input_img_batch, is_training=self.is_training)
else:
raise ValueError('Sry, we only support resnet, mobilenet_v2')
</DeepExtract>
<DeepExtract>
rpn_delta_boxes_list = []
rpn_scores_list = []
rpn_probs_list = []
with tf.variable_scope('rpn_net'):
with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):
for level in cfgs.LEVEL:
if cfgs.SHARE_NET:
reuse_flag = None if level == 'P3' else True
scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'rpn_classification', 'rpn_regression']
else:
reuse_flag = None
scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level, 'rpn_classification_' + level, 'rpn_regression_' + level]
(rpn_box_scores, rpn_box_probs) = self.rpn_cls_net(feature_pyramid[level], scope_list, reuse_flag, level)
rpn_delta_boxes = self.rpn_reg_net(feature_pyramid[level], scope_list, reuse_flag, level)
rpn_scores_list.append(rpn_box_scores)
rpn_probs_list.append(rpn_box_probs)
rpn_delta_boxes_list.append(rpn_delta_boxes)
(rpn_box_pred_list, rpn_cls_score_list, rpn_cls_prob_list) = (rpn_delta_boxes_list, rpn_scores_list, rpn_probs_list)
</DeepExtract>
<DeepExtract>
with tf.variable_scope('make_anchors'):
anchor_list = []
level_list = cfgs.LEVEL
with tf.name_scope('make_anchors_all_level'):
for (level, base_anchor_size, stride) in zip(level_list, cfgs.BASE_ANCHOR_SIZE_LIST, cfgs.ANCHOR_STRIDE):
'\n (level, base_anchor_size) tuple:\n (P3, 32), (P4, 64), (P5, 128), (P6, 256), (P7, 512)\n '
(featuremap_height, featuremap_width) = (tf.shape(feature_pyramid[level])[1], tf.shape(feature_pyramid[level])[2])
featuremap_height = tf.cast(featuremap_height, tf.float32)
featuremap_width = tf.cast(featuremap_width, tf.float32)
if self.method == 'H':
tmp_anchors = tf.py_func(generate_anchors.generate_anchors_pre, inp=[featuremap_height, featuremap_width, stride, np.array(cfgs.ANCHOR_SCALES) * stride, cfgs.ANCHOR_RATIOS, 4.0], Tout=[tf.float32])
tmp_anchors = tf.reshape(tmp_anchors, [-1, 4])
else:
tmp_anchors = generate_rotate_anchors.make_anchors(base_anchor_size=base_anchor_size, anchor_scales=cfgs.ANCHOR_SCALES, anchor_ratios=cfgs.ANCHOR_RATIOS, anchor_angles=cfgs.ANCHOR_ANGLES, featuremap_height=featuremap_height, featuremap_width=featuremap_width, stride=stride)
tmp_anchors = tf.reshape(tmp_anchors, [-1, 5])
anchor_list.append(tmp_anchors)
anchor_list = anchor_list
</DeepExtract>
rpn_box_pred = tf.concat(rpn_box_pred_list, axis=0)
rpn_cls_score = tf.concat(rpn_cls_score_list, axis=0)
anchors = tf.concat(anchor_list, axis=0)
if self.is_training:
with tf.variable_scope('build_loss'):
(labels, target_delta, anchor_states, target_boxes) = tf.py_func(func=anchor_target_layer, inp=[gtboxes_batch_h, gtboxes_batch_r, anchors], Tout=[tf.float32, tf.float32, tf.float32, tf.float32])
if self.method == 'H':
<DeepExtract>
positive_anchor_indices = tf.reshape(tf.where(tf.greater_equal(anchor_states, 1)), [-1])
positive_anchor = tf.gather(anchors, positive_anchor_indices)
pos_in_img = show_box_in_tensor.only_draw_boxes(img_batch=input_img_batch, boxes=positive_anchor, method=0)
tf.summary.image('positive_anchor', pos_in_img)
</DeepExtract>
else:
<DeepExtract>
positive_anchor_indices = tf.reshape(tf.where(tf.greater_equal(anchor_states, 1)), [-1])
positive_anchor = tf.gather(anchors, positive_anchor_indices)
pos_in_img = show_box_in_tensor.only_draw_boxes(img_batch=input_img_batch, boxes=positive_anchor, method=1)
tf.summary.image('positive_anchor', pos_in_img)
</DeepExtract>
cls_loss = losses.focal_loss(labels, rpn_cls_score, anchor_states)
if cfgs.USE_IOU_FACTOR:
reg_loss = losses.iou_smooth_l1_loss_(target_delta, rpn_box_pred, anchor_states, target_boxes, anchors)
else:
reg_loss = losses.smooth_l1_loss(target_delta, rpn_box_pred, anchor_states)
self.losses_dict['cls_loss'] = cls_loss * cfgs.CLS_WEIGHT
self.losses_dict['reg_loss'] = reg_loss * cfgs.REG_WEIGHT
with tf.variable_scope('refine_feature_pyramid'):
refine_feature_pyramid = {}
refine_boxes_list = []
for (box_pred, cls_prob, anchor, stride, level) in zip(rpn_box_pred_list, rpn_cls_prob_list, anchor_list, cfgs.ANCHOR_STRIDE, cfgs.LEVEL):
box_pred = tf.reshape(box_pred, [-1, self.num_anchors_per_location, 5])
anchor = tf.reshape(anchor, [-1, self.num_anchors_per_location, 5 if self.method == 'R' else 4])
cls_prob = tf.reshape(cls_prob, [-1, self.num_anchors_per_location, cfgs.CLASS_NUM])
cls_max_prob = tf.reduce_max(cls_prob, axis=-1)
box_pred_argmax = tf.cast(tf.reshape(tf.argmax(cls_max_prob, axis=-1), [-1, 1]), tf.int32)
indices = tf.cast(tf.cumsum(tf.ones_like(box_pred_argmax), axis=0), tf.int32) - tf.constant(1, tf.int32)
indices = tf.concat([indices, box_pred_argmax], axis=-1)
box_pred_filter = tf.reshape(tf.gather_nd(box_pred, indices), [-1, 5])
anchor_filter = tf.reshape(tf.gather_nd(anchor, indices), [-1, 5 if self.method == 'R' else 4])
if cfgs.METHOD == 'H':
x_c = (anchor_filter[:, 2] + anchor_filter[:, 0]) / 2
y_c = (anchor_filter[:, 3] + anchor_filter[:, 1]) / 2
h = anchor_filter[:, 2] - anchor_filter[:, 0] + 1
w = anchor_filter[:, 3] - anchor_filter[:, 1] + 1
theta = -90 * tf.ones_like(x_c)
anchor_filter = tf.transpose(tf.stack([x_c, y_c, w, h, theta]))
boxes_filter = bbox_transform.rbbox_transform_inv(boxes=anchor_filter, deltas=box_pred_filter)
refine_boxes_list.append(boxes_filter)
center_point = boxes_filter[:, :2] / stride
<DeepExtract>
(h, w) = (tf.cast(tf.shape(feature_pyramid[level])[1], tf.int32), tf.cast(tf.shape(feature_pyramid[level])[2], tf.int32))
xmin = tf.maximum(0.0, tf.floor(center_point[:, 0]))
xmin = tf.minimum(tf.cast(w - 1, tf.float32), tf.ceil(xmin))
ymin = tf.maximum(0.0, tf.floor(center_point[:, 1]))
ymin = tf.minimum(tf.cast(h - 1, tf.float32), tf.ceil(ymin))
xmax = tf.minimum(tf.cast(w - 1, tf.float32), tf.ceil(center_point[:, 0]))
xmax = tf.maximum(0.0, tf.floor(xmax))
ymax = tf.minimum(tf.cast(h - 1, tf.float32), tf.ceil(center_point[:, 1]))
ymax = tf.maximum(0.0, tf.floor(ymax))
left_top = tf.cast(tf.transpose(tf.stack([ymin, xmin], axis=0)), tf.int32)
right_bottom = tf.cast(tf.transpose(tf.stack([ymax, xmax], axis=0)), tf.int32)
left_bottom = tf.cast(tf.transpose(tf.stack([ymax, xmin], axis=0)), tf.int32)
right_top = tf.cast(tf.transpose(tf.stack([ymin, xmax], axis=0)), tf.int32)
feature = feature_pyramid[level]
left_top_feature = tf.gather_nd(tf.squeeze(feature), left_top)
right_bottom_feature = tf.gather_nd(tf.squeeze(feature), right_bottom)
left_bottom_feature = tf.gather_nd(tf.squeeze(feature), left_bottom)
right_top_feature = tf.gather_nd(tf.squeeze(feature), right_top)
refine_feature = right_bottom_feature * tf.tile(tf.reshape(tf.abs((center_point[:, 0] - xmin) * (center_point[:, 1] - ymin)), [-1, 1]), [1, cfgs.FPN_CHANNEL]) + left_top_feature * tf.tile(tf.reshape(tf.abs((xmax - center_point[:, 0]) * (ymax - center_point[:, 1])), [-1, 1]), [1, cfgs.FPN_CHANNEL]) + right_top_feature * tf.tile(tf.reshape(tf.abs((center_point[:, 0] - xmin) * (ymax - center_point[:, 1])), [-1, 1]), [1, cfgs.FPN_CHANNEL]) + left_bottom_feature * tf.tile(tf.reshape(tf.abs((xmax - center_point[:, 0]) * (center_point[:, 1] - ymin)), [-1, 1]), [1, cfgs.FPN_CHANNEL])
refine_feature = tf.reshape(refine_feature, [1, tf.cast(h, tf.int32), tf.cast(w, tf.int32), cfgs.FPN_CHANNEL])
refine_feature_pyramid[level] = refine_feature + feature
</DeepExtract>
<DeepExtract>
refine_delta_boxes_list = []
refine_scores_list = []
refine_probs_list = []
refine_angle_cls_list = []
with tf.variable_scope('refine_net'):
with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):
for level in cfgs.LEVEL:
if cfgs.SHARE_NET:
reuse_flag = None if level == 'P3' else True
scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'refine_classification', 'refine_regression', 'refine_angle_cls']
else:
reuse_flag = None
scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level, 'refine_classification_' + level, 'refine_regression_' + level, 'refine_angle_cls_' + level]
(refine_box_scores, refine_box_probs) = self.refine_cls_net(refine_feature_pyramid[level], scope_list, reuse_flag, level)
(refine_delta_boxes, refine_angle_cls) = self.refine_reg_net(refine_feature_pyramid[level], scope_list, reuse_flag, level)
refine_scores_list.append(refine_box_scores)
refine_probs_list.append(refine_box_probs)
refine_delta_boxes_list.append(refine_delta_boxes)
refine_angle_cls_list.append(refine_angle_cls)
(refine_box_pred_list, refine_cls_score_list, refine_cls_prob_list, refine_angle_cls_list) = (refine_delta_boxes_list, refine_scores_list, refine_probs_list, refine_angle_cls_list)
</DeepExtract>
refine_box_pred = tf.concat(refine_box_pred_list, axis=0)
refine_cls_score = tf.concat(refine_cls_score_list, axis=0)
refine_cls_prob = tf.concat(refine_cls_prob_list, axis=0)
refine_angle_cls = tf.concat(refine_angle_cls_list, axis=0)
refine_boxes = tf.concat(refine_boxes_list, axis=0)
if self.is_training:
with tf.variable_scope('build_refine_loss'):
(refine_labels, refine_target_delta, refine_box_states, refine_target_boxes, refine_target_encode_label) = tf.py_func(func=refinebox_target_layer, inp=[gtboxes_batch_r, gt_encode_label, refine_boxes, cfgs.REFINE_IOU_POSITIVE_THRESHOLD[0], cfgs.REFINE_IOU_NEGATIVE_THRESHOLD[0], gpu_id], Tout=[tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])
<DeepExtract>
positive_anchor_indices = tf.reshape(tf.where(tf.greater_equal(refine_box_states, 1)), [-1])
positive_anchor = tf.gather(refine_boxes, positive_anchor_indices)
pos_in_img = show_box_in_tensor.only_draw_boxes(img_batch=input_img_batch, boxes=positive_anchor, method=1)
tf.summary.image('positive_anchor', pos_in_img)
</DeepExtract>
refine_cls_loss = losses.focal_loss(refine_labels, refine_cls_score, refine_box_states)
refine_reg_loss = losses.smooth_l1_loss(refine_target_delta, refine_box_pred, refine_box_states)
angle_cls_loss = losses_dcl.angle_cls_period_focal_loss(refine_target_encode_label, refine_angle_cls, refine_box_states, refine_target_boxes, decimal_weight=cfgs.DATASET_NAME.startswith('DOTA'))
self.losses_dict['refine_cls_loss'] = refine_cls_loss * cfgs.CLS_WEIGHT
self.losses_dict['refine_reg_loss'] = refine_reg_loss * cfgs.REG_WEIGHT
self.losses_dict['angle_cls_loss'] = angle_cls_loss * cfgs.ANGLE_WEIGHT
with tf.variable_scope('postprocess_detctions'):
(scores, category, boxes_angle) = postprocess_detctions(refine_bbox_pred=refine_box_pred, refine_cls_prob=refine_cls_prob, refine_angle_prob=tf.sigmoid(refine_angle_cls), refine_boxes=refine_boxes, is_training=self.is_training, gpu_id=gpu_id)
scores = tf.stop_gradient(scores)
category = tf.stop_gradient(category)
boxes_angle = tf.stop_gradient(boxes_angle)
if self.is_training:
return (scores, category, boxes_angle, self.losses_dict)
else:
return (scores, category, boxes_angle)
|
def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h, gtboxes_batch_r, gt_encode_label, gpu_id=0):
if self.is_training:
gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [-1, 5])
gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)
gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [-1, 6])
gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)
gt_encode_label = tf.reshape(gt_encode_label, [-1, self.coding_len])
gt_encode_label = tf.cast(gt_encode_label, tf.float32)
if self.base_network_name.startswith('resnet_v1'):
feature_pyramid = resnet.resnet_base(input_img_batch, scope_name=self.base_network_name, is_training=self.is_training)
elif self.base_network_name in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
feature_pyramid = resnet_gluoncv.resnet_base(input_img_batch, scope_name=self.base_network_name, is_training=self.is_training)
elif self.base_network_name.startswith('MobilenetV2'):
feature_pyramid = mobilenet_v2.mobilenetv2_base(input_img_batch, is_training=self.is_training)
else:
raise ValueError('Sry, we only support resnet, mobilenet_v2')
rpn_delta_boxes_list = []
rpn_scores_list = []
rpn_probs_list = []
with tf.variable_scope('rpn_net'):
with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):
for level in cfgs.LEVEL:
if cfgs.SHARE_NET:
reuse_flag = None if level == 'P3' else True
scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'rpn_classification', 'rpn_regression']
else:
reuse_flag = None
scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level, 'rpn_classification_' + level, 'rpn_regression_' + level]
(rpn_box_scores, rpn_box_probs) = self.rpn_cls_net(feature_pyramid[level], scope_list, reuse_flag, level)
rpn_delta_boxes = self.rpn_reg_net(feature_pyramid[level], scope_list, reuse_flag, level)
rpn_scores_list.append(rpn_box_scores)
rpn_probs_list.append(rpn_box_probs)
rpn_delta_boxes_list.append(rpn_delta_boxes)
(rpn_box_pred_list, rpn_cls_score_list, rpn_cls_prob_list) = (rpn_delta_boxes_list, rpn_scores_list, rpn_probs_list)
with tf.variable_scope('make_anchors'):
anchor_list = []
level_list = cfgs.LEVEL
with tf.name_scope('make_anchors_all_level'):
for (level, base_anchor_size, stride) in zip(level_list, cfgs.BASE_ANCHOR_SIZE_LIST, cfgs.ANCHOR_STRIDE):
'\n (level, base_anchor_size) tuple:\n (P3, 32), (P4, 64), (P5, 128), (P6, 256), (P7, 512)\n '
(featuremap_height, featuremap_width) = (tf.shape(feature_pyramid[level])[1], tf.shape(feature_pyramid[level])[2])
featuremap_height = tf.cast(featuremap_height, tf.float32)
featuremap_width = tf.cast(featuremap_width, tf.float32)
if self.method == 'H':
tmp_anchors = tf.py_func(generate_anchors.generate_anchors_pre, inp=[featuremap_height, featuremap_width, stride, np.array(cfgs.ANCHOR_SCALES) * stride, cfgs.ANCHOR_RATIOS, 4.0], Tout=[tf.float32])
tmp_anchors = tf.reshape(tmp_anchors, [-1, 4])
else:
tmp_anchors = generate_rotate_anchors.make_anchors(base_anchor_size=base_anchor_size, anchor_scales=cfgs.ANCHOR_SCALES, anchor_ratios=cfgs.ANCHOR_RATIOS, anchor_angles=cfgs.ANCHOR_ANGLES, featuremap_height=featuremap_height, featuremap_width=featuremap_width, stride=stride)
tmp_anchors = tf.reshape(tmp_anchors, [-1, 5])
anchor_list.append(tmp_anchors)
anchor_list = anchor_list
rpn_box_pred = tf.concat(rpn_box_pred_list, axis=0)
rpn_cls_score = tf.concat(rpn_cls_score_list, axis=0)
anchors = tf.concat(anchor_list, axis=0)
if self.is_training:
with tf.variable_scope('build_loss'):
(labels, target_delta, anchor_states, target_boxes) = tf.py_func(func=anchor_target_layer, inp=[gtboxes_batch_h, gtboxes_batch_r, anchors], Tout=[tf.float32, tf.float32, tf.float32, tf.float32])
if self.method == 'H':
positive_anchor_indices = tf.reshape(tf.where(tf.greater_equal(anchor_states, 1)), [-1])
positive_anchor = tf.gather(anchors, positive_anchor_indices)
pos_in_img = show_box_in_tensor.only_draw_boxes(img_batch=input_img_batch, boxes=positive_anchor, method=0)
tf.summary.image('positive_anchor', pos_in_img)
else:
positive_anchor_indices = tf.reshape(tf.where(tf.greater_equal(anchor_states, 1)), [-1])
positive_anchor = tf.gather(anchors, positive_anchor_indices)
pos_in_img = show_box_in_tensor.only_draw_boxes(img_batch=input_img_batch, boxes=positive_anchor, method=1)
tf.summary.image('positive_anchor', pos_in_img)
cls_loss = losses.focal_loss(labels, rpn_cls_score, anchor_states)
if cfgs.USE_IOU_FACTOR:
reg_loss = losses.iou_smooth_l1_loss_(target_delta, rpn_box_pred, anchor_states, target_boxes, anchors)
else:
reg_loss = losses.smooth_l1_loss(target_delta, rpn_box_pred, anchor_states)
self.losses_dict['cls_loss'] = cls_loss * cfgs.CLS_WEIGHT
self.losses_dict['reg_loss'] = reg_loss * cfgs.REG_WEIGHT
with tf.variable_scope('refine_feature_pyramid'):
refine_feature_pyramid = {}
refine_boxes_list = []
for (box_pred, cls_prob, anchor, stride, level) in zip(rpn_box_pred_list, rpn_cls_prob_list, anchor_list, cfgs.ANCHOR_STRIDE, cfgs.LEVEL):
box_pred = tf.reshape(box_pred, [-1, self.num_anchors_per_location, 5])
anchor = tf.reshape(anchor, [-1, self.num_anchors_per_location, 5 if self.method == 'R' else 4])
cls_prob = tf.reshape(cls_prob, [-1, self.num_anchors_per_location, cfgs.CLASS_NUM])
cls_max_prob = tf.reduce_max(cls_prob, axis=-1)
box_pred_argmax = tf.cast(tf.reshape(tf.argmax(cls_max_prob, axis=-1), [-1, 1]), tf.int32)
indices = tf.cast(tf.cumsum(tf.ones_like(box_pred_argmax), axis=0), tf.int32) - tf.constant(1, tf.int32)
indices = tf.concat([indices, box_pred_argmax], axis=-1)
box_pred_filter = tf.reshape(tf.gather_nd(box_pred, indices), [-1, 5])
anchor_filter = tf.reshape(tf.gather_nd(anchor, indices), [-1, 5 if self.method == 'R' else 4])
if cfgs.METHOD == 'H':
x_c = (anchor_filter[:, 2] + anchor_filter[:, 0]) / 2
y_c = (anchor_filter[:, 3] + anchor_filter[:, 1]) / 2
h = anchor_filter[:, 2] - anchor_filter[:, 0] + 1
w = anchor_filter[:, 3] - anchor_filter[:, 1] + 1
theta = -90 * tf.ones_like(x_c)
anchor_filter = tf.transpose(tf.stack([x_c, y_c, w, h, theta]))
boxes_filter = bbox_transform.rbbox_transform_inv(boxes=anchor_filter, deltas=box_pred_filter)
refine_boxes_list.append(boxes_filter)
center_point = boxes_filter[:, :2] / stride
(h, w) = (tf.cast(tf.shape(feature_pyramid[level])[1], tf.int32), tf.cast(tf.shape(feature_pyramid[level])[2], tf.int32))
xmin = tf.maximum(0.0, tf.floor(center_point[:, 0]))
xmin = tf.minimum(tf.cast(w - 1, tf.float32), tf.ceil(xmin))
ymin = tf.maximum(0.0, tf.floor(center_point[:, 1]))
ymin = tf.minimum(tf.cast(h - 1, tf.float32), tf.ceil(ymin))
xmax = tf.minimum(tf.cast(w - 1, tf.float32), tf.ceil(center_point[:, 0]))
xmax = tf.maximum(0.0, tf.floor(xmax))
ymax = tf.minimum(tf.cast(h - 1, tf.float32), tf.ceil(center_point[:, 1]))
ymax = tf.maximum(0.0, tf.floor(ymax))
left_top = tf.cast(tf.transpose(tf.stack([ymin, xmin], axis=0)), tf.int32)
right_bottom = tf.cast(tf.transpose(tf.stack([ymax, xmax], axis=0)), tf.int32)
left_bottom = tf.cast(tf.transpose(tf.stack([ymax, xmin], axis=0)), tf.int32)
right_top = tf.cast(tf.transpose(tf.stack([ymin, xmax], axis=0)), tf.int32)
feature = feature_pyramid[level]
left_top_feature = tf.gather_nd(tf.squeeze(feature), left_top)
right_bottom_feature = tf.gather_nd(tf.squeeze(feature), right_bottom)
left_bottom_feature = tf.gather_nd(tf.squeeze(feature), left_bottom)
right_top_feature = tf.gather_nd(tf.squeeze(feature), right_top)
refine_feature = right_bottom_feature * tf.tile(tf.reshape(tf.abs((center_point[:, 0] - xmin) * (center_point[:, 1] - ymin)), [-1, 1]), [1, cfgs.FPN_CHANNEL]) + left_top_feature * tf.tile(tf.reshape(tf.abs((xmax - center_point[:, 0]) * (ymax - center_point[:, 1])), [-1, 1]), [1, cfgs.FPN_CHANNEL]) + right_top_feature * tf.tile(tf.reshape(tf.abs((center_point[:, 0] - xmin) * (ymax - center_point[:, 1])), [-1, 1]), [1, cfgs.FPN_CHANNEL]) + left_bottom_feature * tf.tile(tf.reshape(tf.abs((xmax - center_point[:, 0]) * (center_point[:, 1] - ymin)), [-1, 1]), [1, cfgs.FPN_CHANNEL])
refine_feature = tf.reshape(refine_feature, [1, tf.cast(h, tf.int32), tf.cast(w, tf.int32), cfgs.FPN_CHANNEL])
refine_feature_pyramid[level] = refine_feature + feature
refine_delta_boxes_list = []
refine_scores_list = []
refine_probs_list = []
refine_angle_cls_list = []
with tf.variable_scope('refine_net'):
with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):
for level in cfgs.LEVEL:
if cfgs.SHARE_NET:
reuse_flag = None if level == 'P3' else True
scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'refine_classification', 'refine_regression', 'refine_angle_cls']
else:
reuse_flag = None
scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level, 'refine_classification_' + level, 'refine_regression_' + level, 'refine_angle_cls_' + level]
(refine_box_scores, refine_box_probs) = self.refine_cls_net(refine_feature_pyramid[level], scope_list, reuse_flag, level)
(refine_delta_boxes, refine_angle_cls) = self.refine_reg_net(refine_feature_pyramid[level], scope_list, reuse_flag, level)
refine_scores_list.append(refine_box_scores)
refine_probs_list.append(refine_box_probs)
refine_delta_boxes_list.append(refine_delta_boxes)
refine_angle_cls_list.append(refine_angle_cls)
(refine_box_pred_list, refine_cls_score_list, refine_cls_prob_list, refine_angle_cls_list) = (refine_delta_boxes_list, refine_scores_list, refine_probs_list, refine_angle_cls_list)
refine_box_pred = tf.concat(refine_box_pred_list, axis=0)
refine_cls_score = tf.concat(refine_cls_score_list, axis=0)
refine_cls_prob = tf.concat(refine_cls_prob_list, axis=0)
refine_angle_cls = tf.concat(refine_angle_cls_list, axis=0)
refine_boxes = tf.concat(refine_boxes_list, axis=0)
if self.is_training:
with tf.variable_scope('build_refine_loss'):
(refine_labels, refine_target_delta, refine_box_states, refine_target_boxes, refine_target_encode_label) = tf.py_func(func=refinebox_target_layer, inp=[gtboxes_batch_r, gt_encode_label, refine_boxes, cfgs.REFINE_IOU_POSITIVE_THRESHOLD[0], cfgs.REFINE_IOU_NEGATIVE_THRESHOLD[0], gpu_id], Tout=[tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])
positive_anchor_indices = tf.reshape(tf.where(tf.greater_equal(refine_box_states, 1)), [-1])
positive_anchor = tf.gather(refine_boxes, positive_anchor_indices)
pos_in_img = show_box_in_tensor.only_draw_boxes(img_batch=input_img_batch, boxes=positive_anchor, method=1)
tf.summary.image('positive_anchor', pos_in_img)
refine_cls_loss = losses.focal_loss(refine_labels, refine_cls_score, refine_box_states)
refine_reg_loss = losses.smooth_l1_loss(refine_target_delta, refine_box_pred, refine_box_states)
angle_cls_loss = losses_dcl.angle_cls_period_focal_loss(refine_target_encode_label, refine_angle_cls, refine_box_states, refine_target_boxes, decimal_weight=cfgs.DATASET_NAME.startswith('DOTA'))
self.losses_dict['refine_cls_loss'] = refine_cls_loss * cfgs.CLS_WEIGHT
self.losses_dict['refine_reg_loss'] = refine_reg_loss * cfgs.REG_WEIGHT
self.losses_dict['angle_cls_loss'] = angle_cls_loss * cfgs.ANGLE_WEIGHT
with tf.variable_scope('postprocess_detctions'):
(scores, category, boxes_angle) = postprocess_detctions(refine_bbox_pred=refine_box_pred, refine_cls_prob=refine_cls_prob, refine_angle_prob=tf.sigmoid(refine_angle_cls), refine_boxes=refine_boxes, is_training=self.is_training, gpu_id=gpu_id)
scores = tf.stop_gradient(scores)
category = tf.stop_gradient(category)
boxes_angle = tf.stop_gradient(boxes_angle)
if self.is_training:
return (scores, category, boxes_angle, self.losses_dict)
else:
return (scores, category, boxes_angle)
|
DCL_RetinaNet_Tensorflow
|
positive
|
def getChangableOptions(self):
<DeepExtract>
results = self.useroptiondb.conn.execute('SELECT name, value FROM option WHERE userid = ?', (self.userid,)).fetchall()
useropts = dict(((r[0], json.loads(r[1])) for r in results))
opts = self.useroptiondb.DEFAULTS.replace(useropts, on_error=self.delete_bad_option)
</DeepExtract>
visible_props = (p for p in opts.to_properties() if not p.hidden)
return cfg.from_list(visible_props).to_nested_dict()
|
def getChangableOptions(self):
results = self.useroptiondb.conn.execute('SELECT name, value FROM option WHERE userid = ?', (self.userid,)).fetchall()
useropts = dict(((r[0], json.loads(r[1])) for r in results))
opts = self.useroptiondb.DEFAULTS.replace(useropts, on_error=self.delete_bad_option)
visible_props = (p for p in opts.to_properties() if not p.hidden)
return cfg.from_list(visible_props).to_nested_dict()
|
cherrymusic
|
positive
|
def __collect_sample(ast, fd_index, args):
root = ast[fd_index]
if root['type'] != 'FunctionDef':
raise ValueError('Wrong node type.')
target = root['value']
<DeepExtract>
tnodes = __terminals(ast, fd_index, args)
tree_paths = []
for ((v_path, v_value), (u_path, u_value)) in itertools.combinations(iterable=tnodes, r=2):
(prefix, lca, suffix) = __merge_terminals2_paths(v_path, u_path)
if len(prefix) + 1 + len(suffix) <= args.max_path_length and abs(len(prefix) - len(suffix)) <= args.max_path_width:
path = prefix + [lca] + suffix
tree_path = (v_value, path, u_value)
tree_paths.append(tree_path)
tree_paths = tree_paths
</DeepExtract>
contexts = []
for tree_path in tree_paths:
(start, connector, finish) = tree_path
(start, finish) = (__delim_name(start), __delim_name(finish))
connector = '|'.join((ast[v]['type'] for v in connector))
context = f'{start},{connector},{finish}'
contexts.append(context)
if len(contexts) == 0:
return None
<DeepExtract>
if target not in {METHOD_NAME, NUM}:
def camel_case_split(identifier):
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
return [m.group(0) for m in matches]
blocks = []
for underscore_block in target.split('_'):
blocks.extend(camel_case_split(underscore_block))
target = '|'.join((block.lower() for block in blocks))
</DeepExtract>
context = ' '.join(contexts)
return f'{target} {context}'
|
def __collect_sample(ast, fd_index, args):
root = ast[fd_index]
if root['type'] != 'FunctionDef':
raise ValueError('Wrong node type.')
target = root['value']
tnodes = __terminals(ast, fd_index, args)
tree_paths = []
for ((v_path, v_value), (u_path, u_value)) in itertools.combinations(iterable=tnodes, r=2):
(prefix, lca, suffix) = __merge_terminals2_paths(v_path, u_path)
if len(prefix) + 1 + len(suffix) <= args.max_path_length and abs(len(prefix) - len(suffix)) <= args.max_path_width:
path = prefix + [lca] + suffix
tree_path = (v_value, path, u_value)
tree_paths.append(tree_path)
tree_paths = tree_paths
contexts = []
for tree_path in tree_paths:
(start, connector, finish) = tree_path
(start, finish) = (__delim_name(start), __delim_name(finish))
connector = '|'.join((ast[v]['type'] for v in connector))
context = f'{start},{connector},{finish}'
contexts.append(context)
if len(contexts) == 0:
return None
if target not in {METHOD_NAME, NUM}:
def camel_case_split(identifier):
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
return [m.group(0) for m in matches]
blocks = []
for underscore_block in target.split('_'):
blocks.extend(camel_case_split(underscore_block))
target = '|'.join((block.lower() for block in blocks))
context = ' '.join(contexts)
return f'{target} {context}'
|
code-transformer
|
positive
|
def test_meta():
<DeepExtract>
rng = np.random.RandomState(123456)
(X_dsel, X_test, X_train, y_dsel, y_test, y_train) = load_dataset(rng)
pool_classifiers = AdaBoostClassifier(random_state=rng)
pool_classifiers.fit(X_train, y_train)
(pool_classifiers, X_dsel, y_dsel, X_test, y_test) = (pool_classifiers, X_dsel, y_dsel, X_test, y_test)
</DeepExtract>
meta_des = METADES(pool_classifiers)
meta_des.fit(X_dsel, y_dsel)
assert np.isclose(meta_des.score(X_test, y_test), 0.796969696969697)
|
def test_meta():
rng = np.random.RandomState(123456)
(X_dsel, X_test, X_train, y_dsel, y_test, y_train) = load_dataset(rng)
pool_classifiers = AdaBoostClassifier(random_state=rng)
pool_classifiers.fit(X_train, y_train)
(pool_classifiers, X_dsel, y_dsel, X_test, y_test) = (pool_classifiers, X_dsel, y_dsel, X_test, y_test)
meta_des = METADES(pool_classifiers)
meta_des.fit(X_dsel, y_dsel)
assert np.isclose(meta_des.score(X_test, y_test), 0.796969696969697)
|
DESlib
|
positive
|
def cimpl_consumer(self, conf=None):
"""
Returns a consumer bound to this cluster.
Args:
conf (dict): Consumer config overrides
Returns:
Consumer: A new Consumer instance
"""
<DeepExtract>
raise NotImplementedError('client_conf has not been implemented')
</DeepExtract>
if conf is not None:
consumer_conf.update(conf)
return Consumer(consumer_conf)
|
def cimpl_consumer(self, conf=None):
"""
Returns a consumer bound to this cluster.
Args:
conf (dict): Consumer config overrides
Returns:
Consumer: A new Consumer instance
"""
raise NotImplementedError('client_conf has not been implemented')
if conf is not None:
consumer_conf.update(conf)
return Consumer(consumer_conf)
|
confluent-kafka-python
|
positive
|
def get_decoded_html(url, faker=False):
<DeepExtract>
logging.debug('get_response: %s' % url)
if cookies:
opener = request.build_opener(request.HTTPCookieProcessor(cookies))
request.install_opener(opener)
if faker:
response = request.urlopen(request.Request(url, headers=fake_headers), None)
else:
response = request.urlopen(url)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
response = response
</DeepExtract>
data = response.data
<DeepExtract>
m = re.search('charset=([\\w-]+)', response.headers['content-type'])
charset = m.group(1) if m else None
</DeepExtract>
if charset:
return data.decode(charset, 'ignore')
else:
return data
|
def get_decoded_html(url, faker=False):
logging.debug('get_response: %s' % url)
if cookies:
opener = request.build_opener(request.HTTPCookieProcessor(cookies))
request.install_opener(opener)
if faker:
response = request.urlopen(request.Request(url, headers=fake_headers), None)
else:
response = request.urlopen(url)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
response = response
data = response.data
m = re.search('charset=([\\w-]+)', response.headers['content-type'])
charset = m.group(1) if m else None
if charset:
return data.decode(charset, 'ignore')
else:
return data
|
acmpv
|
positive
|
def parse_for(self):
for_ = For(Trace(self))
self.tokens.expect('for')
self.tokens.expect('(')
if self.tokens.peek() != ';':
<DeepExtract>
for_.statement1 = DiscardExpression(Trace(self), self.parse_expression())
</DeepExtract>
self.tokens.expect(';')
if self.tokens.peek() != ';':
<DeepExtract>
expression = self.parse_assignment()
while self.tokens.peek() == ',':
self.tokens.expect(',')
expression = MultiExpression(Trace(self), expression, [self.parse_assignment()])
for_.expression = expression
</DeepExtract>
if for_.expression.type_() not in integer_like:
self.tokens.error('For statement conditional must be an integer like expression')
self.tokens.expect(';')
if self.tokens.peek() != ')':
<DeepExtract>
for_.statement2 = DiscardExpression(Trace(self), self.parse_expression())
</DeepExtract>
self.tokens.expect(')')
stored_loop = self.loop
self.loop = for_
<DeepExtract>
self.statement += 1
if self.tokens.peek_next() == ':' and self.tokens.peek() not in ['default', 'case']:
label_name = self.tokens.get()
self.tokens.expect(':')
label = Label(Trace(self), self.parse_statement())
if label_name in self.goto_labels:
self.tokens.error('label %s is already defined' % label_name)
self.goto_labels[label_name] = label
for_.statement3 = label
elif self.tokens.peek() in numeric_types + self.structs + storage_specifiers:
for_.statement3 = self.parse_compound_declaration()
elif self.tokens.peek() == 'struct':
for_.statement3 = self.parse_struct_declaration()
elif self.tokens.peek() == 'if':
for_.statement3 = self.parse_if()
elif self.tokens.peek() == 'while':
for_.statement3 = self.parse_while()
elif self.tokens.peek() == 'do':
for_.statement3 = self.parse_do_while()
elif self.tokens.peek() == 'for':
for_.statement3 = self.parse_for()
elif self.tokens.peek() == 'return':
for_.statement3 = self.parse_return()
elif self.tokens.peek() == 'break':
for_.statement3 = self.parse_break()
elif self.tokens.peek() == 'continue':
for_.statement3 = self.parse_continue()
elif self.tokens.peek() == '{':
for_.statement3 = self.parse_block()
elif self.tokens.peek() == 'assert':
for_.statement3 = self.parse_assert()
elif self.tokens.peek() == 'report':
for_.statement3 = self.parse_report()
elif self.tokens.peek() == 'switch':
for_.statement3 = self.parse_switch()
elif self.tokens.peek() == 'case':
for_.statement3 = self.parse_case()
elif self.tokens.peek() == 'default':
for_.statement3 = self.parse_default()
elif self.tokens.peek() == 'goto':
for_.statement3 = self.parse_goto()
elif self.tokens.peek() == 'wait_clocks':
for_.statement3 = self.parse_wait_clocks()
else:
expression = self.parse_discard()
self.tokens.expect(';')
for_.statement3 = expression
</DeepExtract>
self.loop = stored_loop
return for_
|
def parse_for(self):
for_ = For(Trace(self))
self.tokens.expect('for')
self.tokens.expect('(')
if self.tokens.peek() != ';':
for_.statement1 = DiscardExpression(Trace(self), self.parse_expression())
self.tokens.expect(';')
if self.tokens.peek() != ';':
expression = self.parse_assignment()
while self.tokens.peek() == ',':
self.tokens.expect(',')
expression = MultiExpression(Trace(self), expression, [self.parse_assignment()])
for_.expression = expression
if for_.expression.type_() not in integer_like:
self.tokens.error('For statement conditional must be an integer like expression')
self.tokens.expect(';')
if self.tokens.peek() != ')':
for_.statement2 = DiscardExpression(Trace(self), self.parse_expression())
self.tokens.expect(')')
stored_loop = self.loop
self.loop = for_
self.statement += 1
if self.tokens.peek_next() == ':' and self.tokens.peek() not in ['default', 'case']:
label_name = self.tokens.get()
self.tokens.expect(':')
label = Label(Trace(self), self.parse_statement())
if label_name in self.goto_labels:
self.tokens.error('label %s is already defined' % label_name)
self.goto_labels[label_name] = label
for_.statement3 = label
elif self.tokens.peek() in numeric_types + self.structs + storage_specifiers:
for_.statement3 = self.parse_compound_declaration()
elif self.tokens.peek() == 'struct':
for_.statement3 = self.parse_struct_declaration()
elif self.tokens.peek() == 'if':
for_.statement3 = self.parse_if()
elif self.tokens.peek() == 'while':
for_.statement3 = self.parse_while()
elif self.tokens.peek() == 'do':
for_.statement3 = self.parse_do_while()
elif self.tokens.peek() == 'for':
for_.statement3 = self.parse_for()
elif self.tokens.peek() == 'return':
for_.statement3 = self.parse_return()
elif self.tokens.peek() == 'break':
for_.statement3 = self.parse_break()
elif self.tokens.peek() == 'continue':
for_.statement3 = self.parse_continue()
elif self.tokens.peek() == '{':
for_.statement3 = self.parse_block()
elif self.tokens.peek() == 'assert':
for_.statement3 = self.parse_assert()
elif self.tokens.peek() == 'report':
for_.statement3 = self.parse_report()
elif self.tokens.peek() == 'switch':
for_.statement3 = self.parse_switch()
elif self.tokens.peek() == 'case':
for_.statement3 = self.parse_case()
elif self.tokens.peek() == 'default':
for_.statement3 = self.parse_default()
elif self.tokens.peek() == 'goto':
for_.statement3 = self.parse_goto()
elif self.tokens.peek() == 'wait_clocks':
for_.statement3 = self.parse_wait_clocks()
else:
expression = self.parse_discard()
self.tokens.expect(';')
for_.statement3 = expression
self.loop = stored_loop
return for_
|
Chips-2.0
|
positive
|
def build_start_handler(action):
args = vars(self.args)
if 'version' not in args:
args['version'] = self.SUPPORTED_VERSIONS.get(args['stack-version'], args['stack-version'])
if parse_version(args['version']) >= parse_version('8.0'):
args['enable_apm_managed'] = True
if args.get('enable_apm_server') is False:
args['enable_apm_managed'] = False
if args.get('enable_apm_managed', False):
args['enable_apm_server'] = False
if not args.get('enable_kibana', True):
print('Kibana will be launched to configure APM integration and stopped after that.')
args['enable_kibana'] = True
args['shutdown_kibana'] = True
if args.get('apm_server_enable_tls'):
args['apm_server_url'] = args.get('apm_server_url', DEFAULT_APM_SERVER_URL).replace('http:', 'https:')
args['opbeans_apm_js_server_url'] = args['apm_server_url']
selections = set()
run_all = args.get('run_all')
all_opbeans = args.get('run_all_opbeans') or run_all
any_opbeans = all_opbeans or any((v and k.startswith('enable_opbeans_') for (k, v) in args.items()))
opbeans_sidecars = ['postgres', 'redis', 'opbeans-load-generator']
opbeans_2nds = ('opbeans-go01', 'opbeans-java01', 'opbeans-python01', 'opbeans-ruby01', 'opbeans-dotnet01', 'opbeans-node01')
for service in self.services:
service_enabled = args.get('enable_' + service.option_name())
is_opbeans_service = issubclass(service, OpbeansService) or service is OpbeansRum
is_opbeans_sidecar = service.name() in opbeans_sidecars
is_opbeans_2nd = service.name() in opbeans_2nds
is_obs = issubclass(service, BeatMixin)
if service_enabled or (all_opbeans and is_opbeans_service and (not is_opbeans_2nd)) or (any_opbeans and is_opbeans_sidecar and (not is_opbeans_2nd)) or (run_all and is_obs and (not is_opbeans_2nd)):
selections.add(service(**args))
if args.get('dyno'):
toxi = Toxi()
selections.add(toxi)
toxi.gen_ports(selections)
c = toxi.gen_config(selections)
this_dir = os.path.dirname(os.path.realpath(__file__))
toxi_cfg_path = os.path.join(this_dir, '../../docker/toxi/toxi.cfg')
with open(toxi_cfg_path, 'w') as fh_:
fh_.write(c)
dyno = Dyno()
selections.add(dyno)
statsd = StatsD()
selections.add(statsd)
selections.add(WaitService(set(selections), **args))
curl_image = 'docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT'
for (c, snapshot_repo) in enumerate(args.get('elasticsearch_snapshot_repo', [])):
if not snapshot_repo.startswith('http'):
print('skipping setup of non-http(s) repo: {}'.format(snapshot_repo))
continue
if not snapshot_repo.endswith('/'):
print("http(s) repo should probably end with '/': {}".format(snapshot_repo))
repo_label = 'repo{:d}'.format(c)
cmd = ['curl', '-X', 'PUT', '-H', 'Content-Type: application/json', '-d', json.dumps({'type': 'url', 'settings': {'url': snapshot_repo}}), 'http://admin:changeme@elasticsearch:9200/_snapshot/{:s}'.format(repo_label)]
selections.add(CommandService(cmd, service=repo_label, image=curl_image, depends_on=['elasticsearch']))
if args.get('enable_kibana') and (not args.get('enable_apm_managed', False)):
kibana_scheme = 'https' if args.get('kibana_enable_tls', False) else 'http'
kibana_url = kibana_scheme + '://admin:changeme@kibana:5601'
cmd = ['curl', '-X', 'POST', '-H', 'kbn-xsrf: 1', kibana_url + '/api/fleet/setup']
selections.add(CommandService(cmd, service='fleet_setup', image=curl_image, depends_on=['kibana']))
services_to_load = {}
for service in selections:
download_url = service.image_download_url()
if download_url:
services_to_load[list(service.render().keys())[0]] = download_url
if not args['skip_download'] and services_to_load:
load_images(set(services_to_load.values()), args['image_cache_dir'])
services = {}
for service in selections:
services.update(service.render())
for addl_services in args['with_services']:
with open(addl_services) as f:
services.update(json.load(f))
enabled_opbeans_services = [k for k in services.keys() if k.startswith('opbeans-') and k not in ('opbeans-rum', 'opbeans-load-generator')]
enabled_opbeans_services_str = ','.join(enabled_opbeans_services)
for s in enabled_opbeans_services:
if isinstance(services[s]['environment'], dict):
services[s]['environment']['OPBEANS_SERVICES'] = enabled_opbeans_services_str
else:
services[s]['environment'].append('OPBEANS_SERVICES=' + enabled_opbeans_services_str)
loadgen = services.get('opbeans-load-generator')
if loadgen is not None:
enabled_opbeans = any((re.search('OPBEANS_URLS=.+', v) for v in loadgen['environment']))
if args.get('disable_opbeans_load_generator') or not enabled_opbeans:
del services['opbeans-load-generator']
compose = dict(version='2.4', services=services, networks=dict(default={'name': 'apm-integration-testing'}), volumes=dict(esdata={'driver': 'local'}, pgdata={'driver': 'local'}))
docker_compose_path = args['docker_compose_path']
if args.get('output_format') == 'yaml':
try:
import yaml
except ImportError:
print("Failed to import 'yaml': pip install yaml, or specify an alternative --output-format.")
sys.exit(1)
yaml.dump(compose, docker_compose_path, explicit_start=True, default_flow_style=False, indent=2)
elif args.get('output_format') == 'json':
json.dump(compose, docker_compose_path, indent=2, sort_keys=True)
docker_compose_path.flush()
if hasattr(docker_compose_path, 'name') and os.path.isdir(os.path.dirname(docker_compose_path.name)):
docker_compose_path.close()
print('Starting/Building stack services..\n')
docker_compose_cmd = ['docker-compose', '-f', docker_compose_path.name]
if not sys.stdin.isatty() and action not in ['build']:
docker_compose_cmd.extend(['--no-ansi', '--log-level', 'ERROR'])
build_services = [name for (name, service) in compose['services'].items() if 'build' in service]
if build_services:
docker_compose_build = docker_compose_cmd + ['build']
if not args['skip_pull']:
docker_compose_build.append('--pull')
if args['force_build']:
docker_compose_build.append('--no-cache')
if args['build_parallel']:
docker_compose_build.append('--parallel')
<DeepExtract>
try:
subprocess.check_call(docker_compose_build + build_services)
except OSError as err:
print('ERROR: Docker Compose might be missing. See below for further details.\n')
raise OSError(err)
</DeepExtract>
image_services = [name for (name, service) in compose['services'].items() if 'image' in service and name not in services_to_load]
if args.get('kibana_src'):
image_services.remove('kibana')
if image_services and (not args['skip_download']):
pull_params = ['pull']
if not sys.stdin.isatty():
pull_params.extend(['-q'])
<DeepExtract>
try:
subprocess.check_call(docker_compose_cmd + pull_params + image_services)
except OSError as err:
print('ERROR: Docker Compose might be missing. See below for further details.\n')
raise OSError(err)
</DeepExtract>
if action in ['start']:
up_params = ['up', '-d']
if args['remove_orphans']:
up_params.append('--remove-orphans')
if action in ['build']:
up_params = ['build']
if not sys.stdin.isatty() and action not in ['build']:
up_params.extend(['--quiet-pull'])
<DeepExtract>
try:
subprocess.check_call(docker_compose_cmd + up_params)
except OSError as err:
print('ERROR: Docker Compose might be missing. See below for further details.\n')
raise OSError(err)
</DeepExtract>
if args.get('shutdown_kibana', False):
print('Stopping Kibana after configuring APM integration.')
<DeepExtract>
try:
subprocess.check_call(docker_compose_cmd + ['stop', 'kibana'])
except OSError as err:
print('ERROR: Docker Compose might be missing. See below for further details.\n')
raise OSError(err)
</DeepExtract>
|
def build_start_handler(action):
args = vars(self.args)
if 'version' not in args:
args['version'] = self.SUPPORTED_VERSIONS.get(args['stack-version'], args['stack-version'])
if parse_version(args['version']) >= parse_version('8.0'):
args['enable_apm_managed'] = True
if args.get('enable_apm_server') is False:
args['enable_apm_managed'] = False
if args.get('enable_apm_managed', False):
args['enable_apm_server'] = False
if not args.get('enable_kibana', True):
print('Kibana will be launched to configure APM integration and stopped after that.')
args['enable_kibana'] = True
args['shutdown_kibana'] = True
if args.get('apm_server_enable_tls'):
args['apm_server_url'] = args.get('apm_server_url', DEFAULT_APM_SERVER_URL).replace('http:', 'https:')
args['opbeans_apm_js_server_url'] = args['apm_server_url']
selections = set()
run_all = args.get('run_all')
all_opbeans = args.get('run_all_opbeans') or run_all
any_opbeans = all_opbeans or any((v and k.startswith('enable_opbeans_') for (k, v) in args.items()))
opbeans_sidecars = ['postgres', 'redis', 'opbeans-load-generator']
opbeans_2nds = ('opbeans-go01', 'opbeans-java01', 'opbeans-python01', 'opbeans-ruby01', 'opbeans-dotnet01', 'opbeans-node01')
for service in self.services:
service_enabled = args.get('enable_' + service.option_name())
is_opbeans_service = issubclass(service, OpbeansService) or service is OpbeansRum
is_opbeans_sidecar = service.name() in opbeans_sidecars
is_opbeans_2nd = service.name() in opbeans_2nds
is_obs = issubclass(service, BeatMixin)
if service_enabled or (all_opbeans and is_opbeans_service and (not is_opbeans_2nd)) or (any_opbeans and is_opbeans_sidecar and (not is_opbeans_2nd)) or (run_all and is_obs and (not is_opbeans_2nd)):
selections.add(service(**args))
if args.get('dyno'):
toxi = Toxi()
selections.add(toxi)
toxi.gen_ports(selections)
c = toxi.gen_config(selections)
this_dir = os.path.dirname(os.path.realpath(__file__))
toxi_cfg_path = os.path.join(this_dir, '../../docker/toxi/toxi.cfg')
with open(toxi_cfg_path, 'w') as fh_:
fh_.write(c)
dyno = Dyno()
selections.add(dyno)
statsd = StatsD()
selections.add(statsd)
selections.add(WaitService(set(selections), **args))
curl_image = 'docker.elastic.co/elasticsearch/elasticsearch:8.0.0-SNAPSHOT'
for (c, snapshot_repo) in enumerate(args.get('elasticsearch_snapshot_repo', [])):
if not snapshot_repo.startswith('http'):
print('skipping setup of non-http(s) repo: {}'.format(snapshot_repo))
continue
if not snapshot_repo.endswith('/'):
print("http(s) repo should probably end with '/': {}".format(snapshot_repo))
repo_label = 'repo{:d}'.format(c)
cmd = ['curl', '-X', 'PUT', '-H', 'Content-Type: application/json', '-d', json.dumps({'type': 'url', 'settings': {'url': snapshot_repo}}), 'http://admin:changeme@elasticsearch:9200/_snapshot/{:s}'.format(repo_label)]
selections.add(CommandService(cmd, service=repo_label, image=curl_image, depends_on=['elasticsearch']))
if args.get('enable_kibana') and (not args.get('enable_apm_managed', False)):
kibana_scheme = 'https' if args.get('kibana_enable_tls', False) else 'http'
kibana_url = kibana_scheme + '://admin:changeme@kibana:5601'
cmd = ['curl', '-X', 'POST', '-H', 'kbn-xsrf: 1', kibana_url + '/api/fleet/setup']
selections.add(CommandService(cmd, service='fleet_setup', image=curl_image, depends_on=['kibana']))
services_to_load = {}
for service in selections:
download_url = service.image_download_url()
if download_url:
services_to_load[list(service.render().keys())[0]] = download_url
if not args['skip_download'] and services_to_load:
load_images(set(services_to_load.values()), args['image_cache_dir'])
services = {}
for service in selections:
services.update(service.render())
for addl_services in args['with_services']:
with open(addl_services) as f:
services.update(json.load(f))
enabled_opbeans_services = [k for k in services.keys() if k.startswith('opbeans-') and k not in ('opbeans-rum', 'opbeans-load-generator')]
enabled_opbeans_services_str = ','.join(enabled_opbeans_services)
for s in enabled_opbeans_services:
if isinstance(services[s]['environment'], dict):
services[s]['environment']['OPBEANS_SERVICES'] = enabled_opbeans_services_str
else:
services[s]['environment'].append('OPBEANS_SERVICES=' + enabled_opbeans_services_str)
loadgen = services.get('opbeans-load-generator')
if loadgen is not None:
enabled_opbeans = any((re.search('OPBEANS_URLS=.+', v) for v in loadgen['environment']))
if args.get('disable_opbeans_load_generator') or not enabled_opbeans:
del services['opbeans-load-generator']
compose = dict(version='2.4', services=services, networks=dict(default={'name': 'apm-integration-testing'}), volumes=dict(esdata={'driver': 'local'}, pgdata={'driver': 'local'}))
docker_compose_path = args['docker_compose_path']
if args.get('output_format') == 'yaml':
try:
import yaml
except ImportError:
print("Failed to import 'yaml': pip install yaml, or specify an alternative --output-format.")
sys.exit(1)
yaml.dump(compose, docker_compose_path, explicit_start=True, default_flow_style=False, indent=2)
elif args.get('output_format') == 'json':
json.dump(compose, docker_compose_path, indent=2, sort_keys=True)
docker_compose_path.flush()
if hasattr(docker_compose_path, 'name') and os.path.isdir(os.path.dirname(docker_compose_path.name)):
docker_compose_path.close()
print('Starting/Building stack services..\n')
docker_compose_cmd = ['docker-compose', '-f', docker_compose_path.name]
if not sys.stdin.isatty() and action not in ['build']:
docker_compose_cmd.extend(['--no-ansi', '--log-level', 'ERROR'])
build_services = [name for (name, service) in compose['services'].items() if 'build' in service]
if build_services:
docker_compose_build = docker_compose_cmd + ['build']
if not args['skip_pull']:
docker_compose_build.append('--pull')
if args['force_build']:
docker_compose_build.append('--no-cache')
if args['build_parallel']:
docker_compose_build.append('--parallel')
try:
subprocess.check_call(docker_compose_build + build_services)
except OSError as err:
print('ERROR: Docker Compose might be missing. See below for further details.\n')
raise OSError(err)
image_services = [name for (name, service) in compose['services'].items() if 'image' in service and name not in services_to_load]
if args.get('kibana_src'):
image_services.remove('kibana')
if image_services and (not args['skip_download']):
pull_params = ['pull']
if not sys.stdin.isatty():
pull_params.extend(['-q'])
try:
subprocess.check_call(docker_compose_cmd + pull_params + image_services)
except OSError as err:
print('ERROR: Docker Compose might be missing. See below for further details.\n')
raise OSError(err)
if action in ['start']:
up_params = ['up', '-d']
if args['remove_orphans']:
up_params.append('--remove-orphans')
if action in ['build']:
up_params = ['build']
if not sys.stdin.isatty() and action not in ['build']:
up_params.extend(['--quiet-pull'])
try:
subprocess.check_call(docker_compose_cmd + up_params)
except OSError as err:
print('ERROR: Docker Compose might be missing. See below for further details.\n')
raise OSError(err)
if args.get('shutdown_kibana', False):
print('Stopping Kibana after configuring APM integration.')
try:
subprocess.check_call(docker_compose_cmd + ['stop', 'kibana'])
except OSError as err:
print('ERROR: Docker Compose might be missing. See below for further details.\n')
raise OSError(err)
|
apm-integration-testing
|
positive
|
def get_multiple(self, request, **kwargs):
"""
Returns a serialized list of resources based on the identifiers
from the URL.
Calls ``obj_get_list`` to fetch only the objects requested in
a single query. This method only responds to HTTP GET.
For backward compatibility, the method ``obj_get`` is used if
``obj_get_list`` is not implemented.
Should return an HttpResponse (200 OK).
"""
<DeepExtract>
allowed = ['get']
request_method = request.method.lower()
allows = ','.join([meth.upper() for meth in allowed])
if request_method == 'options':
response = HttpResponse(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
if request_method not in allowed:
response = http.HttpMethodNotAllowed(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
</DeepExtract>
<DeepExtract>
auth_result = self._meta.authentication.is_authenticated(request)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if auth_result is not True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
</DeepExtract>
<DeepExtract>
identifier = self._meta.authentication.get_identifier(request)
throttle = self._meta.throttle.should_be_throttled(identifier)
if throttle:
response = http.HttpTooManyRequests()
if isinstance(throttle, int) and (not isinstance(throttle, bool)):
response['Retry-After'] = throttle
elif isinstance(throttle, datetime):
throttle_utc = make_naive_utc(throttle)
response['Retry-After'] = format_date_time(mktime(throttle_utc.timetuple()))
raise ImmediateHttpResponse(response=response)
</DeepExtract>
kwarg_name = '%s_list' % self._meta.detail_uri_name
obj_identifiers = kwargs.get(kwarg_name, '').split(';')
objects = []
not_found = []
<DeepExtract>
if obj is None and self._meta.object_class:
obj = self._meta.object_class()
base_bundle = Bundle(obj=obj, data=data, request=request, objects_saved=objects_saved, via_uri=via_uri)
</DeepExtract>
queryset = None
try:
queryset = self.obj_get_list(bundle=base_bundle).filter(**{self._meta.detail_uri_name + '__in': obj_identifiers})
except NotImplementedError:
pass
if queryset is not None:
objects_dict = {}
for obj in queryset:
objects_dict[str(getattr(obj, self._meta.detail_uri_name))] = obj
for identifier in obj_identifiers:
if identifier in objects_dict:
<DeepExtract>
if objects_dict[identifier] is None and self._meta.object_class:
objects_dict[identifier] = self._meta.object_class()
bundle = Bundle(obj=objects_dict[identifier], data=data, request=request, objects_saved=objects_saved, via_uri=via_uri)
</DeepExtract>
<DeepExtract>
data = bundle.data
api_name = self._meta.api_name
resource_name = self._meta.resource_name
for (field_name, field_object) in self.fields.items():
field_use_in = field_object.use_in
if callable(field_use_in):
if not field_use_in(bundle):
continue
elif field_use_in not in ['all', 'list' if True else 'detail']:
continue
if field_object.dehydrated_type == 'related':
field_object.api_name = api_name
field_object.resource_name = resource_name
data[field_name] = field_object.dehydrate(bundle, for_list=True)
method = getattr(self, 'dehydrate_%s' % field_name, None)
if method:
data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
bundle = bundle
</DeepExtract>
objects.append(bundle)
else:
not_found.append(identifier)
else:
for identifier in obj_identifiers:
try:
<DeepExtract>
raise NotImplementedError()
</DeepExtract>
<DeepExtract>
if obj is None and self._meta.object_class:
obj = self._meta.object_class()
bundle = Bundle(obj=obj, data=data, request=request, objects_saved=objects_saved, via_uri=via_uri)
</DeepExtract>
<DeepExtract>
data = bundle.data
api_name = self._meta.api_name
resource_name = self._meta.resource_name
for (field_name, field_object) in self.fields.items():
field_use_in = field_object.use_in
if callable(field_use_in):
if not field_use_in(bundle):
continue
elif field_use_in not in ['all', 'list' if True else 'detail']:
continue
if field_object.dehydrated_type == 'related':
field_object.api_name = api_name
field_object.resource_name = resource_name
data[field_name] = field_object.dehydrate(bundle, for_list=True)
method = getattr(self, 'dehydrate_%s' % field_name, None)
if method:
data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
bundle = bundle
</DeepExtract>
objects.append(bundle)
except (ObjectDoesNotExist, Unauthorized):
not_found.append(identifier)
object_list = {self._meta.collection_name: objects}
if len(not_found):
object_list['not_found'] = not_found
<DeepExtract>
request_method = request.method.lower()
self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
</DeepExtract>
return self.create_response(request, object_list)
|
def get_multiple(self, request, **kwargs):
"""
Returns a serialized list of resources based on the identifiers
from the URL.
Calls ``obj_get_list`` to fetch only the objects requested in
a single query. This method only responds to HTTP GET.
For backward compatibility, the method ``obj_get`` is used if
``obj_get_list`` is not implemented.
Should return an HttpResponse (200 OK).
"""
allowed = ['get']
request_method = request.method.lower()
allows = ','.join([meth.upper() for meth in allowed])
if request_method == 'options':
response = HttpResponse(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
if request_method not in allowed:
response = http.HttpMethodNotAllowed(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
auth_result = self._meta.authentication.is_authenticated(request)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if auth_result is not True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
identifier = self._meta.authentication.get_identifier(request)
throttle = self._meta.throttle.should_be_throttled(identifier)
if throttle:
response = http.HttpTooManyRequests()
if isinstance(throttle, int) and (not isinstance(throttle, bool)):
response['Retry-After'] = throttle
elif isinstance(throttle, datetime):
throttle_utc = make_naive_utc(throttle)
response['Retry-After'] = format_date_time(mktime(throttle_utc.timetuple()))
raise ImmediateHttpResponse(response=response)
kwarg_name = '%s_list' % self._meta.detail_uri_name
obj_identifiers = kwargs.get(kwarg_name, '').split(';')
objects = []
not_found = []
if obj is None and self._meta.object_class:
obj = self._meta.object_class()
base_bundle = Bundle(obj=obj, data=data, request=request, objects_saved=objects_saved, via_uri=via_uri)
queryset = None
try:
queryset = self.obj_get_list(bundle=base_bundle).filter(**{self._meta.detail_uri_name + '__in': obj_identifiers})
except NotImplementedError:
pass
if queryset is not None:
objects_dict = {}
for obj in queryset:
objects_dict[str(getattr(obj, self._meta.detail_uri_name))] = obj
for identifier in obj_identifiers:
if identifier in objects_dict:
if objects_dict[identifier] is None and self._meta.object_class:
objects_dict[identifier] = self._meta.object_class()
bundle = Bundle(obj=objects_dict[identifier], data=data, request=request, objects_saved=objects_saved, via_uri=via_uri)
data = bundle.data
api_name = self._meta.api_name
resource_name = self._meta.resource_name
for (field_name, field_object) in self.fields.items():
field_use_in = field_object.use_in
if callable(field_use_in):
if not field_use_in(bundle):
continue
elif field_use_in not in ['all', 'list' if True else 'detail']:
continue
if field_object.dehydrated_type == 'related':
field_object.api_name = api_name
field_object.resource_name = resource_name
data[field_name] = field_object.dehydrate(bundle, for_list=True)
method = getattr(self, 'dehydrate_%s' % field_name, None)
if method:
data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
bundle = bundle
objects.append(bundle)
else:
not_found.append(identifier)
else:
for identifier in obj_identifiers:
try:
raise NotImplementedError()
if obj is None and self._meta.object_class:
obj = self._meta.object_class()
bundle = Bundle(obj=obj, data=data, request=request, objects_saved=objects_saved, via_uri=via_uri)
data = bundle.data
api_name = self._meta.api_name
resource_name = self._meta.resource_name
for (field_name, field_object) in self.fields.items():
field_use_in = field_object.use_in
if callable(field_use_in):
if not field_use_in(bundle):
continue
elif field_use_in not in ['all', 'list' if True else 'detail']:
continue
if field_object.dehydrated_type == 'related':
field_object.api_name = api_name
field_object.resource_name = resource_name
data[field_name] = field_object.dehydrate(bundle, for_list=True)
method = getattr(self, 'dehydrate_%s' % field_name, None)
if method:
data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
bundle = bundle
objects.append(bundle)
except (ObjectDoesNotExist, Unauthorized):
not_found.append(identifier)
object_list = {self._meta.collection_name: objects}
if len(not_found):
object_list['not_found'] = not_found
request_method = request.method.lower()
self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
return self.create_response(request, object_list)
|
django-tastypie
|
positive
|
def adjust_transform_for_image(transform, image, relative_translation):
"""
Adjust a transformation for a specific image.
The translation of the matrix will be scaled with the size of the image.
The linear part of the transformation will be adjusted so that the origin of the transformation is at the center of the image.
"""
(height, width, channels) = image.shape
result = transform
if relative_translation:
result[0:2, 2] *= [width, height]
<DeepExtract>
center = np.array((0.5 * width, 0.5 * height))
result = np.linalg.multi_dot([np.array([[1, 0, center[0]], [0, 1, center[1]], [0, 0, 1]]), transform, np.array([[1, 0, -center[0]], [0, 1, -center[1]], [0, 0, 1]])])
</DeepExtract>
return result
|
def adjust_transform_for_image(transform, image, relative_translation):
"""
Adjust a transformation for a specific image.
The translation of the matrix will be scaled with the size of the image.
The linear part of the transformation will be adjusted so that the origin of the transformation is at the center of the image.
"""
(height, width, channels) = image.shape
result = transform
if relative_translation:
result[0:2, 2] *= [width, height]
center = np.array((0.5 * width, 0.5 * height))
result = np.linalg.multi_dot([np.array([[1, 0, center[0]], [0, 1, center[1]], [0, 0, 1]]), transform, np.array([[1, 0, -center[0]], [0, 1, -center[1]], [0, 0, 1]])])
return result
|
ensembleObjectDetection
|
positive
|
def convert_drs_lambda(contextdrs, expression):
variable = expression.variable
term = expression.term
<DeepExtract>
if isinstance(term, ApplicationExpression):
drs_expr = convert_drs_application(DRS([], []), term)
elif isinstance(term, EqualityExpression):
drs_expr = convert_drs_equality(DRS([], []), term)
elif isinstance(term, AndExpression):
drs_expr = convert_drs_and(DRS([], []), term)
elif isinstance(term, OrExpression):
drs_expr = convert_drs_or(DRS([], []), term)
elif isinstance(term, ImpExpression):
drs_expr = convert_drs_imp(DRS([], []), term)
elif isinstance(term, NegatedExpression):
drs_expr = convert_drs_not(DRS([], []), term)
elif isinstance(term, ExistsExpression):
drs_expr = convert_drs_exists(DRS([], []), term)
elif isinstance(term, AllExpression):
drs_expr = convert_drs_all(DRS([], []), term)
elif isinstance(term, LambdaExpression):
drs_expr = convert_drs_lambda(DRS([], []), term)
elif isinstance(term, AbstractVariableExpression):
variable_str = str(term.variable).lower()
if variable_str[0] == '_':
variable = Variable(variable_str[1:])
else:
variable = Variable(variable_str)
drs_expr = DrtAbstractVariableExpression(variable)
else:
drs_expr = term
term = drs_expr
</DeepExtract>
drs_expr = DrtLambdaExpression(variable, term)
return drs_expr
|
def convert_drs_lambda(contextdrs, expression):
variable = expression.variable
term = expression.term
if isinstance(term, ApplicationExpression):
drs_expr = convert_drs_application(DRS([], []), term)
elif isinstance(term, EqualityExpression):
drs_expr = convert_drs_equality(DRS([], []), term)
elif isinstance(term, AndExpression):
drs_expr = convert_drs_and(DRS([], []), term)
elif isinstance(term, OrExpression):
drs_expr = convert_drs_or(DRS([], []), term)
elif isinstance(term, ImpExpression):
drs_expr = convert_drs_imp(DRS([], []), term)
elif isinstance(term, NegatedExpression):
drs_expr = convert_drs_not(DRS([], []), term)
elif isinstance(term, ExistsExpression):
drs_expr = convert_drs_exists(DRS([], []), term)
elif isinstance(term, AllExpression):
drs_expr = convert_drs_all(DRS([], []), term)
elif isinstance(term, LambdaExpression):
drs_expr = convert_drs_lambda(DRS([], []), term)
elif isinstance(term, AbstractVariableExpression):
variable_str = str(term.variable).lower()
if variable_str[0] == '_':
variable = Variable(variable_str[1:])
else:
variable = Variable(variable_str)
drs_expr = DrtAbstractVariableExpression(variable)
else:
drs_expr = term
term = drs_expr
drs_expr = DrtLambdaExpression(variable, term)
return drs_expr
|
ccg2lambda
|
positive
|
def save_model(self, epoch):
<DeepExtract>
assert self.logger is not None, 'Training'
</DeepExtract>
<DeepExtract>
self._assert_training()
save_path = os.path.join(self.logger.checkpoint_dir, self.save_filename('renew', epoch))
torch.save(self.model.renew.cpu().state_dict(), save_path)
self.model.renew.cuda()
</DeepExtract>
<DeepExtract>
self._assert_training()
save_path = os.path.join(self.logger.checkpoint_dir, self.save_filename('srconv', epoch))
torch.save(self.model.srconv.cpu().state_dict(), save_path)
self.model.srconv.cuda()
</DeepExtract>
<DeepExtract>
self._assert_training()
save_path = os.path.join(self.logger.checkpoint_dir, self.save_filename('upsample', epoch))
torch.save(self.model.upsample.cpu().state_dict(), save_path)
self.model.upsample.cuda()
</DeepExtract>
|
def save_model(self, epoch):
assert self.logger is not None, 'Training'
self._assert_training()
save_path = os.path.join(self.logger.checkpoint_dir, self.save_filename('renew', epoch))
torch.save(self.model.renew.cpu().state_dict(), save_path)
self.model.renew.cuda()
self._assert_training()
save_path = os.path.join(self.logger.checkpoint_dir, self.save_filename('srconv', epoch))
torch.save(self.model.srconv.cpu().state_dict(), save_path)
self.model.srconv.cuda()
self._assert_training()
save_path = os.path.join(self.logger.checkpoint_dir, self.save_filename('upsample', epoch))
torch.save(self.model.upsample.cpu().state_dict(), save_path)
self.model.upsample.cuda()
|
Component-Divide-and-Conquer-for-Real-World-Image-Super-Resolution
|
positive
|
def updateMusicBrainzArtists(self, verbose=False):
paths = config.config['music_paths']
for path in paths:
if not os.path.isdir(path):
continue
for (dirpath, dirnames, filenames) in os.walk(path, topdown=True):
dirnames.sort()
if '.artist_mbid' in filenames:
if verbose:
print(f'Found artist dir at {dirpath}')
image_filenames = [f for f in ('artist.jpg', 'artist.png', 'artist.webp') if f in filenames]
image_filename = image_filenames[0] if image_filenames else None
<DeepExtract>
mbidfile = os.path.join(dirpath, '.artist_mbid')
mbids = [x.strip('\n') for x in open(mbidfile).readlines()]
dirname = os.path.normpath(dirpath)
path_id = MusicBrainzDatabase.get_artist_path_id(dirname)
connection = MusicDatabase.getCursor()
if path_id:
MusicBrainzDatabase.set_artist_path_image_filename(path_id, image_filename)
else:
path_id = MusicBrainzDatabase.add_artist_path(dirname, image_filename, connection=connection)
if not path_id:
return
if verbose:
print(f'Adding artist path {dirname} ({path_id}) for {mbids}...')
if len(mbids) == 1:
artist_id = MusicBrainzDatabase.get_artist_id(mbids[0])
if artist_id:
MusicBrainzDatabase.set_artist_path(artist_id, path_id, connection=connection)
artist_credit_ids = MusicBrainzDatabase.get_artist_credit_ids(mbids)
if artist_credit_ids:
MusicBrainzDatabase.set_artist_credit_path(artist_credit_ids, path_id, connection=connection)
</DeepExtract>
MusicDatabase.commit()
for excludeDir in self.excludeDirectories:
try:
dirnames.remove(excludeDir)
except ValueError:
pass
|
def updateMusicBrainzArtists(self, verbose=False):
paths = config.config['music_paths']
for path in paths:
if not os.path.isdir(path):
continue
for (dirpath, dirnames, filenames) in os.walk(path, topdown=True):
dirnames.sort()
if '.artist_mbid' in filenames:
if verbose:
print(f'Found artist dir at {dirpath}')
image_filenames = [f for f in ('artist.jpg', 'artist.png', 'artist.webp') if f in filenames]
image_filename = image_filenames[0] if image_filenames else None
mbidfile = os.path.join(dirpath, '.artist_mbid')
mbids = [x.strip('\n') for x in open(mbidfile).readlines()]
dirname = os.path.normpath(dirpath)
path_id = MusicBrainzDatabase.get_artist_path_id(dirname)
connection = MusicDatabase.getCursor()
if path_id:
MusicBrainzDatabase.set_artist_path_image_filename(path_id, image_filename)
else:
path_id = MusicBrainzDatabase.add_artist_path(dirname, image_filename, connection=connection)
if not path_id:
return
if verbose:
print(f'Adding artist path {dirname} ({path_id}) for {mbids}...')
if len(mbids) == 1:
artist_id = MusicBrainzDatabase.get_artist_id(mbids[0])
if artist_id:
MusicBrainzDatabase.set_artist_path(artist_id, path_id, connection=connection)
artist_credit_ids = MusicBrainzDatabase.get_artist_credit_ids(mbids)
if artist_credit_ids:
MusicBrainzDatabase.set_artist_credit_path(artist_credit_ids, path_id, connection=connection)
MusicDatabase.commit()
for excludeDir in self.excludeDirectories:
try:
dirnames.remove(excludeDir)
except ValueError:
pass
|
bard
|
positive
|
def generate_graph_data():
import bootstrapvz.common.tasks
import bootstrapvz.providers
import bootstrapvz.plugins
from bootstrapvz.base.tasklist import get_all_tasks
tasks = get_all_tasks([bootstrapvz.common.tasks, bootstrapvz.providers, bootstrapvz.plugins])
def distinct(seq):
seen = set()
return [x for x in seq if x not in seen and (not seen.add(x))]
<DeepExtract>
seen = set()
modules = [x for x in [task.__module__ for task in tasks] if x not in seen and (not seen.add(x))]
</DeepExtract>
task_links = []
task_links.extend([{'source': task, 'target': succ, 'definer': task} for task in tasks for succ in task.successors])
task_links.extend([{'source': pre, 'target': task, 'definer': task} for task in tasks for pre in task.predecessors])
def mk_phase(phase):
return {'name': phase.name, 'description': phase.description}
def mk_module(module):
return {'name': module}
from bootstrapvz.common import phases
def mk_node(task):
return {'name': task.__name__, 'module': modules.index(task.__module__), 'phase': (i for (i, phase) in enumerate(phases.order) if phase is task.phase).next()}
def mk_link(link):
for key in ['source', 'target', 'definer']:
link[key] = tasks.index(link[key])
return link
return {'phases': map(mk_phase, phases.order), 'modules': map(mk_module, modules), 'nodes': map(mk_node, tasks), 'links': map(mk_link, task_links)}
|
def generate_graph_data():
import bootstrapvz.common.tasks
import bootstrapvz.providers
import bootstrapvz.plugins
from bootstrapvz.base.tasklist import get_all_tasks
tasks = get_all_tasks([bootstrapvz.common.tasks, bootstrapvz.providers, bootstrapvz.plugins])
def distinct(seq):
seen = set()
return [x for x in seq if x not in seen and (not seen.add(x))]
seen = set()
modules = [x for x in [task.__module__ for task in tasks] if x not in seen and (not seen.add(x))]
task_links = []
task_links.extend([{'source': task, 'target': succ, 'definer': task} for task in tasks for succ in task.successors])
task_links.extend([{'source': pre, 'target': task, 'definer': task} for task in tasks for pre in task.predecessors])
def mk_phase(phase):
return {'name': phase.name, 'description': phase.description}
def mk_module(module):
return {'name': module}
from bootstrapvz.common import phases
def mk_node(task):
return {'name': task.__name__, 'module': modules.index(task.__module__), 'phase': (i for (i, phase) in enumerate(phases.order) if phase is task.phase).next()}
def mk_link(link):
for key in ['source', 'target', 'definer']:
link[key] = tasks.index(link[key])
return link
return {'phases': map(mk_phase, phases.order), 'modules': map(mk_module, modules), 'nodes': map(mk_node, tasks), 'links': map(mk_link, task_links)}
|
bootstrap-vz
|
positive
|
def count():
n = len(indices)
if n < 2:
return 1
<DeepExtract>
def count():
n = len(indices[1:])
if n < 2:
result = 1
result = wrapped_count(indices[1:][1:])
for (j, k) in valid_bonds(seq, indices[1:]):
(I1, I2) = partition(indices[1:], j, k)
result += wrapped_count(I1) * wrapped_count(I2)
result = result
key = str(indices[1:])
if not key in cache:
cache[key] = count()
result = cache[key]
</DeepExtract>
for (j, k) in valid_bonds(seq, indices):
(I1, I2) = partition(indices, j, k)
result += wrapped_count(I1) * wrapped_count(I2)
return result
|
def count():
n = len(indices)
if n < 2:
return 1
def count():
n = len(indices[1:])
if n < 2:
result = 1
result = wrapped_count(indices[1:][1:])
for (j, k) in valid_bonds(seq, indices[1:]):
(I1, I2) = partition(indices[1:], j, k)
result += wrapped_count(I1) * wrapped_count(I2)
result = result
key = str(indices[1:])
if not key in cache:
cache[key] = count()
result = cache[key]
for (j, k) in valid_bonds(seq, indices):
(I1, I2) = partition(indices, j, k)
result += wrapped_count(I1) * wrapped_count(I2)
return result
|
bioinformatics
|
positive
|
def buffer_update():
"""Updates our buffer with new lines."""
global pattern_tmpl, matched_lines, pattern, count, hilight, invert, exact
time_grep = now()
<DeepExtract>
buffer = weechat.buffer_search('python', SCRIPT_NAME)
if not buffer:
buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')
weechat.buffer_set(buffer, 'time_for_each_line', '0')
weechat.buffer_set(buffer, 'nicklist', '0')
weechat.buffer_set(buffer, 'title', title or 'grep output buffer')
weechat.buffer_set(buffer, 'localvar_set_no_log', '1')
elif title:
weechat.buffer_set(buffer, 'title', title)
buffer = buffer
</DeepExtract>
if get_config_boolean('clear_buffer'):
weechat.buffer_clear(buffer)
matched_lines.strip_separator()
len_total_lines = len(matched_lines)
<DeepExtract>
value = weechat.config_get_plugin('max_lines')
try:
max_lines = int(value)
except ValueError:
if value == '' and allow_empty_string:
max_lines = value
default = settings['max_lines']
error("Error while fetching config '%s'. Using default value '%s'." % ('max_lines', default))
error("'%s' is not a number." % value)
max_lines = int(default)
</DeepExtract>
if not count and len_total_lines > max_lines:
weechat.buffer_clear(buffer)
def _make_summary(log, lines, note):
return '%s matches "%s%s%s"%s in %s%s%s%s' % (lines.matches_count, color_summary, pattern_tmpl, color_info, invert and ' (inverted)' or '', color_summary, log, color_reset, note)
if count:
make_summary = lambda log, lines: _make_summary(log, lines, ' (not shown)')
else:
def make_summary(log, lines):
if lines.stripped_lines:
if lines:
note = ' (last %s lines shown)' % len(lines)
else:
note = ' (not shown)'
else:
note = ''
return _make_summary(log, lines, note)
global weechat_format
if hilight:
format_line = lambda s: '%s %s %s' % split_line(s)
else:
def format_line(s):
global nick_dict, weechat_format
<DeepExtract>
global weechat_format
if weechat_format and s.count('\t') >= 2:
(date, nick, msg) = s.split('\t', 2)
else:
weechat_format = False
(date, nick, msg) = ('', '', s)
if '\t' in msg:
msg = msg.replace('\t', ' ')
(date, nick, msg) = (date, nick, msg)
</DeepExtract>
if weechat_format:
try:
nick = nick_dict[nick]
except KeyError:
<DeepExtract>
if not nick:
nick_c = ''
wcolor = weechat.color
config_string = lambda s: weechat.config_string(weechat.config_get(s))
config_int = lambda s: weechat.config_integer(weechat.config_get(s))
prefix = config_string('irc.look.nick_prefix')
suffix = config_string('irc.look.nick_suffix')
prefix_c = suffix_c = wcolor(config_string('weechat.color.chat_delimiters'))
if nick[0] == prefix:
nick = nick[1:]
else:
prefix = prefix_c = ''
if nick[-1] == suffix:
nick = nick[:-1]
suffix = wcolor(color_delimiter) + suffix
else:
suffix = suffix_c = ''
modes = '@!+%'
if nick[0] in modes:
(mode, nick) = (nick[0], nick[1:])
mode_color = wcolor(config_string('weechat.color.nicklist_prefix%d' % (modes.find(mode) + 1)))
else:
mode = mode_color = ''
nick_color = ''
if nick:
nick_color = weechat.info_get('irc_nick_color', nick)
if not nick_color:
color_nicks_number = config_int('weechat.look.color_nicks_number')
idx = sum(map(ord, nick)) % color_nicks_number + 1
nick_color = wcolor(config_string('weechat.color.chat_nick_color%02d' % idx))
nick_c = ''.join((prefix_c, prefix, mode_color, mode, nick_color, nick, suffix_c, suffix))
</DeepExtract>
nick_dict[nick] = nick_c
nick = nick_c
return '%s%s %s%s %s' % (color_date, date, nick, color_reset, msg)
else:
return msg
prnt(buffer, '\n')
<DeepExtract>
if buffer is None:
buffer = buffer_create()
say('%s%s' % (color_info, 'Search for "%s%s%s"%s in %s%s%s.' % (color_summary, pattern_tmpl, color_info, invert and ' (inverted)' or '', color_summary, matched_lines, color_reset)), buffer)
if display and get_config_boolean('go_to_buffer'):
weechat.buffer_set(buffer, 'display', '1')
</DeepExtract>
if matched_lines.get_matches_count():
if count:
matched_lines_items = matched_lines.items_count()
else:
matched_lines_items = matched_lines.items()
matched_lines.get_last_lines(max_lines)
for (log, lines) in matched_lines_items:
if lines.matches_count:
if not count:
weechat_format = True
if exact:
lines.onlyUniq()
for line in lines:
if line == linesList._sep:
prnt(buffer, context_sep)
else:
if '\x00' in line:
<DeepExtract>
prnt(buffer, '%s%s %s' % (weechat.prefix('error'), script_nick, "Found garbage in log '%s', maybe it's corrupted" % log))
if weechat.config_get_plugin('debug'):
import traceback
if traceback.sys.exc_type:
trace = traceback.format_exc()
prnt('', trace)
</DeepExtract>
line = line.replace('\x00', '')
prnt_date_tags(buffer, 0, 'no_highlight', format_line(line))
if count or get_config_boolean('show_summary'):
<DeepExtract>
if lines.stripped_lines:
if lines:
note = ' (last %s lines shown)' % len(lines)
else:
note = ' (not shown)'
else:
note = ''
summary = _make_summary(log, lines, note)
</DeepExtract>
<DeepExtract>
if buffer is None:
buffer = buffer_create()
say('%s%s' % (color_info, summary), buffer)
if display and get_config_boolean('go_to_buffer'):
weechat.buffer_set(buffer, 'display', '1')
</DeepExtract>
if not count and lines:
prnt(buffer, '\n')
else:
<DeepExtract>
if buffer is None:
buffer = buffer_create()
say('%s%s' % (color_info, 'No matches found.'), buffer)
if display and get_config_boolean('go_to_buffer'):
weechat.buffer_set(buffer, 'display', '1')
</DeepExtract>
global time_start
time_end = now()
time_total = time_end - time_start
time_grep_pct = (time_grep - time_start) / time_total * 100
if not count and len_total_lines > max_lines:
note = ' (last %s lines shown)' % len(matched_lines)
else:
note = ''
title = '\'q\': close buffer | Search in %s%s%s %s matches%s | pattern "%s%s%s"%s %s | %.4f seconds (%.2f%%)' % (color_title, matched_lines, color_reset, matched_lines.get_matches_count(), note, color_title, pattern_tmpl, color_reset, invert and ' (inverted)' or '', format_options(), time_total, time_grep_pct)
weechat.buffer_set(buffer, 'title', title)
if get_config_boolean('go_to_buffer'):
weechat.buffer_set(buffer, 'display', '1')
del matched_lines
|
def buffer_update():
"""Updates our buffer with new lines."""
global pattern_tmpl, matched_lines, pattern, count, hilight, invert, exact
time_grep = now()
buffer = weechat.buffer_search('python', SCRIPT_NAME)
if not buffer:
buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')
weechat.buffer_set(buffer, 'time_for_each_line', '0')
weechat.buffer_set(buffer, 'nicklist', '0')
weechat.buffer_set(buffer, 'title', title or 'grep output buffer')
weechat.buffer_set(buffer, 'localvar_set_no_log', '1')
elif title:
weechat.buffer_set(buffer, 'title', title)
buffer = buffer
if get_config_boolean('clear_buffer'):
weechat.buffer_clear(buffer)
matched_lines.strip_separator()
len_total_lines = len(matched_lines)
value = weechat.config_get_plugin('max_lines')
try:
max_lines = int(value)
except ValueError:
if value == '' and allow_empty_string:
max_lines = value
default = settings['max_lines']
error("Error while fetching config '%s'. Using default value '%s'." % ('max_lines', default))
error("'%s' is not a number." % value)
max_lines = int(default)
if not count and len_total_lines > max_lines:
weechat.buffer_clear(buffer)
def _make_summary(log, lines, note):
return '%s matches "%s%s%s"%s in %s%s%s%s' % (lines.matches_count, color_summary, pattern_tmpl, color_info, invert and ' (inverted)' or '', color_summary, log, color_reset, note)
if count:
make_summary = lambda log, lines: _make_summary(log, lines, ' (not shown)')
else:
def make_summary(log, lines):
if lines.stripped_lines:
if lines:
note = ' (last %s lines shown)' % len(lines)
else:
note = ' (not shown)'
else:
note = ''
return _make_summary(log, lines, note)
global weechat_format
if hilight:
format_line = lambda s: '%s %s %s' % split_line(s)
else:
def format_line(s):
global nick_dict, weechat_format
global weechat_format
if weechat_format and s.count('\t') >= 2:
(date, nick, msg) = s.split('\t', 2)
else:
weechat_format = False
(date, nick, msg) = ('', '', s)
if '\t' in msg:
msg = msg.replace('\t', ' ')
(date, nick, msg) = (date, nick, msg)
if weechat_format:
try:
nick = nick_dict[nick]
except KeyError:
if not nick:
nick_c = ''
wcolor = weechat.color
config_string = lambda s: weechat.config_string(weechat.config_get(s))
config_int = lambda s: weechat.config_integer(weechat.config_get(s))
prefix = config_string('irc.look.nick_prefix')
suffix = config_string('irc.look.nick_suffix')
prefix_c = suffix_c = wcolor(config_string('weechat.color.chat_delimiters'))
if nick[0] == prefix:
nick = nick[1:]
else:
prefix = prefix_c = ''
if nick[-1] == suffix:
nick = nick[:-1]
suffix = wcolor(color_delimiter) + suffix
else:
suffix = suffix_c = ''
modes = '@!+%'
if nick[0] in modes:
(mode, nick) = (nick[0], nick[1:])
mode_color = wcolor(config_string('weechat.color.nicklist_prefix%d' % (modes.find(mode) + 1)))
else:
mode = mode_color = ''
nick_color = ''
if nick:
nick_color = weechat.info_get('irc_nick_color', nick)
if not nick_color:
color_nicks_number = config_int('weechat.look.color_nicks_number')
idx = sum(map(ord, nick)) % color_nicks_number + 1
nick_color = wcolor(config_string('weechat.color.chat_nick_color%02d' % idx))
nick_c = ''.join((prefix_c, prefix, mode_color, mode, nick_color, nick, suffix_c, suffix))
nick_dict[nick] = nick_c
nick = nick_c
return '%s%s %s%s %s' % (color_date, date, nick, color_reset, msg)
else:
return msg
prnt(buffer, '\n')
if buffer is None:
buffer = buffer_create()
say('%s%s' % (color_info, 'Search for "%s%s%s"%s in %s%s%s.' % (color_summary, pattern_tmpl, color_info, invert and ' (inverted)' or '', color_summary, matched_lines, color_reset)), buffer)
if display and get_config_boolean('go_to_buffer'):
weechat.buffer_set(buffer, 'display', '1')
if matched_lines.get_matches_count():
if count:
matched_lines_items = matched_lines.items_count()
else:
matched_lines_items = matched_lines.items()
matched_lines.get_last_lines(max_lines)
for (log, lines) in matched_lines_items:
if lines.matches_count:
if not count:
weechat_format = True
if exact:
lines.onlyUniq()
for line in lines:
if line == linesList._sep:
prnt(buffer, context_sep)
else:
if '\x00' in line:
prnt(buffer, '%s%s %s' % (weechat.prefix('error'), script_nick, "Found garbage in log '%s', maybe it's corrupted" % log))
if weechat.config_get_plugin('debug'):
import traceback
if traceback.sys.exc_type:
trace = traceback.format_exc()
prnt('', trace)
line = line.replace('\x00', '')
prnt_date_tags(buffer, 0, 'no_highlight', format_line(line))
if count or get_config_boolean('show_summary'):
if lines.stripped_lines:
if lines:
note = ' (last %s lines shown)' % len(lines)
else:
note = ' (not shown)'
else:
note = ''
summary = _make_summary(log, lines, note)
if buffer is None:
buffer = buffer_create()
say('%s%s' % (color_info, summary), buffer)
if display and get_config_boolean('go_to_buffer'):
weechat.buffer_set(buffer, 'display', '1')
if not count and lines:
prnt(buffer, '\n')
else:
if buffer is None:
buffer = buffer_create()
say('%s%s' % (color_info, 'No matches found.'), buffer)
if display and get_config_boolean('go_to_buffer'):
weechat.buffer_set(buffer, 'display', '1')
global time_start
time_end = now()
time_total = time_end - time_start
time_grep_pct = (time_grep - time_start) / time_total * 100
if not count and len_total_lines > max_lines:
note = ' (last %s lines shown)' % len(matched_lines)
else:
note = ''
title = '\'q\': close buffer | Search in %s%s%s %s matches%s | pattern "%s%s%s"%s %s | %.4f seconds (%.2f%%)' % (color_title, matched_lines, color_reset, matched_lines.get_matches_count(), note, color_title, pattern_tmpl, color_reset, invert and ' (inverted)' or '', format_options(), time_total, time_grep_pct)
weechat.buffer_set(buffer, 'title', title)
if get_config_boolean('go_to_buffer'):
weechat.buffer_set(buffer, 'display', '1')
del matched_lines
|
dotfiles
|
positive
|
def testModule_aliasInScope(self):
"""Tests that goog.module style aliases are supported."""
input_lines = ["goog.module('test.module');", "var AliasedClass = goog.require('goog.AliasedClass');", 'goog.scope(function() {', 'var x = new AliasedClass();', '});']
<DeepExtract>
(_, namespaces_info) = self._GetStartTokenAndNamespacesInfoForScript(input_lines, ['goog'])
namespaces_info = namespaces_info
</DeepExtract>
<DeepExtract>
line_text = "goog.require('" + 'goog.AliasedClass' + "');\n"
namespaceToken = testutil.TokenizeSource([line_text])
</DeepExtract>
self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken), 'AliasedClass should be marked as used')
|
def testModule_aliasInScope(self):
"""Tests that goog.module style aliases are supported."""
input_lines = ["goog.module('test.module');", "var AliasedClass = goog.require('goog.AliasedClass');", 'goog.scope(function() {', 'var x = new AliasedClass();', '});']
(_, namespaces_info) = self._GetStartTokenAndNamespacesInfoForScript(input_lines, ['goog'])
namespaces_info = namespaces_info
line_text = "goog.require('" + 'goog.AliasedClass' + "');\n"
namespaceToken = testutil.TokenizeSource([line_text])
self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken), 'AliasedClass should be marked as used')
|
closure-linter
|
positive
|
def convert_openioc_csv_to_openioc_csv_model(openioc_csv: str) -> OpenIOCCSV:
"""
Convert OpenIOC CSV into an OpenIOC CSV model.
:param openioc_csv: OpenIOC CSV.
:type openioc_csv: str
:return: OpenIOC CSV model.
:rtype: OpenIOCCSV
"""
<DeepExtract>
file_buffer = StringIO(openioc_csv)
csv_reader = csv.reader(file_buffer, delimiter=',', quotechar="'")
parsed_rows = []
next(csv_reader, None)
for row in csv_reader:
if not row:
continue
uid = row[_CSV_INDEX_UID]
publication = row[_CSV_INDEX_PUBLICATION]
indicator = row[_CSV_INDEX_INDICATOR]
detection_date = row[_CSV_INDEX_DETECTION_DATE]
indicator_type = row[_CSV_INDEX_INDICATOR_TYPE]
parsed_row = {'id': uid, 'publication': publication, 'indicator': indicator, 'detection_date': datetime.strptime(detection_date, _CSV_DETECTION_DATE_FORMAT), 'indicator_type': indicator_type}
parsed_rows.append(parsed_row)
openioc_data = {'indicators': parsed_rows}
</DeepExtract>
return OpenIOCCSV.parse_obj(openioc_data)
|
def convert_openioc_csv_to_openioc_csv_model(openioc_csv: str) -> OpenIOCCSV:
"""
Convert OpenIOC CSV into an OpenIOC CSV model.
:param openioc_csv: OpenIOC CSV.
:type openioc_csv: str
:return: OpenIOC CSV model.
:rtype: OpenIOCCSV
"""
file_buffer = StringIO(openioc_csv)
csv_reader = csv.reader(file_buffer, delimiter=',', quotechar="'")
parsed_rows = []
next(csv_reader, None)
for row in csv_reader:
if not row:
continue
uid = row[_CSV_INDEX_UID]
publication = row[_CSV_INDEX_PUBLICATION]
indicator = row[_CSV_INDEX_INDICATOR]
detection_date = row[_CSV_INDEX_DETECTION_DATE]
indicator_type = row[_CSV_INDEX_INDICATOR_TYPE]
parsed_row = {'id': uid, 'publication': publication, 'indicator': indicator, 'detection_date': datetime.strptime(detection_date, _CSV_DETECTION_DATE_FORMAT), 'indicator_type': indicator_type}
parsed_rows.append(parsed_row)
openioc_data = {'indicators': parsed_rows}
return OpenIOCCSV.parse_obj(openioc_data)
|
connectors
|
positive
|
def randomize_state_deneb(spec, state, stats, exit_fraction=0.1, slash_fraction=0.1):
<DeepExtract>
scenario_state = randomize_state_bellatrix(spec, state, stats, exit_fraction=exit_fraction, slash_fraction=slash_fraction)
scenario_state = scenario_state
</DeepExtract>
return scenario_state
|
def randomize_state_deneb(spec, state, stats, exit_fraction=0.1, slash_fraction=0.1):
scenario_state = randomize_state_bellatrix(spec, state, stats, exit_fraction=exit_fraction, slash_fraction=slash_fraction)
scenario_state = scenario_state
return scenario_state
|
eth2.0-specs
|
positive
|
def main(args):
if args['serve']:
threading.current_thread().name = 'main'
name = os.environ['NAME']
global node
node = Node(address=(name, PORT))
<DeepExtract>
global node
coinbase = prepare_coinbase(lookup_public_key('alice'), tx_id='abc123')
unmined_block = Block(txns=[coinbase], prev_id=None, nonce=0)
mined_block = mine_block(unmined_block)
node.blocks.append(mined_block)
node.update_utxo_set(coinbase)
</DeepExtract>
server_thread = threading.Thread(target=serve, name='server')
server_thread.start()
peers = [(p, PORT) for p in os.environ['PEERS'].split(',')]
for peer in peers:
node.connect(peer)
<DeepExtract>
miner_public_key = lookup_private_key(name).get_verifying_key()
</DeepExtract>
miner_thread = threading.Thread(target=mine_forever, args=[miner_public_key], name='miner')
miner_thread.start()
elif args['ping']:
<DeepExtract>
i = int(args['--node'][-1])
port = PORT + i
address = ('localhost', port)
</DeepExtract>
<DeepExtract>
message = prepare_message('ping', '')
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(address)
s.sendall(serialize(message))
if response:
return deserialize(s.recv(5000))
</DeepExtract>
elif args['balance']:
<DeepExtract>
public_key = lookup_private_key(args['<name>']).get_verifying_key()
</DeepExtract>
<DeepExtract>
i = int(args['--node'][-1])
port = PORT + i
address = ('localhost', port)
</DeepExtract>
<DeepExtract>
message = prepare_message('balance', public_key)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(address)
s.sendall(serialize(message))
if True:
response = deserialize(s.recv(5000))
</DeepExtract>
print(response['data'])
elif args['tx']:
<DeepExtract>
exponent = {'alice': 1, 'bob': 2, 'node0': 3, 'node1': 4, 'node2': 5}[args['<from>']]
sender_private_key = SigningKey.from_secret_exponent(exponent, curve=SECP256k1)
</DeepExtract>
sender_public_key = sender_private_key.get_verifying_key()
<DeepExtract>
exponent = {'alice': 1, 'bob': 2, 'node0': 3, 'node1': 4, 'node2': 5}[args['<to>']]
recipient_private_key = SigningKey.from_secret_exponent(exponent, curve=SECP256k1)
</DeepExtract>
recipient_public_key = recipient_private_key.get_verifying_key()
amount = int(args['<amount>'])
<DeepExtract>
i = int(args['--node'][-1])
port = PORT + i
address = ('localhost', port)
</DeepExtract>
<DeepExtract>
message = prepare_message('utxos', sender_public_key)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(address)
s.sendall(serialize(message))
if True:
response = deserialize(s.recv(5000))
</DeepExtract>
utxos = response['data']
<DeepExtract>
sender_public_key = sender_private_key.get_verifying_key()
tx_ins = []
tx_in_sum = 0
for tx_out in utxos:
tx_ins.append(TxIn(tx_id=tx_out.tx_id, index=tx_out.index, signature=None))
tx_in_sum += tx_out.amount
if tx_in_sum > amount:
break
assert tx_in_sum >= amount
tx_id = uuid.uuid4()
change = tx_in_sum - amount
tx_outs = [TxOut(tx_id=tx_id, index=0, amount=amount, public_key=recipient_public_key), TxOut(tx_id=tx_id, index=1, amount=change, public_key=sender_public_key)]
tx = Tx(id=tx_id, tx_ins=tx_ins, tx_outs=tx_outs)
for i in range(len(tx.tx_ins)):
tx.sign_input(i, sender_private_key)
tx = tx
</DeepExtract>
<DeepExtract>
message = prepare_message('tx', tx)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(address)
s.sendall(serialize(message))
if response:
return deserialize(s.recv(5000))
</DeepExtract>
else:
print('Invalid command')
|
def main(args):
if args['serve']:
threading.current_thread().name = 'main'
name = os.environ['NAME']
global node
node = Node(address=(name, PORT))
global node
coinbase = prepare_coinbase(lookup_public_key('alice'), tx_id='abc123')
unmined_block = Block(txns=[coinbase], prev_id=None, nonce=0)
mined_block = mine_block(unmined_block)
node.blocks.append(mined_block)
node.update_utxo_set(coinbase)
server_thread = threading.Thread(target=serve, name='server')
server_thread.start()
peers = [(p, PORT) for p in os.environ['PEERS'].split(',')]
for peer in peers:
node.connect(peer)
miner_public_key = lookup_private_key(name).get_verifying_key()
miner_thread = threading.Thread(target=mine_forever, args=[miner_public_key], name='miner')
miner_thread.start()
elif args['ping']:
i = int(args['--node'][-1])
port = PORT + i
address = ('localhost', port)
message = prepare_message('ping', '')
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(address)
s.sendall(serialize(message))
if response:
return deserialize(s.recv(5000))
elif args['balance']:
public_key = lookup_private_key(args['<name>']).get_verifying_key()
i = int(args['--node'][-1])
port = PORT + i
address = ('localhost', port)
message = prepare_message('balance', public_key)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(address)
s.sendall(serialize(message))
if True:
response = deserialize(s.recv(5000))
print(response['data'])
elif args['tx']:
exponent = {'alice': 1, 'bob': 2, 'node0': 3, 'node1': 4, 'node2': 5}[args['<from>']]
sender_private_key = SigningKey.from_secret_exponent(exponent, curve=SECP256k1)
sender_public_key = sender_private_key.get_verifying_key()
exponent = {'alice': 1, 'bob': 2, 'node0': 3, 'node1': 4, 'node2': 5}[args['<to>']]
recipient_private_key = SigningKey.from_secret_exponent(exponent, curve=SECP256k1)
recipient_public_key = recipient_private_key.get_verifying_key()
amount = int(args['<amount>'])
i = int(args['--node'][-1])
port = PORT + i
address = ('localhost', port)
message = prepare_message('utxos', sender_public_key)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(address)
s.sendall(serialize(message))
if True:
response = deserialize(s.recv(5000))
utxos = response['data']
sender_public_key = sender_private_key.get_verifying_key()
tx_ins = []
tx_in_sum = 0
for tx_out in utxos:
tx_ins.append(TxIn(tx_id=tx_out.tx_id, index=tx_out.index, signature=None))
tx_in_sum += tx_out.amount
if tx_in_sum > amount:
break
assert tx_in_sum >= amount
tx_id = uuid.uuid4()
change = tx_in_sum - amount
tx_outs = [TxOut(tx_id=tx_id, index=0, amount=amount, public_key=recipient_public_key), TxOut(tx_id=tx_id, index=1, amount=change, public_key=sender_public_key)]
tx = Tx(id=tx_id, tx_ins=tx_ins, tx_outs=tx_outs)
for i in range(len(tx.tx_ins)):
tx.sign_input(i, sender_private_key)
tx = tx
message = prepare_message('tx', tx)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(address)
s.sendall(serialize(message))
if response:
return deserialize(s.recv(5000))
else:
print('Invalid command')
|
digital-cash
|
positive
|
def kitti_res_to_nuscenes(self, meta: Dict[str, bool]=None) -> None:
"""
Converts a KITTI detection result to the nuScenes detection results format.
:param meta: Meta data describing the method used to generate the result. See nuscenes.org/object-detection.
"""
if meta is None:
meta = {'use_camera': False, 'use_lidar': True, 'use_radar': False, 'use_map': False, 'use_external': False}
results = {}
kitti = KittiDB(root=self.nusc_kitti_dir, splits=(self.split,))
split_logs = create_splits_logs(self.split, self.nusc)
<DeepExtract>
samples = []
for sample in self.nusc.sample:
scene = self.nusc.get('scene', sample['scene_token'])
log = self.nusc.get('log', scene['log_token'])
logfile = log['logfile']
if logfile in split_logs:
samples.append(sample['token'])
sample_tokens = samples
</DeepExtract>
sample_tokens = sample_tokens[:self.image_count]
for sample_token in sample_tokens:
kitti_token = '%s_%s' % (self.split, sample_token)
boxes = kitti.get_boxes(token=kitti_token)
sample_results = [self._box_to_sample_result(sample_token, box) for box in boxes]
results[sample_token] = sample_results
submission = {'meta': meta, 'results': results}
submission_path = os.path.join(self.nusc_kitti_dir, 'submission.json')
print('Writing submission to: %s' % submission_path)
with open(submission_path, 'w') as f:
json.dump(submission, f, indent=2)
|
def kitti_res_to_nuscenes(self, meta: Dict[str, bool]=None) -> None:
"""
Converts a KITTI detection result to the nuScenes detection results format.
:param meta: Meta data describing the method used to generate the result. See nuscenes.org/object-detection.
"""
if meta is None:
meta = {'use_camera': False, 'use_lidar': True, 'use_radar': False, 'use_map': False, 'use_external': False}
results = {}
kitti = KittiDB(root=self.nusc_kitti_dir, splits=(self.split,))
split_logs = create_splits_logs(self.split, self.nusc)
samples = []
for sample in self.nusc.sample:
scene = self.nusc.get('scene', sample['scene_token'])
log = self.nusc.get('log', scene['log_token'])
logfile = log['logfile']
if logfile in split_logs:
samples.append(sample['token'])
sample_tokens = samples
sample_tokens = sample_tokens[:self.image_count]
for sample_token in sample_tokens:
kitti_token = '%s_%s' % (self.split, sample_token)
boxes = kitti.get_boxes(token=kitti_token)
sample_results = [self._box_to_sample_result(sample_token, box) for box in boxes]
results[sample_token] = sample_results
submission = {'meta': meta, 'results': results}
submission_path = os.path.join(self.nusc_kitti_dir, 'submission.json')
print('Writing submission to: %s' % submission_path)
with open(submission_path, 'w') as f:
json.dump(submission, f, indent=2)
|
CenterFusion
|
positive
|
def _get_bucket_if_exist(self):
<DeepExtract>
client = NotImplementedError
</DeepExtract>
<DeepExtract>
bucket = NotImplementedError
</DeepExtract>
list_buckets_names = [bucket.name for bucket in self._list_buckets(client)]
try:
assert self._bucket_name in list_buckets_names
except AssertionError as err:
raise Exception(f'{self._bucket_name} bucket does not exist. available buckets are {list_buckets_names}').with_traceback(err.__traceback__)
return bucket
|
def _get_bucket_if_exist(self):
client = NotImplementedError
bucket = NotImplementedError
list_buckets_names = [bucket.name for bucket in self._list_buckets(client)]
try:
assert self._bucket_name in list_buckets_names
except AssertionError as err:
raise Exception(f'{self._bucket_name} bucket does not exist. available buckets are {list_buckets_names}').with_traceback(err.__traceback__)
return bucket
|
artefactory-connectors-kit
|
positive
|
def human_play(q):
computer_player = Agent(q, learning_rate=0.0, discount=1.0, temperature=0.0)
human = random.choice([Cell.X, Cell.O])
<DeepExtract>
game_state = State({}, Cell.X)
</DeepExtract>
<DeepExtract>
rows = 'ABC'
for row in (0, 1, 2):
print(rows[row] + ' ' + ' | '.join((game_state.cell(row, col).value for col in (0, 1, 2))))
print(' 1 2 3')
print('\n')
</DeepExtract>
while not game_state.is_over():
if game_state.next_to_play == human:
human_move_txt = input('Your move? ')
row = 'ABC'.index(human_move_txt[0])
col = int(human_move_txt[1]) - 1
move = (row, col)
else:
move = computer_player.select_move(game_state)
game_state = game_state.apply_move(*move)
<DeepExtract>
rows = 'ABC'
for row in (0, 1, 2):
print(rows[row] + ' ' + ' | '.join((game_state.cell(row, col).value for col in (0, 1, 2))))
print(' 1 2 3')
print('\n')
</DeepExtract>
winner = game_state.winner()
if winner == Cell.EMPTY:
print("It's a draw")
else:
print('%s wins!' % winner.value)
|
def human_play(q):
computer_player = Agent(q, learning_rate=0.0, discount=1.0, temperature=0.0)
human = random.choice([Cell.X, Cell.O])
game_state = State({}, Cell.X)
rows = 'ABC'
for row in (0, 1, 2):
print(rows[row] + ' ' + ' | '.join((game_state.cell(row, col).value for col in (0, 1, 2))))
print(' 1 2 3')
print('\n')
while not game_state.is_over():
if game_state.next_to_play == human:
human_move_txt = input('Your move? ')
row = 'ABC'.index(human_move_txt[0])
col = int(human_move_txt[1]) - 1
move = (row, col)
else:
move = computer_player.select_move(game_state)
game_state = game_state.apply_move(*move)
rows = 'ABC'
for row in (0, 1, 2):
print(rows[row] + ' ' + ' | '.join((game_state.cell(row, col).value for col in (0, 1, 2))))
print(' 1 2 3')
print('\n')
winner = game_state.winner()
if winner == Cell.EMPTY:
print("It's a draw")
else:
print('%s wins!' % winner.value)
|
deep_learning_and_the_game_of_go
|
positive
|
def _gregorian_year_month_day_format(self, date_list=None, original_list=None):
"""
Detects date in the following format
format: <year><separator><month><separator><day>
where each part is in one of the formats given against them
day: d, dd
month: m, mm
year: yy, yyyy
separator: "/", "-", "."
Two-character years are assumed to belong to the 21st century - 20xx.
Only years between 1900 and 2099 are detected
Few valid examples:
"31/1/31", "97/2/21", "2017/12/01"
Args:
date_list: Optional, list to store dictionaries of detected dates
original_list: Optional, list to store corresponding substrings of given text which were detected as
date entities
Returns:
A tuple of two lists with first list containing the detected date entities and second list containing their
corresponding substrings in the given text.
"""
if original_list is None:
original_list = []
if date_list is None:
date_list = []
regex_pattern = re.compile('\\b(((?:20|19)[0-9]{2})\\s?[/\\-\\.]\\s?(1[0-2]|0?[1-9])\\s?[/\\-\\.]\\s?([12][0-9]|3[01]|0?[1-9]))\\W')
patterns = regex_pattern.findall(self.processed_text.lower())
for pattern in patterns:
original = pattern[0]
dd = pattern[3]
mm = pattern[2]
<DeepExtract>
past_regex = re.compile('birth|bday|dob|born')
present_regex = None
future_regex = None
this_century = int(str(self.now_date.year)[:2])
if len(pattern[1]) == 2:
if (self.bot_message and past_regex.search(self.bot_message) or self.past_date_referenced is True) and int(pattern[1]) > int(str(self.now_date.year)[2:]):
yy = str(this_century - 1) + pattern[1]
elif present_regex and present_regex.search(self.bot_message):
yy = str(this_century) + pattern[1]
elif future_regex and future_regex.search(self.bot_message):
yy = str(this_century + 1) + pattern[1]
if len(pattern[1]) == 2:
yy = str(this_century) + pattern[1]
yy = pattern[1]
</DeepExtract>
date = {'dd': int(dd), 'mm': int(mm), 'yy': int(yy), 'type': TYPE_EXACT}
date_list.append(date)
original_list.append(original)
return (date_list, original_list)
|
def _gregorian_year_month_day_format(self, date_list=None, original_list=None):
"""
Detects date in the following format
format: <year><separator><month><separator><day>
where each part is in one of the formats given against them
day: d, dd
month: m, mm
year: yy, yyyy
separator: "/", "-", "."
Two-character years are assumed to belong to the 21st century - 20xx.
Only years between 1900 and 2099 are detected
Few valid examples:
"31/1/31", "97/2/21", "2017/12/01"
Args:
date_list: Optional, list to store dictionaries of detected dates
original_list: Optional, list to store corresponding substrings of given text which were detected as
date entities
Returns:
A tuple of two lists with first list containing the detected date entities and second list containing their
corresponding substrings in the given text.
"""
if original_list is None:
original_list = []
if date_list is None:
date_list = []
regex_pattern = re.compile('\\b(((?:20|19)[0-9]{2})\\s?[/\\-\\.]\\s?(1[0-2]|0?[1-9])\\s?[/\\-\\.]\\s?([12][0-9]|3[01]|0?[1-9]))\\W')
patterns = regex_pattern.findall(self.processed_text.lower())
for pattern in patterns:
original = pattern[0]
dd = pattern[3]
mm = pattern[2]
past_regex = re.compile('birth|bday|dob|born')
present_regex = None
future_regex = None
this_century = int(str(self.now_date.year)[:2])
if len(pattern[1]) == 2:
if (self.bot_message and past_regex.search(self.bot_message) or self.past_date_referenced is True) and int(pattern[1]) > int(str(self.now_date.year)[2:]):
yy = str(this_century - 1) + pattern[1]
elif present_regex and present_regex.search(self.bot_message):
yy = str(this_century) + pattern[1]
elif future_regex and future_regex.search(self.bot_message):
yy = str(this_century + 1) + pattern[1]
if len(pattern[1]) == 2:
yy = str(this_century) + pattern[1]
yy = pattern[1]
date = {'dd': int(dd), 'mm': int(mm), 'yy': int(yy), 'type': TYPE_EXACT}
date_list.append(date)
original_list.append(original)
return (date_list, original_list)
|
chatbot_ner
|
positive
|
def add_gauss(self, F0=1.0, FWHM_maj=50.0 * RADPERUAS, FWHM_min=50.0 * RADPERUAS, PA=0.0, x0=0.0, y0=0.0, pol_frac=0.0, pol_evpa=0.0, cpol_frac=0.0):
"""Add an anisotropic Gaussian model.
Args:
F0 (float): The total flux of the Gaussian (Jy)
FWHM_maj (float): The FWHM of the Gaussian major axis (radians)
FWHM_min (float): The FWHM of the Gaussian minor axis (radians)
PA (float): Position angle of the major axis, east of north (radians)
x0 (float): The x-coordinate (radians)
y0 (float): The y-coordinate (radians)
Returns:
(Model): Updated Model
"""
<DeepExtract>
out = Model(ra=self.ra, dec=self.dec, pa=self.pa, polrep=self.polrep, pol_prim=self.pol_prim, rf=self.rf, source=self.source, mjd=self.mjd, time=self.time)
out.models = copy.deepcopy(self.models)
out.params = copy.deepcopy(self.params.copy())
out = out
</DeepExtract>
out.models.append('gauss')
out.params.append({'F0': F0, 'FWHM_maj': FWHM_maj, 'FWHM_min': FWHM_min, 'PA': PA, 'x0': x0, 'y0': y0, 'pol_frac': pol_frac, 'pol_evpa': pol_evpa, 'cpol_frac': cpol_frac})
return out
|
def add_gauss(self, F0=1.0, FWHM_maj=50.0 * RADPERUAS, FWHM_min=50.0 * RADPERUAS, PA=0.0, x0=0.0, y0=0.0, pol_frac=0.0, pol_evpa=0.0, cpol_frac=0.0):
"""Add an anisotropic Gaussian model.
Args:
F0 (float): The total flux of the Gaussian (Jy)
FWHM_maj (float): The FWHM of the Gaussian major axis (radians)
FWHM_min (float): The FWHM of the Gaussian minor axis (radians)
PA (float): Position angle of the major axis, east of north (radians)
x0 (float): The x-coordinate (radians)
y0 (float): The y-coordinate (radians)
Returns:
(Model): Updated Model
"""
out = Model(ra=self.ra, dec=self.dec, pa=self.pa, polrep=self.polrep, pol_prim=self.pol_prim, rf=self.rf, source=self.source, mjd=self.mjd, time=self.time)
out.models = copy.deepcopy(self.models)
out.params = copy.deepcopy(self.params.copy())
out = out
out.models.append('gauss')
out.params.append({'F0': F0, 'FWHM_maj': FWHM_maj, 'FWHM_min': FWHM_min, 'PA': PA, 'x0': x0, 'y0': y0, 'pol_frac': pol_frac, 'pol_evpa': pol_evpa, 'cpol_frac': cpol_frac})
return out
|
eht-imaging
|
positive
|
def get_rcnn_batch(roidb, cfg):
"""
return a dict of multiple images
:param roidb: a list of dict, whose length controls batch size
['images', 'flipped'] + ['gt_boxes', 'boxes', 'gt_overlap'] => ['bbox_targets']
:return: data, label
"""
num_images = len(roidb)
(imgs, roidb) = get_image(roidb, cfg)
im_array = tensor_vstack(imgs)
assert cfg.TRAIN.BATCH_ROIS == -1 or cfg.TRAIN.BATCH_ROIS % cfg.TRAIN.BATCH_IMAGES == 0, 'BATCHIMAGES {} must divide BATCH_ROIS {}'.format(cfg.TRAIN.BATCH_IMAGES, cfg.TRAIN.BATCH_ROIS)
if cfg.TRAIN.BATCH_ROIS == -1:
rois_per_image = np.sum([iroidb['boxes'].shape[0] for iroidb in roidb])
fg_rois_per_image = rois_per_image
else:
rois_per_image = cfg.TRAIN.BATCH_ROIS / cfg.TRAIN.BATCH_IMAGES
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image).astype(int)
rois_array = list()
labels_array = list()
bbox_targets_array = list()
bbox_weights_array = list()
for im_i in range(num_images):
roi_rec = roidb[im_i]
num_classes = roi_rec['gt_overlaps'].shape[1]
rois = roi_rec['boxes']
labels = roi_rec['max_classes']
overlaps = roi_rec['max_overlaps']
bbox_targets = roi_rec['bbox_targets']
<DeepExtract>
if labels is None:
overlaps = bbox_overlaps(rois[:, 1:].astype(np.float), gt_boxes[:, :4].astype(np.float))
gt_assignment = overlaps.argmax(axis=1)
overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
fg_indexes = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_indexes.size)
if len(fg_indexes) > fg_rois_per_this_image:
fg_indexes = npr.choice(fg_indexes, size=fg_rois_per_this_image, replace=False)
bg_indexes = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_indexes.size)
if len(bg_indexes) > bg_rois_per_this_image:
bg_indexes = npr.choice(bg_indexes, size=bg_rois_per_this_image, replace=False)
keep_indexes = np.append(fg_indexes, bg_indexes)
while keep_indexes.shape[0] < rois_per_image:
gap = np.minimum(len(rois), rois_per_image - keep_indexes.shape[0])
gap_indexes = npr.choice(range(len(rois)), size=gap, replace=False)
keep_indexes = np.append(keep_indexes, gap_indexes)
labels = labels[keep_indexes]
labels[fg_rois_per_this_image:] = 0
rois = rois[keep_indexes]
if bbox_targets is not None:
bbox_target_data = bbox_targets[keep_indexes, :]
else:
targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4])
if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
targets = (targets - np.array(cfg.TRAIN.BBOX_MEANS)) / np.array(cfg.TRAIN.BBOX_STDS)
bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
(bbox_targets, bbox_weights) = expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)
(im_rois, labels, bbox_targets, bbox_weights) = (rois, labels, bbox_targets, bbox_weights)
</DeepExtract>
rois = im_rois
batch_index = im_i * np.ones((rois.shape[0], 1))
rois_array_this_image = np.hstack((batch_index, rois))
rois_array.append(rois_array_this_image)
labels_array.append(labels)
bbox_targets_array.append(bbox_targets)
bbox_weights_array.append(bbox_weights)
rois_array = np.array(rois_array)
labels_array = np.array(labels_array)
bbox_targets_array = np.array(bbox_targets_array)
bbox_weights_array = np.array(bbox_weights_array)
data = {'data': im_array, 'rois': rois_array}
label = {'label': labels_array, 'bbox_target': bbox_targets_array, 'bbox_weight': bbox_weights_array}
return (data, label)
|
def get_rcnn_batch(roidb, cfg):
"""
return a dict of multiple images
:param roidb: a list of dict, whose length controls batch size
['images', 'flipped'] + ['gt_boxes', 'boxes', 'gt_overlap'] => ['bbox_targets']
:return: data, label
"""
num_images = len(roidb)
(imgs, roidb) = get_image(roidb, cfg)
im_array = tensor_vstack(imgs)
assert cfg.TRAIN.BATCH_ROIS == -1 or cfg.TRAIN.BATCH_ROIS % cfg.TRAIN.BATCH_IMAGES == 0, 'BATCHIMAGES {} must divide BATCH_ROIS {}'.format(cfg.TRAIN.BATCH_IMAGES, cfg.TRAIN.BATCH_ROIS)
if cfg.TRAIN.BATCH_ROIS == -1:
rois_per_image = np.sum([iroidb['boxes'].shape[0] for iroidb in roidb])
fg_rois_per_image = rois_per_image
else:
rois_per_image = cfg.TRAIN.BATCH_ROIS / cfg.TRAIN.BATCH_IMAGES
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image).astype(int)
rois_array = list()
labels_array = list()
bbox_targets_array = list()
bbox_weights_array = list()
for im_i in range(num_images):
roi_rec = roidb[im_i]
num_classes = roi_rec['gt_overlaps'].shape[1]
rois = roi_rec['boxes']
labels = roi_rec['max_classes']
overlaps = roi_rec['max_overlaps']
bbox_targets = roi_rec['bbox_targets']
if labels is None:
overlaps = bbox_overlaps(rois[:, 1:].astype(np.float), gt_boxes[:, :4].astype(np.float))
gt_assignment = overlaps.argmax(axis=1)
overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
fg_indexes = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_indexes.size)
if len(fg_indexes) > fg_rois_per_this_image:
fg_indexes = npr.choice(fg_indexes, size=fg_rois_per_this_image, replace=False)
bg_indexes = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_indexes.size)
if len(bg_indexes) > bg_rois_per_this_image:
bg_indexes = npr.choice(bg_indexes, size=bg_rois_per_this_image, replace=False)
keep_indexes = np.append(fg_indexes, bg_indexes)
while keep_indexes.shape[0] < rois_per_image:
gap = np.minimum(len(rois), rois_per_image - keep_indexes.shape[0])
gap_indexes = npr.choice(range(len(rois)), size=gap, replace=False)
keep_indexes = np.append(keep_indexes, gap_indexes)
labels = labels[keep_indexes]
labels[fg_rois_per_this_image:] = 0
rois = rois[keep_indexes]
if bbox_targets is not None:
bbox_target_data = bbox_targets[keep_indexes, :]
else:
targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4])
if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
targets = (targets - np.array(cfg.TRAIN.BBOX_MEANS)) / np.array(cfg.TRAIN.BBOX_STDS)
bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
(bbox_targets, bbox_weights) = expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)
(im_rois, labels, bbox_targets, bbox_weights) = (rois, labels, bbox_targets, bbox_weights)
rois = im_rois
batch_index = im_i * np.ones((rois.shape[0], 1))
rois_array_this_image = np.hstack((batch_index, rois))
rois_array.append(rois_array_this_image)
labels_array.append(labels)
bbox_targets_array.append(bbox_targets)
bbox_weights_array.append(bbox_weights)
rois_array = np.array(rois_array)
labels_array = np.array(labels_array)
bbox_targets_array = np.array(bbox_targets_array)
bbox_weights_array = np.array(bbox_weights_array)
data = {'data': im_array, 'rois': rois_array}
label = {'label': labels_array, 'bbox_target': bbox_targets_array, 'bbox_weight': bbox_weights_array}
return (data, label)
|
Accel
|
positive
|
@size.setter
def size(self, value):
"""Sets the size in bytes of the inode's file."""
self._size = value
<DeepExtract>
self._fs._writeToBlock(self._tableBid, self._inodeTableOffset + 4, pack('<I', self._size & 4294967295))
</DeepExtract>
if self._superblock.revisionMajor > 0 and self._mode & 32768 != 0:
<DeepExtract>
self._fs._writeToBlock(self._tableBid, self._inodeTableOffset + 108, pack('<I', self._size >> 32))
</DeepExtract>
|
@size.setter
def size(self, value):
"""Sets the size in bytes of the inode's file."""
self._size = value
self._fs._writeToBlock(self._tableBid, self._inodeTableOffset + 4, pack('<I', self._size & 4294967295))
if self._superblock.revisionMajor > 0 and self._mode & 32768 != 0:
self._fs._writeToBlock(self._tableBid, self._inodeTableOffset + 108, pack('<I', self._size >> 32))
|
cyberstakes-writeps-2018
|
positive
|
def set_endianness(self, endianness='little'):
req = ['-gdb-set', 'endian', '%s' % endianness]
<DeepExtract>
token = self._communicator.get_token()
req = [req] if isinstance(req, str) else req
req = str(token) + ' '.join(req)
self.log.debug('Sending request: %s' % req)
self._gdbmi.write(req, read_response=False, timeout_sec=0)
try:
response = self._communicator.get_sync_response(token, timeout=timeout)
ret = True if response['message'] == GDB_PROT_DONE else False
except:
response = None
ret = None
(ret, resp) = (ret, response)
</DeepExtract>
self.log.debug('Attempt to set endianness of the target. Received: %s' % resp)
return ret
|
def set_endianness(self, endianness='little'):
req = ['-gdb-set', 'endian', '%s' % endianness]
token = self._communicator.get_token()
req = [req] if isinstance(req, str) else req
req = str(token) + ' '.join(req)
self.log.debug('Sending request: %s' % req)
self._gdbmi.write(req, read_response=False, timeout_sec=0)
try:
response = self._communicator.get_sync_response(token, timeout=timeout)
ret = True if response['message'] == GDB_PROT_DONE else False
except:
response = None
ret = None
(ret, resp) = (ret, response)
self.log.debug('Attempt to set endianness of the target. Received: %s' % resp)
return ret
|
avatar2
|
positive
|
def normalize_opt(opt: str, ctx: t.Optional['Context']) -> str:
if ctx is None or ctx.token_normalize_func is None:
return opt
<DeepExtract>
first = opt[:1]
if first.isalnum():
(prefix, opt) = ('', opt)
if opt[1:2] == first:
(prefix, opt) = (opt[:2], opt[2:])
(prefix, opt) = (first, opt[1:])
</DeepExtract>
return f'{prefix}{ctx.token_normalize_func(opt)}'
|
def normalize_opt(opt: str, ctx: t.Optional['Context']) -> str:
if ctx is None or ctx.token_normalize_func is None:
return opt
first = opt[:1]
if first.isalnum():
(prefix, opt) = ('', opt)
if opt[1:2] == first:
(prefix, opt) = (opt[:2], opt[2:])
(prefix, opt) = (first, opt[1:])
return f'{prefix}{ctx.token_normalize_func(opt)}'
|
click
|
positive
|
def dump_getdist(self):
"""Writes the GetDist format point."""
if not self.output:
return
<DeepExtract>
lines = []
if weight is not None:
lines.append(' weight = %s' % weight)
if self.minimum['minuslogpost'] is not None:
lines.append(' -log(Like) = %s' % self.minimum['minuslogpost'])
lines.append(' chi-sq = %s' % (2 * self.minimum['minuslogpost']))
lines.append('')
labels = self.model.parameterization.labels()
label_list = list(labels)
if hasattr(self.minimum, 'chi2_names'):
label_list += self.minimum.chi2_names
width = max((len(lab) for lab in label_list)) + 2
def add_section(pars):
for (p, val) in pars:
lab = labels.get(p, p)
num = label_list.index(p) + 1
if isinstance(val, (float, np.floating)) and len(str(val)) > 10:
lines.append('%5d %-17.9e %-*s %s' % (num, val, width, p, lab))
else:
lines.append('%5d %-17s %-*s %s' % (num, val, width, p, lab))
add_section([(p, self.minimum[p]) for p in self.model.parameterization.sampled_params()])
lines.append('')
add_section([[p, value] for (p, value) in self.model.parameterization.constant_params().items()])
lines.append('')
add_section([[p, self.minimum[p]] for p in self.model.parameterization.derived_params()])
if hasattr(self.minimum, 'chi2_names'):
labels.update({p: '\\chi^2_{\\rm %s}' % undo_chi2_name(p).replace('_', '\\ ') for p in self.minimum.chi2_names})
add_section([[chi2, self.minimum[chi2]] for chi2 in self.minimum.chi2_names])
getdist_bf = '\n'.join(lines)
</DeepExtract>
out_filename = os.path.join(self.output.folder, self.output.prefix + getdist_ext_ignore_prior[self.ignore_prior])
with open(out_filename, 'w', encoding='utf-8') as f:
f.write(getdist_bf)
|
def dump_getdist(self):
"""Writes the GetDist format point."""
if not self.output:
return
lines = []
if weight is not None:
lines.append(' weight = %s' % weight)
if self.minimum['minuslogpost'] is not None:
lines.append(' -log(Like) = %s' % self.minimum['minuslogpost'])
lines.append(' chi-sq = %s' % (2 * self.minimum['minuslogpost']))
lines.append('')
labels = self.model.parameterization.labels()
label_list = list(labels)
if hasattr(self.minimum, 'chi2_names'):
label_list += self.minimum.chi2_names
width = max((len(lab) for lab in label_list)) + 2
def add_section(pars):
for (p, val) in pars:
lab = labels.get(p, p)
num = label_list.index(p) + 1
if isinstance(val, (float, np.floating)) and len(str(val)) > 10:
lines.append('%5d %-17.9e %-*s %s' % (num, val, width, p, lab))
else:
lines.append('%5d %-17s %-*s %s' % (num, val, width, p, lab))
add_section([(p, self.minimum[p]) for p in self.model.parameterization.sampled_params()])
lines.append('')
add_section([[p, value] for (p, value) in self.model.parameterization.constant_params().items()])
lines.append('')
add_section([[p, self.minimum[p]] for p in self.model.parameterization.derived_params()])
if hasattr(self.minimum, 'chi2_names'):
labels.update({p: '\\chi^2_{\\rm %s}' % undo_chi2_name(p).replace('_', '\\ ') for p in self.minimum.chi2_names})
add_section([[chi2, self.minimum[chi2]] for chi2 in self.minimum.chi2_names])
getdist_bf = '\n'.join(lines)
out_filename = os.path.join(self.output.folder, self.output.prefix + getdist_ext_ignore_prior[self.ignore_prior])
with open(out_filename, 'w', encoding='utf-8') as f:
f.write(getdist_bf)
|
cobaya
|
positive
|
@register_model
def mixnet_m(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
"""Creates a MixNet Medium model.
"""
default_cfg = default_cfgs['mixnet_m']
<DeepExtract>
arch_def = [['ds_r1_k3_s1_e1_c24'], ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw']]
model = GenEfficientNet(_decode_arch_def(arch_def), num_classes=num_classes, stem_size=24, num_features=1536, channel_multiplier=1.0, bn_args=_resolve_bn_args(kwargs), act_fn=F.relu, **kwargs)
model = model
</DeepExtract>
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfg, num_classes, in_chans)
return model
|
@register_model
def mixnet_m(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
"""Creates a MixNet Medium model.
"""
default_cfg = default_cfgs['mixnet_m']
arch_def = [['ds_r1_k3_s1_e1_c24'], ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw']]
model = GenEfficientNet(_decode_arch_def(arch_def), num_classes=num_classes, stem_size=24, num_features=1536, channel_multiplier=1.0, bn_args=_resolve_bn_args(kwargs), act_fn=F.relu, **kwargs)
model = model
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfg, num_classes, in_chans)
return model
|
DNA
|
positive
|
def run(self, interaction: Interaction, app: AppPublic) -> Interaction | None:
"""Execute the ``doc`` request for mode interactive.
:param interaction: The interaction from the user
:param app: The app instance
:returns: The pending :class:`~ansible_navigator.ui_framework.ui.Interaction` or
:data:`None`
"""
self._logger.debug('doc requested in interactive')
self._prepare_to_run(app, interaction)
colon_prompt = self._interaction.action.match.groupdict()['params']
if interaction.content and (not colon_prompt):
try:
self._plugin_name = interaction.content.showing['task_action']
self._plugin_type = self._args.entry('plugin_type').value.default
source = 'task action'
except (KeyError, AttributeError, TypeError):
self._logger.info('No plugin name found in current content')
if self._plugin_name is None:
args_updated = self._update_args([self._name] + shlex.split(colon_prompt or ''))
if not args_updated:
self._prepare_to_exit(interaction)
return None
source = self._args.entry('plugin_name').value.source.value
self._plugin_name = self._args.plugin_name
self._plugin_type = self._args.plugin_type
self._logger.debug('Plugin name used from %s: %s', source, self._plugin_name)
self._logger.debug('Plugin type used from %s: %s', source, self._plugin_type)
<DeepExtract>
if isinstance(self._args.set_environment_variable, dict):
set_env_vars = {**self._args.set_environment_variable}
else:
set_env_vars = {}
if self._args.display_color is False or self._args.mode == 'interactive':
set_env_vars['ANSIBLE_NOCOLOR'] = '1'
kwargs = {'container_engine': self._args.container_engine, 'execution_environment_image': self._args.execution_environment_image, 'execution_environment': self._args.execution_environment, 'navigator_mode': self._args.mode, 'pass_environment_variable': self._args.pass_environment_variable, 'set_environment_variable': set_env_vars, 'private_data_dir': self._args.ansible_runner_artifact_dir, 'rotate_artifacts': self._args.ansible_runner_rotate_artifacts_count, 'timeout': self._args.ansible_runner_timeout}
if isinstance(self._args.execution_environment_volume_mounts, list):
kwargs.update({'container_volume_mounts': self._args.execution_environment_volume_mounts})
if isinstance(self._args.container_options, list):
kwargs.update({'container_options': self._args.container_options})
if self._args.mode == 'interactive':
if isinstance(self._args.playbook, str):
playbook_dir = os.path.dirname(self._args.playbook)
else:
playbook_dir = os.getcwd()
kwargs.update({'host_cwd': playbook_dir})
self._runner = AnsibleDoc(**kwargs)
self._logger.debug('doc playbook dir set to: %s', playbook_dir)
(plugin_doc, plugin_doc_err) = self._runner.fetch_plugin_doc([self._plugin_name], plugin_type=self._plugin_type, playbook_dir=playbook_dir)
if plugin_doc_err:
self._logger.error("Error occurred while fetching doc for plugin %s: '%s'", self._plugin_name, plugin_doc_err)
plugin_doc_response = self._extract_plugin_doc(plugin_doc, plugin_doc_err)
plugin_doc = plugin_doc_response
else:
kwargs.update({'host_cwd': os.getcwd()})
if self._args.execution_environment:
ansible_doc_path = 'ansible-doc'
else:
exec_path = shutil.which('ansible-doc')
if exec_path is None:
msg = "'ansible-doc' executable not found"
self._logger.error(msg)
raise RuntimeError(msg)
ansible_doc_path = exec_path
pass_through_arg = []
if self._plugin_name is not C.NOT_SET:
pass_through_arg.append(self._plugin_name)
if self._plugin_type is not C.NOT_SET:
pass_through_arg.extend(['-t', self._plugin_type])
if self._args.help_doc is True:
pass_through_arg.append('--help')
if isinstance(self._args.cmdline, list):
pass_through_arg.extend(self._args.cmdline)
kwargs.update({'cmdline': pass_through_arg})
self._runner = Command(executable_cmd=ansible_doc_path, **kwargs)
stdout_return = self._runner.run()
plugin_doc = stdout_return
</DeepExtract>
if not isinstance(plugin_doc, dict):
self._prepare_to_exit(interaction)
return None
while True:
app.update()
next_interaction: Interaction = interaction.ui.show(content_heading=self.generate_content_heading, obj=plugin_doc)
if next_interaction.name != 'refresh':
break
self._prepare_to_exit(interaction)
return next_interaction
|
def run(self, interaction: Interaction, app: AppPublic) -> Interaction | None:
"""Execute the ``doc`` request for mode interactive.
:param interaction: The interaction from the user
:param app: The app instance
:returns: The pending :class:`~ansible_navigator.ui_framework.ui.Interaction` or
:data:`None`
"""
self._logger.debug('doc requested in interactive')
self._prepare_to_run(app, interaction)
colon_prompt = self._interaction.action.match.groupdict()['params']
if interaction.content and (not colon_prompt):
try:
self._plugin_name = interaction.content.showing['task_action']
self._plugin_type = self._args.entry('plugin_type').value.default
source = 'task action'
except (KeyError, AttributeError, TypeError):
self._logger.info('No plugin name found in current content')
if self._plugin_name is None:
args_updated = self._update_args([self._name] + shlex.split(colon_prompt or ''))
if not args_updated:
self._prepare_to_exit(interaction)
return None
source = self._args.entry('plugin_name').value.source.value
self._plugin_name = self._args.plugin_name
self._plugin_type = self._args.plugin_type
self._logger.debug('Plugin name used from %s: %s', source, self._plugin_name)
self._logger.debug('Plugin type used from %s: %s', source, self._plugin_type)
if isinstance(self._args.set_environment_variable, dict):
set_env_vars = {**self._args.set_environment_variable}
else:
set_env_vars = {}
if self._args.display_color is False or self._args.mode == 'interactive':
set_env_vars['ANSIBLE_NOCOLOR'] = '1'
kwargs = {'container_engine': self._args.container_engine, 'execution_environment_image': self._args.execution_environment_image, 'execution_environment': self._args.execution_environment, 'navigator_mode': self._args.mode, 'pass_environment_variable': self._args.pass_environment_variable, 'set_environment_variable': set_env_vars, 'private_data_dir': self._args.ansible_runner_artifact_dir, 'rotate_artifacts': self._args.ansible_runner_rotate_artifacts_count, 'timeout': self._args.ansible_runner_timeout}
if isinstance(self._args.execution_environment_volume_mounts, list):
kwargs.update({'container_volume_mounts': self._args.execution_environment_volume_mounts})
if isinstance(self._args.container_options, list):
kwargs.update({'container_options': self._args.container_options})
if self._args.mode == 'interactive':
if isinstance(self._args.playbook, str):
playbook_dir = os.path.dirname(self._args.playbook)
else:
playbook_dir = os.getcwd()
kwargs.update({'host_cwd': playbook_dir})
self._runner = AnsibleDoc(**kwargs)
self._logger.debug('doc playbook dir set to: %s', playbook_dir)
(plugin_doc, plugin_doc_err) = self._runner.fetch_plugin_doc([self._plugin_name], plugin_type=self._plugin_type, playbook_dir=playbook_dir)
if plugin_doc_err:
self._logger.error("Error occurred while fetching doc for plugin %s: '%s'", self._plugin_name, plugin_doc_err)
plugin_doc_response = self._extract_plugin_doc(plugin_doc, plugin_doc_err)
plugin_doc = plugin_doc_response
else:
kwargs.update({'host_cwd': os.getcwd()})
if self._args.execution_environment:
ansible_doc_path = 'ansible-doc'
else:
exec_path = shutil.which('ansible-doc')
if exec_path is None:
msg = "'ansible-doc' executable not found"
self._logger.error(msg)
raise RuntimeError(msg)
ansible_doc_path = exec_path
pass_through_arg = []
if self._plugin_name is not C.NOT_SET:
pass_through_arg.append(self._plugin_name)
if self._plugin_type is not C.NOT_SET:
pass_through_arg.extend(['-t', self._plugin_type])
if self._args.help_doc is True:
pass_through_arg.append('--help')
if isinstance(self._args.cmdline, list):
pass_through_arg.extend(self._args.cmdline)
kwargs.update({'cmdline': pass_through_arg})
self._runner = Command(executable_cmd=ansible_doc_path, **kwargs)
stdout_return = self._runner.run()
plugin_doc = stdout_return
if not isinstance(plugin_doc, dict):
self._prepare_to_exit(interaction)
return None
while True:
app.update()
next_interaction: Interaction = interaction.ui.show(content_heading=self.generate_content_heading, obj=plugin_doc)
if next_interaction.name != 'refresh':
break
self._prepare_to_exit(interaction)
return next_interaction
|
ansible-navigator
|
positive
|
def preprocess_batch(self):
if cfg.TRAIN.AUGMENTATIONS.MIXUP.OPEN:
mixup_label_dict = dict([(cls, []) for cls in self.mixup_db_class])
sample_dicts_list = []
for (scene_key, v) in tqdm.tqdm(self.sample_data_token_list.items()):
for sample_data_token in v:
<DeepExtract>
sample_dicts = []
biggest_label_num = 0
cur_sample_data = self.nusc.get('sample_data', sample_data_token)
cur_sample_token = cur_sample_data['sample_token']
cur_sample = self.nusc.get('sample', cur_sample_token)
ego_pose = self.nusc.get('ego_pose', cur_sample_data['ego_pose_token'])
calibrated_sensor = self.nusc.get('calibrated_sensor', cur_sample_data['calibrated_sensor_token'])
l2e_r = calibrated_sensor['rotation']
l2e_t = calibrated_sensor['translation']
e2g_r = ego_pose['rotation']
e2g_t = ego_pose['translation']
l2e_r_mat = Quaternion(l2e_r).rotation_matrix
e2g_r_mat = Quaternion(e2g_r).rotation_matrix
cur_timestamp = cur_sample['timestamp']
cur_transformation_matrix = {'lidar2ego_translation': l2e_t, 'lidar2ego_rotation': l2e_r, 'ego2global_translation': e2g_t, 'ego2global_rotation': e2g_r}
sweeps = []
while len(sweeps) < self.max_sweeps:
if not cur_sample_data['prev'] == '':
cur_sample_data = self.nusc.get('sample_data', cur_sample_data['prev'])
cur_ego_pose = self.nusc.get('ego_pose', cur_sample_data['ego_pose_token'])
cur_calibrated_sensor = self.nusc.get('calibrated_sensor', cur_sample_data['calibrated_sensor_token'])
(cur_lidar_path, cur_sweep_boxes, _) = self.nusc.get_sample_data(cur_sample_data['token'])
sweep = {'lidar_path': cur_lidar_path, 'sample_data_token': cur_sample_data['token'], 'lidar2ego_translation': cur_calibrated_sensor['translation'], 'lidar2ego_rotation': cur_calibrated_sensor['rotation'], 'ego2global_translation': cur_ego_pose['translation'], 'ego2global_rotation': cur_ego_pose['rotation'], 'timestamp': cur_sample_data['timestamp']}
l2e_r_s = sweep['lidar2ego_rotation']
l2e_t_s = sweep['lidar2ego_translation']
e2g_r_s = sweep['ego2global_rotation']
e2g_t_s = sweep['ego2global_translation']
l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix
e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix
R = l2e_r_s_mat.T @ e2g_r_s_mat.T @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + l2e_t @ np.linalg.inv(l2e_r_mat).T
sweep['sweep2lidar_rotation'] = R.T
sweep['sweep2lidar_translation'] = T
sweeps.append(sweep)
else:
break
if self.img_list in ['train', 'val'] and cfg.TEST.WITH_GT:
(cur_data_path, all_boxes, _) = self.nusc.get_sample_data(sample_data_token)
locs = np.array([box.center for box in all_boxes]).reshape(-1, 3)
sizes = np.array([box.wlh for box in all_boxes]).reshape(-1, 3)
rots = np.array([box.orientation.yaw_pitch_roll[0] for box in all_boxes]).reshape(-1, 1)
all_boxes_3d = np.concatenate([locs, sizes, -rots], axis=-1)
annos_tokens = cur_sample['anns']
all_velocity = np.array([self.nusc.box_velocity(ann_token)[:2] for ann_token in annos_tokens])
for i in range(len(all_boxes)):
velo = np.array([*all_velocity[i], 0.0])
velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
all_velocity[i] = velo[:2]
attribute_tokens = [self.nusc.get('sample_annotation', ann_token)['attribute_tokens'] for ann_token in annos_tokens]
all_attribute = []
for attribute_token in attribute_tokens:
if len(attribute_token) == 0:
all_attribute.append([])
else:
all_attribute.append(self.nusc.get('attribute', attribute_token[0])['name'])
categories = np.array([box.name for box in all_boxes])
if self.img_list == 'train':
useful_idx = [index for (index, category) in enumerate(categories) if self.useful_cls_dict[category] != 'ignore']
else:
useful_idx = [index for (index, category) in enumerate(categories)]
if len(useful_idx) == 0:
if self.img_list == 'train':
(sample_dict, tmp_biggest_label_num) = (None, biggest_label_num)
else:
all_boxes_3d = np.ones([1, 7], dtype=np.float32)
all_boxes_classes = np.array(['ignore'])
all_attribute = np.array([-1])
all_velocity = np.array([[0, 0]], dtype=np.float32)
else:
all_boxes_3d = all_boxes_3d[useful_idx]
categories = categories[useful_idx]
all_boxes_classes = np.array([self.useful_cls_dict[cate] for cate in categories])
for (tmp_idx, all_boxes_class) in enumerate(all_boxes_classes):
cur_mean_size = self.cls_size_dict[all_boxes_class] * self.cls_num_dict[all_boxes_class]
cur_cls_num = self.cls_num_dict[all_boxes_class] + 1
cur_total_size = cur_mean_size + all_boxes_3d[tmp_idx, [4, 5, 3]]
cur_mean_size = cur_total_size / cur_cls_num
self.cls_size_dict[all_boxes_class] = cur_mean_size
self.cls_num_dict[all_boxes_class] = cur_cls_num
all_attribute = [all_attribute[tmp_idx] for tmp_idx in useful_idx]
tmp_attribute = []
for attr in all_attribute:
if attr == []:
tmp_attribute.append(-1)
else:
tmp_attribute.append(self.attribute_idx_list[attr])
all_attribute = tmp_attribute
all_attribute = np.array(all_attribute, dtype=np.int32)
all_velocity = [all_velocity[tmp_idx] for tmp_idx in useful_idx]
all_velocity = np.array(all_velocity, dtype=np.float32)
else:
cur_data_path = self.nusc.get_sample_data_path(sample_data_token)
if self.img_list in ['train', 'val', 'trainval'] and cfg.TEST.WITH_GT:
sample_dict = {maps_dict.KEY_LABEL_BOXES_3D: all_boxes_3d, maps_dict.KEY_LABEL_CLASSES: all_boxes_classes, maps_dict.KEY_LABEL_ATTRIBUTES: all_attribute, maps_dict.KEY_LABEL_VELOCITY: all_velocity, maps_dict.KEY_LABEL_NUM: len(all_boxes_3d), maps_dict.KEY_POINT_CLOUD: cur_data_path, maps_dict.KEY_TRANSFORMRATION_MATRIX: cur_transformation_matrix, maps_dict.KEY_SAMPLE_NAME: '{}/{}/{}'.format(scene_key, cur_sample_token, sample_data_token), maps_dict.KEY_SWEEPS: sweeps, maps_dict.KEY_TIMESTAMPS: cur_timestamp}
biggest_label_num = max(len(all_boxes_3d), biggest_label_num)
else:
sample_dict = {maps_dict.KEY_POINT_CLOUD: cur_data_path, maps_dict.KEY_SAMPLE_NAME: '{}/{}/{}'.format(scene_key, cur_sample_token, sample_data_token), maps_dict.KEY_TRANSFORMRATION_MATRIX: cur_transformation_matrix, maps_dict.KEY_SWEEPS: sweeps, maps_dict.KEY_TIMESTAMPS: cur_timestamp}
(sample_dict, tmp_biggest_label_num) = (sample_dict, biggest_label_num)
</DeepExtract>
if sample_dict is None:
continue
sample_dicts_list.append(sample_dict)
if self.img_list in ['train', 'val', 'trainval'] and cfg.TEST.WITH_GT and cfg.TRAIN.AUGMENTATIONS.MIXUP.OPEN:
<DeepExtract>
all_boxes_3d = sample_dict[maps_dict.KEY_LABEL_BOXES_3D]
all_boxes_classes = sample_dict[maps_dict.KEY_LABEL_CLASSES]
point_cloud_path = sample_dict[maps_dict.KEY_POINT_CLOUD]
all_boxes_3d = cast_box_3d_to_kitti_format(all_boxes_3d)
points = np.fromfile(point_cloud_path, dtype=np.float32).reshape((-1, 5))
points = cast_points_to_kitti(points)
points[:, 3] /= 255
points[:, 4] = 0
points_mask = check_inside_points(points, all_boxes_3d)
points_masks_num = np.sum(points_mask, axis=0)
valid_box_idx = np.where(points_masks_num >= cfg.DATASET.MIN_POINTS_NUM)[0]
if len(valid_box_idx) == 0:
mixup_sample_dicts = None
valid_label_boxes_3d = all_boxes_3d[valid_box_idx]
valid_label_classes = all_boxes_classes[valid_box_idx]
sample_dicts = []
for (index, i) in enumerate(valid_box_idx):
cur_points_mask = points_mask[:, i]
cur_points_idx = np.where(cur_points_mask)[0]
cur_inside_points = points[cur_points_idx, :]
sample_dict = {maps_dict.KEY_SAMPLED_GT_POINTS: cur_inside_points, maps_dict.KEY_SAMPLED_GT_LABELS_3D: valid_label_boxes_3d[index], maps_dict.KEY_SAMPLED_GT_CLSES: valid_label_classes[index]}
sample_dicts.append(sample_dict)
mixup_sample_dicts = sample_dicts
</DeepExtract>
if mixup_sample_dicts is None:
continue
for mixup_sample_dict in mixup_sample_dicts:
cur_cls = mixup_sample_dict[maps_dict.KEY_SAMPLED_GT_CLSES]
mixup_label_dict[cur_cls].append(mixup_sample_dict)
with open(self.train_list, 'wb') as f:
pickle.dump(sample_dicts_list, f)
for (k, v) in self.cls_num_dict.items():
print('class name: %s / class num: %d / mean size: (%f, %f, %f)' % (k, v, self.cls_size_dict[k][0], self.cls_size_dict[k][1], self.cls_size_dict[k][2]))
if self.img_list in ['train', 'val', 'trainval'] and cfg.TEST.WITH_GT and cfg.TRAIN.AUGMENTATIONS.MIXUP.OPEN:
print('**** Generating groundtruth database ****')
for (cur_cls_name, mixup_sample_dict) in mixup_label_dict.items():
cur_mixup_db_cls_path = self.mixup_db_cls_path[cur_cls_name]
cur_mixup_db_trainlist_path = self.mixup_db_trainlist_path[cur_cls_name]
print('**** Class %s ****' % cur_cls_name)
with open(cur_mixup_db_trainlist_path, 'w') as f:
for (tmp_idx, tmp_cur_mixup_sample_dict) in tqdm.tqdm(enumerate(mixup_sample_dict)):
f.write('%06d.npy\n' % tmp_idx)
np.save(os.path.join(cur_mixup_db_cls_path, '%06d.npy' % tmp_idx), tmp_cur_mixup_sample_dict)
print('Ending of the preprocess !!!')
|
def preprocess_batch(self):
if cfg.TRAIN.AUGMENTATIONS.MIXUP.OPEN:
mixup_label_dict = dict([(cls, []) for cls in self.mixup_db_class])
sample_dicts_list = []
for (scene_key, v) in tqdm.tqdm(self.sample_data_token_list.items()):
for sample_data_token in v:
sample_dicts = []
biggest_label_num = 0
cur_sample_data = self.nusc.get('sample_data', sample_data_token)
cur_sample_token = cur_sample_data['sample_token']
cur_sample = self.nusc.get('sample', cur_sample_token)
ego_pose = self.nusc.get('ego_pose', cur_sample_data['ego_pose_token'])
calibrated_sensor = self.nusc.get('calibrated_sensor', cur_sample_data['calibrated_sensor_token'])
l2e_r = calibrated_sensor['rotation']
l2e_t = calibrated_sensor['translation']
e2g_r = ego_pose['rotation']
e2g_t = ego_pose['translation']
l2e_r_mat = Quaternion(l2e_r).rotation_matrix
e2g_r_mat = Quaternion(e2g_r).rotation_matrix
cur_timestamp = cur_sample['timestamp']
cur_transformation_matrix = {'lidar2ego_translation': l2e_t, 'lidar2ego_rotation': l2e_r, 'ego2global_translation': e2g_t, 'ego2global_rotation': e2g_r}
sweeps = []
while len(sweeps) < self.max_sweeps:
if not cur_sample_data['prev'] == '':
cur_sample_data = self.nusc.get('sample_data', cur_sample_data['prev'])
cur_ego_pose = self.nusc.get('ego_pose', cur_sample_data['ego_pose_token'])
cur_calibrated_sensor = self.nusc.get('calibrated_sensor', cur_sample_data['calibrated_sensor_token'])
(cur_lidar_path, cur_sweep_boxes, _) = self.nusc.get_sample_data(cur_sample_data['token'])
sweep = {'lidar_path': cur_lidar_path, 'sample_data_token': cur_sample_data['token'], 'lidar2ego_translation': cur_calibrated_sensor['translation'], 'lidar2ego_rotation': cur_calibrated_sensor['rotation'], 'ego2global_translation': cur_ego_pose['translation'], 'ego2global_rotation': cur_ego_pose['rotation'], 'timestamp': cur_sample_data['timestamp']}
l2e_r_s = sweep['lidar2ego_rotation']
l2e_t_s = sweep['lidar2ego_translation']
e2g_r_s = sweep['ego2global_rotation']
e2g_t_s = sweep['ego2global_translation']
l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix
e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix
R = l2e_r_s_mat.T @ e2g_r_s_mat.T @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + l2e_t @ np.linalg.inv(l2e_r_mat).T
sweep['sweep2lidar_rotation'] = R.T
sweep['sweep2lidar_translation'] = T
sweeps.append(sweep)
else:
break
if self.img_list in ['train', 'val'] and cfg.TEST.WITH_GT:
(cur_data_path, all_boxes, _) = self.nusc.get_sample_data(sample_data_token)
locs = np.array([box.center for box in all_boxes]).reshape(-1, 3)
sizes = np.array([box.wlh for box in all_boxes]).reshape(-1, 3)
rots = np.array([box.orientation.yaw_pitch_roll[0] for box in all_boxes]).reshape(-1, 1)
all_boxes_3d = np.concatenate([locs, sizes, -rots], axis=-1)
annos_tokens = cur_sample['anns']
all_velocity = np.array([self.nusc.box_velocity(ann_token)[:2] for ann_token in annos_tokens])
for i in range(len(all_boxes)):
velo = np.array([*all_velocity[i], 0.0])
velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
all_velocity[i] = velo[:2]
attribute_tokens = [self.nusc.get('sample_annotation', ann_token)['attribute_tokens'] for ann_token in annos_tokens]
all_attribute = []
for attribute_token in attribute_tokens:
if len(attribute_token) == 0:
all_attribute.append([])
else:
all_attribute.append(self.nusc.get('attribute', attribute_token[0])['name'])
categories = np.array([box.name for box in all_boxes])
if self.img_list == 'train':
useful_idx = [index for (index, category) in enumerate(categories) if self.useful_cls_dict[category] != 'ignore']
else:
useful_idx = [index for (index, category) in enumerate(categories)]
if len(useful_idx) == 0:
if self.img_list == 'train':
(sample_dict, tmp_biggest_label_num) = (None, biggest_label_num)
else:
all_boxes_3d = np.ones([1, 7], dtype=np.float32)
all_boxes_classes = np.array(['ignore'])
all_attribute = np.array([-1])
all_velocity = np.array([[0, 0]], dtype=np.float32)
else:
all_boxes_3d = all_boxes_3d[useful_idx]
categories = categories[useful_idx]
all_boxes_classes = np.array([self.useful_cls_dict[cate] for cate in categories])
for (tmp_idx, all_boxes_class) in enumerate(all_boxes_classes):
cur_mean_size = self.cls_size_dict[all_boxes_class] * self.cls_num_dict[all_boxes_class]
cur_cls_num = self.cls_num_dict[all_boxes_class] + 1
cur_total_size = cur_mean_size + all_boxes_3d[tmp_idx, [4, 5, 3]]
cur_mean_size = cur_total_size / cur_cls_num
self.cls_size_dict[all_boxes_class] = cur_mean_size
self.cls_num_dict[all_boxes_class] = cur_cls_num
all_attribute = [all_attribute[tmp_idx] for tmp_idx in useful_idx]
tmp_attribute = []
for attr in all_attribute:
if attr == []:
tmp_attribute.append(-1)
else:
tmp_attribute.append(self.attribute_idx_list[attr])
all_attribute = tmp_attribute
all_attribute = np.array(all_attribute, dtype=np.int32)
all_velocity = [all_velocity[tmp_idx] for tmp_idx in useful_idx]
all_velocity = np.array(all_velocity, dtype=np.float32)
else:
cur_data_path = self.nusc.get_sample_data_path(sample_data_token)
if self.img_list in ['train', 'val', 'trainval'] and cfg.TEST.WITH_GT:
sample_dict = {maps_dict.KEY_LABEL_BOXES_3D: all_boxes_3d, maps_dict.KEY_LABEL_CLASSES: all_boxes_classes, maps_dict.KEY_LABEL_ATTRIBUTES: all_attribute, maps_dict.KEY_LABEL_VELOCITY: all_velocity, maps_dict.KEY_LABEL_NUM: len(all_boxes_3d), maps_dict.KEY_POINT_CLOUD: cur_data_path, maps_dict.KEY_TRANSFORMRATION_MATRIX: cur_transformation_matrix, maps_dict.KEY_SAMPLE_NAME: '{}/{}/{}'.format(scene_key, cur_sample_token, sample_data_token), maps_dict.KEY_SWEEPS: sweeps, maps_dict.KEY_TIMESTAMPS: cur_timestamp}
biggest_label_num = max(len(all_boxes_3d), biggest_label_num)
else:
sample_dict = {maps_dict.KEY_POINT_CLOUD: cur_data_path, maps_dict.KEY_SAMPLE_NAME: '{}/{}/{}'.format(scene_key, cur_sample_token, sample_data_token), maps_dict.KEY_TRANSFORMRATION_MATRIX: cur_transformation_matrix, maps_dict.KEY_SWEEPS: sweeps, maps_dict.KEY_TIMESTAMPS: cur_timestamp}
(sample_dict, tmp_biggest_label_num) = (sample_dict, biggest_label_num)
if sample_dict is None:
continue
sample_dicts_list.append(sample_dict)
if self.img_list in ['train', 'val', 'trainval'] and cfg.TEST.WITH_GT and cfg.TRAIN.AUGMENTATIONS.MIXUP.OPEN:
all_boxes_3d = sample_dict[maps_dict.KEY_LABEL_BOXES_3D]
all_boxes_classes = sample_dict[maps_dict.KEY_LABEL_CLASSES]
point_cloud_path = sample_dict[maps_dict.KEY_POINT_CLOUD]
all_boxes_3d = cast_box_3d_to_kitti_format(all_boxes_3d)
points = np.fromfile(point_cloud_path, dtype=np.float32).reshape((-1, 5))
points = cast_points_to_kitti(points)
points[:, 3] /= 255
points[:, 4] = 0
points_mask = check_inside_points(points, all_boxes_3d)
                points_masks_num = np.sum(points_mask, axis=0)
valid_box_idx = np.where(points_masks_num >= cfg.DATASET.MIN_POINTS_NUM)[0]
if len(valid_box_idx) == 0:
mixup_sample_dicts = None
valid_label_boxes_3d = all_boxes_3d[valid_box_idx]
valid_label_classes = all_boxes_classes[valid_box_idx]
sample_dicts = []
for (index, i) in enumerate(valid_box_idx):
cur_points_mask = points_mask[:, i]
cur_points_idx = np.where(cur_points_mask)[0]
cur_inside_points = points[cur_points_idx, :]
sample_dict = {maps_dict.KEY_SAMPLED_GT_POINTS: cur_inside_points, maps_dict.KEY_SAMPLED_GT_LABELS_3D: valid_label_boxes_3d[index], maps_dict.KEY_SAMPLED_GT_CLSES: valid_label_classes[index]}
sample_dicts.append(sample_dict)
mixup_sample_dicts = sample_dicts
if mixup_sample_dicts is None:
continue
for mixup_sample_dict in mixup_sample_dicts:
cur_cls = mixup_sample_dict[maps_dict.KEY_SAMPLED_GT_CLSES]
mixup_label_dict[cur_cls].append(mixup_sample_dict)
with open(self.train_list, 'wb') as f:
pickle.dump(sample_dicts_list, f)
for (k, v) in self.cls_num_dict.items():
print('class name: %s / class num: %d / mean size: (%f, %f, %f)' % (k, v, self.cls_size_dict[k][0], self.cls_size_dict[k][1], self.cls_size_dict[k][2]))
if self.img_list in ['train', 'val', 'trainval'] and cfg.TEST.WITH_GT and cfg.TRAIN.AUGMENTATIONS.MIXUP.OPEN:
print('**** Generating groundtruth database ****')
for (cur_cls_name, mixup_sample_dict) in mixup_label_dict.items():
cur_mixup_db_cls_path = self.mixup_db_cls_path[cur_cls_name]
cur_mixup_db_trainlist_path = self.mixup_db_trainlist_path[cur_cls_name]
print('**** Class %s ****' % cur_cls_name)
with open(cur_mixup_db_trainlist_path, 'w') as f:
for (tmp_idx, tmp_cur_mixup_sample_dict) in tqdm.tqdm(enumerate(mixup_sample_dict)):
f.write('%06d.npy\n' % tmp_idx)
np.save(os.path.join(cur_mixup_db_cls_path, '%06d.npy' % tmp_idx), tmp_cur_mixup_sample_dict)
print('Ending of the preprocess !!!')
|
3DSSD
|
positive
|
def test_cmsis_svd(self):
"""
Verify output of CMSIS template.
"""
<DeepExtract>
os.system('python3 cyanobyte/codegen.py -c -o ./tmp/ -t templates/' + 'cmsis.svd' + ' peripherals/ADS1015.yaml peripherals/BH1750FVI.yaml peripherals/BMP180.yaml peripherals/BMP280.yaml peripherals/LSM303D.yaml peripherals/MCP4725.yaml peripherals/MCP9808.yaml peripherals/TCS3472.yaml peripherals/example.yaml > /dev/null')
</DeepExtract>
<DeepExtract>
peripherals = ['ADS1015', 'BH1750FVI', 'BMP180', 'BMP280', 'LSM303D', 'MCP4725', 'MCP9808', 'TCS3472', 'Example']
test_path = 'test/sampleData'
tmp_path = 'tmp/com/cyanobyte'
for peripheral in peripherals:
full_test_path = os.path.join(test_path, 'cmsis-svd', peripheral + '.' + 'svd')
full_tmp_path = os.path.join(tmp_path, peripheral + '.' + 'svd')
print('Comparing', full_test_path, 'and', full_tmp_path)
with open(full_test_path) as file1:
with open(full_tmp_path) as file2:
file_contents_1 = file1.read()
file_contents_2 = file2.read()
self.assertEqual(file_contents_1, file_contents_2, msg='{0} and {1} are not the same'.format(full_test_path, full_tmp_path))
</DeepExtract>
|
def test_cmsis_svd(self):
"""
Verify output of CMSIS template.
"""
os.system('python3 cyanobyte/codegen.py -c -o ./tmp/ -t templates/' + 'cmsis.svd' + ' peripherals/ADS1015.yaml peripherals/BH1750FVI.yaml peripherals/BMP180.yaml peripherals/BMP280.yaml peripherals/LSM303D.yaml peripherals/MCP4725.yaml peripherals/MCP9808.yaml peripherals/TCS3472.yaml peripherals/example.yaml > /dev/null')
peripherals = ['ADS1015', 'BH1750FVI', 'BMP180', 'BMP280', 'LSM303D', 'MCP4725', 'MCP9808', 'TCS3472', 'Example']
test_path = 'test/sampleData'
tmp_path = 'tmp/com/cyanobyte'
for peripheral in peripherals:
full_test_path = os.path.join(test_path, 'cmsis-svd', peripheral + '.' + 'svd')
full_tmp_path = os.path.join(tmp_path, peripheral + '.' + 'svd')
print('Comparing', full_test_path, 'and', full_tmp_path)
with open(full_test_path) as file1:
with open(full_tmp_path) as file2:
file_contents_1 = file1.read()
file_contents_2 = file2.read()
self.assertEqual(file_contents_1, file_contents_2, msg='{0} and {1} are not the same'.format(full_test_path, full_tmp_path))
|
cyanobyte
|
positive
|
def patients_registered_practice_as_of(self, date, returning=None):
<DeepExtract>
all_join_tables = set()
sql_expressions = []
for date_expression in date_expressions:
assert date_expression is not None
(sql_expression, join_tables) = self.date_ref_to_sql_expr(date_expression)
sql_expressions.append(sql_expression)
all_join_tables.update(join_tables)
joins = [f"LEFT JOIN {join_table}\nON {join_table}.patient_id = {'RegistrationHistory'}.patient_id" for join_table in all_join_tables]
join_str = '\n'.join(joins)
(date_sql, date_joins) = (*sql_expressions, join_str)
</DeepExtract>
if '__' in returning:
(_, app_trial_name, app_property_name) = returning.split('__')
app_to_db_trial_name = {'germdefence': 'germdefence'}
app_to_db_property_name = {property.lower(): property for property in ['enrolled', 'trial_arm', 'Av_rooms_per_house', 'deprivation_pctile', 'group_mean_behaviour_mean', 'group_mean_intention_mean', 'hand_behav_practice_mean', 'hand_intent_practice_mean', 'IMD_decile', 'IntCon', 'MeanAge', 'MedianAge', 'Minority_ethnic_total', 'N_completers_HW_behav', 'N_completers_RI_behav', 'N_completers_RI_intent', 'n_engaged_pages_viewed_mean_mean', 'n_engaged_visits_mean', 'N_goalsetting_completers_per_practice', 'n_pages_viewed_mean', 'n_times_visited_mean', 'N_visits_practice', 'prop_engaged_visits', 'total_visit_time_mean']}
try:
db_trial_name = app_to_db_trial_name[app_trial_name]
except KeyError:
raise ValueError(f"Unknown RCT '{app_trial_name}', available names are: {', '.join(app_to_db_trial_name.keys())}")
try:
db_property_name = app_to_db_property_name[app_property_name]
except KeyError:
newline = '\n'
raise ValueError(f"Unknown property '{app_property_name}', available properties are:\n{newline.join(app_to_db_trial_name.keys())}")
if app_property_name in ['enrolled', 'trial_arm']:
to_select = '1' if app_property_name == 'enrolled' else 'TrialArm'
return f"\n SELECT\n Patient_ID AS patient_id,\n {to_select} AS {returning}\n FROM\n ClusterRandomisedTrial AS lhs\n LEFT JOIN (\n SELECT\n Patient_ID,\n Organisation_ID,\n ROW_NUMBER() OVER (\n PARTITION BY Patient_ID\n ORDER BY StartDate DESC, EndDate DESC, Registration_ID\n ) AS rownum\n FROM\n RegistrationHistory\n {date_joins}\n WHERE\n StartDate <= {date_sql}\n AND EndDate > {date_sql}\n ) AS rhs\n ON lhs.Organisation_ID = rhs.Organisation_ID\n WHERE\n rownum = 1\n AND TrialNumber IN (\n SELECT\n TrialNumber\n FROM\n ClusterRandomisedTrialReference\n WHERE\n TrialName = '{db_trial_name}'\n )\n "
else:
return f"\n SELECT\n Patient_ID AS patient_id,\n PropertyValue AS {returning}\n FROM\n ClusterRandomisedTrialDetail as lhs\n LEFT JOIN (\n SELECT\n Patient_ID,\n Organisation_ID,\n ROW_NUMBER() OVER (\n PARTITION BY Patient_ID\n ORDER BY StartDate DESC, EndDate DESC, Registration_ID\n ) AS rownum\n FROM\n RegistrationHistory\n {date_joins}\n WHERE\n StartDate <= {date_sql}\n AND EndDate > {date_sql}\n ) rhs\n ON lhs.Organisation_ID = rhs.Organisation_ID\n WHERE\n Property = '{db_property_name}'\n AND rownum = 1\n AND TrialNumber IN (\n SELECT\n TrialNumber\n FROM\n ClusterRandomisedTrialReference\n WHERE\n TrialName = '{db_trial_name}'\n )\n "
if returning == 'stp_code':
column = 'STPCode'
elif returning in ('msoa', 'msoa_code'):
column = 'MSOACode'
elif returning == 'nuts1_region_name':
column = 'Region'
elif returning == 'pseudo_id':
column = 'Organisation_ID'
else:
raise ValueError(f'Unsupported `returning` value: {returning}')
return f'\n SELECT\n t.Patient_ID AS patient_id,\n Organisation.{column} AS {returning}\n FROM (\n SELECT RegistrationHistory.Patient_ID, Organisation_ID,\n ROW_NUMBER() OVER (\n PARTITION BY RegistrationHistory.Patient_ID\n ORDER BY StartDate DESC, EndDate DESC, Registration_ID\n ) AS rownum\n FROM RegistrationHistory\n {date_joins}\n WHERE StartDate <= {date_sql} AND EndDate > {date_sql}\n ) t\n LEFT JOIN Organisation\n ON Organisation.Organisation_ID = t.Organisation_ID\n WHERE t.rownum = 1\n '
|
def patients_registered_practice_as_of(self, date, returning=None):
all_join_tables = set()
sql_expressions = []
for date_expression in date_expressions:
assert date_expression is not None
(sql_expression, join_tables) = self.date_ref_to_sql_expr(date_expression)
sql_expressions.append(sql_expression)
all_join_tables.update(join_tables)
joins = [f"LEFT JOIN {join_table}\nON {join_table}.patient_id = {'RegistrationHistory'}.patient_id" for join_table in all_join_tables]
join_str = '\n'.join(joins)
(date_sql, date_joins) = (*sql_expressions, join_str)
if '__' in returning:
(_, app_trial_name, app_property_name) = returning.split('__')
app_to_db_trial_name = {'germdefence': 'germdefence'}
app_to_db_property_name = {property.lower(): property for property in ['enrolled', 'trial_arm', 'Av_rooms_per_house', 'deprivation_pctile', 'group_mean_behaviour_mean', 'group_mean_intention_mean', 'hand_behav_practice_mean', 'hand_intent_practice_mean', 'IMD_decile', 'IntCon', 'MeanAge', 'MedianAge', 'Minority_ethnic_total', 'N_completers_HW_behav', 'N_completers_RI_behav', 'N_completers_RI_intent', 'n_engaged_pages_viewed_mean_mean', 'n_engaged_visits_mean', 'N_goalsetting_completers_per_practice', 'n_pages_viewed_mean', 'n_times_visited_mean', 'N_visits_practice', 'prop_engaged_visits', 'total_visit_time_mean']}
try:
db_trial_name = app_to_db_trial_name[app_trial_name]
except KeyError:
raise ValueError(f"Unknown RCT '{app_trial_name}', available names are: {', '.join(app_to_db_trial_name.keys())}")
try:
db_property_name = app_to_db_property_name[app_property_name]
except KeyError:
newline = '\n'
raise ValueError(f"Unknown property '{app_property_name}', available properties are:\n{newline.join(app_to_db_trial_name.keys())}")
if app_property_name in ['enrolled', 'trial_arm']:
to_select = '1' if app_property_name == 'enrolled' else 'TrialArm'
return f"\n SELECT\n Patient_ID AS patient_id,\n {to_select} AS {returning}\n FROM\n ClusterRandomisedTrial AS lhs\n LEFT JOIN (\n SELECT\n Patient_ID,\n Organisation_ID,\n ROW_NUMBER() OVER (\n PARTITION BY Patient_ID\n ORDER BY StartDate DESC, EndDate DESC, Registration_ID\n ) AS rownum\n FROM\n RegistrationHistory\n {date_joins}\n WHERE\n StartDate <= {date_sql}\n AND EndDate > {date_sql}\n ) AS rhs\n ON lhs.Organisation_ID = rhs.Organisation_ID\n WHERE\n rownum = 1\n AND TrialNumber IN (\n SELECT\n TrialNumber\n FROM\n ClusterRandomisedTrialReference\n WHERE\n TrialName = '{db_trial_name}'\n )\n "
else:
return f"\n SELECT\n Patient_ID AS patient_id,\n PropertyValue AS {returning}\n FROM\n ClusterRandomisedTrialDetail as lhs\n LEFT JOIN (\n SELECT\n Patient_ID,\n Organisation_ID,\n ROW_NUMBER() OVER (\n PARTITION BY Patient_ID\n ORDER BY StartDate DESC, EndDate DESC, Registration_ID\n ) AS rownum\n FROM\n RegistrationHistory\n {date_joins}\n WHERE\n StartDate <= {date_sql}\n AND EndDate > {date_sql}\n ) rhs\n ON lhs.Organisation_ID = rhs.Organisation_ID\n WHERE\n Property = '{db_property_name}'\n AND rownum = 1\n AND TrialNumber IN (\n SELECT\n TrialNumber\n FROM\n ClusterRandomisedTrialReference\n WHERE\n TrialName = '{db_trial_name}'\n )\n "
if returning == 'stp_code':
column = 'STPCode'
elif returning in ('msoa', 'msoa_code'):
column = 'MSOACode'
elif returning == 'nuts1_region_name':
column = 'Region'
elif returning == 'pseudo_id':
column = 'Organisation_ID'
else:
raise ValueError(f'Unsupported `returning` value: {returning}')
return f'\n SELECT\n t.Patient_ID AS patient_id,\n Organisation.{column} AS {returning}\n FROM (\n SELECT RegistrationHistory.Patient_ID, Organisation_ID,\n ROW_NUMBER() OVER (\n PARTITION BY RegistrationHistory.Patient_ID\n ORDER BY StartDate DESC, EndDate DESC, Registration_ID\n ) AS rownum\n FROM RegistrationHistory\n {date_joins}\n WHERE StartDate <= {date_sql} AND EndDate > {date_sql}\n ) t\n LEFT JOIN Organisation\n ON Organisation.Organisation_ID = t.Organisation_ID\n WHERE t.rownum = 1\n '
|
cohort-extractor
|
positive
|
def collision_fn(q):
if violates_limits(body, joints, q):
return True
<DeepExtract>
assert len(joints) == len(q)
for (joint, value) in zip(joints, q):
set_joint_position(body, joint, value)
</DeepExtract>
for attachment in attachments:
attachment.assign()
for (link1, link2) in check_link_pairs:
if pairwise_link_collision(body, link1, body, link2):
return True
return any((pairwise_collision(*pair) for pair in check_body_pairs))
|
def collision_fn(q):
if violates_limits(body, joints, q):
return True
assert len(joints) == len(q)
for (joint, value) in zip(joints, q):
set_joint_position(body, joint, value)
for attachment in attachments:
attachment.assign()
for (link1, link2) in check_link_pairs:
if pairwise_link_collision(body, link1, body, link2):
return True
return any((pairwise_collision(*pair) for pair in check_body_pairs))
|
decentralized-multiarm
|
positive
|
def test_trie_secure() -> None:
<DeepExtract>
with open(f'{ETHEREUM_TESTS_PATH}/TrieTests/' + 'trietest_secureTrie.json') as f:
tests = json.load(f)
tests = tests
</DeepExtract>
for (name, test) in tests.items():
st: Trie[Bytes, Bytes] = Trie(secured=True, default=b'')
for t in test.get('in'):
trie_set(st, to_bytes(t[0]), to_bytes(t[1]))
result = root(st)
expected = remove_hex_prefix(test.get('root'))
assert result.hex() == expected, f'test {name} failed'
|
def test_trie_secure() -> None:
with open(f'{ETHEREUM_TESTS_PATH}/TrieTests/' + 'trietest_secureTrie.json') as f:
tests = json.load(f)
tests = tests
for (name, test) in tests.items():
st: Trie[Bytes, Bytes] = Trie(secured=True, default=b'')
for t in test.get('in'):
trie_set(st, to_bytes(t[0]), to_bytes(t[1]))
result = root(st)
expected = remove_hex_prefix(test.get('root'))
assert result.hex() == expected, f'test {name} failed'
|
eth1.0-specs
|
positive
|
def tempUpdate(self, value):
self.tempValue.setText(str('{:.0f}'.format(self.slider2Temp(self.sliderTemp.value()))))
if self.sliderTemp.isSliderDown() or self.slider2Temp(value) == self.tempCorrection:
return
try:
self.sliderTemp.valueChanged.disconnect()
self.sliderTemp.sliderReleased.disconnect()
except RuntimeError:
pass
<DeepExtract>
self.tempCorrection = 2000 + self.sliderTemp.value() * self.sliderTemp.value()
</DeepExtract>
multipliers = [1 / m for m in temperatureAndTint2Multipliers(self.tempCorrection, 1.0, self.XYZ2CameraMatrix, dngDict=self.dngDict)]
multipliers[1] *= self.tintCorrection
self.rawMultipliers = multipliers
m = multipliers[1]
self.rawMultipliers = [self.rawMultipliers[i] / m for i in range(4)]
self.dataChanged.emit(1)
self.sliderTemp.valueChanged.connect(self.tempUpdate)
self.sliderTemp.sliderReleased.connect(lambda : self.tempUpdate(self.sliderTemp.value()))
|
def tempUpdate(self, value):
self.tempValue.setText(str('{:.0f}'.format(self.slider2Temp(self.sliderTemp.value()))))
if self.sliderTemp.isSliderDown() or self.slider2Temp(value) == self.tempCorrection:
return
try:
self.sliderTemp.valueChanged.disconnect()
self.sliderTemp.sliderReleased.disconnect()
except RuntimeError:
pass
self.tempCorrection = 2000 + self.sliderTemp.value() * self.sliderTemp.value()
multipliers = [1 / m for m in temperatureAndTint2Multipliers(self.tempCorrection, 1.0, self.XYZ2CameraMatrix, dngDict=self.dngDict)]
multipliers[1] *= self.tintCorrection
self.rawMultipliers = multipliers
m = multipliers[1]
self.rawMultipliers = [self.rawMultipliers[i] / m for i in range(4)]
self.dataChanged.emit(1)
self.sliderTemp.valueChanged.connect(self.tempUpdate)
self.sliderTemp.sliderReleased.connect(lambda : self.tempUpdate(self.sliderTemp.value()))
|
bLUe_PYSIDE2
|
positive
|
def notify(title='', text='', sound=None):
"""Post notification via Notify.app helper.
Args:
title (str, optional): Notification title.
text (str, optional): Notification body text.
sound (str, optional): Name of sound to play.
Raises:
ValueError: Raised if both ``title`` and ``text`` are empty.
Returns:
bool: ``True`` if notification was posted, else ``False``.
"""
if title == text == '':
raise ValueError('Empty notification')
sound = validate_sound(sound) or ''
<DeepExtract>
n = wf().datafile('Notify.app/Contents/MacOS/applet')
</DeepExtract>
if not os.path.exists(n):
<DeepExtract>
archive = os.path.join(os.path.dirname(__file__), 'Notify.tgz')
destdir = wf().datadir
app_path = os.path.join(destdir, 'Notify.app')
n = notifier_program()
log().debug('installing Notify.app to %r ...', destdir)
tgz = tarfile.open(archive, 'r:gz')
tgz.extractall(destdir)
assert os.path.exists(n), 'Notify.app could not be installed in %s' % destdir
icon = notifier_icon_path()
workflow_icon = wf().workflowfile('icon.png')
if os.path.exists(icon):
os.unlink(icon)
png_to_icns(workflow_icon, icon)
if sys.version_info >= (2, 7):
from AppKit import NSWorkspace, NSImage
ws = NSWorkspace.sharedWorkspace()
img = NSImage.alloc().init()
img.initWithContentsOfFile_(icon)
ws.setIcon_forFile_options_(img, app_path, 0)
ip_path = os.path.join(app_path, 'Contents/Info.plist')
bundle_id = '{0}.{1}'.format(wf().bundleid, uuid.uuid4().hex)
data = plistlib.readPlist(ip_path)
log().debug('changing bundle ID to %r', bundle_id)
data['CFBundleIdentifier'] = bundle_id
plistlib.writePlist(data, ip_path)
</DeepExtract>
env = os.environ.copy()
enc = 'utf-8'
env['NOTIFY_TITLE'] = title.encode(enc)
env['NOTIFY_MESSAGE'] = text.encode(enc)
env['NOTIFY_SOUND'] = sound.encode(enc)
cmd = [n]
retcode = subprocess.call(cmd, env=env)
if retcode == 0:
return True
log().error('Notify.app exited with status {0}.'.format(retcode))
return False
|
def notify(title='', text='', sound=None):
"""Post notification via Notify.app helper.
Args:
title (str, optional): Notification title.
text (str, optional): Notification body text.
sound (str, optional): Name of sound to play.
Raises:
ValueError: Raised if both ``title`` and ``text`` are empty.
Returns:
bool: ``True`` if notification was posted, else ``False``.
"""
if title == text == '':
raise ValueError('Empty notification')
sound = validate_sound(sound) or ''
n = wf().datafile('Notify.app/Contents/MacOS/applet')
if not os.path.exists(n):
archive = os.path.join(os.path.dirname(__file__), 'Notify.tgz')
destdir = wf().datadir
app_path = os.path.join(destdir, 'Notify.app')
n = notifier_program()
log().debug('installing Notify.app to %r ...', destdir)
tgz = tarfile.open(archive, 'r:gz')
tgz.extractall(destdir)
assert os.path.exists(n), 'Notify.app could not be installed in %s' % destdir
icon = notifier_icon_path()
workflow_icon = wf().workflowfile('icon.png')
if os.path.exists(icon):
os.unlink(icon)
png_to_icns(workflow_icon, icon)
if sys.version_info >= (2, 7):
from AppKit import NSWorkspace, NSImage
ws = NSWorkspace.sharedWorkspace()
img = NSImage.alloc().init()
img.initWithContentsOfFile_(icon)
ws.setIcon_forFile_options_(img, app_path, 0)
ip_path = os.path.join(app_path, 'Contents/Info.plist')
bundle_id = '{0}.{1}'.format(wf().bundleid, uuid.uuid4().hex)
data = plistlib.readPlist(ip_path)
log().debug('changing bundle ID to %r', bundle_id)
data['CFBundleIdentifier'] = bundle_id
plistlib.writePlist(data, ip_path)
env = os.environ.copy()
enc = 'utf-8'
env['NOTIFY_TITLE'] = title.encode(enc)
env['NOTIFY_MESSAGE'] = text.encode(enc)
env['NOTIFY_SOUND'] = sound.encode(enc)
cmd = [n]
retcode = subprocess.call(cmd, env=env)
if retcode == 0:
return True
log().error('Notify.app exited with status {0}.'.format(retcode))
return False
|
alfred-pocket
|
positive
|
def model_fn(features, labels, mode, params):
"""The `model_fn` for TPUEstimator."""
tf.logging.info('*** Features ***')
for name in sorted(features.keys()):
tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape))
input_ids = features['input_ids']
input_mask = features['input_mask']
segment_ids = features['segment_ids']
masked_lm_positions = features['masked_lm_positions']
masked_lm_ids = features['masked_lm_ids']
masked_lm_weights = features['masked_lm_weights']
next_sentence_labels = features['next_sentence_labels']
truncated_masked_lm_probs_teacher = features['truncated_masked_lm_probs']
top_k_indices = features['top_k_indices']
is_training = mode == tf.estimator.ModeKeys.TRAIN
model = modeling.BertModel(config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings)
<DeepExtract>
    input_tensor = gather_indexes(model.get_sequence_output(), masked_lm_positions)
with tf.variable_scope('cls/predictions'):
with tf.variable_scope('transform'):
            input_tensor = tf.layers.dense(input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer(bert_config.initializer_range))
            input_tensor = modeling.layer_norm(input_tensor)
output_bias = tf.get_variable('output_bias', shape=[bert_config.vocab_size], initializer=tf.zeros_initializer())
        logits = tf.matmul(input_tensor, model.get_embedding_table(), transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs_student = tf.nn.log_softmax(logits, axis=-1)
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
prob_shape = tf.shape(log_probs_student)
new_shape = [prob_shape[0], truncation_factor]
top_k_indices = tf.reshape(top_k_indices, new_shape)
top_k_log_probs_student = tf.batch_gather(log_probs_student, top_k_indices)
truncated_masked_lm_probs_teacher = tf.reshape(truncated_masked_lm_probs_teacher, new_shape)
per_example_loss = -tf.reduce_sum(truncated_masked_lm_probs_teacher * top_k_log_probs_student, axis=[-1])
numerator = tf.reduce_sum(masked_lm_weights * per_example_loss)
denominator = tf.reduce_sum(masked_lm_weights) + 1e-05
loss = numerator / denominator
(masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = (loss, per_example_loss, log_probs_student)
</DeepExtract>
<DeepExtract>
with tf.variable_scope('cls/seq_relationship'):
output_weights = tf.get_variable('output_weights', shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable('output_bias', shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(model.get_pooled_output(), output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
one_hot_labels = tf.one_hot(next_sentence_labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
(next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs) = (loss, per_example_loss, log_probs)
</DeepExtract>
total_loss = masked_lm_loss + next_sentence_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info('**** Trainable Variables ****')
for var in tvars:
init_string = ''
if var.name in initialized_variable_names:
init_string = ', *INIT_FROM_CKPT*'
tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(values=next_sentence_example_loss)
return {'masked_lm_accuracy': masked_lm_accuracy, 'masked_lm_loss': masked_lm_mean_loss, 'next_sentence_accuracy': next_sentence_accuracy, 'next_sentence_loss': next_sentence_mean_loss}
eval_metrics = (metric_fn, [masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn)
else:
raise ValueError('Only TRAIN and EVAL modes are supported: %s' % mode)
return output_spec
|
def model_fn(features, labels, mode, params):
"""The `model_fn` for TPUEstimator."""
tf.logging.info('*** Features ***')
for name in sorted(features.keys()):
tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape))
input_ids = features['input_ids']
input_mask = features['input_mask']
segment_ids = features['segment_ids']
masked_lm_positions = features['masked_lm_positions']
masked_lm_ids = features['masked_lm_ids']
masked_lm_weights = features['masked_lm_weights']
next_sentence_labels = features['next_sentence_labels']
truncated_masked_lm_probs_teacher = features['truncated_masked_lm_probs']
top_k_indices = features['top_k_indices']
is_training = mode == tf.estimator.ModeKeys.TRAIN
model = modeling.BertModel(config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings)
    input_tensor = gather_indexes(model.get_sequence_output(), masked_lm_positions)
with tf.variable_scope('cls/predictions'):
with tf.variable_scope('transform'):
            input_tensor = tf.layers.dense(input_tensor, units=bert_config.hidden_size, activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer(bert_config.initializer_range))
            input_tensor = modeling.layer_norm(input_tensor)
output_bias = tf.get_variable('output_bias', shape=[bert_config.vocab_size], initializer=tf.zeros_initializer())
        logits = tf.matmul(input_tensor, model.get_embedding_table(), transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs_student = tf.nn.log_softmax(logits, axis=-1)
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
prob_shape = tf.shape(log_probs_student)
new_shape = [prob_shape[0], truncation_factor]
top_k_indices = tf.reshape(top_k_indices, new_shape)
top_k_log_probs_student = tf.batch_gather(log_probs_student, top_k_indices)
truncated_masked_lm_probs_teacher = tf.reshape(truncated_masked_lm_probs_teacher, new_shape)
per_example_loss = -tf.reduce_sum(truncated_masked_lm_probs_teacher * top_k_log_probs_student, axis=[-1])
numerator = tf.reduce_sum(masked_lm_weights * per_example_loss)
denominator = tf.reduce_sum(masked_lm_weights) + 1e-05
loss = numerator / denominator
(masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = (loss, per_example_loss, log_probs_student)
with tf.variable_scope('cls/seq_relationship'):
output_weights = tf.get_variable('output_weights', shape=[2, bert_config.hidden_size], initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable('output_bias', shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(model.get_pooled_output(), output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
one_hot_labels = tf.one_hot(next_sentence_labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
(next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs) = (loss, per_example_loss, log_probs)
total_loss = masked_lm_loss + next_sentence_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info('**** Trainable Variables ****')
for var in tvars:
init_string = ''
if var.name in initialized_variable_names:
init_string = ', *INIT_FROM_CKPT*'
tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(values=next_sentence_example_loss)
return {'masked_lm_accuracy': masked_lm_accuracy, 'masked_lm_loss': masked_lm_mean_loss, 'next_sentence_accuracy': next_sentence_accuracy, 'next_sentence_loss': next_sentence_mean_loss}
eval_metrics = (metric_fn, [masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn)
else:
raise ValueError('Only TRAIN and EVAL modes are supported: %s' % mode)
return output_spec
|
DistillBERT
|
positive
|
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
<DeepExtract>
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
</DeepExtract>
return self._obs_from_buf()
|
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
return self._obs_from_buf()
|
carla-rl
|
positive
|
def _parse_interaction_msg(operation, params):
"""
Parses an interaction (press, move, release) message and returns
the component parts
"""
col = int(params[0])
row = int(params[1])
position = BlueDotPosition(col, row, params[2], params[3])
<DeepExtract>
try:
button = self._buttons[(col, row)]
except KeyError:
raise ButtonDoesNotExist('The button `{}` does not exist'.format((col, row)))
</DeepExtract>
return (button, position)
|
def _parse_interaction_msg(operation, params):
"""
Parses an interaction (press, move, release) message and returns
the component parts
"""
col = int(params[0])
row = int(params[1])
position = BlueDotPosition(col, row, params[2], params[3])
try:
button = self._buttons[(col, row)]
except KeyError:
raise ButtonDoesNotExist('The button `{}` does not exist'.format((col, row)))
return (button, position)
|
BlueDot
|
positive
|
def get_fastbin_targets(proc):
memory_map = open('/proc/{}/maps'.format(proc.pid), 'rb').readlines()
libc = ELF('./libc.so.6')
syms = libc.symbols
writable = []
got_libc_base = False
for x in memory_map:
if 'libc.so.6' in x:
l = x.split(' ')
mem_start = int(l[0].split('-')[0], 16)
mem_end = int(l[0].split('-')[1], 16)
if not got_libc_base:
LIBC = mem_start
got_libc_base = True
prot = l[1]
if 'rw' in prot:
writable.append((mem_start, mem_end))
addrs = []
for (s, e) in writable:
size = e - s
data = proc.leak(s, size)
for i in range(size - 8):
if data[i + 1:i + 8] == '\x00' * 7 and data[i] != '\x00':
addr = i + s
fastbin_size = ord(data[i])
<DeepExtract>
names = []
trimmed_size = fastbin_size & ~7
for x in syms:
if addr <= LIBC + syms[x] <= trimmed_size + addr:
names.append(x)
overwritable_syms = names
</DeepExtract>
addrs.append((addr - LIBC, fastbin_size, overwritable_syms))
return addrs
|
def get_fastbin_targets(proc):
memory_map = open('/proc/{}/maps'.format(proc.pid), 'rb').readlines()
libc = ELF('./libc.so.6')
syms = libc.symbols
writable = []
got_libc_base = False
for x in memory_map:
if 'libc.so.6' in x:
l = x.split(' ')
mem_start = int(l[0].split('-')[0], 16)
mem_end = int(l[0].split('-')[1], 16)
if not got_libc_base:
LIBC = mem_start
got_libc_base = True
prot = l[1]
if 'rw' in prot:
writable.append((mem_start, mem_end))
addrs = []
for (s, e) in writable:
size = e - s
data = proc.leak(s, size)
for i in range(size - 8):
if data[i + 1:i + 8] == '\x00' * 7 and data[i] != '\x00':
addr = i + s
fastbin_size = ord(data[i])
names = []
trimmed_size = fastbin_size & ~7
for x in syms:
if addr <= LIBC + syms[x] <= trimmed_size + addr:
names.append(x)
overwritable_syms = names
addrs.append((addr - LIBC, fastbin_size, overwritable_syms))
return addrs
|
CTF-writeups
|
positive
|
def generate_mastersnapshots_from_json(mastersnapshot_json_data, snapshot_json_data, container=None):
"""
    Get the mastersnapshot and validate the list of snapshots in the json.
The json could be from the database or from a filesystem.
"""
snapshot_data = {}
mastersnapshots = get_field_value(mastersnapshot_json_data, 'snapshots')
if not mastersnapshots:
logger.error('Json MasterSnapshot does not contain snapshots, next!...')
return snapshot_data
for mastersnapshot in mastersnapshots:
set_processed_templates({})
node_resource_types = {}
for nd in mastersnapshot.get('nodes', []):
if 'masterSnapshotId' in nd and 'type' in nd:
node_resource_types[nd['masterSnapshotId']] = nd['type']
<DeepExtract>
snapshot_data = {}
snapshot_type = get_field_value(mastersnapshot, 'type')
if not snapshot_type:
snapshot_source = get_field_value(mastersnapshot, 'source')
connector_data = get_custom_data(snapshot_source)
if connector_data:
snapshot_type = get_field_value(connector_data, 'type')
if snapshot_type and snapshot_type in mastersnapshot_fns:
if 'nodes' not in mastersnapshot or not mastersnapshot['nodes']:
logger.error('No nodes in snapshot to be backed up!...')
current_data = snapshot_data
snapshot_data = mastersnapshot_fns[snapshot_type](mastersnapshot, container)
logger.info('\tSnapshot:')
for (key, value) in snapshot_data.items():
logger.info('\t%s:%s', key, json.dumps(value))
current_data = snapshot_data
</DeepExtract>
for (ms_id, node_list) in current_data.items():
if isinstance(node_list, list):
if ms_id in snapshot_data:
if isinstance(snapshot_data[ms_id], list):
snapshot_data[ms_id].extend(node_list)
else:
snapshot_data[ms_id] = node_list
else:
snapshot_data[ms_id] = node_list
else:
logger.debug('No snapshot found for resource type: "%s" in %s connector ' % (node_resource_types[ms_id], get_field_value(mastersnapshot, 'source')))
if ms_id not in snapshot_data:
snapshot_data[ms_id] = node_list
return snapshot_data
|
def generate_mastersnapshots_from_json(mastersnapshot_json_data, snapshot_json_data, container=None):
"""
    Get the mastersnapshot and validate the list of snapshots in the json.
The json could be from the database or from a filesystem.
"""
snapshot_data = {}
mastersnapshots = get_field_value(mastersnapshot_json_data, 'snapshots')
if not mastersnapshots:
logger.error('Json MasterSnapshot does not contain snapshots, next!...')
return snapshot_data
for mastersnapshot in mastersnapshots:
set_processed_templates({})
node_resource_types = {}
for nd in mastersnapshot.get('nodes', []):
if 'masterSnapshotId' in nd and 'type' in nd:
node_resource_types[nd['masterSnapshotId']] = nd['type']
snapshot_data = {}
snapshot_type = get_field_value(mastersnapshot, 'type')
if not snapshot_type:
snapshot_source = get_field_value(mastersnapshot, 'source')
connector_data = get_custom_data(snapshot_source)
if connector_data:
snapshot_type = get_field_value(connector_data, 'type')
if snapshot_type and snapshot_type in mastersnapshot_fns:
if 'nodes' not in mastersnapshot or not mastersnapshot['nodes']:
logger.error('No nodes in snapshot to be backed up!...')
current_data = snapshot_data
snapshot_data = mastersnapshot_fns[snapshot_type](mastersnapshot, container)
logger.info('\tSnapshot:')
for (key, value) in snapshot_data.items():
logger.info('\t%s:%s', key, json.dumps(value))
current_data = snapshot_data
for (ms_id, node_list) in current_data.items():
if isinstance(node_list, list):
if ms_id in snapshot_data:
if isinstance(snapshot_data[ms_id], list):
snapshot_data[ms_id].extend(node_list)
else:
snapshot_data[ms_id] = node_list
else:
snapshot_data[ms_id] = node_list
else:
logger.debug('No snapshot found for resource type: "%s" in %s connector ' % (node_resource_types[ms_id], get_field_value(mastersnapshot, 'source')))
if ms_id not in snapshot_data:
snapshot_data[ms_id] = node_list
return snapshot_data
|
cloud-validation-framework
|
positive
|
def get_value(expr: ast.AST | astroid.NodeNG, allow_inference: bool=True) -> object:
if isinstance(expr, ast.AST):
with suppress(ValueError, SyntaxError):
return ast.literal_eval(expr)
return UNKNOWN
if astroid is None:
return UNKNOWN
if isinstance(expr, astroid.NodeNG):
with suppress(AttributeError):
renderred = expr.as_string()
with suppress(ValueError, SyntaxError):
return ast.literal_eval(renderred)
<DeepExtract>
if not isinstance(expr, (astroid.List, astroid.Set, astroid.Tuple)):
value = UNKNOWN
result = []
for element_expr in expr.elts:
value = get_value(expr=element_expr)
if value is UNKNOWN:
value = UNKNOWN
result.append(value)
if type(expr) is astroid.Tuple:
value = tuple(result)
if type(expr) is astroid.Set:
value = set(result)
value = result
</DeepExtract>
if value is not UNKNOWN:
return value
if allow_inference:
for parent_expr in infer(expr):
if parent_expr == expr:
continue
<DeepExtract>
if isinstance(parent_expr, ast.AST):
with suppress(ValueError, SyntaxError):
value = ast.literal_eval(parent_expr)
value = UNKNOWN
if astroid is None:
value = UNKNOWN
if isinstance(parent_expr, astroid.NodeNG):
with suppress(AttributeError):
renderred = parent_expr.as_string()
with suppress(ValueError, SyntaxError):
value = ast.literal_eval(renderred)
value = _parse_collections(parent_expr)
if value is not UNKNOWN:
value = value
if allow_inference:
for parent_expr in infer(parent_expr):
if parent_expr == parent_expr:
continue
value = get_value(parent_expr)
if value is not UNKNOWN:
value = value
value = UNKNOWN
</DeepExtract>
if value is not UNKNOWN:
return value
return UNKNOWN
|
def get_value(expr: ast.AST | astroid.NodeNG, allow_inference: bool=True) -> object:
if isinstance(expr, ast.AST):
with suppress(ValueError, SyntaxError):
return ast.literal_eval(expr)
return UNKNOWN
if astroid is None:
return UNKNOWN
if isinstance(expr, astroid.NodeNG):
with suppress(AttributeError):
renderred = expr.as_string()
with suppress(ValueError, SyntaxError):
return ast.literal_eval(renderred)
if not isinstance(expr, (astroid.List, astroid.Set, astroid.Tuple)):
value = UNKNOWN
result = []
for element_expr in expr.elts:
value = get_value(expr=element_expr)
if value is UNKNOWN:
value = UNKNOWN
result.append(value)
if type(expr) is astroid.Tuple:
value = tuple(result)
if type(expr) is astroid.Set:
value = set(result)
value = result
if value is not UNKNOWN:
return value
if allow_inference:
for parent_expr in infer(expr):
if parent_expr == expr:
continue
if isinstance(parent_expr, ast.AST):
with suppress(ValueError, SyntaxError):
value = ast.literal_eval(parent_expr)
value = UNKNOWN
if astroid is None:
value = UNKNOWN
if isinstance(parent_expr, astroid.NodeNG):
with suppress(AttributeError):
renderred = parent_expr.as_string()
with suppress(ValueError, SyntaxError):
value = ast.literal_eval(renderred)
value = _parse_collections(parent_expr)
if value is not UNKNOWN:
value = value
if allow_inference:
for parent_expr in infer(parent_expr):
if parent_expr == parent_expr:
continue
value = get_value(parent_expr)
if value is not UNKNOWN:
value = value
value = UNKNOWN
if value is not UNKNOWN:
return value
return UNKNOWN
|
deal
|
positive
|
def parse_zerion(data_rows, parser, **_kwargs):
for (row_index, data_row) in enumerate(data_rows):
if config.debug:
sys.stderr.write('%sconv: row[%s] %s\n' % (Fore.YELLOW, parser.in_header_row_num + data_row.line_num, data_row))
if data_row.parsed:
continue
try:
<DeepExtract>
row_dict = data_row.row_dict
data_row.timestamp = DataParser.parse_timestamp(row_dict['Date'] + ' ' + row_dict['Time'], tz='Europe/London')
data_row.parsed = True
(fee_quantity, fee_asset, fee_value) = _get_data(data_row, 'Fee Amount', 'Fee Currency', 'Fee Fiat Amount', 'Fee Fiat Currency')
if row_dict['Status'] != 'Confirmed':
data_row.t_record = TransactionOutRecord(TransactionOutRecord.TYPE_SPEND, data_row.timestamp, sell_quantity=Decimal(0), sell_asset=fee_asset, fee_quantity=fee_quantity, fee_asset=fee_asset, fee_value=fee_value, wallet=WALLET)
return
changes = json.loads(row_dict['Changes JSON'])
t_ins = [t for t in changes if t['type'] == 'in']
t_outs = [t for t in changes if t['type'] == 'out']
if row_dict['Accounting Type'] == 'Income':
if len(t_ins) > 1:
_do_zerion_multi_deposit(data_row, data_rows, row_index, t_ins)
else:
(buy_quantity, buy_asset, buy_value) = _get_data(data_row, 'Buy Amount', 'Buy Currency', 'Buy Fiat Amount', 'Buy Fiat Currency', t_ins)
data_row.t_record = TransactionOutRecord(TransactionOutRecord.TYPE_DEPOSIT, data_row.timestamp, buy_quantity=buy_quantity, buy_asset=buy_asset, buy_value=buy_value, fee_quantity=fee_quantity, fee_asset=fee_asset, fee_value=fee_value, wallet=WALLET)
elif row_dict['Accounting Type'] == 'Spend':
if len(t_outs) > 1:
_do_zerion_multi_withdrawal(data_row, data_rows, row_index, t_outs)
else:
(sell_quantity, sell_asset, sell_value) = _get_data(data_row, 'Sell Amount', 'Sell Currency', 'Sell Fiat Amount', 'Sell Fiat Currency', t_outs)
if sell_quantity is None and fee_quantity is None:
return
if sell_quantity is None:
sell_quantity = 0
sell_asset = fee_asset
data_row.t_record = TransactionOutRecord(TransactionOutRecord.TYPE_WITHDRAWAL, data_row.timestamp, sell_quantity=sell_quantity, sell_asset=sell_asset, sell_value=sell_value, fee_quantity=fee_quantity, fee_asset=fee_asset, fee_value=fee_value, wallet=WALLET)
elif row_dict['Accounting Type'] == 'Trade':
if len(t_ins) == 1:
_do_zerion_multi_sell(data_row, data_rows, row_index, t_ins, t_outs)
elif len(t_outs) == 1:
_do_zerion_multi_buy(data_row, data_rows, row_index, t_ins, t_outs)
else:
raise UnexpectedContentError(parser.in_header.index('Changes JSON'), 'Changes JSON', row_dict['Changes JSON'])
else:
raise UnexpectedTypeError(parser.in_header.index('Accounting Type'), 'Accounting Type', row_dict['Accounting Type'])
</DeepExtract>
except DataRowError as e:
data_row.failure = e
except (ValueError, ArithmeticError) as e:
if config.debug:
raise
data_row.failure = e
|
def parse_zerion(data_rows, parser, **_kwargs):
for (row_index, data_row) in enumerate(data_rows):
if config.debug:
sys.stderr.write('%sconv: row[%s] %s\n' % (Fore.YELLOW, parser.in_header_row_num + data_row.line_num, data_row))
if data_row.parsed:
continue
try:
row_dict = data_row.row_dict
data_row.timestamp = DataParser.parse_timestamp(row_dict['Date'] + ' ' + row_dict['Time'], tz='Europe/London')
data_row.parsed = True
(fee_quantity, fee_asset, fee_value) = _get_data(data_row, 'Fee Amount', 'Fee Currency', 'Fee Fiat Amount', 'Fee Fiat Currency')
if row_dict['Status'] != 'Confirmed':
data_row.t_record = TransactionOutRecord(TransactionOutRecord.TYPE_SPEND, data_row.timestamp, sell_quantity=Decimal(0), sell_asset=fee_asset, fee_quantity=fee_quantity, fee_asset=fee_asset, fee_value=fee_value, wallet=WALLET)
return
changes = json.loads(row_dict['Changes JSON'])
t_ins = [t for t in changes if t['type'] == 'in']
t_outs = [t for t in changes if t['type'] == 'out']
if row_dict['Accounting Type'] == 'Income':
if len(t_ins) > 1:
_do_zerion_multi_deposit(data_row, data_rows, row_index, t_ins)
else:
(buy_quantity, buy_asset, buy_value) = _get_data(data_row, 'Buy Amount', 'Buy Currency', 'Buy Fiat Amount', 'Buy Fiat Currency', t_ins)
data_row.t_record = TransactionOutRecord(TransactionOutRecord.TYPE_DEPOSIT, data_row.timestamp, buy_quantity=buy_quantity, buy_asset=buy_asset, buy_value=buy_value, fee_quantity=fee_quantity, fee_asset=fee_asset, fee_value=fee_value, wallet=WALLET)
elif row_dict['Accounting Type'] == 'Spend':
if len(t_outs) > 1:
_do_zerion_multi_withdrawal(data_row, data_rows, row_index, t_outs)
else:
(sell_quantity, sell_asset, sell_value) = _get_data(data_row, 'Sell Amount', 'Sell Currency', 'Sell Fiat Amount', 'Sell Fiat Currency', t_outs)
if sell_quantity is None and fee_quantity is None:
return
if sell_quantity is None:
sell_quantity = 0
sell_asset = fee_asset
data_row.t_record = TransactionOutRecord(TransactionOutRecord.TYPE_WITHDRAWAL, data_row.timestamp, sell_quantity=sell_quantity, sell_asset=sell_asset, sell_value=sell_value, fee_quantity=fee_quantity, fee_asset=fee_asset, fee_value=fee_value, wallet=WALLET)
elif row_dict['Accounting Type'] == 'Trade':
if len(t_ins) == 1:
_do_zerion_multi_sell(data_row, data_rows, row_index, t_ins, t_outs)
elif len(t_outs) == 1:
_do_zerion_multi_buy(data_row, data_rows, row_index, t_ins, t_outs)
else:
raise UnexpectedContentError(parser.in_header.index('Changes JSON'), 'Changes JSON', row_dict['Changes JSON'])
else:
raise UnexpectedTypeError(parser.in_header.index('Accounting Type'), 'Accounting Type', row_dict['Accounting Type'])
except DataRowError as e:
data_row.failure = e
except (ValueError, ArithmeticError) as e:
if config.debug:
raise
data_row.failure = e
|
BittyTax
|
positive
|
def add_range(self, column: int, start: int, end: int):
"""
Extends the active cells in the column by the range (start,end).
Ranges smaller than the current one are ignored.
Note (1, m+1), not (0,m) corresponds to an entire column.
Parameters
----------
column
Column int index
start
Row element int index where start >= 1 and start <= end
end:
Row element int index where end >= 1 and end <= m+1
"""
if start < 1 or start > self.m:
raise IndexError(f'Start must be >=1 and <=m, got {start}')
if end < 1 or end > self.m + 1:
raise IndexError(f'End must be >=1 and <=m+1, got {end}')
start_idx = column * 2 + 0
end_idx = column * 2 + 1
orig_start = self.column_ranges[start_idx]
orig_end = self.column_ranges[end_idx]
(start, end) = (min(orig_start, start), max(orig_end, end))
<DeepExtract>
orig_row_length = orig_end - orig_start if orig_end - orig_start > 0 else 0
</DeepExtract>
<DeepExtract>
row_length = end - start if end - start > 0 else 0
</DeepExtract>
self.length += row_length - orig_row_length
self.column_ranges[start_idx] = start
self.column_ranges[end_idx] = end
|
def add_range(self, column: int, start: int, end: int):
"""
Extends the active cells in the column by the range (start,end).
Ranges smaller than the current one are ignored.
Note (1, m+1), not (0,m) corresponds to an entire column.
Parameters
----------
column
Column int index
start
Row element int index where start >= 1 and start <= end
end:
Row element int index where end >= 1 and end <= m+1
"""
if start < 1 or start > self.m:
raise IndexError(f'Start must be >=1 and <=m, got {start}')
if end < 1 or end > self.m + 1:
raise IndexError(f'End must be >=1 and <=m+1, got {end}')
start_idx = column * 2 + 0
end_idx = column * 2 + 1
orig_start = self.column_ranges[start_idx]
orig_end = self.column_ranges[end_idx]
(start, end) = (min(orig_start, start), max(orig_end, end))
orig_row_length = orig_end - orig_start if orig_end - orig_start > 0 else 0
row_length = end - start if end - start > 0 else 0
self.length += row_length - orig_row_length
self.column_ranges[start_idx] = start
self.column_ranges[end_idx] = end
|
darts
|
positive
|
def test_step(self, data):
"""One test step
Arguments:
data {dict of data} -- required keys and values:
'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of context sentences
'X_floor' {LongTensor [batch_size, history_len]} -- floors of context sentences
'Y_floor' {LongTensor [batch_size]} -- floor of response sentence
Returns:
dict of data -- returned keys and values
'symbols' {LongTensor [batch_size, max_decode_len]} -- token ids of response hypothesis
dict of statistics -- returned keys and values
"""
(X, Y) = (data['X'], data['Y'])
(X_floor, Y_floor) = (data['X_floor'], data['Y_floor'])
batch_size = X.size(0)
with torch.no_grad():
<DeepExtract>
(batch_size, history_len, max_x_sent_len) = X.size()
assert history_len == 1
X = X.view(batch_size, max_x_sent_len)
input_lens = (X != self.pad_token_id).sum(-1)
(word_encodings, _, sent_encodings) = self.sent_encoder(X, input_lens)
word_encodings = word_encodings.view(batch_size, max_x_sent_len, -1)
sent_encodings = sent_encodings.view(batch_size, -1)
if self.floor_encoder is not None:
src_floors = X_floor.view(-1)
tgt_floors = Y_floor.view(-1)
sent_encodings = self.floor_encoder(sent_encodings, src_floors=src_floors, tgt_floors=tgt_floors)
(word_encodings, sent_encodings) = (word_encodings, sent_encodings)
</DeepExtract>
attn_ctx = word_encodings.view(batch_size, -1, word_encodings.size(-1))
<DeepExtract>
attn_mask = X.view(batch_size, -1) != self.pad_token_id
attn_mask = attn_mask
</DeepExtract>
<DeepExtract>
batch_size = sent_encodings.size(0)
hiddens = self._init_dec_hiddens(sent_encodings)
feats = None
feats = sent_encodings.unsqueeze(1).repeat(1, self.decode_max_len, 1)
ret_dict = self.decoder.forward(batch_size=batch_size, hiddens=hiddens, feats=feats, attn_ctx=attn_ctx, attn_mask=attn_mask, mode=DecoderRNN.MODE_FREE_RUN, gen_type=self.gen_type, temp=self.temp, top_p=self.top_p, top_k=self.top_k)
decoder_ret_dict = ret_dict
</DeepExtract>
ret_data = {'symbols': decoder_ret_dict['symbols']}
ret_stat = {}
return (ret_data, ret_stat)
|
def test_step(self, data):
"""One test step
Arguments:
data {dict of data} -- required keys and values:
'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of context sentences
'X_floor' {LongTensor [batch_size, history_len]} -- floors of context sentences
'Y_floor' {LongTensor [batch_size]} -- floor of response sentence
Returns:
dict of data -- returned keys and values
'symbols' {LongTensor [batch_size, max_decode_len]} -- token ids of response hypothesis
dict of statistics -- returned keys and values
"""
(X, Y) = (data['X'], data['Y'])
(X_floor, Y_floor) = (data['X_floor'], data['Y_floor'])
batch_size = X.size(0)
with torch.no_grad():
(batch_size, history_len, max_x_sent_len) = X.size()
assert history_len == 1
X = X.view(batch_size, max_x_sent_len)
input_lens = (X != self.pad_token_id).sum(-1)
(word_encodings, _, sent_encodings) = self.sent_encoder(X, input_lens)
word_encodings = word_encodings.view(batch_size, max_x_sent_len, -1)
sent_encodings = sent_encodings.view(batch_size, -1)
if self.floor_encoder is not None:
src_floors = X_floor.view(-1)
tgt_floors = Y_floor.view(-1)
sent_encodings = self.floor_encoder(sent_encodings, src_floors=src_floors, tgt_floors=tgt_floors)
(word_encodings, sent_encodings) = (word_encodings, sent_encodings)
attn_ctx = word_encodings.view(batch_size, -1, word_encodings.size(-1))
attn_mask = X.view(batch_size, -1) != self.pad_token_id
attn_mask = attn_mask
batch_size = sent_encodings.size(0)
hiddens = self._init_dec_hiddens(sent_encodings)
feats = None
feats = sent_encodings.unsqueeze(1).repeat(1, self.decode_max_len, 1)
ret_dict = self.decoder.forward(batch_size=batch_size, hiddens=hiddens, feats=feats, attn_ctx=attn_ctx, attn_mask=attn_mask, mode=DecoderRNN.MODE_FREE_RUN, gen_type=self.gen_type, temp=self.temp, top_p=self.top_p, top_k=self.top_k)
decoder_ret_dict = ret_dict
ret_data = {'symbols': decoder_ret_dict['symbols']}
ret_stat = {}
return (ret_data, ret_stat)
|
dialog-processing
|
positive
|
def test_itersearch(self):
a = bitarray('10011')
self.assertRaises(ValueError, a.itersearch, bitarray())
self.assertRaises(TypeError, a.itersearch, 1, 0)
self.assertRaises(TypeError, a.itersearch, '')
it = a.itersearch(1)
<DeepExtract>
self.assertEqual(type(it).__name__, 'searchiterator')
self.assertEqual(repr(type(it)), "<%s 'bitarray.%s'>" % ('class' if is_py3k or 'searchiterator' == 'frozenbitarray' else 'type', 'searchiterator'))
</DeepExtract>
self.assertEqual(next(it), 0)
self.assertEqual(next(it), 3)
self.assertEqual(next(it), 4)
<DeepExtract>
self.assertRaises(StopIteration, next, it)
</DeepExtract>
x = bitarray('11')
it = a.itersearch(x)
del a, x
self.assertEqual(next(it), 3)
|
def test_itersearch(self):
a = bitarray('10011')
self.assertRaises(ValueError, a.itersearch, bitarray())
self.assertRaises(TypeError, a.itersearch, 1, 0)
self.assertRaises(TypeError, a.itersearch, '')
it = a.itersearch(1)
self.assertEqual(type(it).__name__, 'searchiterator')
self.assertEqual(repr(type(it)), "<%s 'bitarray.%s'>" % ('class' if is_py3k or 'searchiterator' == 'frozenbitarray' else 'type', 'searchiterator'))
self.assertEqual(next(it), 0)
self.assertEqual(next(it), 3)
self.assertEqual(next(it), 4)
self.assertRaises(StopIteration, next, it)
x = bitarray('11')
it = a.itersearch(x)
del a, x
self.assertEqual(next(it), 3)
|
bitarray
|
positive
|
def _get_data(self):
"""Load frame paths and annotations. """
list_filenames = [os.path.join(cfg.AVA.FRAME_LIST_DIR, filename) for filename in (cfg.AVA.TRAIN_LISTS if self._split == 'train' or cfg.GET_TRAIN_LFB else cfg.AVA.TEST_LISTS)]
(self._image_paths, _, self._video_idx_to_name, _) = dataset_helper.load_image_lists(list_filenames)
if self._lfb_infer_only:
ann_filenames = [os.path.join(cfg.AVA.ANNOTATION_DIR, filename) for filename in (cfg.AVA.TRAIN_LFB_BOX_LISTS if cfg.GET_TRAIN_LFB else cfg.AVA.TEST_LFB_BOX_LISTS)]
else:
ann_filenames = [os.path.join(cfg.AVA.ANNOTATION_DIR, filename) for filename in (cfg.AVA.TRAIN_BOX_LISTS if self._split == 'train' else cfg.AVA.TEST_BOX_LISTS)]
<DeepExtract>
ret = {}
count = 0
unique_box_count = 0
for filename in ann_filenames:
with open(filename, 'r') as f:
for line in f:
row = line.strip().split(',')
assert len(row) == 7 or len(row) == 8
(video_name, frame_sec) = (row[0], int(row[1]))
if not self._split == 'train' and (not self._full_eval) and (frame_sec % 4 != 0):
continue
box_key = ','.join(row[2:6])
box = map(float, row[2:6])
label = -1 if row[6] == '' else int(row[6])
if 'predicted' in filename:
score = float(row[7])
if score < self._detect_thresh:
continue
if video_name not in ret:
ret[video_name] = {}
for sec in AVA_VALID_FRAMES:
ret[video_name][sec] = {}
if box_key not in ret[video_name][frame_sec]:
if self._split == 'train':
if cfg.TRAIN.MAX_BOX_NUM is not None and len(ret[video_name][frame_sec]) >= cfg.TRAIN.MAX_BOX_NUM:
continue
ret[video_name][frame_sec][box_key] = [box, []]
unique_box_count += 1
ret[video_name][frame_sec][box_key][1].append(label)
if label != -1:
count += 1
for video_name in ret.keys():
for frame_sec in ret[video_name].keys():
ret[video_name][frame_sec] = ret[video_name][frame_sec].values()
logger.info('Finished loading annotations from')
for filename in ann_filenames:
logger.info(' %s' % filename)
logger.info('Number of unique boxes: %d' % unique_box_count)
logger.info('Number of annotations: %d' % count)
self._boxes_and_labels = ret
</DeepExtract>
assert len(self._boxes_and_labels) == len(self._image_paths), (len(self._boxes_and_labels), len(self._image_paths))
self._boxes_and_labels = [self._boxes_and_labels[self._video_idx_to_name[i]] for i in range(len(self._image_paths))]
<DeepExtract>
keyframe_indices = []
count = 0
for video_idx in range(len(self._boxes_and_labels)):
for sec in self._boxes_and_labels[video_idx].keys():
if sec not in AVA_VALID_FRAMES:
logger.info(sec)
continue
if len(self._boxes_and_labels[video_idx][sec]) > 0:
keyframe_indices.append((video_idx, sec, sec_to_frame(sec)))
count += 1
logger.info('%d keyframes used.' % count)
self._keyframe_indices = keyframe_indices
</DeepExtract>
<DeepExtract>
box_indices = []
each_class_box_indices = [[] for i in range(80)]
count = 0
for video_idx in range(len(self._boxes_and_labels)):
for sec in self._boxes_and_labels[video_idx].keys():
if sec not in AVA_VALID_FRAMES:
logger.info(sec)
continue
for box_idx in range(len(self._boxes_and_labels[video_idx][sec])):
box_indices.append((video_idx, sec, sec_to_frame(sec), box_idx))
count += 1
for c in self._boxes_and_labels[video_idx][sec][box_idx][1]:
each_class_box_indices[c - 1].append(count - 1)
logger.info('%d boxes used.' % count)
(self._box_indices, self._each_class_box_indices) = (box_indices, each_class_box_indices)
</DeepExtract>
<DeepExtract>
count = 0
for (video_idx, sec, _) in self._keyframe_indices:
count += len(self._boxes_and_labels[video_idx][sec])
self._num_boxes_used = count
</DeepExtract>
<DeepExtract>
logger.info('=== AVA dataset summary ===')
logger.info('Split: {}'.format(self._split))
logger.info('Use LFB? {}'.format(self._lfb_enabled))
logger.info('Detection threshold: {}'.format(self._detect_thresh))
if self._split != 'train':
logger.info('Full evaluation? {}'.format(self._full_eval))
logger.info('Spatial shift position: {}'.format(self._shift))
logger.info('Number of videos: {}'.format(len(self._image_paths)))
total_frames = sum((len(video_img_paths) for video_img_paths in self._image_paths))
logger.info('Number of frames: {}'.format(total_frames))
logger.info('Number of dataset: {}'.format(self.get_db_size()))
logger.info('Number of boxes: {}.'.format(self._num_boxes_used))
logger.info('Number of box indices: {}.'.format(len(self._box_indices)))
</DeepExtract>
|
def _get_data(self):
"""Load frame paths and annotations. """
list_filenames = [os.path.join(cfg.AVA.FRAME_LIST_DIR, filename) for filename in (cfg.AVA.TRAIN_LISTS if self._split == 'train' or cfg.GET_TRAIN_LFB else cfg.AVA.TEST_LISTS)]
(self._image_paths, _, self._video_idx_to_name, _) = dataset_helper.load_image_lists(list_filenames)
if self._lfb_infer_only:
ann_filenames = [os.path.join(cfg.AVA.ANNOTATION_DIR, filename) for filename in (cfg.AVA.TRAIN_LFB_BOX_LISTS if cfg.GET_TRAIN_LFB else cfg.AVA.TEST_LFB_BOX_LISTS)]
else:
ann_filenames = [os.path.join(cfg.AVA.ANNOTATION_DIR, filename) for filename in (cfg.AVA.TRAIN_BOX_LISTS if self._split == 'train' else cfg.AVA.TEST_BOX_LISTS)]
ret = {}
count = 0
unique_box_count = 0
for filename in ann_filenames:
with open(filename, 'r') as f:
for line in f:
row = line.strip().split(',')
assert len(row) == 7 or len(row) == 8
(video_name, frame_sec) = (row[0], int(row[1]))
if not self._split == 'train' and (not self._full_eval) and (frame_sec % 4 != 0):
continue
box_key = ','.join(row[2:6])
box = map(float, row[2:6])
label = -1 if row[6] == '' else int(row[6])
if 'predicted' in filename:
score = float(row[7])
if score < self._detect_thresh:
continue
if video_name not in ret:
ret[video_name] = {}
for sec in AVA_VALID_FRAMES:
ret[video_name][sec] = {}
if box_key not in ret[video_name][frame_sec]:
if self._split == 'train':
if cfg.TRAIN.MAX_BOX_NUM is not None and len(ret[video_name][frame_sec]) >= cfg.TRAIN.MAX_BOX_NUM:
continue
ret[video_name][frame_sec][box_key] = [box, []]
unique_box_count += 1
ret[video_name][frame_sec][box_key][1].append(label)
if label != -1:
count += 1
for video_name in ret.keys():
for frame_sec in ret[video_name].keys():
ret[video_name][frame_sec] = ret[video_name][frame_sec].values()
logger.info('Finished loading annotations from')
for filename in ann_filenames:
logger.info(' %s' % filename)
logger.info('Number of unique boxes: %d' % unique_box_count)
logger.info('Number of annotations: %d' % count)
self._boxes_and_labels = ret
assert len(self._boxes_and_labels) == len(self._image_paths), (len(self._boxes_and_labels), len(self._image_paths))
self._boxes_and_labels = [self._boxes_and_labels[self._video_idx_to_name[i]] for i in range(len(self._image_paths))]
keyframe_indices = []
count = 0
for video_idx in range(len(self._boxes_and_labels)):
for sec in self._boxes_and_labels[video_idx].keys():
if sec not in AVA_VALID_FRAMES:
logger.info(sec)
continue
if len(self._boxes_and_labels[video_idx][sec]) > 0:
keyframe_indices.append((video_idx, sec, sec_to_frame(sec)))
count += 1
logger.info('%d keyframes used.' % count)
self._keyframe_indices = keyframe_indices
box_indices = []
each_class_box_indices = [[] for i in range(80)]
count = 0
for video_idx in range(len(self._boxes_and_labels)):
for sec in self._boxes_and_labels[video_idx].keys():
if sec not in AVA_VALID_FRAMES:
logger.info(sec)
continue
for box_idx in range(len(self._boxes_and_labels[video_idx][sec])):
box_indices.append((video_idx, sec, sec_to_frame(sec), box_idx))
count += 1
for c in self._boxes_and_labels[video_idx][sec][box_idx][1]:
each_class_box_indices[c - 1].append(count - 1)
logger.info('%d boxes used.' % count)
(self._box_indices, self._each_class_box_indices) = (box_indices, each_class_box_indices)
count = 0
for (video_idx, sec, _) in self._keyframe_indices:
count += len(self._boxes_and_labels[video_idx][sec])
self._num_boxes_used = count
logger.info('=== AVA dataset summary ===')
logger.info('Split: {}'.format(self._split))
logger.info('Use LFB? {}'.format(self._lfb_enabled))
logger.info('Detection threshold: {}'.format(self._detect_thresh))
if self._split != 'train':
logger.info('Full evaluation? {}'.format(self._full_eval))
logger.info('Spatial shift position: {}'.format(self._shift))
logger.info('Number of videos: {}'.format(len(self._image_paths)))
total_frames = sum((len(video_img_paths) for video_img_paths in self._image_paths))
logger.info('Number of frames: {}'.format(total_frames))
logger.info('Number of dataset: {}'.format(self.get_db_size()))
logger.info('Number of boxes: {}.'.format(self._num_boxes_used))
logger.info('Number of box indices: {}.'.format(len(self._box_indices)))
|
CRCNN-Action
|
positive
|
def test_add_positional(self):
from colander import Positional
p = Positional()
node = DummySchemaNode(p)
<DeepExtract>
from colander import Invalid
exc = Invalid(node, 'msg', val)
exc = exc
</DeepExtract>
other = Dummy()
exc.add(other)
self.assertEqual(other.positional, True)
self.assertEqual(exc.children, [other])
|
def test_add_positional(self):
from colander import Positional
p = Positional()
node = DummySchemaNode(p)
from colander import Invalid
exc = Invalid(node, 'msg', val)
exc = exc
other = Dummy()
exc.add(other)
self.assertEqual(other.positional, True)
self.assertEqual(exc.children, [other])
|
colander
|
positive
|
def save_image_record(self, epoch, image_dict):
img_list = list()
tag_list = list()
for (tag, image) in image_dict.items():
<DeepExtract>
image_name = 'No.' + str(epoch) + step + tag + type
</DeepExtract>
img_list.append(image_name)
tag_list.append(tag)
image.save(os.path.join(self.img, image_name))
self.html.add_header('Epoch: %d' % epoch)
<DeepExtract>
assert len(img_list) == len(tag_list), 'check input'
self.html.add_images(img_list, tag_list, img_list)
self.html.save()
</DeepExtract>
|
def save_image_record(self, epoch, image_dict):
img_list = list()
tag_list = list()
for (tag, image) in image_dict.items():
image_name = 'No.' + str(epoch) + step + tag + type
img_list.append(image_name)
tag_list.append(tag)
image.save(os.path.join(self.img, image_name))
self.html.add_header('Epoch: %d' % epoch)
assert len(img_list) == len(tag_list), 'check input'
self.html.add_images(img_list, tag_list, img_list)
self.html.save()
|
Component-Divide-and-Conquer-for-Real-World-Image-Super-Resolution
|
positive
|
def __iadd__(self, other):
"""add an instance (e.g., from another sentence)."""
if type(other) is tuple:
<DeepExtract>
if other[1] is not None:
self.crefs.append(cook_refs(other[1]))
if other[0] is not None:
self.ctest.append(cook_test(other[0]))
else:
self.ctest.append(None)
</DeepExtract>
else:
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
return self
|
def __iadd__(self, other):
"""add an instance (e.g., from another sentence)."""
if type(other) is tuple:
if other[1] is not None:
self.crefs.append(cook_refs(other[1]))
if other[0] is not None:
self.ctest.append(cook_test(other[0]))
else:
self.ctest.append(None)
else:
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
return self
|
ALBEF
|
positive
|
def user_agent_injection(url, vuln_parameter, payload):
def inject_user_agent(url, vuln_parameter, payload, proxy):
if proxy == None:
opener = _urllib.request.build_opener()
else:
opener = _urllib.request.build_opener(proxy)
if menu.options.data:
menu.options.data = settings.USER_DEFINED_POST_DATA
request = _urllib.request.Request(url, menu.options.data.encode(settings.DEFAULT_CODEC))
else:
url = parameters.get_url_part(url)
request = _urllib.request.Request(url)
headers.do_check(request)
payload = checks.newline_fixation(payload)
request.add_header('User-Agent', payload)
try:
headers.check_http_traffic(request)
response = opener.open(request)
return response
except ValueError:
pass
if settings.TIME_RELATIVE_ATTACK:
start = 0
end = 0
start = time.time()
proxy = None
if menu.options.proxy:
try:
proxy = _urllib.request.ProxyHandler({settings.SCHEME: menu.options.proxy})
<DeepExtract>
if proxy == None:
opener = _urllib.request.build_opener()
else:
opener = _urllib.request.build_opener(proxy)
if menu.options.data:
menu.options.data = settings.USER_DEFINED_POST_DATA
request = _urllib.request.Request(url, menu.options.data.encode(settings.DEFAULT_CODEC))
else:
url = parameters.get_url_part(url)
request = _urllib.request.Request(url)
headers.do_check(request)
payload = checks.newline_fixation(payload)
request.add_header('User-Agent', payload)
try:
headers.check_http_traffic(request)
response = opener.open(request)
response = response
except ValueError:
pass
</DeepExtract>
except Exception as err_msg:
<DeepExtract>
settings.VALID_URL = False
try:
error_msg = str(err_msg.args[0]).split('] ')[1]
except IndexError:
try:
error_msg = str(err_msg.args[0])
except IndexError:
error_msg = str(err_msg)
if any((x in str(error_msg).lower() for x in ['wrong version number', 'ssl', 'https'])):
settings.MAX_RETRIES = 1
error_msg = "Can't establish SSL connection. "
if settings.MULTI_TARGETS or settings.CRAWLING:
error_msg = error_msg + 'Skipping to the next target.'
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif any((x in str(error_msg).lower() for x in ['connection refused', 'timeout'])):
settings.MAX_RETRIES = 1
err = 'Unable to connect to the target URL'
if menu.options.proxy:
err += ' or proxy'
err = err + ' (Reason: ' + str(error_msg) + '). '
if settings.MULTI_TARGETS or settings.CRAWLING:
err = err + 'Skipping to the next target.'
error_msg = err
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.UNAUTHORIZED_ERROR in str(err_msg).lower():
if menu.options.ignore_code == settings.UNAUTHORIZED_ERROR or settings.PERFORM_CRACKING:
response = False
else:
err_msg = 'Not authorized (' + settings.UNAUTHORIZED_ERROR + '). '
err_msg += "Try to provide right HTTP authentication type ('--auth-type') and valid credentials ('--auth-cred')"
if menu.options.auth_type and menu.options.auth_cred:
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += '. '
else:
err_msg += ' or rerun without providing them, in order to perform a dictionary-based attack. '
else:
err_msg += " or rerun by providing option '--ignore-code=" + settings.UNAUTHORIZED_ERROR + "'. "
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += 'Skipping to the next target.'
print(settings.print_critical_msg(err_msg))
if not settings.CRAWLING:
if menu.options.auth_type and menu.options.auth_cred:
raise SystemExit()
elif settings.TOTAL_OF_REQUESTS == 1:
if 'IncompleteRead' in str(error_msg):
error_msg = 'There was an incomplete read error while retrieving data '
error_msg += 'from the target URL.'
elif 'infinite loop' in str(error_msg):
error_msg = 'Infinite redirect loop detected. '
error_msg += 'Please check all provided parameters and/or provide missing ones.'
elif 'BadStatusLine' in str(error_msg):
error_msg = 'Connection dropped or unknown HTTP '
error_msg += 'status code received.'
elif 'forcibly closed' in str(error_msg) or 'Connection is already closed' in str(error_msg):
error_msg = 'Connection was forcibly closed by the target URL.'
elif [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
status_code = [err_code for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]
warn_msg = "The web server responded with an HTTP error code '" + str(status_code[0]) + "' which could interfere with the results of the tests."
print(settings.print_warning_msg(warn_msg))
if not settings.NOT_FOUND_ERROR in str(err_msg).lower():
response = False
response = True
else:
error_msg = 'The provided target URL seems not reachable. '
error_msg += 'In case that it is, please try to re-run using '
if not menu.options.random_agent:
error_msg += "'--random-agent' switch and/or "
error_msg += "'--proxy' option."
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.IDENTIFIED_WARNINGS or settings.IDENTIFIED_PHPINFO or settings.IDENTIFIED_COMMAND_INJECTION or (menu.options.ignore_code and menu.options.ignore_code in str(error_msg).lower()):
response = False
elif settings.IGNORE_ERR_MSG == False:
if menu.options.skip_heuristics and settings.VERBOSITY_LEVEL == 0:
print(settings.SINGLE_WHITESPACE)
continue_tests = checks.continue_tests(err_msg)
if continue_tests == True:
settings.IGNORE_ERR_MSG = True
response = False
elif not settings.CRAWLING:
raise SystemExit()
else:
response = False
else:
if settings.VERBOSITY_LEVEL >= 1:
if [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
debug_msg = 'Got ' + str(err_msg)
print(settings.print_debug_msg(debug_msg))
else:
print(settings.print_critical_msg(err_msg))
response = False
</DeepExtract>
elif menu.options.tor:
try:
proxy = _urllib.request.ProxyHandler({settings.TOR_HTTP_PROXY_SCHEME: settings.TOR_HTTP_PROXY_IP + ':' + settings.TOR_HTTP_PROXY_PORT})
<DeepExtract>
if proxy == None:
opener = _urllib.request.build_opener()
else:
opener = _urllib.request.build_opener(proxy)
if menu.options.data:
menu.options.data = settings.USER_DEFINED_POST_DATA
request = _urllib.request.Request(url, menu.options.data.encode(settings.DEFAULT_CODEC))
else:
url = parameters.get_url_part(url)
request = _urllib.request.Request(url)
headers.do_check(request)
payload = checks.newline_fixation(payload)
request.add_header('User-Agent', payload)
try:
headers.check_http_traffic(request)
response = opener.open(request)
response = response
except ValueError:
pass
</DeepExtract>
except Exception as err_msg:
<DeepExtract>
settings.VALID_URL = False
try:
error_msg = str(err_msg.args[0]).split('] ')[1]
except IndexError:
try:
error_msg = str(err_msg.args[0])
except IndexError:
error_msg = str(err_msg)
if any((x in str(error_msg).lower() for x in ['wrong version number', 'ssl', 'https'])):
settings.MAX_RETRIES = 1
error_msg = "Can't establish SSL connection. "
if settings.MULTI_TARGETS or settings.CRAWLING:
error_msg = error_msg + 'Skipping to the next target.'
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif any((x in str(error_msg).lower() for x in ['connection refused', 'timeout'])):
settings.MAX_RETRIES = 1
err = 'Unable to connect to the target URL'
if menu.options.proxy:
err += ' or proxy'
err = err + ' (Reason: ' + str(error_msg) + '). '
if settings.MULTI_TARGETS or settings.CRAWLING:
err = err + 'Skipping to the next target.'
error_msg = err
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.UNAUTHORIZED_ERROR in str(err_msg).lower():
if menu.options.ignore_code == settings.UNAUTHORIZED_ERROR or settings.PERFORM_CRACKING:
response = False
else:
err_msg = 'Not authorized (' + settings.UNAUTHORIZED_ERROR + '). '
err_msg += "Try to provide right HTTP authentication type ('--auth-type') and valid credentials ('--auth-cred')"
if menu.options.auth_type and menu.options.auth_cred:
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += '. '
else:
err_msg += ' or rerun without providing them, in order to perform a dictionary-based attack. '
else:
err_msg += " or rerun by providing option '--ignore-code=" + settings.UNAUTHORIZED_ERROR + "'. "
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += 'Skipping to the next target.'
print(settings.print_critical_msg(err_msg))
if not settings.CRAWLING:
if menu.options.auth_type and menu.options.auth_cred:
raise SystemExit()
elif settings.TOTAL_OF_REQUESTS == 1:
if 'IncompleteRead' in str(error_msg):
error_msg = 'There was an incomplete read error while retrieving data '
error_msg += 'from the target URL.'
elif 'infinite loop' in str(error_msg):
error_msg = 'Infinite redirect loop detected. '
error_msg += 'Please check all provided parameters and/or provide missing ones.'
elif 'BadStatusLine' in str(error_msg):
error_msg = 'Connection dropped or unknown HTTP '
error_msg += 'status code received.'
elif 'forcibly closed' in str(error_msg) or 'Connection is already closed' in str(error_msg):
error_msg = 'Connection was forcibly closed by the target URL.'
elif [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
status_code = [err_code for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]
warn_msg = "The web server responded with an HTTP error code '" + str(status_code[0]) + "' which could interfere with the results of the tests."
print(settings.print_warning_msg(warn_msg))
if not settings.NOT_FOUND_ERROR in str(err_msg).lower():
response = False
response = True
else:
error_msg = 'The provided target URL seems not reachable. '
error_msg += 'In case that it is, please try to re-run using '
if not menu.options.random_agent:
error_msg += "'--random-agent' switch and/or "
error_msg += "'--proxy' option."
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.IDENTIFIED_WARNINGS or settings.IDENTIFIED_PHPINFO or settings.IDENTIFIED_COMMAND_INJECTION or (menu.options.ignore_code and menu.options.ignore_code in str(error_msg).lower()):
response = False
elif settings.IGNORE_ERR_MSG == False:
if menu.options.skip_heuristics and settings.VERBOSITY_LEVEL == 0:
print(settings.SINGLE_WHITESPACE)
continue_tests = checks.continue_tests(err_msg)
if continue_tests == True:
settings.IGNORE_ERR_MSG = True
response = False
elif not settings.CRAWLING:
raise SystemExit()
else:
response = False
else:
if settings.VERBOSITY_LEVEL >= 1:
if [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
debug_msg = 'Got ' + str(err_msg)
print(settings.print_debug_msg(debug_msg))
else:
print(settings.print_critical_msg(err_msg))
response = False
</DeepExtract>
else:
try:
<DeepExtract>
if proxy == None:
opener = _urllib.request.build_opener()
else:
opener = _urllib.request.build_opener(proxy)
if menu.options.data:
menu.options.data = settings.USER_DEFINED_POST_DATA
request = _urllib.request.Request(url, menu.options.data.encode(settings.DEFAULT_CODEC))
else:
url = parameters.get_url_part(url)
request = _urllib.request.Request(url)
headers.do_check(request)
payload = checks.newline_fixation(payload)
request.add_header('User-Agent', payload)
try:
headers.check_http_traffic(request)
response = opener.open(request)
response = response
except ValueError:
pass
</DeepExtract>
except Exception as err_msg:
<DeepExtract>
settings.VALID_URL = False
try:
error_msg = str(err_msg.args[0]).split('] ')[1]
except IndexError:
try:
error_msg = str(err_msg.args[0])
except IndexError:
error_msg = str(err_msg)
if any((x in str(error_msg).lower() for x in ['wrong version number', 'ssl', 'https'])):
settings.MAX_RETRIES = 1
error_msg = "Can't establish SSL connection. "
if settings.MULTI_TARGETS or settings.CRAWLING:
error_msg = error_msg + 'Skipping to the next target.'
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif any((x in str(error_msg).lower() for x in ['connection refused', 'timeout'])):
settings.MAX_RETRIES = 1
err = 'Unable to connect to the target URL'
if menu.options.proxy:
err += ' or proxy'
err = err + ' (Reason: ' + str(error_msg) + '). '
if settings.MULTI_TARGETS or settings.CRAWLING:
err = err + 'Skipping to the next target.'
error_msg = err
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.UNAUTHORIZED_ERROR in str(err_msg).lower():
if menu.options.ignore_code == settings.UNAUTHORIZED_ERROR or settings.PERFORM_CRACKING:
response = False
else:
err_msg = 'Not authorized (' + settings.UNAUTHORIZED_ERROR + '). '
err_msg += "Try to provide right HTTP authentication type ('--auth-type') and valid credentials ('--auth-cred')"
if menu.options.auth_type and menu.options.auth_cred:
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += '. '
else:
err_msg += ' or rerun without providing them, in order to perform a dictionary-based attack. '
else:
err_msg += " or rerun by providing option '--ignore-code=" + settings.UNAUTHORIZED_ERROR + "'. "
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += 'Skipping to the next target.'
print(settings.print_critical_msg(err_msg))
if not settings.CRAWLING:
if menu.options.auth_type and menu.options.auth_cred:
raise SystemExit()
elif settings.TOTAL_OF_REQUESTS == 1:
if 'IncompleteRead' in str(error_msg):
error_msg = 'There was an incomplete read error while retrieving data '
error_msg += 'from the target URL.'
elif 'infinite loop' in str(error_msg):
error_msg = 'Infinite redirect loop detected. '
error_msg += 'Please check all provided parameters and/or provide missing ones.'
elif 'BadStatusLine' in str(error_msg):
error_msg = 'Connection dropped or unknown HTTP '
error_msg += 'status code received.'
elif 'forcibly closed' in str(error_msg) or 'Connection is already closed' in str(error_msg):
error_msg = 'Connection was forcibly closed by the target URL.'
elif [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
status_code = [err_code for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]
warn_msg = "The web server responded with an HTTP error code '" + str(status_code[0]) + "' which could interfere with the results of the tests."
print(settings.print_warning_msg(warn_msg))
if not settings.NOT_FOUND_ERROR in str(err_msg).lower():
response = False
response = True
else:
error_msg = 'The provided target URL seems not reachable. '
error_msg += 'In case that it is, please try to re-run using '
if not menu.options.random_agent:
error_msg += "'--random-agent' switch and/or "
error_msg += "'--proxy' option."
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.IDENTIFIED_WARNINGS or settings.IDENTIFIED_PHPINFO or settings.IDENTIFIED_COMMAND_INJECTION or (menu.options.ignore_code and menu.options.ignore_code in str(error_msg).lower()):
response = False
elif settings.IGNORE_ERR_MSG == False:
if menu.options.skip_heuristics and settings.VERBOSITY_LEVEL == 0:
print(settings.SINGLE_WHITESPACE)
continue_tests = checks.continue_tests(err_msg)
if continue_tests == True:
settings.IGNORE_ERR_MSG = True
response = False
elif not settings.CRAWLING:
raise SystemExit()
else:
response = False
else:
if settings.VERBOSITY_LEVEL >= 1:
if [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
debug_msg = 'Got ' + str(err_msg)
print(settings.print_debug_msg(debug_msg))
else:
print(settings.print_critical_msg(err_msg))
response = False
</DeepExtract>
if settings.TIME_RELATIVE_ATTACK:
end = time.time()
how_long = int(end - start)
return how_long
else:
return response
|
def user_agent_injection(url, vuln_parameter, payload):
def inject_user_agent(url, vuln_parameter, payload, proxy):
if proxy == None:
opener = _urllib.request.build_opener()
else:
opener = _urllib.request.build_opener(proxy)
if menu.options.data:
menu.options.data = settings.USER_DEFINED_POST_DATA
request = _urllib.request.Request(url, menu.options.data.encode(settings.DEFAULT_CODEC))
else:
url = parameters.get_url_part(url)
request = _urllib.request.Request(url)
headers.do_check(request)
payload = checks.newline_fixation(payload)
request.add_header('User-Agent', payload)
try:
headers.check_http_traffic(request)
response = opener.open(request)
return response
except ValueError:
pass
if settings.TIME_RELATIVE_ATTACK:
start = 0
end = 0
start = time.time()
proxy = None
if menu.options.proxy:
try:
proxy = _urllib.request.ProxyHandler({settings.SCHEME: menu.options.proxy})
if proxy == None:
opener = _urllib.request.build_opener()
else:
opener = _urllib.request.build_opener(proxy)
if menu.options.data:
menu.options.data = settings.USER_DEFINED_POST_DATA
request = _urllib.request.Request(url, menu.options.data.encode(settings.DEFAULT_CODEC))
else:
url = parameters.get_url_part(url)
request = _urllib.request.Request(url)
headers.do_check(request)
payload = checks.newline_fixation(payload)
request.add_header('User-Agent', payload)
try:
headers.check_http_traffic(request)
response = opener.open(request)
response = response
except ValueError:
pass
except Exception as err_msg:
settings.VALID_URL = False
try:
error_msg = str(err_msg.args[0]).split('] ')[1]
except IndexError:
try:
error_msg = str(err_msg.args[0])
except IndexError:
error_msg = str(err_msg)
if any((x in str(error_msg).lower() for x in ['wrong version number', 'ssl', 'https'])):
settings.MAX_RETRIES = 1
error_msg = "Can't establish SSL connection. "
if settings.MULTI_TARGETS or settings.CRAWLING:
error_msg = error_msg + 'Skipping to the next target.'
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif any((x in str(error_msg).lower() for x in ['connection refused', 'timeout'])):
settings.MAX_RETRIES = 1
err = 'Unable to connect to the target URL'
if menu.options.proxy:
err += ' or proxy'
err = err + ' (Reason: ' + str(error_msg) + '). '
if settings.MULTI_TARGETS or settings.CRAWLING:
err = err + 'Skipping to the next target.'
error_msg = err
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.UNAUTHORIZED_ERROR in str(err_msg).lower():
if menu.options.ignore_code == settings.UNAUTHORIZED_ERROR or settings.PERFORM_CRACKING:
response = False
else:
err_msg = 'Not authorized (' + settings.UNAUTHORIZED_ERROR + '). '
err_msg += "Try to provide right HTTP authentication type ('--auth-type') and valid credentials ('--auth-cred')"
if menu.options.auth_type and menu.options.auth_cred:
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += '. '
else:
err_msg += ' or rerun without providing them, in order to perform a dictionary-based attack. '
else:
err_msg += " or rerun by providing option '--ignore-code=" + settings.UNAUTHORIZED_ERROR + "'. "
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += 'Skipping to the next target.'
print(settings.print_critical_msg(err_msg))
if not settings.CRAWLING:
if menu.options.auth_type and menu.options.auth_cred:
raise SystemExit()
elif settings.TOTAL_OF_REQUESTS == 1:
if 'IncompleteRead' in str(error_msg):
error_msg = 'There was an incomplete read error while retrieving data '
error_msg += 'from the target URL.'
elif 'infinite loop' in str(error_msg):
error_msg = 'Infinite redirect loop detected. '
error_msg += 'Please check all provided parameters and/or provide missing ones.'
elif 'BadStatusLine' in str(error_msg):
error_msg = 'Connection dropped or unknown HTTP '
error_msg += 'status code received.'
elif 'forcibly closed' in str(error_msg) or 'Connection is already closed' in str(error_msg):
error_msg = 'Connection was forcibly closed by the target URL.'
elif [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
status_code = [err_code for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]
warn_msg = "The web server responded with an HTTP error code '" + str(status_code[0]) + "' which could interfere with the results of the tests."
print(settings.print_warning_msg(warn_msg))
if not settings.NOT_FOUND_ERROR in str(err_msg).lower():
response = False
response = True
else:
error_msg = 'The provided target URL seems not reachable. '
error_msg += 'In case that it is, please try to re-run using '
if not menu.options.random_agent:
error_msg += "'--random-agent' switch and/or "
error_msg += "'--proxy' option."
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.IDENTIFIED_WARNINGS or settings.IDENTIFIED_PHPINFO or settings.IDENTIFIED_COMMAND_INJECTION or (menu.options.ignore_code and menu.options.ignore_code in str(error_msg).lower()):
response = False
elif settings.IGNORE_ERR_MSG == False:
if menu.options.skip_heuristics and settings.VERBOSITY_LEVEL == 0:
print(settings.SINGLE_WHITESPACE)
continue_tests = checks.continue_tests(err_msg)
if continue_tests == True:
settings.IGNORE_ERR_MSG = True
response = False
elif not settings.CRAWLING:
raise SystemExit()
else:
response = False
else:
if settings.VERBOSITY_LEVEL >= 1:
if [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
debug_msg = 'Got ' + str(err_msg)
print(settings.print_debug_msg(debug_msg))
else:
print(settings.print_critical_msg(err_msg))
response = False
elif menu.options.tor:
try:
proxy = _urllib.request.ProxyHandler({settings.TOR_HTTP_PROXY_SCHEME: settings.TOR_HTTP_PROXY_IP + ':' + settings.TOR_HTTP_PROXY_PORT})
if proxy == None:
opener = _urllib.request.build_opener()
else:
opener = _urllib.request.build_opener(proxy)
if menu.options.data:
menu.options.data = settings.USER_DEFINED_POST_DATA
request = _urllib.request.Request(url, menu.options.data.encode(settings.DEFAULT_CODEC))
else:
url = parameters.get_url_part(url)
request = _urllib.request.Request(url)
headers.do_check(request)
payload = checks.newline_fixation(payload)
request.add_header('User-Agent', payload)
try:
headers.check_http_traffic(request)
response = opener.open(request)
response = response
except ValueError:
pass
except Exception as err_msg:
settings.VALID_URL = False
try:
error_msg = str(err_msg.args[0]).split('] ')[1]
except IndexError:
try:
error_msg = str(err_msg.args[0])
except IndexError:
error_msg = str(err_msg)
if any((x in str(error_msg).lower() for x in ['wrong version number', 'ssl', 'https'])):
settings.MAX_RETRIES = 1
error_msg = "Can't establish SSL connection. "
if settings.MULTI_TARGETS or settings.CRAWLING:
error_msg = error_msg + 'Skipping to the next target.'
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif any((x in str(error_msg).lower() for x in ['connection refused', 'timeout'])):
settings.MAX_RETRIES = 1
err = 'Unable to connect to the target URL'
if menu.options.proxy:
err += ' or proxy'
err = err + ' (Reason: ' + str(error_msg) + '). '
if settings.MULTI_TARGETS or settings.CRAWLING:
err = err + 'Skipping to the next target.'
error_msg = err
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.UNAUTHORIZED_ERROR in str(err_msg).lower():
if menu.options.ignore_code == settings.UNAUTHORIZED_ERROR or settings.PERFORM_CRACKING:
response = False
else:
err_msg = 'Not authorized (' + settings.UNAUTHORIZED_ERROR + '). '
err_msg += "Try to provide right HTTP authentication type ('--auth-type') and valid credentials ('--auth-cred')"
if menu.options.auth_type and menu.options.auth_cred:
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += '. '
else:
err_msg += ' or rerun without providing them, in order to perform a dictionary-based attack. '
else:
err_msg += " or rerun by providing option '--ignore-code=" + settings.UNAUTHORIZED_ERROR + "'. "
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += 'Skipping to the next target.'
print(settings.print_critical_msg(err_msg))
if not settings.CRAWLING:
if menu.options.auth_type and menu.options.auth_cred:
raise SystemExit()
elif settings.TOTAL_OF_REQUESTS == 1:
if 'IncompleteRead' in str(error_msg):
error_msg = 'There was an incomplete read error while retrieving data '
error_msg += 'from the target URL.'
elif 'infinite loop' in str(error_msg):
error_msg = 'Infinite redirect loop detected. '
error_msg += 'Please check all provided parameters and/or provide missing ones.'
elif 'BadStatusLine' in str(error_msg):
error_msg = 'Connection dropped or unknown HTTP '
error_msg += 'status code received.'
elif 'forcibly closed' in str(error_msg) or 'Connection is already closed' in str(error_msg):
error_msg = 'Connection was forcibly closed by the target URL.'
elif [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
status_code = [err_code for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]
warn_msg = "The web server responded with an HTTP error code '" + str(status_code[0]) + "' which could interfere with the results of the tests."
print(settings.print_warning_msg(warn_msg))
if not settings.NOT_FOUND_ERROR in str(err_msg).lower():
response = False
response = True
else:
error_msg = 'The provided target URL seems not reachable. '
error_msg += 'In case that it is, please try to re-run using '
if not menu.options.random_agent:
error_msg += "'--random-agent' switch and/or "
error_msg += "'--proxy' option."
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.IDENTIFIED_WARNINGS or settings.IDENTIFIED_PHPINFO or settings.IDENTIFIED_COMMAND_INJECTION or (menu.options.ignore_code and menu.options.ignore_code in str(error_msg).lower()):
response = False
elif settings.IGNORE_ERR_MSG == False:
if menu.options.skip_heuristics and settings.VERBOSITY_LEVEL == 0:
print(settings.SINGLE_WHITESPACE)
continue_tests = checks.continue_tests(err_msg)
if continue_tests == True:
settings.IGNORE_ERR_MSG = True
response = False
elif not settings.CRAWLING:
raise SystemExit()
else:
response = False
else:
if settings.VERBOSITY_LEVEL >= 1:
if [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
debug_msg = 'Got ' + str(err_msg)
print(settings.print_debug_msg(debug_msg))
else:
print(settings.print_critical_msg(err_msg))
response = False
else:
try:
if proxy == None:
opener = _urllib.request.build_opener()
else:
opener = _urllib.request.build_opener(proxy)
if menu.options.data:
menu.options.data = settings.USER_DEFINED_POST_DATA
request = _urllib.request.Request(url, menu.options.data.encode(settings.DEFAULT_CODEC))
else:
url = parameters.get_url_part(url)
request = _urllib.request.Request(url)
headers.do_check(request)
payload = checks.newline_fixation(payload)
request.add_header('User-Agent', payload)
try:
headers.check_http_traffic(request)
response = opener.open(request)
response = response
except ValueError:
pass
except Exception as err_msg:
settings.VALID_URL = False
try:
error_msg = str(err_msg.args[0]).split('] ')[1]
except IndexError:
try:
error_msg = str(err_msg.args[0])
except IndexError:
error_msg = str(err_msg)
if any((x in str(error_msg).lower() for x in ['wrong version number', 'ssl', 'https'])):
settings.MAX_RETRIES = 1
error_msg = "Can't establish SSL connection. "
if settings.MULTI_TARGETS or settings.CRAWLING:
error_msg = error_msg + 'Skipping to the next target.'
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif any((x in str(error_msg).lower() for x in ['connection refused', 'timeout'])):
settings.MAX_RETRIES = 1
err = 'Unable to connect to the target URL'
if menu.options.proxy:
err += ' or proxy'
err = err + ' (Reason: ' + str(error_msg) + '). '
if settings.MULTI_TARGETS or settings.CRAWLING:
err = err + 'Skipping to the next target.'
error_msg = err
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.UNAUTHORIZED_ERROR in str(err_msg).lower():
if menu.options.ignore_code == settings.UNAUTHORIZED_ERROR or settings.PERFORM_CRACKING:
response = False
else:
err_msg = 'Not authorized (' + settings.UNAUTHORIZED_ERROR + '). '
err_msg += "Try to provide right HTTP authentication type ('--auth-type') and valid credentials ('--auth-cred')"
if menu.options.auth_type and menu.options.auth_cred:
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += '. '
else:
err_msg += ' or rerun without providing them, in order to perform a dictionary-based attack. '
else:
err_msg += " or rerun by providing option '--ignore-code=" + settings.UNAUTHORIZED_ERROR + "'. "
if settings.MULTI_TARGETS or settings.CRAWLING:
err_msg += 'Skipping to the next target.'
print(settings.print_critical_msg(err_msg))
if not settings.CRAWLING:
if menu.options.auth_type and menu.options.auth_cred:
raise SystemExit()
elif settings.TOTAL_OF_REQUESTS == 1:
if 'IncompleteRead' in str(error_msg):
error_msg = 'There was an incomplete read error while retrieving data '
error_msg += 'from the target URL.'
elif 'infinite loop' in str(error_msg):
error_msg = 'Infinite redirect loop detected. '
error_msg += 'Please check all provided parameters and/or provide missing ones.'
elif 'BadStatusLine' in str(error_msg):
error_msg = 'Connection dropped or unknown HTTP '
error_msg += 'status code received.'
elif 'forcibly closed' in str(error_msg) or 'Connection is already closed' in str(error_msg):
error_msg = 'Connection was forcibly closed by the target URL.'
elif [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
status_code = [err_code for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]
warn_msg = "The web server responded with an HTTP error code '" + str(status_code[0]) + "' which could interfere with the results of the tests."
print(settings.print_warning_msg(warn_msg))
if not settings.NOT_FOUND_ERROR in str(err_msg).lower():
response = False
response = True
else:
error_msg = 'The provided target URL seems not reachable. '
error_msg += 'In case that it is, please try to re-run using '
if not menu.options.random_agent:
error_msg += "'--random-agent' switch and/or "
error_msg += "'--proxy' option."
print(settings.print_critical_msg(error_msg))
if not settings.CRAWLING:
raise SystemExit()
else:
response = False
elif settings.IDENTIFIED_WARNINGS or settings.IDENTIFIED_PHPINFO or settings.IDENTIFIED_COMMAND_INJECTION or (menu.options.ignore_code and menu.options.ignore_code in str(error_msg).lower()):
response = False
elif settings.IGNORE_ERR_MSG == False:
if menu.options.skip_heuristics and settings.VERBOSITY_LEVEL == 0:
print(settings.SINGLE_WHITESPACE)
continue_tests = checks.continue_tests(err_msg)
if continue_tests == True:
settings.IGNORE_ERR_MSG = True
response = False
elif not settings.CRAWLING:
raise SystemExit()
else:
response = False
else:
if settings.VERBOSITY_LEVEL >= 1:
if [True for err_code in settings.HTTP_ERROR_CODES if err_code in str(error_msg)]:
debug_msg = 'Got ' + str(err_msg)
print(settings.print_debug_msg(debug_msg))
else:
print(settings.print_critical_msg(err_msg))
response = False
if settings.TIME_RELATIVE_ATTACK:
end = time.time()
how_long = int(end - start)
return how_long
else:
return response
|
commix
|
positive
|
def to_sizes(self, object):
<DeepExtract>
elements = object.findall(self._fixxpath('flavor'))
</DeepExtract>
return [self._to_size(el) for el in elements]
|
def to_sizes(self, object):
elements = object.findall(self._fixxpath('flavor'))
return [self._to_size(el) for el in elements]
|
AEServmon
|
positive
|
@pytest.mark.parametrize('src,result', (("'hello_world'[1:]", 'ello_world'), ("'hello_world'[:5]", 'hello'), ("'hello_world'[::2]", 'hlowrd'), ("'hello_world'[::-1]", 'dlrow_olleh'), ("'hello_world'[-1:0:-2]", 'drwol'), ("'hello_world'[0]", 'h'), ("'hello_world'[-1]", 'd')))
def test_string_slice(src, result):
<DeepExtract>
tree = collect(dedent(src), minimal=True)
loc = ScanLocation(location='<unknown>')
v = Visitor.run_stages(location=loc, stages=('convert', 'rewrite'), ast_tree=tree)
if single:
tree = v.tree[-1]
else:
tree = v.tree
</DeepExtract>
assert isinstance(tree, String)
assert str(tree) == result
|
@pytest.mark.parametrize('src,result', (("'hello_world'[1:]", 'ello_world'), ("'hello_world'[:5]", 'hello'), ("'hello_world'[::2]", 'hlowrd'), ("'hello_world'[::-1]", 'dlrow_olleh'), ("'hello_world'[-1:0:-2]", 'drwol'), ("'hello_world'[0]", 'h'), ("'hello_world'[-1]", 'd')))
def test_string_slice(src, result):
tree = collect(dedent(src), minimal=True)
loc = ScanLocation(location='<unknown>')
v = Visitor.run_stages(location=loc, stages=('convert', 'rewrite'), ast_tree=tree)
if single:
tree = v.tree[-1]
else:
tree = v.tree
assert isinstance(tree, String)
assert str(tree) == result
|
aura
|
positive
|
def keypoints_error(gt, est, names, use_align=False, joint_level=True):
assert gt.shape[-1] == 4
assert est.shape[-1] == 4
isValid = est[..., -1] > 0
isValidGT = gt[..., -1] > 0
isValid_common = isValid * isValidGT
est = est[..., :-1]
gt = gt[..., :-1]
dist = {}
dist['abs'] = np.sqrt(((gt - est) ** 2).sum(axis=-1)) * 1000
dist['pck@50'] = dist['abs'] < 50
if use_align:
l_id = names.index('LHip')
r_id = names.index('RHip')
assert isValid[l_id] and isValid[r_id]
assert isValidGT[l_id] and isValidGT[r_id]
(gt, est) = (align_by_pelvis(gt, names), align_by_pelvis(est, names))
dist['ra'] = np.sqrt(((est - gt) ** 2).sum(axis=-1)) * 1000
<DeepExtract>
transposed = False
if est.shape[0] != 3 and est.shape[0] != 2:
est = est.T
gt = gt.T
transposed = True
assert gt.shape[1] == est.shape[1]
mu1 = est.mean(axis=1, keepdims=True)
mu2 = gt.mean(axis=1, keepdims=True)
X1 = est - mu1
X2 = gt - mu2
var1 = np.sum(X1 ** 2)
K = X1.dot(X2.T)
(U, s, Vh) = np.linalg.svd(K)
V = Vh.T
Z = np.eye(U.shape[0])
Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
R = V.dot(Z.dot(U.T))
scale = np.trace(R.dot(K)) / var1
t = mu2 - scale * R.dot(mu1)
S1_hat = scale * R.dot(est) + t
if transposed:
S1_hat = S1_hat.T
est_hat = S1_hat
</DeepExtract>
dist['pa'] = np.sqrt(((est_hat - gt) ** 2).sum(axis=-1)) * 1000
result = {}
for key in ['abs', 'ra', 'pa', 'pck@50', 'pck@100']:
if key not in dist:
continue
result[key + '_mean'] = dist[key].mean()
if joint_level:
for (i, name) in enumerate(names):
result[key + '_' + name] = dist[key][i]
return result
|
def keypoints_error(gt, est, names, use_align=False, joint_level=True):
assert gt.shape[-1] == 4
assert est.shape[-1] == 4
isValid = est[..., -1] > 0
isValidGT = gt[..., -1] > 0
isValid_common = isValid * isValidGT
est = est[..., :-1]
gt = gt[..., :-1]
dist = {}
dist['abs'] = np.sqrt(((gt - est) ** 2).sum(axis=-1)) * 1000
dist['pck@50'] = dist['abs'] < 50
if use_align:
l_id = names.index('LHip')
r_id = names.index('RHip')
assert isValid[l_id] and isValid[r_id]
assert isValidGT[l_id] and isValidGT[r_id]
(gt, est) = (align_by_pelvis(gt, names), align_by_pelvis(est, names))
dist['ra'] = np.sqrt(((est - gt) ** 2).sum(axis=-1)) * 1000
transposed = False
if est.shape[0] != 3 and est.shape[0] != 2:
est = est.T
gt = gt.T
transposed = True
assert gt.shape[1] == est.shape[1]
mu1 = est.mean(axis=1, keepdims=True)
mu2 = gt.mean(axis=1, keepdims=True)
X1 = est - mu1
X2 = gt - mu2
var1 = np.sum(X1 ** 2)
K = X1.dot(X2.T)
(U, s, Vh) = np.linalg.svd(K)
V = Vh.T
Z = np.eye(U.shape[0])
Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
R = V.dot(Z.dot(U.T))
scale = np.trace(R.dot(K)) / var1
t = mu2 - scale * R.dot(mu1)
S1_hat = scale * R.dot(est) + t
if transposed:
S1_hat = S1_hat.T
est_hat = S1_hat
dist['pa'] = np.sqrt(((est_hat - gt) ** 2).sum(axis=-1)) * 1000
result = {}
for key in ['abs', 'ra', 'pa', 'pck@50', 'pck@100']:
if key not in dist:
continue
result[key + '_mean'] = dist[key].mean()
if joint_level:
for (i, name) in enumerate(names):
result[key + '_' + name] = dist[key][i]
return result
|
EasyMocap
|
positive
|
@pytest.mark.django_db
@patch(f'{code_path}.rqi')
@patch('polaris.sep10.utils.check_auth', mock_check_auth_success)
def test_post_quote_failure_no_exchange_pairs(mock_rqi, client):
<DeepExtract>
usd_stellar = Asset.objects.create(code='usd', issuer=Keypair.random().public_key, sep38_enabled=True)
brl_offchain = OffChainAsset.objects.create(scheme='iso4217', identifier='BRL', country_codes='BRA')
delivery_methods = [DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.buy, name='cash_pickup', description='cash pick-up'), DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.sell, name='cash_dropoff', description='cash drop-off')]
brl_offchain.delivery_methods.add(*delivery_methods)
pair = ExchangePair.objects.create(buy_asset=brl_offchain.asset_identification_format, sell_asset=usd_stellar.asset_identification_format)
data = {'stellar_assets': [usd_stellar], 'offchain_assets': [brl_offchain], 'exchange_pairs': [pair], 'delivery_methods': delivery_methods}
</DeepExtract>
response = client.post(ENDPOINT, {'sell_asset': data['offchain_assets'][0].asset_identification_format, 'buy_asset': data['stellar_assets'][0].asset_identification_format, 'buy_amount': 100, 'sell_delivery_method': 'cash_dropoff'}, content_type='application/json')
assert response.status_code == 400, response.content
assert response.json() == {'error': 'unsupported asset pair'}
mock_rqi.post_quote.assert_not_called()
|
@pytest.mark.django_db
@patch(f'{code_path}.rqi')
@patch('polaris.sep10.utils.check_auth', mock_check_auth_success)
def test_post_quote_failure_no_exchange_pairs(mock_rqi, client):
usd_stellar = Asset.objects.create(code='usd', issuer=Keypair.random().public_key, sep38_enabled=True)
brl_offchain = OffChainAsset.objects.create(scheme='iso4217', identifier='BRL', country_codes='BRA')
delivery_methods = [DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.buy, name='cash_pickup', description='cash pick-up'), DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.sell, name='cash_dropoff', description='cash drop-off')]
brl_offchain.delivery_methods.add(*delivery_methods)
pair = ExchangePair.objects.create(buy_asset=brl_offchain.asset_identification_format, sell_asset=usd_stellar.asset_identification_format)
data = {'stellar_assets': [usd_stellar], 'offchain_assets': [brl_offchain], 'exchange_pairs': [pair], 'delivery_methods': delivery_methods}
response = client.post(ENDPOINT, {'sell_asset': data['offchain_assets'][0].asset_identification_format, 'buy_asset': data['stellar_assets'][0].asset_identification_format, 'buy_amount': 100, 'sell_delivery_method': 'cash_dropoff'}, content_type='application/json')
assert response.status_code == 400, response.content
assert response.json() == {'error': 'unsupported asset pair'}
mock_rqi.post_quote.assert_not_called()
|
django-polaris
|
positive
|
def readBlockWorker(infile, is_synapse, blockNum, binsize, blockMap, norm, c1Norm, c2Norm, binPositionBox, isIntra, version):
yActual = []
xActual = []
counts = []
idx = dict()
if blockNum in blockMap:
idx = blockMap[blockNum]
else:
idx['size'] = 0
idx['position'] = 0
if idx['size'] == 0:
records = []
else:
if infile.startswith('http'):
<DeepExtract>
if is_synapse:
headers = {'range': 'bytes={0}-{1}'.format(idx['position'], idx['position'] + idx['size'])}
headers = {'range': 'bytes={0}-{1}'.format(idx['position'], idx['position'] + idx['size']), 'x-amz-meta-requester': 'straw'}
</DeepExtract>
s = requests.Session()
r = s.get(infile, headers=headers)
req = io.BytesIO(r.content)
else:
req = open(infile, 'rb')
req.seek(idx['position'])
<DeepExtract>
compressedBytes = req.read(idx['size'])
uncompressedBytes = zlib.decompress(compressedBytes)
nRecords = struct.unpack('<i', uncompressedBytes[0:4])[0]
v = []
if version < 7:
for i in range(nRecords):
binX = struct.unpack('<i', uncompressedBytes[12 * i + 4:12 * i + 8])[0]
binY = struct.unpack('<i', uncompressedBytes[12 * i + 8:12 * i + 12])[0]
counts = struct.unpack('<f', uncompressedBytes[12 * i + 12:12 * i + 16])[0]
record = dict()
record['binX'] = binX
record['binY'] = binY
record['counts'] = counts
v.append(record)
else:
binXOffset = struct.unpack('<i', uncompressedBytes[4:8])[0]
binYOffset = struct.unpack('<i', uncompressedBytes[8:12])[0]
useShort = struct.unpack('<b', uncompressedBytes[12:13])[0]
type_ = struct.unpack('<b', uncompressedBytes[13:14])[0]
index = 0
if type_ == 1:
rowCount = struct.unpack('<h', uncompressedBytes[14:16])[0]
temp = 16
for i in range(rowCount):
y = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
binY = y + binYOffset
colCount = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
for j in range(colCount):
x = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
binX = binXOffset + x
if useShort == 0:
c = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
counts = c
else:
counts = struct.unpack('<f', uncompressedBytes[temp:temp + 4])[0]
temp = temp + 4
record = dict()
record['binX'] = binX
record['binY'] = binY
record['counts'] = counts
v.append(record)
index = index + 1
elif type_ == 2:
temp = 14
nPts = struct.unpack('<i', uncompressedBytes[temp:temp + 4])[0]
temp = temp + 4
w = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
for i in range(nPts):
row = int(i / w)
col = i - row * w
bin1 = int(binXOffset + col)
bin2 = int(binYOffset + row)
if useShort == 0:
c = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
if c != -32768:
record = dict()
record['binX'] = bin1
record['binY'] = bin2
record['counts'] = c
v.append(record)
index = index + 1
else:
counts = struct.unpack('<f', uncompressedBytes[temp:temp + 4])[0]
temp = temp + 4
if counts != 2143289344:
record = dict()
record['binX'] = bin1
record['binY'] = bin2
record['counts'] = counts
v.append(record)
index = index + 1
records = v
</DeepExtract>
if norm != 'NONE':
for record in records:
binX = record['binX']
binY = record['binY']
if binPositionBox[0] <= binX <= binPositionBox[1] and binPositionBox[2] <= binY <= binPositionBox[3] or (isIntra and binPositionBox[0] <= binY <= binPositionBox[1] and (binPositionBox[2] <= binX <= binPositionBox[3])):
c = record['counts']
a = c1Norm[binX] * c2Norm[binY]
if a != 0.0:
c = c / a
else:
c = 'inf'
xActual.append(binX)
yActual.append(binY)
counts.append(c)
else:
for record in records:
binX = record['binX']
binY = record['binY']
if binPositionBox[0] <= binX <= binPositionBox[1] and binPositionBox[2] <= binY <= binPositionBox[3] or (isIntra and binPositionBox[0] <= binY <= binPositionBox[1] and (binPositionBox[2] <= binX <= binPositionBox[3])):
c = record['counts']
xActual.append(binX)
yActual.append(binY)
counts.append(c)
return (xActual, yActual, counts)
|
def readBlockWorker(infile, is_synapse, blockNum, binsize, blockMap, norm, c1Norm, c2Norm, binPositionBox, isIntra, version):
yActual = []
xActual = []
counts = []
idx = dict()
if blockNum in blockMap:
idx = blockMap[blockNum]
else:
idx['size'] = 0
idx['position'] = 0
if idx['size'] == 0:
records = []
else:
if infile.startswith('http'):
if is_synapse:
headers = {'range': 'bytes={0}-{1}'.format(idx['position'], idx['position'] + idx['size'])}
headers = {'range': 'bytes={0}-{1}'.format(idx['position'], idx['position'] + idx['size']), 'x-amz-meta-requester': 'straw'}
s = requests.Session()
r = s.get(infile, headers=headers)
req = io.BytesIO(r.content)
else:
req = open(infile, 'rb')
req.seek(idx['position'])
compressedBytes = req.read(idx['size'])
uncompressedBytes = zlib.decompress(compressedBytes)
nRecords = struct.unpack('<i', uncompressedBytes[0:4])[0]
v = []
if version < 7:
for i in range(nRecords):
binX = struct.unpack('<i', uncompressedBytes[12 * i + 4:12 * i + 8])[0]
binY = struct.unpack('<i', uncompressedBytes[12 * i + 8:12 * i + 12])[0]
counts = struct.unpack('<f', uncompressedBytes[12 * i + 12:12 * i + 16])[0]
record = dict()
record['binX'] = binX
record['binY'] = binY
record['counts'] = counts
v.append(record)
else:
binXOffset = struct.unpack('<i', uncompressedBytes[4:8])[0]
binYOffset = struct.unpack('<i', uncompressedBytes[8:12])[0]
useShort = struct.unpack('<b', uncompressedBytes[12:13])[0]
type_ = struct.unpack('<b', uncompressedBytes[13:14])[0]
index = 0
if type_ == 1:
rowCount = struct.unpack('<h', uncompressedBytes[14:16])[0]
temp = 16
for i in range(rowCount):
y = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
binY = y + binYOffset
colCount = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
for j in range(colCount):
x = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
binX = binXOffset + x
if useShort == 0:
c = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
counts = c
else:
counts = struct.unpack('<f', uncompressedBytes[temp:temp + 4])[0]
temp = temp + 4
record = dict()
record['binX'] = binX
record['binY'] = binY
record['counts'] = counts
v.append(record)
index = index + 1
elif type_ == 2:
temp = 14
nPts = struct.unpack('<i', uncompressedBytes[temp:temp + 4])[0]
temp = temp + 4
w = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
for i in range(nPts):
row = int(i / w)
col = i - row * w
bin1 = int(binXOffset + col)
bin2 = int(binYOffset + row)
if useShort == 0:
c = struct.unpack('<h', uncompressedBytes[temp:temp + 2])[0]
temp = temp + 2
if c != -32768:
record = dict()
record['binX'] = bin1
record['binY'] = bin2
record['counts'] = c
v.append(record)
index = index + 1
else:
counts = struct.unpack('<f', uncompressedBytes[temp:temp + 4])[0]
temp = temp + 4
if counts != 2143289344:
record = dict()
record['binX'] = bin1
record['binY'] = bin2
record['counts'] = counts
v.append(record)
index = index + 1
records = v
if norm != 'NONE':
for record in records:
binX = record['binX']
binY = record['binY']
if binPositionBox[0] <= binX <= binPositionBox[1] and binPositionBox[2] <= binY <= binPositionBox[3] or (isIntra and binPositionBox[0] <= binY <= binPositionBox[1] and (binPositionBox[2] <= binX <= binPositionBox[3])):
c = record['counts']
a = c1Norm[binX] * c2Norm[binY]
if a != 0.0:
c = c / a
else:
c = 'inf'
xActual.append(binX)
yActual.append(binY)
counts.append(c)
else:
for record in records:
binX = record['binX']
binY = record['binY']
if binPositionBox[0] <= binX <= binPositionBox[1] and binPositionBox[2] <= binY <= binPositionBox[3] or (isIntra and binPositionBox[0] <= binY <= binPositionBox[1] and (binPositionBox[2] <= binX <= binPositionBox[3])):
c = record['counts']
xActual.append(binX)
yActual.append(binY)
counts.append(c)
return (xActual, yActual, counts)
|
CoolBox
|
positive
|
def vizmodel(m, inputShape=None, group=None, ganNoise=False, batchn=2, output=''):
<DeepExtract>
inputShape = inputShape or getModelDefaultInputShape(m, group, ganNoise)
para = getpara(m)
inp = th.rand((batchn,) + inputShape, dtype=para.dtype, device=para.device)
if not ganNoise:
from skimage.data import astronaut
img = astronaut().mean(-1) / 255.0
mean = img.mean()
std = ((img - mean) ** 2).mean() ** 0.5
normaed = (img - mean) / std
feat = nn.functional.interpolate(tht - [[normaed]], inputShape[-2:], mode='bilinear')
inp[:] = feat.to(para.device)
x = inp
</DeepExtract>
from torchviz import make_dot
x.to(getpara(m))
model_output = m(x)
<DeepExtract>
if isinstance(model_output, (list, tuple)):
loss = sum([i.sum() for i in model_output])
loss = loss
if isinstance(model_output, dict):
loss = sum([i.sum() for i in model_output.values()])
loss = loss
loss = model_output.sum()
</DeepExtract>
graph = make_dot(loss, params=dict(m.named_parameters()))
if output:
graph.render(output, format='png')
return graph
|
def vizmodel(m, inputShape=None, group=None, ganNoise=False, batchn=2, output=''):
inputShape = inputShape or getModelDefaultInputShape(m, group, ganNoise)
para = getpara(m)
inp = th.rand((batchn,) + inputShape, dtype=para.dtype, device=para.device)
if not ganNoise:
from skimage.data import astronaut
img = astronaut().mean(-1) / 255.0
mean = img.mean()
std = ((img - mean) ** 2).mean() ** 0.5
normaed = (img - mean) / std
feat = nn.functional.interpolate(tht - [[normaed]], inputShape[-2:], mode='bilinear')
inp[:] = feat.to(para.device)
x = inp
from torchviz import make_dot
x.to(getpara(m))
model_output = m(x)
if isinstance(model_output, (list, tuple)):
loss = sum([i.sum() for i in model_output])
loss = loss
if isinstance(model_output, dict):
loss = sum([i.sum() for i in model_output.values()])
loss = loss
loss = model_output.sum()
graph = make_dot(loss, params=dict(m.named_parameters()))
if output:
graph.render(output, format='png')
return graph
|
boxx
|
positive
|
def calculate_auto_runtimes(taskname, search_range=(None, None), bin_count=None):
<DeepExtract>
if search_range and search_range != (None, None):
if taskname:
qs = DispatchedTask.objects.filter(state='SUCCESS', name=taskname, runtime__range=search_range)
else:
qs = DispatchedTask.objects.filter(state='SUCCESS', runtime__range=search_range)
elif taskname:
qs = DispatchedTask.objects.filter(state='SUCCESS', name=taskname)
else:
qs = DispatchedTask.objects.filter(state='SUCCESS')
if search_range[0] and search_range[1]:
qs = qs.filter(tstamp__range=search_range)
elif search_range[0]:
qs = qs.filter(tstamp__gte=search_range[0])
elif search_range[1]:
qs = qs.filter(tstamp__lte=search_range[1])
runtimeq = qs
</DeepExtract>
agg = runtimeq.aggregate(Max('runtime'), Min('runtime'))
runtime_min = agg['runtime__min'] if agg['runtime__min'] is not None else 0.0
runtime_max = agg['runtime__max'] if agg['runtime__max'] is not None else 1.0
runtimes = runtimeq.values_list('runtime', flat=True).order_by('runtime')
if bin_count:
bin_size = (runtime_max - runtime_min) / bin_count
else:
e = 'Bad arguments to calculate_autoruntimes(). '
e += 'The argument bin_count must be given.'
raise RuntimeError(e)
<DeepExtract>
bins = [0 for i in xrange(bin_count)]
try:
runtime_iter = iter(runtimes)
t = runtime_iter.next()
for i in xrange(len(bins)):
binmax = (i + 1) * bin_size + runtime_min
while t < binmax:
bins[i] += 1
t = runtime_iter.next()
except StopIteration:
pass
for i in xrange(len(bins)):
binmin = i * bin_size + runtime_min
binmax = (i + 1) * bin_size + runtime_min
bins[i] = ((binmin, binmax), bins[i])
bins = bins
</DeepExtract>
return bins
|
def calculate_auto_runtimes(taskname, search_range=(None, None), bin_count=None):
if search_range and search_range != (None, None):
if taskname:
qs = DispatchedTask.objects.filter(state='SUCCESS', name=taskname, runtime__range=search_range)
else:
qs = DispatchedTask.objects.filter(state='SUCCESS', runtime__range=search_range)
elif taskname:
qs = DispatchedTask.objects.filter(state='SUCCESS', name=taskname)
else:
qs = DispatchedTask.objects.filter(state='SUCCESS')
if search_range[0] and search_range[1]:
qs = qs.filter(tstamp__range=search_range)
elif search_range[0]:
qs = qs.filter(tstamp__gte=search_range[0])
elif search_range[1]:
qs = qs.filter(tstamp__lte=search_range[1])
runtimeq = qs
agg = runtimeq.aggregate(Max('runtime'), Min('runtime'))
runtime_min = agg['runtime__min'] if agg['runtime__min'] is not None else 0.0
runtime_max = agg['runtime__max'] if agg['runtime__max'] is not None else 1.0
runtimes = runtimeq.values_list('runtime', flat=True).order_by('runtime')
if bin_count:
bin_size = (runtime_max - runtime_min) / bin_count
else:
e = 'Bad arguments to calculate_autoruntimes(). '
e += 'The argument bin_count must be given.'
raise RuntimeError(e)
bins = [0 for i in xrange(bin_count)]
try:
runtime_iter = iter(runtimes)
t = runtime_iter.next()
for i in xrange(len(bins)):
binmax = (i + 1) * bin_size + runtime_min
while t < binmax:
bins[i] += 1
t = runtime_iter.next()
except StopIteration:
pass
for i in xrange(len(bins)):
binmin = i * bin_size + runtime_min
binmax = (i + 1) * bin_size + runtime_min
bins[i] = ((binmin, binmax), bins[i])
bins = bins
return bins
|
CeleryManagement
|
positive
|
def test_bad_output_structure(self):
def badfunc(S, is_training):
<DeepExtract>
batch_norm = hk.BatchNorm(False, False, 0.99)
seq = hk.Sequential((hk.Flatten(), hk.Linear(8), jax.nn.relu, partial(hk.dropout, hk.next_rng_key(), 0.25 if is_training else 0.0), partial(batch_norm, is_training=is_training), hk.Linear(8), jnp.tanh, hk.Linear(discrete.n * discrete.n), hk.Reshape((discrete.n, discrete.n)), jax.nn.softmax))
S_next = seq(S)
</DeepExtract>
S_next = (13, S_next)
return S_next
msg = 'func has bad return tree_structure, expected: PyTreeDef\\(\\*\\), got: PyTreeDef\\(\\(\\*, \\*\\)\\)'
with self.assertRaisesRegex(TypeError, msg):
env = Env(discrete, discrete)
TransitionModel(badfunc, env, random_seed=13)
|
def test_bad_output_structure(self):
def badfunc(S, is_training):
batch_norm = hk.BatchNorm(False, False, 0.99)
seq = hk.Sequential((hk.Flatten(), hk.Linear(8), jax.nn.relu, partial(hk.dropout, hk.next_rng_key(), 0.25 if is_training else 0.0), partial(batch_norm, is_training=is_training), hk.Linear(8), jnp.tanh, hk.Linear(discrete.n * discrete.n), hk.Reshape((discrete.n, discrete.n)), jax.nn.softmax))
S_next = seq(S)
S_next = (13, S_next)
return S_next
msg = 'func has bad return tree_structure, expected: PyTreeDef\\(\\*\\), got: PyTreeDef\\(\\(\\*, \\*\\)\\)'
with self.assertRaisesRegex(TypeError, msg):
env = Env(discrete, discrete)
TransitionModel(badfunc, env, random_seed=13)
|
coax
|
positive
|
def invgauss_eclipses_residual(ebparams, times, mags, errs):
"""This returns the residual between the modelmags and the actual mags.
Parameters
----------
ebparams : list of float
This contains the parameters for the eclipsing binary::
ebparams = [period (time),
epoch (time),
pdepth: primary eclipse depth (mags),
pduration: primary eclipse duration (phase),
psdepthratio: primary-secondary eclipse depth ratio,
secondaryphase: center phase of the secondary eclipse]
`period` is the period in days.
`epoch` is the time of minimum in JD.
`pdepth` is the depth of the primary eclipse.
- for magnitudes -> `pdepth` should be < 0
- for fluxes -> `pdepth` should be > 0
`pduration` is the length of the primary eclipse in phase.
`psdepthratio` is the ratio in the eclipse depths:
`depth_secondary/depth_primary`. This is generally the same as the ratio
of the `T_effs` of the two stars.
`secondaryphase` is the phase at which the minimum of the secondary
eclipse is located. This effectively parameterizes eccentricity.
All of these will then have fitted values after the fit is done.
times,mags,errs : np.array
The input time-series of measurements and associated errors for which
the eclipse model will be generated. The times will be used to generate
model mags, and the input `times`, `mags`, and `errs` will be resorted
by model phase and returned.
Returns
-------
np.array
The residuals between the input `mags` and generated `modelmags`,
weighted by the measurement errors in `errs`.
"""
<DeepExtract>
(period, epoch, pdepth, pduration, depthratio, secondaryphase) = ebparams
iphase = (times - epoch) / period
iphase = iphase - np.floor(iphase)
phasesortind = np.argsort(iphase)
phase = iphase[phasesortind]
ptimes = times[phasesortind]
pmags = mags[phasesortind]
perrs = errs[phasesortind]
zerolevel = np.median(pmags)
modelmags = np.full_like(phase, zerolevel)
primaryecl_amp = -pdepth
secondaryecl_amp = -pdepth * depthratio
primaryecl_std = pduration / 5.0
secondaryecl_std = pduration / 5.0
halfduration = pduration / 2.0
primary_eclipse_ingress = (phase >= 1.0 - halfduration) & (phase <= 1.0)
primary_eclipse_egress = (phase >= 0.0) & (phase <= halfduration)
secondary_eclipse_phase = (phase >= secondaryphase - halfduration) & (phase <= secondaryphase + halfduration)
modelmags[primary_eclipse_ingress] = zerolevel + _gaussian(phase[primary_eclipse_ingress], primaryecl_amp, 1.0, primaryecl_std)
modelmags[primary_eclipse_egress] = zerolevel + _gaussian(phase[primary_eclipse_egress], primaryecl_amp, 0.0, primaryecl_std)
modelmags[secondary_eclipse_phase] = zerolevel + _gaussian(phase[secondary_eclipse_phase], secondaryecl_amp, secondaryphase, secondaryecl_std)
(modelmags, phase, ptimes, pmags, perrs) = (modelmags, phase, ptimes, pmags, perrs)
</DeepExtract>
return (pmags - modelmags) / perrs
|
def invgauss_eclipses_residual(ebparams, times, mags, errs):
"""This returns the residual between the modelmags and the actual mags.
Parameters
----------
ebparams : list of float
This contains the parameters for the eclipsing binary::
ebparams = [period (time),
epoch (time),
pdepth: primary eclipse depth (mags),
pduration: primary eclipse duration (phase),
psdepthratio: primary-secondary eclipse depth ratio,
secondaryphase: center phase of the secondary eclipse]
`period` is the period in days.
`epoch` is the time of minimum in JD.
`pdepth` is the depth of the primary eclipse.
- for magnitudes -> `pdepth` should be < 0
- for fluxes -> `pdepth` should be > 0
`pduration` is the length of the primary eclipse in phase.
`psdepthratio` is the ratio in the eclipse depths:
`depth_secondary/depth_primary`. This is generally the same as the ratio
of the `T_effs` of the two stars.
`secondaryphase` is the phase at which the minimum of the secondary
eclipse is located. This effectively parameterizes eccentricity.
All of these will then have fitted values after the fit is done.
times,mags,errs : np.array
The input time-series of measurements and associated errors for which
the eclipse model will be generated. The times will be used to generate
model mags, and the input `times`, `mags`, and `errs` will be resorted
by model phase and returned.
Returns
-------
np.array
The residuals between the input `mags` and generated `modelmags`,
weighted by the measurement errors in `errs`.
"""
(period, epoch, pdepth, pduration, depthratio, secondaryphase) = ebparams
iphase = (times - epoch) / period
iphase = iphase - np.floor(iphase)
phasesortind = np.argsort(iphase)
phase = iphase[phasesortind]
ptimes = times[phasesortind]
pmags = mags[phasesortind]
perrs = errs[phasesortind]
zerolevel = np.median(pmags)
modelmags = np.full_like(phase, zerolevel)
primaryecl_amp = -pdepth
secondaryecl_amp = -pdepth * depthratio
primaryecl_std = pduration / 5.0
secondaryecl_std = pduration / 5.0
halfduration = pduration / 2.0
primary_eclipse_ingress = (phase >= 1.0 - halfduration) & (phase <= 1.0)
primary_eclipse_egress = (phase >= 0.0) & (phase <= halfduration)
secondary_eclipse_phase = (phase >= secondaryphase - halfduration) & (phase <= secondaryphase + halfduration)
modelmags[primary_eclipse_ingress] = zerolevel + _gaussian(phase[primary_eclipse_ingress], primaryecl_amp, 1.0, primaryecl_std)
modelmags[primary_eclipse_egress] = zerolevel + _gaussian(phase[primary_eclipse_egress], primaryecl_amp, 0.0, primaryecl_std)
modelmags[secondary_eclipse_phase] = zerolevel + _gaussian(phase[secondary_eclipse_phase], secondaryecl_amp, secondaryphase, secondaryecl_std)
(modelmags, phase, ptimes, pmags, perrs) = (modelmags, phase, ptimes, pmags, perrs)
return (pmags - modelmags) / perrs
|
astrobase
|
positive
|
def enip_format(data, sort_keys=False, indent=4):
"""Format a decoded EtherNet/IP data bundle in a (more) human-readable form.
There is no means by which to specify a custom sorting function. The cpppo.dotdict outputs keys
with formatting that tries to retain sorting order of lists of sub-dotdict indices.
In Python2, we need to specially handle str/bytes vs. unicode strings; we need to avoid
enip_format attempting to decode str as utf-8.
"""
assert isinstance(data, dict), 'Unknown data type {data!r}'.format(data=data)
pairs = data.items()
if sort_keys:
pairs = sorted(pairs)
prefix = ' ' * indent
newline = '\n' + prefix
result = '{'
for (key, val) in pairs:
result += newline + '{key:32}'.format(key=repr(key) + ': ')
if isinstance(val, bytes) and sys.version_info[0] < 3:
if not any((c < ' ' or c > '~' for c in val)):
result += repr(val) + ','
continue
try:
if not any((c < ' ' for c in val)):
result += repr(val.decode('utf-8')) + ','
continue
except:
pass
try:
<DeepExtract>
if isinstance(val, array.array) and val.typecode == type_bytes_array_symbol:
binary = val.tostring() if sys.version_info[0] < 3 else val.tobytes()
elif isinstance(val, bytearray):
binary = bytes(val)
elif isinstance(val, bytes):
binary = val
raise AssertionError('Unrecognized octets type: %r' % val)
</DeepExtract>
except:
pass
else:
if isinstance(val, array.array):
(beg, end) = ('array( {val.typecode!r}, '.format(val=val), ')')
elif isinstance(val, bytearray):
(beg, end) = ('bytearray(', ')')
else:
(beg, end) = ('bytes(', ')')
result += "{beg}hexload(r'''".format(beg=beg)
result += ''.join((newline + prefix + row for row in misc.hexdumper(val)))
result += newline + "'''){end},".format(end=end)
continue
if is_listlike(val) and len(val) > 10:
try:
(beg, end) = (getattr(getattr(val, '__class__'), '__name__') + '(', ')')
except:
pass
else:
result += beg
for (i, v) in enumerate(val):
if i % 10 == 0:
result += newline + prefix
fmt = '{v:<8}' if isinstance(v, type_str_base) else '{v:>8}'
result += fmt.format(v=repr(v) + ',')
result += newline + end + ','
continue
result += repr(val)
result += ','
result += '\n}'
return result
|
def enip_format(data, sort_keys=False, indent=4):
"""Format a decoded EtherNet/IP data bundle in a (more) human-readable form.
There is no means by which to specify a custom sorting function. The cpppo.dotdict outputs keys
with formatting that tries to retain sorting order of lists of sub-dotdict indices.
In Python2, we need to specially handle str/bytes vs. unicode strings; we need to avoid
enip_format attempting to decode str as utf-8.
"""
assert isinstance(data, dict), 'Unknown data type {data!r}'.format(data=data)
pairs = data.items()
if sort_keys:
pairs = sorted(pairs)
prefix = ' ' * indent
newline = '\n' + prefix
result = '{'
for (key, val) in pairs:
result += newline + '{key:32}'.format(key=repr(key) + ': ')
if isinstance(val, bytes) and sys.version_info[0] < 3:
if not any((c < ' ' or c > '~' for c in val)):
result += repr(val) + ','
continue
try:
if not any((c < ' ' for c in val)):
result += repr(val.decode('utf-8')) + ','
continue
except:
pass
try:
if isinstance(val, array.array) and val.typecode == type_bytes_array_symbol:
binary = val.tostring() if sys.version_info[0] < 3 else val.tobytes()
elif isinstance(val, bytearray):
binary = bytes(val)
elif isinstance(val, bytes):
binary = val
raise AssertionError('Unrecognized octets type: %r' % val)
except:
pass
else:
if isinstance(val, array.array):
(beg, end) = ('array( {val.typecode!r}, '.format(val=val), ')')
elif isinstance(val, bytearray):
(beg, end) = ('bytearray(', ')')
else:
(beg, end) = ('bytes(', ')')
result += "{beg}hexload(r'''".format(beg=beg)
result += ''.join((newline + prefix + row for row in misc.hexdumper(val)))
result += newline + "'''){end},".format(end=end)
continue
if is_listlike(val) and len(val) > 10:
try:
(beg, end) = (getattr(getattr(val, '__class__'), '__name__') + '(', ')')
except:
pass
else:
result += beg
for (i, v) in enumerate(val):
if i % 10 == 0:
result += newline + prefix
fmt = '{v:<8}' if isinstance(v, type_str_base) else '{v:>8}'
result += fmt.format(v=repr(v) + ',')
result += newline + end + ','
continue
result += repr(val)
result += ','
result += '\n}'
return result
|
cpppo
|
positive
|
def test_backward6(self):
params = (10, 20, 5, 5)
<DeepExtract>
if H is not None:
x = np.random.randn(*params, C, H, W).astype(np.float64)
else:
x = np.random.randn(*params, C).astype(np.float64)
gamma = np.random.randn(C).astype(np.float64)
beta = np.random.randn(C).astype(np.float64)
mean = np.random.randn(C).astype(np.float64)
var = np.abs(np.random.randn(C).astype(np.float64))
(x, gamma, beta, mean, var) = (x, gamma, beta, mean, var)
</DeepExtract>
f = lambda beta: F.batch_nrom(x, gamma, beta, mean, var)
self.assertTrue(gradient_check(f, beta))
|
def test_backward6(self):
params = (10, 20, 5, 5)
if H is not None:
x = np.random.randn(*params, C, H, W).astype(np.float64)
else:
x = np.random.randn(*params, C).astype(np.float64)
gamma = np.random.randn(C).astype(np.float64)
beta = np.random.randn(C).astype(np.float64)
mean = np.random.randn(C).astype(np.float64)
var = np.abs(np.random.randn(C).astype(np.float64))
(x, gamma, beta, mean, var) = (x, gamma, beta, mean, var)
f = lambda beta: F.batch_nrom(x, gamma, beta, mean, var)
self.assertTrue(gradient_check(f, beta))
|
deep-learning-from-scratch-3
|
positive
|
def testScalarValidationMsgs(unit_database) -> None:
def _Check(scalar, value, unit, expected_msg):
some_scalar = scalar.CreateCopy(value=value, unit=unit)
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == expected_msg
<DeepExtract>
unit_database.AddCategory(category='test category', quantity_type='dimensionless', override=True, default_unit='-', min_value=1.0, max_value=50.0, valid_units='-', is_min_exclusive=False, is_max_exclusive=False)
unit_database.AddCategory(category='category exclusive', quantity_type='dimensionless', override=True, default_unit='-', min_value=1.0, max_value=50.0, valid_units='-', is_min_exclusive=True, is_max_exclusive=True, default_value=5.0)
</DeepExtract>
some_scalar = Scalar('test category', 10.0, '-')
expected_error_msg = 'Error in Some Property. Invalid value for Test Category: 0. Must be greater or equal to 1.0.'
<DeepExtract>
some_scalar = some_scalar.CreateCopy(value=0.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == expected_error_msg
</DeepExtract>
expected_error_msg = 'Error in Some Property. Invalid value for Test Category: 51. Must be less or equal to 50.0.'
<DeepExtract>
some_scalar = some_scalar.CreateCopy(value=51.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == expected_error_msg
</DeepExtract>
<DeepExtract>
some_scalar = some_scalar.CreateCopy(value=1.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == None
</DeepExtract>
<DeepExtract>
some_scalar = some_scalar.CreateCopy(value=50.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == None
</DeepExtract>
some_scalar = Scalar('category exclusive', 10.0, '-')
expected_error_msg = 'Error in Some Property. Invalid value for Category Exclusive: 1. Must be greater than 1.0.'
<DeepExtract>
some_scalar = some_scalar.CreateCopy(value=1.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == expected_error_msg
</DeepExtract>
expected_error_msg = 'Error in Some Property. Invalid value for Category Exclusive: 50. Must be less than 50.0.'
<DeepExtract>
some_scalar = some_scalar.CreateCopy(value=50.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == expected_error_msg
</DeepExtract>
<DeepExtract>
some_scalar = some_scalar.CreateCopy(value=49.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == None
</DeepExtract>
<DeepExtract>
some_scalar = some_scalar.CreateCopy(value=2.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == None
</DeepExtract>
|
def testScalarValidationMsgs(unit_database) -> None:
def _Check(scalar, value, unit, expected_msg):
some_scalar = scalar.CreateCopy(value=value, unit=unit)
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == expected_msg
unit_database.AddCategory(category='test category', quantity_type='dimensionless', override=True, default_unit='-', min_value=1.0, max_value=50.0, valid_units='-', is_min_exclusive=False, is_max_exclusive=False)
unit_database.AddCategory(category='category exclusive', quantity_type='dimensionless', override=True, default_unit='-', min_value=1.0, max_value=50.0, valid_units='-', is_min_exclusive=True, is_max_exclusive=True, default_value=5.0)
some_scalar = Scalar('test category', 10.0, '-')
expected_error_msg = 'Error in Some Property. Invalid value for Test Category: 0. Must be greater or equal to 1.0.'
some_scalar = some_scalar.CreateCopy(value=0.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == expected_error_msg
expected_error_msg = 'Error in Some Property. Invalid value for Test Category: 51. Must be less or equal to 50.0.'
some_scalar = some_scalar.CreateCopy(value=51.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == expected_error_msg
some_scalar = some_scalar.CreateCopy(value=1.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == None
some_scalar = some_scalar.CreateCopy(value=50.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == None
some_scalar = Scalar('category exclusive', 10.0, '-')
expected_error_msg = 'Error in Some Property. Invalid value for Category Exclusive: 1. Must be greater than 1.0.'
some_scalar = some_scalar.CreateCopy(value=1.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == expected_error_msg
expected_error_msg = 'Error in Some Property. Invalid value for Category Exclusive: 50. Must be less than 50.0.'
some_scalar = some_scalar.CreateCopy(value=50.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == expected_error_msg
some_scalar = some_scalar.CreateCopy(value=49.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == None
some_scalar = some_scalar.CreateCopy(value=2.0, unit='-')
obtained_msg = ScalarMinMaxValidator.CreateScalarCheckErrorMsg(some_scalar, 'Some Property')
assert obtained_msg == None
|
barril
|
positive
|
@micropython.viper
def sir(buf):
<DeepExtract>
if 1:
tms.on()
else:
tms.off()
for i in range(1):
tck.off()
tck.on()
</DeepExtract>
<DeepExtract>
if 0:
tms.on()
else:
tms.off()
for i in range(2):
tck.off()
tck.on()
</DeepExtract>
<DeepExtract>
p = ptr8(addressof(buf))
l = int(len(buf))
val = 0
tms.off()
for i in range(l - 1):
byte = 0
val = p[i]
for nf in range(8):
if val >> nf & 1:
tdi.on()
else:
tdi.off()
tck.off()
tck.on()
if tdo.value():
byte |= 1 << nf
if int(0):
0[i] = byte
byte = 0
val = p[l - 1]
for nf in range(7):
if val >> nf & 1:
tdi.on()
else:
tdi.off()
tck.off()
tck.on()
if tdo.value():
byte |= 1 << nf
if 1:
tms.on()
if val >> 7 & 1:
tdi.on()
else:
tdi.off()
tck.off()
tck.on()
if tdo.value():
byte |= 1 << 7
if int(0):
0[l - 1] = byte
</DeepExtract>
<DeepExtract>
send_tms(0, 1)
send_tms(1, 3)
</DeepExtract>
|
@micropython.viper
def sir(buf):
if 1:
tms.on()
else:
tms.off()
for i in range(1):
tck.off()
tck.on()
if 0:
tms.on()
else:
tms.off()
for i in range(2):
tck.off()
tck.on()
p = ptr8(addressof(buf))
l = int(len(buf))
val = 0
tms.off()
for i in range(l - 1):
byte = 0
val = p[i]
for nf in range(8):
if val >> nf & 1:
tdi.on()
else:
tdi.off()
tck.off()
tck.on()
if tdo.value():
byte |= 1 << nf
if int(0):
0[i] = byte
byte = 0
val = p[l - 1]
for nf in range(7):
if val >> nf & 1:
tdi.on()
else:
tdi.off()
tck.off()
tck.on()
if tdo.value():
byte |= 1 << nf
if 1:
tms.on()
if val >> 7 & 1:
tdi.on()
else:
tdi.off()
tck.off()
tck.on()
if tdo.value():
byte |= 1 << 7
if int(0):
0[l - 1] = byte
send_tms(0, 1)
send_tms(1, 3)
|
esp32ecp5
|
positive
|
def predict(self, X: Union[pd.DataFrame, np.ndarray]) -> np.ndarray:
"""
Uses the fitted NOTEARS algorithm to reconstruct y from known X data.
Returns:
Predicted y values for each row of X.
"""
<DeepExtract>
y_pred = super().predict(X)
if len(y_pred.shape) == 1:
y_pred = np.vstack([1 - y_pred, y_pred]).T
probs = y_pred
</DeepExtract>
n_classes = len(self.classes_)
if n_classes == 2:
indices = probs[:, 1].round().astype(np.int64)
else:
indices = np.argmax(probs, axis=1)
return self.classes_[indices]
|
def predict(self, X: Union[pd.DataFrame, np.ndarray]) -> np.ndarray:
"""
Uses the fitted NOTEARS algorithm to reconstruct y from known X data.
Returns:
Predicted y values for each row of X.
"""
y_pred = super().predict(X)
if len(y_pred.shape) == 1:
y_pred = np.vstack([1 - y_pred, y_pred]).T
probs = y_pred
n_classes = len(self.classes_)
if n_classes == 2:
indices = probs[:, 1].round().astype(np.int64)
else:
indices = np.argmax(probs, axis=1)
return self.classes_[indices]
|
causalnex
|
positive
|
def test():
parser = ArgumentParser(description='Run tests against a Neo4j server.\r\n\r\nexample:\r\n neotest -e 3.1.0-M09 test/run/ runtests.sh', epilog='See neoctrl-download for details of supported environment variables.\r\n\r\nReport bugs to drivers@neo4j.com', formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-e', '--enterprise', action='store_true', help='select Neo4j Enterprise Edition (default: Community)')
parser.add_argument('-v', '--verbose', action='store_true', help='show more detailed output')
parser.add_argument('versions', help='Neo4j server versions (colon-separated)')
parser.add_argument('path', help='installation path')
parser.add_argument('command', help='command to execute test')
parser.add_argument('args', nargs=REMAINDER, help='arguments for test execution')
parsed = parser.parse_args()
exit_status = 0
for version in parsed.versions.split(':'):
print('\x1b[33;1m************************************************************\x1b[0m')
print('\x1b[33;1m*** RUNNING TESTS AGAINST NEO4J SERVER %s\x1b[0m' % version)
print('\x1b[33;1m************************************************************\x1b[0m')
print()
<DeepExtract>
controller = create_controller()
try:
home = controller.install('enterprise' if parsed.enterprise else 'community', version.strip(), parsed.path, **kwargs)
except HTTPError as error:
if error.code == 401:
raise RuntimeError('Missing or incorrect authorization')
elif error.code == 403:
raise RuntimeError('Could not download package from %s (403 Forbidden)' % error.url)
else:
raise
else:
home = home
</DeepExtract>
if platform.system() == 'Windows':
controller = WindowsController(home, 1 if parsed.verbose else 0)
else:
controller = UnixController(home, 1 if parsed.verbose else 0)
controller.create_user('neotest', 'neotest')
controller.set_user_role('neotest', 'admin')
try:
controller.start(timeout=300)
exit_status = call([parsed.command] + parsed.args)
except OSError:
raise RuntimeError('Unable to run command %r with arguments %r' % (parsed.command, parsed.args))
finally:
controller.stop()
print('')
if exit_status != 0:
break
exit(exit_status)
|
def test():
parser = ArgumentParser(description='Run tests against a Neo4j server.\r\n\r\nexample:\r\n neotest -e 3.1.0-M09 test/run/ runtests.sh', epilog='See neoctrl-download for details of supported environment variables.\r\n\r\nReport bugs to drivers@neo4j.com', formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-e', '--enterprise', action='store_true', help='select Neo4j Enterprise Edition (default: Community)')
parser.add_argument('-v', '--verbose', action='store_true', help='show more detailed output')
parser.add_argument('versions', help='Neo4j server versions (colon-separated)')
parser.add_argument('path', help='installation path')
parser.add_argument('command', help='command to execute test')
parser.add_argument('args', nargs=REMAINDER, help='arguments for test execution')
parsed = parser.parse_args()
exit_status = 0
for version in parsed.versions.split(':'):
print('\x1b[33;1m************************************************************\x1b[0m')
print('\x1b[33;1m*** RUNNING TESTS AGAINST NEO4J SERVER %s\x1b[0m' % version)
print('\x1b[33;1m************************************************************\x1b[0m')
print()
controller = create_controller()
try:
home = controller.install('enterprise' if parsed.enterprise else 'community', version.strip(), parsed.path, **kwargs)
except HTTPError as error:
if error.code == 401:
raise RuntimeError('Missing or incorrect authorization')
elif error.code == 403:
raise RuntimeError('Could not download package from %s (403 Forbidden)' % error.url)
else:
raise
else:
home = home
if platform.system() == 'Windows':
controller = WindowsController(home, 1 if parsed.verbose else 0)
else:
controller = UnixController(home, 1 if parsed.verbose else 0)
controller.create_user('neotest', 'neotest')
controller.set_user_role('neotest', 'admin')
try:
controller.start(timeout=300)
exit_status = call([parsed.command] + parsed.args)
except OSError:
raise RuntimeError('Unable to run command %r with arguments %r' % (parsed.command, parsed.args))
finally:
controller.stop()
print('')
if exit_status != 0:
break
exit(exit_status)
|
boltkit
|
positive
|
def get_prob(self, state):
"""
### PROBLEM 3
### YOUR CODE HERE
args:
state: np array (batch_size, ob_dim)
TODO:
likelihood:
evaluate the discriminator D(x,x) on the same input
prob:
compute the probability density of x from the discriminator
likelihood (see homework doc)
"""
<DeepExtract>
assert state.ndim == state.ndim
assert state.shape[1] == state.shape[1] == self.ob_dim
assert state.shape[0] == state.shape[0]
likelihood = self.sess.run(self.likelihood, feed_dict={self.state1: state, self.state2: state, self.discrim_target: np.ones([state.shape[0], 1])})
likelihood = likelihood
</DeepExtract>
likelihood = np.clip(np.squeeze(likelihood), 1e-05, 1 - 1e-05)
prob = (1 - likelihood) / likelihood
return prob
|
def get_prob(self, state):
"""
### PROBLEM 3
### YOUR CODE HERE
args:
state: np array (batch_size, ob_dim)
TODO:
likelihood:
evaluate the discriminator D(x,x) on the same input
prob:
compute the probability density of x from the discriminator
likelihood (see homework doc)
"""
assert state.ndim == state.ndim
assert state.shape[1] == state.shape[1] == self.ob_dim
assert state.shape[0] == state.shape[0]
likelihood = self.sess.run(self.likelihood, feed_dict={self.state1: state, self.state2: state, self.discrim_target: np.ones([state.shape[0], 1])})
likelihood = likelihood
likelihood = np.clip(np.squeeze(likelihood), 1e-05, 1 - 1e-05)
prob = (1 - likelihood) / likelihood
return prob
|
cs294-112_hws
|
positive
|
def syn_we_c4_c5(l4, l5, l42, l52, nl42, nl52, name):
l4_dict = dict()
for matching in l4:
l4_dict[matching] = 1
total_cancelled = 0
for m in nl42:
if m in l4_dict:
total_cancelled += 1
l4.remove(m)
l5_dict = dict()
for matching in l5:
l5_dict[matching] = 1
total_cancelled = 0
for m in nl52:
if m in l5_dict:
total_cancelled += 1
l5.remove(m)
all_matchings = defaultdict(list)
all_matchings[MatchingType.L4_CLASSNAME_RELATIONNAME_SYN] = l4
all_matchings[MatchingType.L5_CLASSNAME_ATTRNAME_SYN] = l5
all_matchings[MatchingType.L42_CLASSNAME_RELATIONNAME_SEM] = l42
all_matchings[MatchingType.L52_CLASSNAME_ATTRNAME_SEM] = l52
combined = matcherlib.combine_matchings(all_matchings)
<DeepExtract>
l = []
for (k, v) in combined.items():
matchings = v.get_matchings()
for el in matchings:
l.append(el)
combined_list = l
</DeepExtract>
print(str(len(combined_list)))
<DeepExtract>
gtm = set(ground_truth_matchings)
total_results = len(combined_list)
true_positives = 0
for el in combined_list:
if el in gtm:
true_positives += 1
if total_results == 0:
precision = 0
else:
precision = float(true_positives / total_results)
recall = float(true_positives / len(ground_truth_matchings))
(precision, recall) = (precision, recall)
</DeepExtract>
combined_sum = matcherlib.summarize_matchings_to_ancestor(om.kr_handlers['efo'], combined_list)
<DeepExtract>
gtm = set(ground_truth_matchings)
total_results = len(combined_sum)
true_positives = 0
for el in combined_sum:
if el in gtm:
true_positives += 1
if total_results == 0:
precision = 0
else:
precision = float(true_positives / total_results)
recall = float(true_positives / len(ground_truth_matchings))
(precision_sum, recall_sum) = (precision, recall)
</DeepExtract>
print(name + ', ' + str(precision) + ', ' + str(recall))
print(name + '_sum, ' + str(precision_sum) + ', ' + str(recall_sum))
|
def syn_we_c4_c5(l4, l5, l42, l52, nl42, nl52, name):
l4_dict = dict()
for matching in l4:
l4_dict[matching] = 1
total_cancelled = 0
for m in nl42:
if m in l4_dict:
total_cancelled += 1
l4.remove(m)
l5_dict = dict()
for matching in l5:
l5_dict[matching] = 1
total_cancelled = 0
for m in nl52:
if m in l5_dict:
total_cancelled += 1
l5.remove(m)
all_matchings = defaultdict(list)
all_matchings[MatchingType.L4_CLASSNAME_RELATIONNAME_SYN] = l4
all_matchings[MatchingType.L5_CLASSNAME_ATTRNAME_SYN] = l5
all_matchings[MatchingType.L42_CLASSNAME_RELATIONNAME_SEM] = l42
all_matchings[MatchingType.L52_CLASSNAME_ATTRNAME_SEM] = l52
combined = matcherlib.combine_matchings(all_matchings)
l = []
for (k, v) in combined.items():
matchings = v.get_matchings()
for el in matchings:
l.append(el)
combined_list = l
print(str(len(combined_list)))
gtm = set(ground_truth_matchings)
total_results = len(combined_list)
true_positives = 0
for el in combined_list:
if el in gtm:
true_positives += 1
if total_results == 0:
precision = 0
else:
precision = float(true_positives / total_results)
recall = float(true_positives / len(ground_truth_matchings))
(precision, recall) = (precision, recall)
combined_sum = matcherlib.summarize_matchings_to_ancestor(om.kr_handlers['efo'], combined_list)
gtm = set(ground_truth_matchings)
total_results = len(combined_sum)
true_positives = 0
for el in combined_sum:
if el in gtm:
true_positives += 1
if total_results == 0:
precision = 0
else:
precision = float(true_positives / total_results)
recall = float(true_positives / len(ground_truth_matchings))
(precision_sum, recall_sum) = (precision, recall)
print(name + ', ' + str(precision) + ', ' + str(recall))
print(name + '_sum, ' + str(precision_sum) + ', ' + str(recall_sum))
|
aurum-datadiscovery
|
positive
|
def _parse_pre_commit(s: str) -> ErrorsByHook:
ret = []
current_hookid = ''
current_lines: list[str] = []
def _push_current_hook_id() -> None:
nonlocal current_hookid
if not current_hookid:
return
parsed = linting.parse_generic_output('\n'.join(current_lines))
if parsed:
ret.append((current_hookid, parsed))
for line in s.splitlines():
hook_id_match = HOOK_ID_RE.match(line)
if hook_id_match:
<DeepExtract>
nonlocal current_hookid
if not current_hookid:
return
parsed = linting.parse_generic_output('\n'.join(current_lines))
if parsed:
ret.append((current_hookid, parsed))
</DeepExtract>
current_hookid = hook_id_match[1]
current_lines.clear()
else:
current_lines.append(line)
<DeepExtract>
nonlocal current_hookid
if not current_hookid:
return
parsed = linting.parse_generic_output('\n'.join(current_lines))
if parsed:
ret.append((current_hookid, parsed))
</DeepExtract>
return tuple(ret)
|
def _parse_pre_commit(s: str) -> ErrorsByHook:
ret = []
current_hookid = ''
current_lines: list[str] = []
def _push_current_hook_id() -> None:
nonlocal current_hookid
if not current_hookid:
return
parsed = linting.parse_generic_output('\n'.join(current_lines))
if parsed:
ret.append((current_hookid, parsed))
for line in s.splitlines():
hook_id_match = HOOK_ID_RE.match(line)
if hook_id_match:
nonlocal current_hookid
if not current_hookid:
return
parsed = linting.parse_generic_output('\n'.join(current_lines))
if parsed:
ret.append((current_hookid, parsed))
current_hookid = hook_id_match[1]
current_lines.clear()
else:
current_lines.append(line)
nonlocal current_hookid
if not current_hookid:
return
parsed = linting.parse_generic_output('\n'.join(current_lines))
if parsed:
ret.append((current_hookid, parsed))
return tuple(ret)
|
babi
|
positive
|
def generate_signature_block_using_private_key(keyfiles, digest):
signature_blocks = b''
for keyfile in keyfiles:
<DeepExtract>
sk = serialization.load_pem_private_key(keyfile.read(), password=None, backend=default_backend())
if isinstance(sk, rsa.RSAPrivateKey):
if sk.key_size != 3072:
raise esptool.FatalError('Key file has length %d bits. Secure boot v2 only supports RSA-3072.' % sk.key_size)
private_key = sk
if isinstance(sk, ec.EllipticCurvePrivateKey):
if not (isinstance(sk.curve, ec.SECP192R1) or isinstance(sk.curve, ec.SECP256R1)):
raise esptool.FatalError('Key file uses incorrect curve. Secure Boot V2 + ECDSA only supports NIST192p, NIST256p (aka prime192v1, prime256v1)')
private_key = sk
raise esptool.FatalError('Unsupported signing key for Secure Boot V2')
</DeepExtract>
if isinstance(private_key, rsa.RSAPrivateKey):
signature = private_key.sign(digest, padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=32), utils.Prehashed(hashes.SHA256()))
<DeepExtract>
primitives = namedtuple('primitives', ['n', 'e', 'm', 'rinv'])
numbers = private_key.public_key().public_numbers()
primitives.n = numbers.n
primitives.e = numbers.e
primitives.m = -rsa._modinv(primitives.n, 1 << 32)
rr = 1 << private_key.public_key().key_size * 2
primitives.rinv = rr % primitives.n
rsa_primitives = primitives
</DeepExtract>
<DeepExtract>
signature_block = struct.pack('<BBxx32s384sI384sI384s', SIG_BLOCK_MAGIC, SIG_BLOCK_VERSION_RSA, digest, int_to_bytes(rsa_primitives.n)[::-1], rsa_primitives.e, int_to_bytes(rsa_primitives.rinv)[::-1], rsa_primitives.m & 4294967295, signature[::-1])
signature_block = signature_block
</DeepExtract>
else:
signature = private_key.sign(digest, ec.ECDSA(utils.Prehashed(hashes.SHA256())))
numbers = private_key.public_key().public_numbers()
if isinstance(private_key.curve, ec.SECP192R1):
curve_len = 192
curve_id = CURVE_ID_P192
elif isinstance(numbers.curve, ec.SECP256R1):
curve_len = 256
curve_id = CURVE_ID_P256
else:
raise esptool.FatalError('Invalid ECDSA curve instance.')
<DeepExtract>
byte_len = int(curve_len / 8)
ab = int_to_bytes(numbers.x, byte_len)[::-1] + int_to_bytes(numbers.y, byte_len)[::-1]
assert len(ab) == 48 or len(ab) == 64
pubkey_point = ab
</DeepExtract>
(r, s) = utils.decode_dss_signature(signature)
<DeepExtract>
byte_len = int(curve_len / 8)
ab = int_to_bytes(r, byte_len)[::-1] + int_to_bytes(s, byte_len)[::-1]
assert len(ab) == 48 or len(ab) == 64
signature_rs = ab
</DeepExtract>
<DeepExtract>
signature_block = struct.pack('<BBxx32sB64s64s1031x', SIG_BLOCK_MAGIC, SIG_BLOCK_VERSION_ECDSA, digest, curve_id, pubkey_point, signature_rs)
signature_block = signature_block
</DeepExtract>
signature_block += struct.pack('<I', zlib.crc32(signature_block) & 4294967295)
signature_block += b'\x00' * 16
if len(signature_block) != SIG_BLOCK_SIZE:
raise esptool.FatalError('Incorrect signature block size')
signature_blocks += signature_block
return signature_blocks
|
def generate_signature_block_using_private_key(keyfiles, digest):
signature_blocks = b''
for keyfile in keyfiles:
sk = serialization.load_pem_private_key(keyfile.read(), password=None, backend=default_backend())
if isinstance(sk, rsa.RSAPrivateKey):
if sk.key_size != 3072:
raise esptool.FatalError('Key file has length %d bits. Secure boot v2 only supports RSA-3072.' % sk.key_size)
private_key = sk
if isinstance(sk, ec.EllipticCurvePrivateKey):
if not (isinstance(sk.curve, ec.SECP192R1) or isinstance(sk.curve, ec.SECP256R1)):
raise esptool.FatalError('Key file uses incorrect curve. Secure Boot V2 + ECDSA only supports NIST192p, NIST256p (aka prime192v1, prime256v1)')
private_key = sk
raise esptool.FatalError('Unsupported signing key for Secure Boot V2')
if isinstance(private_key, rsa.RSAPrivateKey):
signature = private_key.sign(digest, padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=32), utils.Prehashed(hashes.SHA256()))
primitives = namedtuple('primitives', ['n', 'e', 'm', 'rinv'])
numbers = private_key.public_key().public_numbers()
primitives.n = numbers.n
primitives.e = numbers.e
primitives.m = -rsa._modinv(primitives.n, 1 << 32)
rr = 1 << private_key.public_key().key_size * 2
primitives.rinv = rr % primitives.n
rsa_primitives = primitives
signature_block = struct.pack('<BBxx32s384sI384sI384s', SIG_BLOCK_MAGIC, SIG_BLOCK_VERSION_RSA, digest, int_to_bytes(rsa_primitives.n)[::-1], rsa_primitives.e, int_to_bytes(rsa_primitives.rinv)[::-1], rsa_primitives.m & 4294967295, signature[::-1])
signature_block = signature_block
else:
signature = private_key.sign(digest, ec.ECDSA(utils.Prehashed(hashes.SHA256())))
numbers = private_key.public_key().public_numbers()
if isinstance(private_key.curve, ec.SECP192R1):
curve_len = 192
curve_id = CURVE_ID_P192
elif isinstance(numbers.curve, ec.SECP256R1):
curve_len = 256
curve_id = CURVE_ID_P256
else:
raise esptool.FatalError('Invalid ECDSA curve instance.')
byte_len = int(curve_len / 8)
ab = int_to_bytes(numbers.x, byte_len)[::-1] + int_to_bytes(numbers.y, byte_len)[::-1]
assert len(ab) == 48 or len(ab) == 64
pubkey_point = ab
(r, s) = utils.decode_dss_signature(signature)
byte_len = int(curve_len / 8)
ab = int_to_bytes(r, byte_len)[::-1] + int_to_bytes(s, byte_len)[::-1]
assert len(ab) == 48 or len(ab) == 64
signature_rs = ab
signature_block = struct.pack('<BBxx32sB64s64s1031x', SIG_BLOCK_MAGIC, SIG_BLOCK_VERSION_ECDSA, digest, curve_id, pubkey_point, signature_rs)
signature_block = signature_block
signature_block += struct.pack('<I', zlib.crc32(signature_block) & 4294967295)
signature_block += b'\x00' * 16
if len(signature_block) != SIG_BLOCK_SIZE:
raise esptool.FatalError('Incorrect signature block size')
signature_blocks += signature_block
return signature_blocks
|
esptool
|
positive
|
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
grads = [param.grad.data for param in params if param.requires_grad and param.grad is not None]
world_size = dist.get_world_size()
if coalesce:
<DeepExtract>
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(grads, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in grads:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for (tensor, synced) in zip(bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
</DeepExtract>
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
|
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
grads = [param.grad.data for param in params if param.requires_grad and param.grad is not None]
world_size = dist.get_world_size()
if coalesce:
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(grads, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in grads:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for (tensor, synced) in zip(bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
|
DetectoRS
|
positive
|
def get_random(self):
"""
Returns a random statement from the database
"""
from random import randint
<DeepExtract>
count = self.statements.count()
</DeepExtract>
if count < 1:
raise self.EmptyDatabaseException()
random_integer = randint(0, count - 1)
statements = self.statements.find().limit(1).skip(random_integer)
return self.mongo_to_object(list(statements)[0])
|
def get_random(self):
"""
Returns a random statement from the database
"""
from random import randint
count = self.statements.count()
if count < 1:
raise self.EmptyDatabaseException()
random_integer = randint(0, count - 1)
statements = self.statements.find().limit(1).skip(random_integer)
return self.mongo_to_object(list(statements)[0])
|
ChatterBot
|
positive
|
def setup_as_moving_average_of(self, src_net, beta=0.99, beta_nontrainable=0.0):
assert isinstance(src_net, Network)
with absolute_name_scope(self.scope):
with tf.name_scope('MovingAvg'):
ops = []
for (name, var) in self.vars.items():
if name in src_net.vars:
cur_beta = beta if name in self.trainables else beta_nontrainable
<DeepExtract>
with tf.name_scope('Lerp'):
new_value = src_net.vars[name] + (var - src_net.vars[name]) * cur_beta
</DeepExtract>
ops.append(var.assign(new_value))
return tf.group(*ops)
|
def setup_as_moving_average_of(self, src_net, beta=0.99, beta_nontrainable=0.0):
assert isinstance(src_net, Network)
with absolute_name_scope(self.scope):
with tf.name_scope('MovingAvg'):
ops = []
for (name, var) in self.vars.items():
if name in src_net.vars:
cur_beta = beta if name in self.trainables else beta_nontrainable
with tf.name_scope('Lerp'):
new_value = src_net.vars[name] + (var - src_net.vars[name]) * cur_beta
ops.append(var.assign(new_value))
return tf.group(*ops)
|
BeautifyBasedOnGAN
|
positive
|
def view_policy(task, world_params, policy_fn, max_time_steps, number_of_resets, env_wrappers=np.array([]), env_wrappers_args=np.array([])):
"""
Visualizes a policy for a specified environment in the GUI
:param task: (Task) the task of the environment
:param world_params: (dict) the world_params of the environment
:param policy_fn: the policy to be evaluated
:param max_time_steps: (int) the maximum number of time steps per episode
:param number_of_resets: (int) the number of resets/episodes to be viewed
:param env_wrappers: (list) a list of gym wrappers
:param env_wrappers_args: (list) a list of kwargs for the gym wrappers
:return:
"""
actual_skip_frame = world_params['skip_frame']
<DeepExtract>
world_params['skip_frame'] = 1
if task.get_task_params() is None:
task = generate_task(task.get_task_name())
else:
if 'task_name' in task.get_task_params():
del task.get_task_params()['task_name']
task = generate_task(task.get_task_name(), **task.get_task_params())
if 'enable_visualization' in world_params.keys():
world_params_temp = dict(world_params)
del world_params_temp['enable_visualization']
env = CausalWorld(task, **world_params_temp, enable_visualization=True)
else:
env = CausalWorld(task, **world_params, enable_visualization=True)
for i in range(len(env_wrappers)):
env = env_wrappers[i](env, **env_wrappers_args[i])
env = env
</DeepExtract>
for reset_idx in range(number_of_resets):
obs = env.reset()
for time in range(int(max_time_steps / number_of_resets)):
desired_action = policy_fn(obs)
for _ in range(actual_skip_frame):
(obs, reward, done, info) = env.step(action=desired_action)
env.close()
|
def view_policy(task, world_params, policy_fn, max_time_steps, number_of_resets, env_wrappers=np.array([]), env_wrappers_args=np.array([])):
"""
Visualizes a policy for a specified environment in the GUI
:param task: (Task) the task of the environment
:param world_params: (dict) the world_params of the environment
:param policy_fn: the policy to be evaluated
:param max_time_steps: (int) the maximum number of time steps per episode
:param number_of_resets: (int) the number of resets/episodes to be viewed
:param env_wrappers: (list) a list of gym wrappers
:param env_wrappers_args: (list) a list of kwargs for the gym wrappers
:return:
"""
actual_skip_frame = world_params['skip_frame']
world_params['skip_frame'] = 1
if task.get_task_params() is None:
task = generate_task(task.get_task_name())
else:
if 'task_name' in task.get_task_params():
del task.get_task_params()['task_name']
task = generate_task(task.get_task_name(), **task.get_task_params())
if 'enable_visualization' in world_params.keys():
world_params_temp = dict(world_params)
del world_params_temp['enable_visualization']
env = CausalWorld(task, **world_params_temp, enable_visualization=True)
else:
env = CausalWorld(task, **world_params, enable_visualization=True)
for i in range(len(env_wrappers)):
env = env_wrappers[i](env, **env_wrappers_args[i])
env = env
for reset_idx in range(number_of_resets):
obs = env.reset()
for time in range(int(max_time_steps / number_of_resets)):
desired_action = policy_fn(obs)
for _ in range(actual_skip_frame):
(obs, reward, done, info) = env.step(action=desired_action)
env.close()
|
CausalWorld
|
positive
|
def preorder(self, node, *args, **kwargs):
"""
Tree traversal from top to bottom.
Args:
node(:class:`business_logic.models.Node`): node for starting tree traversal
*args: arbitrary args which should be passed to :func:`business_logic.models.NodeVisitor.visit`
**kwargs: arbitrary kwargs which should be passed to :func:`business_logic.models.NodeVisitor.visit`
"""
<DeepExtract>
raise NotImplementedError()
</DeepExtract>
for child in self.get_children(node):
<DeepExtract>
self.visit(child, *args, **kwargs)
for child in self.get_children(child):
self.preorder(child, *args, **kwargs)
</DeepExtract>
|
def preorder(self, node, *args, **kwargs):
"""
Tree traversal from top to bottom.
Args:
node(:class:`business_logic.models.Node`): node for starting tree traversal
*args: arbitrary args which should be passed to :func:`business_logic.models.NodeVisitor.visit`
**kwargs: arbitrary kwargs which should be passed to :func:`business_logic.models.NodeVisitor.visit`
"""
raise NotImplementedError()
for child in self.get_children(node):
self.visit(child, *args, **kwargs)
for child in self.get_children(child):
self.preorder(child, *args, **kwargs)
|
django-business-logic
|
positive
|
def _eval_batch(self, batch, is_test=False):
<DeepExtract>
if isinstance(batch, tuple):
(x_in, x_length) = batch
x_in = x_in[:, :x_length.max()]
x_channel_mask = create_channel_mask(x_length, max_len=x_in.size(1))
else:
x_in = batch
x_length = x_in.new_zeros(x_in.size(0), dtype=torch.long) + x_in.size(1)
x_channel_mask = x_in.new_ones(x_in.size(0), x_in.size(1), 1, dtype=torch.float32)
(x_in, x_length, x_channel_mask) = (x_in, x_length, x_channel_mask)
</DeepExtract>
if isinstance(self.model, LSTMModel):
return self._eval_batch_rnn(x_in, x_length, x_channel_mask)
else:
return self._eval_batch_flow(x_in, x_length, x_channel_mask, is_test=is_test)
|
def _eval_batch(self, batch, is_test=False):
if isinstance(batch, tuple):
(x_in, x_length) = batch
x_in = x_in[:, :x_length.max()]
x_channel_mask = create_channel_mask(x_length, max_len=x_in.size(1))
else:
x_in = batch
x_length = x_in.new_zeros(x_in.size(0), dtype=torch.long) + x_in.size(1)
x_channel_mask = x_in.new_ones(x_in.size(0), x_in.size(1), 1, dtype=torch.float32)
(x_in, x_length, x_channel_mask) = (x_in, x_length, x_channel_mask)
if isinstance(self.model, LSTMModel):
return self._eval_batch_rnn(x_in, x_length, x_channel_mask)
else:
return self._eval_batch_flow(x_in, x_length, x_channel_mask, is_test=is_test)
|
CategoricalNF
|
positive
|
def run_cmd(cmd, show_output=True, raise_errs=True, **kwargs):
"""Run a console command.
When show_output=True, prints output and returns exit code, otherwise returns output.
When raise_errs=True, raises a subprocess.CalledProcessError if the command fails.
"""
internal_assert(cmd and isinstance(cmd, list), 'console commands must be passed as non-empty lists')
if hasattr(shutil, 'which'):
cmd[0] = shutil.which(cmd[0]) or cmd[0]
logger.log_cmd(cmd)
try:
if show_output and raise_errs:
return subprocess.check_call(cmd, **kwargs)
elif show_output:
return subprocess.call(cmd, **kwargs)
else:
<DeepExtract>
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
(stdout, stderr, retcode) = ([], [], None)
while retcode is None:
if stdin is not None:
logger.log_prefix('STDIN < ', stdin.rstrip())
(raw_out, raw_err) = p.communicate(stdin)
stdin = None
out = raw_out.decode(get_encoding(sys.stdout), encoding_errors) if raw_out else ''
if out:
logger.log_stdout(out.rstrip())
stdout.append(out)
err = raw_err.decode(get_encoding(sys.stderr), encoding_errors) if raw_err else ''
if err:
logger.log(err.rstrip())
stderr.append(err)
retcode = p.poll()
(stdout, stderr, retcode) = (stdout, stderr, retcode)
</DeepExtract>
output = ''.join(stdout + stderr)
if retcode and raise_errs:
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
except OSError:
logger.log_exc()
if raise_errs:
raise subprocess.CalledProcessError(oserror_retcode, cmd)
elif show_output:
return oserror_retcode
else:
return ''
|
def run_cmd(cmd, show_output=True, raise_errs=True, **kwargs):
"""Run a console command.
When show_output=True, prints output and returns exit code, otherwise returns output.
When raise_errs=True, raises a subprocess.CalledProcessError if the command fails.
"""
internal_assert(cmd and isinstance(cmd, list), 'console commands must be passed as non-empty lists')
if hasattr(shutil, 'which'):
cmd[0] = shutil.which(cmd[0]) or cmd[0]
logger.log_cmd(cmd)
try:
if show_output and raise_errs:
return subprocess.check_call(cmd, **kwargs)
elif show_output:
return subprocess.call(cmd, **kwargs)
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
(stdout, stderr, retcode) = ([], [], None)
while retcode is None:
if stdin is not None:
logger.log_prefix('STDIN < ', stdin.rstrip())
(raw_out, raw_err) = p.communicate(stdin)
stdin = None
out = raw_out.decode(get_encoding(sys.stdout), encoding_errors) if raw_out else ''
if out:
logger.log_stdout(out.rstrip())
stdout.append(out)
err = raw_err.decode(get_encoding(sys.stderr), encoding_errors) if raw_err else ''
if err:
logger.log(err.rstrip())
stderr.append(err)
retcode = p.poll()
(stdout, stderr, retcode) = (stdout, stderr, retcode)
output = ''.join(stdout + stderr)
if retcode and raise_errs:
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
except OSError:
logger.log_exc()
if raise_errs:
raise subprocess.CalledProcessError(oserror_retcode, cmd)
elif show_output:
return oserror_retcode
else:
return ''
|
coconut
|
positive
|
def get_name(body):
<DeepExtract>
name = get_body_info(body).body_name.decode(encoding='UTF-8')
</DeepExtract>
if name == '':
name = 'body'
return '{}{}'.format(name, int(body))
|
def get_name(body):
name = get_body_info(body).body_name.decode(encoding='UTF-8')
if name == '':
name = 'body'
return '{}{}'.format(name, int(body))
|
decentralized-multiarm
|
positive
|
def stack_fn(x):
<DeepExtract>
x = block1(x, 64, stride=1, name='conv2' + '_block1', trainable=False, weight_decay=weight_decay)
for i in range(2, 3 + 1):
x = block1(x, 64, conv_shortcut=False, name='conv2' + '_block' + str(i), trainable=False, weight_decay=weight_decay)
x = x
</DeepExtract>
<DeepExtract>
x = block1(x, 128, stride=stride1, name='conv3' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 4 + 1):
x = block1(x, 128, conv_shortcut=False, name='conv3' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay)
x = x
</DeepExtract>
<DeepExtract>
x = block1(x, 256, stride=stride1, name='conv4' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 23 + 1):
x = block1(x, 256, conv_shortcut=False, name='conv4' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay)
x = x
</DeepExtract>
<DeepExtract>
x = block1(x, 512, stride=stride1, name='conv5' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 3 + 1):
x = block1(x, 512, conv_shortcut=False, name='conv5' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay)
x = x
</DeepExtract>
return x
|
def stack_fn(x):
x = block1(x, 64, stride=1, name='conv2' + '_block1', trainable=False, weight_decay=weight_decay)
for i in range(2, 3 + 1):
x = block1(x, 64, conv_shortcut=False, name='conv2' + '_block' + str(i), trainable=False, weight_decay=weight_decay)
x = x
x = block1(x, 128, stride=stride1, name='conv3' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 4 + 1):
x = block1(x, 128, conv_shortcut=False, name='conv3' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay)
x = x
x = block1(x, 256, stride=stride1, name='conv4' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 23 + 1):
x = block1(x, 256, conv_shortcut=False, name='conv4' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay)
x = x
x = block1(x, 512, stride=stride1, name='conv5' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 3 + 1):
x = block1(x, 512, conv_shortcut=False, name='conv5' + '_block' + str(i), trainable=trainable, weight_decay=weight_decay)
x = x
return x
|
deep-learning-models
|
positive
|
def optimize_lm(self, TWO_9d, TCW_9d, optimize_cameras=True, n_iterations=50, residuals_threshold=25, lambd0=0.001, L_down=9, L_up=11, eps=1e-05):
n_params_TWO = TWO_9d.numel()
n_params_TCW = TCW_9d.numel()
n_params = n_params_TWO + n_params_TCW
self.idJ = torch.eye(n_params).to(self.device).to(self.dtype)
prev_iter_is_update = False
lambd = lambd0
done = False
history = defaultdict(list)
for n in range(n_iterations):
if not prev_iter_is_update:
<DeepExtract>
(_, TCO_cand_aligned) = self.align_TCO_cand(TWO_9d, TCW_9d)
(cand_ids, view_ids, obj_ids, point_ids, xy_ids) = [self.residuals_ids[k] for k in ('cand_id', 'view_id', 'obj_id', 'point_id', 'xy_id')]
n_residuals = len(cand_ids)
arange_n = torch.arange(n_residuals)
TCW_9d = TCW_9d.unsqueeze(0).repeat(n_residuals, 1, 1).requires_grad_()
TWO_9d = TWO_9d.unsqueeze(0).repeat(n_residuals, 1, 1).requires_grad_()
TWO = compute_transform_from_pose9d(TWO_9d)
TCW = compute_transform_from_pose9d(TCW_9d)
TWO_n = TWO[arange_n, obj_ids]
TCW_n = TCW[arange_n, view_ids]
TCO_n = TCW_n @ TWO_n
K_n = self.K[view_ids]
TCO_cand_n = TCO_cand_aligned[cand_ids]
points_n = self.obj_points[obj_ids, point_ids].unsqueeze(1)
TCO_points_n = project_points(points_n, K_n, TCO_n).squeeze(1)[arange_n, xy_ids]
TCO_cand_points_n = project_points(points_n, K_n, TCO_cand_n).squeeze(1)[arange_n, xy_ids]
y = TCO_cand_points_n
yhat = TCO_points_n
errors = y - yhat
residuals = errors ** 2
residuals = torch.min(residuals, torch.ones_like(residuals) * residuals_threshold)
loss = residuals.mean()
if torch.is_grad_enabled():
yhat.sum().backward()
(errors, loss, J_TWO, J_TCW) = (errors, loss, TWO_9d.grad, TCW_9d.grad)
</DeepExtract>
history['TWO_9d'].append(TWO_9d)
history['TCW_9d'].append(TCW_9d)
history['loss'].append(loss)
history['lambda'].append(lambd)
history['iteration'].append(n)
if done:
break
with torch.no_grad():
J = torch.cat((J_TWO.flatten(-2, -1), J_TCW.flatten(-2, -1)), dim=-1)
<DeepExtract>
errors = errors.view(errors.numel(), 1)
A = J.t() @ J + lambd * self.idJ
b = J.t() @ errors
h = torch.pinverse(A.cpu()).cuda() @ b
h = h.flatten()
</DeepExtract>
h_TWO_9d = h[:n_params_TWO].view(self.n_objects, 9)
h_TCW_9d = h[n_params_TWO:].view(self.n_views, 9)
TWO_9d_updated = TWO_9d + h_TWO_9d
if optimize_cameras:
TCW_9d_updated = TCW_9d + h_TCW_9d
else:
TCW_9d_updated = TCW_9d
<DeepExtract>
(_, TCO_cand_aligned) = self.align_TCO_cand(TWO_9d_updated, TCW_9d_updated)
(cand_ids, view_ids, obj_ids, point_ids, xy_ids) = [self.residuals_ids[k] for k in ('cand_id', 'view_id', 'obj_id', 'point_id', 'xy_id')]
n_residuals = len(cand_ids)
arange_n = torch.arange(n_residuals)
TCW_9d_updated = TCW_9d_updated.unsqueeze(0).repeat(n_residuals, 1, 1).requires_grad_()
TWO_9d_updated = TWO_9d_updated.unsqueeze(0).repeat(n_residuals, 1, 1).requires_grad_()
TWO = compute_transform_from_pose9d(TWO_9d_updated)
TCW = compute_transform_from_pose9d(TCW_9d_updated)
TWO_n = TWO[arange_n, obj_ids]
TCW_n = TCW[arange_n, view_ids]
TCO_n = TCW_n @ TWO_n
K_n = self.K[view_ids]
TCO_cand_n = TCO_cand_aligned[cand_ids]
points_n = self.obj_points[obj_ids, point_ids].unsqueeze(1)
TCO_points_n = project_points(points_n, K_n, TCO_n).squeeze(1)[arange_n, xy_ids]
TCO_cand_points_n = project_points(points_n, K_n, TCO_cand_n).squeeze(1)[arange_n, xy_ids]
y = TCO_cand_points_n
yhat = TCO_points_n
errors = y - yhat
residuals = errors ** 2
residuals = torch.min(residuals, torch.ones_like(residuals) * residuals_threshold)
loss = residuals.mean()
if torch.is_grad_enabled():
yhat.sum().backward()
(errors, next_loss, J_TWO, J_TCW) = (errors, loss, TWO_9d_updated.grad, TCW_9d_updated.grad)
</DeepExtract>
rho = loss - next_loss
if rho.abs() < eps:
done = True
elif rho > eps:
TWO_9d = TWO_9d_updated
TCW_9d = TCW_9d_updated
loss = next_loss
lambd = max(lambd / L_down, 1e-07)
prev_iter_is_update = True
else:
lambd = min(lambd * L_up, 10000000.0)
prev_iter_is_update = False
return (TWO_9d, TCW_9d, history)
|
def optimize_lm(self, TWO_9d, TCW_9d, optimize_cameras=True, n_iterations=50, residuals_threshold=25, lambd0=0.001, L_down=9, L_up=11, eps=1e-05):
n_params_TWO = TWO_9d.numel()
n_params_TCW = TCW_9d.numel()
n_params = n_params_TWO + n_params_TCW
self.idJ = torch.eye(n_params).to(self.device).to(self.dtype)
prev_iter_is_update = False
lambd = lambd0
done = False
history = defaultdict(list)
for n in range(n_iterations):
if not prev_iter_is_update:
(_, TCO_cand_aligned) = self.align_TCO_cand(TWO_9d, TCW_9d)
(cand_ids, view_ids, obj_ids, point_ids, xy_ids) = [self.residuals_ids[k] for k in ('cand_id', 'view_id', 'obj_id', 'point_id', 'xy_id')]
n_residuals = len(cand_ids)
arange_n = torch.arange(n_residuals)
TCW_9d = TCW_9d.unsqueeze(0).repeat(n_residuals, 1, 1).requires_grad_()
TWO_9d = TWO_9d.unsqueeze(0).repeat(n_residuals, 1, 1).requires_grad_()
TWO = compute_transform_from_pose9d(TWO_9d)
TCW = compute_transform_from_pose9d(TCW_9d)
TWO_n = TWO[arange_n, obj_ids]
TCW_n = TCW[arange_n, view_ids]
TCO_n = TCW_n @ TWO_n
K_n = self.K[view_ids]
TCO_cand_n = TCO_cand_aligned[cand_ids]
points_n = self.obj_points[obj_ids, point_ids].unsqueeze(1)
TCO_points_n = project_points(points_n, K_n, TCO_n).squeeze(1)[arange_n, xy_ids]
TCO_cand_points_n = project_points(points_n, K_n, TCO_cand_n).squeeze(1)[arange_n, xy_ids]
y = TCO_cand_points_n
yhat = TCO_points_n
errors = y - yhat
residuals = errors ** 2
residuals = torch.min(residuals, torch.ones_like(residuals) * residuals_threshold)
loss = residuals.mean()
if torch.is_grad_enabled():
yhat.sum().backward()
(errors, loss, J_TWO, J_TCW) = (errors, loss, TWO_9d.grad, TCW_9d.grad)
history['TWO_9d'].append(TWO_9d)
history['TCW_9d'].append(TCW_9d)
history['loss'].append(loss)
history['lambda'].append(lambd)
history['iteration'].append(n)
if done:
break
with torch.no_grad():
J = torch.cat((J_TWO.flatten(-2, -1), J_TCW.flatten(-2, -1)), dim=-1)
errors = errors.view(errors.numel(), 1)
A = J.t() @ J + lambd * self.idJ
b = J.t() @ errors
h = torch.pinverse(A.cpu()).cuda() @ b
h = h.flatten()
h_TWO_9d = h[:n_params_TWO].view(self.n_objects, 9)
h_TCW_9d = h[n_params_TWO:].view(self.n_views, 9)
TWO_9d_updated = TWO_9d + h_TWO_9d
if optimize_cameras:
TCW_9d_updated = TCW_9d + h_TCW_9d
else:
TCW_9d_updated = TCW_9d
(_, TCO_cand_aligned) = self.align_TCO_cand(TWO_9d_updated, TCW_9d_updated)
(cand_ids, view_ids, obj_ids, point_ids, xy_ids) = [self.residuals_ids[k] for k in ('cand_id', 'view_id', 'obj_id', 'point_id', 'xy_id')]
n_residuals = len(cand_ids)
arange_n = torch.arange(n_residuals)
TCW_9d_updated = TCW_9d_updated.unsqueeze(0).repeat(n_residuals, 1, 1).requires_grad_()
TWO_9d_updated = TWO_9d_updated.unsqueeze(0).repeat(n_residuals, 1, 1).requires_grad_()
TWO = compute_transform_from_pose9d(TWO_9d_updated)
TCW = compute_transform_from_pose9d(TCW_9d_updated)
TWO_n = TWO[arange_n, obj_ids]
TCW_n = TCW[arange_n, view_ids]
TCO_n = TCW_n @ TWO_n
K_n = self.K[view_ids]
TCO_cand_n = TCO_cand_aligned[cand_ids]
points_n = self.obj_points[obj_ids, point_ids].unsqueeze(1)
TCO_points_n = project_points(points_n, K_n, TCO_n).squeeze(1)[arange_n, xy_ids]
TCO_cand_points_n = project_points(points_n, K_n, TCO_cand_n).squeeze(1)[arange_n, xy_ids]
y = TCO_cand_points_n
yhat = TCO_points_n
errors = y - yhat
residuals = errors ** 2
residuals = torch.min(residuals, torch.ones_like(residuals) * residuals_threshold)
loss = residuals.mean()
if torch.is_grad_enabled():
yhat.sum().backward()
(errors, next_loss, J_TWO, J_TCW) = (errors, loss, TWO_9d_updated.grad, TCW_9d_updated.grad)
rho = loss - next_loss
if rho.abs() < eps:
done = True
elif rho > eps:
TWO_9d = TWO_9d_updated
TCW_9d = TCW_9d_updated
loss = next_loss
lambd = max(lambd / L_down, 1e-07)
prev_iter_is_update = True
else:
lambd = min(lambd * L_up, 10000000.0)
prev_iter_is_update = False
return (TWO_9d, TCW_9d, history)
|
cosypose
|
positive
|
def load_liquids(self, group_name, pos):
""" Load liquid plane of the WMO group. Should only be called if MLIQ is present. """
vertices = []
for y in range(0, self.mliq.yVerts):
y_pos = self.mliq.Position[1] + y * 4.1666625
for x in range(0, self.mliq.xVerts):
x_pos = self.mliq.Position[0] + x * 4.1666625
vertices.append((x_pos, y_pos, self.mliq.VertexMap[y * self.mliq.xVerts + x].height[0]))
indices = []
for y in range(self.mliq.yTiles):
for x in range(self.mliq.xTiles):
indices.append(y * self.mliq.xVerts + x)
indices.append(y * self.mliq.xVerts + x + 1)
indices.append((y + 1) * self.mliq.xVerts + x)
indices.append((y + 1) * self.mliq.xVerts + x + 1)
faces = []
for i in range(0, len(indices), 4):
faces.append((indices[i], indices[i + 1], indices[i + 3], indices[i + 2]))
name = group_name + '_Liquid'
mesh = bpy.data.meshes.new(name)
obj = bpy.data.objects.new(name, mesh)
mesh.from_pydata(vertices, [], faces)
mesh.update(calc_edges=True)
mesh.validate()
if self.mogp.LiquidType in {3, 4, 7, 8, 11, 12}:
uvMap = {}
for vertex in mesh.vertices:
uvMap[vertex.index] = (self.mliq.VertexMap[vertex.index].u, self.mliq.VertexMap[vertex.index].v)
uv1 = mesh.uv_textures.new('UVMap')
uv_layer1 = mesh.uv_layers[0]
for poly in mesh.polygons:
for loop_index in poly.loop_indices:
uv_layer1.data[loop_index].uv = (uvMap.get(mesh.loops[loop_index].vertex_index)[0], -uvMap.get(mesh.loops[loop_index].vertex_index)[1])
bit = 1
while bit <= 128:
vc_layer = mesh.vertex_colors.new('flag_' + hex(bit))
for poly in mesh.polygons:
tileFlag = self.mliq.TileFlags[poly.index]
for loop in poly.loop_indices:
if tileFlag & bit:
vc_layer.data[loop].color = (0, 0, 255)
bit <<= 1
obj.location = pos
bpy.context.scene.objects.link(obj)
bpy.context.scene.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent(inside=True)
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
obj.lock_scale = [True, True, True]
obj.lock_rotation[2] = True
obj.WowLiquid.Enabled = True
real_liquid_type = 0
if self.root.mohd.Flags & 4:
real_liquid_type = self.mogp.LiquidType
else:
<DeepExtract>
real_liquid_type = 0
if self.mogp.LiquidType < 20:
if self.mogp.LiquidType == 0:
real_liquid_type = 14 if self.mogp.Flags & 524288 else 13
elif self.mogp.LiquidType == 1:
real_liquid_type = 14
elif self.mogp.LiquidType == 2:
real_liquid_type = 19
elif self.mogp.LiquidType == 15:
real_liquid_type = 17
elif self.mogp.LiquidType == 3:
real_liquid_type = 20
else:
real_liquid_type = self.mogp.LiquidType + 1
real_liquid_type = real_liquid_type
</DeepExtract>
obj.WowLiquid.Color = self.root.material_lookup[self.mliq.materialID].WowMaterial.DiffColor
obj.WowLiquid.LiquidType = str(real_liquid_type)
obj.WowLiquid.WMOGroup = group_name
if self.root.parent:
obj.parent = self.root.parent
|
def load_liquids(self, group_name, pos):
""" Load liquid plane of the WMO group. Should only be called if MLIQ is present. """
vertices = []
for y in range(0, self.mliq.yVerts):
y_pos = self.mliq.Position[1] + y * 4.1666625
for x in range(0, self.mliq.xVerts):
x_pos = self.mliq.Position[0] + x * 4.1666625
vertices.append((x_pos, y_pos, self.mliq.VertexMap[y * self.mliq.xVerts + x].height[0]))
indices = []
for y in range(self.mliq.yTiles):
for x in range(self.mliq.xTiles):
indices.append(y * self.mliq.xVerts + x)
indices.append(y * self.mliq.xVerts + x + 1)
indices.append((y + 1) * self.mliq.xVerts + x)
indices.append((y + 1) * self.mliq.xVerts + x + 1)
faces = []
for i in range(0, len(indices), 4):
faces.append((indices[i], indices[i + 1], indices[i + 3], indices[i + 2]))
name = group_name + '_Liquid'
mesh = bpy.data.meshes.new(name)
obj = bpy.data.objects.new(name, mesh)
mesh.from_pydata(vertices, [], faces)
mesh.update(calc_edges=True)
mesh.validate()
if self.mogp.LiquidType in {3, 4, 7, 8, 11, 12}:
uvMap = {}
for vertex in mesh.vertices:
uvMap[vertex.index] = (self.mliq.VertexMap[vertex.index].u, self.mliq.VertexMap[vertex.index].v)
uv1 = mesh.uv_textures.new('UVMap')
uv_layer1 = mesh.uv_layers[0]
for poly in mesh.polygons:
for loop_index in poly.loop_indices:
uv_layer1.data[loop_index].uv = (uvMap.get(mesh.loops[loop_index].vertex_index)[0], -uvMap.get(mesh.loops[loop_index].vertex_index)[1])
bit = 1
while bit <= 128:
vc_layer = mesh.vertex_colors.new('flag_' + hex(bit))
for poly in mesh.polygons:
tileFlag = self.mliq.TileFlags[poly.index]
for loop in poly.loop_indices:
if tileFlag & bit:
vc_layer.data[loop].color = (0, 0, 255)
bit <<= 1
obj.location = pos
bpy.context.scene.objects.link(obj)
bpy.context.scene.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent(inside=True)
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
obj.lock_scale = [True, True, True]
obj.lock_rotation[2] = True
obj.WowLiquid.Enabled = True
real_liquid_type = 0
if self.root.mohd.Flags & 4:
real_liquid_type = self.mogp.LiquidType
else:
real_liquid_type = 0
if self.mogp.LiquidType < 20:
if self.mogp.LiquidType == 0:
real_liquid_type = 14 if self.mogp.Flags & 524288 else 13
elif self.mogp.LiquidType == 1:
real_liquid_type = 14
elif self.mogp.LiquidType == 2:
real_liquid_type = 19
elif self.mogp.LiquidType == 15:
real_liquid_type = 17
elif self.mogp.LiquidType == 3:
real_liquid_type = 20
else:
real_liquid_type = self.mogp.LiquidType + 1
real_liquid_type = real_liquid_type
obj.WowLiquid.Color = self.root.material_lookup[self.mliq.materialID].WowMaterial.DiffColor
obj.WowLiquid.LiquidType = str(real_liquid_type)
obj.WowLiquid.WMOGroup = group_name
if self.root.parent:
obj.parent = self.root.parent
|
Blender-WMO-import-export-scripts
|
positive
|
def editpermissions_group_view(self, request, group_id, forum_id=None):
""" Allows to edit group permissions for the considered forum.
The view displays a form to define which permissions are granted for the given group for the
considered forum.
"""
group = get_object_or_404(Group, pk=group_id)
forum = get_object_or_404(Forum, pk=forum_id) if forum_id else None
<DeepExtract>
context = {'adminform': {'model_admin': self}, 'media': self.media, 'object': forum, 'app_label': self.model._meta.app_label, 'opts': self.model._meta, 'has_change_permission': self.has_change_permission(request, forum)}
try:
context.update(self.admin_site.each_context(request))
except TypeError:
context.update(self.admin_site.each_context())
except AttributeError:
pass
context = context
</DeepExtract>
context['forum'] = forum
context['title'] = '{} - {}'.format(_('Forum permissions'), group)
<DeepExtract>
editable_permissions = sorted(ForumPermission.objects.all(), key=lambda p: p.name)
granted_permissions = GroupForumPermission.objects.filter(permission__in=editable_permissions, has_perm=True, **{'forum': forum, 'group': group}).values_list('permission__codename', flat=True)
non_granted_permissions = GroupForumPermission.objects.filter(permission__in=editable_permissions, has_perm=False, **{'forum': forum, 'group': group}).values_list('permission__codename', flat=True)
permissions_dict = OrderedDict()
for p in editable_permissions:
if p.codename in granted_permissions:
perm_state = PermissionsForm.PERM_GRANTED
elif p.codename in non_granted_permissions:
perm_state = PermissionsForm.PERM_NOT_GRANTED
else:
perm_state = PermissionsForm.PERM_NOT_SET
permissions_dict[p.codename] = (p, perm_state)
if request.method == 'POST':
form = PermissionsForm(request.POST, permissions_dict=permissions_dict)
if form.is_valid():
for (codename, value) in form.cleaned_data.items():
try:
perm = GroupForumPermission.objects.get(permission=permissions_dict[codename][0], **{'forum': forum, 'group': group})
except GroupForumPermission.DoesNotExist:
if value == PermissionsForm.PERM_NOT_SET:
continue
perm = GroupForumPermission.objects.create(permission=permissions_dict[codename][0], **{'forum': forum, 'group': group})
if value == PermissionsForm.PERM_NOT_SET:
perm.delete()
continue
perm.has_perm = value == PermissionsForm.PERM_GRANTED
perm.save()
self.message_user(request, _('Permissions successfully applied'))
else:
form = PermissionsForm(permissions_dict=permissions_dict)
context['form'] = form
</DeepExtract>
return render(request, self.editpermissions_group_view_template_name, context)
|
def editpermissions_group_view(self, request, group_id, forum_id=None):
""" Allows to edit group permissions for the considered forum.
The view displays a form to define which permissions are granted for the given group for the
considered forum.
"""
group = get_object_or_404(Group, pk=group_id)
forum = get_object_or_404(Forum, pk=forum_id) if forum_id else None
context = {'adminform': {'model_admin': self}, 'media': self.media, 'object': forum, 'app_label': self.model._meta.app_label, 'opts': self.model._meta, 'has_change_permission': self.has_change_permission(request, forum)}
try:
context.update(self.admin_site.each_context(request))
except TypeError:
context.update(self.admin_site.each_context())
except AttributeError:
pass
context = context
context['forum'] = forum
context['title'] = '{} - {}'.format(_('Forum permissions'), group)
editable_permissions = sorted(ForumPermission.objects.all(), key=lambda p: p.name)
granted_permissions = GroupForumPermission.objects.filter(permission__in=editable_permissions, has_perm=True, **{'forum': forum, 'group': group}).values_list('permission__codename', flat=True)
non_granted_permissions = GroupForumPermission.objects.filter(permission__in=editable_permissions, has_perm=False, **{'forum': forum, 'group': group}).values_list('permission__codename', flat=True)
permissions_dict = OrderedDict()
for p in editable_permissions:
if p.codename in granted_permissions:
perm_state = PermissionsForm.PERM_GRANTED
elif p.codename in non_granted_permissions:
perm_state = PermissionsForm.PERM_NOT_GRANTED
else:
perm_state = PermissionsForm.PERM_NOT_SET
permissions_dict[p.codename] = (p, perm_state)
if request.method == 'POST':
form = PermissionsForm(request.POST, permissions_dict=permissions_dict)
if form.is_valid():
for (codename, value) in form.cleaned_data.items():
try:
perm = GroupForumPermission.objects.get(permission=permissions_dict[codename][0], **{'forum': forum, 'group': group})
except GroupForumPermission.DoesNotExist:
if value == PermissionsForm.PERM_NOT_SET:
continue
perm = GroupForumPermission.objects.create(permission=permissions_dict[codename][0], **{'forum': forum, 'group': group})
if value == PermissionsForm.PERM_NOT_SET:
perm.delete()
continue
perm.has_perm = value == PermissionsForm.PERM_GRANTED
perm.save()
self.message_user(request, _('Permissions successfully applied'))
else:
form = PermissionsForm(permissions_dict=permissions_dict)
context['form'] = form
return render(request, self.editpermissions_group_view_template_name, context)
|
django-machina
|
positive
|
@native_method
def call_object_method_v(self, uc, env, obj_idx, method_id, args):
<DeepExtract>
if obj_idx == 0:
obj = None
if self._locals.in_range(obj_idx):
obj = self._locals.get(obj_idx)
if self._globals.in_range(obj_idx):
obj = self._globals.get(obj_idx)
raise RuntimeError('Invalid get_reference(%d)' % obj_idx)
</DeepExtract>
if not isinstance(obj, jobject):
raise ValueError('Expected a jobject.')
method = obj.value.__class__.find_method_by_id(method_id)
if method is None:
raise RuntimeError('Could not find method %d in object %s by id.' % (method_id, obj.value.jvm_name))
logger.debug('JNIEnv->CallObjectMethodV(%s, %s <%s>, 0x%x) was called' % (obj.value.jvm_name, method.name, method.signature, args))
<DeepExtract>
if method.args_list is None:
constructor_args = []
result = []
for arg_name in method.args_list:
if arg_name == 'jint':
ref = int.from_bytes(uc.mem_read(args, 4), byteorder='little')
result.append(ref)
args = args + 4
elif arg_name == 'jstring':
ref = int.from_bytes(uc.mem_read(args, 4), byteorder='little')
result.append(self.get_reference(ref))
args = args + 4
elif arg_name == 'jobject':
ref = int.from_bytes(uc.mem_read(args, 4), byteorder='little')
result.append(self.get_reference(ref))
args = args + 4
elif arg_name == 'jbyteArray':
ref = int.from_bytes(uc.mem_read(args, 4), byteorder='little')
result.append(self.get_reference(ref))
args = args + 4
else:
raise NotImplementedError('Unknown arg name %s' % arg_name)
constructor_args = result
</DeepExtract>
return method.func(obj.value, self._emu, *constructor_args)
|
@native_method
def call_object_method_v(self, uc, env, obj_idx, method_id, args):
if obj_idx == 0:
obj = None
if self._locals.in_range(obj_idx):
obj = self._locals.get(obj_idx)
if self._globals.in_range(obj_idx):
obj = self._globals.get(obj_idx)
raise RuntimeError('Invalid get_reference(%d)' % obj_idx)
if not isinstance(obj, jobject):
raise ValueError('Expected a jobject.')
method = obj.value.__class__.find_method_by_id(method_id)
if method is None:
raise RuntimeError('Could not find method %d in object %s by id.' % (method_id, obj.value.jvm_name))
logger.debug('JNIEnv->CallObjectMethodV(%s, %s <%s>, 0x%x) was called' % (obj.value.jvm_name, method.name, method.signature, args))
if method.args_list is None:
constructor_args = []
result = []
for arg_name in method.args_list:
if arg_name == 'jint':
ref = int.from_bytes(uc.mem_read(args, 4), byteorder='little')
result.append(ref)
args = args + 4
elif arg_name == 'jstring':
ref = int.from_bytes(uc.mem_read(args, 4), byteorder='little')
result.append(self.get_reference(ref))
args = args + 4
elif arg_name == 'jobject':
ref = int.from_bytes(uc.mem_read(args, 4), byteorder='little')
result.append(self.get_reference(ref))
args = args + 4
elif arg_name == 'jbyteArray':
ref = int.from_bytes(uc.mem_read(args, 4), byteorder='little')
result.append(self.get_reference(ref))
args = args + 4
else:
raise NotImplementedError('Unknown arg name %s' % arg_name)
constructor_args = result
return method.func(obj.value, self._emu, *constructor_args)
|
AndroidNativeEmu
|
positive
|
def _write_from_reader(tlobject, builder):
builder.writeln('@classmethod')
builder.writeln('def from_reader(cls, reader):')
for arg in tlobject.args:
<DeepExtract>
if arg.generic_definition:
return
was_flag = False
if arg.is_flag:
if 'true' == arg.type:
builder.writeln('{} = bool(flags & {})', '_' + arg.name, 1 << arg.flag_index)
return
was_flag = True
builder.writeln('if flags & {}:', 1 << arg.flag_index)
arg.is_flag = False
if arg.is_vector:
if arg.use_vector_id:
builder.writeln('reader.read_int()')
builder.writeln('{} = []', '_' + arg.name)
builder.writeln('for _ in range(reader.read_int()):')
arg.is_vector = False
_write_arg_read_code(builder, arg, tlobject.args, name='_x')
builder.writeln('{}.append(_x)', '_' + arg.name)
arg.is_vector = True
elif arg.flag_indicator:
builder.writeln('flags = reader.read_int()')
builder.writeln()
elif 'int' == arg.type:
builder.writeln('{} = reader.read_int()', '_' + arg.name)
elif 'long' == arg.type:
builder.writeln('{} = reader.read_long()', '_' + arg.name)
elif 'int128' == arg.type:
builder.writeln('{} = reader.read_large_int(bits=128)', '_' + arg.name)
elif 'int256' == arg.type:
builder.writeln('{} = reader.read_large_int(bits=256)', '_' + arg.name)
elif 'double' == arg.type:
builder.writeln('{} = reader.read_double()', '_' + arg.name)
elif 'string' == arg.type:
builder.writeln('{} = reader.tgread_string()', '_' + arg.name)
elif 'Bool' == arg.type:
builder.writeln('{} = reader.tgread_bool()', '_' + arg.name)
elif 'true' == arg.type:
builder.writeln('{} = True', '_' + arg.name)
elif 'bytes' == arg.type:
builder.writeln('{} = reader.tgread_bytes()', '_' + arg.name)
elif 'date' == arg.type:
builder.writeln('{} = reader.tgread_date()', '_' + arg.name)
elif not arg.skip_constructor_id:
builder.writeln('{} = reader.tgread_object()', '_' + arg.name)
else:
sep_index = arg.type.find('.')
if sep_index == -1:
(ns, t) = ('.', arg.type)
else:
(ns, t) = ('.' + arg.type[:sep_index], arg.type[sep_index + 1:])
class_name = snake_to_camel_case(t)
builder.writeln('from {} import {}', ns, class_name)
builder.writeln('{} = {}.from_reader(reader)', '_' + arg.name, class_name)
if arg.is_vector:
builder.end_block()
if was_flag:
builder.current_indent -= 1
builder.writeln('else:')
builder.writeln('{} = None', '_' + arg.name)
builder.current_indent -= 1
arg.is_flag = True
</DeepExtract>
builder.writeln('return cls({})', ', '.join(('{0}=_{0}'.format(a.name) for a in tlobject.real_args)))
|
def _write_from_reader(tlobject, builder):
builder.writeln('@classmethod')
builder.writeln('def from_reader(cls, reader):')
for arg in tlobject.args:
if arg.generic_definition:
return
was_flag = False
if arg.is_flag:
if 'true' == arg.type:
builder.writeln('{} = bool(flags & {})', '_' + arg.name, 1 << arg.flag_index)
return
was_flag = True
builder.writeln('if flags & {}:', 1 << arg.flag_index)
arg.is_flag = False
if arg.is_vector:
if arg.use_vector_id:
builder.writeln('reader.read_int()')
builder.writeln('{} = []', '_' + arg.name)
builder.writeln('for _ in range(reader.read_int()):')
arg.is_vector = False
_write_arg_read_code(builder, arg, tlobject.args, name='_x')
builder.writeln('{}.append(_x)', '_' + arg.name)
arg.is_vector = True
elif arg.flag_indicator:
builder.writeln('flags = reader.read_int()')
builder.writeln()
elif 'int' == arg.type:
builder.writeln('{} = reader.read_int()', '_' + arg.name)
elif 'long' == arg.type:
builder.writeln('{} = reader.read_long()', '_' + arg.name)
elif 'int128' == arg.type:
builder.writeln('{} = reader.read_large_int(bits=128)', '_' + arg.name)
elif 'int256' == arg.type:
builder.writeln('{} = reader.read_large_int(bits=256)', '_' + arg.name)
elif 'double' == arg.type:
builder.writeln('{} = reader.read_double()', '_' + arg.name)
elif 'string' == arg.type:
builder.writeln('{} = reader.tgread_string()', '_' + arg.name)
elif 'Bool' == arg.type:
builder.writeln('{} = reader.tgread_bool()', '_' + arg.name)
elif 'true' == arg.type:
builder.writeln('{} = True', '_' + arg.name)
elif 'bytes' == arg.type:
builder.writeln('{} = reader.tgread_bytes()', '_' + arg.name)
elif 'date' == arg.type:
builder.writeln('{} = reader.tgread_date()', '_' + arg.name)
elif not arg.skip_constructor_id:
builder.writeln('{} = reader.tgread_object()', '_' + arg.name)
else:
sep_index = arg.type.find('.')
if sep_index == -1:
(ns, t) = ('.', arg.type)
else:
(ns, t) = ('.' + arg.type[:sep_index], arg.type[sep_index + 1:])
class_name = snake_to_camel_case(t)
builder.writeln('from {} import {}', ns, class_name)
builder.writeln('{} = {}.from_reader(reader)', '_' + arg.name, class_name)
if arg.is_vector:
builder.end_block()
if was_flag:
builder.current_indent -= 1
builder.writeln('else:')
builder.writeln('{} = None', '_' + arg.name)
builder.current_indent -= 1
arg.is_flag = True
builder.writeln('return cls({})', ', '.join(('{0}=_{0}'.format(a.name) for a in tlobject.real_args)))
|
Awesome-Scripts
|
positive
|
def sphereize_normals(modal):
targ_loc = get_np_matrix_transformed_vecs(np.array(modal._target_emp.location), modal._object.matrix_world.inverted())
local_cos = get_np_matrix_transformed_vecs(modal._container.loop_coords[modal._container.sel_status], modal._object.matrix_world.inverted())
cache_norms = modal._container.cache_norms[modal._container.sel_status] * (1.0 - modal.target_strength)
modal._container.new_norms[modal._container.sel_status] = (local_cos - targ_loc) * modal.target_strength + cache_norms
modal.redraw_active = True
<DeepExtract>
modal._object.data.edges.foreach_set('use_edge_sharp', modal._container.og_sharp)
if modal._container.filter_mask.any():
modal._container.new_norms[:] = modal._container.cache_norms * (1.0 - modal._container.filter_weights[:, None]) + modal._container.new_norms * modal._container.filter_weights[:, None]
scale = 1 / np.sqrt(np.sum(np.square(modal._container.new_norms), axis=1))
modal._container.new_norms = modal._container.new_norms * scale[:, None]
if modal._mirror_x:
sel_norms = modal._container.new_norms[modal._container.sel_status]
sel_norms[:, 0] *= -1
modal._container.new_norms[modal.mir_loops_x[modal._container.sel_status]] = sel_norms
if modal._mirror_y:
sel_norms = modal._container.new_norms[modal._container.sel_status]
sel_norms[:, 1] *= -1
modal._container.new_norms[modal.mir_loops_y[modal._container.sel_status]] = sel_norms
if modal._mirror_z:
sel_norms = modal._container.new_norms[modal._container.sel_status]
sel_norms[:, 2] *= -1
modal._container.new_norms[modal.mir_loops_z[modal._container.sel_status]] = sel_norms
modal._object.data.normals_split_custom_set(modal._container.new_norms)
modal.redraw = True
return
</DeepExtract>
return
|
def sphereize_normals(modal):
targ_loc = get_np_matrix_transformed_vecs(np.array(modal._target_emp.location), modal._object.matrix_world.inverted())
local_cos = get_np_matrix_transformed_vecs(modal._container.loop_coords[modal._container.sel_status], modal._object.matrix_world.inverted())
cache_norms = modal._container.cache_norms[modal._container.sel_status] * (1.0 - modal.target_strength)
modal._container.new_norms[modal._container.sel_status] = (local_cos - targ_loc) * modal.target_strength + cache_norms
modal.redraw_active = True
modal._object.data.edges.foreach_set('use_edge_sharp', modal._container.og_sharp)
if modal._container.filter_mask.any():
modal._container.new_norms[:] = modal._container.cache_norms * (1.0 - modal._container.filter_weights[:, None]) + modal._container.new_norms * modal._container.filter_weights[:, None]
scale = 1 / np.sqrt(np.sum(np.square(modal._container.new_norms), axis=1))
modal._container.new_norms = modal._container.new_norms * scale[:, None]
if modal._mirror_x:
sel_norms = modal._container.new_norms[modal._container.sel_status]
sel_norms[:, 0] *= -1
modal._container.new_norms[modal.mir_loops_x[modal._container.sel_status]] = sel_norms
if modal._mirror_y:
sel_norms = modal._container.new_norms[modal._container.sel_status]
sel_norms[:, 1] *= -1
modal._container.new_norms[modal.mir_loops_y[modal._container.sel_status]] = sel_norms
if modal._mirror_z:
sel_norms = modal._container.new_norms[modal._container.sel_status]
sel_norms[:, 2] *= -1
modal._container.new_norms[modal.mir_loops_z[modal._container.sel_status]] = sel_norms
modal._object.data.normals_split_custom_set(modal._container.new_norms)
modal.redraw = True
return
return
|
Abnormal
|
positive
|
def getASG(self, sem_actions=None, defaults=True):
"""
Creates Abstract Semantic Graph (ASG) from the parse tree.
Args:
sem_actions (dict): The semantic actions dictionary to use for
semantic analysis. Rule names are the keys and semantic action
objects are values.
defaults (bool): If True a default semantic action will be
applied in case no action is defined for the node.
"""
if not self.parse_tree:
raise Exception("Parse tree is empty. You did call parse(), didn't you?")
if sem_actions is None:
if not self.sem_actions:
raise Exception('Semantic actions not defined.')
else:
sem_actions = self.sem_actions
if type(sem_actions) is not dict:
raise Exception('Semantic actions parameter must be a dictionary.')
for_second_pass = []
def tree_walk(node):
"""
Walking the parse tree and calling first_pass for every registered
semantic actions and creating list of object that needs to be
called in the second pass.
"""
if self.debug:
<DeepExtract>
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, 'Walking down %s type: %s str: %s' % (node.name, type(node).__name__, text(node))), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
</DeepExtract>
children = SemanticActionResults()
if isinstance(node, NonTerminal):
for n in node:
<DeepExtract>
if self.debug:
self.dprint('Walking down %s type: %s str: %s' % (n.name, type(n).__name__, text(n)))
children = SemanticActionResults()
if isinstance(n, NonTerminal):
for n in n:
child = tree_walk(n)
if child is not None:
children.append_result(n.rule_name, child)
if self.debug:
self.dprint("Processing %s = '%s' type:%s len:%d" % (n.name, text(n), type(n).__name__, len(n) if isinstance(n, list) else 0))
for (i, a) in enumerate(children):
self.dprint(' %d:%s type:%s' % (i + 1, text(a), type(a).__name__))
if n.rule_name in sem_actions:
sem_action = sem_actions[n.rule_name]
if isinstance(sem_action, types.FunctionType):
retval = sem_action(self, n, children)
else:
retval = sem_action.first_pass(self, n, children)
if hasattr(sem_action, 'second_pass'):
for_second_pass.append((n.rule_name, retval))
if self.debug:
action_name = sem_action.__name__ if hasattr(sem_action, '__name__') else sem_action.__class__.__name__
self.dprint(' Applying semantic action %s' % action_name)
elif defaults:
if self.debug:
self.dprint(' Applying default semantic action.')
retval = SemanticAction().first_pass(self, n, children)
else:
retval = n
if self.debug:
if retval is None:
self.dprint(' Suppressed.')
else:
self.dprint(' Resolved to = %s type:%s' % (text(retval), type(retval).__name__))
child = retval
</DeepExtract>
if child is not None:
children.append_result(n.rule_name, child)
if self.debug:
<DeepExtract>
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, "Processing %s = '%s' type:%s len:%d" % (node.name, text(node), type(node).__name__, len(node) if isinstance(node, list) else 0)), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
</DeepExtract>
for (i, a) in enumerate(children):
<DeepExtract>
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, ' %d:%s type:%s' % (i + 1, text(a), type(a).__name__)), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
</DeepExtract>
if node.rule_name in sem_actions:
sem_action = sem_actions[node.rule_name]
if isinstance(sem_action, types.FunctionType):
retval = sem_action(self, node, children)
else:
retval = sem_action.first_pass(self, node, children)
if hasattr(sem_action, 'second_pass'):
for_second_pass.append((node.rule_name, retval))
if self.debug:
action_name = sem_action.__name__ if hasattr(sem_action, '__name__') else sem_action.__class__.__name__
<DeepExtract>
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, ' Applying semantic action %s' % action_name), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
</DeepExtract>
elif defaults:
if self.debug:
<DeepExtract>
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, ' Applying default semantic action.'), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
</DeepExtract>
retval = SemanticAction().first_pass(self, node, children)
else:
retval = node
if self.debug:
if retval is None:
<DeepExtract>
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, ' Suppressed.'), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
</DeepExtract>
else:
<DeepExtract>
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, ' Resolved to = %s type:%s' % (text(retval), type(retval).__name__)), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
</DeepExtract>
return retval
if self.debug:
<DeepExtract>
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, 'ASG: First pass'), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
</DeepExtract>
<DeepExtract>
if self.debug:
self.dprint('Walking down %s type: %s str: %s' % (self.parse_tree.name, type(self.parse_tree).__name__, text(self.parse_tree)))
children = SemanticActionResults()
if isinstance(self.parse_tree, NonTerminal):
for n in self.parse_tree:
child = tree_walk(n)
if child is not None:
children.append_result(n.rule_name, child)
if self.debug:
self.dprint("Processing %s = '%s' type:%s len:%d" % (self.parse_tree.name, text(self.parse_tree), type(self.parse_tree).__name__, len(self.parse_tree) if isinstance(self.parse_tree, list) else 0))
for (i, a) in enumerate(children):
self.dprint(' %d:%s type:%s' % (i + 1, text(a), type(a).__name__))
if self.parse_tree.rule_name in sem_actions:
sem_action = sem_actions[self.parse_tree.rule_name]
if isinstance(sem_action, types.FunctionType):
retval = sem_action(self, self.parse_tree, children)
else:
retval = sem_action.first_pass(self, self.parse_tree, children)
if hasattr(sem_action, 'second_pass'):
for_second_pass.append((self.parse_tree.rule_name, retval))
if self.debug:
action_name = sem_action.__name__ if hasattr(sem_action, '__name__') else sem_action.__class__.__name__
self.dprint(' Applying semantic action %s' % action_name)
elif defaults:
if self.debug:
self.dprint(' Applying default semantic action.')
retval = SemanticAction().first_pass(self, self.parse_tree, children)
else:
retval = self.parse_tree
if self.debug:
if retval is None:
self.dprint(' Suppressed.')
else:
self.dprint(' Resolved to = %s type:%s' % (text(retval), type(retval).__name__))
asg = retval
</DeepExtract>
if self.debug:
<DeepExtract>
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, 'ASG: Second pass'), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
</DeepExtract>
for (sa_name, asg_node) in for_second_pass:
sem_actions[sa_name].second_pass(self, asg_node)
return asg
|
def getASG(self, sem_actions=None, defaults=True):
"""
Creates Abstract Semantic Graph (ASG) from the parse tree.
Args:
sem_actions (dict): The semantic actions dictionary to use for
semantic analysis. Rule names are the keys and semantic action
objects are values.
defaults (bool): If True a default semantic action will be
applied in case no action is defined for the node.
"""
if not self.parse_tree:
raise Exception("Parse tree is empty. You did call parse(), didn't you?")
if sem_actions is None:
if not self.sem_actions:
raise Exception('Semantic actions not defined.')
else:
sem_actions = self.sem_actions
if type(sem_actions) is not dict:
raise Exception('Semantic actions parameter must be a dictionary.')
for_second_pass = []
def tree_walk(node):
"""
Walking the parse tree and calling first_pass for every registered
semantic actions and creating list of object that needs to be
called in the second pass.
"""
if self.debug:
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, 'Walking down %s type: %s str: %s' % (node.name, type(node).__name__, text(node))), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
children = SemanticActionResults()
if isinstance(node, NonTerminal):
for n in node:
if self.debug:
self.dprint('Walking down %s type: %s str: %s' % (n.name, type(n).__name__, text(n)))
children = SemanticActionResults()
if isinstance(n, NonTerminal):
for n in n:
child = tree_walk(n)
if child is not None:
children.append_result(n.rule_name, child)
if self.debug:
self.dprint("Processing %s = '%s' type:%s len:%d" % (n.name, text(n), type(n).__name__, len(n) if isinstance(n, list) else 0))
for (i, a) in enumerate(children):
self.dprint(' %d:%s type:%s' % (i + 1, text(a), type(a).__name__))
if n.rule_name in sem_actions:
sem_action = sem_actions[n.rule_name]
if isinstance(sem_action, types.FunctionType):
retval = sem_action(self, n, children)
else:
retval = sem_action.first_pass(self, n, children)
if hasattr(sem_action, 'second_pass'):
for_second_pass.append((n.rule_name, retval))
if self.debug:
action_name = sem_action.__name__ if hasattr(sem_action, '__name__') else sem_action.__class__.__name__
self.dprint(' Applying semantic action %s' % action_name)
elif defaults:
if self.debug:
self.dprint(' Applying default semantic action.')
retval = SemanticAction().first_pass(self, n, children)
else:
retval = n
if self.debug:
if retval is None:
self.dprint(' Suppressed.')
else:
self.dprint(' Resolved to = %s type:%s' % (text(retval), type(retval).__name__))
child = retval
if child is not None:
children.append_result(n.rule_name, child)
if self.debug:
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, "Processing %s = '%s' type:%s len:%d" % (node.name, text(node), type(node).__name__, len(node) if isinstance(node, list) else 0)), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
for (i, a) in enumerate(children):
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, ' %d:%s type:%s' % (i + 1, text(a), type(a).__name__)), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
if node.rule_name in sem_actions:
sem_action = sem_actions[node.rule_name]
if isinstance(sem_action, types.FunctionType):
retval = sem_action(self, node, children)
else:
retval = sem_action.first_pass(self, node, children)
if hasattr(sem_action, 'second_pass'):
for_second_pass.append((node.rule_name, retval))
if self.debug:
action_name = sem_action.__name__ if hasattr(sem_action, '__name__') else sem_action.__class__.__name__
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, ' Applying semantic action %s' % action_name), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
elif defaults:
if self.debug:
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, ' Applying default semantic action.'), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
retval = SemanticAction().first_pass(self, node, children)
else:
retval = node
if self.debug:
if retval is None:
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, ' Suppressed.'), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
else:
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, ' Resolved to = %s type:%s' % (text(retval), type(retval).__name__)), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
return retval
if self.debug:
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, 'ASG: First pass'), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
if self.debug:
self.dprint('Walking down %s type: %s str: %s' % (self.parse_tree.name, type(self.parse_tree).__name__, text(self.parse_tree)))
children = SemanticActionResults()
if isinstance(self.parse_tree, NonTerminal):
for n in self.parse_tree:
child = tree_walk(n)
if child is not None:
children.append_result(n.rule_name, child)
if self.debug:
self.dprint("Processing %s = '%s' type:%s len:%d" % (self.parse_tree.name, text(self.parse_tree), type(self.parse_tree).__name__, len(self.parse_tree) if isinstance(self.parse_tree, list) else 0))
for (i, a) in enumerate(children):
self.dprint(' %d:%s type:%s' % (i + 1, text(a), type(a).__name__))
if self.parse_tree.rule_name in sem_actions:
sem_action = sem_actions[self.parse_tree.rule_name]
if isinstance(sem_action, types.FunctionType):
retval = sem_action(self, self.parse_tree, children)
else:
retval = sem_action.first_pass(self, self.parse_tree, children)
if hasattr(sem_action, 'second_pass'):
for_second_pass.append((self.parse_tree.rule_name, retval))
if self.debug:
action_name = sem_action.__name__ if hasattr(sem_action, '__name__') else sem_action.__class__.__name__
self.dprint(' Applying semantic action %s' % action_name)
elif defaults:
if self.debug:
self.dprint(' Applying default semantic action.')
retval = SemanticAction().first_pass(self, self.parse_tree, children)
else:
retval = self.parse_tree
if self.debug:
if retval is None:
self.dprint(' Suppressed.')
else:
self.dprint(' Resolved to = %s type:%s' % (text(retval), type(retval).__name__))
asg = retval
if self.debug:
if indent_change < 0:
self._current_indent += indent_change
print('%s%s' % (' ' * self._current_indent, 'ASG: Second pass'), file=self.file)
if indent_change > 0:
self._current_indent += indent_change
for (sa_name, asg_node) in for_second_pass:
sem_actions[sa_name].second_pass(self, asg_node)
return asg
|
Arpeggio
|
positive
|
def left_move_2site(n, norm_est):
if debug:
<DeepExtract>
print('Check central: ', n + 1)
mps = cp.deepcopy(mps)
mps.calc_l()
mps.calc_r()
nums = []
errs = []
for m in range(0, n + 1):
err = la.norm(mps.l[m] - sp.eye(mps.l[m].shape[0]))
if err > 1e-06:
nums.append(m)
errs.append(err)
for m in range(n + 1, mps.N + 1):
err = la.norm(mps.r[m] - sp.eye(mps.r[m].shape[0]))
if err > 1e-06:
nums.append(m)
errs.append(err)
print(nums)
print(errs)
</DeepExtract>
if n == 0:
return (norm_est, None, None, 0.0)
if DMRG:
<DeepExtract>
AAnm2 = None
AAnp2 = None
lop = Vari_Opt_Two_Site_Op(mps, n, AAnm2, AAnp2, ham, ham_sites, KL[n - 1], KR[n + 2], HML=HML[n - 1], HMR=HMR[n], HMn=HM[n], use_local_ham=use_local_ham, sanity_checks=mps.sanity_checks)
AAn_old = tm.calc_AA(mps.A[n], mps.A[n + 1]).ravel()
if ham_is_Herm:
(evs, eVs) = las.eigsh(lop, k=1, which='SA', sigma=None, v0=AAn_old.ravel(), ncv=ncv, tol=tol)
else:
(evs, eVs) = las.eigs(lop, k=1, which='SA', sigma=None, v0=AAn_old.ravel(), ncv=ncv, tol=tol)
AAn = eVs[:, 0].reshape((mps.q[n], mps.q[n + 1], mps.D[n - 1], mps.D[n + 1]))
(An, G, Anp1, s_rest) = split_twosite(AAn, D_max, min_schmidt)
trunc_err = la.norm(s_rest) if len(s_rest) > 0 else 0.0
G /= sp.sqrt(mm.adot(G, G))
D_new = G.shape[0]
if False:
for s in range(mps.q[n + 1]):
Anp1[s] = G.dot(Anp1[s])
else:
for s in range(mps.q[n]):
An[s] = An[s].dot(G)
mps.D[n] = D_new
mps.A[n] = An
mps.A[n + 1] = Anp1
terr = trunc_err
</DeepExtract>
expm_info_AA = None
else:
<DeepExtract>
AAnm2 = None
AAnp2 = None
lop = Vari_Opt_Two_Site_Op(mps, n, AAnm2, AAnp2, ham, ham_sites, KL[n - 1], KR[n + 2], tau=fac, HML=HML[n - 1], HMR=HMR[n], HMn=HM[n], use_local_ham=use_local_ham, sanity_checks=mps.sanity_checks)
AAn_old = tm.calc_AA(mps.A[n], mps.A[n + 1]).ravel()
if debug:
print(n, sp.inner(AAn_old.conj(), lop.matvec(AAn_old)) / sp.inner(AAn_old.conj(), AAn_old), sp.inner(AAn_old.conj(), AAn_old), 'evolve_AA')
AAn = AAn_old
expm_info = {}
else:
if calc_norm_est:
nres = lop.matvec(sp.asarray(sp.randn(len(AAn_old)), dtype=AAn_old.dtype))
norm_est = max(norm_est, la.norm(nres, ord=sp.inf))
ncv_AAn = min(ncv, len(AAn_old) - 1)
(AAn, conv, nstep, brkdown, mb, err) = gexpmv(lop, AAn_old, dtau / 2.0, norm_est, m=ncv_AAn, tol=tol, mxstep=expm_max_steps)
expm_info = {'converged': conv, 'max_error': err[0], 'summed_error': err[1], 'num_krylov': mb, 'num_steps': nstep}
if not conv:
log.warn('Krylov exp(M)*v solver for AAn did not converge in %u steps for site %u.', nstep, n)
AAn = AAn.reshape([mps.q[n], mps.q[n + 1], mps.D[n - 1], mps.D[n + 1]])
(An, G, Anp1, s_rest) = split_twosite(AAn, D_max, min_schmidt)
trunc_err = la.norm(s_rest) if len(s_rest) > 0 else 0.0
G /= sp.sqrt(mm.adot(G, G))
D_new = G.shape[0]
if False:
for s in range(mps.q[n + 1]):
Anp1[s] = G.dot(Anp1[s])
else:
for s in range(mps.q[n]):
An[s] = An[s].dot(G)
mps.D[n] = D_new
mps.A[n] = An
mps.A[n + 1] = Anp1
(norm_est, expm_info_AA, terr) = (norm_est, expm_info, trunc_err)
</DeepExtract>
<DeepExtract>
Anp1 = mps.get_A(n + 1 + 1)
if ham_sites == 2 and Anp1 is not None:
AAn = tm.calc_AA(mps.A[n + 1], Anp1)
Cn = tm.calc_C_mat_op_AA(ham[n + 1], AAn)
KRnp1 = KR[n + 1 + 1]
if KRnp1 is None:
KRnp1 = sp.zeros((mps.D[n + 1], mps.D[n + 1]), dtype=mps.typ)
rnp1 = mm.eyemat(Anp1.shape[2], dtype=mps.typ)
(KR[n + 1], _) = tm.calc_K(KRnp1, Cn, None, rnp1, mps.A[n + 1], AAn)
if ham_sites == 3:
Anp2 = mps.get_A(n + 1 + 2)
if Anp2 is not None:
AAAn = tm.calc_AAA(mps.A[n + 1], Anp1, Anp2)
Cn = tm.calc_C_3s_mat_op_AAA(ham[n + 1], AAAn)
KRnp1 = KR[n + 1 + 1]
if KRnp1 is None:
KRnp1 = sp.zeros((mps.D[n + 1], mps.D[n + 1]), dtype=mps.typ)
rnp2 = mm.eyemat(Anp2.shape[2], dtype=mps.typ)
(KR[n + 1], _) = tm.calc_K_3s(KRnp1, Cn, None, rnp2, mps.A[n + 1], AAAn)
if not HMPO is None:
HMA[n + 1] = tm.apply_MPO_local(HM[n + 1], mps.A[n + 1])
HMR[n + 1 - 1] = mps.calc_MPO_rm1(HMA[n + 1], n + 1, HMR[n + 1])
</DeepExtract>
if n == 1:
<DeepExtract>
Anp1 = mps.get_A(1 + 1)
if ham_sites == 2 and Anp1 is not None:
AAn = tm.calc_AA(mps.A[1], Anp1)
Cn = tm.calc_C_mat_op_AA(ham[1], AAn)
KRnp1 = KR[1 + 1]
if KRnp1 is None:
KRnp1 = sp.zeros((mps.D[1], mps.D[1]), dtype=mps.typ)
rnp1 = mm.eyemat(Anp1.shape[2], dtype=mps.typ)
(KR[1], _) = tm.calc_K(KRnp1, Cn, None, rnp1, mps.A[1], AAn)
if ham_sites == 3:
Anp2 = mps.get_A(1 + 2)
if Anp2 is not None:
AAAn = tm.calc_AAA(mps.A[1], Anp1, Anp2)
Cn = tm.calc_C_3s_mat_op_AAA(ham[1], AAAn)
KRnp1 = KR[1 + 1]
if KRnp1 is None:
KRnp1 = sp.zeros((mps.D[1], mps.D[1]), dtype=mps.typ)
rnp2 = mm.eyemat(Anp2.shape[2], dtype=mps.typ)
(KR[1], _) = tm.calc_K_3s(KRnp1, Cn, None, rnp2, mps.A[1], AAAn)
if not HMPO is None:
HMA[1] = tm.apply_MPO_local(HM[1], mps.A[1])
HMR[1 - 1] = mps.calc_MPO_rm1(HMA[1], 1, HMR[1])
</DeepExtract>
if not DMRG and n > 1:
<DeepExtract>
AAnm2 = None
AAnp1 = None
lop = Vari_Opt_Single_Site_Op(mps, n, AAnm2, AAnp1, ham, ham_sites, KL[n - 1], KR[n + 1], tau=fac, HML=HML[n - 1], HMR=HMR[n], HMn=HM[n], use_local_ham=use_local_ham, sanity_checks=mps.sanity_checks)
An_old = mps.A[n].ravel()
if debug:
print(n, sp.inner(An_old.conj(), lop.matvec(An_old)) / sp.inner(An_old.conj(), An_old), sp.inner(An_old.conj(), An_old), 'evolve_A')
(_, expm_info_A) = (norm_est, {})
if calc_norm_est:
nres = lop.matvec(sp.asarray(sp.randn(len(An_old)), dtype=An_old.dtype))
norm_est = max(norm_est, la.norm(nres, ord=sp.inf))
ncv_An = min(ncv, len(An_old) - 1)
(An, conv, nstep, brkdown, mb, err) = gexpmv(lop, An_old, -dtau / 2.0, norm_est, m=ncv_An, tol=tol, mxstep=expm_max_steps)
expm_info = {'converged': conv, 'max_error': err[0], 'summed_error': err[1], 'num_krylov': mb, 'num_steps': nstep}
if not conv:
log.warn('Krylov exp(M)*v solver for An did not converge in %u steps for site %u.', nstep, n)
mps.A[n] = An.reshape((mps.q[n], mps.D[n - 1], mps.D[n]))
mps.A[n] /= sp.sqrt(mm.adot(mps.A[n], mps.A[n]))
(_, expm_info_A) = (norm_est, expm_info)
</DeepExtract>
else:
expm_info_A = None
return (norm_est, expm_info_AA, expm_info_A, terr)
|
def left_move_2site(n, norm_est):
if debug:
print('Check central: ', n + 1)
# Run the canonical-form check on a deep copy; rebinding 'mps' here would
# make it a local name for the whole function and break every later use
# (UnboundLocalError) whenever debug is False.
mps_chk = cp.deepcopy(mps)
mps_chk.calc_l()
mps_chk.calc_r()
nums = []
errs = []
for m in range(0, n + 1):
err = la.norm(mps_chk.l[m] - sp.eye(mps_chk.l[m].shape[0]))
if err > 1e-06:
nums.append(m)
errs.append(err)
for m in range(n + 1, mps_chk.N + 1):
err = la.norm(mps_chk.r[m] - sp.eye(mps_chk.r[m].shape[0]))
if err > 1e-06:
nums.append(m)
errs.append(err)
print(nums)
print(errs)
if n == 0:
return (norm_est, None, None, 0.0)
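# DMRG branch: variationally optimize the two-site block by computing the
# lowest eigenpair of the effective two-site operator built from KL[n-1],
# KR[n+2] and the local Hamiltonian terms.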
if DMRG:
AAnm2 = None
AAnp2 = None
lop = Vari_Opt_Two_Site_Op(mps, n, AAnm2, AAnp2, ham, ham_sites, KL[n - 1], KR[n + 2], HML=HML[n - 1], HMR=HMR[n], HMn=HM[n], use_local_ham=use_local_ham, sanity_checks=mps.sanity_checks)
AAn_old = tm.calc_AA(mps.A[n], mps.A[n + 1]).ravel()
if ham_is_Herm:
(evs, eVs) = las.eigsh(lop, k=1, which='SA', sigma=None, v0=AAn_old.ravel(), ncv=ncv, tol=tol)
else:
(evs, eVs) = las.eigs(lop, k=1, which='SR', sigma=None, v0=AAn_old.ravel(), ncv=ncv, tol=tol)  # 'SA' is only accepted by eigsh; 'SR' (smallest real part) is the non-Hermitian analogue
AAn = eVs[:, 0].reshape((mps.q[n], mps.q[n + 1], mps.D[n - 1], mps.D[n + 1]))
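# Split the optimized two-site tensor back into A[n] and A[n+1], keeping at
# most D_max Schmidt vectors and discarding Schmidt values below min_schmidt;
# the discarded weight s_rest gives the truncation error below.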
(An, G, Anp1, s_rest) = split_twosite(AAn, D_max, min_schmidt)
trunc_err = la.norm(s_rest) if len(s_rest) > 0 else 0.0
G /= sp.sqrt(mm.adot(G, G))
D_new = G.shape[0]
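# Absorb the normalized centre matrix G into A[n] so the orthogonality centre
# ends up on site n (the disabled branch would absorb it into A[n+1] instead).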
if False:
for s in range(mps.q[n + 1]):
Anp1[s] = G.dot(Anp1[s])
else:
for s in range(mps.q[n]):
An[s] = An[s].dot(G)
mps.D[n] = D_new
mps.A[n] = An
mps.A[n + 1] = Anp1
terr = trunc_err
expm_info_AA = None
else:
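# TDVP branch: evolve the two-site tensor under the effective two-site
# operator for half a time step using the Krylov exp(M)*v solver (gexpmv).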
AAnm2 = None
AAnp2 = None
lop = Vari_Opt_Two_Site_Op(mps, n, AAnm2, AAnp2, ham, ham_sites, KL[n - 1], KR[n + 2], tau=fac, HML=HML[n - 1], HMR=HMR[n], HMn=HM[n], use_local_ham=use_local_ham, sanity_checks=mps.sanity_checks)
AAn_old = tm.calc_AA(mps.A[n], mps.A[n + 1]).ravel()
if debug:
print(n, sp.inner(AAn_old.conj(), lop.matvec(AAn_old)) / sp.inner(AAn_old.conj(), AAn_old), sp.inner(AAn_old.conj(), AAn_old), 'evolve_AA')
AAn = AAn_old
expm_info = {}
else:
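# Refresh the rough operator-norm estimate (used to control the Krylov
# integrator) by applying the effective operator to a random vector.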
if calc_norm_est:
nres = lop.matvec(sp.asarray(sp.randn(len(AAn_old)), dtype=AAn_old.dtype))
norm_est = max(norm_est, la.norm(nres, ord=sp.inf))
ncv_AAn = min(ncv, len(AAn_old) - 1)
(AAn, conv, nstep, brkdown, mb, err) = gexpmv(lop, AAn_old, dtau / 2.0, norm_est, m=ncv_AAn, tol=tol, mxstep=expm_max_steps)
expm_info = {'converged': conv, 'max_error': err[0], 'summed_error': err[1], 'num_krylov': mb, 'num_steps': nstep}
if not conv:
log.warn('Krylov exp(M)*v solver for AAn did not converge in %u steps for site %u.', nstep, n)
AAn = AAn.reshape([mps.q[n], mps.q[n + 1], mps.D[n - 1], mps.D[n + 1]])
(An, G, Anp1, s_rest) = split_twosite(AAn, D_max, min_schmidt)
trunc_err = la.norm(s_rest) if len(s_rest) > 0 else 0.0
G /= sp.sqrt(mm.adot(G, G))
D_new = G.shape[0]
if False:
for s in range(mps.q[n + 1]):
Anp1[s] = G.dot(Anp1[s])
else:
for s in range(mps.q[n]):
An[s] = An[s].dot(G)
mps.D[n] = D_new
mps.A[n] = An
mps.A[n + 1] = Anp1
(norm_est, expm_info_AA, terr) = (norm_est, expm_info, trunc_err)
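# With A[n] and A[n+1] updated, rebuild the right Hamiltonian block KR[n+1]
# (and the MPO environments HMA/HMR if an MPO Hamiltonian is supplied) from
# the new tensor at site n+1.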
Anp1 = mps.get_A(n + 1 + 1)
if ham_sites == 2 and Anp1 is not None:
AAn = tm.calc_AA(mps.A[n + 1], Anp1)
Cn = tm.calc_C_mat_op_AA(ham[n + 1], AAn)
KRnp1 = KR[n + 1 + 1]
if KRnp1 is None:
KRnp1 = sp.zeros((mps.D[n + 1], mps.D[n + 1]), dtype=mps.typ)
rnp1 = mm.eyemat(Anp1.shape[2], dtype=mps.typ)
(KR[n + 1], _) = tm.calc_K(KRnp1, Cn, None, rnp1, mps.A[n + 1], AAn)
if ham_sites == 3:
Anp2 = mps.get_A(n + 1 + 2)
if Anp2 is not None:
AAAn = tm.calc_AAA(mps.A[n + 1], Anp1, Anp2)
Cn = tm.calc_C_3s_mat_op_AAA(ham[n + 1], AAAn)
KRnp1 = KR[n + 1 + 1]
if KRnp1 is None:
KRnp1 = sp.zeros((mps.D[n + 1], mps.D[n + 1]), dtype=mps.typ)
rnp2 = mm.eyemat(Anp2.shape[2], dtype=mps.typ)
(KR[n + 1], _) = tm.calc_K_3s(KRnp1, Cn, None, rnp2, mps.A[n + 1], AAAn)
if HMPO is not None:
HMA[n + 1] = tm.apply_MPO_local(HM[n + 1], mps.A[n + 1])
HMR[n + 1 - 1] = mps.calc_MPO_rm1(HMA[n + 1], n + 1, HMR[n + 1])
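# At the left boundary (n == 1) also rebuild KR[1] from the updated site-1
# tensor so the right environments are complete when the sweep finishes.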
if n == 1:
Anp1 = mps.get_A(1 + 1)
if ham_sites == 2 and Anp1 is not None:
AAn = tm.calc_AA(mps.A[1], Anp1)
Cn = tm.calc_C_mat_op_AA(ham[1], AAn)
KRnp1 = KR[1 + 1]
if KRnp1 is None:
KRnp1 = sp.zeros((mps.D[1], mps.D[1]), dtype=mps.typ)
rnp1 = mm.eyemat(Anp1.shape[2], dtype=mps.typ)
(KR[1], _) = tm.calc_K(KRnp1, Cn, None, rnp1, mps.A[1], AAn)
if ham_sites == 3:
Anp2 = mps.get_A(1 + 2)
if Anp2 is not None:
AAAn = tm.calc_AAA(mps.A[1], Anp1, Anp2)
Cn = tm.calc_C_3s_mat_op_AAA(ham[1], AAAn)
KRnp1 = KR[1 + 1]
if KRnp1 is None:
KRnp1 = sp.zeros((mps.D[1], mps.D[1]), dtype=mps.typ)
rnp2 = mm.eyemat(Anp2.shape[2], dtype=mps.typ)
(KR[1], _) = tm.calc_K_3s(KRnp1, Cn, None, rnp2, mps.A[1], AAAn)
if HMPO is not None:
HMA[1] = tm.apply_MPO_local(HM[1], mps.A[1])
HMR[1 - 1] = mps.calc_MPO_rm1(HMA[1], 1, HMR[1])
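# Backward half-step of the split-step scheme: evolve the single-site tensor
# A[n] with the opposite sign of the time step (-dtau/2) before moving to the
# next bond (skipped in DMRG mode and at the left boundary).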
if not DMRG and n > 1:
AAnm2 = None
AAnp1 = None
lop = Vari_Opt_Single_Site_Op(mps, n, AAnm2, AAnp1, ham, ham_sites, KL[n - 1], KR[n + 1], tau=fac, HML=HML[n - 1], HMR=HMR[n], HMn=HM[n], use_local_ham=use_local_ham, sanity_checks=mps.sanity_checks)
An_old = mps.A[n].ravel()
if debug:
print(n, sp.inner(An_old.conj(), lop.matvec(An_old)) / sp.inner(An_old.conj(), An_old), sp.inner(An_old.conj(), An_old), 'evolve_A')
(_, expm_info_A) = (norm_est, {})
if calc_norm_est:
nres = lop.matvec(sp.asarray(sp.randn(len(An_old)), dtype=An_old.dtype))
norm_est = max(norm_est, la.norm(nres, ord=sp.inf))
ncv_An = min(ncv, len(An_old) - 1)
(An, conv, nstep, brkdown, mb, err) = gexpmv(lop, An_old, -dtau / 2.0, norm_est, m=ncv_An, tol=tol, mxstep=expm_max_steps)
expm_info = {'converged': conv, 'max_error': err[0], 'summed_error': err[1], 'num_krylov': mb, 'num_steps': nstep}
if not conv:
log.warn('Krylov exp(M)*v solver for An did not converge in %u steps for site %u.', nstep, n)
mps.A[n] = An.reshape((mps.q[n], mps.D[n - 1], mps.D[n]))
mps.A[n] /= sp.sqrt(mm.adot(mps.A[n], mps.A[n]))
(_, expm_info_A) = (norm_est, expm_info)
else:
expm_info_A = None
return (norm_est, expm_info_AA, expm_info_A, terr)
|
evoMPS
|
positive
|