| before (stringlengths 0–955k) | after (stringlengths 0–877k) | repo (stringlengths 1–74) | type (stringclasses 1 value) |
|---|---|---|---|
def main():
<DeepExtract>
kwargs = {'n_singlets': 10, 'n_triplets': 10}
overwrite = {'adc2': {'n_singlets': 9, 'n_triplets': 10}}
dump_all('h2o_sto3g', kwargs, overwrite, spec='gen')
kwargs = {'n_singlets': 3, 'n_triplets': 3}
overwrite = {'adc0': {'n_singlets': 2, 'n_triplets': 2}, 'adc1': {'n_singlets': 2, 'n_triplets': 2}}
dump_all('h2o_sto3g', kwargs, overwrite, spec='cvs')
case = 'h2o_sto3g'
kwargs = {'n_singlets': 3, 'n_triplets': 3}
dump_method(case, 'adc2', kwargs, spec='fc')
dump_method(case, 'adc2', kwargs, spec='fc-fv')
dump_method(case, 'adc2x', kwargs, spec='fv')
dump_method(case, 'adc2x', kwargs, spec='fv-cvs')
</DeepExtract>
<DeepExtract>
kwargs = {'n_singlets': 3, 'n_triplets': 3, 'n_guess_singles': 6, 'max_subspace': 24}
dump_all('h2o_def2tzvp', kwargs, spec='gen')
dump_all('h2o_def2tzvp', kwargs, spec='cvs')
</DeepExtract>
<DeepExtract>
dump_all('cn_sto3g', {'n_states': 8, 'n_guess_singles': 10}, spec='gen')
dump_all('cn_sto3g', {'n_states': 6, 'n_guess_singles': 7}, spec='cvs')
case = 'cn_sto3g'
dump_method(case, 'adc2', {'n_states': 4, 'n_guess_singles': 12, 'max_subspace': 30}, spec='fc')
dump_method(case, 'adc2', {'n_states': 4, 'n_guess_singles': 14, 'max_subspace': 30}, spec='fc-fv')
dump_method(case, 'adc2x', {'n_states': 4, 'n_guess_singles': 8}, spec='fv')
dump_method(case, 'adc2x', {'n_states': 4}, spec='fv-cvs')
</DeepExtract>
<DeepExtract>
kwargs = {'n_states': 5, 'n_guess_singles': 7}
overwrite = {'adc1': {'n_states': 4, 'n_guess_singles': 8}}
dump_all('cn_ccpvdz', kwargs, overwrite, spec='gen')
dump_all('cn_ccpvdz', kwargs, spec='cvs')
</DeepExtract>
<DeepExtract>
dump_all('hf3_631g', {'n_spin_flip': 9}, spec='gen')
</DeepExtract>
<DeepExtract>
case = 'h2s_sto3g'
kwargs = {'n_singlets': 3, 'n_triplets': 3}
dump_method(case, 'adc2', kwargs, spec='fc-cvs')
dump_method(case, 'adc2x', kwargs, spec='fc-fv-cvs')
</DeepExtract>
<DeepExtract>
case = 'h2s_6311g'
kwargs = {'n_singlets': 3, 'n_triplets': 3}
for spec in ['gen', 'fc', 'fv', 'fc-fv']:
dump_method(case, 'adc2', kwargs, spec=spec)
kwargs = {'n_singlets': 3, 'n_triplets': 3, 'n_guess_singles': 6, 'max_subspace': 60}
for spec in ['fv-cvs', 'fc-cvs', 'fc-fv-cvs']:
dump_method(case, 'adc2x', kwargs, spec=spec)
kwargs['n_guess_singles'] = 8
dump_method(case, 'adc2x', kwargs, spec='cvs')
</DeepExtract>
<DeepExtract>
kwargs = {'n_singlets': 2}
dump_all('methox_sto3g', kwargs, spec='gen', generator='adcc')
dump_all('methox_sto3g', kwargs, spec='cvs', generator='adcc')
</DeepExtract>
|
def main():
kwargs = {'n_singlets': 10, 'n_triplets': 10}
overwrite = {'adc2': {'n_singlets': 9, 'n_triplets': 10}}
dump_all('h2o_sto3g', kwargs, overwrite, spec='gen')
kwargs = {'n_singlets': 3, 'n_triplets': 3}
overwrite = {'adc0': {'n_singlets': 2, 'n_triplets': 2}, 'adc1': {'n_singlets': 2, 'n_triplets': 2}}
dump_all('h2o_sto3g', kwargs, overwrite, spec='cvs')
case = 'h2o_sto3g'
kwargs = {'n_singlets': 3, 'n_triplets': 3}
dump_method(case, 'adc2', kwargs, spec='fc')
dump_method(case, 'adc2', kwargs, spec='fc-fv')
dump_method(case, 'adc2x', kwargs, spec='fv')
dump_method(case, 'adc2x', kwargs, spec='fv-cvs')
kwargs = {'n_singlets': 3, 'n_triplets': 3, 'n_guess_singles': 6, 'max_subspace': 24}
dump_all('h2o_def2tzvp', kwargs, spec='gen')
dump_all('h2o_def2tzvp', kwargs, spec='cvs')
dump_all('cn_sto3g', {'n_states': 8, 'n_guess_singles': 10}, spec='gen')
dump_all('cn_sto3g', {'n_states': 6, 'n_guess_singles': 7}, spec='cvs')
case = 'cn_sto3g'
dump_method(case, 'adc2', {'n_states': 4, 'n_guess_singles': 12, 'max_subspace': 30}, spec='fc')
dump_method(case, 'adc2', {'n_states': 4, 'n_guess_singles': 14, 'max_subspace': 30}, spec='fc-fv')
dump_method(case, 'adc2x', {'n_states': 4, 'n_guess_singles': 8}, spec='fv')
dump_method(case, 'adc2x', {'n_states': 4}, spec='fv-cvs')
kwargs = {'n_states': 5, 'n_guess_singles': 7}
overwrite = {'adc1': {'n_states': 4, 'n_guess_singles': 8}}
dump_all('cn_ccpvdz', kwargs, overwrite, spec='gen')
dump_all('cn_ccpvdz', kwargs, spec='cvs')
dump_all('hf3_631g', {'n_spin_flip': 9}, spec='gen')
case = 'h2s_sto3g'
kwargs = {'n_singlets': 3, 'n_triplets': 3}
dump_method(case, 'adc2', kwargs, spec='fc-cvs')
dump_method(case, 'adc2x', kwargs, spec='fc-fv-cvs')
case = 'h2s_6311g'
kwargs = {'n_singlets': 3, 'n_triplets': 3}
for spec in ['gen', 'fc', 'fv', 'fc-fv']:
dump_method(case, 'adc2', kwargs, spec=spec)
kwargs = {'n_singlets': 3, 'n_triplets': 3, 'n_guess_singles': 6, 'max_subspace': 60}
for spec in ['fv-cvs', 'fc-cvs', 'fc-fv-cvs']:
dump_method(case, 'adc2x', kwargs, spec=spec)
kwargs['n_guess_singles'] = 8
dump_method(case, 'adc2x', kwargs, spec='cvs')
kwargs = {'n_singlets': 2}
dump_all('methox_sto3g', kwargs, spec='gen', generator='adcc')
dump_all('methox_sto3g', kwargs, spec='cvs', generator='adcc')
|
adcc
|
positive
|
def cutmix_data(input, target, gpu, cutmix_prob=0.5, alpha=1.0):
if random.uniform(0, 1) < cutmix_prob:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
rand_index = torch.randperm(input.size()[0]).cuda(gpu)
target_a = target
target_b = target[rand_index]
<DeepExtract>
W = input.size()[2]
H = input.size()[3]
cut_rat = np.sqrt(1.0 - lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
(bbx1, bby1, bbx2, bby2) = (bbx1, bby1, bbx2, bby2)
</DeepExtract>
input[:, :, bbx1:bbx2, bby1:bby2] = input[rand_index, :, bbx1:bbx2, bby1:bby2]
lam = 1 - (bbx2 - bbx1) * (bby2 - bby1) / (input.size()[-1] * input.size()[-2])
return (input, target_a, target_b, lam)
|
def cutmix_data(input, target, gpu, cutmix_prob=0.5, alpha=1.0):
if random.uniform(0, 1) < cutmix_prob:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
rand_index = torch.randperm(input.size()[0]).cuda(gpu)
target_a = target
target_b = target[rand_index]
W = input.size()[2]
H = input.size()[3]
cut_rat = np.sqrt(1.0 - lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
(bbx1, bby1, bbx2, bby2) = (bbx1, bby1, bbx2, bby2)
input[:, :, bbx1:bbx2, bby1:bby2] = input[rand_index, :, bbx1:bbx2, bby1:bby2]
lam = 1 - (bbx2 - bbx1) * (bby2 - bby1) / (input.size()[-1] * input.size()[-2])
return (input, target_a, target_b, lam)
|
cavaface.pytorch
|
positive
|
def create_inline_comment(webdriver, datasets):
page = random.choice(datasets[PAGES])
page_id = page[0]
datasets['create_comment_page'] = page
page = Page(webdriver, page_id=page_id)
@print_timing('selenium_create_comment')
def measure():
page.go_to()
page.wait_for_page_loaded()
edit_comment = Editor(webdriver)
@print_timing('selenium_create_comment:write_comment')
def sub_measure():
page.click_add_comment()
edit_comment.write_content(text='This is selenium comment')
<DeepExtract>
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
login_page.wait_for_page_loaded()
measure_dom_requests(webdriver, interaction='selenium_login:open_login_page')
webdriver.node_id = login_page.get_node_id()
print(f'node_id:{webdriver.node_id}')
</DeepExtract>
@print_timing('selenium_create_comment:save_comment')
def sub_measure():
edit_comment.click_submit()
page.wait_for_comment_field()
<DeepExtract>
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
login_page.wait_for_page_loaded()
measure_dom_requests(webdriver, interaction='selenium_login:open_login_page')
webdriver.node_id = login_page.get_node_id()
print(f'node_id:{webdriver.node_id}')
</DeepExtract>
<DeepExtract>
def sub_measure():
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
login_page.wait_for_page_loaded()
measure_dom_requests(webdriver, interaction='selenium_login:open_login_page')
webdriver.node_id = login_page.get_node_id()
print(f'node_id:{webdriver.node_id}')
sub_measure()
login_page.set_credentials(username=datasets['username'], password=datasets['password'])
def sub_measure():
login_page.click_login_button()
if login_page.is_first_login():
login_page.first_user_setup()
all_updates_page = AllUpdates(webdriver)
all_updates_page.wait_for_page_loaded()
measure_dom_requests(webdriver, interaction='selenium_login:login_and_view_dashboard')
if CONFLUENCE_SETTINGS.extended_metrics:
measure_browser_navi_metrics(webdriver, datasets, expected_metrics=browser_metrics['selenium_login'])
sub_measure()
</DeepExtract>
|
def create_inline_comment(webdriver, datasets):
page = random.choice(datasets[PAGES])
page_id = page[0]
datasets['create_comment_page'] = page
page = Page(webdriver, page_id=page_id)
@print_timing('selenium_create_comment')
def measure():
page.go_to()
page.wait_for_page_loaded()
edit_comment = Editor(webdriver)
@print_timing('selenium_create_comment:write_comment')
def sub_measure():
page.click_add_comment()
edit_comment.write_content(text='This is selenium comment')
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
login_page.wait_for_page_loaded()
measure_dom_requests(webdriver, interaction='selenium_login:open_login_page')
webdriver.node_id = login_page.get_node_id()
print(f'node_id:{webdriver.node_id}')
@print_timing('selenium_create_comment:save_comment')
def sub_measure():
edit_comment.click_submit()
page.wait_for_comment_field()
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
login_page.wait_for_page_loaded()
measure_dom_requests(webdriver, interaction='selenium_login:open_login_page')
webdriver.node_id = login_page.get_node_id()
print(f'node_id:{webdriver.node_id}')
def sub_measure():
login_page.go_to()
if login_page.is_logged_in():
login_page.delete_all_cookies()
login_page.go_to()
login_page.wait_for_page_loaded()
measure_dom_requests(webdriver, interaction='selenium_login:open_login_page')
webdriver.node_id = login_page.get_node_id()
print(f'node_id:{webdriver.node_id}')
sub_measure()
login_page.set_credentials(username=datasets['username'], password=datasets['password'])
def sub_measure():
login_page.click_login_button()
if login_page.is_first_login():
login_page.first_user_setup()
all_updates_page = AllUpdates(webdriver)
all_updates_page.wait_for_page_loaded()
measure_dom_requests(webdriver, interaction='selenium_login:login_and_view_dashboard')
if CONFLUENCE_SETTINGS.extended_metrics:
measure_browser_navi_metrics(webdriver, datasets, expected_metrics=browser_metrics['selenium_login'])
sub_measure()
|
dc-app-performance-toolkit
|
positive
|
def save(self, item):
<DeepExtract>
modifiers = []
if self.widget.controlButton.isChecked():
modifiers.append(iomediator.Key.CONTROL)
if self.widget.altButton.isChecked():
modifiers.append(iomediator.Key.ALT)
if self.widget.shiftButton.isChecked():
modifiers.append(iomediator.Key.SHIFT)
if self.widget.superButton.isChecked():
modifiers.append(iomediator.Key.SUPER)
if self.widget.hyperButton.isChecked():
modifiers.append(iomediator.Key.HYPER)
if self.widget.metaButton.isChecked():
modifiers.append(iomediator.Key.META)
modifiers.sort()
modifiers = modifiers
</DeepExtract>
keyText = self.key
if keyText in self.REVERSE_KEY_MAP:
key = self.REVERSE_KEY_MAP[keyText]
else:
key = keyText
assert key != None, 'Attempt to set hotkey with no key'
item.set_hotkey(modifiers, key)
|
def save(self, item):
modifiers = []
if self.widget.controlButton.isChecked():
modifiers.append(iomediator.Key.CONTROL)
if self.widget.altButton.isChecked():
modifiers.append(iomediator.Key.ALT)
if self.widget.shiftButton.isChecked():
modifiers.append(iomediator.Key.SHIFT)
if self.widget.superButton.isChecked():
modifiers.append(iomediator.Key.SUPER)
if self.widget.hyperButton.isChecked():
modifiers.append(iomediator.Key.HYPER)
if self.widget.metaButton.isChecked():
modifiers.append(iomediator.Key.META)
modifiers.sort()
modifiers = modifiers
keyText = self.key
if keyText in self.REVERSE_KEY_MAP:
key = self.REVERSE_KEY_MAP[keyText]
else:
key = keyText
assert key != None, 'Attempt to set hotkey with no key'
item.set_hotkey(modifiers, key)
|
autokey-python2
|
positive
|
def sample(self, model_output: torch.Tensor) -> torch.Tensor:
<DeepExtract>
pass
</DeepExtract>
return torch.bernoulli(model_p)
|
def sample(self, model_output: torch.Tensor) -> torch.Tensor:
pass
return torch.bernoulli(model_p)
|
darts
|
positive
|
def wavenumber_data_array(results: Sequence[LinearPotentialFlowResult]) -> xr.DataArray:
"""Read the wavenumbers in a list of :class:`LinearPotentialFlowResult`
and store them into a :class:`xarray.DataArray`.
"""
records = pd.DataFrame([dict(g=result.g, water_depth=result.depth, omega=result.omega, wavenumber=result.wavenumber) for result in results])
<DeepExtract>
for variable_name in ['wavenumber']:
records = records[records[variable_name].notnull()].dropna(axis='columns')
records = records.drop_duplicates()
records = records.set_index(['g', 'water_depth'] + ['omega'])
da = records.to_xarray()[['wavenumber']]
da = _squeeze_dimensions(da, dimensions=['g', 'water_depth'])
ds = da
</DeepExtract>
return ds['wavenumber']
|
def wavenumber_data_array(results: Sequence[LinearPotentialFlowResult]) -> xr.DataArray:
"""Read the wavenumbers in a list of :class:`LinearPotentialFlowResult`
and store them into a :class:`xarray.DataArray`.
"""
records = pd.DataFrame([dict(g=result.g, water_depth=result.depth, omega=result.omega, wavenumber=result.wavenumber) for result in results])
for variable_name in ['wavenumber']:
records = records[records[variable_name].notnull()].dropna(axis='columns')
records = records.drop_duplicates()
records = records.set_index(['g', 'water_depth'] + ['omega'])
da = records.to_xarray()[['wavenumber']]
da = _squeeze_dimensions(da, dimensions=['g', 'water_depth'])
ds = da
return ds['wavenumber']
|
capytaine
|
positive
|
def __init__(self, parent: wx.Window, canvas: 'EditCanvas', world: 'BaseLevel', options_path: str, operation: FixedOperationType, options: Dict[str, Any]):
wx.Panel.__init__(self, parent)
DefaultOperationUI.__init__(self, parent, canvas, world, options_path)
self._operation = operation
self.Hide()
self._sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self._sizer)
self._options_sizer = wx.BoxSizer(wx.VERTICAL)
self._sizer.Add(self._options_sizer)
self._run_button = wx.Button(self, label='Run Operation')
self._run_button.Bind(wx.EVT_BUTTON, self._run_operation)
self._sizer.Add(self._run_button, 0, wx.ALL | wx.ALIGN_CENTRE_HORIZONTAL, 5)
self._options: Dict[str, wx.Window] = {}
<DeepExtract>
create_functions: Dict[str, Callable[[str, Sequence], None]] = {'label': self._create_label, 'bool': self._create_bool, 'int': self._create_int, 'float': self._create_float, 'str': self._create_string, 'str_choice': self._create_str_choice, 'file_open': self._create_file_open_picker, 'file_save': self._create_file_save_picker, 'directory': self._create_directory_picker, 'button': self._create_button}
for (option_name, args) in options.items():
try:
(option_type, *args) = args
if option_type not in create_functions:
raise ValueError(f'Invalid option type {option_type}')
create_functions[option_type](option_name, *args)
except Exception as e:
log.exception(e)
</DeepExtract>
self.Layout()
self.Show()
|
def __init__(self, parent: wx.Window, canvas: 'EditCanvas', world: 'BaseLevel', options_path: str, operation: FixedOperationType, options: Dict[str, Any]):
wx.Panel.__init__(self, parent)
DefaultOperationUI.__init__(self, parent, canvas, world, options_path)
self._operation = operation
self.Hide()
self._sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self._sizer)
self._options_sizer = wx.BoxSizer(wx.VERTICAL)
self._sizer.Add(self._options_sizer)
self._run_button = wx.Button(self, label='Run Operation')
self._run_button.Bind(wx.EVT_BUTTON, self._run_operation)
self._sizer.Add(self._run_button, 0, wx.ALL | wx.ALIGN_CENTRE_HORIZONTAL, 5)
self._options: Dict[str, wx.Window] = {}
create_functions: Dict[str, Callable[[str, Sequence], None]] = {'label': self._create_label, 'bool': self._create_bool, 'int': self._create_int, 'float': self._create_float, 'str': self._create_string, 'str_choice': self._create_str_choice, 'file_open': self._create_file_open_picker, 'file_save': self._create_file_save_picker, 'directory': self._create_directory_picker, 'button': self._create_button}
for (option_name, args) in options.items():
try:
(option_type, *args) = args
if option_type not in create_functions:
raise ValueError(f'Invalid option type {option_type}')
create_functions[option_type](option_name, *args)
except Exception as e:
log.exception(e)
self.Layout()
self.Show()
|
Amulet-Map-Editor
|
positive
|
def compute_results(self, alignment, uniq_conf):
def _activity_grouper(rec):
return (rec.activity,)
<DeepExtract>
def _r(init, item):
(p, t, fa, m) = init
(factorization, recs) = item
f = {}
(det_points, tfa_det_points, fa_data, measures) = self.compute_det_points_and_measures(recs, lambda x: self.total_file_duration_minutes(recs), uniq_conf, self.scoring_parameters['activity.p_miss_at_rfa_targets'], self.scoring_parameters['activity.n_mide_at_rfa_targets'], self.scoring_parameters['activity.fa_at_rfa_targets'], self.scoring_parameters['wpmiss.denominator'], self.scoring_parameters['wpmiss.numerator'])
p['-'.join(factorization)] = det_points
f['-'.join(factorization)] = fa_data
t['-'.join(factorization)] = tfa_det_points
for k in f:
for i in f[k]:
ii = i[0] if 'e' in str(i[0]) else round(i[0], 5)
fa.append((k, ii, 'p_miss', i[2]))
fa.append((k, ii, 'rfa', i[1]))
fa.append((k, ii, 'tfa', i[3]))
fa.append((k, ii, 'tfa_denom', i[4]))
fa.append((k, ii, 'tfa_numer', i[5]))
for (_m, v) in measures.items():
m.append(factorization + (_m, v))
(out_det_points, out_tfa_det_points, out_fa_data, det_measures) = (p, t, fa, m)
grouped = merge_dicts({k: [] for k in self.default_activity_groups}, group_by_func(_activity_grouper, alignment))
_r_srlz = dill.dumps(_r)
args = []
for key in grouped:
args.append((_r_srlz, (key, grouped[key]), ({}, {}, [], [])))
with ProcessPoolExecutor(self.pn) as pool:
res = pool.map(unserialize_fct_res, args)
(p, t, fa, m) = ({}, {}, [], [])
for entry in res:
p.update(entry[0])
t.update(entry[1])
fa.extend(entry[2])
m.extend(entry[3])
(out_det_points, out_tfa_det_points, out_fa_data, det_measures) = (p, t, fa, m)
</DeepExtract>
def _empty_grouper(rec):
return tuple()
<DeepExtract>
_core_nmide = self.build_nmide_measure()
def _nmide(ars):
c = filter(lambda rec: rec.alignment == 'CD', ars)
ar_nmide_measure = _core_nmide(c, None, None)
ar_nmide_measure = _nmide
</DeepExtract>
activity_nmides = self.compute_aggregate_measures(alignment, _activity_grouper, [ar_nmide_measure], self.default_activity_groups)
activity_results = activity_nmides + det_measures
overall_nmide = self.compute_aggregate_measures(alignment, _empty_grouper, [ar_nmide_measure])
<DeepExtract>
raw_means = self.compute_means(activity_results, selected_measures)
def _r(init, r):
(f, m, v) = r
if m == 'mean':
init.append(('mean-{}'.format(f), v))
activity_means = init
activity_means = reduce(_r, raw_means, [])
</DeepExtract>
def _pair_arg_map(rec):
return (rec.ref, rec.sys)
pair_measures = [self.build_simple_measure(_pair_arg_map, 'temporal_intersection', temporal_intersection), self.build_simple_measure(_pair_arg_map, 'temporal_union', temporal_union), self.build_simple_measure(_pair_arg_map, 'temporal_fa', temporal_fa), self.build_simple_measure(_pair_arg_map, 'temporal_miss', temporal_miss), self.build_simple_measure(lambda x: (x.kernel_components.get('temporal_intersection-over-union'),), 'temporal_intersection-over-union', identity)]
(c, m, f) = partition_alignment(alignment)
def _pair_properties_map(rec):
return (rec.activity, rec.ref.activityID, rec.sys.activityID)
pair_results = self.compute_atomic_measures(c, _pair_properties_map, pair_measures)
def _align_rec_mapper(rec):
return (rec.activity,) + tuple(rec.iter_with_extended_properties(['temporal_intersection-over-union', 'presenceconf_congruence']))
output_alignment_records = map(_align_rec_mapper, alignment)
return {'pair_metrics': pair_results, 'scores_by_activity': activity_results, 'scores_aggregated': activity_means + overall_nmide, 'det_point_records': out_det_points, 'tfa_det_point_records': out_tfa_det_points, 'output_alignment_records': output_alignment_records, 'scores_by_activity_and_threshold': out_fa_data}
|
def compute_results(self, alignment, uniq_conf):
def _activity_grouper(rec):
return (rec.activity,)
def _r(init, item):
(p, t, fa, m) = init
(factorization, recs) = item
f = {}
(det_points, tfa_det_points, fa_data, measures) = self.compute_det_points_and_measures(recs, lambda x: self.total_file_duration_minutes(recs), uniq_conf, self.scoring_parameters['activity.p_miss_at_rfa_targets'], self.scoring_parameters['activity.n_mide_at_rfa_targets'], self.scoring_parameters['activity.fa_at_rfa_targets'], self.scoring_parameters['wpmiss.denominator'], self.scoring_parameters['wpmiss.numerator'])
p['-'.join(factorization)] = det_points
f['-'.join(factorization)] = fa_data
t['-'.join(factorization)] = tfa_det_points
for k in f:
for i in f[k]:
ii = i[0] if 'e' in str(i[0]) else round(i[0], 5)
fa.append((k, ii, 'p_miss', i[2]))
fa.append((k, ii, 'rfa', i[1]))
fa.append((k, ii, 'tfa', i[3]))
fa.append((k, ii, 'tfa_denom', i[4]))
fa.append((k, ii, 'tfa_numer', i[5]))
for (_m, v) in measures.items():
m.append(factorization + (_m, v))
(out_det_points, out_tfa_det_points, out_fa_data, det_measures) = (p, t, fa, m)
grouped = merge_dicts({k: [] for k in self.default_activity_groups}, group_by_func(_activity_grouper, alignment))
_r_srlz = dill.dumps(_r)
args = []
for key in grouped:
args.append((_r_srlz, (key, grouped[key]), ({}, {}, [], [])))
with ProcessPoolExecutor(self.pn) as pool:
res = pool.map(unserialize_fct_res, args)
(p, t, fa, m) = ({}, {}, [], [])
for entry in res:
p.update(entry[0])
t.update(entry[1])
fa.extend(entry[2])
m.extend(entry[3])
(out_det_points, out_tfa_det_points, out_fa_data, det_measures) = (p, t, fa, m)
def _empty_grouper(rec):
return tuple()
_core_nmide = self.build_nmide_measure()
def _nmide(ars):
c = filter(lambda rec: rec.alignment == 'CD', ars)
ar_nmide_measure = _core_nmide(c, None, None)
ar_nmide_measure = _nmide
activity_nmides = self.compute_aggregate_measures(alignment, _activity_grouper, [ar_nmide_measure], self.default_activity_groups)
activity_results = activity_nmides + det_measures
overall_nmide = self.compute_aggregate_measures(alignment, _empty_grouper, [ar_nmide_measure])
raw_means = self.compute_means(activity_results, selected_measures)
def _r(init, r):
(f, m, v) = r
if m == 'mean':
init.append(('mean-{}'.format(f), v))
activity_means = init
activity_means = reduce(_r, raw_means, [])
def _pair_arg_map(rec):
return (rec.ref, rec.sys)
pair_measures = [self.build_simple_measure(_pair_arg_map, 'temporal_intersection', temporal_intersection), self.build_simple_measure(_pair_arg_map, 'temporal_union', temporal_union), self.build_simple_measure(_pair_arg_map, 'temporal_fa', temporal_fa), self.build_simple_measure(_pair_arg_map, 'temporal_miss', temporal_miss), self.build_simple_measure(lambda x: (x.kernel_components.get('temporal_intersection-over-union'),), 'temporal_intersection-over-union', identity)]
(c, m, f) = partition_alignment(alignment)
def _pair_properties_map(rec):
return (rec.activity, rec.ref.activityID, rec.sys.activityID)
pair_results = self.compute_atomic_measures(c, _pair_properties_map, pair_measures)
def _align_rec_mapper(rec):
return (rec.activity,) + tuple(rec.iter_with_extended_properties(['temporal_intersection-over-union', 'presenceconf_congruence']))
output_alignment_records = map(_align_rec_mapper, alignment)
return {'pair_metrics': pair_results, 'scores_by_activity': activity_results, 'scores_aggregated': activity_means + overall_nmide, 'det_point_records': out_det_points, 'tfa_det_point_records': out_tfa_det_points, 'output_alignment_records': output_alignment_records, 'scores_by_activity_and_threshold': out_fa_data}
|
ActEV_Scorer
|
positive
|
def tablebyindex(filehandle, index):
"""fast extraction of the table using the index to identify the table
This function reads only one table from the HTML file. This is in contrast to `results.readhtml.titletable` that will read all the tables into memory and allows you to interactively look thru them. The function `results.readhtml.titletable` can be very slow on large HTML files.
This function is useful when you know which file you are looking for. It does not work with negative indices, like you can in a list. If you know a way to make negative indices work, do a pull request :-)
Parameters
----------
fhandle : file like object
A file handle to the E+ HTML table file
index: int
This is the index of the table you are looking for
Returns
-------
titleandtable : (str, list)
- (title, table)
- title = previous item with a <b> tag
- table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..]
"""
with filehandle:
tableindex = 0
for i in range(index + 1):
<DeepExtract>
lines = filehandle
tablelines = []
for line in lines:
line = _decodeline(line)
tablelines.append(line)
if line.strip().startswith('</table'):
break
thetable = ''.join(tablelines)
</DeepExtract>
filehandle = StringIO(thetable)
htables = readhtml.titletable(filehandle)
try:
return htables[0]
except IndexError as e:
None
|
def tablebyindex(filehandle, index):
"""fast extraction of the table using the index to identify the table
This function reads only one table from the HTML file. This is in contrast to `results.readhtml.titletable` that will read all the tables into memory and allows you to interactively look thru them. The function `results.readhtml.titletable` can be very slow on large HTML files.
This function is useful when you know which file you are looking for. It does not work with negative indices, like you can in a list. If you know a way to make negative indices work, do a pull request :-)
Parameters
----------
fhandle : file like object
A file handle to the E+ HTML table file
index: int
This is the index of the table you are looking for
Returns
-------
titleandtable : (str, list)
- (title, table)
- title = previous item with a <b> tag
- table = rows -> [[cell1, cell2, ..], [cell1, cell2, ..], ..]
"""
with filehandle:
tableindex = 0
for i in range(index + 1):
lines = filehandle
tablelines = []
for line in lines:
line = _decodeline(line)
tablelines.append(line)
if line.strip().startswith('</table'):
break
thetable = ''.join(tablelines)
filehandle = StringIO(thetable)
htables = readhtml.titletable(filehandle)
try:
return htables[0]
except IndexError as e:
None
|
eppy
|
positive
|
def _get_all_entities(self, query):
"""
Fetch all entities from source ES
"""
response = {'hits': [], 'total': 0, 'aggregations': {}}
url = self.source + '/' + self.es_alias + '/_search'
url = url + '?scroll=2m'
query['size'] = 10000
r = requests.post(url, json=query, timeout=30)
results = json.loads(r.content)
if 'error' not in results:
<DeepExtract>
data_new = []
total_records = 0
if 'hits' in results:
total_records = results['hits']['total']
for post in results['hits']['hits']:
data_new.append(post)
if '_scroll_id' in results:
scroll_size = len(results['hits']['hits'])
while scroll_size > 0:
scroll_id = results['_scroll_id']
scroll_req = {'scroll': '2m', 'scroll_id': scroll_id}
r = requests.post(self.source + '/_search/scroll', json=scroll_req, timeout=30)
results = json.loads(r.content)
for post in results['hits']['hits']:
data_new.append(post)
scroll_size = len(results['hits']['hits'])
derived_data = {'hits': data_new, 'total': total_records}
</DeepExtract>
response['hits'] = derived_data['hits']
response['total'] = derived_data['total']
if 'aggregations' in results:
response['aggregations'] = results['aggregations']
return response
|
def _get_all_entities(self, query):
"""
Fetch all entities from source ES
"""
response = {'hits': [], 'total': 0, 'aggregations': {}}
url = self.source + '/' + self.es_alias + '/_search'
url = url + '?scroll=2m'
query['size'] = 10000
r = requests.post(url, json=query, timeout=30)
results = json.loads(r.content)
if 'error' not in results:
data_new = []
total_records = 0
if 'hits' in results:
total_records = results['hits']['total']
for post in results['hits']['hits']:
data_new.append(post)
if '_scroll_id' in results:
scroll_size = len(results['hits']['hits'])
while scroll_size > 0:
scroll_id = results['_scroll_id']
scroll_req = {'scroll': '2m', 'scroll_id': scroll_id}
r = requests.post(self.source + '/_search/scroll', json=scroll_req, timeout=30)
results = json.loads(r.content)
for post in results['hits']['hits']:
data_new.append(post)
scroll_size = len(results['hits']['hits'])
derived_data = {'hits': data_new, 'total': total_records}
response['hits'] = derived_data['hits']
response['total'] = derived_data['total']
if 'aggregations' in results:
response['aggregations'] = results['aggregations']
return response
|
chatbot_ner
|
positive
|
def _after(self, ret, *args, **kw):
<DeepExtract>
args = self._resolve_args(*args, **kw)
values = {k: args[k] for k in self._basic_fields}
for (key, expression) in self._complex_fields.items():
values[key] = eval(expression, args)
if ret != NOTSET:
args['ret'] = ret
for (key, expression) in self._after_complex_fields.items():
values[key] = eval(expression, args)
data = values
</DeepExtract>
<DeepExtract>
for listener in data._after_listeners:
listener(**data)
</DeepExtract>
|
def _after(self, ret, *args, **kw):
args = self._resolve_args(*args, **kw)
values = {k: args[k] for k in self._basic_fields}
for (key, expression) in self._complex_fields.items():
values[key] = eval(expression, args)
if ret != NOTSET:
args['ret'] = ret
for (key, expression) in self._after_complex_fields.items():
values[key] = eval(expression, args)
data = values
for listener in data._after_listeners:
listener(**data)
|
decorated
|
positive
|
def iou_distance(atracks, btracks, frame_id=0, use_prediction=True):
"""
Compute cost based on IoU
:type atracks: list[STrack]
:type btracks: list[STrack]
:rtype cost_matrix np.ndarray
"""
if len(atracks) > 0 and isinstance(atracks[0], np.ndarray) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
atlbrs = atracks
btlbrs = btracks
else:
if use_prediction:
atlbrs = [track.prediction_at_frame_tlbr(frame_id) for track in atracks]
else:
atlbrs = [track.tlbr for track in atracks]
btlbrs = [track.tlbr for track in btracks]
<DeepExtract>
ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)
if ious.size == 0:
_ious = ious
ious = bbox_ious(np.ascontiguousarray(atlbrs, dtype=np.float), np.ascontiguousarray(btlbrs, dtype=np.float))
_ious = ious
</DeepExtract>
cost_matrix = 1 - _ious
return cost_matrix
|
def iou_distance(atracks, btracks, frame_id=0, use_prediction=True):
"""
Compute cost based on IoU
:type atracks: list[STrack]
:type btracks: list[STrack]
:rtype cost_matrix np.ndarray
"""
if len(atracks) > 0 and isinstance(atracks[0], np.ndarray) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
atlbrs = atracks
btlbrs = btracks
else:
if use_prediction:
atlbrs = [track.prediction_at_frame_tlbr(frame_id) for track in atracks]
else:
atlbrs = [track.tlbr for track in atracks]
btlbrs = [track.tlbr for track in btracks]
ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)
if ious.size == 0:
_ious = ious
ious = bbox_ious(np.ascontiguousarray(atlbrs, dtype=np.float), np.ascontiguousarray(btlbrs, dtype=np.float))
_ious = ious
cost_matrix = 1 - _ious
return cost_matrix
|
DEFT
|
positive
|
@classmethod
def create(cls, opt_func, lr, layer_groups, model, flat_master=False, loss_scale=512.0, **kwargs):
"""Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."""
opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs)
<DeepExtract>
split_groups = split_bn_bias(layer_groups)
model_params = [[param for param in lg.parameters() if param.requires_grad] for lg in split_groups]
if flat_master:
master_params = []
for lg in model_params:
if len(lg) != 0:
mp = parameters_to_vector([param.data.float() for param in lg])
mp = torch.nn.Parameter(mp, requires_grad=True)
if mp.grad is None:
mp.grad = mp.new(*mp.size())
master_params.append([mp])
else:
master_params.append([])
(opt.model_params, opt.master_params) = (model_params, master_params)
else:
master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
for mp in master_params:
for param in mp:
param.requires_grad = True
(opt.model_params, opt.master_params) = (model_params, master_params)
</DeepExtract>
opt.flat_master = flat_master
opt.loss_scale = loss_scale
opt.model = model
(mom, wd, beta) = (opt.mom, opt.wd, opt.beta)
lrs = [lr for lr in opt._lr for _ in range(2)]
opt_params = [{'params': mp, 'lr': lr} for (mp, lr) in zip(opt.master_params, lrs)]
opt.opt = opt_func(opt_params)
(opt.mom, opt.wd, opt.beta) = (mom, wd, beta)
return opt
|
@classmethod
def create(cls, opt_func, lr, layer_groups, model, flat_master=False, loss_scale=512.0, **kwargs):
"""Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."""
opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs)
split_groups = split_bn_bias(layer_groups)
model_params = [[param for param in lg.parameters() if param.requires_grad] for lg in split_groups]
if flat_master:
master_params = []
for lg in model_params:
if len(lg) != 0:
mp = parameters_to_vector([param.data.float() for param in lg])
mp = torch.nn.Parameter(mp, requires_grad=True)
if mp.grad is None:
mp.grad = mp.new(*mp.size())
master_params.append([mp])
else:
master_params.append([])
(opt.model_params, opt.master_params) = (model_params, master_params)
else:
master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
for mp in master_params:
for param in mp:
param.requires_grad = True
(opt.model_params, opt.master_params) = (model_params, master_params)
opt.flat_master = flat_master
opt.loss_scale = loss_scale
opt.model = model
(mom, wd, beta) = (opt.mom, opt.wd, opt.beta)
lrs = [lr for lr in opt._lr for _ in range(2)]
opt_params = [{'params': mp, 'lr': lr} for (mp, lr) in zip(opt.master_params, lrs)]
opt.opt = opt_func(opt_params)
(opt.mom, opt.wd, opt.beta) = (mom, wd, beta)
return opt
|
ebms_3dod
|
positive
|
def getOrganizationPolicyObjectsGroups(apiKey, organizationId, query=None):
url = '/organizations/' + str(organizationId) + '/policyObjects/groups'
<DeepExtract>
if p_retry > API_MAX_RETRIES:
if FLAG_REQUEST_VERBOSE:
print('ERROR: Reached max retries')
(success, errors, headers, response) = (False, None, None, None)
bearerString = 'Bearer ' + str(apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not query is None:
qArrayFix = {}
for item in query:
if isinstance(query[item], list):
qArrayFix['%s[]' % item] = query[item]
else:
qArrayFix[item] = query[item]
query = '?' + urlencode(qArrayFix, True)
url = API_BASE_URL + url + query
verb = 'get'.upper()
session = NoRebuildAuthSession()
verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}}
try:
if FLAG_REQUEST_VERBOSE:
print(verb, url)
if verb in verbs:
if verbs[verb]['hasBody'] and (not p_requestBody is None):
r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
(success, errors, headers, response) = (False, None, None, None)
except:
(success, errors, headers, response) = (False, None, None, None)
if FLAG_REQUEST_VERBOSE:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
retryInterval = API_RETRY_DEFAULT_WAIT
if 'Retry-After' in r.headers:
retryInterval = r.headers['Retry-After']
if 'retry-after' in r.headers:
retryInterval = r.headers['retry-after']
if FLAG_REQUEST_VERBOSE:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval))
time.sleep(int(retryInterval))
(success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'get', url, p_additionalHeaders, query, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1)
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if FLAG_REQUEST_VERBOSE:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if FLAG_REQUEST_VERBOSE:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
(success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'get', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
</DeepExtract>
return (success, errors, response)
|
def getOrganizationPolicyObjectsGroups(apiKey, organizationId, query=None):
url = '/organizations/' + str(organizationId) + '/policyObjects/groups'
if p_retry > API_MAX_RETRIES:
if FLAG_REQUEST_VERBOSE:
print('ERROR: Reached max retries')
(success, errors, headers, response) = (False, None, None, None)
bearerString = 'Bearer ' + str(apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not query is None:
qArrayFix = {}
for item in query:
if isinstance(query[item], list):
qArrayFix['%s[]' % item] = query[item]
else:
qArrayFix[item] = query[item]
query = '?' + urlencode(qArrayFix, True)
url = API_BASE_URL + url + query
verb = 'get'.upper()
session = NoRebuildAuthSession()
verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}}
try:
if FLAG_REQUEST_VERBOSE:
print(verb, url)
if verb in verbs:
if verbs[verb]['hasBody'] and (not p_requestBody is None):
r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
(success, errors, headers, response) = (False, None, None, None)
except:
(success, errors, headers, response) = (False, None, None, None)
if FLAG_REQUEST_VERBOSE:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
retryInterval = API_RETRY_DEFAULT_WAIT
if 'Retry-After' in r.headers:
retryInterval = r.headers['Retry-After']
if 'retry-after' in r.headers:
retryInterval = r.headers['retry-after']
if FLAG_REQUEST_VERBOSE:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval))
time.sleep(int(retryInterval))
(success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'get', url, p_additionalHeaders, query, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1)
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if FLAG_REQUEST_VERBOSE:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if FLAG_REQUEST_VERBOSE:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
(success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'get', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
return (success, errors, response)
|
automation-scripts
|
positive
|
def setup_filling_properties(obj):
if obj.Fill == 'None':
return
if obj.Fill == 'Preset Window':
window_presets.add_preset_window_properties(obj)
<DeepExtract>
for property in obj.PropertiesList:
if obj.getGroupOfProperty(property) != 'Component - Filling - Options' or property == 'FillType':
continue
obj.removeProperty(property)
window_presets.add_preset_window_subproperties(obj)
</DeepExtract>
if obj.Fill == 'Preset Door':
pass
|
def setup_filling_properties(obj):
if obj.Fill == 'None':
return
if obj.Fill == 'Preset Window':
window_presets.add_preset_window_properties(obj)
for property in obj.PropertiesList:
if obj.getGroupOfProperty(property) != 'Component - Filling - Options' or property == 'FillType':
continue
obj.removeProperty(property)
window_presets.add_preset_window_subproperties(obj)
if obj.Fill == 'Preset Door':
pass
|
BIM_Workbench
|
positive
|
def test_policy_page_with_correct_html(self):
<DeepExtract>
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
</DeepExtract>
response = client.post('/course/1/policy')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Comics Book Course', response.content)
self.assertIn(b'<h1>Policy and Grades</h1>', response.content)
|
def test_policy_page_with_correct_html(self):
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
response = client.post('/course/1/policy')
self.assertEqual(response.status_code, 200)
self.assertIn(b'Comics Book Course', response.content)
self.assertIn(b'<h1>Policy and Grades</h1>', response.content)
|
academicstoday-django
|
positive
|
def apply_segmentation(self, segmentation):
<DeepExtract>
(h, w) = self.output_size
ret = Image.fromarray(segmentation).transform(size=(w, h), method=Image.EXTENT, data=self.src_rect, resample=Image.NEAREST if Image.NEAREST else self.interp, fill=self.fill)
segmentation = np.asarray(ret)
</DeepExtract>
return segmentation
|
def apply_segmentation(self, segmentation):
(h, w) = self.output_size
ret = Image.fromarray(segmentation).transform(size=(w, h), method=Image.EXTENT, data=self.src_rect, resample=Image.NEAREST if Image.NEAREST else self.interp, fill=self.fill)
segmentation = np.asarray(ret)
return segmentation
|
DynamicRouting
|
positive
|
def __getitem__(self, key_item) -> 'DataTensor':
"""Get data in the tensor.
"""
print('')
print('key_item: {}'.format(key_item))
<DeepExtract>
if isinstance(key_item, list):
index_item = [self.get_index(0, el) for el in key_item]
elif isinstance(key_item, tuple):
index_item = tuple([self.get_index(el_i, el) for (el_i, el) in enumerate(key_item)])
else:
index_item = self.get_index(0, key_item)
</DeepExtract>
print('index_item: {}'.format(index_item))
tensor_data = self._tensor[index_item]
print('data: {}'.format(tensor_data.size()))
<DeepExtract>
if isinstance(key_item, list):
tensor_keys = [self.get_key(0, el) for el in key_item]
elif isinstance(key_item, tuple):
tensor_keys = tuple([self.get_key(el_i, el) for (el_i, el) in enumerate(key_item)])
else:
tensor_keys = self.get_key(0, key_item)
</DeepExtract>
print('keys: {}'.format(tensor_keys))
<DeepExtract>
output_keys = list()
print('_remove_keys, item: {}'.format(index_item))
print('_dataindex keys: {}'.format(self.keys))
item_values = index_item if isinstance(index_item, tuple) else tuple((index_item,))
item_types = [type(el) for el in item_values]
n_dim = len(index_item) if isinstance(index_item, tuple) else 1
print('n_dim: {}'.format(n_dim))
print('item_values: {}'.format(item_values))
print('item_types: {}'.format(item_types))
for (el_i, data_index) in enumerate(self.keys):
if el_i >= len(item_types) or item_types[el_i] is list or item_types[el_i] is slice:
if isinstance(data_index, DataIndexer):
output_keys.append(data_index.keys)
else:
output_keys.append(None)
tensor_keys = output_keys
</DeepExtract>
print('tensor_keys: {}'.format(tensor_keys))
return DataTensor(tensor_data, tensor_keys)
|
def __getitem__(self, key_item) -> 'DataTensor':
"""Get data in the tensor.
"""
print('')
print('key_item: {}'.format(key_item))
if isinstance(key_item, list):
index_item = [self.get_index(0, el) for el in key_item]
elif isinstance(key_item, tuple):
index_item = tuple([self.get_index(el_i, el) for (el_i, el) in enumerate(key_item)])
else:
index_item = self.get_index(0, key_item)
print('index_item: {}'.format(index_item))
tensor_data = self._tensor[index_item]
print('data: {}'.format(tensor_data.size()))
if isinstance(key_item, list):
tensor_keys = [self.get_key(0, el) for el in key_item]
elif isinstance(key_item, tuple):
tensor_keys = tuple([self.get_key(el_i, el) for (el_i, el) in enumerate(key_item)])
else:
tensor_keys = self.get_key(0, key_item)
print('keys: {}'.format(tensor_keys))
output_keys = list()
print('_remove_keys, item: {}'.format(index_item))
print('_dataindex keys: {}'.format(self.keys))
item_values = index_item if isinstance(index_item, tuple) else tuple((index_item,))
item_types = [type(el) for el in item_values]
n_dim = len(index_item) if isinstance(index_item, tuple) else 1
print('n_dim: {}'.format(n_dim))
print('item_values: {}'.format(item_values))
print('item_types: {}'.format(item_types))
for (el_i, data_index) in enumerate(self.keys):
if el_i >= len(item_types) or item_types[el_i] is list or item_types[el_i] is slice:
if isinstance(data_index, DataIndexer):
output_keys.append(data_index.keys)
else:
output_keys.append(None)
tensor_keys = output_keys
print('tensor_keys: {}'.format(tensor_keys))
return DataTensor(tensor_data, tensor_keys)
|
EchoTorch
|
positive
|
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
step_results = super(FurnLiftNApartStateLoggingEpisode, self).multi_step(actions_as_ints=actions_as_ints)
<DeepExtract>
visibility = [self._is_goal_object_visible(agentId) for agentId in range(self.environment.num_agents)]
</DeepExtract>
for i in range(self.environment.num_agents):
if visibility[i]:
self._first_view_of_target[i] = min(self._first_view_of_target[i], self.num_steps_taken_in_episode())
return step_results
|
def multi_step(self, actions_as_ints: Tuple[int, ...]) -> List[Dict[str, Any]]:
step_results = super(FurnLiftNApartStateLoggingEpisode, self).multi_step(actions_as_ints=actions_as_ints)
visibility = [self._is_goal_object_visible(agentId) for agentId in range(self.environment.num_agents)]
for i in range(self.environment.num_agents):
if visibility[i]:
self._first_view_of_target[i] = min(self._first_view_of_target[i], self.num_steps_taken_in_episode())
return step_results
|
cordial-sync
|
positive
|
def __call__(self, r):
<DeepExtract>
authstr = 'Basic ' + to_native_string(b64encode(('%s:%s' % (self.username, self.password)).encode('latin1')).strip())
r.headers['Authorization'] = authstr
</DeepExtract>
return r
|
def __call__(self, r):
authstr = 'Basic ' + to_native_string(b64encode(('%s:%s' % (self.username, self.password)).encode('latin1')).strip())
r.headers['Authorization'] = authstr
return r
|
Crunchyroll-XML-Decoder
|
positive
|
def main(argd):
""" Main entry point, expects docopt arg dict as argd. """
global git_branch, profiler
debugprinter.enable(argd['--debug'])
if argd['--profile']:
profiler = Profile(subcalls=True)
<DeepExtract>
cmd = ['git', 'status', '--porcelain=v2', '--branch']
out = subprocess.check_output(cmd).decode()
for line in out.splitlines():
if not line.startswith('# branch.head'):
continue
branch = line.split(' ')[-1]
git_branch = branch
raise ValueError('\n'.join(('Unable to determine branch from `git status`.', f'Output was:\n{out}\n')))
</DeepExtract>
config['times'].setdefault(git_branch, {})
if argd['--list']:
return list_benchmarks()
return run_benchmarks(pattern=try_repat(argd['PATTERN'], default=None), repeat=max(1, parse_int(argd['--repeat'], default=DEFAULT_REPEAT)), number=max(100, parse_int(argd['--number'], default=DEFAULT_NUMBER)), save=argd['--save'])
|
def main(argd):
""" Main entry point, expects docopt arg dict as argd. """
global git_branch, profiler
debugprinter.enable(argd['--debug'])
if argd['--profile']:
profiler = Profile(subcalls=True)
cmd = ['git', 'status', '--porcelain=v2', '--branch']
out = subprocess.check_output(cmd).decode()
for line in out.splitlines():
if not line.startswith('# branch.head'):
continue
branch = line.split(' ')[-1]
git_branch = branch
raise ValueError('\n'.join(('Unable to determine branch from `git status`.', f'Output was:\n{out}\n')))
config['times'].setdefault(git_branch, {})
if argd['--list']:
return list_benchmarks()
return run_benchmarks(pattern=try_repat(argd['PATTERN'], default=None), repeat=max(1, parse_int(argd['--repeat'], default=DEFAULT_REPEAT)), number=max(100, parse_int(argd['--number'], default=DEFAULT_NUMBER)), save=argd['--save'])
|
colr
|
positive
|
def get_batch(self):
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
work_load_list = self.work_load_list
ctx = self.ctx
if work_load_list is None:
work_load_list = [1] * len(ctx)
assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), 'Invalid settings for work load. '
slices = _split_input_slice(self.batch_size, work_load_list)
im_array_list = []
levels_data_list = []
for islice in slices:
iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
(im_array, levels_data) = get_fpn_maskrcnn_batch(iroidb)
im_array_list.append(im_array)
levels_data_list.append(levels_data)
<DeepExtract>
data_list = []
label_list = []
rois_num_on_levels = {'stride%s' % s: 0 for s in config.RCNN_FEAT_STRIDE}
for s in config.RCNN_FEAT_STRIDE:
max_rois_num = 0
for levels_data in levels_data_list:
for im_i in levels_data:
rois_num = levels_data[im_i]['rois_on_levels']['stride%s' % s].shape[0]
max_rois_num = max(rois_num, max_rois_num)
rois_num_on_levels['stride%s' % s] = max_rois_num
num_imgs = len(levels_data_list[0])
for s in config.RCNN_FEAT_STRIDE:
if rois_num_on_levels['stride%s' % s] == 0:
rois_num_on_levels['stride%s' % s] = num_imgs
continue
if rois_num_on_levels['stride%s' % s] % num_imgs != 0:
ex = num_imgs - rois_num_on_levels['stride%s' % s] % num_imgs
rois_num_on_levels['stride%s' % s] += ex
for (im_array, data_on_imgs) in zip(im_array_list, levels_data_list):
num_imgs = len(data_on_imgs)
for s in config.RCNN_FEAT_STRIDE:
bucket_size = rois_num_on_levels['stride%s' % s]
for im_i in range(num_imgs):
_rois = data_on_imgs['img_%s' % im_i]['rois_on_levels']['stride%s' % s]
_labels = data_on_imgs['img_%s' % im_i]['labels_on_levels']['stride%s' % s]
_bbox_targets = data_on_imgs['img_%s' % im_i]['bbox_targets_on_levels']['stride%s' % s]
_bbox_weights = data_on_imgs['img_%s' % im_i]['bbox_weights_on_levels']['stride%s' % s]
_mask_targets = data_on_imgs['img_%s' % im_i]['mask_targets_on_levels']['stride%s' % s]
_mask_weights = data_on_imgs['img_%s' % im_i]['mask_weights_on_levels']['stride%s' % s]
rois_num = _rois.shape[0]
if rois_num < bucket_size:
num_pad = bucket_size - rois_num
rois_pad = np.array([[12, 34, 56, 78]] * num_pad)
labels_pad = np.array([-1] * num_pad)
bbox_targets_pad = np.array([[1, 2, 3, 4] * config.NUM_CLASSES] * num_pad)
bbox_weights_pad = np.array([[0, 0, 0, 0] * config.NUM_CLASSES] * num_pad)
mask_targets_pad = np.zeros((num_pad, config.NUM_CLASSES, 28, 28), dtype=np.int8)
mask_weights_pad = np.zeros((num_pad, config.NUM_CLASSES, 1, 1), dtype=np.int8)
data_on_imgs['img_%s' % im_i]['rois_on_levels']['stride%s' % s] = np.concatenate([_rois, rois_pad])
data_on_imgs['img_%s' % im_i]['labels_on_levels']['stride%s' % s] = np.concatenate([_labels, labels_pad])
data_on_imgs['img_%s' % im_i]['bbox_targets_on_levels']['stride%s' % s] = np.concatenate([_bbox_targets, bbox_targets_pad])
data_on_imgs['img_%s' % im_i]['bbox_weights_on_levels']['stride%s' % s] = np.concatenate([_bbox_weights, bbox_weights_pad])
data_on_imgs['img_%s' % im_i]['mask_targets_on_levels']['stride%s' % s] = np.concatenate([_mask_targets, mask_targets_pad])
data_on_imgs['img_%s' % im_i]['mask_weights_on_levels']['stride%s' % s] = np.concatenate([_mask_weights, mask_weights_pad])
rois_on_imgs = dict()
labels_on_imgs = dict()
bbox_targets_on_imgs = dict()
bbox_weights_on_imgs = dict()
mask_targets_on_imgs = dict()
mask_weights_on_imgs = dict()
for s in config.RCNN_FEAT_STRIDE:
rois_on_imgs.update({'stride%s' % s: list()})
labels_on_imgs.update({'stride%s' % s: list()})
bbox_targets_on_imgs.update({'stride%s' % s: list()})
bbox_weights_on_imgs.update({'stride%s' % s: list()})
mask_targets_on_imgs.update({'stride%s' % s: list()})
mask_weights_on_imgs.update({'stride%s' % s: list()})
for im_i in range(num_imgs):
for s in config.RCNN_FEAT_STRIDE:
im_rois_on_levels = data_on_imgs['img_%s' % im_i]['rois_on_levels']
labels_on_levels = data_on_imgs['img_%s' % im_i]['labels_on_levels']
bbox_targets_on_levels = data_on_imgs['img_%s' % im_i]['bbox_targets_on_levels']
bbox_weights_on_levels = data_on_imgs['img_%s' % im_i]['bbox_weights_on_levels']
mask_targets_on_levels = data_on_imgs['img_%s' % im_i]['mask_targets_on_levels']
mask_weights_on_levels = data_on_imgs['img_%s' % im_i]['mask_weights_on_levels']
_rois = im_rois_on_levels['stride%s' % s]
batch_index = im_i * np.ones((_rois.shape[0], 1))
rois_on_imgs['stride%s' % s].append(np.hstack((batch_index, _rois)))
labels_on_imgs['stride%s' % s].append(labels_on_levels['stride%s' % s])
bbox_targets_on_imgs['stride%s' % s].append(bbox_targets_on_levels['stride%s' % s])
bbox_weights_on_imgs['stride%s' % s].append(bbox_weights_on_levels['stride%s' % s])
mask_targets_on_imgs['stride%s' % s].append(mask_targets_on_levels['stride%s' % s])
mask_weights_on_imgs['stride%s' % s].append(mask_weights_on_levels['stride%s' % s])
label = dict()
for s in config.RCNN_FEAT_STRIDE:
label.update({'label_stride%s' % s: np.reshape(np.concatenate(labels_on_imgs['stride%s' % s], axis=0), [num_imgs, -1])})
label.update({'bbox_target_stride%s' % s: np.reshape(np.concatenate(bbox_targets_on_imgs['stride%s' % s], axis=0), [num_imgs, -1])})
label.update({'bbox_weight_stride%s' % s: np.reshape(np.concatenate(bbox_weights_on_imgs['stride%s' % s], axis=0), [num_imgs, -1])})
label.update({'mask_target_stride%s' % s: np.reshape(np.concatenate(mask_targets_on_imgs['stride%s' % s], axis=0), [num_imgs, -1, config.NUM_CLASSES, 28, 28])})
label.update({'mask_weight_stride%s' % s: np.reshape(np.concatenate(mask_weights_on_imgs['stride%s' % s], axis=0), [num_imgs, -1, config.NUM_CLASSES, 1, 1])})
data = dict()
data.update({'data': im_array})
for s in config.RCNN_FEAT_STRIDE:
rois_array = np.array(rois_on_imgs['stride%s' % s])
data.update({'rois_stride%s' % s: rois_array})
data_list.append(data)
label_list.append(label)
all_data = dict()
for key in data_list[0].keys():
all_data[key] = tensor_vstack([batch[key] for batch in data_list])
all_label = dict()
for key in label_list[0].keys():
all_label[key] = tensor_vstack([batch[key] for batch in label_list])
(all_data, all_label) = (all_data, all_label)
</DeepExtract>
self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
|
def get_batch(self):
cur_from = self.cur
cur_to = min(cur_from + self.batch_size, self.size)
roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
work_load_list = self.work_load_list
ctx = self.ctx
if work_load_list is None:
work_load_list = [1] * len(ctx)
assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), 'Invalid settings for work load. '
slices = _split_input_slice(self.batch_size, work_load_list)
im_array_list = []
levels_data_list = []
for islice in slices:
iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
(im_array, levels_data) = get_fpn_maskrcnn_batch(iroidb)
im_array_list.append(im_array)
levels_data_list.append(levels_data)
data_list = []
label_list = []
rois_num_on_levels = {'stride%s' % s: 0 for s in config.RCNN_FEAT_STRIDE}
for s in config.RCNN_FEAT_STRIDE:
max_rois_num = 0
for levels_data in levels_data_list:
for im_i in levels_data:
rois_num = levels_data[im_i]['rois_on_levels']['stride%s' % s].shape[0]
max_rois_num = max(rois_num, max_rois_num)
rois_num_on_levels['stride%s' % s] = max_rois_num
num_imgs = len(levels_data_list[0])
for s in config.RCNN_FEAT_STRIDE:
if rois_num_on_levels['stride%s' % s] == 0:
rois_num_on_levels['stride%s' % s] = num_imgs
continue
if rois_num_on_levels['stride%s' % s] % num_imgs != 0:
ex = num_imgs - rois_num_on_levels['stride%s' % s] % num_imgs
rois_num_on_levels['stride%s' % s] += ex
for (im_array, data_on_imgs) in zip(im_array_list, levels_data_list):
num_imgs = len(data_on_imgs)
for s in config.RCNN_FEAT_STRIDE:
bucket_size = rois_num_on_levels['stride%s' % s]
for im_i in range(num_imgs):
_rois = data_on_imgs['img_%s' % im_i]['rois_on_levels']['stride%s' % s]
_labels = data_on_imgs['img_%s' % im_i]['labels_on_levels']['stride%s' % s]
_bbox_targets = data_on_imgs['img_%s' % im_i]['bbox_targets_on_levels']['stride%s' % s]
_bbox_weights = data_on_imgs['img_%s' % im_i]['bbox_weights_on_levels']['stride%s' % s]
_mask_targets = data_on_imgs['img_%s' % im_i]['mask_targets_on_levels']['stride%s' % s]
_mask_weights = data_on_imgs['img_%s' % im_i]['mask_weights_on_levels']['stride%s' % s]
rois_num = _rois.shape[0]
if rois_num < bucket_size:
num_pad = bucket_size - rois_num
rois_pad = np.array([[12, 34, 56, 78]] * num_pad)
labels_pad = np.array([-1] * num_pad)
bbox_targets_pad = np.array([[1, 2, 3, 4] * config.NUM_CLASSES] * num_pad)
bbox_weights_pad = np.array([[0, 0, 0, 0] * config.NUM_CLASSES] * num_pad)
mask_targets_pad = np.zeros((num_pad, config.NUM_CLASSES, 28, 28), dtype=np.int8)
mask_weights_pad = np.zeros((num_pad, config.NUM_CLASSES, 1, 1), dtype=np.int8)
data_on_imgs['img_%s' % im_i]['rois_on_levels']['stride%s' % s] = np.concatenate([_rois, rois_pad])
data_on_imgs['img_%s' % im_i]['labels_on_levels']['stride%s' % s] = np.concatenate([_labels, labels_pad])
data_on_imgs['img_%s' % im_i]['bbox_targets_on_levels']['stride%s' % s] = np.concatenate([_bbox_targets, bbox_targets_pad])
data_on_imgs['img_%s' % im_i]['bbox_weights_on_levels']['stride%s' % s] = np.concatenate([_bbox_weights, bbox_weights_pad])
data_on_imgs['img_%s' % im_i]['mask_targets_on_levels']['stride%s' % s] = np.concatenate([_mask_targets, mask_targets_pad])
data_on_imgs['img_%s' % im_i]['mask_weights_on_levels']['stride%s' % s] = np.concatenate([_mask_weights, mask_weights_pad])
rois_on_imgs = dict()
labels_on_imgs = dict()
bbox_targets_on_imgs = dict()
bbox_weights_on_imgs = dict()
mask_targets_on_imgs = dict()
mask_weights_on_imgs = dict()
for s in config.RCNN_FEAT_STRIDE:
rois_on_imgs.update({'stride%s' % s: list()})
labels_on_imgs.update({'stride%s' % s: list()})
bbox_targets_on_imgs.update({'stride%s' % s: list()})
bbox_weights_on_imgs.update({'stride%s' % s: list()})
mask_targets_on_imgs.update({'stride%s' % s: list()})
mask_weights_on_imgs.update({'stride%s' % s: list()})
for im_i in range(num_imgs):
for s in config.RCNN_FEAT_STRIDE:
im_rois_on_levels = data_on_imgs['img_%s' % im_i]['rois_on_levels']
labels_on_levels = data_on_imgs['img_%s' % im_i]['labels_on_levels']
bbox_targets_on_levels = data_on_imgs['img_%s' % im_i]['bbox_targets_on_levels']
bbox_weights_on_levels = data_on_imgs['img_%s' % im_i]['bbox_weights_on_levels']
mask_targets_on_levels = data_on_imgs['img_%s' % im_i]['mask_targets_on_levels']
mask_weights_on_levels = data_on_imgs['img_%s' % im_i]['mask_weights_on_levels']
_rois = im_rois_on_levels['stride%s' % s]
batch_index = im_i * np.ones((_rois.shape[0], 1))
rois_on_imgs['stride%s' % s].append(np.hstack((batch_index, _rois)))
labels_on_imgs['stride%s' % s].append(labels_on_levels['stride%s' % s])
bbox_targets_on_imgs['stride%s' % s].append(bbox_targets_on_levels['stride%s' % s])
bbox_weights_on_imgs['stride%s' % s].append(bbox_weights_on_levels['stride%s' % s])
mask_targets_on_imgs['stride%s' % s].append(mask_targets_on_levels['stride%s' % s])
mask_weights_on_imgs['stride%s' % s].append(mask_weights_on_levels['stride%s' % s])
label = dict()
for s in config.RCNN_FEAT_STRIDE:
label.update({'label_stride%s' % s: np.reshape(np.concatenate(labels_on_imgs['stride%s' % s], axis=0), [num_imgs, -1])})
label.update({'bbox_target_stride%s' % s: np.reshape(np.concatenate(bbox_targets_on_imgs['stride%s' % s], axis=0), [num_imgs, -1])})
label.update({'bbox_weight_stride%s' % s: np.reshape(np.concatenate(bbox_weights_on_imgs['stride%s' % s], axis=0), [num_imgs, -1])})
label.update({'mask_target_stride%s' % s: np.reshape(np.concatenate(mask_targets_on_imgs['stride%s' % s], axis=0), [num_imgs, -1, config.NUM_CLASSES, 28, 28])})
label.update({'mask_weight_stride%s' % s: np.reshape(np.concatenate(mask_weights_on_imgs['stride%s' % s], axis=0), [num_imgs, -1, config.NUM_CLASSES, 1, 1])})
data = dict()
data.update({'data': im_array})
for s in config.RCNN_FEAT_STRIDE:
rois_array = np.array(rois_on_imgs['stride%s' % s])
data.update({'rois_stride%s' % s: rois_array})
data_list.append(data)
label_list.append(label)
all_data = dict()
for key in data_list[0].keys():
all_data[key] = tensor_vstack([batch[key] for batch in data_list])
all_label = dict()
for key in label_list[0].keys():
all_label[key] = tensor_vstack([batch[key] for batch in label_list])
(all_data, all_label) = (all_data, all_label)
self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
|
enhanced-ssh-mxnet
|
positive
|
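The batching code above only works because every per-stride ROI list is first padded up to a shared bucket size, so the per-image arrays can be stacked into a single rois_stride* blob. A minimal, self-contained sketch of that padding step, assuming NumPy; the helper name pad_rois_to_bucket is illustrative and not taken from the repo, while the dummy pad box reuses the same placeholder values as the snippet:

import numpy as np

def pad_rois_to_bucket(rois_per_image, bucket_size, pad_box=(12, 34, 56, 78)):
    # Pad each (N_i, 4) ROI array with a dummy box until it has bucket_size rows.
    padded = []
    for rois in rois_per_image:
        num_pad = bucket_size - rois.shape[0]
        if num_pad > 0:
            pad = np.tile(np.asarray(pad_box, dtype=rois.dtype), (num_pad, 1))
            rois = np.concatenate([rois, pad], axis=0)
        padded.append(rois)
    # every entry now has the same row count, so they stack into one batch array
    return np.stack(padded, axis=0)

# usage: two images with 3 and 1 ROIs, padded to a bucket of 4
a = np.array([[0, 0, 10, 10], [5, 5, 20, 20], [1, 1, 2, 2]], dtype=np.float32)
b = np.array([[3, 3, 9, 9]], dtype=np.float32)
print(pad_rois_to_bucket([a, b], bucket_size=4).shape)  # (2, 4, 4)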
def test_equals_Config_cli_params(self):
<DeepExtract>
config_dict = {'region': 'region a', 'tags': {'key_a': 'value a'}, 'stacks': {'stack_a': {'template-url': 'template_a', 'parameters': 'any parameters'}}}
config_a_I = Config(config_dict=config_dict, cli_params=['stack_a.cli_parameter_a=cli_value_a'])
</DeepExtract>
<DeepExtract>
config_dict = {'region': 'region a', 'tags': {'key_a': 'value a'}, 'stacks': {'stack_a': {'template-url': 'template_a', 'parameters': 'any parameters'}}}
config_b_cli_params = Config(config_dict=config_dict, cli_params=['stack_a.cli_parameter_a=cli_value_a'])
</DeepExtract>
config_b_cli_params.cli_params = {}
self.assertNotEqual(config_a_I, config_b_cli_params)
|
def test_equals_Config_cli_params(self):
config_dict = {'region': 'region a', 'tags': {'key_a': 'value a'}, 'stacks': {'stack_a': {'template-url': 'template_a', 'parameters': 'any parameters'}}}
config_a_I = Config(config_dict=config_dict, cli_params=['stack_a.cli_parameter_a=cli_value_a'])
config_dict = {'region': 'region a', 'tags': {'key_a': 'value a'}, 'stacks': {'stack_a': {'template-url': 'template_a', 'parameters': 'any parameters'}}}
config_b_cli_params = Config(config_dict=config_dict, cli_params=['stack_a.cli_parameter_a=cli_value_a'])
config_b_cli_params.cli_params = {}
self.assertNotEqual(config_a_I, config_b_cli_params)
|
cfn-sphere
|
positive
|
def _transform_work(self) -> TaskOutcome:
"""
Perform the transformation work.
Transformation work steps: read input, process, write output, display analysis.
"""
mode = ''
if self._simulate:
mode = 'simulated-'
if not self._config:
logger.warning('Config missing')
return TaskOutcome(mode + 'failure')
try:
idir = self._config['input-dir']
ipth = pathlib.Path(idir)
odir = self._config['output-dir']
opth = pathlib.Path(odir)
except KeyError as e:
logger.debug(f'key {e.args[0]} missing')
return TaskOutcome(mode + 'failure')
self._overwrite = self._config.getboolean('output-overwrite', True)
quiet = self._config.get('quiet', False)
self._verbose = not self._simulate and (not quiet)
timestamp = self._config.get('timestamp')
if timestamp is not None:
try:
TaniumTransformer.set_timestamp(timestamp)
except Exception:
logger.warning('config invalid "timestamp"')
return TaskOutcome(mode + 'failure')
modes = {'blocksize': self._config.getint('blocksize', 10000), 'cpus_max': self._config.getint('cpus-max', 1), 'cpus_min': self._config.getint('cpus-min', 1), 'aggregate': self._config.getboolean('aggregate', True), 'caching': self._config.getboolean('caching', True), 'checking': self._config.getboolean('checking', False)}
opth.mkdir(exist_ok=True, parents=True)
for ifile in sorted(ipth.iterdir()):
<DeepExtract>
if not self._simulate and self._verbose:
logger.info(f'input: {ifile}')
with open(ifile, 'r', encoding=const.FILE_ENCODING) as fp:
blob = fp.read()
blob = blob
</DeepExtract>
tanium_transformer = TaniumTransformer()
tanium_transformer.set_modes(modes)
results = tanium_transformer.transform(blob)
oname = ifile.stem + '.oscal' + '.json'
ofile = opth / oname
if not self._overwrite and pathlib.Path(ofile).exists():
logger.warning(f'output: {ofile} already exists')
return TaskOutcome(mode + 'failure')
<DeepExtract>
if not self._simulate:
if self._verbose:
logger.info(f'output: {ofile}')
results.oscal_write(pathlib.Path(ofile))
</DeepExtract>
<DeepExtract>
if not self._simulate and self._verbose:
analysis = tanium_transformer.analysis
for line in analysis:
logger.info(line)
</DeepExtract>
return TaskOutcome(mode + 'success')
|
def _transform_work(self) -> TaskOutcome:
"""
Perform the transformation work.
Transformation work steps: read input, process, write output, display analysis.
"""
mode = ''
if self._simulate:
mode = 'simulated-'
if not self._config:
logger.warning('Config missing')
return TaskOutcome(mode + 'failure')
try:
idir = self._config['input-dir']
ipth = pathlib.Path(idir)
odir = self._config['output-dir']
opth = pathlib.Path(odir)
except KeyError as e:
logger.debug(f'key {e.args[0]} missing')
return TaskOutcome(mode + 'failure')
self._overwrite = self._config.getboolean('output-overwrite', True)
quiet = self._config.get('quiet', False)
self._verbose = not self._simulate and (not quiet)
timestamp = self._config.get('timestamp')
if timestamp is not None:
try:
TaniumTransformer.set_timestamp(timestamp)
except Exception:
logger.warning('config invalid "timestamp"')
return TaskOutcome(mode + 'failure')
modes = {'blocksize': self._config.getint('blocksize', 10000), 'cpus_max': self._config.getint('cpus-max', 1), 'cpus_min': self._config.getint('cpus-min', 1), 'aggregate': self._config.getboolean('aggregate', True), 'caching': self._config.getboolean('caching', True), 'checking': self._config.getboolean('checking', False)}
opth.mkdir(exist_ok=True, parents=True)
for ifile in sorted(ipth.iterdir()):
if not self._simulate and self._verbose:
logger.info(f'input: {ifile}')
with open(ifile, 'r', encoding=const.FILE_ENCODING) as fp:
blob = fp.read()
blob = blob
tanium_transformer = TaniumTransformer()
tanium_transformer.set_modes(modes)
results = tanium_transformer.transform(blob)
oname = ifile.stem + '.oscal' + '.json'
ofile = opth / oname
if not self._overwrite and pathlib.Path(ofile).exists():
logger.warning(f'output: {ofile} already exists')
return TaskOutcome(mode + 'failure')
if not self._simulate:
if self._verbose:
logger.info(f'output: {ofile}')
results.oscal_write(pathlib.Path(ofile))
if not self._simulate and self._verbose:
analysis = tanium_transformer.analysis
for line in analysis:
logger.info(line)
return TaskOutcome(mode + 'success')
|
compliance-trestle
|
positive
|
def update_checkpoint(self):
while self.version != SavedCalamariModel.VERSION:
if SavedCalamariModel.VERSION < self.version:
raise Exception('Downgrading of models is not supported ({} to {}). Please upgrade your Calamari instance (currently installed: {})'.format(self.version, SavedCalamariModel.VERSION, __version__))
<DeepExtract>
logger.info(f'Upgrading from version {self.version}')
shutil.copyfile(self.json_path, self.json_path + f'_v{self.version}')
shutil.copyfile(self.ckpt_path + '.h5', self.ckpt_path + f'.h5_v{self.version}')
if self.version < 2:
            raise Exception(f'Models of checkpoint-version lower than {self.version} are not supported anymore. Use an older version of calamari to upgrade them to a supported version.')
elif self.version == 2:
from calamari_ocr.ocr.savedmodel.migrations.version2to5 import migrate2to5, update_model
import tensorflow as tf
if packaging.version.parse(tf.__version__) >= packaging.version.parse('2.5.0'):
raise Exception('Modules of checkpoint version 2 can only be upgraded by Tensorflow version 2.4. Please downgrade Tensorflow to 2.4.x (`pip install tensorflow~=2.4.0`) to convert the model to a newer version. Afterwards you can upgrade Tensorflow to a newer version (`pip install -U tensorflow`) and continue the model upgrade (if required).')
self.dict = migrate2to5(self.dict)
update_model(self.dict, self.ckpt_path)
self.version = 5
elif self.version == 3 or self.version == 4:
from calamari_ocr.ocr.savedmodel.migrations.version3_4to5 import migrate3to5, update_model
if self.version == 3:
self.dict = migrate3to5(self.dict)
update_model(self.dict, self.ckpt_path)
self.version = 5
self._update_json_version()
</DeepExtract>
logger.info(f'Successfully upgraded checkpoint version to {SavedCalamariModel.VERSION}')
|
def update_checkpoint(self):
while self.version != SavedCalamariModel.VERSION:
if SavedCalamariModel.VERSION < self.version:
raise Exception('Downgrading of models is not supported ({} to {}). Please upgrade your Calamari instance (currently installed: {})'.format(self.version, SavedCalamariModel.VERSION, __version__))
logger.info(f'Upgrading from version {self.version}')
shutil.copyfile(self.json_path, self.json_path + f'_v{self.version}')
shutil.copyfile(self.ckpt_path + '.h5', self.ckpt_path + f'.h5_v{self.version}')
if self.version < 2:
            raise Exception(f'Models of checkpoint-version lower than {self.version} are not supported anymore. Use an older version of calamari to upgrade them to a supported version.')
elif self.version == 2:
from calamari_ocr.ocr.savedmodel.migrations.version2to5 import migrate2to5, update_model
import tensorflow as tf
if packaging.version.parse(tf.__version__) >= packaging.version.parse('2.5.0'):
raise Exception('Modules of checkpoint version 2 can only be upgraded by Tensorflow version 2.4. Please downgrade Tensorflow to 2.4.x (`pip install tensorflow~=2.4.0`) to convert the model to a newer version. Afterwards you can upgrade Tensorflow to a newer version (`pip install -U tensorflow`) and continue the model upgrade (if required).')
self.dict = migrate2to5(self.dict)
update_model(self.dict, self.ckpt_path)
self.version = 5
elif self.version == 3 or self.version == 4:
from calamari_ocr.ocr.savedmodel.migrations.version3_4to5 import migrate3to5, update_model
if self.version == 3:
self.dict = migrate3to5(self.dict)
update_model(self.dict, self.ckpt_path)
self.version = 5
self._update_json_version()
logger.info(f'Successfully upgraded checkpoint version to {SavedCalamariModel.VERSION}')
|
calamari
|
positive
|
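update_checkpoint advances the stored model one migration at a time until it reaches the current version, and refuses to go backwards. A small generic sketch of that version-stepping loop, with hypothetical migration functions standing in for the calamari-specific ones:

CURRENT_VERSION = 5

def migrate_2_to_5(ckpt):
    # placeholder for the real version-2 migration
    return dict(ckpt, version=5)

def migrate_3_or_4_to_5(ckpt):
    # placeholder for the real version-3/4 migration
    return dict(ckpt, version=5)

MIGRATIONS = {2: migrate_2_to_5, 3: migrate_3_or_4_to_5, 4: migrate_3_or_4_to_5}

def upgrade(checkpoint):
    version = checkpoint['version']
    if version > CURRENT_VERSION:
        raise ValueError('downgrading is not supported')
    while version != CURRENT_VERSION:
        checkpoint = MIGRATIONS[version](checkpoint)
        version = checkpoint['version']
    return checkpoint

print(upgrade({'version': 3})['version'])  # 5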
def propose_string(self, string, parent_node=None, is_field_name=False, possible_file_types=None, used_at_runtime=None, fix_paths=True):
p_types = 0
if isinstance(string, str):
string = string.encode('ascii', 'ignore')
elif isinstance(string, bytes):
try:
string.decode('utf-8')
except UnicodeDecodeError:
return None
else:
return None
if fix_paths:
string = string.replace(b'\\\\', b'/').replace(b'\\', b'/')
parent_uid = None
if parent_node is not None:
parent_uid = parent_node.uid
if possible_file_types is None:
pass
elif isinstance(possible_file_types, list):
for pt in possible_file_types:
p_types = p_types | ftype_list[pt]
else:
p_types = p_types | ftype_list[possible_file_types]
hash_string_tuple = make_hash_string_tuple(string)
rec = (*hash_string_tuple, parent_uid, is_field_name, used_at_runtime, p_types)
self._string_hash_to_add.append(rec)
substrings = [string]
seps = [b',', b'|']
for sep in seps:
substrings_new = []
for substring in substrings:
substrings_new += substring.split(sep)
substrings = substrings_new
is_field_name = False
for substring in substrings:
if substring != string:
<DeepExtract>
            # bind the stripped substring to a local so it can be re-encoded and
            # path-fixed below (a call result cannot be assigned to)
            sub = substring.strip()
            p_types = 0
            if isinstance(sub, str):
                sub = sub.encode('ascii', 'ignore')
            elif isinstance(sub, bytes):
                try:
                    sub.decode('utf-8')
                except UnicodeDecodeError:
                    return None
            else:
                return None
            if fix_paths:
                sub = sub.replace(b'\\\\', b'/').replace(b'\\', b'/')
            parent_uid = None
            if parent_node is not None:
                parent_uid = parent_node.uid
            if possible_file_types is None:
                pass
            elif isinstance(possible_file_types, list):
                for pt in possible_file_types:
                    p_types = p_types | ftype_list[pt]
            else:
                p_types = p_types | ftype_list[possible_file_types]
            hash_string_tuple = make_hash_string_tuple(sub)
            rec = (*hash_string_tuple, parent_uid, is_field_name, used_at_runtime, p_types)
            self._string_hash_to_add.append(rec)
            substrings = [sub]
            seps = [b',', b'|']
            for sep in seps:
                substrings_new = []
                for substring in substrings:
                    substrings_new += substring.split(sep)
                substrings = substrings_new
            is_field_name = False
            for substring in substrings:
                if substring != sub:
                    self.propose_string(substring.strip(), parent_node, is_field_name, possible_file_types, used_at_runtime, fix_paths)
            return rec
</DeepExtract>
return rec
|
def propose_string(self, string, parent_node=None, is_field_name=False, possible_file_types=None, used_at_runtime=None, fix_paths=True):
p_types = 0
if isinstance(string, str):
string = string.encode('ascii', 'ignore')
elif isinstance(string, bytes):
try:
string.decode('utf-8')
except UnicodeDecodeError:
return None
else:
return None
if fix_paths:
string = string.replace(b'\\\\', b'/').replace(b'\\', b'/')
parent_uid = None
if parent_node is not None:
parent_uid = parent_node.uid
if possible_file_types is None:
pass
elif isinstance(possible_file_types, list):
for pt in possible_file_types:
p_types = p_types | ftype_list[pt]
else:
p_types = p_types | ftype_list[possible_file_types]
hash_string_tuple = make_hash_string_tuple(string)
rec = (*hash_string_tuple, parent_uid, is_field_name, used_at_runtime, p_types)
self._string_hash_to_add.append(rec)
substrings = [string]
seps = [b',', b'|']
for sep in seps:
substrings_new = []
for substring in substrings:
substrings_new += substring.split(sep)
substrings = substrings_new
is_field_name = False
for substring in substrings:
if substring != string:
            # bind the stripped substring to a local so it can be re-encoded and
            # path-fixed below (a call result cannot be assigned to)
            sub = substring.strip()
            p_types = 0
            if isinstance(sub, str):
                sub = sub.encode('ascii', 'ignore')
            elif isinstance(sub, bytes):
                try:
                    sub.decode('utf-8')
                except UnicodeDecodeError:
                    return None
            else:
                return None
            if fix_paths:
                sub = sub.replace(b'\\\\', b'/').replace(b'\\', b'/')
            parent_uid = None
            if parent_node is not None:
                parent_uid = parent_node.uid
            if possible_file_types is None:
                pass
            elif isinstance(possible_file_types, list):
                for pt in possible_file_types:
                    p_types = p_types | ftype_list[pt]
            else:
                p_types = p_types | ftype_list[possible_file_types]
            hash_string_tuple = make_hash_string_tuple(sub)
            rec = (*hash_string_tuple, parent_uid, is_field_name, used_at_runtime, p_types)
            self._string_hash_to_add.append(rec)
            substrings = [sub]
            seps = [b',', b'|']
            for sep in seps:
                substrings_new = []
                for substring in substrings:
                    substrings_new += substring.split(sep)
                substrings = substrings_new
            is_field_name = False
            for substring in substrings:
                if substring != sub:
                    self.propose_string(substring.strip(), parent_node, is_field_name, possible_file_types, used_at_runtime, fix_paths)
            return rec
return rec
|
deca
|
positive
|
def combinationSum3(k, n):
total = []
if k > n or n > 45:
return total
<DeepExtract>
    # bind the candidate digits to a name; range(1, 10) itself cannot be assigned to
    nums = list(range(1, 10))
    if is_valid([], k, n):
        total.append([])
    if sum([]) >= n:
        return total
    for i in nums:
        index = nums.index(i) + 1
        rec = [][:]
        rec.append(i)
        find_all_combinations(total, nums[index:], rec, k, n)
</DeepExtract>
return total
|
def combinationSum3(k, n):
total = []
if k > n or n > 45:
return total
    # bind the candidate digits to a name; range(1, 10) itself cannot be assigned to
    nums = list(range(1, 10))
    if is_valid([], k, n):
        total.append([])
    if sum([]) >= n:
        return total
    for i in nums:
        index = nums.index(i) + 1
        rec = [][:]
        rec.append(i)
        find_all_combinations(total, nums[index:], rec, k, n)
return total
|
cabbird
|
positive
|
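The pair above leans on is_valid and find_all_combinations, which are defined elsewhere in that repo. For reference, a self-contained backtracking version of the same problem (choose k distinct digits from 1-9 that sum to n), assuming the usual semantics of those helpers; the function name combination_sum3 is only illustrative:

def combination_sum3(k, n):
    total = []

    def backtrack(start, current, remaining):
        # a valid combination uses exactly k digits and exhausts the target sum
        if len(current) == k and remaining == 0:
            total.append(current[:])
            return
        if len(current) == k or remaining <= 0:
            return
        for digit in range(start, 10):
            current.append(digit)
            backtrack(digit + 1, current, remaining - digit)
            current.pop()

    backtrack(1, [], n)
    return total

print(combination_sum3(3, 7))  # [[1, 2, 4]]
print(combination_sum3(3, 9))  # [[1, 2, 6], [1, 3, 5], [2, 3, 4]]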
def test_valid_provider_form(self):
<DeepExtract>
trigger = self.create_triggerservice(consumer_name='ServiceRss', provider_name='ServiceEvernote')
name = 'TriggerHappy RSS'
url = 'https://blog.trigger-happy.eu/feeds/all.rss.xml'
status = True
self.uuid = uuid.uuid4()
r = Rss.objects.create(uuid=self.uuid, url=url, name=name, trigger=trigger, status=status)
</DeepExtract>
data = {'name': r.name, 'url': r.url}
form = RssProviderForm(data=data)
self.assertTrue(form.is_valid())
|
def test_valid_provider_form(self):
trigger = self.create_triggerservice(consumer_name='ServiceRss', provider_name='ServiceEvernote')
name = 'TriggerHappy RSS'
url = 'https://blog.trigger-happy.eu/feeds/all.rss.xml'
status = True
self.uuid = uuid.uuid4()
r = Rss.objects.create(uuid=self.uuid, url=url, name=name, trigger=trigger, status=status)
data = {'name': r.name, 'url': r.url}
form = RssProviderForm(data=data)
self.assertTrue(form.is_valid())
|
django-th
|
positive
|
def test_layout(self):
form = RegistrationForm()
with self.assertTemplateUsed('floppyforms/layouts/ul.html'):
with self.assertTemplateUsed('floppyforms/rows/li.html'):
<DeepExtract>
            context = {'form': form}
            if context is None:
                context = {}
            c = Context(context)
t = Template('{% load floppyforms %}' + '{% form form using "floppyforms/layouts/ul.html" %}')
layout = t.render(c)
</DeepExtract>
self.assertHTMLEqual(layout, '\n <li><label for="id_firstname">Your first name?</label> <input type="text" name="firstname" id="id_firstname" required /></li>\n <li><label for="id_lastname">Your last name:</label> <input type="text" name="lastname" id="id_lastname" required /></li>\n <li><label for="id_username">Username:</label> <input type="text" name="username" id="id_username" maxlength="30" required /></li>\n <li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" required />\n <span class="helptext">Make sure to use a secure password.</span></li>\n <li><label for="id_password2">Retype password:</label> <input type="password" name="password2" id="id_password2" required /></li>\n <li><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" />\n <input type="hidden" name="honeypot" id="id_honeypot" /></li>\n ')
|
def test_layout(self):
form = RegistrationForm()
with self.assertTemplateUsed('floppyforms/layouts/ul.html'):
with self.assertTemplateUsed('floppyforms/rows/li.html'):
            context = {'form': form}
            if context is None:
                context = {}
            c = Context(context)
t = Template('{% load floppyforms %}' + '{% form form using "floppyforms/layouts/ul.html" %}')
layout = t.render(c)
self.assertHTMLEqual(layout, '\n <li><label for="id_firstname">Your first name?</label> <input type="text" name="firstname" id="id_firstname" required /></li>\n <li><label for="id_lastname">Your last name:</label> <input type="text" name="lastname" id="id_lastname" required /></li>\n <li><label for="id_username">Username:</label> <input type="text" name="username" id="id_username" maxlength="30" required /></li>\n <li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" required />\n <span class="helptext">Make sure to use a secure password.</span></li>\n <li><label for="id_password2">Retype password:</label> <input type="password" name="password2" id="id_password2" required /></li>\n <li><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" />\n <input type="hidden" name="honeypot" id="id_honeypot" /></li>\n ')
|
django-floppyforms
|
positive
|
def parse_symbol_table_64(table, section, string_table):
strings = self.data.read(string_table.offset, string_table.size)
for i in range(0, section.size / 24):
table[i].seek(section.offset + i * 24)
table[i].uint32('name_offset')
table[i].uint8('info')
table[i].uint8('other')
table[i].uint16('section')
table[i].uint64('value')
table[i].uint64('size')
<DeepExtract>
end = strings.find('\x00', table[i].name_offset)
table[i].name = strings[table[i].name_offset:end]
</DeepExtract>
if len(table[i].name) > 0:
self.symbols_by_name[table[i].name] = table[i].value
self.symbols_by_addr[table[i].value] = table[i].name
|
def parse_symbol_table_64(table, section, string_table):
strings = self.data.read(string_table.offset, string_table.size)
for i in range(0, section.size / 24):
table[i].seek(section.offset + i * 24)
table[i].uint32('name_offset')
table[i].uint8('info')
table[i].uint8('other')
table[i].uint16('section')
table[i].uint64('value')
table[i].uint64('size')
end = strings.find('\x00', table[i].name_offset)
table[i].name = strings[table[i].name_offset:end]
if len(table[i].name) > 0:
self.symbols_by_name[table[i].name] = table[i].value
self.symbols_by_addr[table[i].value] = table[i].name
|
deprecated-binaryninja-python
|
positive
|
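The 24-byte stride and the field order read above match the ELF64 Elf64_Sym record (st_name u32, st_info u8, st_other u8, st_shndx u16, st_value u64, st_size u64). A small sketch of decoding one such record with struct, assuming little-endian data and a fabricated string table; parse_symbol is an illustrative name, not the repo's API:

import struct

ELF64_SYM = struct.Struct('<IBBHQQ')  # name_offset, info, other, section, value, size == 24 bytes

def parse_symbol(raw, offset, strings):
    name_offset, info, other, section, value, size = ELF64_SYM.unpack_from(raw, offset)
    end = strings.find(b'\x00', name_offset)  # names are NUL-terminated in the string table
    return {'name': strings[name_offset:end], 'info': info, 'other': other,
            'section': section, 'value': value, 'size': size}

# usage with a hand-built record: symbol 'main' at 0x400080, 42 bytes long
strings = b'\x00main\x00'
record = ELF64_SYM.pack(1, 0x12, 0, 1, 0x400080, 42)
print(parse_symbol(record, 0, strings))  # name=b'main', value=4194432, size=42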
def simplify_covariance_and_sdcorr_constraints(constraints, lower_bounds, upper_bounds, is_fixed_to_value, fixed_value):
"""Enforce covariance and sdcorr constraints by bounds if possible.
    This is possible if the dimension is <= 2 or all covariances are fixed to 0.
"""
<DeepExtract>
filtered = [c for c in constraints if c['type'] == 'covariance']
rest = [c for c in constraints if c['type'] != 'covariance']
(cov_constraints, others) = (filtered, rest)
</DeepExtract>
<DeepExtract>
filtered = [c for c in others if c['type'] == 'sdcorr']
rest = [c for c in others if c['type'] != 'sdcorr']
(sdcorr_constraints, others) = (filtered, rest)
</DeepExtract>
to_simplify = cov_constraints + sdcorr_constraints
lower = lower_bounds.copy()
upper = upper_bounds.copy()
not_simplifyable = []
for constr in to_simplify:
dim = number_of_triangular_elements_to_dimension(len(constr['index']))
if constr['type'] == 'covariance':
diag_positions = [0, *np.cumsum(range(2, dim + 1)).tolist()]
diag_indices = np.array(constr['index'])[diag_positions].tolist()
off_indices = [i for i in constr['index'] if i not in diag_positions]
if constr['type'] == 'sdcorr':
diag_indices = constr['index'][:dim]
off_indices = constr['index'][dim:]
uncorrelated = False
if is_fixed_to_value[off_indices].all():
if (fixed_value[off_indices] == 0).all():
uncorrelated = True
if uncorrelated:
lower[diag_indices] = np.maximum(0, lower[diag_indices])
elif dim <= 2 and constr['type'] == 'sdcorr':
lower[diag_indices] = np.maximum(0, lower[diag_indices])
lower[off_indices] = -1
upper[off_indices] = 1
else:
not_simplifyable.append(constr)
return (others + not_simplifyable, lower, upper)
|
def simplify_covariance_and_sdcorr_constraints(constraints, lower_bounds, upper_bounds, is_fixed_to_value, fixed_value):
"""Enforce covariance and sdcorr constraints by bounds if possible.
    This is possible if the dimension is <= 2 or all covariances are fixed to 0.
"""
filtered = [c for c in constraints if c['type'] == 'covariance']
rest = [c for c in constraints if c['type'] != 'covariance']
(cov_constraints, others) = (filtered, rest)
filtered = [c for c in others if c['type'] == 'sdcorr']
rest = [c for c in others if c['type'] != 'sdcorr']
(sdcorr_constraints, others) = (filtered, rest)
to_simplify = cov_constraints + sdcorr_constraints
lower = lower_bounds.copy()
upper = upper_bounds.copy()
not_simplifyable = []
for constr in to_simplify:
dim = number_of_triangular_elements_to_dimension(len(constr['index']))
if constr['type'] == 'covariance':
diag_positions = [0, *np.cumsum(range(2, dim + 1)).tolist()]
diag_indices = np.array(constr['index'])[diag_positions].tolist()
off_indices = [i for i in constr['index'] if i not in diag_positions]
if constr['type'] == 'sdcorr':
diag_indices = constr['index'][:dim]
off_indices = constr['index'][dim:]
uncorrelated = False
if is_fixed_to_value[off_indices].all():
if (fixed_value[off_indices] == 0).all():
uncorrelated = True
if uncorrelated:
lower[diag_indices] = np.maximum(0, lower[diag_indices])
elif dim <= 2 and constr['type'] == 'sdcorr':
lower[diag_indices] = np.maximum(0, lower[diag_indices])
lower[off_indices] = -1
upper[off_indices] = 1
else:
not_simplifyable.append(constr)
return (others + not_simplifyable, lower, upper)
|
estimagic
|
positive
|
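In the covariance branch above the parameters are the lower triangle of the matrix in row-major order, so the diagonal entries land at positions 0, 2, 5, 9, ..., which is exactly what 0, *np.cumsum(range(2, dim + 1)) produces. A quick check of that indexing for dim = 3:

import numpy as np

dim = 3  # the lower triangle of a 3x3 matrix has dim * (dim + 1) // 2 = 6 entries
diag_positions = [0, *np.cumsum(range(2, dim + 1)).tolist()]
print(diag_positions)  # [0, 2, 5]

# row-major lower triangle: [c00, c10, c11, c20, c21, c22]
# diagonals c00, c11, c22 sit at indices 0, 2, 5; the rest are off-diagonals
off_positions = [i for i in range(dim * (dim + 1) // 2) if i not in diag_positions]
print(off_positions)  # [1, 3, 4]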
def _setup_cloudbiolinux_fabric_properties(env, options):
<DeepExtract>
value = None
if 'fabricrc_file' in options:
value = options['fabricrc_file']
fabricrc_file = value
</DeepExtract>
env.config_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'config')
env.tool_data_table_conf_file = os.path.join(env.config_dir, '..', 'installed_files', 'tool_data_table_conf.xml')
if fabricrc_file:
env.update(load_settings(fabricrc_file))
else:
_parse_fabricrc(env)
overrides = options.get('fabricrc_overrides', {})
for (key, value) in overrides.iteritems():
if isinstance(value, bool):
overrides[key] = str(value)
env.update(overrides)
_setup_galaxy_env_defaults(env)
|
def _setup_cloudbiolinux_fabric_properties(env, options):
value = None
if 'fabricrc_file' in options:
value = options['fabricrc_file']
fabricrc_file = value
env.config_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'config')
env.tool_data_table_conf_file = os.path.join(env.config_dir, '..', 'installed_files', 'tool_data_table_conf.xml')
if fabricrc_file:
env.update(load_settings(fabricrc_file))
else:
_parse_fabricrc(env)
overrides = options.get('fabricrc_overrides', {})
for (key, value) in overrides.iteritems():
if isinstance(value, bool):
overrides[key] = str(value)
env.update(overrides)
_setup_galaxy_env_defaults(env)
|
cloudbiolinux
|
positive
|
def bin_labels(addr, collections):
labels = []
for bins in collections:
bins_type = bins.get('type', None)
if not bins_type:
continue
for size in filter(lambda x: x != 'type', bins.keys()):
b = bins[size]
if isinstance(size, int):
size = hex(size)
count = '/{:d}'.format(b[1]) if bins_type == 'tcachebins' else None
<DeepExtract>
addrs = []
if bins_type == 'fastbins':
chunks = b
elif bins_type == 'tcachebins':
(addrs, _) = b
else:
(addrs, _, _) = b
chunks = addrs
</DeepExtract>
for chunk_addr in chunks:
if addr == chunk_addr:
labels.append('{:s}[{:s}][{:d}{}]'.format(bins_type, size, chunks.index(addr), count or ''))
return labels
|
def bin_labels(addr, collections):
labels = []
for bins in collections:
bins_type = bins.get('type', None)
if not bins_type:
continue
for size in filter(lambda x: x != 'type', bins.keys()):
b = bins[size]
if isinstance(size, int):
size = hex(size)
count = '/{:d}'.format(b[1]) if bins_type == 'tcachebins' else None
addrs = []
if bins_type == 'fastbins':
chunks = b
elif bins_type == 'tcachebins':
(addrs, _) = b
else:
(addrs, _, _) = b
chunks = addrs
for chunk_addr in chunks:
if addr == chunk_addr:
labels.append('{:s}[{:s}][{:d}{}]'.format(bins_type, size, chunks.index(addr), count or ''))
return labels
|
217gdb
|
positive
|
def _ratio_enum(anchor, ratios):
"""Enumerate a set of anchors for each aspect ratio wrt an anchor."""
<DeepExtract>
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
(w, h, x_ctr, y_ctr) = (w, h, x_ctr, y_ctr)
</DeepExtract>
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
<DeepExtract>
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))
anchors = anchors
</DeepExtract>
return anchors
|
def _ratio_enum(anchor, ratios):
"""Enumerate a set of anchors for each aspect ratio wrt an anchor."""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
(w, h, x_ctr, y_ctr) = (w, h, x_ctr, y_ctr)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))
anchors = anchors
return anchors
|
DF-Traffic-Sign-Identification
|
positive
|
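_ratio_enum keeps the anchor area roughly fixed while sweeping the aspect ratio: for each ratio it takes w = round(sqrt(size / ratio)) and h = round(w * ratio), then re-centres the box. A self-contained check with the conventional 16x16 base anchor [0, 0, 15, 15] and ratios [0.5, 1, 2]:

import numpy as np

def ratio_enum(anchor, ratios):
    # enumerate anchors of (roughly) equal area but different aspect ratios
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    size = w * h
    ws = np.round(np.sqrt(size / ratios))
    hs = np.round(ws * ratios)
    return np.hstack((x_ctr - 0.5 * (ws[:, None] - 1),
                      y_ctr - 0.5 * (hs[:, None] - 1),
                      x_ctr + 0.5 * (ws[:, None] - 1),
                      y_ctr + 0.5 * (hs[:, None] - 1)))

print(ratio_enum(np.array([0, 0, 15, 15]), np.array([0.5, 1.0, 2.0])))
# rows: [-3.5, 2, 18.5, 13], [0, 0, 15, 15], [2.5, -3, 12.5, 18]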
def setupUi(self, Form):
Form.setObjectName(_fromUtf8('Form'))
Form.resize(400, 300)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))
self.modsLabel = QtGui.QLabel(Form)
self.modsLabel.setObjectName(_fromUtf8('modsLabel'))
self.verticalLayout.addWidget(self.modsLabel)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setContentsMargins(-1, -1, -1, 10)
self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))
self.controlButton = KPushButton(Form)
self.controlButton.setCheckable(True)
self.controlButton.setChecked(False)
self.controlButton.setObjectName(_fromUtf8('controlButton'))
self.horizontalLayout.addWidget(self.controlButton)
self.altButton = KPushButton(Form)
self.altButton.setCheckable(True)
self.altButton.setChecked(False)
self.altButton.setObjectName(_fromUtf8('altButton'))
self.horizontalLayout.addWidget(self.altButton)
self.shiftButton = KPushButton(Form)
self.shiftButton.setCheckable(True)
self.shiftButton.setChecked(False)
self.shiftButton.setObjectName(_fromUtf8('shiftButton'))
self.horizontalLayout.addWidget(self.shiftButton)
self.superButton = KPushButton(Form)
self.superButton.setCheckable(True)
self.superButton.setChecked(False)
self.superButton.setObjectName(_fromUtf8('superButton'))
self.horizontalLayout.addWidget(self.superButton)
self.hyperButton = KPushButton(Form)
self.hyperButton.setCheckable(True)
self.hyperButton.setChecked(False)
self.hyperButton.setObjectName(_fromUtf8('hyperButton'))
self.horizontalLayout.addWidget(self.hyperButton)
self.metaButton = KPushButton(Form)
self.metaButton.setCheckable(True)
self.metaButton.setChecked(False)
self.metaButton.setObjectName(_fromUtf8('metaButton'))
self.horizontalLayout.addWidget(self.metaButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setContentsMargins(-1, -1, -1, 5)
self.horizontalLayout_2.setObjectName(_fromUtf8('horizontalLayout_2'))
self.keyLabel = QtGui.QLabel(Form)
self.keyLabel.setObjectName(_fromUtf8('keyLabel'))
self.horizontalLayout_2.addWidget(self.keyLabel)
self.setButton = QtGui.QPushButton(Form)
self.setButton.setObjectName(_fromUtf8('setButton'))
self.horizontalLayout_2.addWidget(self.setButton)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.kseparator = KSeparator(Form)
self.kseparator.setObjectName(_fromUtf8('kseparator'))
self.verticalLayout.addWidget(self.kseparator)
<DeepExtract>
Form.setWindowTitle(kdecore.i18n(_fromUtf8('Form')))
self.modsLabel.setText(kdecore.i18n(_fromUtf8('Modifiers:')))
self.controlButton.setText(kdecore.i18n(_fromUtf8('Control')))
self.altButton.setText(kdecore.i18n(_fromUtf8('Alt')))
self.shiftButton.setText(kdecore.i18n(_fromUtf8('Shift')))
self.superButton.setText(kdecore.i18n(_fromUtf8('Super')))
self.hyperButton.setText(kdecore.i18n(_fromUtf8('Hyper')))
self.metaButton.setText(kdecore.i18n(_fromUtf8('Meta')))
self.keyLabel.setText(kdecore.i18n(_fromUtf8('Key: %s')))
self.setButton.setText(kdecore.i18n(_fromUtf8('Press to set')))
</DeepExtract>
QtCore.QMetaObject.connectSlotsByName(Form)
|
def setupUi(self, Form):
Form.setObjectName(_fromUtf8('Form'))
Form.resize(400, 300)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))
self.modsLabel = QtGui.QLabel(Form)
self.modsLabel.setObjectName(_fromUtf8('modsLabel'))
self.verticalLayout.addWidget(self.modsLabel)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setContentsMargins(-1, -1, -1, 10)
self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))
self.controlButton = KPushButton(Form)
self.controlButton.setCheckable(True)
self.controlButton.setChecked(False)
self.controlButton.setObjectName(_fromUtf8('controlButton'))
self.horizontalLayout.addWidget(self.controlButton)
self.altButton = KPushButton(Form)
self.altButton.setCheckable(True)
self.altButton.setChecked(False)
self.altButton.setObjectName(_fromUtf8('altButton'))
self.horizontalLayout.addWidget(self.altButton)
self.shiftButton = KPushButton(Form)
self.shiftButton.setCheckable(True)
self.shiftButton.setChecked(False)
self.shiftButton.setObjectName(_fromUtf8('shiftButton'))
self.horizontalLayout.addWidget(self.shiftButton)
self.superButton = KPushButton(Form)
self.superButton.setCheckable(True)
self.superButton.setChecked(False)
self.superButton.setObjectName(_fromUtf8('superButton'))
self.horizontalLayout.addWidget(self.superButton)
self.hyperButton = KPushButton(Form)
self.hyperButton.setCheckable(True)
self.hyperButton.setChecked(False)
self.hyperButton.setObjectName(_fromUtf8('hyperButton'))
self.horizontalLayout.addWidget(self.hyperButton)
self.metaButton = KPushButton(Form)
self.metaButton.setCheckable(True)
self.metaButton.setChecked(False)
self.metaButton.setObjectName(_fromUtf8('metaButton'))
self.horizontalLayout.addWidget(self.metaButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setContentsMargins(-1, -1, -1, 5)
self.horizontalLayout_2.setObjectName(_fromUtf8('horizontalLayout_2'))
self.keyLabel = QtGui.QLabel(Form)
self.keyLabel.setObjectName(_fromUtf8('keyLabel'))
self.horizontalLayout_2.addWidget(self.keyLabel)
self.setButton = QtGui.QPushButton(Form)
self.setButton.setObjectName(_fromUtf8('setButton'))
self.horizontalLayout_2.addWidget(self.setButton)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.kseparator = KSeparator(Form)
self.kseparator.setObjectName(_fromUtf8('kseparator'))
self.verticalLayout.addWidget(self.kseparator)
Form.setWindowTitle(kdecore.i18n(_fromUtf8('Form')))
self.modsLabel.setText(kdecore.i18n(_fromUtf8('Modifiers:')))
self.controlButton.setText(kdecore.i18n(_fromUtf8('Control')))
self.altButton.setText(kdecore.i18n(_fromUtf8('Alt')))
self.shiftButton.setText(kdecore.i18n(_fromUtf8('Shift')))
self.superButton.setText(kdecore.i18n(_fromUtf8('Super')))
self.hyperButton.setText(kdecore.i18n(_fromUtf8('Hyper')))
self.metaButton.setText(kdecore.i18n(_fromUtf8('Meta')))
self.keyLabel.setText(kdecore.i18n(_fromUtf8('Key: %s')))
self.setButton.setText(kdecore.i18n(_fromUtf8('Press to set')))
QtCore.QMetaObject.connectSlotsByName(Form)
|
autokey-python2
|
positive
|
def write_string(self, string):
<DeepExtract>
if len(string) < 0:
raise SerializationError('attempt to write size < 0')
elif len(string) < 253:
self.write(chr(len(string)))
elif len(string) < 2 ** 16:
self.write('ý')
self._write_num('<H', len(string))
elif len(string) < 2 ** 32:
self.write('þ')
self._write_num('<I', len(string))
elif len(string) < 2 ** 64:
self.write('ÿ')
self._write_num('<Q', len(string))
</DeepExtract>
<DeepExtract>
if self.input is None:
self.input = string
else:
self.input += string
</DeepExtract>
|
def write_string(self, string):
if len(string) < 0:
raise SerializationError('attempt to write size < 0')
elif len(string) < 253:
self.write(chr(len(string)))
elif len(string) < 2 ** 16:
self.write('ý')
self._write_num('<H', len(string))
elif len(string) < 2 ** 32:
self.write('þ')
self._write_num('<I', len(string))
elif len(string) < 2 ** 64:
self.write('ÿ')
self._write_num('<Q', len(string))
if self.input is None:
self.input = string
else:
self.input += string
|
counterparty-lib
|
positive
|
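write_string above emits a Bitcoin-style variable-length integer ("CompactSize") before the payload: lengths under 253 fit in one byte, and the markers 0xfd, 0xfe and 0xff (the 'ý', 'þ' and 'ÿ' characters in the Python 2 snippet) introduce 2-, 4- and 8-byte little-endian sizes. A bytes-based sketch of the same prefix scheme; the function names are illustrative, not counterparty-lib's API:

import struct

def encode_compact_size(n):
    if n < 0:
        raise ValueError('size must be non-negative')
    if n < 253:
        return struct.pack('<B', n)
    if n < 2 ** 16:
        return b'\xfd' + struct.pack('<H', n)
    if n < 2 ** 32:
        return b'\xfe' + struct.pack('<I', n)
    if n < 2 ** 64:
        return b'\xff' + struct.pack('<Q', n)
    raise ValueError('size too large for a compact size prefix')

def encode_string(payload):
    # length prefix followed by the raw payload bytes
    return encode_compact_size(len(payload)) + payload

print(encode_string(b'abc').hex())      # 03616263
print(encode_compact_size(1000).hex())  # fde803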
def test_has_changed(self):
<DeepExtract>
tracker = kwargs.pop('tracker', self.tracker)
for (field, value) in kwargs.items():
if value is None:
with self.assertRaises(FieldError):
tracker.has_changed(field)
else:
self.assertEqual(tracker.has_changed(field), value)
</DeepExtract>
self.instance.number = 7.5
<DeepExtract>
tracker = kwargs.pop('tracker', self.tracker)
for (field, value) in kwargs.items():
if value is None:
with self.assertRaises(FieldError):
tracker.has_changed(field)
else:
self.assertEqual(tracker.has_changed(field), value)
</DeepExtract>
self.instance.save()
<DeepExtract>
tracker = kwargs.pop('tracker', self.tracker)
for (field, value) in kwargs.items():
if value is None:
with self.assertRaises(FieldError):
tracker.has_changed(field)
else:
self.assertEqual(tracker.has_changed(field), value)
</DeepExtract>
self.instance.number = 7.2
<DeepExtract>
tracker = kwargs.pop('tracker', self.tracker)
for (field, value) in kwargs.items():
if value is None:
with self.assertRaises(FieldError):
tracker.has_changed(field)
else:
self.assertEqual(tracker.has_changed(field), value)
</DeepExtract>
self.instance.number = 7.8
<DeepExtract>
tracker = kwargs.pop('tracker', self.tracker)
for (field, value) in kwargs.items():
if value is None:
with self.assertRaises(FieldError):
tracker.has_changed(field)
else:
self.assertEqual(tracker.has_changed(field), value)
</DeepExtract>
|
def test_has_changed(self):
tracker = kwargs.pop('tracker', self.tracker)
for (field, value) in kwargs.items():
if value is None:
with self.assertRaises(FieldError):
tracker.has_changed(field)
else:
self.assertEqual(tracker.has_changed(field), value)
self.instance.number = 7.5
tracker = kwargs.pop('tracker', self.tracker)
for (field, value) in kwargs.items():
if value is None:
with self.assertRaises(FieldError):
tracker.has_changed(field)
else:
self.assertEqual(tracker.has_changed(field), value)
self.instance.save()
tracker = kwargs.pop('tracker', self.tracker)
for (field, value) in kwargs.items():
if value is None:
with self.assertRaises(FieldError):
tracker.has_changed(field)
else:
self.assertEqual(tracker.has_changed(field), value)
self.instance.number = 7.2
tracker = kwargs.pop('tracker', self.tracker)
for (field, value) in kwargs.items():
if value is None:
with self.assertRaises(FieldError):
tracker.has_changed(field)
else:
self.assertEqual(tracker.has_changed(field), value)
self.instance.number = 7.8
tracker = kwargs.pop('tracker', self.tracker)
for (field, value) in kwargs.items():
if value is None:
with self.assertRaises(FieldError):
tracker.has_changed(field)
else:
self.assertEqual(tracker.has_changed(field), value)
|
django-model-utils
|
positive
|
def main(self):
game_state = game.GameState()
<DeepExtract>
action = np.zeros([self.Num_action])
(state, _, _) = game_state.frame_step(action)
state = self.reshape_input(state)
for i in range(self.Num_skipping * self.Num_stacking):
self.state_set.append(state)
state = state
</DeepExtract>
<DeepExtract>
self.state_set.append(state)
state_in = np.zeros((self.img_size, self.img_size, self.Num_colorChannel * self.Num_stacking))
for stack_frame in range(self.Num_stacking):
state_in[:, :, self.Num_colorChannel * stack_frame:self.Num_colorChannel * (stack_frame + 1)] = self.state_set[-1 - self.Num_skipping * stack_frame]
del self.state_set[0]
state_in = np.uint8(state_in)
stacked_state = state_in
</DeepExtract>
while True:
<DeepExtract>
progress = ''
if self.step <= self.Num_Exploration:
progress = 'Exploring'
elif self.step <= self.Num_Exploration + self.Num_Training:
progress = 'Training'
elif self.step <= self.Num_Exploration + self.Num_Training + self.Num_Testing:
progress = 'Testing'
else:
progress = 'Finished'
self.progress = progress
</DeepExtract>
<DeepExtract>
action = np.zeros([self.Num_action])
action_index = 0
if self.progress == 'Exploring':
action_index = random.randint(0, self.Num_action - 1)
action[action_index] = 1
elif self.progress == 'Training':
if random.random() < self.epsilon:
action_index = random.randint(0, self.Num_action - 1)
action[action_index] = 1
else:
Q_value = self.Q_action.eval(feed_dict={self.input: [stacked_state]})
action_index = np.argmax(Q_value)
action[action_index] = 1
self.maxQ = np.max(Q_value)
if self.epsilon > self.final_epsilon:
self.epsilon -= self.first_epsilon / self.Num_Training
elif self.progress == 'Testing':
Q_value = self.Q_action.eval(feed_dict={self.input: [stacked_state]})
action_index = np.argmax(Q_value)
action[action_index] = 1
self.maxQ = np.max(Q_value)
self.epsilon = 0
action = action
</DeepExtract>
(next_state, reward, terminal) = game_state.frame_step(action)
<DeepExtract>
state_out = cv2.resize(next_state, (self.img_size, self.img_size))
if self.Num_colorChannel == 1:
state_out = cv2.cvtColor(state_out, cv2.COLOR_BGR2GRAY)
state_out = np.reshape(state_out, (self.img_size, self.img_size, 1))
state_out = np.uint8(state_out)
next_state = state_out
</DeepExtract>
<DeepExtract>
self.state_set.append(next_state)
state_in = np.zeros((self.img_size, self.img_size, self.Num_colorChannel * self.Num_stacking))
for stack_frame in range(self.Num_stacking):
state_in[:, :, self.Num_colorChannel * stack_frame:self.Num_colorChannel * (stack_frame + 1)] = self.state_set[-1 - self.Num_skipping * stack_frame]
del self.state_set[0]
state_in = np.uint8(state_in)
stacked_next_state = state_in
</DeepExtract>
<DeepExtract>
if len(self.replay_memory) >= self.Num_replay_memory:
del self.replay_memory[0]
self.replay_memory.append([stacked_state, action, reward, stacked_next_state, terminal])
</DeepExtract>
if self.progress == 'Training':
if self.step % self.Num_update_target == 0:
<DeepExtract>
trainable_variables = tf.trainable_variables()
trainable_variables_network = [var for var in trainable_variables if var.name.startswith('network')]
trainable_variables_target = [var for var in trainable_variables if var.name.startswith('target')]
for i in range(len(trainable_variables_network)):
self.sess.run(tf.assign(trainable_variables_target[i], trainable_variables_network[i]))
</DeepExtract>
<DeepExtract>
minibatch = random.sample(self.replay_memory, self.Num_batch)
state_batch = [batch[0] for batch in minibatch]
action_batch = [batch[1] for batch in minibatch]
reward_batch = [batch[2] for batch in minibatch]
next_state_batch = [batch[3] for batch in minibatch]
terminal_batch = [batch[4] for batch in minibatch]
Q_batch = self.Q_action.eval(feed_dict={self.input: next_state_batch})
theta_batch = self.logits_target.eval(feed_dict={self.input_target: next_state_batch})
theta_target = []
for i in range(len(minibatch)):
theta_target.append([])
for j in range(self.Num_quantile):
if terminal_batch[i] == True:
theta_target[i].append(reward_batch[i])
else:
theta_target[i].append(reward_batch[i] + self.gamma * theta_batch[i, np.argmax(Q_batch[i]), j])
action_binary = np.zeros([self.Num_batch, self.Num_action, self.Num_quantile])
for i in range(len(action_batch)):
action_batch_max = np.argmax(action_batch[i])
action_binary[i, action_batch_max, :] = 1
(_, self.loss) = self.sess.run([self.train_step, self.loss_train], feed_dict={self.input: state_batch, self.theta_target: theta_target, self.action_binary_loss: action_binary})
</DeepExtract>
<DeepExtract>
if self.step == self.Num_Exploration + self.Num_Training:
save_path = self.saver.save(self.sess, 'saved_networks/' + self.game_name + '/' + self.date_time + '_' + self.algorithm + '/model.ckpt')
print('Model saved in file: %s' % save_path)
</DeepExtract>
stacked_state = stacked_next_state
self.score += reward
self.step += 1
<DeepExtract>
if self.progress != 'Exploring':
if terminal:
self.score_board += self.score
self.maxQ_board += self.maxQ
self.loss_board += self.loss
if self.episode % self.Num_plot_episode == 0 and self.episode != 0 and terminal:
diff_step = self.step - self.step_old
tensorboard_info = [self.score_board / self.Num_plot_episode, self.maxQ_board / diff_step, self.loss_board / diff_step]
for i in range(len(tensorboard_info)):
self.sess.run(self.update_ops[i], feed_dict={self.summary_placeholders[i]: float(tensorboard_info[i])})
summary_str = self.sess.run(self.summary_op)
self.summary_writer.add_summary(summary_str, self.step)
self.score_board = 0
self.maxQ_board = 0
self.loss_board = 0
self.step_old = self.step
else:
self.step_old = self.step
</DeepExtract>
if terminal:
<DeepExtract>
print('Step: ' + str(self.step) + ' / ' + 'Episode: ' + str(self.episode) + ' / ' + 'Progress: ' + self.progress + ' / ' + 'Epsilon: ' + str(self.epsilon) + ' / ' + 'Score: ' + str(self.score))
if self.progress != 'Exploring':
self.episode += 1
self.score = 0
state = self.initialization(game_state)
stacked_state = self.skip_and_stack_frame(state)
stacked_state = stacked_state
</DeepExtract>
if self.progress == 'Finished':
print('Finished!')
break
|
def main(self):
game_state = game.GameState()
action = np.zeros([self.Num_action])
(state, _, _) = game_state.frame_step(action)
state = self.reshape_input(state)
for i in range(self.Num_skipping * self.Num_stacking):
self.state_set.append(state)
state = state
self.state_set.append(state)
state_in = np.zeros((self.img_size, self.img_size, self.Num_colorChannel * self.Num_stacking))
for stack_frame in range(self.Num_stacking):
state_in[:, :, self.Num_colorChannel * stack_frame:self.Num_colorChannel * (stack_frame + 1)] = self.state_set[-1 - self.Num_skipping * stack_frame]
del self.state_set[0]
state_in = np.uint8(state_in)
stacked_state = state_in
while True:
progress = ''
if self.step <= self.Num_Exploration:
progress = 'Exploring'
elif self.step <= self.Num_Exploration + self.Num_Training:
progress = 'Training'
elif self.step <= self.Num_Exploration + self.Num_Training + self.Num_Testing:
progress = 'Testing'
else:
progress = 'Finished'
self.progress = progress
action = np.zeros([self.Num_action])
action_index = 0
if self.progress == 'Exploring':
action_index = random.randint(0, self.Num_action - 1)
action[action_index] = 1
elif self.progress == 'Training':
if random.random() < self.epsilon:
action_index = random.randint(0, self.Num_action - 1)
action[action_index] = 1
else:
Q_value = self.Q_action.eval(feed_dict={self.input: [stacked_state]})
action_index = np.argmax(Q_value)
action[action_index] = 1
self.maxQ = np.max(Q_value)
if self.epsilon > self.final_epsilon:
self.epsilon -= self.first_epsilon / self.Num_Training
elif self.progress == 'Testing':
Q_value = self.Q_action.eval(feed_dict={self.input: [stacked_state]})
action_index = np.argmax(Q_value)
action[action_index] = 1
self.maxQ = np.max(Q_value)
self.epsilon = 0
action = action
(next_state, reward, terminal) = game_state.frame_step(action)
state_out = cv2.resize(next_state, (self.img_size, self.img_size))
if self.Num_colorChannel == 1:
state_out = cv2.cvtColor(state_out, cv2.COLOR_BGR2GRAY)
state_out = np.reshape(state_out, (self.img_size, self.img_size, 1))
state_out = np.uint8(state_out)
next_state = state_out
self.state_set.append(next_state)
state_in = np.zeros((self.img_size, self.img_size, self.Num_colorChannel * self.Num_stacking))
for stack_frame in range(self.Num_stacking):
state_in[:, :, self.Num_colorChannel * stack_frame:self.Num_colorChannel * (stack_frame + 1)] = self.state_set[-1 - self.Num_skipping * stack_frame]
del self.state_set[0]
state_in = np.uint8(state_in)
stacked_next_state = state_in
if len(self.replay_memory) >= self.Num_replay_memory:
del self.replay_memory[0]
self.replay_memory.append([stacked_state, action, reward, stacked_next_state, terminal])
if self.progress == 'Training':
if self.step % self.Num_update_target == 0:
trainable_variables = tf.trainable_variables()
trainable_variables_network = [var for var in trainable_variables if var.name.startswith('network')]
trainable_variables_target = [var for var in trainable_variables if var.name.startswith('target')]
for i in range(len(trainable_variables_network)):
self.sess.run(tf.assign(trainable_variables_target[i], trainable_variables_network[i]))
minibatch = random.sample(self.replay_memory, self.Num_batch)
state_batch = [batch[0] for batch in minibatch]
action_batch = [batch[1] for batch in minibatch]
reward_batch = [batch[2] for batch in minibatch]
next_state_batch = [batch[3] for batch in minibatch]
terminal_batch = [batch[4] for batch in minibatch]
Q_batch = self.Q_action.eval(feed_dict={self.input: next_state_batch})
theta_batch = self.logits_target.eval(feed_dict={self.input_target: next_state_batch})
theta_target = []
for i in range(len(minibatch)):
theta_target.append([])
for j in range(self.Num_quantile):
if terminal_batch[i] == True:
theta_target[i].append(reward_batch[i])
else:
theta_target[i].append(reward_batch[i] + self.gamma * theta_batch[i, np.argmax(Q_batch[i]), j])
action_binary = np.zeros([self.Num_batch, self.Num_action, self.Num_quantile])
for i in range(len(action_batch)):
action_batch_max = np.argmax(action_batch[i])
action_binary[i, action_batch_max, :] = 1
(_, self.loss) = self.sess.run([self.train_step, self.loss_train], feed_dict={self.input: state_batch, self.theta_target: theta_target, self.action_binary_loss: action_binary})
if self.step == self.Num_Exploration + self.Num_Training:
save_path = self.saver.save(self.sess, 'saved_networks/' + self.game_name + '/' + self.date_time + '_' + self.algorithm + '/model.ckpt')
print('Model saved in file: %s' % save_path)
stacked_state = stacked_next_state
self.score += reward
self.step += 1
if self.progress != 'Exploring':
if terminal:
self.score_board += self.score
self.maxQ_board += self.maxQ
self.loss_board += self.loss
if self.episode % self.Num_plot_episode == 0 and self.episode != 0 and terminal:
diff_step = self.step - self.step_old
tensorboard_info = [self.score_board / self.Num_plot_episode, self.maxQ_board / diff_step, self.loss_board / diff_step]
for i in range(len(tensorboard_info)):
self.sess.run(self.update_ops[i], feed_dict={self.summary_placeholders[i]: float(tensorboard_info[i])})
summary_str = self.sess.run(self.summary_op)
self.summary_writer.add_summary(summary_str, self.step)
self.score_board = 0
self.maxQ_board = 0
self.loss_board = 0
self.step_old = self.step
else:
self.step_old = self.step
if terminal:
print('Step: ' + str(self.step) + ' / ' + 'Episode: ' + str(self.episode) + ' / ' + 'Progress: ' + self.progress + ' / ' + 'Epsilon: ' + str(self.epsilon) + ' / ' + 'Score: ' + str(self.score))
if self.progress != 'Exploring':
self.episode += 1
self.score = 0
state = self.initialization(game_state)
stacked_state = self.skip_and_stack_frame(state)
stacked_state = stacked_state
if self.progress == 'Finished':
print('Finished!')
break
|
DRL
|
positive
|
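In the training branch of the DRL loop above, each quantile atom of the greedy next-state action gets the target r + gamma * theta_target(s', argmax_a Q(s', a), j), and the bootstrap term is dropped on terminal transitions. A NumPy sketch of that target construction, assuming batch arrays shaped like the ones in the snippet (rewards (B,), next-state Q values (B, A), target-network quantiles (B, A, N)); build_quantile_targets is an illustrative name:

import numpy as np

def build_quantile_targets(rewards, terminals, q_next, theta_next, gamma=0.99):
    batch = rewards.shape[0]
    best_actions = np.argmax(q_next, axis=1)                   # greedy action per sample
    bootstrap = theta_next[np.arange(batch), best_actions, :]  # (B, N) quantiles of that action
    targets = rewards[:, None] + gamma * bootstrap             # Bellman backup per quantile atom
    targets[terminals] = rewards[terminals][:, None]           # terminal steps keep only the reward
    return targets

rewards = np.array([1.0, 0.0])
terminals = np.array([False, True])
q_next = np.array([[0.2, 0.8], [0.5, 0.1]])
theta_next = np.arange(12, dtype=float).reshape(2, 2, 3)
print(build_quantile_targets(rewards, terminals, q_next, theta_next))
# first row: 1 + 0.99 * [3, 4, 5] = [3.97, 4.96, 5.95]; second row is terminal, so [0, 0, 0]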
def weighted_counts(points, counts, k):
"""
Calculate and return the weighted count of each centroid.
Performs k-means clustering in order to identify the centroids and the
distances between all points and their centroids. Weights and distances
for the centroids are then constructed from all the points in that
centroids cluster. Using those values, the weighted count is computed for
each of the centroids and returned along with the centroid themselves. The
points returned are a list of point objects where each point in the list
has a list of 'values' representing the centroid coordinates and a 'count'
which is the weighted count as computed for that centroid. Also returned
is the list of outlier indexes in the 'points' list as determined by the
k-means algorithm.
Arguments:
points - list of n-dimensional points to run k-means on.
counts - the count for each point in the 'points' list. This list
must be of the same length as the 'points' list because the
count for a point is determined by accessing both 'points'
and 'counts' with the same index.
k - the number of clusters(k-value) to send to the k-means algorithm.
Returns:
centroid_counts - A list of points where each point has 'values' list
containing the coordinates of the centroid it is associated with and a
'count' which is an integer representing the weighted count as
computed for that centroid.
    outliers - A list of indexes of the outliers in 'points' as returned
by the k-means algorithm.
"""
<DeepExtract>
if outlier_threshold:
outliers = find_outliers(points, outlier_threshold, False)
points = [p for (i, p) in enumerate(points) if i not in outliers]
else:
outliers = []
d = get_dimension(points)
n = len(points)
k = k or int(math.sqrt(n / 2))
if d == 1 and (not is_nested(points)):
std = std_dev(points)
else:
dimensions = zip(*points)
std = [std_dev(dim) for dim in dimensions]
norm_points = normalize(points)
if d == 1 and (not is_nested(points)):
sorted_points = sorted(norm_points)
else:
norm_dimensions = zip(*norm_points)
sorted_dimensions = [sorted(dim) for dim in norm_dimensions]
sorted_points = [list(dim) for dim in zip(*sorted_dimensions)]
step = n / k
offset = step / 2
initial_centroids = sorted_points[offset::step]
(centroids, _) = kmeans(norm_points, initial_centroids)
(indexes, distances) = compute_clusters(norm_points, centroids)
if d == 1 and (not is_nested(points)):
denorm_centroids = [c * std for c in centroids]
else:
denorm_centroids = [[dim * s for (dim, s) in zip(c, std)] for c in centroids]
result = {'centroids': denorm_centroids, 'indexes': indexes, 'distances': distances, 'outliers': outliers}
</DeepExtract>
outliers = [points[i] for i in result['outliers']]
dist_weights = defaultdict(lambda : {'dist': [], 'count': []})
for (i, idx) in enumerate(result['indexes']):
dist_weights[idx]['dist'].append(result['distances'][i])
dist_weights[idx]['count'].append(counts[i])
centroid_counts = []
for (i, centroid) in enumerate(result['centroids']):
dist_sum = sum(dist_weights[i]['dist'])
weighted_counts = []
for (j, dist) in enumerate(dist_weights[i]['dist']):
if dist_sum:
wc = (1 - dist / dist_sum) * dist_weights[i]['count'][j]
else:
wc = dist_weights[i]['count'][j]
weighted_counts.append(wc)
if is_iterable(centroid):
values = list(centroid)
else:
values = centroid
centroid_counts.append({'values': values, 'count': int(sum(weighted_counts))})
return (centroid_counts, outliers)
|
def weighted_counts(points, counts, k):
"""
Calculate and return the weighted count of each centroid.
Performs k-means clustering in order to identify the centroids and the
distances between all points and their centroids. Weights and distances
for the centroids are then constructed from all the points in that
    centroid's cluster. Using those values, the weighted count is computed for
    each of the centroids and returned along with the centroids themselves. The
points returned are a list of point objects where each point in the list
has a list of 'values' representing the centroid coordinates and a 'count'
which is the weighted count as computed for that centroid. Also returned
is the list of outlier indexes in the 'points' list as determined by the
k-means algorithm.
Arguments:
points - list of n-dimensional points to run k-means on.
counts - the count for each point in the 'points' list. This list
must be of the same length as the 'points' list because the
count for a point is determined by accessing both 'points'
and 'counts' with the same index.
    k - the number of clusters (k-value) to send to the k-means algorithm.
Returns:
centroid_counts - A list of points where each point has 'values' list
containing the coordinates of the centroid it is associated with and a
'count' which is an integer representing the weighted count as
computed for that centroid.
    outliers - A list of indexes of the outliers in 'points' as returned
by the k-means algorithm.
"""
if outlier_threshold:
outliers = find_outliers(points, outlier_threshold, False)
points = [p for (i, p) in enumerate(points) if i not in outliers]
else:
outliers = []
d = get_dimension(points)
n = len(points)
k = k or int(math.sqrt(n / 2))
if d == 1 and (not is_nested(points)):
std = std_dev(points)
else:
dimensions = zip(*points)
std = [std_dev(dim) for dim in dimensions]
norm_points = normalize(points)
if d == 1 and (not is_nested(points)):
sorted_points = sorted(norm_points)
else:
norm_dimensions = zip(*norm_points)
sorted_dimensions = [sorted(dim) for dim in norm_dimensions]
sorted_points = [list(dim) for dim in zip(*sorted_dimensions)]
step = n / k
offset = step / 2
initial_centroids = sorted_points[offset::step]
(centroids, _) = kmeans(norm_points, initial_centroids)
(indexes, distances) = compute_clusters(norm_points, centroids)
if d == 1 and (not is_nested(points)):
denorm_centroids = [c * std for c in centroids]
else:
denorm_centroids = [[dim * s for (dim, s) in zip(c, std)] for c in centroids]
result = {'centroids': denorm_centroids, 'indexes': indexes, 'distances': distances, 'outliers': outliers}
outliers = [points[i] for i in result['outliers']]
dist_weights = defaultdict(lambda : {'dist': [], 'count': []})
for (i, idx) in enumerate(result['indexes']):
dist_weights[idx]['dist'].append(result['distances'][i])
dist_weights[idx]['count'].append(counts[i])
centroid_counts = []
for (i, centroid) in enumerate(result['centroids']):
dist_sum = sum(dist_weights[i]['dist'])
weighted_counts = []
for (j, dist) in enumerate(dist_weights[i]['dist']):
if dist_sum:
wc = (1 - dist / dist_sum) * dist_weights[i]['count'][j]
else:
wc = dist_weights[i]['count'][j]
weighted_counts.append(wc)
if is_iterable(centroid):
values = list(centroid)
else:
values = centroid
centroid_counts.append({'values': values, 'count': int(sum(weighted_counts))})
return (centroid_counts, outliers)
|
avocado
|
positive
|
def generate(package: str) -> str:
module = f'apischema.{package}.methods'
pyx_file_name = ROOT_DIR / 'apischema' / package / 'methods.pyx'
with open(pyx_file_name, 'w') as pyx_file:
pyx = IndentedWriter(pyx_file)
pyx.writeln('cimport cython')
pyx.writeln('from cpython cimport *')
pyx.writelines(import_lines(ROOT_DIR / 'apischema' / package / 'methods.py'))
for cls in module_elements(module, type):
<DeepExtract>
bases = ', '.join((b.__name__ for b in cls.__bases__ if b is not object))
with pyx.write_block(f'cdef class {cls.__name__}({bases}):'):
annotations = cls.__dict__.get('__annotations__', {})
for (name, tp) in get_type_hints(cls).items():
if name in annotations:
pyx.writeln(f'cdef readonly {cython_type(tp, cls.__module__)} {name}')
dispatch = None
if cls.__bases__ == (object,):
if cls.__subclasses__():
pyx.writeln(f'cdef int {DISPATCH_FIELD}')
else:
base_class = cls.__mro__[-2]
dispatch = get_dispatch(base_class)[cls]
for (name, obj) in cls.__dict__.items():
if not name.startswith('_') and name not in annotations and isinstance(obj, FunctionType):
pyx.writeln()
base_method = getattr(base_class, name)
with pyx.write_block(cython_signature('cpdef', base_method)):
args = ', '.join(inspect.signature(base_method).parameters)
pyx.writeln(f'return {cls.__name__}_{name}({args})')
if annotations or dispatch is not None:
pyx.writeln()
init_fields: List[str] = []
if dataclasses.is_dataclass(cls):
init_fields.extend((field.name for field in dataclasses.fields(cls) if field.init))
with pyx.write_block('def __init__(' + ', '.join(['self'] + init_fields) + '):'):
for name in init_fields:
pyx.writeln(f'self.{name} = {name}')
if hasattr(cls, '__post_init__'):
(lines, _) = inspect.getsourcelines(cls.__post_init__)
pyx.writelines(lines[1:])
if dispatch is not None:
pyx.writeln(f'self.{DISPATCH_FIELD} = {dispatch}')
</DeepExtract>
pyx.writeln()
for func in module_elements(module, FunctionType):
if not func.__name__.startswith('Py'):
<DeepExtract>
pyx.writeln(cython_signature('cpdef inline', func))
pyx.writelines(get_body(func))
</DeepExtract>
pyx.writeln()
<DeepExtract>
all_methods = [Method(cls, func) for cls in module_elements(module, type) if cls.__bases__ == (object,) and cls.__subclasses__() for func in cls.__dict__.values() if isinstance(func, FunctionType) and (not func.__name__.startswith('_'))]
methods_by_name = {method.name: method for method in all_methods}
assert len(methods_by_name) == len(all_methods), 'method substitution requires unique method names'
methods = methods_by_name
</DeepExtract>
for method in methods.values():
<DeepExtract>
for (cls, dispatch) in get_dispatch(method.base_class).items():
if method.name in cls.__dict__:
sub_method = cls.__dict__[method.name]
with pyx.write_block(cython_signature('cdef inline', sub_method, cls)):
pyx.writelines(get_body(sub_method, cls))
pyx.writeln()
</DeepExtract>
for method in methods.values():
<DeepExtract>
with pyx.write_block(cython_signature('cdef inline', method.function, method.base_class)):
pyx.writeln(f'cdef int {DISPATCH_FIELD} = self.{DISPATCH_FIELD}')
for (cls, dispatch) in get_dispatch(method.base_class).items():
if method.name in cls.__dict__:
if_ = 'if' if dispatch == 0 else 'elif'
with pyx.write_block(f'{if_} {DISPATCH_FIELD} == {dispatch}:'):
(self, *params) = inspect.signature(method.function).parameters
args = ', '.join([f'<{cls.__name__}>{self}', *params])
pyx.writeln(f'return {method_name(cls, method.name)}({args})')
</DeepExtract>
pyx.writeln()
return str(pyx_file_name)
|
def generate(package: str) -> str:
module = f'apischema.{package}.methods'
pyx_file_name = ROOT_DIR / 'apischema' / package / 'methods.pyx'
with open(pyx_file_name, 'w') as pyx_file:
pyx = IndentedWriter(pyx_file)
pyx.writeln('cimport cython')
pyx.writeln('from cpython cimport *')
pyx.writelines(import_lines(ROOT_DIR / 'apischema' / package / 'methods.py'))
for cls in module_elements(module, type):
bases = ', '.join((b.__name__ for b in cls.__bases__ if b is not object))
with pyx.write_block(f'cdef class {cls.__name__}({bases}):'):
annotations = cls.__dict__.get('__annotations__', {})
for (name, tp) in get_type_hints(cls).items():
if name in annotations:
pyx.writeln(f'cdef readonly {cython_type(tp, cls.__module__)} {name}')
dispatch = None
if cls.__bases__ == (object,):
if cls.__subclasses__():
pyx.writeln(f'cdef int {DISPATCH_FIELD}')
else:
base_class = cls.__mro__[-2]
dispatch = get_dispatch(base_class)[cls]
for (name, obj) in cls.__dict__.items():
if not name.startswith('_') and name not in annotations and isinstance(obj, FunctionType):
pyx.writeln()
base_method = getattr(base_class, name)
with pyx.write_block(cython_signature('cpdef', base_method)):
args = ', '.join(inspect.signature(base_method).parameters)
pyx.writeln(f'return {cls.__name__}_{name}({args})')
if annotations or dispatch is not None:
pyx.writeln()
init_fields: List[str] = []
if dataclasses.is_dataclass(cls):
init_fields.extend((field.name for field in dataclasses.fields(cls) if field.init))
with pyx.write_block('def __init__(' + ', '.join(['self'] + init_fields) + '):'):
for name in init_fields:
pyx.writeln(f'self.{name} = {name}')
if hasattr(cls, '__post_init__'):
(lines, _) = inspect.getsourcelines(cls.__post_init__)
pyx.writelines(lines[1:])
if dispatch is not None:
pyx.writeln(f'self.{DISPATCH_FIELD} = {dispatch}')
pyx.writeln()
for func in module_elements(module, FunctionType):
if not func.__name__.startswith('Py'):
pyx.writeln(cython_signature('cpdef inline', func))
pyx.writelines(get_body(func))
pyx.writeln()
all_methods = [Method(cls, func) for cls in module_elements(module, type) if cls.__bases__ == (object,) and cls.__subclasses__() for func in cls.__dict__.values() if isinstance(func, FunctionType) and (not func.__name__.startswith('_'))]
methods_by_name = {method.name: method for method in all_methods}
assert len(methods_by_name) == len(all_methods), 'method substitution requires unique method names'
methods = methods_by_name
for method in methods.values():
for (cls, dispatch) in get_dispatch(method.base_class).items():
if method.name in cls.__dict__:
sub_method = cls.__dict__[method.name]
with pyx.write_block(cython_signature('cdef inline', sub_method, cls)):
pyx.writelines(get_body(sub_method, cls))
pyx.writeln()
for method in methods.values():
with pyx.write_block(cython_signature('cdef inline', method.function, method.base_class)):
pyx.writeln(f'cdef int {DISPATCH_FIELD} = self.{DISPATCH_FIELD}')
for (cls, dispatch) in get_dispatch(method.base_class).items():
if method.name in cls.__dict__:
if_ = 'if' if dispatch == 0 else 'elif'
with pyx.write_block(f'{if_} {DISPATCH_FIELD} == {dispatch}:'):
(self, *params) = inspect.signature(method.function).parameters
args = ', '.join([f'<{cls.__name__}>{self}', *params])
pyx.writeln(f'return {method_name(cls, method.name)}({args})')
pyx.writeln()
return str(pyx_file_name)
|
apischema
|
positive
|
def cycle(self) -> bool:
"""Run a cycle of Job dispatch. Returns True if worker should continue; False if time to exit."""
<DeepExtract>
(done, error) = ([], [])
for (id, retcode) in self.check_retcodes():
if retcode is None:
continue
elif retcode == 0:
done.append(id)
logger.debug(f'Job {id} WORKER_DONE')
self.cleanup_proc(id)
else:
logger.info(f'Job {id} WORKER_ERROR')
status = self.handle_error(id, retcode)
if status != 'running':
(retcode, tail) = cast(Tuple[int, str], status)
error.append((id, retcode, tail))
(done_ids, errors) = (done, error)
</DeepExtract>
<DeepExtract>
started_ids = []
for (id, job_spec) in self.runnable_cache.items():
try:
node_spec = self.node_manager.assign_from_params(**job_spec, num_nodes=1, ranks_per_node=1)
except InsufficientResources:
continue
else:
self.job_specs[id] = job_spec
self.node_specs[id] = node_spec
self.start_times[id] = time.time()
self.retry_counts[id] = 1
self.launch_run(id)
started_ids.append(id)
self.runnable_cache = {k: v for (k, v) in self.runnable_cache.items() if k not in started_ids}
started_ids = started_ids
</DeepExtract>
request_num_jobs = max(0, self.num_prefetch_jobs - len(self.runnable_cache))
msg = {'source': self.hostname, 'started': started_ids, 'done': done_ids, 'error': errors, 'request_num_jobs': request_num_jobs}
self.socket.send_json(msg)
logger.debug('Worker awaiting response...')
response_msg = self.socket.recv_json()
logger.debug('Worker response received')
if response_msg.get('exit'):
logger.info(f'Worker {self.hostname} received exit message: break')
return False
if response_msg.get('new_jobs'):
self.runnable_cache.update({job['id']: job for job in response_msg['new_jobs']})
logger.debug(f'{self.hostname} fraction available: {self.node_manager.aggregate_free_nodes()} [{len(self.runnable_cache)} additional prefetched jobs in cache]')
return True
|
def cycle(self) -> bool:
"""Run a cycle of Job dispatch. Returns True if worker should continue; False if time to exit."""
(done, error) = ([], [])
for (id, retcode) in self.check_retcodes():
if retcode is None:
continue
elif retcode == 0:
done.append(id)
logger.debug(f'Job {id} WORKER_DONE')
self.cleanup_proc(id)
else:
logger.info(f'Job {id} WORKER_ERROR')
status = self.handle_error(id, retcode)
if status != 'running':
(retcode, tail) = cast(Tuple[int, str], status)
error.append((id, retcode, tail))
(done_ids, errors) = (done, error)
started_ids = []
for (id, job_spec) in self.runnable_cache.items():
try:
node_spec = self.node_manager.assign_from_params(**job_spec, num_nodes=1, ranks_per_node=1)
except InsufficientResources:
continue
else:
self.job_specs[id] = job_spec
self.node_specs[id] = node_spec
self.start_times[id] = time.time()
self.retry_counts[id] = 1
self.launch_run(id)
started_ids.append(id)
self.runnable_cache = {k: v for (k, v) in self.runnable_cache.items() if k not in started_ids}
started_ids = started_ids
request_num_jobs = max(0, self.num_prefetch_jobs - len(self.runnable_cache))
msg = {'source': self.hostname, 'started': started_ids, 'done': done_ids, 'error': errors, 'request_num_jobs': request_num_jobs}
self.socket.send_json(msg)
logger.debug('Worker awaiting response...')
response_msg = self.socket.recv_json()
logger.debug('Worker response received')
if response_msg.get('exit'):
logger.info(f'Worker {self.hostname} received exit message: break')
return False
if response_msg.get('new_jobs'):
self.runnable_cache.update({job['id']: job for job in response_msg['new_jobs']})
logger.debug(f'{self.hostname} fraction available: {self.node_manager.aggregate_free_nodes()} [{len(self.runnable_cache)} additional prefetched jobs in cache]')
return True
|
balsam
|
positive
|
@pytest.fixture
def b_mock(b, network_validators):
<DeepExtract>
def validator_set(height):
validators = []
for (public_key, power) in network_validators.items():
validators.append({'public_key': {'type': 'ed25519-base64', 'value': public_key}, 'voting_power': power})
b.get_validators = validators
b.get_validators = validator_set
</DeepExtract>
return b
|
@pytest.fixture
def b_mock(b, network_validators):
def validator_set(height):
validators = []
for (public_key, power) in network_validators.items():
validators.append({'public_key': {'type': 'ed25519-base64', 'value': public_key}, 'voting_power': power})
b.get_validators = validators
b.get_validators = validator_set
return b
|
bigchaindb
|
positive
|
def send_choke(self):
if self.partial_message:
self.send_choke_queued = True
else:
<DeepExtract>
if DEBUG2:
if CHOKE:
print((self.ccount, 'SENDING MESSAGE', ord(CHOKE[0]), len(CHOKE)))
else:
print((self.ccount, 'SENDING MESSAGE', -1, 0))
CHOKE = len(CHOKE).to_bytes(4, 'big') + CHOKE
if self.partial_message:
self.outqueue.append(CHOKE)
else:
self.connection.send_message_raw(CHOKE)
</DeepExtract>
self.upload.choke_sent()
self.just_unchoked = 0
|
def send_choke(self):
if self.partial_message:
self.send_choke_queued = True
else:
if DEBUG2:
if CHOKE:
print((self.ccount, 'SENDING MESSAGE', ord(CHOKE[0]), len(CHOKE)))
else:
print((self.ccount, 'SENDING MESSAGE', -1, 0))
CHOKE = len(CHOKE).to_bytes(4, 'big') + CHOKE
if self.partial_message:
self.outqueue.append(CHOKE)
else:
self.connection.send_message_raw(CHOKE)
self.upload.choke_sent()
self.just_unchoked = 0
|
BitTornado
|
positive
|
def main(args):
logger = logging.getLogger(__name__)
dummy_coco_dataset = dummy_datasets.get_coco_dataset()
cfg_orig = load_cfg(yaml.dump(cfg))
im = cv2.imread(args.im_file)
if args.rpn_pkl is not None:
<DeepExtract>
cfg.immutable(False)
merge_cfg_from_file(args.rpn_cfg)
cfg.NUM_GPUS = 1
cfg.MODEL.RPN_ONLY = True
cfg.TEST.RPN_PRE_NMS_TOP_N = 10000
cfg.TEST.RPN_POST_NMS_TOP_N = 2000
assert_and_infer_cfg(cache_urls=False)
model = model_engine.initialize_model_from_cfg(args.rpn_pkl)
with c2_utils.NamedCudaScope(0):
(boxes, scores) = rpn_engine.im_proposals(model, im)
(proposal_boxes, _proposal_scores) = (boxes, scores)
</DeepExtract>
workspace.ResetWorkspace()
else:
proposal_boxes = None
(cls_boxes, cls_segms, cls_keyps) = (None, None, None)
for i in range(0, len(args.models_to_run), 2):
pkl = args.models_to_run[i]
yml = args.models_to_run[i + 1]
cfg.immutable(False)
merge_cfg_from_cfg(cfg_orig)
merge_cfg_from_file(yml)
if len(pkl) > 0:
weights_file = pkl
else:
weights_file = cfg.TEST.WEIGHTS
cfg.NUM_GPUS = 1
assert_and_infer_cfg(cache_urls=False)
model = model_engine.initialize_model_from_cfg(weights_file)
with c2_utils.NamedCudaScope(0):
(cls_boxes_, cls_segms_, cls_keyps_) = model_engine.im_detect_all(model, im, proposal_boxes)
cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
workspace.ResetWorkspace()
out_name = os.path.join(args.output_dir, '{}'.format(os.path.basename(args.im_file) + '.pdf'))
logger.info('Processing {} -> {}'.format(args.im_file, out_name))
vis_utils.vis_one_image(im[:, :, ::-1], args.im_file, args.output_dir, cls_boxes, cls_segms, cls_keyps, dataset=dummy_coco_dataset, box_alpha=0.3, show_class=True, thresh=0.7, kp_thresh=2)
|
def main(args):
logger = logging.getLogger(__name__)
dummy_coco_dataset = dummy_datasets.get_coco_dataset()
cfg_orig = load_cfg(yaml.dump(cfg))
im = cv2.imread(args.im_file)
if args.rpn_pkl is not None:
cfg.immutable(False)
merge_cfg_from_file(args.rpn_cfg)
cfg.NUM_GPUS = 1
cfg.MODEL.RPN_ONLY = True
cfg.TEST.RPN_PRE_NMS_TOP_N = 10000
cfg.TEST.RPN_POST_NMS_TOP_N = 2000
assert_and_infer_cfg(cache_urls=False)
model = model_engine.initialize_model_from_cfg(args.rpn_pkl)
with c2_utils.NamedCudaScope(0):
(boxes, scores) = rpn_engine.im_proposals(model, im)
(proposal_boxes, _proposal_scores) = (boxes, scores)
workspace.ResetWorkspace()
else:
proposal_boxes = None
(cls_boxes, cls_segms, cls_keyps) = (None, None, None)
for i in range(0, len(args.models_to_run), 2):
pkl = args.models_to_run[i]
yml = args.models_to_run[i + 1]
cfg.immutable(False)
merge_cfg_from_cfg(cfg_orig)
merge_cfg_from_file(yml)
if len(pkl) > 0:
weights_file = pkl
else:
weights_file = cfg.TEST.WEIGHTS
cfg.NUM_GPUS = 1
assert_and_infer_cfg(cache_urls=False)
model = model_engine.initialize_model_from_cfg(weights_file)
with c2_utils.NamedCudaScope(0):
(cls_boxes_, cls_segms_, cls_keyps_) = model_engine.im_detect_all(model, im, proposal_boxes)
cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
workspace.ResetWorkspace()
out_name = os.path.join(args.output_dir, '{}'.format(os.path.basename(args.im_file) + '.pdf'))
logger.info('Processing {} -> {}'.format(args.im_file, out_name))
vis_utils.vis_one_image(im[:, :, ::-1], args.im_file, args.output_dir, cls_boxes, cls_segms, cls_keyps, dataset=dummy_coco_dataset, box_alpha=0.3, show_class=True, thresh=0.7, kp_thresh=2)
|
CBNet
|
positive
|
def get_latest_valid_bindings():
<DeepExtract>
version_str = open(os.path.join(DIR, 'VERSION')).read().strip()
major_minor_version = semvar(version_str).version[:2]
major_minor_version_str = '.'.join((str(vx) for vx in major_minor_version))
(_, _, major_minor) = (version_str, major_minor_version, major_minor_version_str)
</DeepExtract>
aws_bucket = 'deepdrive'
aws_bucket_url = 'https://s3-us-west-1.amazonaws.com/' + aws_bucket
from boto.s3.connection import S3Connection
prefix = f'validated-bindings-versions/{major_minor}'
conn = S3Connection(anon=True)
bucket = conn.get_bucket('deepdrive')
bucket_search_str = prefix
bindings_versions = list(bucket.list(bucket_search_str))
if not bindings_versions:
raise RuntimeError('Could not find a bindings version matching %s in bucket %s' % (bucket_search_str, aws_bucket_url))
bindings_versions = [b.name.split('/')[1] for b in bindings_versions]
bindings_versions = sorted(bindings_versions)
ret = bindings_versions[-1]
return ret
|
def get_latest_valid_bindings():
version_str = open(os.path.join(DIR, 'VERSION')).read().strip()
major_minor_version = semvar(version_str).version[:2]
major_minor_version_str = '.'.join((str(vx) for vx in major_minor_version))
(_, _, major_minor) = (version_str, major_minor_version, major_minor_version_str)
aws_bucket = 'deepdrive'
aws_bucket_url = 'https://s3-us-west-1.amazonaws.com/' + aws_bucket
from boto.s3.connection import S3Connection
prefix = f'validated-bindings-versions/{major_minor}'
conn = S3Connection(anon=True)
bucket = conn.get_bucket('deepdrive')
bucket_search_str = prefix
bindings_versions = list(bucket.list(bucket_search_str))
if not bindings_versions:
raise RuntimeError('Could not find a bindings version matching %s in bucket %s' % (bucket_search_str, aws_bucket_url))
bindings_versions = [b.name.split('/')[1] for b in bindings_versions]
bindings_versions = sorted(bindings_versions)
ret = bindings_versions[-1]
return ret
|
deepdrive
|
positive
|
def __init__(self, messages: Optional[Union[str, List[str]]]=None, response: Optional[GenResponse]=None, verbs: Optional[Iterable[str]]=None) -> None:
self.data = []
self.response = None
self.verbs = None
<DeepExtract>
verbs_ = set(verbs or [])
known_options = ['method+url']
if messages is None:
messages = []
if isinstance(messages, str):
messages = [messages]
assert isinstance(messages, list)
assert not verbs_.difference(known_options)
data = None
if response is not None and 'json' in response.headers.get('Content-Type', '') and response.text:
data = json.loads(response.text)
if data:
data_0 = data[0]
if 'errorCode' in data_0:
subreq = ''
if 'referenceId' in data_0:
subreq = ' (in subrequest {!r})'.format(data_0['referenceId'])
messages = [data_0['errorCode'] + subreq] + messages
if data_0.get('fields'):
messages.append('FIELDS: {}'.format(data_0['fields']))
if len(data) > 1:
messages.append('MORE_ERRORS ({})'.format(len(data)))
if 'method+url' in verbs_:
assert response is not None and response.request.url
method = response.request.method
url = response.request.url
if len(url) > 100:
url = url[:100] + '...'
data_info = ''
if method in ('POST', 'PATCH') and (not response.request.body or 'json' not in response.request.headers.get('content-type', '')):
data_info = ' (without json request data)'
messages.append('in {} "{}"{}'.format(method, url, data_info))
separ = '\n '
messages = [x.replace('\n', separ) for x in messages]
message = separ.join(messages)
if self:
self.data = data
self.response = response
self.verbs = verbs_
message = message
</DeepExtract>
super().__init__(message)
|
def __init__(self, messages: Optional[Union[str, List[str]]]=None, response: Optional[GenResponse]=None, verbs: Optional[Iterable[str]]=None) -> None:
self.data = []
self.response = None
self.verbs = None
verbs_ = set(verbs or [])
known_options = ['method+url']
if messages is None:
messages = []
if isinstance(messages, str):
messages = [messages]
assert isinstance(messages, list)
assert not verbs_.difference(known_options)
data = None
if response is not None and 'json' in response.headers.get('Content-Type', '') and response.text:
data = json.loads(response.text)
if data:
data_0 = data[0]
if 'errorCode' in data_0:
subreq = ''
if 'referenceId' in data_0:
subreq = ' (in subrequest {!r})'.format(data_0['referenceId'])
messages = [data_0['errorCode'] + subreq] + messages
if data_0.get('fields'):
messages.append('FIELDS: {}'.format(data_0['fields']))
if len(data) > 1:
messages.append('MORE_ERRORS ({})'.format(len(data)))
if 'method+url' in verbs_:
assert response is not None and response.request.url
method = response.request.method
url = response.request.url
if len(url) > 100:
url = url[:100] + '...'
data_info = ''
if method in ('POST', 'PATCH') and (not response.request.body or 'json' not in response.request.headers.get('content-type', '')):
data_info = ' (without json request data)'
messages.append('in {} "{}"{}'.format(method, url, data_info))
separ = '\n '
messages = [x.replace('\n', separ) for x in messages]
message = separ.join(messages)
if self:
self.data = data
self.response = response
self.verbs = verbs_
message = message
super().__init__(message)
|
django-salesforce
|
positive
|
def _create_matrix_of_one_qubit_gate_circuit(self, gate, ctx, matrix_of_gate):
n_qubits = ctx.n_qubits
<DeepExtract>
    targets = [idx for idx in gate.target_iter(n_qubits)]
gates = []
for idx in range(n_qubits):
        if idx in targets:
gates.append(matrix_of_gate)
else:
gates.append(eye(2))
m = reduce(TensorProduct, reversed(gates))
</DeepExtract>
ctx.matrix_of_circuit = m * ctx.matrix_of_circuit
return ctx
|
def _create_matrix_of_one_qubit_gate_circuit(self, gate, ctx, matrix_of_gate):
n_qubits = ctx.n_qubits
    targets = [idx for idx in gate.target_iter(n_qubits)]
gates = []
for idx in range(n_qubits):
        if idx in targets:
gates.append(matrix_of_gate)
else:
gates.append(eye(2))
m = reduce(TensorProduct, reversed(gates))
ctx.matrix_of_circuit = m * ctx.matrix_of_circuit
return ctx
|
Blueqat
|
positive
|
def _generate_uniform(self, num_samples=10):
(self.container, self.sampled_params) = ({}, {})
values = []
for (var_index, var_name) in enumerate(self.var_names):
<DeepExtract>
if self.param_dict['variables'][var_name]['type'] == 'float':
sampled_values = np.random.uniform(low=0, high=1, size=(self.param_dict['variables'][var_name]['size'], num_samples))
else:
raise NotImplementedError
</DeepExtract>
values.extend(sampled_values)
self.container[var_name] = sampled_values
values = np.array(values)
self.proposed = values.transpose()
|
def _generate_uniform(self, num_samples=10):
(self.container, self.sampled_params) = ({}, {})
values = []
for (var_index, var_name) in enumerate(self.var_names):
if self.param_dict['variables'][var_name]['type'] == 'float':
sampled_values = np.random.uniform(low=0, high=1, size=(self.param_dict['variables'][var_name]['size'], num_samples))
else:
raise NotImplementedError
values.extend(sampled_values)
self.container[var_name] = sampled_values
values = np.array(values)
self.proposed = values.transpose()
|
ChemOS
|
positive
|
def delete(self, chart):
<DeepExtract>
if not self.has_permissions('helmsman.delete_chart', chart):
self.raise_no_permissions('helmsman.delete_chart')
</DeepExtract>
HelmClient().releases.delete(chart.namespace, chart.id)
|
def delete(self, chart):
if not self.has_permissions('helmsman.delete_chart', chart):
self.raise_no_permissions('helmsman.delete_chart')
HelmClient().releases.delete(chart.namespace, chart.id)
|
cloudman
|
positive
|
def norm(p):
"""
Normalize the path
"""
if p.startswith(UNC_PREFIX) or p.startswith(to_posix(UNC_PREFIX)):
p = p.strip(UNC_PREFIX).strip(to_posix(UNC_PREFIX))
<DeepExtract>
p = p.replace(ntpath.sep, posixpath.sep)
</DeepExtract>
p = p.strip(posixpath.sep)
p = posixpath.normpath(p)
return p
|
def norm(p):
"""
Normalize the path
"""
if p.startswith(UNC_PREFIX) or p.startswith(to_posix(UNC_PREFIX)):
p = p.strip(UNC_PREFIX).strip(to_posix(UNC_PREFIX))
p = p.replace(ntpath.sep, posixpath.sep)
p = p.strip(posixpath.sep)
p = posixpath.normpath(p)
return p
|
aboutcode-toolkit
|
positive
|
def _sample_par_sim_pairs(n_samples, n_samples_per_param):
"""
Not for end use; please use `sample_par_sim_pairs`.
Samples (parameter, simulation) pairs from the prior distribution from the model distribution. Specifically,
parameter values are sampled from the prior and used to generate the specified number of simulations per
parameter value. This returns arrays.
    This is a helper function called by the main `sample_par_sim_pairs` one
in order to split drawing from the prior in chunks to avoid parallelization issues with MPI.
Parameters
----------
n_samples: integer
Number of samples to generate
n_samples_per_param: integer
Number of data points in each simulated data set.
Returns
-------
tuple
A tuple of numpy.ndarray's containing parameter and simulation values. The first element of the tuple is an
array with shape (n_samples, d_theta), where d_theta is the dimension of the parameters. The second element
of the tuple is an array with shape (n_samples, n_samples_per_param, d_x), where d_x is the dimension of
each simulation.
"""
self.n_samples_per_param = n_samples_per_param
self.accepted_parameters_manager.broadcast(self.backend, 1)
<DeepExtract>
seed_arr = self.rng.randint(0, np.iinfo(np.uint32).max, size=n_samples, dtype=np.uint32)
sorted_seed_arr = np.sort(seed_arr)
indices = sorted_seed_arr[:-1] == sorted_seed_arr[1:]
if np.sum(indices) > 0:
sorted_seed_arr[:-1][indices] = sorted_seed_arr[:-1][indices] + 1
rng_arr = np.array([np.random.RandomState(seed) for seed in sorted_seed_arr])
rng_pds = self.backend.parallelize(rng_arr)
rng_pds = rng_pds
</DeepExtract>
parameters_simulations_pds = self.backend.map(self._sample_parameter_simulation, rng_pds)
parameters_simulations = self.backend.collect(parameters_simulations_pds)
(parameters, simulations) = [list(t) for t in zip(*parameters_simulations)]
parameters = np.array(parameters)
simulations = np.array(simulations)
parameters = parameters.reshape((parameters.shape[0], parameters.shape[1]))
simulations = simulations.reshape((simulations.shape[0], simulations.shape[2], simulations.shape[3]))
return (parameters, simulations)
|
def _sample_par_sim_pairs(n_samples, n_samples_per_param):
"""
Not for end use; please use `sample_par_sim_pairs`.
Samples (parameter, simulation) pairs from the prior distribution from the model distribution. Specifically,
parameter values are sampled from the prior and used to generate the specified number of simulations per
parameter value. This returns arrays.
    This is a helper function called by the main `sample_par_sim_pairs` one
in order to split drawing from the prior in chunks to avoid parallelization issues with MPI.
Parameters
----------
n_samples: integer
Number of samples to generate
n_samples_per_param: integer
Number of data points in each simulated data set.
Returns
-------
tuple
A tuple of numpy.ndarray's containing parameter and simulation values. The first element of the tuple is an
array with shape (n_samples, d_theta), where d_theta is the dimension of the parameters. The second element
of the tuple is an array with shape (n_samples, n_samples_per_param, d_x), where d_x is the dimension of
each simulation.
"""
self.n_samples_per_param = n_samples_per_param
self.accepted_parameters_manager.broadcast(self.backend, 1)
seed_arr = self.rng.randint(0, np.iinfo(np.uint32).max, size=n_samples, dtype=np.uint32)
sorted_seed_arr = np.sort(seed_arr)
indices = sorted_seed_arr[:-1] == sorted_seed_arr[1:]
if np.sum(indices) > 0:
sorted_seed_arr[:-1][indices] = sorted_seed_arr[:-1][indices] + 1
rng_arr = np.array([np.random.RandomState(seed) for seed in sorted_seed_arr])
rng_pds = self.backend.parallelize(rng_arr)
rng_pds = rng_pds
parameters_simulations_pds = self.backend.map(self._sample_parameter_simulation, rng_pds)
parameters_simulations = self.backend.collect(parameters_simulations_pds)
(parameters, simulations) = [list(t) for t in zip(*parameters_simulations)]
parameters = np.array(parameters)
simulations = np.array(simulations)
parameters = parameters.reshape((parameters.shape[0], parameters.shape[1]))
simulations = simulations.reshape((simulations.shape[0], simulations.shape[2], simulations.shape[3]))
return (parameters, simulations)
|
abcpy
|
positive
|
def get_available_name(self, name, max_length=None):
"""Returns a filename that's free on the target storage system, and
available for new content to be written to.
Found at http://djangosnippets.org/snippets/976/
This file storage solves overwrite on upload problem. Another
proposed solution was to override the save method on the model
like so (from https://code.djangoproject.com/ticket/11663):
def save(self, *args, **kwargs):
try:
this = MyModelName.objects.get(id=self.id)
if this.MyImageFieldName != self.MyImageFieldName:
this.MyImageFieldName.delete()
except: pass
super(MyModelName, self).save(*args, **kwargs)
"""
if self.exists(name):
<DeepExtract>
return self._dispatch(name, 'delete')
</DeepExtract>
if max_length is not None and len(name) > max_length:
return name[:max_length]
else:
return name
|
def get_available_name(self, name, max_length=None):
"""Returns a filename that's free on the target storage system, and
available for new content to be written to.
Found at http://djangosnippets.org/snippets/976/
This file storage solves overwrite on upload problem. Another
proposed solution was to override the save method on the model
like so (from https://code.djangoproject.com/ticket/11663):
def save(self, *args, **kwargs):
try:
this = MyModelName.objects.get(id=self.id)
if this.MyImageFieldName != self.MyImageFieldName:
this.MyImageFieldName.delete()
except: pass
super(MyModelName, self).save(*args, **kwargs)
"""
if self.exists(name):
return self._dispatch(name, 'delete')
if max_length is not None and len(name) > max_length:
return name[:max_length]
else:
return name
|
anytask
|
positive
|
def add_transform_to_server(self, server_name, transform_id):
<DeepExtract>
pass
</DeepExtract>
if transform_id not in self._servers[server_name]['transforms']:
self._servers[server_name]['transforms'].append(transform_id)
|
def add_transform_to_server(self, server_name, transform_id):
pass
if transform_id not in self._servers[server_name]['transforms']:
self._servers[server_name]['transforms'].append(transform_id)
|
canari3
|
positive
|
def is_palindrome_permutation(phrase):
"""checks if a string is a permutation of a palindrome"""
table = [0 for _ in range(ord('z') - ord('a') + 1)]
countodd = 0
for c in phrase:
<DeepExtract>
a = ord('a')
z = ord('z')
upper_a = ord('A')
upper_z = ord('Z')
val = ord(c)
if a <= val <= z:
x = val - a
if upper_a <= val <= upper_z:
x = val - upper_a
x = -1
</DeepExtract>
if x != -1:
table[x] += 1
if table[x] % 2:
countodd += 1
else:
countodd -= 1
return countodd <= 1
|
def is_palindrome_permutation(phrase):
"""checks if a string is a permutation of a palindrome"""
table = [0 for _ in range(ord('z') - ord('a') + 1)]
countodd = 0
for c in phrase:
a = ord('a')
z = ord('z')
upper_a = ord('A')
upper_z = ord('Z')
val = ord(c)
if a <= val <= z:
x = val - a
if upper_a <= val <= upper_z:
x = val - upper_a
x = -1
if x != -1:
table[x] += 1
if table[x] % 2:
countodd += 1
else:
countodd -= 1
return countodd <= 1
|
CtCI-6th-Edition-Python
|
positive
|
def ev_f_verifyerror(self, l_filename, msg, foundok):
if not foundok:
if self.config.list & LISTBAD:
<DeepExtract>
self.stdout.write(self.perhaps_showpath(l_filename) + self.config.listsep)
</DeepExtract>
<DeepExtract>
if self.config.verbose >= -1:
self.stdout.flush()
self.stderr.write('%s : %s' % (self.perhaps_showpath(l_filename), msg) + nl)
</DeepExtract>
|
def ev_f_verifyerror(self, l_filename, msg, foundok):
if not foundok:
if self.config.list & LISTBAD:
self.stdout.write(self.perhaps_showpath(l_filename) + self.config.listsep)
if self.config.verbose >= -1:
self.stdout.flush()
self.stderr.write('%s : %s' % (self.perhaps_showpath(l_filename), msg) + nl)
|
cfv
|
positive
|
def prepare(self):
<DeepExtract>
self.W = self.create_weight(self.input_dim, self.output_dim, initializer=self.initializer)
self.register_parameters(self.W)
if self.disable_bias:
self.B = T.constant(0, dtype=FLOATX)
elif self.random_bias:
self.B = self.create_weight(initializer=self.initializer, shape=(self.output_dim,))
self.register_parameters(self.B)
else:
self.B = self.create_bias(self.output_dim, 'B')
self.register_parameters(self.B)
</DeepExtract>
<DeepExtract>
from deepy.tensor.activations import get_activation
self._activation = get_activation(self.activation)
</DeepExtract>
|
def prepare(self):
self.W = self.create_weight(self.input_dim, self.output_dim, initializer=self.initializer)
self.register_parameters(self.W)
if self.disable_bias:
self.B = T.constant(0, dtype=FLOATX)
elif self.random_bias:
self.B = self.create_weight(initializer=self.initializer, shape=(self.output_dim,))
self.register_parameters(self.B)
else:
self.B = self.create_bias(self.output_dim, 'B')
self.register_parameters(self.B)
from deepy.tensor.activations import get_activation
self._activation = get_activation(self.activation)
|
deepy
|
positive
|
def get_void_shape(obj):
import Part
void = None
if obj.Void == 'Rectangular':
if obj.Addition == 'None':
<DeepExtract>
import Part
void = Part.makeBox(obj.OpeningWidth.Value, obj.HostThickness.Value + 50, obj.OpeningHeight.Value)
void.Placement.Base.x -= obj.OpeningWidth.Value / 2
void.Placement.Base.y -= obj.HostThickness.Value / 2
void.Placement = obj.Placement.multiply(void.Placement)
void = void
</DeepExtract>
if obj.Addition == 'Default Sill':
<DeepExtract>
import Part
void = Part.makeBox(obj.OpeningWidth.Value, obj.HostThickness.Value + 50, obj.OpeningHeight.Value)
void.Placement.Base.x -= obj.OpeningWidth.Value / 2
void.Placement.Base.y -= obj.HostThickness.Value / 2
void.Placement = obj.Placement.multiply(void.Placement)
void = void
</DeepExtract>
if obj.VoidSubtractAll:
ps = []
for s in obj.Shape.Solids:
ps.append(s.copy())
void = void.fuse(ps)
return void
|
def get_void_shape(obj):
import Part
void = None
if obj.Void == 'Rectangular':
if obj.Addition == 'None':
import Part
void = Part.makeBox(obj.OpeningWidth.Value, obj.HostThickness.Value + 50, obj.OpeningHeight.Value)
void.Placement.Base.x -= obj.OpeningWidth.Value / 2
void.Placement.Base.y -= obj.HostThickness.Value / 2
void.Placement = obj.Placement.multiply(void.Placement)
void = void
if obj.Addition == 'Default Sill':
import Part
void = Part.makeBox(obj.OpeningWidth.Value, obj.HostThickness.Value + 50, obj.OpeningHeight.Value)
void.Placement.Base.x -= obj.OpeningWidth.Value / 2
void.Placement.Base.y -= obj.HostThickness.Value / 2
void.Placement = obj.Placement.multiply(void.Placement)
void = void
if obj.VoidSubtractAll:
ps = []
for s in obj.Shape.Solids:
ps.append(s.copy())
void = void.fuse(ps)
return void
|
BIM_Workbench
|
positive
|
def _runAll(dirname):
dirname = self.__pm.mapPath(dirname)
if os.path.isdir(dirname):
<DeepExtract>
print('--- Installed packages ---')
for self.__pkgName in self._psort(list(self.__installed[0].keys())):
ins = self.getInstalledVersion()
new = 0
if self.__pkgName in self.__dists[self.__rc.distname] and self.__ballTarget in self.__dists[self.__rc.distname][self.__pkgName]:
new = self.getVersion()
s = '{0:<19} {1:<15}'.format(self.__pkgName, self._versionToString(ins))
if new and new != ins:
s += '({0})'.format(self._versionToString(new))
print(s)
</DeepExtract>
for filename in os.listdir(dirname):
if os.path.splitext(filename)[1] in ['.sh', '.dash', '.bat', '.cmd']:
lst.append(filename)
<DeepExtract>
print('--- Installed packages ---')
for self.__pkgName in self._psort(list(self.__installed[0].keys())):
ins = self.getInstalledVersion()
new = 0
if self.__pkgName in self.__dists[self.__rc.distname] and self.__ballTarget in self.__dists[self.__rc.distname][self.__pkgName]:
new = self.getVersion()
s = '{0:<19} {1:<15}'.format(self.__pkgName, self._versionToString(ins))
if new and new != ins:
s += '({0})'.format(self._versionToString(new))
print(s)
</DeepExtract>
<DeepExtract>
print('--- Installed packages ---')
for self.__pkgName in self._psort(list(self.__installed[0].keys())):
ins = self.getInstalledVersion()
new = 0
if self.__pkgName in self.__dists[self.__rc.distname] and self.__ballTarget in self.__dists[self.__rc.distname][self.__pkgName]:
new = self.getVersion()
s = '{0:<19} {1:<15}'.format(self.__pkgName, self._versionToString(ins))
if new and new != ins:
s += '({0})'.format(self._versionToString(new))
print(s)
</DeepExtract>
for filename in lst:
if filename[:3] in ['0p_', 'zp_']:
perpetualScripts.append(filename)
else:
regularScripts.append(filename)
perpetualScripts.sort()
lst = perpetualScripts + regularScripts
for i in lst:
<DeepExtract>
mapped_file = self.__pm.mapPath('{0}/{1}'.format(dirname, i))
mapped_file_done = mapped_file + '.done'
if os.path.isfile(mapped_file):
sys.stderr.write('running: {0}\n'.format('{0}/{1}'.format(dirname, i)))
cmd = ['bash'] + self.SH_OPTIONS + [mapped_file]
if not self.__cygwinPlatform:
cmd[0] = self.__dosBash
cwd = None
extension = os.path.splitext(mapped_file)[1]
if '.dash' == extension:
cmd = ['dash'] + self.DASH_OPTIONS + [mapped_file]
if not self.__cygwinPlatform:
cmd[0] = self.__dosDash
if extension in ['.bat', '.cmd']:
cmd = ['cmd'] + self.CMD_OPTIONS + [os.path.basename(mapped_file)]
cwd = os.path.dirname(mapped_file)
retval = Process(cmd, cwd).run(True)
if os.path.exists(mapped_file_done):
os.remove(mapped_file_done)
if retval == 0 and os.path.basename('{0}/{1}'.format(dirname, i))[:3] not in ['0p_', 'zp_']:
shutil.move(mapped_file, mapped_file_done)
elif not optional:
sys.stderr.write("{0}: WARNING couldn't find {1}.\n".format(self.__appName, mapped_file))
</DeepExtract>
|
def _runAll(dirname):
dirname = self.__pm.mapPath(dirname)
if os.path.isdir(dirname):
print('--- Installed packages ---')
for self.__pkgName in self._psort(list(self.__installed[0].keys())):
ins = self.getInstalledVersion()
new = 0
if self.__pkgName in self.__dists[self.__rc.distname] and self.__ballTarget in self.__dists[self.__rc.distname][self.__pkgName]:
new = self.getVersion()
s = '{0:<19} {1:<15}'.format(self.__pkgName, self._versionToString(ins))
if new and new != ins:
s += '({0})'.format(self._versionToString(new))
print(s)
for filename in os.listdir(dirname):
if os.path.splitext(filename)[1] in ['.sh', '.dash', '.bat', '.cmd']:
lst.append(filename)
print('--- Installed packages ---')
for self.__pkgName in self._psort(list(self.__installed[0].keys())):
ins = self.getInstalledVersion()
new = 0
if self.__pkgName in self.__dists[self.__rc.distname] and self.__ballTarget in self.__dists[self.__rc.distname][self.__pkgName]:
new = self.getVersion()
s = '{0:<19} {1:<15}'.format(self.__pkgName, self._versionToString(ins))
if new and new != ins:
s += '({0})'.format(self._versionToString(new))
print(s)
print('--- Installed packages ---')
for self.__pkgName in self._psort(list(self.__installed[0].keys())):
ins = self.getInstalledVersion()
new = 0
if self.__pkgName in self.__dists[self.__rc.distname] and self.__ballTarget in self.__dists[self.__rc.distname][self.__pkgName]:
new = self.getVersion()
s = '{0:<19} {1:<15}'.format(self.__pkgName, self._versionToString(ins))
if new and new != ins:
s += '({0})'.format(self._versionToString(new))
print(s)
for filename in lst:
if filename[:3] in ['0p_', 'zp_']:
perpetualScripts.append(filename)
else:
regularScripts.append(filename)
perpetualScripts.sort()
lst = perpetualScripts + regularScripts
for i in lst:
mapped_file = self.__pm.mapPath('{0}/{1}'.format(dirname, i))
mapped_file_done = mapped_file + '.done'
if os.path.isfile(mapped_file):
sys.stderr.write('running: {0}\n'.format('{0}/{1}'.format(dirname, i)))
cmd = ['bash'] + self.SH_OPTIONS + [mapped_file]
if not self.__cygwinPlatform:
cmd[0] = self.__dosBash
cwd = None
extension = os.path.splitext(mapped_file)[1]
if '.dash' == extension:
cmd = ['dash'] + self.DASH_OPTIONS + [mapped_file]
if not self.__cygwinPlatform:
cmd[0] = self.__dosDash
if extension in ['.bat', '.cmd']:
cmd = ['cmd'] + self.CMD_OPTIONS + [os.path.basename(mapped_file)]
cwd = os.path.dirname(mapped_file)
retval = Process(cmd, cwd).run(True)
if os.path.exists(mapped_file_done):
os.remove(mapped_file_done)
if retval == 0 and os.path.basename('{0}/{1}'.format(dirname, i))[:3] not in ['0p_', 'zp_']:
shutil.move(mapped_file, mapped_file_done)
elif not optional:
sys.stderr.write("{0}: WARNING couldn't find {1}.\n".format(self.__appName, mapped_file))
|
cyg-apt
|
positive
|
def init(self):
self.trainModel = None
if self.in_path != None:
self.lib.setInPath(ctypes.create_string_buffer(self.in_path.encode(), len(self.in_path) * 2))
self.lib.setBern(self.bern)
self.lib.setWorkThreads(self.workThreads)
self.lib.randReset()
self.lib.importTrainFiles()
self.relTotal = self.lib.getRelationTotal()
self.entTotal = self.lib.getEntityTotal()
self.trainTotal = self.lib.getTrainTotal()
self.testTotal = self.lib.getTestTotal()
self.validTotal = self.lib.getValidTotal()
self.batch_size = int(self.lib.getTrainTotal() / self.nbatches)
self.batch_seq_size = self.batch_size * (1 + self.negative_ent + self.negative_rel)
self.batch_h = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=np.int64)
self.batch_t = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=np.int64)
self.batch_r = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=np.int64)
self.batch_y = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=np.float32)
self.batch_h_addr = self.batch_h.__array_interface__['data'][0]
self.batch_t_addr = self.batch_t.__array_interface__['data'][0]
self.batch_r_addr = self.batch_r.__array_interface__['data'][0]
self.batch_y_addr = self.batch_y.__array_interface__['data'][0]
if self.test_link_prediction:
<DeepExtract>
self.lib.importTestFiles()
self.lib.importTypeFiles()
self.test_h = np.zeros(self.lib.getEntityTotal(), dtype=np.int64)
self.test_t = np.zeros(self.lib.getEntityTotal(), dtype=np.int64)
self.test_r = np.zeros(self.lib.getEntityTotal(), dtype=np.int64)
self.test_h_addr = self.test_h.__array_interface__['data'][0]
self.test_t_addr = self.test_t.__array_interface__['data'][0]
self.test_r_addr = self.test_r.__array_interface__['data'][0]
</DeepExtract>
if self.test_triple_classification:
<DeepExtract>
self.lib.importTestFiles()
self.lib.importTypeFiles()
self.test_pos_h = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_pos_t = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_pos_r = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_neg_h = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_neg_t = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_neg_r = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_pos_h_addr = self.test_pos_h.__array_interface__['data'][0]
self.test_pos_t_addr = self.test_pos_t.__array_interface__['data'][0]
self.test_pos_r_addr = self.test_pos_r.__array_interface__['data'][0]
self.test_neg_h_addr = self.test_neg_h.__array_interface__['data'][0]
self.test_neg_t_addr = self.test_neg_t.__array_interface__['data'][0]
self.test_neg_r_addr = self.test_neg_r.__array_interface__['data'][0]
self.valid_pos_h = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_pos_t = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_pos_r = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_neg_h = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_neg_t = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_neg_r = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_pos_h_addr = self.valid_pos_h.__array_interface__['data'][0]
self.valid_pos_t_addr = self.valid_pos_t.__array_interface__['data'][0]
self.valid_pos_r_addr = self.valid_pos_r.__array_interface__['data'][0]
self.valid_neg_h_addr = self.valid_neg_h.__array_interface__['data'][0]
self.valid_neg_t_addr = self.valid_neg_t.__array_interface__['data'][0]
self.valid_neg_r_addr = self.valid_neg_r.__array_interface__['data'][0]
self.relThresh = np.zeros(self.lib.getRelationTotal(), dtype=np.float32)
self.relThresh_addr = self.relThresh.__array_interface__['data'][0]
</DeepExtract>
|
def init(self):
self.trainModel = None
if self.in_path != None:
self.lib.setInPath(ctypes.create_string_buffer(self.in_path.encode(), len(self.in_path) * 2))
self.lib.setBern(self.bern)
self.lib.setWorkThreads(self.workThreads)
self.lib.randReset()
self.lib.importTrainFiles()
self.relTotal = self.lib.getRelationTotal()
self.entTotal = self.lib.getEntityTotal()
self.trainTotal = self.lib.getTrainTotal()
self.testTotal = self.lib.getTestTotal()
self.validTotal = self.lib.getValidTotal()
self.batch_size = int(self.lib.getTrainTotal() / self.nbatches)
self.batch_seq_size = self.batch_size * (1 + self.negative_ent + self.negative_rel)
self.batch_h = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=np.int64)
self.batch_t = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=np.int64)
self.batch_r = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=np.int64)
self.batch_y = np.zeros(self.batch_size * (1 + self.negative_ent + self.negative_rel), dtype=np.float32)
self.batch_h_addr = self.batch_h.__array_interface__['data'][0]
self.batch_t_addr = self.batch_t.__array_interface__['data'][0]
self.batch_r_addr = self.batch_r.__array_interface__['data'][0]
self.batch_y_addr = self.batch_y.__array_interface__['data'][0]
if self.test_link_prediction:
self.lib.importTestFiles()
self.lib.importTypeFiles()
self.test_h = np.zeros(self.lib.getEntityTotal(), dtype=np.int64)
self.test_t = np.zeros(self.lib.getEntityTotal(), dtype=np.int64)
self.test_r = np.zeros(self.lib.getEntityTotal(), dtype=np.int64)
self.test_h_addr = self.test_h.__array_interface__['data'][0]
self.test_t_addr = self.test_t.__array_interface__['data'][0]
self.test_r_addr = self.test_r.__array_interface__['data'][0]
if self.test_triple_classification:
self.lib.importTestFiles()
self.lib.importTypeFiles()
self.test_pos_h = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_pos_t = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_pos_r = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_neg_h = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_neg_t = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_neg_r = np.zeros(self.lib.getTestTotal(), dtype=np.int64)
self.test_pos_h_addr = self.test_pos_h.__array_interface__['data'][0]
self.test_pos_t_addr = self.test_pos_t.__array_interface__['data'][0]
self.test_pos_r_addr = self.test_pos_r.__array_interface__['data'][0]
self.test_neg_h_addr = self.test_neg_h.__array_interface__['data'][0]
self.test_neg_t_addr = self.test_neg_t.__array_interface__['data'][0]
self.test_neg_r_addr = self.test_neg_r.__array_interface__['data'][0]
self.valid_pos_h = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_pos_t = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_pos_r = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_neg_h = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_neg_t = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_neg_r = np.zeros(self.lib.getValidTotal(), dtype=np.int64)
self.valid_pos_h_addr = self.valid_pos_h.__array_interface__['data'][0]
self.valid_pos_t_addr = self.valid_pos_t.__array_interface__['data'][0]
self.valid_pos_r_addr = self.valid_pos_r.__array_interface__['data'][0]
self.valid_neg_h_addr = self.valid_neg_h.__array_interface__['data'][0]
self.valid_neg_t_addr = self.valid_neg_t.__array_interface__['data'][0]
self.valid_neg_r_addr = self.valid_neg_r.__array_interface__['data'][0]
self.relThresh = np.zeros(self.lib.getRelationTotal(), dtype=np.float32)
self.relThresh_addr = self.relThresh.__array_interface__['data'][0]
|
CPL
|
positive
|
def train(train_loader, model, criterion, optimizer, epoch, args):
losses = AverageMeter()
top1 = AverageMeter()
model.train()
start = time.time()
for (i, (image, target)) in enumerate(train_loader):
if epoch < args.warmup:
<DeepExtract>
overall_steps = args.warmup * len(train_loader)
current_steps = epoch * len(train_loader) + i + 1
lr = args.lr * current_steps / overall_steps
lr = min(lr, args.lr)
for p in optimizer.param_groups:
p['lr'] = lr
</DeepExtract>
image = image.cuda()
target = target.cuda()
output_clean = model(image)
loss = criterion(output_clean, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
output = output_clean.float()
loss = loss.float()
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
end = time.time()
print('Epoch: [{0}][{1}/{2}]\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAccuracy {top1.val:.3f} ({top1.avg:.3f})\tTime {3:.2f}'.format(epoch, i, len(train_loader), end - start, loss=losses, top1=top1))
start = time.time()
print('train_accuracy {top1.avg:.3f}'.format(top1=top1))
return top1.avg
|
def train(train_loader, model, criterion, optimizer, epoch, args):
losses = AverageMeter()
top1 = AverageMeter()
model.train()
start = time.time()
for (i, (image, target)) in enumerate(train_loader):
if epoch < args.warmup:
overall_steps = args.warmup * len(train_loader)
current_steps = epoch * len(train_loader) + i + 1
lr = args.lr * current_steps / overall_steps
lr = min(lr, args.lr)
for p in optimizer.param_groups:
p['lr'] = lr
image = image.cuda()
target = target.cuda()
output_clean = model(image)
loss = criterion(output_clean, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
output = output_clean.float()
loss = loss.float()
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
end = time.time()
print('Epoch: [{0}][{1}/{2}]\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAccuracy {top1.val:.3f} ({top1.avg:.3f})\tTime {3:.2f}'.format(epoch, i, len(train_loader), end - start, loss=losses, top1=top1))
start = time.time()
print('train_accuracy {top1.avg:.3f}'.format(top1=top1))
return top1.avg
|
CV_LTH_Pre-training
|
positive
|
def wait_pid_file(self, pid_file, timeout=None):
""" wait some seconds for the pid file to appear and return the pid """
timeout = int(timeout or DefaultTimeoutStartSec / 2)
timeout = max(timeout, MinimumTimeoutStartSec)
dirpath = os.path.dirname(os.path.abspath(pid_file))
for x in xrange(timeout):
if not os.path.isdir(dirpath):
time.sleep(1)
continue
<DeepExtract>
pid = default
if not pid_file:
pid = default
if not os.path.isfile(pid_file):
pid = default
if self.truncate_old(pid_file):
pid = default
try:
for line in open(pid_file):
if line.strip():
pid = to_intN(line.strip())
break
except Exception as e:
logg.warning("bad read of pid file '%s': %s", pid_file, e)
pid = pid
</DeepExtract>
if not pid:
time.sleep(1)
continue
if not pid_exists(pid):
time.sleep(1)
continue
return pid
return None
|
def wait_pid_file(self, pid_file, timeout=None):
""" wait some seconds for the pid file to appear and return the pid """
timeout = int(timeout or DefaultTimeoutStartSec / 2)
timeout = max(timeout, MinimumTimeoutStartSec)
dirpath = os.path.dirname(os.path.abspath(pid_file))
for x in xrange(timeout):
if not os.path.isdir(dirpath):
time.sleep(1)
continue
pid = default
if not pid_file:
pid = default
if not os.path.isfile(pid_file):
pid = default
if self.truncate_old(pid_file):
pid = default
try:
for line in open(pid_file):
if line.strip():
pid = to_intN(line.strip())
break
except Exception as e:
logg.warning("bad read of pid file '%s': %s", pid_file, e)
pid = pid
if not pid:
time.sleep(1)
continue
if not pid_exists(pid):
time.sleep(1)
continue
return pid
return None
|
docker-systemctl-images
|
positive
|
def __floordiv__(self, other):
<DeepExtract>
a = Amount(amount=self['amount'], asset=self['asset'].copy(), graphene_instance=self.graphene)
</DeepExtract>
if isinstance(other, Amount):
from .price import Price
return Price(self, other)
else:
a['amount'] //= other
return a
|
def __floordiv__(self, other):
a = Amount(amount=self['amount'], asset=self['asset'].copy(), graphene_instance=self.graphene)
if isinstance(other, Amount):
from .price import Price
return Price(self, other)
else:
a['amount'] //= other
return a
|
CocosFactory
|
positive
|
def has_type(self, pyname):
"""Tells if a Python type is registered.
Note that it doesn't mean there are C++ types associated with it.
Args:
pyname: str, the name of the Python type. It can be a dotted name.
Returns:
bool, True if any C++ types are registered, False if not.
"""
parts = pyname.split('.')
<DeepExtract>
assert '.' not in parts[0]
current = self._current_scope
while current:
if current.has_nested_type(parts[0]):
break
current = current.parent
current = current
</DeepExtract>
if not current:
return False
for name in parts:
if not current.has_nested_type(name):
return False
current = current.get_nested_type(name)
return True
|
def has_type(self, pyname):
"""Tells if a Python type is registered.
Note that it doesn't mean there are C++ types associated with it.
Args:
pyname: str, the name of the Python type. It can be a dotted name.
Returns:
bool, True if any C++ types are registered, False if not.
"""
parts = pyname.split('.')
assert '.' not in parts[0]
current = self._current_scope
while current:
if current.has_nested_type(parts[0]):
break
current = current.parent
current = current
if not current:
return False
for name in parts:
if not current.has_nested_type(name):
return False
current = current.get_nested_type(name)
return True
|
clif
|
positive
|
def edit(self):
for i in range(19):
<DeepExtract>
if len(self.scene.mask_points[i]) > 0:
for (idx, pt) in enumerate(self.scene.mask_points[i]):
cv2.line(self.mask_m, pt['prev'], pt['curr'], (i, i, i), self.scene.size_points[i][idx])
self.mask_m = self.mask_m
</DeepExtract>
params = get_params(self.opt, (512, 512))
transform_mask = get_transform(self.opt, params, method=Image.NEAREST, normalize=False, normalize_mask=True)
transform_image = get_transform(self.opt, params)
mask = self.mask.copy()
mask_m = self.mask_m.copy()
mask = transform_mask(Image.fromarray(np.uint8(mask)))
mask_m = transform_mask(Image.fromarray(np.uint8(mask_m)))
img = transform_image(self.img)
start_t = time.time()
generated = model.inference(torch.FloatTensor([mask_m.numpy()]), torch.FloatTensor([mask.numpy()]), torch.FloatTensor([img.numpy()]))
end_t = time.time()
print('inference time : {}'.format(end_t - start_t))
result = generated.permute(0, 2, 3, 1)
result = result.detach().cpu().numpy()
result = (result + 1) * 127.5
result = np.asarray(result[0, :, :, :], dtype=np.uint8)
qim = QImage(result.data, result.shape[1], result.shape[0], result.strides[0], QImage.Format_RGB888)
result_name = 'temp/ref_result.png'
img_pil = ImageQt.fromqpixmap(qim)
img_pil.save(result_name)
(pimg, bgPIL, _, _) = evaluate_one(self.mask_net, img_path=result_name)
datasetAP = GetUpdatedAPdrawDataset(self.AP_opt, result_name, bgPIL)
apd_pil = CallAPdrawModel(self.AP_model, datasetAP)
apd_qt = ImageQt.toqpixmap(apd_pil)
if len(self.result_scene.items()) > 0:
self.result_scene.removeItem(self.result_scene.items()[-1])
self.result_scene.addPixmap(apd_qt)
img_pixmap = ImageQt.toqpixmap(img_pil)
if len(self.ref_scene.items()) > 0:
self.ref_scene.removeItem(self.ref_scene.items()[-1])
self.ref_scene.addPixmap(img_pixmap)
|
def edit(self):
for i in range(19):
if len(self.scene.mask_points[i]) > 0:
for (idx, pt) in enumerate(self.scene.mask_points[i]):
cv2.line(self.mask_m, pt['prev'], pt['curr'], (i, i, i), self.scene.size_points[i][idx])
self.mask_m = self.mask_m
params = get_params(self.opt, (512, 512))
transform_mask = get_transform(self.opt, params, method=Image.NEAREST, normalize=False, normalize_mask=True)
transform_image = get_transform(self.opt, params)
mask = self.mask.copy()
mask_m = self.mask_m.copy()
mask = transform_mask(Image.fromarray(np.uint8(mask)))
mask_m = transform_mask(Image.fromarray(np.uint8(mask_m)))
img = transform_image(self.img)
start_t = time.time()
generated = model.inference(torch.FloatTensor([mask_m.numpy()]), torch.FloatTensor([mask.numpy()]), torch.FloatTensor([img.numpy()]))
end_t = time.time()
print('inference time : {}'.format(end_t - start_t))
result = generated.permute(0, 2, 3, 1)
result = result.detach().cpu().numpy()
result = (result + 1) * 127.5
result = np.asarray(result[0, :, :, :], dtype=np.uint8)
qim = QImage(result.data, result.shape[1], result.shape[0], result.strides[0], QImage.Format_RGB888)
result_name = 'temp/ref_result.png'
img_pil = ImageQt.fromqpixmap(qim)
img_pil.save(result_name)
(pimg, bgPIL, _, _) = evaluate_one(self.mask_net, img_path=result_name)
datasetAP = GetUpdatedAPdrawDataset(self.AP_opt, result_name, bgPIL)
apd_pil = CallAPdrawModel(self.AP_model, datasetAP)
apd_qt = ImageQt.toqpixmap(apd_pil)
if len(self.result_scene.items()) > 0:
self.result_scene.removeItem(self.result_scene.items()[-1])
self.result_scene.addPixmap(apd_qt)
img_pixmap = ImageQt.toqpixmap(img_pil)
if len(self.ref_scene.items()) > 0:
self.ref_scene.removeItem(self.ref_scene.items()[-1])
self.ref_scene.addPixmap(img_pixmap)
|
dualFace
|
positive
|
def visit_With(self, node):
<DeepExtract>
self.new_lines = max(self.new_lines, n)
</DeepExtract>
<DeepExtract>
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append('with ')
</DeepExtract>
<DeepExtract>
f = self.get_visitor(node.context_expr)
if f is not None:
return f(node.context_expr)
return self.generic_visit(node.context_expr)
</DeepExtract>
if node.optional_vars is not None:
<DeepExtract>
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(' as ')
</DeepExtract>
<DeepExtract>
f = self.get_visitor(node.optional_vars)
if f is not None:
return f(node.optional_vars)
return self.generic_visit(node.optional_vars)
</DeepExtract>
<DeepExtract>
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(':')
</DeepExtract>
<DeepExtract>
self.new_line = True
self.indentation += 1
for stmt in node.body:
self.visit(stmt)
self.indentation -= 1
</DeepExtract>
|
def visit_With(self, node):
self.new_lines = max(self.new_lines, n)
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append('with ')
f = self.get_visitor(node.context_expr)
if f is not None:
return f(node.context_expr)
return self.generic_visit(node.context_expr)
if node.optional_vars is not None:
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(' as ')
f = self.get_visitor(node.optional_vars)
if f is not None:
return f(node.optional_vars)
return self.generic_visit(node.optional_vars)
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append(':')
self.new_line = True
self.indentation += 1
for stmt in node.body:
self.visit(stmt)
self.indentation -= 1
|
atsf4g-co
|
positive
|
def _find_and_split_before(self, before_base1):
if before_base1 is not None and before_base1 <= 0:
raise Exception('chunk index should be 1-based')
chunk = None
prev_chunk = None
next_chunk = None
bases_visited = 0
<DeepExtract>
prev_chunk = None
next_chunk = None
chunk = None
bases_visited = None
if before_base1 is not None:
q = self.fragment_chunk_location_set.filter(base_first__lte=before_base1, base_last__gte=before_base1)
q = list(q)
if len(q) > 0:
fc = q[0]
chunk = fc.chunk
bases_visited = fc.base_last
next_chunk = fc.next_chunk
if fc.base_first == 1:
prev_chunk = None
else:
prev_chunk = fc.prev_fragment_chunk.chunk
if chunk is None:
total_bases = self.length
if total_bases > 0:
prev_chunk = self.fragment_chunk_location_set.get(base_last=total_bases).chunk
bases_visited = total_bases
(prev_chunk, chunk, next_chunk, bases_visited) = (prev_chunk, chunk, next_chunk, bases_visited)
</DeepExtract>
if before_base1 is not None and bases_visited >= before_base1:
chunk_len = chunk.length
if bases_visited - chunk_len + 1 == before_base1:
return (prev_chunk, chunk)
first_bp_in_chunk = bases_visited - chunk_len + 1
bps_to_split = before_base1 - first_bp_in_chunk
original_annotations = self.fragment_chunk(chunk).annotations()
<DeepExtract>
candidates = chunk.fragment_chunk_location_set.filter(fragment__fragment_index__fresh=True).values_list('fragment_id').distinct()
index_to_update = [row[0] for row in candidates]
if chunk.is_sequence_based:
chunk_sequence = chunk.get_sequence()
s1 = chunk_sequence[0:bps_to_split]
s2 = chunk_sequence[bps_to_split:]
split2 = self._add_chunk(s2, chunk.initial_fragment)
self.__reset_chunk_sequence(chunk, s1)
elif chunk.is_reference_based:
split_start = chunk.ref_start_index + bps_to_split
split2 = self._add_reference_chunk(split_start, chunk.ref_end_index, chunk.initial_fragment)
self.__reset_chunk_reference(chunk, chunk.ref_start_index, split_start - 1)
Edge.objects.filter(from_chunk=chunk).update(from_chunk=split2)
assert chunk.out_edges.count() == 0
self._add_edges(chunk, Edge(from_chunk=chunk, fragment=chunk.initial_fragment, to_chunk=split2))
entries = []
for fcl in chunk.fragment_chunk_location_set.filter(fragment_id__in=index_to_update):
entries.append(Fragment_Chunk_Location(fragment_id=fcl.fragment_id, chunk_id=split2.id, base_first=fcl.base_first + bps_to_split, base_last=fcl.base_last))
Fragment_Chunk_Location.bulk_create(entries)
chunk.fragment_chunk_location_set.filter(fragment_id__in=index_to_update).update(base_last=F('base_first') + bps_to_split - 1)
split2 = split2
</DeepExtract>
chunk = chunk.reload()
<DeepExtract>
cfs = []
for a in original_annotations:
if a.feature.strand > 0:
a1 = (a.feature, a.feature_base_first, a.feature_base_first + bps_to_split - 1)
a2 = (a.feature, a.feature_base_first + bps_to_split, a.feature_base_last)
else:
a1 = (a.feature, a.feature_base_last - bps_to_split + 1, a.feature_base_last)
a2 = (a.feature, a.feature_base_first, a.feature_base_last - bps_to_split)
cfs.append(self._create_chunk_annotation(chunk, *a1))
cfs.append(self._create_chunk_annotation(split2, *a2))
Chunk_Feature.bulk_create(cfs)
</DeepExtract>
return (chunk, split2)
else:
return (prev_chunk, None)
|
def _find_and_split_before(self, before_base1):
if before_base1 is not None and before_base1 <= 0:
raise Exception('chunk index should be 1-based')
chunk = None
prev_chunk = None
next_chunk = None
bases_visited = 0
prev_chunk = None
next_chunk = None
chunk = None
bases_visited = None
if before_base1 is not None:
q = self.fragment_chunk_location_set.filter(base_first__lte=before_base1, base_last__gte=before_base1)
q = list(q)
if len(q) > 0:
fc = q[0]
chunk = fc.chunk
bases_visited = fc.base_last
next_chunk = fc.next_chunk
if fc.base_first == 1:
prev_chunk = None
else:
prev_chunk = fc.prev_fragment_chunk.chunk
if chunk is None:
total_bases = self.length
if total_bases > 0:
prev_chunk = self.fragment_chunk_location_set.get(base_last=total_bases).chunk
bases_visited = total_bases
(prev_chunk, chunk, next_chunk, bases_visited) = (prev_chunk, chunk, next_chunk, bases_visited)
if before_base1 is not None and bases_visited >= before_base1:
chunk_len = chunk.length
if bases_visited - chunk_len + 1 == before_base1:
return (prev_chunk, chunk)
first_bp_in_chunk = bases_visited - chunk_len + 1
bps_to_split = before_base1 - first_bp_in_chunk
original_annotations = self.fragment_chunk(chunk).annotations()
candidates = chunk.fragment_chunk_location_set.filter(fragment__fragment_index__fresh=True).values_list('fragment_id').distinct()
index_to_update = [row[0] for row in candidates]
if chunk.is_sequence_based:
chunk_sequence = chunk.get_sequence()
s1 = chunk_sequence[0:bps_to_split]
s2 = chunk_sequence[bps_to_split:]
split2 = self._add_chunk(s2, chunk.initial_fragment)
self.__reset_chunk_sequence(chunk, s1)
elif chunk.is_reference_based:
split_start = chunk.ref_start_index + bps_to_split
split2 = self._add_reference_chunk(split_start, chunk.ref_end_index, chunk.initial_fragment)
self.__reset_chunk_reference(chunk, chunk.ref_start_index, split_start - 1)
Edge.objects.filter(from_chunk=chunk).update(from_chunk=split2)
assert chunk.out_edges.count() == 0
self._add_edges(chunk, Edge(from_chunk=chunk, fragment=chunk.initial_fragment, to_chunk=split2))
entries = []
for fcl in chunk.fragment_chunk_location_set.filter(fragment_id__in=index_to_update):
entries.append(Fragment_Chunk_Location(fragment_id=fcl.fragment_id, chunk_id=split2.id, base_first=fcl.base_first + bps_to_split, base_last=fcl.base_last))
Fragment_Chunk_Location.bulk_create(entries)
chunk.fragment_chunk_location_set.filter(fragment_id__in=index_to_update).update(base_last=F('base_first') + bps_to_split - 1)
split2 = split2
chunk = chunk.reload()
cfs = []
for a in original_annotations:
if a.feature.strand > 0:
a1 = (a.feature, a.feature_base_first, a.feature_base_first + bps_to_split - 1)
a2 = (a.feature, a.feature_base_first + bps_to_split, a.feature_base_last)
else:
a1 = (a.feature, a.feature_base_last - bps_to_split + 1, a.feature_base_last)
a2 = (a.feature, a.feature_base_first, a.feature_base_last - bps_to_split)
cfs.append(self._create_chunk_annotation(chunk, *a1))
cfs.append(self._create_chunk_annotation(split2, *a2))
Chunk_Feature.bulk_create(cfs)
return (chunk, split2)
else:
return (prev_chunk, None)
|
edge
|
positive
|
@patch('smtplib.SMTP', autospec=True)
def test_jurisdiction_admin_reuse_code(mock_smtp, client: FlaskClient, ja_email: str):
config.LOGIN_CODE_LIFETIME = timedelta(seconds=1)
rv = post_json(client, '/auth/jurisdictionadmin/code', dict(email=ja_email))
assert_ok(rv)
<DeepExtract>
message = mock_smtp.return_value.send_message.call_args.args[0]
code = parse_login_code(message.get_body('plain').get_content())
</DeepExtract>
rv = post_json(client, '/auth/jurisdictionadmin/code', dict(email=ja_email))
assert_ok(rv)
assert parse_login_code_from_smtp(mock_smtp) == code
time.sleep(1.0)
rv = post_json(client, '/auth/jurisdictionadmin/code', dict(email=ja_email))
assert_ok(rv)
assert parse_login_code_from_smtp(mock_smtp) != code
|
@patch('smtplib.SMTP', autospec=True)
def test_jurisdiction_admin_reuse_code(mock_smtp, client: FlaskClient, ja_email: str):
config.LOGIN_CODE_LIFETIME = timedelta(seconds=1)
rv = post_json(client, '/auth/jurisdictionadmin/code', dict(email=ja_email))
assert_ok(rv)
message = mock_smtp.return_value.send_message.call_args.args[0]
code = parse_login_code(message.get_body('plain').get_content())
rv = post_json(client, '/auth/jurisdictionadmin/code', dict(email=ja_email))
assert_ok(rv)
assert parse_login_code_from_smtp(mock_smtp) == code
time.sleep(1.0)
rv = post_json(client, '/auth/jurisdictionadmin/code', dict(email=ja_email))
assert_ok(rv)
assert parse_login_code_from_smtp(mock_smtp) != code
|
arlo
|
positive
|
def depart_topic_title(self, node):
<DeepExtract>
if isinstance('\\par', six.string_types):
'\\par' = ['\\par']
for c in '\\par':
if c:
self.context.append(c)
self.last_output_char = c[-1]
</DeepExtract>
self.src_sp()
self.sp()
|
def depart_topic_title(self, node):
if isinstance('\\par', six.string_types):
'\\par' = ['\\par']
for c in '\\par':
if c:
self.context.append(c)
self.last_output_char = c[-1]
self.src_sp()
self.sp()
|
ebookmaker
|
positive
|
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
<DeepExtract>
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
loss = loss
elif reduction_enum == 1:
loss = loss.mean()
elif reduction_enum == 2:
loss = loss.sum()
</DeepExtract>
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
|
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
loss = loss
elif reduction_enum == 1:
loss = loss.mean()
elif reduction_enum == 2:
loss = loss.sum()
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
|
ACSL
|
positive
|
def post(self, request, *args, **kwargs):
if self.dotpath() and '_delete' in self.request.POST:
return self.delete_subobject()
<DeepExtract>
form_class = self.form_class
</DeepExtract>
<DeepExtract>
form = form_class(**self.get_form_kwargs())
</DeepExtract>
<DeepExtract>
formsets_cls = self.admin.get_formsets(self.request, obj=self.get_active_object(), view=self)
prefixes = {}
formsets = list()
for (FormSet, inline) in zip(formsets_cls, self.admin.get_inline_instances()):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = '%s-%s' % (prefix, prefixes[prefix])
kwargs = self.get_formset_kwargs()
kwargs['prefix'] = prefix
if self.dotpath():
kwargs['dotpath'] = self.dotpath() + '.' + inline.dotpath
else:
kwargs['dotpath'] = inline.dotpath
formset = FormSet(**kwargs)
formsets.append(formset)
formsets = formsets
</DeepExtract>
if not form.is_valid():
return self.form_invalid(form)
for formset in formsets:
if not formset.is_valid():
return self.form_invalid(form)
obj = form.save(commit=False)
for formset in formsets:
formset.save(instance=obj)
obj._tempinfo.user = request.user
if obj._tempinfo.number_of_changes is None:
obj._tempinfo.number_of_changes = 0
if not self.next_dotpath():
obj._tempinfo.number_of_changes += 1
obj.save()
assert obj._meta.collection == self.temp_document._meta.collection
params = {'_tempdoc': obj.get_id()}
if self.basepath():
<DeepExtract>
params['_basepath'] = self.request.GET.get('_basepath', None)
</DeepExtract>
if self.next_dotpath():
<DeepExtract>
token = '[fragment]'
for key in self.request.POST.iterkeys():
if key.startswith(token):
info = dict(parse_qsl(key[len(token):]))
if info:
info = info
info = {}
</DeepExtract>
<DeepExtract>
dotpath = self.next_dotpath()
token = '[fragment-passthrough]'
passthrough = dict()
for (key, value) in self.request.POST.iteritems():
if key.startswith(token):
info = dict(parse_qsl(key[len(token):]))
if info and info.pop('next_dotpath', None) == dotpath:
passthrough[info['name']] = value
passthrough = passthrough
</DeepExtract>
params.update({'_dotpath': self.next_dotpath(), '_parent_dotpath': self.dotpath() or ''})
params.update(passthrough)
return HttpResponseRedirect('%s?%s' % (request.path, urlencode(params)))
if self.dotpath():
next_dotpath = None
if self.dotpath() == self.basepath():
del params['_tempdoc']
if obj._meta.collection != self.document._meta.collection:
self.object = obj.commit_changes(self.kwargs.get('pk', None))
else:
self.object = obj
if 'pk' in self.kwargs:
assert str(self.object.pk) == self.kwargs['pk']
if self.temporary_document_id():
self.get_temporary_store().delete()
if self.request.POST.get('_continue', False):
<DeepExtract>
next_dotpath = self.request.GET.get('_dotpath', None)
</DeepExtract>
else:
return self.form_valid(form)
elif self.request.POST.get('_continue', False):
<DeepExtract>
next_dotpath = self.request.GET.get('_dotpath', None)
</DeepExtract>
else:
<DeepExtract>
next_dotpath = self.request.GET.get('_parent_dotpath', None)
</DeepExtract>
if next_dotpath is None:
<DeepExtract>
dotpath = self.request.GET.get('_dotpath', None)
</DeepExtract>
if '.' in dotpath:
next_dotpath = dotpath[:dotpath.rfind('.')]
field = obj.dot_notation_to_field(next_dotpath)
if isinstance(field, ListField):
if '.' in next_dotpath:
next_dotpath = next_dotpath[:next_dotpath.rfind('.')]
else:
next_dotpath = None
if next_dotpath:
params['_dotpath'] = next_dotpath
return HttpResponseRedirect('%s?%s' % (request.path, urlencode(params)))
if obj._meta.collection != self.document._meta.collection:
self.object = obj.commit_changes(self.kwargs.get('pk', None))
else:
self.object = obj
if 'pk' in self.kwargs:
assert str(self.object.pk) == self.kwargs['pk']
if self.temporary_document_id():
self.get_temporary_store().delete()
return self.form_valid(form)
|
def post(self, request, *args, **kwargs):
if self.dotpath() and '_delete' in self.request.POST:
return self.delete_subobject()
form_class = self.form_class
form = form_class(**self.get_form_kwargs())
formsets_cls = self.admin.get_formsets(self.request, obj=self.get_active_object(), view=self)
prefixes = {}
formsets = list()
for (FormSet, inline) in zip(formsets_cls, self.admin.get_inline_instances()):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1:
prefix = '%s-%s' % (prefix, prefixes[prefix])
kwargs = self.get_formset_kwargs()
kwargs['prefix'] = prefix
if self.dotpath():
kwargs['dotpath'] = self.dotpath() + '.' + inline.dotpath
else:
kwargs['dotpath'] = inline.dotpath
formset = FormSet(**kwargs)
formsets.append(formset)
formsets = formsets
if not form.is_valid():
return self.form_invalid(form)
for formset in formsets:
if not formset.is_valid():
return self.form_invalid(form)
obj = form.save(commit=False)
for formset in formsets:
formset.save(instance=obj)
obj._tempinfo.user = request.user
if obj._tempinfo.number_of_changes is None:
obj._tempinfo.number_of_changes = 0
if not self.next_dotpath():
obj._tempinfo.number_of_changes += 1
obj.save()
assert obj._meta.collection == self.temp_document._meta.collection
params = {'_tempdoc': obj.get_id()}
if self.basepath():
params['_basepath'] = self.request.GET.get('_basepath', None)
if self.next_dotpath():
token = '[fragment]'
for key in self.request.POST.iterkeys():
if key.startswith(token):
info = dict(parse_qsl(key[len(token):]))
if info:
info = info
info = {}
dotpath = self.next_dotpath()
token = '[fragment-passthrough]'
passthrough = dict()
for (key, value) in self.request.POST.iteritems():
if key.startswith(token):
info = dict(parse_qsl(key[len(token):]))
if info and info.pop('next_dotpath', None) == dotpath:
passthrough[info['name']] = value
passthrough = passthrough
params.update({'_dotpath': self.next_dotpath(), '_parent_dotpath': self.dotpath() or ''})
params.update(passthrough)
return HttpResponseRedirect('%s?%s' % (request.path, urlencode(params)))
if self.dotpath():
next_dotpath = None
if self.dotpath() == self.basepath():
del params['_tempdoc']
if obj._meta.collection != self.document._meta.collection:
self.object = obj.commit_changes(self.kwargs.get('pk', None))
else:
self.object = obj
if 'pk' in self.kwargs:
assert str(self.object.pk) == self.kwargs['pk']
if self.temporary_document_id():
self.get_temporary_store().delete()
if self.request.POST.get('_continue', False):
next_dotpath = self.request.GET.get('_dotpath', None)
else:
return self.form_valid(form)
elif self.request.POST.get('_continue', False):
next_dotpath = self.request.GET.get('_dotpath', None)
else:
next_dotpath = self.request.GET.get('_parent_dotpath', None)
if next_dotpath is None:
dotpath = self.request.GET.get('_dotpath', None)
if '.' in dotpath:
next_dotpath = dotpath[:dotpath.rfind('.')]
field = obj.dot_notation_to_field(next_dotpath)
if isinstance(field, ListField):
if '.' in next_dotpath:
next_dotpath = next_dotpath[:next_dotpath.rfind('.')]
else:
next_dotpath = None
if next_dotpath:
params['_dotpath'] = next_dotpath
return HttpResponseRedirect('%s?%s' % (request.path, urlencode(params)))
if obj._meta.collection != self.document._meta.collection:
self.object = obj.commit_changes(self.kwargs.get('pk', None))
else:
self.object = obj
if 'pk' in self.kwargs:
assert str(self.object.pk) == self.kwargs['pk']
if self.temporary_document_id():
self.get_temporary_store().delete()
return self.form_valid(form)
|
django-dockit
|
positive
|
def test_verify_active_entry(self):
"""
A user shouldn't be able to verify a timesheet if it contains
an active entry and should be redirected back to the ledger
"""
<DeepExtract>
self.admin = factories.Superuser()
self.login_user(self.admin)
</DeepExtract>
entry1 = factories.Entry(**{'user': self.user, 'start_time': self.now - relativedelta(hours=5), 'end_time': self.now - relativedelta(hours=4), 'status': Entry.UNVERIFIED})
entry2 = factories.Entry(**{'user': self.user, 'start_time': self.now - relativedelta(hours=1), 'status': Entry.UNVERIFIED})
response = self.client.get(self.verify_url(), follow=True)
self.assertEquals(response.status_code, 200)
messages = response.context['messages']
msg = 'You cannot verify/approve this timesheet while the user {0} has an active entry. Please have them close any active entries.'.format(self.user.get_name_or_username())
self.assertEquals(messages._loaded_messages[0].message, msg)
self.assertEquals(entry1.status, Entry.UNVERIFIED)
self.assertEquals(entry2.status, Entry.UNVERIFIED)
response = self.client.post(self.verify_url(), follow=True)
self.assertEquals(response.status_code, 200)
messages = response.context['messages']
self.assertEquals(messages._loaded_messages[0].message, msg)
self.assertEquals(entry1.status, Entry.UNVERIFIED)
self.assertEquals(entry2.status, Entry.UNVERIFIED)
|
def test_verify_active_entry(self):
"""
A user shouldn't be able to verify a timesheet if it contains
an active entry and should be redirected back to the ledger
"""
self.admin = factories.Superuser()
self.login_user(self.admin)
entry1 = factories.Entry(**{'user': self.user, 'start_time': self.now - relativedelta(hours=5), 'end_time': self.now - relativedelta(hours=4), 'status': Entry.UNVERIFIED})
entry2 = factories.Entry(**{'user': self.user, 'start_time': self.now - relativedelta(hours=1), 'status': Entry.UNVERIFIED})
response = self.client.get(self.verify_url(), follow=True)
self.assertEquals(response.status_code, 200)
messages = response.context['messages']
msg = 'You cannot verify/approve this timesheet while the user {0} has an active entry. Please have them close any active entries.'.format(self.user.get_name_or_username())
self.assertEquals(messages._loaded_messages[0].message, msg)
self.assertEquals(entry1.status, Entry.UNVERIFIED)
self.assertEquals(entry2.status, Entry.UNVERIFIED)
response = self.client.post(self.verify_url(), follow=True)
self.assertEquals(response.status_code, 200)
messages = response.context['messages']
self.assertEquals(messages._loaded_messages[0].message, msg)
self.assertEquals(entry1.status, Entry.UNVERIFIED)
self.assertEquals(entry2.status, Entry.UNVERIFIED)
|
django-timepiece
|
positive
|
def prepare_nodes(self, access: GraphAccess, node_cursor: Iterable[Json], model: Model) -> Tuple[GraphUpdate, List[Json], List[Json], List[Json]]:
log.info(f'Prepare nodes for subgraph {access.root()}')
info = GraphUpdate()
resource_inserts: List[Json] = []
resource_updates: List[Json] = []
resource_deletes: List[Json] = []
optional_properties = [*Section.all_ordered, 'refs', 'kinds', 'flat', 'hash']
def insert_node(node: Json) -> None:
<DeepExtract>
reported = node[Section.reported]
if not reported.get('ctime', None):
kind = model[reported]
if isinstance(kind, ComplexKind) and 'ctime' in kind:
reported['ctime'] = access.at_json
elem = self.node_adjuster.adjust(node)
</DeepExtract>
js_doc: Json = {'_key': elem['id'], 'created': access.at_json, 'updated': access.at_json}
for prop in optional_properties:
value = node.get(prop, None)
if value:
js_doc[prop] = value
resource_inserts.append(js_doc)
info.nodes_created += 1
def update_or_delete_node(node: Json) -> None:
key = node['_key']
hash_string = node['hash']
elem = access.node(key)
if elem is None:
resource_deletes.append({'_key': key, 'deleted': access.at_json})
info.nodes_deleted += 1
elif elem['hash'] != hash_string:
adjusted: Json = self.adjust_node(model, elem, node['created'])
js = {'_key': key, 'created': node['created'], 'updated': access.at_json}
for prop in optional_properties:
value = adjusted.get(prop, None)
if value:
js[prop] = value
resource_updates.append(js)
info.nodes_updated += 1
for doc in node_cursor:
<DeepExtract>
key = doc['_key']
hash_string = doc['hash']
elem = access.node(key)
if elem is None:
resource_deletes.append({'_key': key, 'deleted': access.at_json})
info.nodes_deleted += 1
elif elem['hash'] != hash_string:
adjusted: Json = self.adjust_node(model, elem, doc['created'])
js = {'_key': key, 'created': doc['created'], 'updated': access.at_json}
for prop in optional_properties:
value = adjusted.get(prop, None)
if value:
js[prop] = value
resource_updates.append(js)
info.nodes_updated += 1
</DeepExtract>
for not_visited in access.not_visited_nodes():
<DeepExtract>
elem = self.adjust_node(model, not_visited, access.at_json)
js_doc: Json = {'_key': elem['id'], 'created': access.at_json, 'updated': access.at_json}
for prop in optional_properties:
value = not_visited.get(prop, None)
if value:
js_doc[prop] = value
resource_inserts.append(js_doc)
info.nodes_created += 1
</DeepExtract>
return (info, resource_inserts, resource_updates, resource_deletes)
|
def prepare_nodes(self, access: GraphAccess, node_cursor: Iterable[Json], model: Model) -> Tuple[GraphUpdate, List[Json], List[Json], List[Json]]:
log.info(f'Prepare nodes for subgraph {access.root()}')
info = GraphUpdate()
resource_inserts: List[Json] = []
resource_updates: List[Json] = []
resource_deletes: List[Json] = []
optional_properties = [*Section.all_ordered, 'refs', 'kinds', 'flat', 'hash']
def insert_node(node: Json) -> None:
reported = node[Section.reported]
if not reported.get('ctime', None):
kind = model[reported]
if isinstance(kind, ComplexKind) and 'ctime' in kind:
reported['ctime'] = access.at_json
elem = self.node_adjuster.adjust(node)
js_doc: Json = {'_key': elem['id'], 'created': access.at_json, 'updated': access.at_json}
for prop in optional_properties:
value = node.get(prop, None)
if value:
js_doc[prop] = value
resource_inserts.append(js_doc)
info.nodes_created += 1
def update_or_delete_node(node: Json) -> None:
key = node['_key']
hash_string = node['hash']
elem = access.node(key)
if elem is None:
resource_deletes.append({'_key': key, 'deleted': access.at_json})
info.nodes_deleted += 1
elif elem['hash'] != hash_string:
adjusted: Json = self.adjust_node(model, elem, node['created'])
js = {'_key': key, 'created': node['created'], 'updated': access.at_json}
for prop in optional_properties:
value = adjusted.get(prop, None)
if value:
js[prop] = value
resource_updates.append(js)
info.nodes_updated += 1
for doc in node_cursor:
key = doc['_key']
hash_string = doc['hash']
elem = access.node(key)
if elem is None:
resource_deletes.append({'_key': key, 'deleted': access.at_json})
info.nodes_deleted += 1
elif elem['hash'] != hash_string:
adjusted: Json = self.adjust_node(model, elem, doc['created'])
js = {'_key': key, 'created': doc['created'], 'updated': access.at_json}
for prop in optional_properties:
value = adjusted.get(prop, None)
if value:
js[prop] = value
resource_updates.append(js)
info.nodes_updated += 1
for not_visited in access.not_visited_nodes():
elem = self.adjust_node(model, not_visited, access.at_json)
js_doc: Json = {'_key': elem['id'], 'created': access.at_json, 'updated': access.at_json}
for prop in optional_properties:
value = not_visited.get(prop, None)
if value:
js_doc[prop] = value
resource_inserts.append(js_doc)
info.nodes_created += 1
return (info, resource_inserts, resource_updates, resource_deletes)
|
cloudkeeper
|
positive
|
def toc(self, average=True):
<DeepExtract>
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
</DeepExtract>
if average:
return self.average_time
else:
return self.diff
|
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
if average:
return self.average_time
else:
return self.diff
|
Domain-Adaptive-Faster-RCNN-PyTorch
|
positive
|
def is_blurry(image, threshold):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
<DeepExtract>
fm = cv2.Laplacian(gray, cv2.CV_64F).var()
</DeepExtract>
return (fm < threshold, fm)
|
def is_blurry(image, threshold):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fm = cv2.Laplacian(gray, cv2.CV_64F).var()
return (fm < threshold, fm)
|
DeepFakeTutorial
|
positive
|
def merge_voc(label_root_list, style='intersection', label_major=True):
"""
For intersection: only merges their intersection part;
For union: will merge all of them
merge VOC with multiple datasets (one of them may have partial object labeled)
"""
logging.info('labels root: {}'.format(label_root_list))
filenames = []
if style == 'union':
logging.info('merge with union style.')
for l in label_root_list:
xmls = [os.path.basename(i) for i in glob(os.path.join(l, '*.xml'))]
logging.info('found {} xmls under: {}'.format(len(xmls), l))
filenames.extend(xmls)
else:
logging.info('merge with intersection style.')
for l in label_root_list:
xmls = [os.path.basename(i) for i in glob(os.path.join(l, '*.xml'))]
logging.info('found {} xmls under: {}'.format(len(xmls), l))
if len(filenames) > 0:
filenames = set(xmls) & set(filenames)
filenames = xmls
filenames = list(set(filenames))
logging.info('found {} xmls which exist both inside all provided label roots.'.format(len(filenames)))
target_save_root = './merged_voc_annotations'
os.makedirs(target_save_root, exist_ok=True)
for f in filenames:
n_merge_files = []
for l in label_root_list:
n_merge_files.append(os.path.join(l, f))
<DeepExtract>
f_name = os.path.basename(n_merge_files[0])
n = len(n_merge_files)
for f in n_merge_files:
if os.path.exists(f):
tree = ET.parse(f)
root = tree.getroot()
objs = get(root, 'object')
n_merge_files.remove(f)
break
for xml in n_merge_files:
if os.path.exists(xml):
t = ET.parse(xml)
r = t.getroot()
for obj in get(r, 'object'):
root.append(obj)
tree.write(os.path.join(target_save_root, f_name))
print('merged {} xmls and saved into: {}'.format(n, os.path.join(target_save_root, f_name)))
</DeepExtract>
print('done.')
|
def merge_voc(label_root_list, style='intersection', label_major=True):
"""
For intersection: only merges their intersection part;
For union: will merge all of them
merge VOC with multiple datasets (one of them may have partial object labeled)
"""
logging.info('labels root: {}'.format(label_root_list))
filenames = []
if style == 'union':
logging.info('merge with union style.')
for l in label_root_list:
xmls = [os.path.basename(i) for i in glob(os.path.join(l, '*.xml'))]
logging.info('found {} xmls under: {}'.format(len(xmls), l))
filenames.extend(xmls)
else:
logging.info('merge with intersection style.')
for l in label_root_list:
xmls = [os.path.basename(i) for i in glob(os.path.join(l, '*.xml'))]
logging.info('found {} xmls under: {}'.format(len(xmls), l))
if len(filenames) > 0:
filenames = set(xmls) & set(filenames)
filenames = xmls
filenames = list(set(filenames))
logging.info('found {} xmls which exist both inside all provided label roots.'.format(len(filenames)))
target_save_root = './merged_voc_annotations'
os.makedirs(target_save_root, exist_ok=True)
for f in filenames:
n_merge_files = []
for l in label_root_list:
n_merge_files.append(os.path.join(l, f))
f_name = os.path.basename(n_merge_files[0])
n = len(n_merge_files)
for f in n_merge_files:
if os.path.exists(f):
tree = ET.parse(f)
root = tree.getroot()
objs = get(root, 'object')
n_merge_files.remove(f)
break
for xml in n_merge_files:
if os.path.exists(xml):
t = ET.parse(xml)
r = t.getroot()
for obj in get(r, 'object'):
root.append(obj)
tree.write(os.path.join(target_save_root, f_name))
print('merged {} xmls and saved into: {}'.format(n, os.path.join(target_save_root, f_name)))
print('done.')
|
alfred
|
positive
|
def send_verification_email(request, user, new_email=None, age=None, data=None):
"""Send an email prompting the user to verify their email address."""
if not new_email:
user.email_verifications.all().delete()
<DeepExtract>
verification = EmailVerification.objects.create(user=user, email=email, token=uuid4().hex[:30], expiry=timezone.now() + datetime.timedelta(hours=1), verified=preverified)
</DeepExtract>
if age is not None and age < 13:
message = parentsEmailVerificationNeededEmail(request, user, verification.token)
<DeepExtract>
plaintext = loader.get_template(plaintext_template)
html = loader.get_template(html_template)
plaintext_email_context = {'content': message['message']}
html_email_context = {'content': message['message'], 'title': message['subject'], 'url_prefix': domain()}
plaintext_body = plaintext.render(plaintext_email_context)
html_body = html.render(html_email_context)
message = EmailMultiAlternatives(message['subject'], plaintext_body, VERIFICATION_EMAIL, [user.email])
message.attach_alternative(html_body, 'text/html')
message.send()
</DeepExtract>
else:
if data is not None and _newsletter_ticked(data):
<DeepExtract>
try:
create_contact(user.first_name, user.last_name, user.email)
add_contact_to_address_book(user.first_name, user.last_name, user.email, DotmailerUserType.STUDENT)
except RequestException:
return HttpResponse(status=404)
</DeepExtract>
message = emailVerificationNeededEmail(request, verification.token)
<DeepExtract>
plaintext = loader.get_template(plaintext_template)
html = loader.get_template(html_template)
plaintext_email_context = {'content': message['message']}
html_email_context = {'content': message['message'], 'title': message['subject'], 'url_prefix': domain()}
plaintext_body = plaintext.render(plaintext_email_context)
html_body = html.render(html_email_context)
message = EmailMultiAlternatives(message['subject'], plaintext_body, VERIFICATION_EMAIL, [user.email])
message.attach_alternative(html_body, 'text/html')
message.send()
</DeepExtract>
else:
<DeepExtract>
verification = EmailVerification.objects.create(user=user, email=new_email, token=uuid4().hex[:30], expiry=timezone.now() + datetime.timedelta(hours=1), verified=preverified)
</DeepExtract>
message = emailChangeVerificationEmail(request, verification.token)
<DeepExtract>
plaintext = loader.get_template(plaintext_template)
html = loader.get_template(html_template)
plaintext_email_context = {'content': message['message']}
html_email_context = {'content': message['message'], 'title': message['subject'], 'url_prefix': domain()}
plaintext_body = plaintext.render(plaintext_email_context)
html_body = html.render(html_email_context)
message = EmailMultiAlternatives(message['subject'], plaintext_body, VERIFICATION_EMAIL, [user.email])
message.attach_alternative(html_body, 'text/html')
message.send()
</DeepExtract>
message = emailChangeNotificationEmail(request, new_email)
<DeepExtract>
plaintext = loader.get_template(plaintext_template)
html = loader.get_template(html_template)
plaintext_email_context = {'content': message['message']}
html_email_context = {'content': message['message'], 'title': message['subject'], 'url_prefix': domain()}
plaintext_body = plaintext.render(plaintext_email_context)
html_body = html.render(html_email_context)
message = EmailMultiAlternatives(message['subject'], plaintext_body, VERIFICATION_EMAIL, [user.email])
message.attach_alternative(html_body, 'text/html')
message.send()
</DeepExtract>
|
def send_verification_email(request, user, new_email=None, age=None, data=None):
"""Send an email prompting the user to verify their email address."""
if not new_email:
user.email_verifications.all().delete()
verification = EmailVerification.objects.create(user=user, email=email, token=uuid4().hex[:30], expiry=timezone.now() + datetime.timedelta(hours=1), verified=preverified)
if age is not None and age < 13:
message = parentsEmailVerificationNeededEmail(request, user, verification.token)
plaintext = loader.get_template(plaintext_template)
html = loader.get_template(html_template)
plaintext_email_context = {'content': message['message']}
html_email_context = {'content': message['message'], 'title': message['subject'], 'url_prefix': domain()}
plaintext_body = plaintext.render(plaintext_email_context)
html_body = html.render(html_email_context)
message = EmailMultiAlternatives(message['subject'], plaintext_body, VERIFICATION_EMAIL, [user.email])
message.attach_alternative(html_body, 'text/html')
message.send()
else:
if data is not None and _newsletter_ticked(data):
try:
create_contact(user.first_name, user.last_name, user.email)
add_contact_to_address_book(user.first_name, user.last_name, user.email, DotmailerUserType.STUDENT)
except RequestException:
return HttpResponse(status=404)
message = emailVerificationNeededEmail(request, verification.token)
plaintext = loader.get_template(plaintext_template)
html = loader.get_template(html_template)
plaintext_email_context = {'content': message['message']}
html_email_context = {'content': message['message'], 'title': message['subject'], 'url_prefix': domain()}
plaintext_body = plaintext.render(plaintext_email_context)
html_body = html.render(html_email_context)
message = EmailMultiAlternatives(message['subject'], plaintext_body, VERIFICATION_EMAIL, [user.email])
message.attach_alternative(html_body, 'text/html')
message.send()
else:
verification = EmailVerification.objects.create(user=user, email=new_email, token=uuid4().hex[:30], expiry=timezone.now() + datetime.timedelta(hours=1), verified=preverified)
message = emailChangeVerificationEmail(request, verification.token)
plaintext = loader.get_template(plaintext_template)
html = loader.get_template(html_template)
plaintext_email_context = {'content': message['message']}
html_email_context = {'content': message['message'], 'title': message['subject'], 'url_prefix': domain()}
plaintext_body = plaintext.render(plaintext_email_context)
html_body = html.render(html_email_context)
message = EmailMultiAlternatives(message['subject'], plaintext_body, VERIFICATION_EMAIL, [user.email])
message.attach_alternative(html_body, 'text/html')
message.send()
message = emailChangeNotificationEmail(request, new_email)
plaintext = loader.get_template(plaintext_template)
html = loader.get_template(html_template)
plaintext_email_context = {'content': message['message']}
html_email_context = {'content': message['message'], 'title': message['subject'], 'url_prefix': domain()}
plaintext_body = plaintext.render(plaintext_email_context)
html_body = html.render(html_email_context)
message = EmailMultiAlternatives(message['subject'], plaintext_body, VERIFICATION_EMAIL, [user.email])
message.attach_alternative(html_body, 'text/html')
message.send()
|
codeforlife-portal
|
positive
|
def _to_expr(self, u):
if u == 1:
return 'TRUE'
if u == -1:
return 'FALSE'
(i, v, w) = self._succ[abs(u)]
var = self._level_to_var[i]
<DeepExtract>
if v == 1:
p = 'TRUE'
if v == -1:
p = 'FALSE'
(i, v, w) = self._succ[abs(v)]
var = self._level_to_var[i]
p = self._to_expr(v)
q = self._to_expr(w)
if p == 'FALSE' and q == 'TRUE':
s = var
else:
s = 'ite({var}, {q}, {p})'.format(var=var, p=p, q=q)
if v < 0:
s = '(~ {s})'.format(s=s)
p = s
</DeepExtract>
<DeepExtract>
if w == 1:
q = 'TRUE'
if w == -1:
q = 'FALSE'
(i, v, w) = self._succ[abs(w)]
var = self._level_to_var[i]
p = self._to_expr(v)
q = self._to_expr(w)
if p == 'FALSE' and q == 'TRUE':
s = var
else:
s = 'ite({var}, {q}, {p})'.format(var=var, p=p, q=q)
if w < 0:
s = '(~ {s})'.format(s=s)
q = s
</DeepExtract>
if p == 'FALSE' and q == 'TRUE':
s = var
else:
s = 'ite({var}, {q}, {p})'.format(var=var, p=p, q=q)
if u < 0:
s = '(~ {s})'.format(s=s)
return s
|
def _to_expr(self, u):
if u == 1:
return 'TRUE'
if u == -1:
return 'FALSE'
(i, v, w) = self._succ[abs(u)]
var = self._level_to_var[i]
if v == 1:
p = 'TRUE'
if v == -1:
p = 'FALSE'
(i, v, w) = self._succ[abs(v)]
var = self._level_to_var[i]
p = self._to_expr(v)
q = self._to_expr(w)
if p == 'FALSE' and q == 'TRUE':
s = var
else:
s = 'ite({var}, {q}, {p})'.format(var=var, p=p, q=q)
if v < 0:
s = '(~ {s})'.format(s=s)
p = s
if w == 1:
q = 'TRUE'
if w == -1:
q = 'FALSE'
(i, v, w) = self._succ[abs(w)]
var = self._level_to_var[i]
p = self._to_expr(v)
q = self._to_expr(w)
if p == 'FALSE' and q == 'TRUE':
s = var
else:
s = 'ite({var}, {q}, {p})'.format(var=var, p=p, q=q)
if w < 0:
s = '(~ {s})'.format(s=s)
q = s
if p == 'FALSE' and q == 'TRUE':
s = var
else:
s = 'ite({var}, {q}, {p})'.format(var=var, p=p, q=q)
if u < 0:
s = '(~ {s})'.format(s=s)
return s
|
dd
|
positive
|
def search_test(t, test_nocrc=0, extra=None):
cfn = os.path.join(os.getcwd(), 'test.' + t)
<DeepExtract>
hassize = fmt_info[t][1]
</DeepExtract>
if test_nocrc:
hascrc = 0
cmd = cfvcmd + ' -m'
else:
<DeepExtract>
hascrc = fmt_info[t][0]
</DeepExtract>
cmd = cfvcmd
if extra:
cmd += ' ' + extra
if not hascrc and (not hassize):
d = tempfile.mkdtemp()
try:
for (n, n2) in zip(list(range(1, 5)), list(range(4, 0, -1))):
shutil.copyfile('data%s' % n, os.path.join(d, 'fOoO%s' % n2))
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -n -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -n -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -u -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4, unv=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -u -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
finally:
shutil.rmtree(d)
return
d = tempfile.mkdtemp()
try:
def dont_find_same_file_twice_test(s, o):
if not (o.count('fOoO3') == 1 and o.count('fOoO4') == 1):
return str((o.count('fOoO3'), o.count('fOoO4')))
return cfv_all_test(s, o, ok=4, misnamed=4)
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
for (n, n2) in zip(list(range(1, 5)), list(range(4, 0, -1))):
shutil.copyfile('data%s' % n, os.path.join(d, 'fOoO%s' % n2))
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = dont_find_same_file_twice_test(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=4, misnamed=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -u -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -u -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
finally:
shutil.rmtree(d)
d = tempfile.mkdtemp()
try:
misnamed1 = misnamed2 = 4
if hassize and hascrc:
experrs = {'badcrc': 1, 'badsize': 2}
elif hassize:
experrs = {'badsize': 2, 'ok': 1}
misnamed1 = 3
misnamed2 = OneOf(3, 4)
else:
experrs = {'badcrc': 3}
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
for (n, n2) in zip([1, 3, 4], [4, 2, 1]):
shutil.copyfile('data%s' % n, os.path.join(d, 'data%s' % n2))
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=1, **experrs)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=4, misnamed=misnamed1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=1, **experrs)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=4, misnamed=misnamed2)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -u -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -u -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
finally:
shutil.rmtree(d)
if hasattr(os, 'symlink'):
d = tempfile.mkdtemp()
try:
for (n, n2) in zip([4], [2]):
shutil.copyfile('data%s' % n, os.path.join(d, 'foo%s' % n2))
for n in string.ascii_lowercase:
os.symlink('noexist', os.path.join(d, n))
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=1, misnamed=1, notfound=3)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=1, misnamed=1, notfound=3)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=1, notfound=3)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
finally:
shutil.rmtree(d)
d = tempfile.mkdtemp()
ffoo = fdata4 = None
try:
with open('data4', 'rb') as f:
<DeepExtract>
writefile(os.path.join(d, 'foo'), f.read())
f = open(os.path.join(d, 'foo'), 'rb')
ffoo = f
</DeepExtract>
os.chmod(d, stat.S_IRUSR | stat.S_IXUSR)
try:
os.rename(os.path.join(d, 'foo'), os.path.join(d, 'foo2'))
print('rename of open file in read-only dir worked? skipping this test.')
except EnvironmentError:
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=1, ferror=1, notfound=3)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
os.chmod(d, stat.S_IRWXU)
<DeepExtract>
writefile(os.path.join(d, 'data4'), '')
f = open(os.path.join(d, 'data4'), 'rb')
fdata4 = f
</DeepExtract>
os.chmod(d, stat.S_IRUSR | stat.S_IXUSR)
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=1, ferror=2, notfound=3)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
finally:
os.chmod(d, stat.S_IRWXU)
if ffoo:
ffoo.close()
if fdata4:
fdata4.close()
shutil.rmtree(d)
d = tempfile.mkdtemp()
try:
shutil.copyfile('data4', os.path.join(d, 'foo'))
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=1, notfound=3, unv=0)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=1, notfound=3, unv=0)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
open(os.path.join(d, 'data1'), 'wb').close()
if hassize:
experrs = {'badsize': 1}
else:
experrs = {'badcrc': 1}
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=0, notfound=2, unv=0, **experrs)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=0, notfound=2, unv=1, **experrs)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn), s, o, r, kw)
</DeepExtract>
finally:
shutil.rmtree(d)
if fmt_cancreate(t):
d = tempfile.mkdtemp()
try:
dcfn = os.path.join(d, 'deep.' + t)
os.mkdir(os.path.join(d, 'aOeU.AoEu'))
os.mkdir(os.path.join(d, 'aOeU.AoEu', 'boO.FaRr'))
shutil.copyfile('data1', os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'))
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -rr -C -p %s -t %s -f %s' % (d, t, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -rr -C -p %s -t %s -f %s' % (d, t, dcfn), s, o, r, kw)
</DeepExtract>
os.rename(os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'), os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'Foo1'))
shutil.copyfile('data4', os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'))
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1, misnamed=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
</DeepExtract>
shutil.rmtree(os.path.join(d, 'aOeU.AoEu'))
os.mkdir(os.path.join(d, 'AoEu.aOeU'))
os.mkdir(os.path.join(d, 'AoEu.aOeU', 'BOo.fArR'))
shutil.copyfile('data4', os.path.join(d, 'AoEu.aOeU', 'BOo.fArR', 'dAtA1'))
shutil.copyfile('data1', os.path.join(d, 'AoEu.aOeU', 'BOo.fArR', 'Foo1'))
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -i -v -s -T -p %s -f %s' % (d, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1, misnamed=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -i -v -s -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
</DeepExtract>
if hassize:
experrs = {'badsize': 1}
else:
experrs = {'badcrc': 1}
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -i -v -T -p %s -f %s' % (d, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=0, **experrs)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -i -v -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -i -v -s -n -T -p %s -f %s' % (d, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1, misnamed=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -i -v -s -n -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -i -v -T -p %s -f %s' % (d, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -i -v -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
</DeepExtract>
finally:
shutil.rmtree(d)
if fmt_cancreate(t) and hassize:
d = tempfile.mkdtemp()
try:
dcfn = os.path.join(d, 'foo.' + t)
os.mkdir(os.path.join(d, 'aoeu'))
dirsize = os.path.getsize(os.path.join(d, 'aoeu'))
with open(os.path.join(d, 'idth'), 'wb') as f:
f.write(b'a' * dirsize)
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -C -p %s -t %s -f %s' % (d, t, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -C -p %s -t %s -f %s' % (d, t, dcfn), s, o, r, kw)
</DeepExtract>
os.remove(os.path.join(d, 'idth'))
os.rename(os.path.join(d, 'aoeu'), os.path.join(d, 'idth'))
def dont_find_dir_test(s, o):
if not o.count('idth') == 1:
return str((o.count('idth'),))
return cfv_all_test(s, o, ok=0, notfound=1)
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -m -T -p %s -f %s' % (d, dcfn),), **kw)
r = dont_find_dir_test(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -m -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
</DeepExtract>
<DeepExtract>
(s, o) = cfvtest.runcfv(*(cmd + ' -v -m -s -T -p %s -f %s' % (d, dcfn),), **kw)
r = dont_find_dir_test(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -m -s -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
</DeepExtract>
finally:
shutil.rmtree(d)
|
def search_test(t, test_nocrc=0, extra=None):
cfn = os.path.join(os.getcwd(), 'test.' + t)
hassize = fmt_info[t][1]
if test_nocrc:
hascrc = 0
cmd = cfvcmd + ' -m'
else:
hascrc = fmt_info[t][0]
cmd = cfvcmd
if extra:
cmd += ' ' + extra
if not hascrc and (not hassize):
d = tempfile.mkdtemp()
try:
for (n, n2) in zip(list(range(1, 5)), list(range(4, 0, -1))):
shutil.copyfile('data%s' % n, os.path.join(d, 'fOoO%s' % n2))
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -n -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -n -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -u -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4, unv=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -u -T -p %s -f %s' % (d, cfn), s, o, r, kw)
finally:
shutil.rmtree(d)
return
d = tempfile.mkdtemp()
try:
def dont_find_same_file_twice_test(s, o):
if not (o.count('fOoO3') == 1 and o.count('fOoO4') == 1):
return str((o.count('fOoO3'), o.count('fOoO4')))
return cfv_all_test(s, o, ok=4, misnamed=4)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
for (n, n2) in zip(list(range(1, 5)), list(range(4, 0, -1))):
shutil.copyfile('data%s' % n, os.path.join(d, 'fOoO%s' % n2))
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = dont_find_same_file_twice_test(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=4, misnamed=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -u -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -u -T -p %s -f %s' % (d, cfn), s, o, r, kw)
finally:
shutil.rmtree(d)
d = tempfile.mkdtemp()
try:
misnamed1 = misnamed2 = 4
if hassize and hascrc:
experrs = {'badcrc': 1, 'badsize': 2}
elif hassize:
experrs = {'badsize': 2, 'ok': 1}
misnamed1 = 3
misnamed2 = OneOf(3, 4)
else:
experrs = {'badcrc': 3}
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
for (n, n2) in zip([1, 3, 4], [4, 2, 1]):
shutil.copyfile('data%s' % n, os.path.join(d, 'data%s' % n2))
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=1, **experrs)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=4, misnamed=misnamed1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=1, **experrs)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=4, misnamed=misnamed2)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -u -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -u -T -p %s -f %s' % (d, cfn), s, o, r, kw)
finally:
shutil.rmtree(d)
if hasattr(os, 'symlink'):
d = tempfile.mkdtemp()
try:
for (n, n2) in zip([4], [2]):
shutil.copyfile('data%s' % n, os.path.join(d, 'foo%s' % n2))
for n in string.ascii_lowercase:
os.symlink('noexist', os.path.join(d, n))
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=1, misnamed=1, notfound=3)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, notfound=4)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=1, misnamed=1, notfound=3)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, ok=1, notfound=3)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -T -p %s -f %s' % (d, cfn), s, o, r, kw)
finally:
shutil.rmtree(d)
d = tempfile.mkdtemp()
ffoo = fdata4 = None
try:
with open('data4', 'rb') as f:
writefile(os.path.join(d, 'foo'), f.read())
f = open(os.path.join(d, 'foo'), 'rb')
ffoo = f
os.chmod(d, stat.S_IRUSR | stat.S_IXUSR)
try:
os.rename(os.path.join(d, 'foo'), os.path.join(d, 'foo2'))
print('rename of open file in read-only dir worked? skipping this test.')
except EnvironmentError:
(s, o) = cfvtest.runcfv(*(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=1, ferror=1, notfound=3)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
os.chmod(d, stat.S_IRWXU)
writefile(os.path.join(d, 'data4'), '')
f = open(os.path.join(d, 'data4'), 'rb')
fdata4 = f
os.chmod(d, stat.S_IRUSR | stat.S_IXUSR)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=1, ferror=2, notfound=3)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -n -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
finally:
os.chmod(d, stat.S_IRWXU)
if ffoo:
ffoo.close()
if fdata4:
fdata4.close()
shutil.rmtree(d)
d = tempfile.mkdtemp()
try:
shutil.copyfile('data4', os.path.join(d, 'foo'))
(s, o) = cfvtest.runcfv(*(cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=1, notfound=3, unv=0)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=1, notfound=3, unv=0)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn), s, o, r, kw)
open(os.path.join(d, 'data1'), 'wb').close()
if hassize:
experrs = {'badsize': 1}
else:
experrs = {'badcrc': 1}
(s, o) = cfvtest.runcfv(*(cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=0, notfound=2, unv=0, **experrs)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -uu -s -T -p %s -f %s' % (d, cfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn),), **kw)
r = rcurry(cfv_all_test, files=4, ok=1, misnamed=0, notfound=2, unv=1, **experrs)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -uu -s -n -T -p %s -f %s' % (d, cfn), s, o, r, kw)
finally:
shutil.rmtree(d)
if fmt_cancreate(t):
d = tempfile.mkdtemp()
try:
dcfn = os.path.join(d, 'deep.' + t)
os.mkdir(os.path.join(d, 'aOeU.AoEu'))
os.mkdir(os.path.join(d, 'aOeU.AoEu', 'boO.FaRr'))
shutil.copyfile('data1', os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'))
(s, o) = cfvtest.runcfv(*(cmd + ' -v -rr -C -p %s -t %s -f %s' % (d, t, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -rr -C -p %s -t %s -f %s' % (d, t, dcfn), s, o, r, kw)
os.rename(os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'), os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'Foo1'))
shutil.copyfile('data4', os.path.join(d, 'aOeU.AoEu', 'boO.FaRr', 'DaTa1'))
(s, o) = cfvtest.runcfv(*(cmd + ' -v -s -T -p %s -f %s' % (d, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1, misnamed=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -s -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
shutil.rmtree(os.path.join(d, 'aOeU.AoEu'))
os.mkdir(os.path.join(d, 'AoEu.aOeU'))
os.mkdir(os.path.join(d, 'AoEu.aOeU', 'BOo.fArR'))
shutil.copyfile('data4', os.path.join(d, 'AoEu.aOeU', 'BOo.fArR', 'dAtA1'))
shutil.copyfile('data1', os.path.join(d, 'AoEu.aOeU', 'BOo.fArR', 'Foo1'))
(s, o) = cfvtest.runcfv(*(cmd + ' -i -v -s -T -p %s -f %s' % (d, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1, misnamed=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -i -v -s -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
if hassize:
experrs = {'badsize': 1}
else:
experrs = {'badcrc': 1}
(s, o) = cfvtest.runcfv(*(cmd + ' -i -v -T -p %s -f %s' % (d, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=0, **experrs)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -i -v -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -i -v -s -n -T -p %s -f %s' % (d, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1, misnamed=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -i -v -s -n -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -i -v -T -p %s -f %s' % (d, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -i -v -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
finally:
shutil.rmtree(d)
if fmt_cancreate(t) and hassize:
d = tempfile.mkdtemp()
try:
dcfn = os.path.join(d, 'foo.' + t)
os.mkdir(os.path.join(d, 'aoeu'))
dirsize = os.path.getsize(os.path.join(d, 'aoeu'))
with open(os.path.join(d, 'idth'), 'wb') as f:
f.write(b'a' * dirsize)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -C -p %s -t %s -f %s' % (d, t, dcfn),), **kw)
r = rcurry(cfv_all_test, files=1, ok=1)(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -C -p %s -t %s -f %s' % (d, t, dcfn), s, o, r, kw)
os.remove(os.path.join(d, 'idth'))
os.rename(os.path.join(d, 'aoeu'), os.path.join(d, 'idth'))
def dont_find_dir_test(s, o):
if not o.count('idth') == 1:
return str((o.count('idth'),))
return cfv_all_test(s, o, ok=0, notfound=1)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -m -T -p %s -f %s' % (d, dcfn),), **kw)
r = dont_find_dir_test(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -m -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
(s, o) = cfvtest.runcfv(*(cmd + ' -v -m -s -T -p %s -f %s' % (d, dcfn),), **kw)
r = dont_find_dir_test(s, o)
test_log_results(cfvtest.cfvenv + cfvtest.cfvfn + ' ' + cmd + ' -v -m -s -T -p %s -f %s' % (d, dcfn), s, o, r, kw)
finally:
shutil.rmtree(d)
|
cfv
|
positive
|
def queryURL(self, xri, service_type=None):
"""Build a URL to query the proxy resolver.
@param xri: An XRI to resolve.
@type xri: unicode
@param service_type: The service type to resolve, if you desire
service endpoint selection. A service type is a URI.
@type service_type: str
@returns: a URL
@returntype: str
"""
qxri = toURINormal(xri)[6:]
hxri = self.proxy_url + qxri
args = {'_xrd_r': 'application/xrds+xml'}
if service_type:
args['_xrd_t'] = service_type
else:
args['_xrd_r'] += ';sep=false'
<DeepExtract>
if hasattr(args, 'items'):
args = args.items()
args.sort()
if len(args) == 0:
query = hxri
if '?' in hxri.rstrip('?'):
sep = '&'
else:
sep = '?'
query = '%s%s%s' % (hxri, sep, urlencode(args))
</DeepExtract>
return query
|
def queryURL(self, xri, service_type=None):
"""Build a URL to query the proxy resolver.
@param xri: An XRI to resolve.
@type xri: unicode
@param service_type: The service type to resolve, if you desire
service endpoint selection. A service type is a URI.
@type service_type: str
@returns: a URL
@returntype: str
"""
qxri = toURINormal(xri)[6:]
hxri = self.proxy_url + qxri
args = {'_xrd_r': 'application/xrds+xml'}
if service_type:
args['_xrd_t'] = service_type
else:
args['_xrd_r'] += ';sep=false'
if hasattr(args, 'items'):
args = args.items()
args.sort()
if len(args) == 0:
query = hxri
if '?' in hxri.rstrip('?'):
sep = '&'
else:
sep = '?'
query = '%s%s%s' % (hxri, sep, urlencode(args))
return query
|
contributing
|
positive
|
def unset_config(name, bundleid=None):
"""Delete a workflow variable from ``info.plist``.
.. versionadded:: 1.33
Args:
name (str): Name of variable to delete.
bundleid (str, optional): Bundle ID of workflow variable belongs to.
"""
bundleid = bundleid or os.getenv('alfred_workflow_bundleid')
<DeepExtract>
if os.getenv('alfred_version', '').startswith('3'):
appname = u'Alfred 3'
else:
appname = u'com.runningwithcrayons.Alfred'
</DeepExtract>
opts = {'inWorkflow': bundleid}
script = JXA_UNSET_CONFIG.format(app=json.dumps(appname), arg=json.dumps(name), opts=json.dumps(opts, sort_keys=True))
<DeepExtract>
cmd = ['/usr/bin/osascript', '-l', 'JavaScript']
if os.path.exists(script):
cmd += [script]
else:
cmd += ['-e', script]
return run_command(cmd)
</DeepExtract>
|
def unset_config(name, bundleid=None):
"""Delete a workflow variable from ``info.plist``.
.. versionadded:: 1.33
Args:
name (str): Name of variable to delete.
bundleid (str, optional): Bundle ID of workflow variable belongs to.
"""
bundleid = bundleid or os.getenv('alfred_workflow_bundleid')
if os.getenv('alfred_version', '').startswith('3'):
appname = u'Alfred 3'
else:
appname = u'com.runningwithcrayons.Alfred'
opts = {'inWorkflow': bundleid}
script = JXA_UNSET_CONFIG.format(app=json.dumps(appname), arg=json.dumps(name), opts=json.dumps(opts, sort_keys=True))
cmd = ['/usr/bin/osascript', '-l', 'JavaScript']
if os.path.exists(script):
cmd += [script]
else:
cmd += ['-e', script]
return run_command(cmd)
|
alfred-pocket
|
positive
|
@kafka_versions('all')
@inlineCallbacks
def test_producer_batched_by_bytes(self):
start_offset0 = (yield self.current_offset(self.topic, 0))
start_offset1 = (yield self.current_offset(self.topic, 1))
producer = Producer(self.client, batch_send=True, batch_every_b=4096, batch_every_n=0, batch_every_t=0)
send1D = producer.send_messages(self.topic, msgs=[self.msg('one'), self.msg('two'), self.msg('three'), self.msg('four')])
yield self.assert_fetch_offset(0, start_offset0, [])
yield self.assert_fetch_offset(1, start_offset1, [])
self.assertNoResult(send1D)
send2D = producer.send_messages(self.topic, msgs=[self.msg('five'), self.msg('six'), self.msg('seven')])
yield self.assert_fetch_offset(0, start_offset0, [])
yield self.assert_fetch_offset(1, start_offset1, [])
self.assertNoResult(send2D)
send3D = producer.send_messages(self.topic, msgs=[self.msg('eight'), self.msg('nine'), self.msg('ten'), self.msg('eleven')])
yield self.assert_fetch_offset(0, start_offset0, [])
yield self.assert_fetch_offset(1, start_offset1, [])
self.assertNoResult(send3D)
send4D = producer.send_messages(self.topic, msgs=[self.msg('1234' * 1024)])
yield self.assert_fetch_offset(0, start_offset0, [self.msg('one'), self.msg('two'), self.msg('three'), self.msg('four'), self.msg('eight'), self.msg('nine'), self.msg('ten'), self.msg('eleven')], fetch_size=2048)
yield self.assert_fetch_offset(1, start_offset1, [self.msg('five'), self.msg('six'), self.msg('seven'), self.msg('1234' * 1024)], fetch_size=5 * 1024)
resp1 = self.successResultOf(send1D)
resp2 = self.successResultOf(send2D)
resp3 = self.successResultOf(send3D)
resp4 = self.successResultOf(send4D)
<DeepExtract>
self.assertEqual(resp1.error, 0)
self.assertEqual(resp1.offset, start_offset0)
</DeepExtract>
<DeepExtract>
self.assertEqual(resp2.error, 0)
self.assertEqual(resp2.offset, start_offset1)
</DeepExtract>
<DeepExtract>
self.assertEqual(resp3.error, 0)
self.assertEqual(resp3.offset, start_offset0)
</DeepExtract>
<DeepExtract>
self.assertEqual(resp4.error, 0)
self.assertEqual(resp4.offset, start_offset1)
</DeepExtract>
yield producer.stop()
|
@kafka_versions('all')
@inlineCallbacks
def test_producer_batched_by_bytes(self):
start_offset0 = (yield self.current_offset(self.topic, 0))
start_offset1 = (yield self.current_offset(self.topic, 1))
producer = Producer(self.client, batch_send=True, batch_every_b=4096, batch_every_n=0, batch_every_t=0)
send1D = producer.send_messages(self.topic, msgs=[self.msg('one'), self.msg('two'), self.msg('three'), self.msg('four')])
yield self.assert_fetch_offset(0, start_offset0, [])
yield self.assert_fetch_offset(1, start_offset1, [])
self.assertNoResult(send1D)
send2D = producer.send_messages(self.topic, msgs=[self.msg('five'), self.msg('six'), self.msg('seven')])
yield self.assert_fetch_offset(0, start_offset0, [])
yield self.assert_fetch_offset(1, start_offset1, [])
self.assertNoResult(send2D)
send3D = producer.send_messages(self.topic, msgs=[self.msg('eight'), self.msg('nine'), self.msg('ten'), self.msg('eleven')])
yield self.assert_fetch_offset(0, start_offset0, [])
yield self.assert_fetch_offset(1, start_offset1, [])
self.assertNoResult(send3D)
send4D = producer.send_messages(self.topic, msgs=[self.msg('1234' * 1024)])
yield self.assert_fetch_offset(0, start_offset0, [self.msg('one'), self.msg('two'), self.msg('three'), self.msg('four'), self.msg('eight'), self.msg('nine'), self.msg('ten'), self.msg('eleven')], fetch_size=2048)
yield self.assert_fetch_offset(1, start_offset1, [self.msg('five'), self.msg('six'), self.msg('seven'), self.msg('1234' * 1024)], fetch_size=5 * 1024)
resp1 = self.successResultOf(send1D)
resp2 = self.successResultOf(send2D)
resp3 = self.successResultOf(send3D)
resp4 = self.successResultOf(send4D)
self.assertEqual(resp1.error, 0)
self.assertEqual(resp1.offset, start_offset0)
self.assertEqual(resp2.error, 0)
self.assertEqual(resp2.offset, start_offset1)
self.assertEqual(resp3.error, 0)
self.assertEqual(resp3.offset, start_offset0)
self.assertEqual(resp4.error, 0)
self.assertEqual(resp4.offset, start_offset1)
yield producer.stop()
|
afkak
|
positive
|
def render_frame(self, render_what: VisualizeEnum=None):
if render_what is None:
render_what = self.cfg.VisualType
if render_what == VisualizeEnum.Velocity:
<DeepExtract>
for I in ti.static(self.grid.v_pair.cur):
v = ts.vec(self.grid.v_pair.cur[I].x, self.grid.v_pair.cur[I].y, 0.0)
self.clr_bffr[I] = v + ts.vec3(0.5)
</DeepExtract>
elif render_what == VisualizeEnum.Density:
<DeepExtract>
for I in ti.static(self.grid.density_pair.cur):
self.clr_bffr[I] = ti.abs(self.grid.density_pair.cur[I])
</DeepExtract>
elif render_what == VisualizeEnum.Divergence:
<DeepExtract>
for I in ti.static(self.grid.v_divs):
v = ts.vec(self.grid.v_divs[I], 0.0, 0.0)
self.clr_bffr[I] = 0.3 * v + ts.vec3(0.5)
</DeepExtract>
elif render_what == VisualizeEnum.Vorticity:
<DeepExtract>
for I in ti.static(self.grid.v_curl):
v = ts.vec(self.grid.v_curl[I], 0.0, 0.0)
self.clr_bffr[I] = 0.03 * v + ts.vec3(0.5)
</DeepExtract>
elif render_what == VisualizeEnum.VelocityMagnitude:
<DeepExtract>
for I in ti.static(self.grid.v_pair.cur):
v_norm = self.grid.v_pair.cur[I].norm() * 0.4
self.clr_bffr[I] = self.mapper.color_map(v_norm)
</DeepExtract>
elif render_what == VisualizeEnum.Temperature:
<DeepExtract>
for I in ti.static(self.grid.t):
self.clr_bffr[I] = ts.vec3(self.grid.t[I][0] / self.cfg.GasMaxT)
</DeepExtract>
elif render_what == VisualizeEnum.Distortion:
<DeepExtract>
for I in ti.static(self.grid.distortion):
self.clr_bffr[I] = ti.abs(self.grid.distortion[I])
</DeepExtract>
elif render_what == VisualizeEnum.BM:
<DeepExtract>
for I in ti.static(self.grid.BM):
self.clr_bffr[I] = self.grid.BM[I]
</DeepExtract>
elif render_what == VisualizeEnum.FM:
<DeepExtract>
for I in ti.static(self.grid.FM):
self.clr_bffr[I] = self.grid.FM[I]
</DeepExtract>
|
def render_frame(self, render_what: VisualizeEnum=None):
if render_what is None:
render_what = self.cfg.VisualType
if render_what == VisualizeEnum.Velocity:
for I in ti.static(self.grid.v_pair.cur):
v = ts.vec(self.grid.v_pair.cur[I].x, self.grid.v_pair.cur[I].y, 0.0)
self.clr_bffr[I] = v + ts.vec3(0.5)
elif render_what == VisualizeEnum.Density:
for I in ti.static(self.grid.density_pair.cur):
self.clr_bffr[I] = ti.abs(self.grid.density_pair.cur[I])
elif render_what == VisualizeEnum.Divergence:
for I in ti.static(self.grid.v_divs):
v = ts.vec(self.grid.v_divs[I], 0.0, 0.0)
self.clr_bffr[I] = 0.3 * v + ts.vec3(0.5)
elif render_what == VisualizeEnum.Vorticity:
for I in ti.static(self.grid.v_curl):
v = ts.vec(self.grid.v_curl[I], 0.0, 0.0)
self.clr_bffr[I] = 0.03 * v + ts.vec3(0.5)
elif render_what == VisualizeEnum.VelocityMagnitude:
for I in ti.static(self.grid.v_pair.cur):
v_norm = self.grid.v_pair.cur[I].norm() * 0.4
self.clr_bffr[I] = self.mapper.color_map(v_norm)
elif render_what == VisualizeEnum.Temperature:
for I in ti.static(self.grid.t):
self.clr_bffr[I] = ts.vec3(self.grid.t[I][0] / self.cfg.GasMaxT)
elif render_what == VisualizeEnum.Distortion:
for I in ti.static(self.grid.distortion):
self.clr_bffr[I] = ti.abs(self.grid.distortion[I])
elif render_what == VisualizeEnum.BM:
for I in ti.static(self.grid.BM):
self.clr_bffr[I] = self.grid.BM[I]
elif render_what == VisualizeEnum.FM:
for I in ti.static(self.grid.FM):
self.clr_bffr[I] = self.grid.FM[I]
|
a-toy-fluid-engine
|
positive
|
def construct_arg(cls: Type[T], param_name: str, annotation: Type, default: Any, params: Params, **extras) -> Any:
"""
Does the work of actually constructing an individual argument for :func:`create_kwargs`.
Here we're in the inner loop of iterating over the parameters to a particular constructor,
trying to construct just one of them. The information we get for that parameter is its name,
its type annotation, and its default value; we also get the full set of ``Params`` for
constructing the object (which we may mutate), and any ``extras`` that the constructor might
need.
We take the type annotation and default value here separately, instead of using an
``inspect.Parameter`` object directly, so that we can handle ``Union`` types using recursion on
this method, trying the different annotation types in the union in turn.
"""
from allennlp.models.archival import load_archive
name = param_name
origin = getattr(annotation, '__origin__', None)
args = getattr(annotation, '__args__', [])
optional = default != _NO_DEFAULT
if name in extras:
return extras[name]
elif name in params and isinstance(params.get(name), Params) and ('_pretrained' in params.get(name)):
load_module_params = params.pop(name).pop('_pretrained')
archive_file = load_module_params.pop('archive_file')
module_path = load_module_params.pop('module_path')
freeze = load_module_params.pop('freeze', True)
archive = load_archive(archive_file)
result = archive.extract_module(module_path, freeze)
if not isinstance(result, annotation):
raise ConfigurationError(f'The module from model at {archive_file} at path {module_path} was expected of type {annotation} but is of type {type(result)}')
return result
elif hasattr(annotation, 'from_params'):
if name in params:
subparams = params.pop(name)
<DeepExtract>
subextras: Dict[str, Any] = {}
if hasattr(annotation, 'from_params'):
from_params_method = annotation.from_params
else:
from_params_method = annotation
if takes_kwargs(from_params_method):
subextras = extras
else:
subextras = {k: v for (k, v) in extras.items() if takes_arg(from_params_method, k)}
subextras = subextras
</DeepExtract>
if isinstance(subparams, str):
return annotation.by_name(subparams)()
else:
return annotation.from_params(params=subparams, **subextras)
elif not optional:
raise ConfigurationError(f'expected key {name} for {cls.__name__}')
else:
return default
elif annotation == str:
return params.pop(name, default) if optional else params.pop(name)
elif annotation == int:
return params.pop_int(name, default) if optional else params.pop_int(name)
elif annotation == bool:
return params.pop_bool(name, default) if optional else params.pop_bool(name)
elif annotation == float:
return params.pop_float(name, default) if optional else params.pop_float(name)
elif origin in (Dict, dict) and len(args) == 2 and hasattr(args[-1], 'from_params'):
value_cls = annotation.__args__[-1]
value_dict = {}
for (key, value_params) in params.pop(name, Params({})).items():
<DeepExtract>
subextras: Dict[str, Any] = {}
if hasattr(value_cls, 'from_params'):
from_params_method = value_cls.from_params
else:
from_params_method = value_cls
if takes_kwargs(from_params_method):
subextras = extras
else:
subextras = {k: v for (k, v) in extras.items() if takes_arg(from_params_method, k)}
subextras = subextras
</DeepExtract>
value_dict[key] = value_cls.from_params(params=value_params, **subextras)
return value_dict
elif origin in (List, list) and len(args) == 1 and hasattr(args[0], 'from_params'):
value_cls = annotation.__args__[0]
value_list = []
for value_params in params.pop(name, Params({})):
<DeepExtract>
subextras: Dict[str, Any] = {}
if hasattr(value_cls, 'from_params'):
from_params_method = value_cls.from_params
else:
from_params_method = value_cls
if takes_kwargs(from_params_method):
subextras = extras
else:
subextras = {k: v for (k, v) in extras.items() if takes_arg(from_params_method, k)}
subextras = subextras
</DeepExtract>
value_list.append(value_cls.from_params(params=value_params, **subextras))
return value_list
elif origin in (Tuple, tuple) and all((hasattr(arg, 'from_params') for arg in args)):
value_list = []
for (value_cls, value_params) in zip(annotation.__args__, params.pop(name, Params({}))):
<DeepExtract>
subextras: Dict[str, Any] = {}
if hasattr(value_cls, 'from_params'):
from_params_method = value_cls.from_params
else:
from_params_method = value_cls
if takes_kwargs(from_params_method):
subextras = extras
else:
subextras = {k: v for (k, v) in extras.items() if takes_arg(from_params_method, k)}
subextras = subextras
</DeepExtract>
value_list.append(value_cls.from_params(params=value_params, **subextras))
return tuple(value_list)
elif origin in (Set, set) and len(args) == 1 and hasattr(args[0], 'from_params'):
value_cls = annotation.__args__[0]
value_set = set()
for value_params in params.pop(name, Params({})):
<DeepExtract>
subextras: Dict[str, Any] = {}
if hasattr(value_cls, 'from_params'):
from_params_method = value_cls.from_params
else:
from_params_method = value_cls
if takes_kwargs(from_params_method):
subextras = extras
else:
subextras = {k: v for (k, v) in extras.items() if takes_arg(from_params_method, k)}
subextras = subextras
</DeepExtract>
value_set.add(value_cls.from_params(params=value_params, **subextras))
return value_set
elif origin == Union:
param_value = params.get(name, Params({}))
if isinstance(param_value, Params):
param_value = param_value.duplicate()
for arg in args:
try:
return construct_arg(cls, name, arg, default, params, **extras)
except (ValueError, TypeError, ConfigurationError, AttributeError):
params[name] = param_value
if isinstance(param_value, Params):
param_value = param_value.duplicate()
continue
raise ConfigurationError(f'Failed to construct argument {name} with type {annotation}')
elif optional:
return params.pop(name, default)
else:
return params.pop(name)
|
def construct_arg(cls: Type[T], param_name: str, annotation: Type, default: Any, params: Params, **extras) -> Any:
"""
Does the work of actually constructing an individual argument for :func:`create_kwargs`.
Here we're in the inner loop of iterating over the parameters to a particular constructor,
trying to construct just one of them. The information we get for that parameter is its name,
its type annotation, and its default value; we also get the full set of ``Params`` for
constructing the object (which we may mutate), and any ``extras`` that the constructor might
need.
We take the type annotation and default value here separately, instead of using an
``inspect.Parameter`` object directly, so that we can handle ``Union`` types using recursion on
this method, trying the different annotation types in the union in turn.
"""
from allennlp.models.archival import load_archive
name = param_name
origin = getattr(annotation, '__origin__', None)
args = getattr(annotation, '__args__', [])
optional = default != _NO_DEFAULT
if name in extras:
return extras[name]
elif name in params and isinstance(params.get(name), Params) and ('_pretrained' in params.get(name)):
load_module_params = params.pop(name).pop('_pretrained')
archive_file = load_module_params.pop('archive_file')
module_path = load_module_params.pop('module_path')
freeze = load_module_params.pop('freeze', True)
archive = load_archive(archive_file)
result = archive.extract_module(module_path, freeze)
if not isinstance(result, annotation):
raise ConfigurationError(f'The module from model at {archive_file} at path {module_path} was expected of type {annotation} but is of type {type(result)}')
return result
elif hasattr(annotation, 'from_params'):
if name in params:
subparams = params.pop(name)
subextras: Dict[str, Any] = {}
if hasattr(annotation, 'from_params'):
from_params_method = annotation.from_params
else:
from_params_method = annotation
if takes_kwargs(from_params_method):
subextras = extras
else:
subextras = {k: v for (k, v) in extras.items() if takes_arg(from_params_method, k)}
subextras = subextras
if isinstance(subparams, str):
return annotation.by_name(subparams)()
else:
return annotation.from_params(params=subparams, **subextras)
elif not optional:
raise ConfigurationError(f'expected key {name} for {cls.__name__}')
else:
return default
elif annotation == str:
return params.pop(name, default) if optional else params.pop(name)
elif annotation == int:
return params.pop_int(name, default) if optional else params.pop_int(name)
elif annotation == bool:
return params.pop_bool(name, default) if optional else params.pop_bool(name)
elif annotation == float:
return params.pop_float(name, default) if optional else params.pop_float(name)
elif origin in (Dict, dict) and len(args) == 2 and hasattr(args[-1], 'from_params'):
value_cls = annotation.__args__[-1]
value_dict = {}
for (key, value_params) in params.pop(name, Params({})).items():
subextras: Dict[str, Any] = {}
if hasattr(value_cls, 'from_params'):
from_params_method = value_cls.from_params
else:
from_params_method = value_cls
if takes_kwargs(from_params_method):
subextras = extras
else:
subextras = {k: v for (k, v) in extras.items() if takes_arg(from_params_method, k)}
subextras = subextras
value_dict[key] = value_cls.from_params(params=value_params, **subextras)
return value_dict
elif origin in (List, list) and len(args) == 1 and hasattr(args[0], 'from_params'):
value_cls = annotation.__args__[0]
value_list = []
for value_params in params.pop(name, Params({})):
subextras: Dict[str, Any] = {}
if hasattr(value_cls, 'from_params'):
from_params_method = value_cls.from_params
else:
from_params_method = value_cls
if takes_kwargs(from_params_method):
subextras = extras
else:
subextras = {k: v for (k, v) in extras.items() if takes_arg(from_params_method, k)}
subextras = subextras
value_list.append(value_cls.from_params(params=value_params, **subextras))
return value_list
elif origin in (Tuple, tuple) and all((hasattr(arg, 'from_params') for arg in args)):
value_list = []
for (value_cls, value_params) in zip(annotation.__args__, params.pop(name, Params({}))):
subextras: Dict[str, Any] = {}
if hasattr(value_cls, 'from_params'):
from_params_method = value_cls.from_params
else:
from_params_method = value_cls
if takes_kwargs(from_params_method):
subextras = extras
else:
subextras = {k: v for (k, v) in extras.items() if takes_arg(from_params_method, k)}
subextras = subextras
value_list.append(value_cls.from_params(params=value_params, **subextras))
return tuple(value_list)
elif origin in (Set, set) and len(args) == 1 and hasattr(args[0], 'from_params'):
value_cls = annotation.__args__[0]
value_set = set()
for value_params in params.pop(name, Params({})):
subextras: Dict[str, Any] = {}
if hasattr(value_cls, 'from_params'):
from_params_method = value_cls.from_params
else:
from_params_method = value_cls
if takes_kwargs(from_params_method):
subextras = extras
else:
subextras = {k: v for (k, v) in extras.items() if takes_arg(from_params_method, k)}
subextras = subextras
value_set.add(value_cls.from_params(params=value_params, **subextras))
return value_set
elif origin == Union:
param_value = params.get(name, Params({}))
if isinstance(param_value, Params):
param_value = param_value.duplicate()
for arg in args:
try:
return construct_arg(cls, name, arg, default, params, **extras)
except (ValueError, TypeError, ConfigurationError, AttributeError):
params[name] = param_value
if isinstance(param_value, Params):
param_value = param_value.duplicate()
continue
raise ConfigurationError(f'Failed to construct argument {name} with type {annotation}')
elif optional:
return params.pop(name, default)
else:
return params.pop(name)
|
errudite
|
positive
|
def get_config(cfg_path=None, options=None):
agentConfig = {}
try:
path = os.path.realpath(__file__)
path = os.path.dirname(path)
<DeepExtract>
if cfg_path is not None and os.path.exists(cfg_path):
config_path = cfg_path
elif get_os() == 'windows':
config_path = _windows_config_path()
elif get_os() == 'mac':
config_path = _mac_config_path()
else:
config_path = _unix_config_path()
</DeepExtract>
config = configparser.ConfigParser()
with open(config_path) as config_file:
if is_p3k():
config.read_file(skip_leading_wsp(config_file))
else:
config.readfp(skip_leading_wsp(config_file))
for option in config.options('Main'):
agentConfig[option] = config.get('Main', option)
except Exception:
raise CfgNotFound
return agentConfig
|
def get_config(cfg_path=None, options=None):
agentConfig = {}
try:
path = os.path.realpath(__file__)
path = os.path.dirname(path)
if cfg_path is not None and os.path.exists(cfg_path):
config_path = cfg_path
elif get_os() == 'windows':
config_path = _windows_config_path()
elif get_os() == 'mac':
config_path = _mac_config_path()
else:
config_path = _unix_config_path()
config = configparser.ConfigParser()
with open(config_path) as config_file:
if is_p3k():
config.read_file(skip_leading_wsp(config_file))
else:
config.readfp(skip_leading_wsp(config_file))
for option in config.options('Main'):
agentConfig[option] = config.get('Main', option)
except Exception:
raise CfgNotFound
return agentConfig
|
datadogpy
|
positive
|
def getrawtransaction_batch(db, txhash_list, verbose=False):
result = {}
for txhash in txhash_list:
<DeepExtract>
cursor = db.cursor()
if isinstance(txhash, bytes):
txhash = binascii.hexlify(txhash).decode('ascii')
(tx_hex, confirmations) = list(cursor.execute('SELECT tx_hex, confirmations FROM raw_transactions WHERE tx_hash = ?', (txhash,)))[0]
cursor.close()
if verbose:
result[txhash] = mock_bitcoind_verbose_tx_output(tx_hex, txhash, confirmations)
else:
result[txhash] = tx_hex
</DeepExtract>
return result
|
def getrawtransaction_batch(db, txhash_list, verbose=False):
result = {}
for txhash in txhash_list:
cursor = db.cursor()
if isinstance(txhash, bytes):
txhash = binascii.hexlify(txhash).decode('ascii')
(tx_hex, confirmations) = list(cursor.execute('SELECT tx_hex, confirmations FROM raw_transactions WHERE tx_hash = ?', (txhash,)))[0]
cursor.close()
if verbose:
result[txhash] = mock_bitcoind_verbose_tx_output(tx_hex, txhash, confirmations)
else:
result[txhash] = tx_hex
return result
|
counterparty-lib
|
positive
|
def __init__(self, dataset, num_steps, vocab=None):
self.num_steps = num_steps
all_premise_tokens = d2l.tokenize(dataset[0])
all_hypothesis_tokens = d2l.tokenize(dataset[1])
if vocab is None:
self.vocab = d2l.Vocab(all_premise_tokens + all_hypothesis_tokens, min_freq=5, reserved_tokens=['<pad>'])
else:
self.vocab = vocab
<DeepExtract>
self.premises = torch.tensor([d2l.truncate_pad(self.vocab[line], self.num_steps, self.vocab['<pad>']) for line in all_premise_tokens])
</DeepExtract>
<DeepExtract>
self.hypotheses = torch.tensor([d2l.truncate_pad(self.vocab[line], self.num_steps, self.vocab['<pad>']) for line in all_hypothesis_tokens])
</DeepExtract>
self.labels = torch.tensor(dataset[2])
print('read ' + str(len(self.premises)) + ' examples')
|
def __init__(self, dataset, num_steps, vocab=None):
self.num_steps = num_steps
all_premise_tokens = d2l.tokenize(dataset[0])
all_hypothesis_tokens = d2l.tokenize(dataset[1])
if vocab is None:
self.vocab = d2l.Vocab(all_premise_tokens + all_hypothesis_tokens, min_freq=5, reserved_tokens=['<pad>'])
else:
self.vocab = vocab
self.premises = torch.tensor([d2l.truncate_pad(self.vocab[line], self.num_steps, self.vocab['<pad>']) for line in all_premise_tokens])
self.hypotheses = torch.tensor([d2l.truncate_pad(self.vocab[line], self.num_steps, self.vocab['<pad>']) for line in all_hypothesis_tokens])
self.labels = torch.tensor(dataset[2])
print('read ' + str(len(self.premises)) + ' examples')
|
d2l-zh
|
positive
|
def main(argv):
arg_apiKey = None
arg_orgName = None
try:
(opts, args) = getopt.getopt(argv, 'k:o:')
except getopt.GetoptError:
sys.exit(2)
for (opt, arg) in opts:
if opt == '-k':
arg_apiKey = arg
if opt == '-o':
arg_orgName = arg
if arg_apiKey is None:
<DeepExtract>
print(readMe)
sys.exit(2)
</DeepExtract>
organizationList = []
<DeepExtract>
endpoint = '/organizations'
(success, errors, headers, response) = merakiRequest(arg_apiKey, 'GET', endpoint, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, headers, orgs) = (success, errors, headers, response)
</DeepExtract>
if not arg_orgName is None:
<DeepExtract>
orgId = None
if not orgs is None:
for org in orgs:
if org['name'] == arg_orgName:
orgId = org['id']
break
</DeepExtract>
if not orgId is None:
orgItem = {'id': orgId, 'name': arg_orgName}
organizationList.append(orgItem)
else:
print('ERROR: Organization name cannot be found')
<DeepExtract>
if False:
print(readMe)
sys.exit(2)
</DeepExtract>
else:
organizationList = orgs
for org in organizationList:
print('\n---\n')
<DeepExtract>
endpoint = '/organizations/%s/licenses/overview' % org['id']
(success, errors, headers, response) = merakiRequest(arg_apiKey, 'GET', endpoint, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, headers, licenses) = (success, errors, headers, response)
</DeepExtract>
if not licenses is None:
print('\n---\n\nLicense info for organization "%s" (ID: %s)\n' % (org['name'], org['id']))
if 'status' in licenses:
print('%-20s%s' % ('Status:', licenses['status']))
if 'expirationDate' in licenses:
print('%-20s%s' % ('Expiration date:', licenses['expirationDate']))
if 'licensedDeviceCounts' in licenses:
print('\nLicensed device counts:')
for deviceType in licenses['licensedDeviceCounts']:
print('%-20s%s' % (deviceType, licenses['licensedDeviceCounts'][deviceType]))
else:
print('ERROR: Unable to fetch license info for organization "%s"' % org['name'])
|
def main(argv):
arg_apiKey = None
arg_orgName = None
try:
(opts, args) = getopt.getopt(argv, 'k:o:')
except getopt.GetoptError:
sys.exit(2)
for (opt, arg) in opts:
if opt == '-k':
arg_apiKey = arg
if opt == '-o':
arg_orgName = arg
if arg_apiKey is None:
print(readMe)
sys.exit(2)
organizationList = []
endpoint = '/organizations'
(success, errors, headers, response) = merakiRequest(arg_apiKey, 'GET', endpoint, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, headers, orgs) = (success, errors, headers, response)
if not arg_orgName is None:
orgId = None
if not orgs is None:
for org in orgs:
if org['name'] == arg_orgName:
orgId = org['id']
break
if not orgId is None:
orgItem = {'id': orgId, 'name': arg_orgName}
organizationList.append(orgItem)
else:
print('ERROR: Organization name cannot be found')
if False:
print(readMe)
sys.exit(2)
else:
organizationList = orgs
for org in organizationList:
print('\n---\n')
endpoint = '/organizations/%s/licenses/overview' % org['id']
(success, errors, headers, response) = merakiRequest(arg_apiKey, 'GET', endpoint, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, headers, licenses) = (success, errors, headers, response)
if not licenses is None:
print('\n---\n\nLicense info for organization "%s" (ID: %s)\n' % (org['name'], org['id']))
if 'status' in licenses:
print('%-20s%s' % ('Status:', licenses['status']))
if 'expirationDate' in licenses:
print('%-20s%s' % ('Expiration date:', licenses['expirationDate']))
if 'licensedDeviceCounts' in licenses:
print('\nLicensed device counts:')
for deviceType in licenses['licensedDeviceCounts']:
print('%-20s%s' % (deviceType, licenses['licensedDeviceCounts'][deviceType]))
else:
print('ERROR: Unable to fetch license info for organization "%s"' % org['name'])
|
automation-scripts
|
positive
|
def set_pool(mod, pool_to_set, balances):
parts = []
for (bal, weight) in balances:
<DeepExtract>
if bal is None:
full_bal = None
else:
endpart = bal.rsplit('/', 1)[-1]
if '.' in endpart:
full_bal = bal
else:
full_bal = '{}.{}'.format(bal, endpart)
</DeepExtract>
part = '(InventoryBalanceData={},ResolvedInventoryBalanceData=InventoryBalanceData\'"{}"\',Weight=(BaseValueConstant={}))'.format(full_bal, full_bal, round(weight, 6))
parts.append(part)
mod.reg_hotfix(Mod.PATCH, '', pool_to_set, 'BalancedItems', '({})'.format(','.join(parts)))
|
def set_pool(mod, pool_to_set, balances):
parts = []
for (bal, weight) in balances:
if bal is None:
full_bal = None
else:
endpart = bal.rsplit('/', 1)[-1]
if '.' in endpart:
full_bal = bal
else:
full_bal = '{}.{}'.format(bal, endpart)
part = '(InventoryBalanceData={},ResolvedInventoryBalanceData=InventoryBalanceData\'"{}"\',Weight=(BaseValueConstant={}))'.format(full_bal, full_bal, round(weight, 6))
parts.append(part)
mod.reg_hotfix(Mod.PATCH, '', pool_to_set, 'BalancedItems', '({})'.format(','.join(parts)))
|
bl3mods
|
positive
|
def __init__(self):
"""Constructor of the class"""
super(ParentWindow, self).__init__()
self.windowsize = QSize(WIDTH, HEIGHT)
<DeepExtract>
self.setMinimumSize(self.windowsize)
self.init_statusbar()
self.timer = QTimer()
self.timer.setInterval(100)
self.timer.timeout.connect(self.recurring_timer)
self.timer.start()
self.main_layout = QVBoxLayout()
self.central_widget = QWidget()
self.central_widget.setStyleSheet('background-color: rgb(51,51,51)')
self.central_widget.setLayout(self.main_layout)
self.setCentralWidget(self.central_widget)
self.location_on_the_screen()
</DeepExtract>
self.robot_selection = None
self.closing = False
|
def __init__(self):
"""Constructor of the class"""
super(ParentWindow, self).__init__()
self.windowsize = QSize(WIDTH, HEIGHT)
self.setMinimumSize(self.windowsize)
self.init_statusbar()
self.timer = QTimer()
self.timer.setInterval(100)
self.timer.timeout.connect(self.recurring_timer)
self.timer.start()
self.main_layout = QVBoxLayout()
self.central_widget = QWidget()
self.central_widget.setStyleSheet('background-color: rgb(51,51,51)')
self.central_widget.setLayout(self.main_layout)
self.setCentralWidget(self.central_widget)
self.location_on_the_screen()
self.robot_selection = None
self.closing = False
|
BehaviorMetrics
|
positive
|
def _connect_all_listeners(provider_id):
"""Connect all listeners connected to a provider.
As soon as they are connected, they receive the initial selection.
"""
provider = self._providers[provider_id]
selection = provider.get_selection()
for func in self._listeners[provider_id]:
<DeepExtract>
provider = self._providers[provider_id]
provider.on_trait_change(func, 'selection', remove=False)
</DeepExtract>
func(selection)
|
def _connect_all_listeners(provider_id):
"""Connect all listeners connected to a provider.
As soon as they are connected, they receive the initial selection.
"""
provider = self._providers[provider_id]
selection = provider.get_selection()
for func in self._listeners[provider_id]:
provider = self._providers[provider_id]
provider.on_trait_change(func, 'selection', remove=False)
func(selection)
|
apptools
|
positive
|
def download_v1_data(token, target_dir, logfile, limit=None):
"""
Download the data from B2SHARE V1 records using token in to target_dir .
"""
V1_URL_BASE = current_app.config.get('V1_URL_BASE')
url = '%srecords' % V1_URL_BASE
params = {}
params['access_token'] = token
params['page_size'] = 100
page_counter = 0
os.chdir(target_dir)
while True:
params['page_offset'] = page_counter
click.secho('Params to download: %s' % str(params))
r = requests.get(url, params=params, verify=False)
r.raise_for_status()
recs = json.loads(r.text)['records']
if len(recs) == 0:
return
for record in recs:
recid = str(record.get('record_id'))
click.secho('Download record : %s' % recid)
if not os.path.exists(recid):
os.mkdir(recid)
<DeepExtract>
click.secho('Download record {} "{}"'.format(recid, record.get('title')))
directory = recid
target_file = os.path.join(directory, '___record___.json')
with open(target_file, 'w') as f:
f.write(json.dumps(record))
for (index, file_dict) in enumerate(record.get('files', [])):
click.secho(' Download file "{}"'.format(file_dict.get('name')))
filepath = os.path.join(directory, 'file_{}'.format(index))
if not os.path.exists(filepath) or int(os.path.getsize(filepath)) != int(file_dict.get('size')):
_save_file(logfile, file_dict['url'], filepath)
if int(os.path.getsize(filepath)) != int(file_dict.get('size')):
logfile.write('\n********************\n')
logfile.write('\nERROR: downloaded file size differs for file {}\n'.format(filepath))
logfile.write(' {} instead of {}\n'.format(os.path.getsize(filepath), file_dict.get('size')))
logfile.write('\n********************\n')
</DeepExtract>
if limit is not None and int(recid) >= limit:
return
page_counter = page_counter + 1
|
def download_v1_data(token, target_dir, logfile, limit=None):
"""
Download the data from B2SHARE V1 records using token in to target_dir .
"""
V1_URL_BASE = current_app.config.get('V1_URL_BASE')
url = '%srecords' % V1_URL_BASE
params = {}
params['access_token'] = token
params['page_size'] = 100
page_counter = 0
os.chdir(target_dir)
while True:
params['page_offset'] = page_counter
click.secho('Params to download: %s' % str(params))
r = requests.get(url, params=params, verify=False)
r.raise_for_status()
recs = json.loads(r.text)['records']
if len(recs) == 0:
return
for record in recs:
recid = str(record.get('record_id'))
click.secho('Download record : %s' % recid)
if not os.path.exists(recid):
os.mkdir(recid)
click.secho('Download record {} "{}"'.format(recid, record.get('title')))
directory = recid
target_file = os.path.join(directory, '___record___.json')
with open(target_file, 'w') as f:
f.write(json.dumps(record))
for (index, file_dict) in enumerate(record.get('files', [])):
click.secho(' Download file "{}"'.format(file_dict.get('name')))
filepath = os.path.join(directory, 'file_{}'.format(index))
if not os.path.exists(filepath) or int(os.path.getsize(filepath)) != int(file_dict.get('size')):
_save_file(logfile, file_dict['url'], filepath)
if int(os.path.getsize(filepath)) != int(file_dict.get('size')):
logfile.write('\n********************\n')
logfile.write('\nERROR: downloaded file size differs for file {}\n'.format(filepath))
logfile.write(' {} instead of {}\n'.format(os.path.getsize(filepath), file_dict.get('size')))
logfile.write('\n********************\n')
if limit is not None and int(recid) >= limit:
return
page_counter = page_counter + 1
|
b2share
|
positive
|
def test_integration_multiple_retrain_overrides(self):
train_ratio = '0.8'
batch_size = '64'
num_workers = '2'
epochs = '2'
disable_tensorboard = 'True'
layers_to_freeze = 'encoder'
name_of_the_retrain_parser = 'AName'
logging_path = self.logging_path
<DeepExtract>
if model_type is None:
model_type = self.a_fasttext_model_type
if train_dataset_path is None:
train_dataset_path = self.a_train_pickle_dataset_path
if logging_path is None:
logging_path = self.logging_path
parser_params = [model_type, train_dataset_path, '--train_ratio', train_ratio, '--batch_size', batch_size, '--epochs', epochs, '--num_workers', num_workers, '--learning_rate', learning_rate, '--seed', seed, '--logging_path', logging_path, '--disable_tensorboard', disable_tensorboard, '--layers_to_freeze', layers_to_freeze, '--device', device, '--csv_column_separator', csv_column_separator]
if val_dataset_path is not None:
parser_params.extend(['--val_dataset_path', val_dataset_path])
if cache_dir is not None:
parser_params.extend(['--cache_dir', cache_dir])
if name_of_the_retrain_parser is not None:
parser_params.extend(['--name_of_the_retrain_parser', name_of_the_retrain_parser])
if csv_column_names is not None:
parser_params.extend(['--csv_column_names'])
parser_params.extend(csv_column_names)
if prediction_tags is not None:
parser_params.extend(['--prediction_tags', prediction_tags])
parser_params = parser_params
</DeepExtract>
retrain.main(parser_params)
self.assertTrue(os.path.isfile(os.path.join(self.temp_checkpoints_obj.name, 'checkpoints', 'AName.ckpt')))
|
def test_integration_multiple_retrain_overrides(self):
train_ratio = '0.8'
batch_size = '64'
num_workers = '2'
epochs = '2'
disable_tensorboard = 'True'
layers_to_freeze = 'encoder'
name_of_the_retrain_parser = 'AName'
logging_path = self.logging_path
if model_type is None:
model_type = self.a_fasttext_model_type
if train_dataset_path is None:
train_dataset_path = self.a_train_pickle_dataset_path
if logging_path is None:
logging_path = self.logging_path
parser_params = [model_type, train_dataset_path, '--train_ratio', train_ratio, '--batch_size', batch_size, '--epochs', epochs, '--num_workers', num_workers, '--learning_rate', learning_rate, '--seed', seed, '--logging_path', logging_path, '--disable_tensorboard', disable_tensorboard, '--layers_to_freeze', layers_to_freeze, '--device', device, '--csv_column_separator', csv_column_separator]
if val_dataset_path is not None:
parser_params.extend(['--val_dataset_path', val_dataset_path])
if cache_dir is not None:
parser_params.extend(['--cache_dir', cache_dir])
if name_of_the_retrain_parser is not None:
parser_params.extend(['--name_of_the_retrain_parser', name_of_the_retrain_parser])
if csv_column_names is not None:
parser_params.extend(['--csv_column_names'])
parser_params.extend(csv_column_names)
if prediction_tags is not None:
parser_params.extend(['--prediction_tags', prediction_tags])
parser_params = parser_params
retrain.main(parser_params)
self.assertTrue(os.path.isfile(os.path.join(self.temp_checkpoints_obj.name, 'checkpoints', 'AName.ckpt')))
|
deepparse
|
positive
|
def __repr__(self):
"""Create a human-readable representation of a AsteroidStrike
:type self: AsteroidStrike
:rtype: str
"""
result = _lib.bc_AsteroidStrike_debug(self._ptr)
<DeepExtract>
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
</DeepExtract>
_result = _ffi.string(result)
_lib.bc_free_string(result)
result = _result.decode()
return result
|
def __repr__(self):
"""Create a human-readable representation of a AsteroidStrike
:type self: AsteroidStrike
:rtype: str
"""
result = _lib.bc_AsteroidStrike_debug(self._ptr)
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
_result = _ffi.string(result)
_lib.bc_free_string(result)
result = _result.decode()
return result
|
bc18-scaffold
|
positive
|
def test_start_tls_tunnel(mocker, tmpdir):
<DeepExtract>
mocker.patch('subprocess.Popen', return_value=_get_popen_mock())
</DeepExtract>
mocker.patch('watchdog.is_pid_running', return_value=True)
<DeepExtract>
state = {'pid': PID - 1, 'cmd': ['/usr/bin/stunnel', '/var/run/efs/stunnel-config.fs-deadbeef.mnt.21007']}
state_file = tempfile.mkstemp(prefix='state', dir=str(tmpdir))[1]
with open(state_file, 'w') as f:
f.write(json.dumps(state))
(state, state_file) = (state, state_file)
</DeepExtract>
procs = []
pid = watchdog.start_tls_tunnel(procs, state, str(tmpdir), state_file)
assert PID == pid
assert 1 == len(procs)
|
def test_start_tls_tunnel(mocker, tmpdir):
mocker.patch('subprocess.Popen', return_value=_get_popen_mock())
mocker.patch('watchdog.is_pid_running', return_value=True)
state = {'pid': PID - 1, 'cmd': ['/usr/bin/stunnel', '/var/run/efs/stunnel-config.fs-deadbeef.mnt.21007']}
state_file = tempfile.mkstemp(prefix='state', dir=str(tmpdir))[1]
with open(state_file, 'w') as f:
f.write(json.dumps(state))
(state, state_file) = (state, state_file)
procs = []
pid = watchdog.start_tls_tunnel(procs, state, str(tmpdir), state_file)
assert PID == pid
assert 1 == len(procs)
|
efs-utils
|
positive
|
def compile_folder(self, directory, write=True, package=True, **kwargs):
"""Compile a directory and returns paths to compiled files."""
if not isinstance(write, bool) and os.path.isfile(write):
raise CoconutException('destination path cannot point to a file when compiling a directory')
filepaths = []
for (dirpath, dirnames, filenames) in os.walk(directory):
if isinstance(write, bool):
writedir = write
else:
writedir = os.path.join(write, os.path.relpath(dirpath, directory))
for filename in filenames:
if os.path.splitext(filename)[1] in code_exts:
with self.handling_exceptions():
<DeepExtract>
set_ext = False
if writedir is False:
destpath = None
elif writedir is True:
destpath = os.path.join(dirpath, filename)
set_ext = True
elif os.path.splitext(writedir)[1]:
destpath = writedir
else:
destpath = os.path.join(writedir, os.path.basename(os.path.join(dirpath, filename)))
set_ext = True
if set_ext:
(base, ext) = os.path.splitext(os.path.splitext(destpath)[0])
if not ext:
ext = comp_ext
destpath = fixpath(base + ext)
if os.path.join(dirpath, filename) == destpath:
raise CoconutException('cannot compile ' + showpath(os.path.join(dirpath, filename)) + ' to itself', extra='incorrect file extension')
if destpath is not None:
dest_ext = os.path.splitext(destpath)[1]
if dest_ext in code_exts:
if force:
logger.warn('found destination path with ' + dest_ext + ' extension; compiling anyway due to --force')
else:
raise CoconutException('found destination path with ' + dest_ext + ' extension; aborting compilation', extra='pass --force to override')
self.compile(os.path.join(dirpath, filename), destpath, package, force=force, **kwargs)
destpath = destpath
</DeepExtract>
if destpath is not None:
filepaths.append(destpath)
for name in dirnames[:]:
if not is_special_dir(name) and name.startswith('.'):
if logger.verbose:
logger.show_tabulated('Skipped directory', name, '(explicitly pass as source to override).')
dirnames.remove(name)
return filepaths
|
def compile_folder(self, directory, write=True, package=True, **kwargs):
"""Compile a directory and returns paths to compiled files."""
if not isinstance(write, bool) and os.path.isfile(write):
raise CoconutException('destination path cannot point to a file when compiling a directory')
filepaths = []
for (dirpath, dirnames, filenames) in os.walk(directory):
if isinstance(write, bool):
writedir = write
else:
writedir = os.path.join(write, os.path.relpath(dirpath, directory))
for filename in filenames:
if os.path.splitext(filename)[1] in code_exts:
with self.handling_exceptions():
set_ext = False
if writedir is False:
destpath = None
elif writedir is True:
destpath = os.path.join(dirpath, filename)
set_ext = True
elif os.path.splitext(writedir)[1]:
destpath = writedir
else:
destpath = os.path.join(writedir, os.path.basename(os.path.join(dirpath, filename)))
set_ext = True
if set_ext:
(base, ext) = os.path.splitext(os.path.splitext(destpath)[0])
if not ext:
ext = comp_ext
destpath = fixpath(base + ext)
if os.path.join(dirpath, filename) == destpath:
raise CoconutException('cannot compile ' + showpath(os.path.join(dirpath, filename)) + ' to itself', extra='incorrect file extension')
if destpath is not None:
dest_ext = os.path.splitext(destpath)[1]
if dest_ext in code_exts:
if force:
logger.warn('found destination path with ' + dest_ext + ' extension; compiling anyway due to --force')
else:
raise CoconutException('found destination path with ' + dest_ext + ' extension; aborting compilation', extra='pass --force to override')
self.compile(os.path.join(dirpath, filename), destpath, package, force=force, **kwargs)
destpath = destpath
if destpath is not None:
filepaths.append(destpath)
for name in dirnames[:]:
if not is_special_dir(name) and name.startswith('.'):
if logger.verbose:
logger.show_tabulated('Skipped directory', name, '(explicitly pass as source to override).')
dirnames.remove(name)
return filepaths
|
coconut
|
positive
|
@wraps(view_func)
def wrapped_target(*args, **kwargs):
with silk_meta_profiler():
try:
func_code = target.__code__
except AttributeError:
raise NotImplementedError('Profile not implemented to decorate type %s' % target.__class__.__name__)
line_num = func_code.co_firstlineno
file_path = func_code.co_filename
func_name = target.__name__
if not self.name:
self.name = func_name
self.profile = {'func_name': func_name, 'name': self.name, 'file_path': file_path, 'line_num': line_num, 'dynamic': self._dynamic, 'start_time': timezone.now(), 'request': DataCollector().request}
<DeepExtract>
self._queries_before = self._query_identifiers_from_collector()
</DeepExtract>
try:
result = target(*args, **kwargs)
except Exception:
self.profile['exception_raised'] = True
raise
finally:
with silk_meta_profiler():
self.profile['end_time'] = timezone.now()
<DeepExtract>
collector = DataCollector()
self._end_queries()
assert self.profile, 'no profile was created'
diff = set(self._queries_after).difference(set(self._queries_before))
self.profile['queries'] = diff
collector.register_profile(self.profile)
</DeepExtract>
return result
|
@wraps(view_func)
def wrapped_target(*args, **kwargs):
with silk_meta_profiler():
try:
func_code = target.__code__
except AttributeError:
raise NotImplementedError('Profile not implemented to decorate type %s' % target.__class__.__name__)
line_num = func_code.co_firstlineno
file_path = func_code.co_filename
func_name = target.__name__
if not self.name:
self.name = func_name
self.profile = {'func_name': func_name, 'name': self.name, 'file_path': file_path, 'line_num': line_num, 'dynamic': self._dynamic, 'start_time': timezone.now(), 'request': DataCollector().request}
self._queries_before = self._query_identifiers_from_collector()
try:
result = target(*args, **kwargs)
except Exception:
self.profile['exception_raised'] = True
raise
finally:
with silk_meta_profiler():
self.profile['end_time'] = timezone.now()
collector = DataCollector()
self._end_queries()
assert self.profile, 'no profile was created'
diff = set(self._queries_after).difference(set(self._queries_before))
self.profile['queries'] = diff
collector.register_profile(self.profile)
return result
|
django-silk
|
positive
|
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
<DeepExtract>
arch = self.GetArch(config)
if arch == 'x64' and (not config.endswith('_x64')):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
config = config
</DeepExtract>
<DeepExtract>
libpaths = self._GetAndMunge(self.msvs_settings[config], (root, 'AdditionalLibraryDirectories'), [], prefix, append, map)
</DeepExtract>
libpaths = [os.path.normpath(gyp_to_build_path(self.ConvertVSMacros(p, config=config))) for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
|
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
arch = self.GetArch(config)
if arch == 'x64' and (not config.endswith('_x64')):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
config = config
libpaths = self._GetAndMunge(self.msvs_settings[config], (root, 'AdditionalLibraryDirectories'), [], prefix, append, map)
libpaths = [os.path.normpath(gyp_to_build_path(self.ConvertVSMacros(p, config=config))) for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
|
archived-pangyp
|
positive
|
def _app(self, name):
<DeepExtract>
if self.cur_token.type == '(':
val = self.cur_token.val
self._get_next_token()
return val
else:
self._error('Unmatched {} (found {})'.format('(', self.cur_token.type))
</DeepExtract>
args = []
while self.cur_token.type != ')':
args.append(self._expr())
if self.cur_token.type == ',':
<DeepExtract>
try:
self.cur_token = self.lexer.token()
if self.cur_token is None:
self.cur_token = lexer.Token(None, None, None)
except lexer.LexerError as e:
self._error('Lexer error at position {}: {}'.format(e.pos, e))
</DeepExtract>
elif self.cur_token.type == ')':
pass
else:
<DeepExtract>
raise ParseError('Unexpected {} in application'.format(self.cur_token.val))
</DeepExtract>
<DeepExtract>
if self.cur_token.type == ')':
val = self.cur_token.val
self._get_next_token()
return val
else:
self._error('Unmatched {} (found {})'.format(')', self.cur_token.type))
</DeepExtract>
return ast.AppExpr(ast.Identifier(name), args)
|
def _app(self, name):
if self.cur_token.type == '(':
val = self.cur_token.val
self._get_next_token()
return val
else:
self._error('Unmatched {} (found {})'.format('(', self.cur_token.type))
args = []
while self.cur_token.type != ')':
args.append(self._expr())
if self.cur_token.type == ',':
try:
self.cur_token = self.lexer.token()
if self.cur_token is None:
self.cur_token = lexer.Token(None, None, None)
except lexer.LexerError as e:
self._error('Lexer error at position {}: {}'.format(e.pos, e))
elif self.cur_token.type == ')':
pass
else:
raise ParseError('Unexpected {} in application'.format(self.cur_token.val))
if self.cur_token.type == ')':
val = self.cur_token.val
self._get_next_token()
return val
else:
self._error('Unmatched {} (found {})'.format(')', self.cur_token.type))
return ast.AppExpr(ast.Identifier(name), args)
|
code-for-blog
|
positive
|
def fit(self, X, R):
"""Compute Hierarchical Topographical Factor Analysis Model
[Manning2014-1][Manning2014-2]
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
<DeepExtract>
if not isinstance(X, list):
raise TypeError('Input data should be a list')
if not isinstance(R, list):
raise TypeError('Coordinates should be a list')
if len(X) < 1:
raise ValueError('Need at least one subject to train the model. Got {0:d}'.format(len(X)))
for (idx, x) in enumerate(X):
if not isinstance(x, np.ndarray):
raise TypeError('Each subject data should be an array')
if x.ndim != 2:
raise TypeError('Each subject data should be 2D array')
if not isinstance(R[idx], np.ndarray):
raise TypeError('Each scanner coordinate matrix should be an array')
if R[idx].ndim != 2:
raise TypeError('Each scanner coordinate matrix should be 2D array')
if x.shape[0] != R[idx].shape[0]:
raise TypeError('n_voxel should be the same in X[idx] and R[idx]')
return self
</DeepExtract>
if self.verbose:
logger.info('Start to fit HTFA')
self.n_dim = R[0].shape[1]
self.cov_vec_size = np.sum(np.arange(self.n_dim) + 1)
self.prior_size = self.K * (self.n_dim + 1)
self.prior_bcast_size = self.K * (self.n_dim + 2 + self.cov_vec_size)
self.get_map_offset()
<DeepExtract>
(rank, size) = self._get_mpi_info()
use_gather = True if self.n_subj % size == 0 else False
n_local_subj = len(R)
(max_sample_tr, max_sample_voxel) = self._get_subject_info(n_local_subj, X)
tfa = []
for (s, subj_data) in enumerate(X):
tfa.append(TFA(max_iter=self.max_local_iter, threshold=self.threshold, K=self.K, nlss_method=self.nlss_method, nlss_loss=self.nlss_loss, x_scale=self.x_scale, tr_solver=self.tr_solver, weight_method=self.weight_method, upper_ratio=self.upper_ratio, lower_ratio=self.lower_ratio, verbose=self.verbose, max_num_tr=max_sample_tr[s], max_num_voxel=max_sample_voxel[s]))
(gather_size, gather_offset, subject_map) = self._get_gather_offset(size)
self.local_posterior_ = np.zeros(n_local_subj * self.prior_size)
self._init_prior_posterior(rank, R, n_local_subj)
(node_weight_size, local_weight_offset) = self._get_weight_size(X, n_local_subj)
self.local_weights_ = np.zeros(node_weight_size[0])
m = 0
outer_converged = np.array([0])
while m < self.max_global_iter and (not outer_converged[0]):
if self.verbose:
logger.info('HTFA global iter %d ' % m)
self.comm.Bcast(self.global_prior_, root=0)
for (s, subj_data) in enumerate(X):
tfa[s].set_prior(self.global_prior_[0:self.prior_size].copy())
tfa[s].set_seed(m * self.max_local_iter)
tfa[s].fit(subj_data, R=R[s], template_prior=self.global_prior_.copy())
tfa[s]._assign_posterior()
start_idx = s * self.prior_size
end_idx = (s + 1) * self.prior_size
self.local_posterior_[start_idx:end_idx] = tfa[s].local_posterior_
self._gather_local_posterior(use_gather, gather_size, gather_offset)
outer_converged = self._update_global_posterior(rank, m, outer_converged)
self.comm.Bcast(outer_converged, root=0)
m += 1
self._update_weight(X, R, n_local_subj, local_weight_offset)
return self
</DeepExtract>
return self
|
def fit(self, X, R):
"""Compute Hierarchical Topographical Factor Analysis Model
[Manning2014-1][Manning2014-2]
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
if not isinstance(X, list):
raise TypeError('Input data should be a list')
if not isinstance(R, list):
raise TypeError('Coordinates should be a list')
if len(X) < 1:
raise ValueError('Need at least one subject to train the model. Got {0:d}'.format(len(X)))
for (idx, x) in enumerate(X):
if not isinstance(x, np.ndarray):
raise TypeError('Each subject data should be an array')
if x.ndim != 2:
raise TypeError('Each subject data should be 2D array')
if not isinstance(R[idx], np.ndarray):
raise TypeError('Each scanner coordinate matrix should be an array')
if R[idx].ndim != 2:
raise TypeError('Each scanner coordinate matrix should be 2D array')
if x.shape[0] != R[idx].shape[0]:
raise TypeError('n_voxel should be the same in X[idx] and R[idx]')
return self
if self.verbose:
logger.info('Start to fit HTFA')
self.n_dim = R[0].shape[1]
self.cov_vec_size = np.sum(np.arange(self.n_dim) + 1)
self.prior_size = self.K * (self.n_dim + 1)
self.prior_bcast_size = self.K * (self.n_dim + 2 + self.cov_vec_size)
self.get_map_offset()
(rank, size) = self._get_mpi_info()
use_gather = True if self.n_subj % size == 0 else False
n_local_subj = len(R)
(max_sample_tr, max_sample_voxel) = self._get_subject_info(n_local_subj, X)
tfa = []
for (s, subj_data) in enumerate(X):
tfa.append(TFA(max_iter=self.max_local_iter, threshold=self.threshold, K=self.K, nlss_method=self.nlss_method, nlss_loss=self.nlss_loss, x_scale=self.x_scale, tr_solver=self.tr_solver, weight_method=self.weight_method, upper_ratio=self.upper_ratio, lower_ratio=self.lower_ratio, verbose=self.verbose, max_num_tr=max_sample_tr[s], max_num_voxel=max_sample_voxel[s]))
(gather_size, gather_offset, subject_map) = self._get_gather_offset(size)
self.local_posterior_ = np.zeros(n_local_subj * self.prior_size)
self._init_prior_posterior(rank, R, n_local_subj)
(node_weight_size, local_weight_offset) = self._get_weight_size(X, n_local_subj)
self.local_weights_ = np.zeros(node_weight_size[0])
m = 0
outer_converged = np.array([0])
while m < self.max_global_iter and (not outer_converged[0]):
if self.verbose:
logger.info('HTFA global iter %d ' % m)
self.comm.Bcast(self.global_prior_, root=0)
for (s, subj_data) in enumerate(X):
tfa[s].set_prior(self.global_prior_[0:self.prior_size].copy())
tfa[s].set_seed(m * self.max_local_iter)
tfa[s].fit(subj_data, R=R[s], template_prior=self.global_prior_.copy())
tfa[s]._assign_posterior()
start_idx = s * self.prior_size
end_idx = (s + 1) * self.prior_size
self.local_posterior_[start_idx:end_idx] = tfa[s].local_posterior_
self._gather_local_posterior(use_gather, gather_size, gather_offset)
outer_converged = self._update_global_posterior(rank, m, outer_converged)
self.comm.Bcast(outer_converged, root=0)
m += 1
self._update_weight(X, R, n_local_subj, local_weight_offset)
return self
return self
|
brainiak
|
positive
|
def create_fork(self, parent_org, parent_repo):
<DeepExtract>
headers = get_bloom_headers(self.auth)
url = urlunsplit(['https', site, '/repos/{parent_org}/{parent_repo}/forks'.format(**locals()), '', ''])
if {} is None:
request = Request(url, headers=headers)
else:
{} = json.dumps({})
if sys.version_info[0] >= 3:
{} = {}.encode('utf-8')
request = Request(url, data={}, headers=headers)
try:
response = urlopen(request, timeout=120)
except HTTPError as e:
if e.code in [401]:
raise GitHubAuthException(str(e) + ' (%s)' % url)
else:
raise GithubException(str(e) + ' (%s)' % url)
except URLError as e:
raise GithubException(str(e) + ' (%s)' % url)
resp = response
</DeepExtract>
if '{0}'.format(resp.getcode()) not in ['200', '202']:
raise GithubException("Failed to create a fork of '{parent_org}/{parent_repo}'".format(**locals()), resp)
return json_loads(resp)
|
def create_fork(self, parent_org, parent_repo):
headers = get_bloom_headers(self.auth)
url = urlunsplit(['https', site, '/repos/{parent_org}/{parent_repo}/forks'.format(**locals()), '', ''])
if {} is None:
request = Request(url, headers=headers)
else:
{} = json.dumps({})
if sys.version_info[0] >= 3:
{} = {}.encode('utf-8')
request = Request(url, data={}, headers=headers)
try:
response = urlopen(request, timeout=120)
except HTTPError as e:
if e.code in [401]:
raise GitHubAuthException(str(e) + ' (%s)' % url)
else:
raise GithubException(str(e) + ' (%s)' % url)
except URLError as e:
raise GithubException(str(e) + ' (%s)' % url)
resp = response
if '{0}'.format(resp.getcode()) not in ['200', '202']:
raise GithubException("Failed to create a fork of '{parent_org}/{parent_repo}'".format(**locals()), resp)
return json_loads(resp)
|
bloom
|
positive
|