| before (stringlengths 0-955k) | after (stringlengths 0-877k) | repo (stringlengths 1-74) | type (stringclasses 1: positive) |
|---|---|---|---|
def bbox_augmentation(anno, image_id, im_shape):
human_boxes = []
object_boxes = []
human_labels = []
object_labels = []
ho_pair_labels = []
object_word_embeddings = []
mask = []
for human_object_pair in anno:
<DeepExtract>
height = human_object_pair['human_box'][3] - human_object_pair['human_box'][1]
width = human_object_pair['human_box'][2] - human_object_pair['human_box'][0]
y_center = (human_object_pair['human_box'][3] + human_object_pair['human_box'][1]) / 2
x_center = (human_object_pair['human_box'][2] + human_object_pair['human_box'][0]) / 2
thres = 0.7
for count in range(20):
ratio = 1 + randint(-10, 10) * 0.01
y_shift = randint(-np.floor(height), np.floor(height)) * 0.1
x_shift = randint(-np.floor(width), np.floor(width)) * 0.1
x1 = max(0, x_center + x_shift - ratio * width / 2)
x2 = min(im_shape[1] - 1, x_center + x_shift + ratio * width / 2)
y1 = max(0, y_center + y_shift - ratio * height / 2)
y2 = min(im_shape[0] - 1, y_center + y_shift + ratio * height / 2)
if self.bbox_iou(human_object_pair['human_box'], np.array([x1, y1, x2, y2])) > thres:
box = np.array([x1, y1, x2, y2]).astype(np.float32)
human_box_aug = box
human_box_aug = human_object_pair['human_box']
</DeepExtract>
human_boxes.append(human_box_aug)
<DeepExtract>
height = human_object_pair['object_box'][3] - human_object_pair['object_box'][1]
width = human_object_pair['object_box'][2] - human_object_pair['object_box'][0]
y_center = (human_object_pair['object_box'][3] + human_object_pair['object_box'][1]) / 2
x_center = (human_object_pair['object_box'][2] + human_object_pair['object_box'][0]) / 2
thres = 0.7
for count in range(20):
ratio = 1 + randint(-10, 10) * 0.01
y_shift = randint(-np.floor(height), np.floor(height)) * 0.1
x_shift = randint(-np.floor(width), np.floor(width)) * 0.1
x1 = max(0, x_center + x_shift - ratio * width / 2)
x2 = min(im_shape[1] - 1, x_center + x_shift + ratio * width / 2)
y1 = max(0, y_center + y_shift - ratio * height / 2)
y2 = min(im_shape[0] - 1, y_center + y_shift + ratio * height / 2)
if self.bbox_iou(human_object_pair['object_box'], np.array([x1, y1, x2, y2])) > thres:
box = np.array([x1, y1, x2, y2]).astype(np.float32)
object_box_aug = box
object_box_aug = human_object_pair['object_box']
</DeepExtract>
object_boxes.append(object_box_aug)
<DeepExtract>
action_ = np.zeros(self.num_classes)
for GT_idx in human_object_pair['human_action_id_list']:
action_[GT_idx] = 1
human_verbs_to_vector = action_
</DeepExtract>
human_labels.append(human_verbs_to_vector)
<DeepExtract>
action_ = np.zeros(self.num_classes)
for GT_idx in human_object_pair['object_action_id_list']:
action_[GT_idx] = 1
object_verbs_to_vector = action_
</DeepExtract>
object_labels.append(object_verbs_to_vector)
<DeepExtract>
action_ = np.zeros(self.num_classes)
for GT_idx in human_object_pair['verb_id_list']:
action_[GT_idx] = 1
ho_verbs_to_vector = action_
</DeepExtract>
ho_pair_labels.append(ho_verbs_to_vector)
object_class = human_object_pair['object_class']
object_word_embeddings.append(self.word_embeddings[object_class].reshape(300))
<DeepExtract>
mask_ = np.zeros(self.num_classes)
for GT_idx in human_object_pair['possible_verb_with_object']:
mask_[GT_idx] = 1
curr_mask = mask_
</DeepExtract>
mask.append(curr_mask)
num_pos = len(human_boxes)
if image_id in self.tran_val_neg_file.keys():
human_boxes_neg = []
object_boxes_neg = []
object_word_embeddings_neg = []
mask_neg = []
for negative_pair in self.tran_val_neg_file[image_id]:
if self.bbox_iou(negative_pair['human_box'], anno[0]['human_box']) > 0.6:
<DeepExtract>
height = negative_pair['human_box'][3] - negative_pair['human_box'][1]
width = negative_pair['human_box'][2] - negative_pair['human_box'][0]
y_center = (negative_pair['human_box'][3] + negative_pair['human_box'][1]) / 2
x_center = (negative_pair['human_box'][2] + negative_pair['human_box'][0]) / 2
thres = 0.7
for count in range(20):
ratio = 1 + randint(-10, 10) * 0.01
y_shift = randint(-np.floor(height), np.floor(height)) * 0.1
x_shift = randint(-np.floor(width), np.floor(width)) * 0.1
x1 = max(0, x_center + x_shift - ratio * width / 2)
x2 = min(im_shape[1] - 1, x_center + x_shift + ratio * width / 2)
y1 = max(0, y_center + y_shift - ratio * height / 2)
y2 = min(im_shape[0] - 1, y_center + y_shift + ratio * height / 2)
if self.bbox_iou(negative_pair['human_box'], np.array([x1, y1, x2, y2])) > thres:
box = np.array([x1, y1, x2, y2]).astype(np.float32)
human_box_neg_aug = box
human_box_neg_aug = negative_pair['human_box']
</DeepExtract>
human_boxes_neg.append(human_box_neg_aug)
<DeepExtract>
height = negative_pair['object_box'][3] - negative_pair['object_box'][1]
width = negative_pair['object_box'][2] - negative_pair['object_box'][0]
y_center = (negative_pair['object_box'][3] + negative_pair['object_box'][1]) / 2
x_center = (negative_pair['object_box'][2] + negative_pair['object_box'][0]) / 2
thres = 0.7
for count in range(20):
ratio = 1 + randint(-10, 10) * 0.01
y_shift = randint(-np.floor(height), np.floor(height)) * 0.1
x_shift = randint(-np.floor(width), np.floor(width)) * 0.1
x1 = max(0, x_center + x_shift - ratio * width / 2)
x2 = min(im_shape[1] - 1, x_center + x_shift + ratio * width / 2)
y1 = max(0, y_center + y_shift - ratio * height / 2)
y2 = min(im_shape[0] - 1, y_center + y_shift + ratio * height / 2)
if self.bbox_iou(negative_pair['object_box'], np.array([x1, y1, x2, y2])) > thres:
box = np.array([x1, y1, x2, y2]).astype(np.float32)
object_box_neg_aug = box
object_box_neg_aug = negative_pair['object_box']
</DeepExtract>
object_boxes_neg.append(object_box_neg_aug)
object_class = negative_pair['object_class']
object_word_embeddings_neg.append(self.word_embeddings[object_class].reshape(300))
<DeepExtract>
mask_ = np.zeros(self.num_classes)
for GT_idx in negative_pair['possible_verb_with_object']:
mask_[GT_idx] = 1
curr_mask = mask_
</DeepExtract>
mask_neg.append(curr_mask)
if self.negative_sample_ratio != -1:
if len(human_boxes_neg) >= self.negative_sample_ratio * num_pos:
idx_list = random.sample(range(len(human_boxes_neg)), len(human_boxes_neg))
idx_list = idx_list[:self.negative_sample_ratio * num_pos]
human_boxes_neg = [human_boxes_neg[i] for i in idx_list]
object_boxes_neg = [object_boxes_neg[i] for i in idx_list]
object_word_embeddings_neg = [object_word_embeddings_neg[i] for i in idx_list]
mask_neg = [mask_neg[i] for i in idx_list]
if len(human_boxes_neg) < self.negative_sample_ratio * num_pos and len(human_boxes_neg) != 0:
idx_list = np.random.choice(len(human_boxes_neg), self.negative_sample_ratio * num_pos - len(human_boxes_neg)).tolist()
human_boxes_neg += [human_boxes_neg[i] for i in idx_list]
object_boxes_neg += [object_boxes_neg[i] for i in idx_list]
object_word_embeddings_neg += [object_word_embeddings_neg[i] for i in idx_list]
mask_neg += [mask_neg[i] for i in idx_list]
human_boxes += human_boxes_neg
object_boxes += object_boxes_neg
object_word_embeddings += object_word_embeddings_neg
mask += mask_neg
num_pos_neg = len(human_boxes)
if cfg.DATASETS.NEG_VERB_ALLZERO == 1:
for _ in range(num_pos_neg - num_pos):
ho_pair_labels.append(np.zeros(self.num_classes))
elif cfg.DATASETS.NEG_VERB_ALLZERO == 0:
for _ in range(num_pos_neg - num_pos):
ho_pair_labels.append(self.verb_list_to_vector([57]))
else:
assert 0
assert len(ho_pair_labels) == num_pos_neg
blobs = {}
blobs['human_boxes'] = torch.FloatTensor(human_boxes).reshape(num_pos_neg, 4)
blobs['object_boxes'] = torch.FloatTensor(object_boxes).reshape(num_pos_neg, 4)
blobs['object_word_embeddings'] = torch.FloatTensor(object_word_embeddings).reshape(num_pos_neg, 300)
blobs['ho_pair_labels'] = torch.FloatTensor(ho_pair_labels).reshape(num_pos_neg, self.num_classes)
blobs['mask_ho'] = torch.FloatTensor(mask).reshape(num_pos_neg, self.num_classes)
blobs['human_labels'] = torch.FloatTensor(human_labels).reshape(num_pos, self.num_classes)
blobs['object_labels'] = torch.FloatTensor(object_labels).reshape(num_pos, self.num_classes)
blobs['pos_num'] = num_pos
return blobs
|
def bbox_augmentation(anno, image_id, im_shape):
human_boxes = []
object_boxes = []
human_labels = []
object_labels = []
ho_pair_labels = []
object_word_embeddings = []
mask = []
for human_object_pair in anno:
height = human_object_pair['human_box'][3] - human_object_pair['human_box'][1]
width = human_object_pair['human_box'][2] - human_object_pair['human_box'][0]
y_center = (human_object_pair['human_box'][3] + human_object_pair['human_box'][1]) / 2
x_center = (human_object_pair['human_box'][2] + human_object_pair['human_box'][0]) / 2
thres = 0.7
for count in range(20):
ratio = 1 + randint(-10, 10) * 0.01
y_shift = randint(-np.floor(height), np.floor(height)) * 0.1
x_shift = randint(-np.floor(width), np.floor(width)) * 0.1
x1 = max(0, x_center + x_shift - ratio * width / 2)
x2 = min(im_shape[1] - 1, x_center + x_shift + ratio * width / 2)
y1 = max(0, y_center + y_shift - ratio * height / 2)
y2 = min(im_shape[0] - 1, y_center + y_shift + ratio * height / 2)
if self.bbox_iou(human_object_pair['human_box'], np.array([x1, y1, x2, y2])) > thres:
box = np.array([x1, y1, x2, y2]).astype(np.float32)
human_box_aug = box
human_box_aug = human_object_pair['human_box']
human_boxes.append(human_box_aug)
height = human_object_pair['object_box'][3] - human_object_pair['object_box'][1]
width = human_object_pair['object_box'][2] - human_object_pair['object_box'][0]
y_center = (human_object_pair['object_box'][3] + human_object_pair['object_box'][1]) / 2
x_center = (human_object_pair['object_box'][2] + human_object_pair['object_box'][0]) / 2
thres = 0.7
for count in range(20):
ratio = 1 + randint(-10, 10) * 0.01
y_shift = randint(-np.floor(height), np.floor(height)) * 0.1
x_shift = randint(-np.floor(width), np.floor(width)) * 0.1
x1 = max(0, x_center + x_shift - ratio * width / 2)
x2 = min(im_shape[1] - 1, x_center + x_shift + ratio * width / 2)
y1 = max(0, y_center + y_shift - ratio * height / 2)
y2 = min(im_shape[0] - 1, y_center + y_shift + ratio * height / 2)
if self.bbox_iou(human_object_pair['object_box'], np.array([x1, y1, x2, y2])) > thres:
box = np.array([x1, y1, x2, y2]).astype(np.float32)
object_box_aug = box
object_box_aug = human_object_pair['object_box']
object_boxes.append(object_box_aug)
action_ = np.zeros(self.num_classes)
for GT_idx in human_object_pair['human_action_id_list']:
action_[GT_idx] = 1
human_verbs_to_vector = action_
human_labels.append(human_verbs_to_vector)
action_ = np.zeros(self.num_classes)
for GT_idx in human_object_pair['object_action_id_list']:
action_[GT_idx] = 1
object_verbs_to_vector = action_
object_labels.append(object_verbs_to_vector)
action_ = np.zeros(self.num_classes)
for GT_idx in human_object_pair['verb_id_list']:
action_[GT_idx] = 1
ho_verbs_to_vector = action_
ho_pair_labels.append(ho_verbs_to_vector)
object_class = human_object_pair['object_class']
object_word_embeddings.append(self.word_embeddings[object_class].reshape(300))
mask_ = np.zeros(self.num_classes)
for GT_idx in human_object_pair['possible_verb_with_object']:
mask_[GT_idx] = 1
curr_mask = mask_
mask.append(curr_mask)
num_pos = len(human_boxes)
if image_id in self.tran_val_neg_file.keys():
human_boxes_neg = []
object_boxes_neg = []
object_word_embeddings_neg = []
mask_neg = []
for negative_pair in self.tran_val_neg_file[image_id]:
if self.bbox_iou(negative_pair['human_box'], anno[0]['human_box']) > 0.6:
height = negative_pair['human_box'][3] - negative_pair['human_box'][1]
width = negative_pair['human_box'][2] - negative_pair['human_box'][0]
y_center = (negative_pair['human_box'][3] + negative_pair['human_box'][1]) / 2
x_center = (negative_pair['human_box'][2] + negative_pair['human_box'][0]) / 2
thres = 0.7
for count in range(20):
ratio = 1 + randint(-10, 10) * 0.01
y_shift = randint(-np.floor(height), np.floor(height)) * 0.1
x_shift = randint(-np.floor(width), np.floor(width)) * 0.1
x1 = max(0, x_center + x_shift - ratio * width / 2)
x2 = min(im_shape[1] - 1, x_center + x_shift + ratio * width / 2)
y1 = max(0, y_center + y_shift - ratio * height / 2)
y2 = min(im_shape[0] - 1, y_center + y_shift + ratio * height / 2)
if self.bbox_iou(negative_pair['human_box'], np.array([x1, y1, x2, y2])) > thres:
box = np.array([x1, y1, x2, y2]).astype(np.float32)
human_box_neg_aug = box
human_box_neg_aug = negative_pair['human_box']
human_boxes_neg.append(human_box_neg_aug)
height = negative_pair['object_box'][3] - negative_pair['object_box'][1]
width = negative_pair['object_box'][2] - negative_pair['object_box'][0]
y_center = (negative_pair['object_box'][3] + negative_pair['object_box'][1]) / 2
x_center = (negative_pair['object_box'][2] + negative_pair['object_box'][0]) / 2
thres = 0.7
for count in range(20):
ratio = 1 + randint(-10, 10) * 0.01
y_shift = randint(-np.floor(height), np.floor(height)) * 0.1
x_shift = randint(-np.floor(width), np.floor(width)) * 0.1
x1 = max(0, x_center + x_shift - ratio * width / 2)
x2 = min(im_shape[1] - 1, x_center + x_shift + ratio * width / 2)
y1 = max(0, y_center + y_shift - ratio * height / 2)
y2 = min(im_shape[0] - 1, y_center + y_shift + ratio * height / 2)
if self.bbox_iou(negative_pair['object_box'], np.array([x1, y1, x2, y2])) > thres:
box = np.array([x1, y1, x2, y2]).astype(np.float32)
object_box_neg_aug = box
object_box_neg_aug = negative_pair['object_box']
object_boxes_neg.append(object_box_neg_aug)
object_class = negative_pair['object_class']
object_word_embeddings_neg.append(self.word_embeddings[object_class].reshape(300))
mask_ = np.zeros(self.num_classes)
for GT_idx in negative_pair['possible_verb_with_object']:
mask_[GT_idx] = 1
curr_mask = mask_
mask_neg.append(curr_mask)
if self.negative_sample_ratio != -1:
if len(human_boxes_neg) >= self.negative_sample_ratio * num_pos:
idx_list = random.sample(range(len(human_boxes_neg)), len(human_boxes_neg))
idx_list = idx_list[:self.negative_sample_ratio * num_pos]
human_boxes_neg = [human_boxes_neg[i] for i in idx_list]
object_boxes_neg = [object_boxes_neg[i] for i in idx_list]
object_word_embeddings_neg = [object_word_embeddings_neg[i] for i in idx_list]
mask_neg = [mask_neg[i] for i in idx_list]
if len(human_boxes_neg) < self.negative_sample_ratio * num_pos and len(human_boxes_neg) != 0:
idx_list = np.random.choice(len(human_boxes_neg), self.negative_sample_ratio * num_pos - len(human_boxes_neg)).tolist()
human_boxes_neg += [human_boxes_neg[i] for i in idx_list]
object_boxes_neg += [object_boxes_neg[i] for i in idx_list]
object_word_embeddings_neg += [object_word_embeddings_neg[i] for i in idx_list]
mask_neg += [mask_neg[i] for i in idx_list]
human_boxes += human_boxes_neg
object_boxes += object_boxes_neg
object_word_embeddings += object_word_embeddings_neg
mask += mask_neg
num_pos_neg = len(human_boxes)
if cfg.DATASETS.NEG_VERB_ALLZERO == 1:
for _ in range(num_pos_neg - num_pos):
ho_pair_labels.append(np.zeros(self.num_classes))
elif cfg.DATASETS.NEG_VERB_ALLZERO == 0:
for _ in range(num_pos_neg - num_pos):
ho_pair_labels.append(self.verb_list_to_vector([57]))
else:
assert 0
assert len(ho_pair_labels) == num_pos_neg
blobs = {}
blobs['human_boxes'] = torch.FloatTensor(human_boxes).reshape(num_pos_neg, 4)
blobs['object_boxes'] = torch.FloatTensor(object_boxes).reshape(num_pos_neg, 4)
blobs['object_word_embeddings'] = torch.FloatTensor(object_word_embeddings).reshape(num_pos_neg, 300)
blobs['ho_pair_labels'] = torch.FloatTensor(ho_pair_labels).reshape(num_pos_neg, self.num_classes)
blobs['mask_ho'] = torch.FloatTensor(mask).reshape(num_pos_neg, self.num_classes)
blobs['human_labels'] = torch.FloatTensor(human_labels).reshape(num_pos, self.num_classes)
blobs['object_labels'] = torch.FloatTensor(object_labels).reshape(num_pos, self.num_classes)
blobs['pos_num'] = num_pos
return blobs
|
DRG
|
positive
|
def replay(self):
<DeepExtract>
if self._recording:
self.pipeline_logger.unregister('.*', self)
self._recording = False
</DeepExtract>
for (step, kwargs) in self.session:
self.pipeline_logger(step, **kwargs)
|
def replay(self):
if self._recording:
self.pipeline_logger.unregister('.*', self)
self._recording = False
for (step, kwargs) in self.session:
self.pipeline_logger(step, **kwargs)
|
axcell
|
positive
|
def _get_scheduling_setup(conf):
assert conf.lr_change_epochs is not None
assert conf.lr_fields is not None
assert conf.lr_scale_indicators is not None
<DeepExtract>
conf.lr_fields = [[float(_lr) for _lr in lr_field.split(',')] for lr_field in conf.lr_fields.split('/')]
</DeepExtract>
<DeepExtract>
def digital2name(x):
scale_indicators = {'0': 'linear', '1': 'poly', '2': 'convex'}[x]
scale_indicators = [digital2name(l) for l in conf.lr_scale_indicators.split(',')]
</DeepExtract>
<DeepExtract>
conf.lr_change_epochs = [int(l) for l in conf.lr_change_epochs.split(',')]
from_s = conf.lr_change_epochs[:-1]
to_s = conf.lr_change_epochs[1:]
epoch_fields = list(zip(from_s, to_s))
</DeepExtract>
return (epoch_fields, lr_fields, scale_indicators)
|
def _get_scheduling_setup(conf):
assert conf.lr_change_epochs is not None
assert conf.lr_fields is not None
assert conf.lr_scale_indicators is not None
conf.lr_fields = [[float(_lr) for _lr in lr_field.split(',')] for lr_field in conf.lr_fields.split('/')]
def digital2name(x):
scale_indicators = {'0': 'linear', '1': 'poly', '2': 'convex'}[x]
scale_indicators = [digital2name(l) for l in conf.lr_scale_indicators.split(',')]
conf.lr_change_epochs = [int(l) for l in conf.lr_change_epochs.split(',')]
from_s = conf.lr_change_epochs[:-1]
to_s = conf.lr_change_epochs[1:]
epoch_fields = list(zip(from_s, to_s))
return (epoch_fields, lr_fields, scale_indicators)
|
ChocoSGD
|
positive
|
def test_tuple(self):
@runtime_validation
def sample(data: typing.Tuple[int, str]) -> typing.Tuple[int, str]:
return data
@runtime_validation
def sample_bad(data: typing.Any) -> typing.Tuple[int, str]:
return data
@runtime_validation
def sample_any_in(data: typing.Tuple) -> typing.Tuple:
return data
@runtime_validation
def sample_any_out(data: typing.Any) -> typing.Tuple:
return data
self.assertEqual(sample((1, '')), (1, ''))
with self.assertRaises(RuntimeTypeError):
<DeepExtract>
return (1, 1)
</DeepExtract>
with self.assertRaises(RuntimeTypeError):
<DeepExtract>
return ()
</DeepExtract>
with self.assertRaises(RuntimeTypeError):
<DeepExtract>
return []
</DeepExtract>
with self.assertRaises(RuntimeTypeError):
<DeepExtract>
return ''
</DeepExtract>
self.assertEqual(sample_any_in((1, '')), (1, ''))
with self.assertRaises(RuntimeTypeError):
<DeepExtract>
return 1
</DeepExtract>
self.assertEqual(sample_any_out((1,)), (1,))
with self.assertRaises(RuntimeTypeError):
<DeepExtract>
return 1
</DeepExtract>
|
def test_tuple(self):
@runtime_validation
def sample(data: typing.Tuple[int, str]) -> typing.Tuple[int, str]:
return data
@runtime_validation
def sample_bad(data: typing.Any) -> typing.Tuple[int, str]:
return data
@runtime_validation
def sample_any_in(data: typing.Tuple) -> typing.Tuple:
return data
@runtime_validation
def sample_any_out(data: typing.Any) -> typing.Tuple:
return data
self.assertEqual(sample((1, '')), (1, ''))
with self.assertRaises(RuntimeTypeError):
return (1, 1)
with self.assertRaises(RuntimeTypeError):
return ()
with self.assertRaises(RuntimeTypeError):
return []
with self.assertRaises(RuntimeTypeError):
return ''
self.assertEqual(sample_any_in((1, '')), (1, ''))
with self.assertRaises(RuntimeTypeError):
return 1
self.assertEqual(sample_any_out((1,)), (1,))
with self.assertRaises(RuntimeTypeError):
return 1
|
enforce
|
positive
|
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
<DeepExtract>
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, '.svn')):
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, '.svn')):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
googlename = fullname[len(prefix) + 1:]
root_dir = os.path.dirname(fullname)
while root_dir != os.path.dirname(root_dir) and (not os.path.exists(os.path.join(root_dir, '.git'))) and (not os.path.exists(os.path.join(root_dir, '.hg'))) and (not os.path.exists(os.path.join(root_dir, '.svn'))):
root_dir = os.path.dirname(root_dir)
if os.path.exists(os.path.join(root_dir, '.git')) or os.path.exists(os.path.join(root_dir, '.hg')) or os.path.exists(os.path.join(root_dir, '.svn')):
prefix = os.path.commonprefix([root_dir, project_dir])
googlename = fullname[len(prefix) + 1:]
googlename = fullname
</DeepExtract>
(project, rest) = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
|
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, '.svn')):
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, '.svn')):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
googlename = fullname[len(prefix) + 1:]
root_dir = os.path.dirname(fullname)
while root_dir != os.path.dirname(root_dir) and (not os.path.exists(os.path.join(root_dir, '.git'))) and (not os.path.exists(os.path.join(root_dir, '.hg'))) and (not os.path.exists(os.path.join(root_dir, '.svn'))):
root_dir = os.path.dirname(root_dir)
if os.path.exists(os.path.join(root_dir, '.git')) or os.path.exists(os.path.join(root_dir, '.hg')) or os.path.exists(os.path.join(root_dir, '.svn')):
prefix = os.path.commonprefix([root_dir, project_dir])
googlename = fullname[len(prefix) + 1:]
googlename = fullname
(project, rest) = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
|
cowry
|
positive
|
def test_is_done(self):
<DeepExtract>
env = MultiagentPettingZooEnv(simple_world_comm_v2.env(), name='simple_world_comm_v2', device='cpu')
</DeepExtract>
env.reset()
self.assertFalse(env.is_done('leadadversary_0'))
self.assertFalse(env.is_done('adversary_0'))
|
def test_is_done(self):
env = MultiagentPettingZooEnv(simple_world_comm_v2.env(), name='simple_world_comm_v2', device='cpu')
env.reset()
self.assertFalse(env.is_done('leadadversary_0'))
self.assertFalse(env.is_done('adversary_0'))
|
autonomous-learning-library
|
positive
|
def parseNamingProps(meta, rn):
rnFormat = meta.rnFormat
if not len(meta.namingProps):
if rn == rnFormat:
return []
else:
raise ValueError('rn prefix mismatch')
nPropVals = []
rnLen = len(rn)
end = 0
start = 0
propMetaIter = iter(meta.namingProps)
needPropDelimiter = False
nPropMeta = None
for (rnPrefix, hasProp) in meta.rnPrefixes:
if start > end:
if needPropDelimiter:
<DeepExtract>
stk = deque()
first = True
end = start
while end < len(rn):
if not first and len(stk) == 0:
end = end
symbol = rn[end]
if symbol == '[':
if first and len(stk) == 0:
first = False
stk.append(symbol)
elif symbol == ']':
if first and len(stk) == 0:
raise ValueError('Invalid Rn: Found closing prop ' + 'delimiter before opening prop ' + 'delimiter.')
else:
stk.pop()
end += 1
end = -1
</DeepExtract>
else:
end = rnStr.find(rnPrefix, start)
if end == -1:
raise ValueError("rn prefix '%s' not found in '%s'" % (rnPrefix, rn))
nPropVal = rn[start:end]
if needPropDelimiter:
nPropVal = nPropVal[1:-1]
if nPropMeta:
nPropVals.append(nPropVal)
start = end
if not rn.startswith(rnPrefix, start):
raise ValueError('rn "%s" must be %s' % (rn, rnFormat))
if hasProp:
nPropMeta = next(propMetaIter)
needPropDelimiter = nPropMeta.needDelimiter
start += len(rnPrefix)
end = rnLen
nPropVal = rn[start:end]
if needPropDelimiter:
nPropVal = nPropVal[1:-1]
if nPropMeta:
nPropVals.append(nPropVal)
return nPropVals
|
def parseNamingProps(meta, rn):
rnFormat = meta.rnFormat
if not len(meta.namingProps):
if rn == rnFormat:
return []
else:
raise ValueError('rn prefix mismatch')
nPropVals = []
rnLen = len(rn)
end = 0
start = 0
propMetaIter = iter(meta.namingProps)
needPropDelimiter = False
nPropMeta = None
for (rnPrefix, hasProp) in meta.rnPrefixes:
if start > end:
if needPropDelimiter:
stk = deque()
first = True
end = start
while end < len(rn):
if not first and len(stk) == 0:
end = end
symbol = rn[end]
if symbol == '[':
if first and len(stk) == 0:
first = False
stk.append(symbol)
elif symbol == ']':
if first and len(stk) == 0:
raise ValueError('Invalid Rn: Found closing prop ' + 'delimiter before opening prop ' + 'delimiter.')
else:
stk.pop()
end += 1
end = -1
else:
end = rnStr.find(rnPrefix, start)
if end == -1:
raise ValueError("rn prefix '%s' not found in '%s'" % (rnPrefix, rn))
nPropVal = rn[start:end]
if needPropDelimiter:
nPropVal = nPropVal[1:-1]
if nPropMeta:
nPropVals.append(nPropVal)
start = end
if not rn.startswith(rnPrefix, start):
raise ValueError('rn "%s" must be %s' % (rn, rnFormat))
if hasProp:
nPropMeta = next(propMetaIter)
needPropDelimiter = nPropMeta.needDelimiter
start += len(rnPrefix)
end = rnLen
nPropVal = rn[start:end]
if needPropDelimiter:
nPropVal = nPropVal[1:-1]
if nPropMeta:
nPropVals.append(nPropVal)
return nPropVals
|
cobra
|
positive
|
def unhandled_exception():
exc_msg = str(traceback.format_exc())
if 'bad marshal data' in exc_msg:
match = re.search('\\s*(.+)\\s+ValueError', exc_msg)
err_msg = 'Identified corrupted .pyc file(s).'
err_msg += 'Please delete .pyc files on your system to fix the problem.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'must be pinned buffer, not bytearray' in exc_msg:
err_msg = 'Error occurred at Python interpreter which '
err_msg += 'is fixed in 2.7.x. Please update accordingly. '
err_msg += '(Reference: https://bugs.python.org/issue8104)'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif any((_ in exc_msg for _ in ('MemoryError', 'Cannot allocate memory'))):
err_msg = 'Memory exhaustion detected.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif "Permission denied: '" in exc_msg:
match = re.search("Permission denied: '([^']*)", exc_msg)
err_msg = "Permission error occurred while accessing file '" + match.group(1) + "'."
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif all((_ in exc_msg for _ in ('Access is denied', 'subprocess', 'metasploit'))):
err_msg = 'Permission error occurred while running Metasploit.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif all((_ in exc_msg for _ in ('Permission denied', 'metasploit'))):
err_msg = 'Permission error occurred while using Metasploit.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'Invalid argument' in exc_msg:
err_msg = 'Corrupted installation detected. '
err_msg += 'You should retrieve the latest (dev) version from official GitHub '
err_msg += "repository at '" + settings.GIT_URL + "'."
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif all((_ in exc_msg for _ in ('No such file', "_'"))):
err_msg = "Corrupted installation detected ('" + exc_msg.strip().split('\n')[-1] + "'). "
err_msg += 'You should retrieve the latest (dev) version from official GitHub '
err_msg += "repository at '" + settings.GIT_URL + "'."
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'Invalid IPv6 URL' in exc_msg:
err_msg = "invalid URL ('" + exc_msg.strip().split('\n')[-1] + "')"
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif any((_ in exc_msg for _ in ('Broken pipe',))):
raise SystemExit()
elif any((_ in exc_msg for _ in ('The paging file is too small',))):
err_msg = 'No space left for paging file.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif all((_ in exc_msg for _ in ('SyntaxError: Non-ASCII character', '.py on line', 'but no encoding declared'))) or any((_ in exc_msg for _ in ('source code string cannot contain null bytes', 'No module named'))) or any((_ in exc_msg for _ in ('ImportError', 'ModuleNotFoundError', '<frozen', "Can't find file for module"))):
err_msg = "Invalid runtime environment ('" + exc_msg.split('Error: ')[-1].strip() + "')."
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif any((_ in exc_msg for _ in ('No space left', 'Disk quota exceeded', 'Disk full while accessing'))):
err_msg = 'No space left on output device.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'Read-only file system' in exc_msg:
err_msg = 'Output device is mounted as read-only.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'OperationalError: disk I/O error' in exc_msg:
err_msg = 'I/O error on output device.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'Violation of BIDI' in exc_msg:
err_msg = 'Invalid URL (violation of Bidi IDNA rule - RFC 5893).'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
else:
err_msg = "Unhandled exception occurred in '" + settings.VERSION[1:] + "'. It is recommended to retry your "
err_msg += 'run with the latest (dev) version from official GitHub '
err_msg += "repository at '" + settings.GIT_URL + "'. If the exception persists, please open a new issue "
err_msg += "at '" + settings.ISSUES_PAGE + "' "
err_msg += 'with the following text and any other information required to '
err_msg += 'reproduce the bug. The '
err_msg += 'developers will try to reproduce the bug, fix it accordingly '
err_msg += 'and get back to you.\n'
err_msg += 'Commix version: ' + settings.VERSION[1:] + '\n'
err_msg += 'Python version: ' + settings.PYTHON_VERSION + '\n'
err_msg += 'Operating system: ' + os.name + '\n'
err_msg += 'Command line: ' + re.sub('.+?\\bcommix\\.py\\b', 'commix.py', ' '.join(sys.argv)) + '\n'
<DeepExtract>
for item in settings.SENSITIVE_OPTIONS:
match = re.search('(?i)commix.+(' + str(item) + ')(\\s+|=)([^-]+)', err_msg)
if match:
err_msg = err_msg.replace(match.group(3), '*' * len(match.group(3)) + settings.SINGLE_WHITESPACE)
err_msg = err_msg
</DeepExtract>
exc_msg = re.sub('".+?[/\\\\](\\w+\\.py)', '"\\g<1>', exc_msg)
print(settings.print_critical_msg(err_msg + '\n' + exc_msg.rstrip()))
<DeepExtract>
_ = re.sub("'[^']+'", "''", exc_msg[:])
_ = re.sub('\\s+line \\d+', '', _)
_ = re.sub('File ".+?/(\\w+\\.py)', '\\g<1>', _)
_ = re.sub('.+\\Z', '', _)
_ = re.sub('(Unicode[^:]*Error:).+', '\\g<1>', _)
_ = re.sub('= _', '= ', _)
_ = _.encode(settings.DEFAULT_CODEC)
key = hashlib.md5(_).hexdigest()[:8]
bug_report = 'Bug Report: Unhandled exception "' + str([i for i in exc_msg[:].split('\n') if i][-1]) + '" ' + '(#' + key + ')'
while True:
try:
message = 'Do you want to automatically create a new (anonymized) issue '
message += 'with the unhandled exception information at '
message += 'the official Github repository? [y/N] '
choise = read_input(message, default='N', check_batch=True)
if choise in settings.CHOICE_YES:
break
elif choise in settings.CHOICE_NO:
print(settings.SINGLE_WHITESPACE)
return
else:
invalid_option(choise)
pass
except:
print('\n')
raise SystemExit()
err_msg = err_msg[err_msg.find('\n'):]
request = _urllib.request.Request(url='https://api.github.com/search/issues?q=' + _urllib.parse.quote('repo:commixproject/commix' + settings.SINGLE_WHITESPACE + str(bug_report)))
try:
content = _urllib.request.urlopen(request, timeout=settings.TIMEOUT).read()
_ = json.loads(content)
duplicate = _['total_count'] > 0
closed = duplicate and _['items'][0]['state'] == 'closed'
if duplicate:
warn_msg = 'That issue seems to be already reported'
if closed:
warn_msg += ' and resolved. Please update to the latest '
warn_msg += "(dev) version from official GitHub repository at '" + settings.GIT_URL + "'"
warn_msg += '.\n'
print(settings.print_warning_msg(warn_msg))
return
except:
pass
data = {'title': str(bug_report), 'body': '```' + str(err_msg) + '\n```\n```\n' + str(exc_msg[:]) + '```'}
request = _urllib.request.Request(url='https://api.github.com/repos/commixproject/commix/issues', data=json.dumps(data).encode(), headers={'Authorization': 'token ' + base64.b64decode(settings.GITHUB_REPORT_OAUTH_TOKEN.encode(settings.DEFAULT_CODEC)).decode()})
try:
content = _urllib.request.urlopen(request, timeout=settings.TIMEOUT).read()
except Exception as err:
content = None
issue_url = re.search('https://github.com/commixproject/commix/issues/\\d+', content.decode(settings.DEFAULT_CODEC) or '')
if issue_url:
info_msg = "The created Github issue can been found at the address '" + str(issue_url.group(0)) + "'.\n"
print(settings.print_info_msg(info_msg))
else:
warn_msg = 'Something went wrong while creating a Github issue.'
if settings.UNAUTHORIZED_ERROR in str(err):
warn_msg += ' Please update to the latest revision.\n'
print(settings.print_warning_msg(warn_msg))
</DeepExtract>
|
def unhandled_exception():
exc_msg = str(traceback.format_exc())
if 'bad marshal data' in exc_msg:
match = re.search('\\s*(.+)\\s+ValueError', exc_msg)
err_msg = 'Identified corrupted .pyc file(s).'
err_msg += 'Please delete .pyc files on your system to fix the problem.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'must be pinned buffer, not bytearray' in exc_msg:
err_msg = 'Error occurred at Python interpreter which '
err_msg += 'is fixed in 2.7.x. Please update accordingly. '
err_msg += '(Reference: https://bugs.python.org/issue8104)'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif any((_ in exc_msg for _ in ('MemoryError', 'Cannot allocate memory'))):
err_msg = 'Memory exhaustion detected.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif "Permission denied: '" in exc_msg:
match = re.search("Permission denied: '([^']*)", exc_msg)
err_msg = "Permission error occurred while accessing file '" + match.group(1) + "'."
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif all((_ in exc_msg for _ in ('Access is denied', 'subprocess', 'metasploit'))):
err_msg = 'Permission error occurred while running Metasploit.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif all((_ in exc_msg for _ in ('Permission denied', 'metasploit'))):
err_msg = 'Permission error occurred while using Metasploit.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'Invalid argument' in exc_msg:
err_msg = 'Corrupted installation detected. '
err_msg += 'You should retrieve the latest (dev) version from official GitHub '
err_msg += "repository at '" + settings.GIT_URL + "'."
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif all((_ in exc_msg for _ in ('No such file', "_'"))):
err_msg = "Corrupted installation detected ('" + exc_msg.strip().split('\n')[-1] + "'). "
err_msg += 'You should retrieve the latest (dev) version from official GitHub '
err_msg += "repository at '" + settings.GIT_URL + "'."
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'Invalid IPv6 URL' in exc_msg:
err_msg = "invalid URL ('" + exc_msg.strip().split('\n')[-1] + "')"
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif any((_ in exc_msg for _ in ('Broken pipe',))):
raise SystemExit()
elif any((_ in exc_msg for _ in ('The paging file is too small',))):
err_msg = 'No space left for paging file.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif all((_ in exc_msg for _ in ('SyntaxError: Non-ASCII character', '.py on line', 'but no encoding declared'))) or any((_ in exc_msg for _ in ('source code string cannot contain null bytes', 'No module named'))) or any((_ in exc_msg for _ in ('ImportError', 'ModuleNotFoundError', '<frozen', "Can't find file for module"))):
err_msg = "Invalid runtime environment ('" + exc_msg.split('Error: ')[-1].strip() + "')."
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif any((_ in exc_msg for _ in ('No space left', 'Disk quota exceeded', 'Disk full while accessing'))):
err_msg = 'No space left on output device.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'Read-only file system' in exc_msg:
err_msg = 'Output device is mounted as read-only.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'OperationalError: disk I/O error' in exc_msg:
err_msg = 'I/O error on output device.'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
elif 'Violation of BIDI' in exc_msg:
err_msg = 'Invalid URL (violation of Bidi IDNA rule - RFC 5893).'
print(settings.print_critical_msg(err_msg))
raise SystemExit()
else:
err_msg = "Unhandled exception occurred in '" + settings.VERSION[1:] + "'. It is recommended to retry your "
err_msg += 'run with the latest (dev) version from official GitHub '
err_msg += "repository at '" + settings.GIT_URL + "'. If the exception persists, please open a new issue "
err_msg += "at '" + settings.ISSUES_PAGE + "' "
err_msg += 'with the following text and any other information required to '
err_msg += 'reproduce the bug. The '
err_msg += 'developers will try to reproduce the bug, fix it accordingly '
err_msg += 'and get back to you.\n'
err_msg += 'Commix version: ' + settings.VERSION[1:] + '\n'
err_msg += 'Python version: ' + settings.PYTHON_VERSION + '\n'
err_msg += 'Operating system: ' + os.name + '\n'
err_msg += 'Command line: ' + re.sub('.+?\\bcommix\\.py\\b', 'commix.py', ' '.join(sys.argv)) + '\n'
for item in settings.SENSITIVE_OPTIONS:
match = re.search('(?i)commix.+(' + str(item) + ')(\\s+|=)([^-]+)', err_msg)
if match:
err_msg = err_msg.replace(match.group(3), '*' * len(match.group(3)) + settings.SINGLE_WHITESPACE)
err_msg = err_msg
exc_msg = re.sub('".+?[/\\\\](\\w+\\.py)', '"\\g<1>', exc_msg)
print(settings.print_critical_msg(err_msg + '\n' + exc_msg.rstrip()))
_ = re.sub("'[^']+'", "''", exc_msg[:])
_ = re.sub('\\s+line \\d+', '', _)
_ = re.sub('File ".+?/(\\w+\\.py)', '\\g<1>', _)
_ = re.sub('.+\\Z', '', _)
_ = re.sub('(Unicode[^:]*Error:).+', '\\g<1>', _)
_ = re.sub('= _', '= ', _)
_ = _.encode(settings.DEFAULT_CODEC)
key = hashlib.md5(_).hexdigest()[:8]
bug_report = 'Bug Report: Unhandled exception "' + str([i for i in exc_msg[:].split('\n') if i][-1]) + '" ' + '(#' + key + ')'
while True:
try:
message = 'Do you want to automatically create a new (anonymized) issue '
message += 'with the unhandled exception information at '
message += 'the official Github repository? [y/N] '
choise = read_input(message, default='N', check_batch=True)
if choise in settings.CHOICE_YES:
break
elif choise in settings.CHOICE_NO:
print(settings.SINGLE_WHITESPACE)
return
else:
invalid_option(choise)
pass
except:
print('\n')
raise SystemExit()
err_msg = err_msg[err_msg.find('\n'):]
request = _urllib.request.Request(url='https://api.github.com/search/issues?q=' + _urllib.parse.quote('repo:commixproject/commix' + settings.SINGLE_WHITESPACE + str(bug_report)))
try:
content = _urllib.request.urlopen(request, timeout=settings.TIMEOUT).read()
_ = json.loads(content)
duplicate = _['total_count'] > 0
closed = duplicate and _['items'][0]['state'] == 'closed'
if duplicate:
warn_msg = 'That issue seems to be already reported'
if closed:
warn_msg += ' and resolved. Please update to the latest '
warn_msg += "(dev) version from official GitHub repository at '" + settings.GIT_URL + "'"
warn_msg += '.\n'
print(settings.print_warning_msg(warn_msg))
return
except:
pass
data = {'title': str(bug_report), 'body': '```' + str(err_msg) + '\n```\n```\n' + str(exc_msg[:]) + '```'}
request = _urllib.request.Request(url='https://api.github.com/repos/commixproject/commix/issues', data=json.dumps(data).encode(), headers={'Authorization': 'token ' + base64.b64decode(settings.GITHUB_REPORT_OAUTH_TOKEN.encode(settings.DEFAULT_CODEC)).decode()})
try:
content = _urllib.request.urlopen(request, timeout=settings.TIMEOUT).read()
except Exception as err:
content = None
issue_url = re.search('https://github.com/commixproject/commix/issues/\\d+', content.decode(settings.DEFAULT_CODEC) or '')
if issue_url:
info_msg = "The created Github issue can been found at the address '" + str(issue_url.group(0)) + "'.\n"
print(settings.print_info_msg(info_msg))
else:
warn_msg = 'Something went wrong while creating a Github issue.'
if settings.UNAUTHORIZED_ERROR in str(err):
warn_msg += ' Please update to the latest revision.\n'
print(settings.print_warning_msg(warn_msg))
|
commix
|
positive
|
def test_bytereverse_random(self):
t = bitarray(endian=self.random_endian())
t.frombytes(bytearray(range(256)))
t.bytereverse()
table = t.tobytes()
self.assertEqual(table[:9], b'\x00\x80@\xc0 \xa0`\xe0\x10')
for n in range(100):
<DeepExtract>
a = bitarray(0, self.random_endian())
a.frombytes(os.urandom(bits2bytes(8 * n)))
del a[8 * n:]
a = a
</DeepExtract>
i = randint(0, n)
j = randint(0, n)
b = a.copy()
memoryview(b)[i:j] = b.tobytes()[i:j].translate(table)
a.bytereverse(i, j)
<DeepExtract>
self.assertEqual(a, b)
self.assertEqual(a.endian(), b.endian())
</DeepExtract>
<DeepExtract>
self.assertIsInstance(a, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = a.buffer_info()
self.assertEqual(size, bits2bytes(len(a)))
self.assertEqual(padbits, 8 * size - len(a))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, a.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(a.nbytes, size)
self.assertEqual(a.padbits, padbits)
self.assertEqual(a.readonly, readonly)
self.assertEqual(len(a) + a.padbits, 8 * a.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(a) % 8, 0)
self.assertEqual(len(a), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(a).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(a.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
</DeepExtract>
|
def test_bytereverse_random(self):
t = bitarray(endian=self.random_endian())
t.frombytes(bytearray(range(256)))
t.bytereverse()
table = t.tobytes()
self.assertEqual(table[:9], b'\x00\x80@\xc0 \xa0`\xe0\x10')
for n in range(100):
a = bitarray(0, self.random_endian())
a.frombytes(os.urandom(bits2bytes(8 * n)))
del a[8 * n:]
a = a
i = randint(0, n)
j = randint(0, n)
b = a.copy()
memoryview(b)[i:j] = b.tobytes()[i:j].translate(table)
a.bytereverse(i, j)
self.assertEqual(a, b)
self.assertEqual(a.endian(), b.endian())
self.assertIsInstance(a, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = a.buffer_info()
self.assertEqual(size, bits2bytes(len(a)))
self.assertEqual(padbits, 8 * size - len(a))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, a.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(a.nbytes, size)
self.assertEqual(a.padbits, padbits)
self.assertEqual(a.readonly, readonly)
self.assertEqual(len(a) + a.padbits, 8 * a.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(a) % 8, 0)
self.assertEqual(len(a), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(a).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(a.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
|
bitarray
|
positive
|
def _create_data_input(self):
create_data_input = create_data_input_map[cfg.DATASET]
(context_execution, fetch_func) = create_data_input(self._input_db, self._expected_data_size, self._num_processes, self._num_workers, self._split, self._batch_size, crop_size=self._crop_size)
self._context_execution = context_execution
self._minibatch_fetch_func = fetch_func
<DeepExtract>
if self._split == 'train':
worker_ids = range(0, self._num_workers)
else:
assert self._num_workers < 100
worker_ids = range(100, 100 + self._num_workers)
</DeepExtract>
self._context_execution(worker_ids)
|
def _create_data_input(self):
create_data_input = create_data_input_map[cfg.DATASET]
(context_execution, fetch_func) = create_data_input(self._input_db, self._expected_data_size, self._num_processes, self._num_workers, self._split, self._batch_size, crop_size=self._crop_size)
self._context_execution = context_execution
self._minibatch_fetch_func = fetch_func
if self._split == 'train':
worker_ids = range(0, self._num_workers)
else:
assert self._num_workers < 100
worker_ids = range(100, 100 + self._num_workers)
self._context_execution(worker_ids)
|
CRCNN-Action
|
positive
|
def sample_from_discretized_mix_logistic_1d(m, nr_mix):
m = m.permute(0, 2, 3, 1)
ls = [int(y) for y in m.size()]
xs = ls[:-1] + [1]
logit_probs = m[:, :, :, :nr_mix]
m = m[:, :, :, nr_mix:].contiguous().view(xs + [nr_mix * 2])
temp = torch.FloatTensor(logit_probs.size())
if m.is_cuda:
temp = temp.cuda()
temp.uniform_(1e-05, 1.0 - 1e-05)
temp = logit_probs.data - torch.log(-torch.log(temp))
(_, argmax) = temp.max(dim=3)
<DeepExtract>
one_hot = torch.FloatTensor(argmax.size() + (nr_mix,)).zero_()
if argmax.is_cuda:
one_hot = one_hot.cuda()
one_hot.scatter_(len(argmax.size()), argmax.unsqueeze(-1), fill_with)
one_hot = Variable(one_hot)
</DeepExtract>
sel = one_hot.view(xs[:-1] + [1, nr_mix])
means = torch.sum(m[:, :, :, :, :nr_mix] * sel, dim=4)
log_scales = torch.clamp(torch.sum(m[:, :, :, :, nr_mix:2 * nr_mix] * sel, dim=4), min=-7.0)
u = torch.FloatTensor(means.size())
if m.is_cuda:
u = u.cuda()
u.uniform_(1e-05, 1.0 - 1e-05)
u = Variable(u)
x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1.0 - u))
x0 = torch.clamp(torch.clamp(x[:, :, :, 0], min=-1.0), max=1.0)
out = x0.unsqueeze(1)
return out
|
def sample_from_discretized_mix_logistic_1d(m, nr_mix):
m = m.permute(0, 2, 3, 1)
ls = [int(y) for y in m.size()]
xs = ls[:-1] + [1]
logit_probs = m[:, :, :, :nr_mix]
m = m[:, :, :, nr_mix:].contiguous().view(xs + [nr_mix * 2])
temp = torch.FloatTensor(logit_probs.size())
if m.is_cuda:
temp = temp.cuda()
temp.uniform_(1e-05, 1.0 - 1e-05)
temp = logit_probs.data - torch.log(-torch.log(temp))
(_, argmax) = temp.max(dim=3)
one_hot = torch.FloatTensor(argmax.size() + (nr_mix,)).zero_()
if argmax.is_cuda:
one_hot = one_hot.cuda()
one_hot.scatter_(len(argmax.size()), argmax.unsqueeze(-1), fill_with)
one_hot = Variable(one_hot)
sel = one_hot.view(xs[:-1] + [1, nr_mix])
means = torch.sum(m[:, :, :, :, :nr_mix] * sel, dim=4)
log_scales = torch.clamp(torch.sum(m[:, :, :, :, nr_mix:2 * nr_mix] * sel, dim=4), min=-7.0)
u = torch.FloatTensor(means.size())
if m.is_cuda:
u = u.cuda()
u.uniform_(1e-05, 1.0 - 1e-05)
u = Variable(u)
x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1.0 - u))
x0 = torch.clamp(torch.clamp(x[:, :, :, 0], min=-1.0), max=1.0)
out = x0.unsqueeze(1)
return out
|
Benchmarks
|
positive
|
def activateBGLcallback(self, context):
self.handler2 = bpy.types.SpaceView3D.draw_handler_add(Draw_hdr_callback, (self, context), 'WINDOW', 'POST_PIXEL')
self.view3d_area = context.area
<DeepExtract>
for obj in self.object:
obj.view3d_area = self.view3d_area
</DeepExtract>
bpy.ops.sunpos.hdr('INVOKE_DEFAULT')
|
def activateBGLcallback(self, context):
self.handler2 = bpy.types.SpaceView3D.draw_handler_add(Draw_hdr_callback, (self, context), 'WINDOW', 'POST_PIXEL')
self.view3d_area = context.area
for obj in self.object:
obj.view3d_area = self.view3d_area
bpy.ops.sunpos.hdr('INVOKE_DEFAULT')
|
blender-architecture-scripts
|
positive
|
def __init__(self, name):
self.name = name
(base_name, inc) = re.match(self.inc_re, name, re.I).groups()
<DeepExtract>
self.base_name = re.sub(u'[^A-Z]', '', string.upper())
</DeepExtract>
<DeepExtract>
self.inc = re.sub(u'[^A-Z]', '', string.upper())
</DeepExtract>
self._is_politician = False
self.pname = None
if not self.inc:
for pattern in self.individual_pac_patterns:
match = re.match(pattern, name, re.I)
if match:
self.base_name = match.group(1)
self.pname = PersonName(self.base_name)
break
if not self.pname:
parts = self.base_name.split()
if len(parts) == 3 and parts[0] in get_nicknames() and (len(parts[1]) == 1):
self.pname = PersonName(self.base_name)
|
def __init__(self, name):
self.name = name
(base_name, inc) = re.match(self.inc_re, name, re.I).groups()
self.base_name = re.sub(u'[^A-Z]', '', string.upper())
self.inc = re.sub(u'[^A-Z]', '', string.upper())
self._is_politician = False
self.pname = None
if not self.inc:
for pattern in self.individual_pac_patterns:
match = re.match(pattern, name, re.I)
if match:
self.base_name = match.group(1)
self.pname = PersonName(self.base_name)
break
if not self.pname:
parts = self.base_name.split()
if len(parts) == 3 and parts[0] in get_nicknames() and (len(parts[1]) == 1):
self.pname = PersonName(self.base_name)
|
datacommons
|
positive
|
def __call__(self, r):
<DeepExtract>
r.headers['Authorization'] = 'Basic ' + b64encode(('%s:%s' % (self.username, self.password)).encode('latin1')).strip().decode('latin1')
</DeepExtract>
return r
|
def __call__(self, r):
r.headers['Authorization'] = 'Basic ' + b64encode(('%s:%s' % (self.username, self.password)).encode('latin1')).strip().decode('latin1')
return r
|
acousticbrainz-client
|
positive
|
def stacked_viz(df: pd.DataFrame, x: str, y: str, grp_cnt_stats: Dict[str, int], plot_width: int, plot_height: int, timeunit: Optional[str]=None) -> Panel:
"""
Render a stacked bar chart
"""
df2 = df.div(df.sum(axis=1), axis=0) * 100
df.columns = [f'{col}_cnt' for col in df.columns]
df = pd.concat([df2, df], axis=1)
<DeepExtract>
(x_ttl, y_ttl) = (None, None)
if f'{x}_ttl' in grp_cnt_stats:
x_ttl = grp_cnt_stats[f'{x}_ttl']
x_shw = grp_cnt_stats[f'{x}_shw']
if f'{y}_ttl' in grp_cnt_stats:
y_ttl = grp_cnt_stats[f'{y}_ttl']
y_shw = grp_cnt_stats[f'{y}_shw']
if x_ttl and y_ttl:
if x_ttl > x_shw and y_ttl > y_shw:
title = f'(top {y_shw} out of {y_ttl}) {y} by (top {x_shw} out of {x_ttl}) {x}'
elif x_ttl:
if x_ttl > x_shw:
title = f'{y} by (top {x_shw} out of {x_ttl}) {x}'
elif y_ttl:
if y_ttl > y_shw:
title = f'(top {y_shw} out of {y_ttl}) {y} by {x}'
title = f'{y} by {x}'
</DeepExtract>
if not timeunit:
if grp_cnt_stats[f'{x}_shw'] > 30:
plot_width = 32 * grp_cnt_stats[f'{x}_shw']
elif len(df) > 30:
plot_width = 32 * len(df)
fig = figure(plot_height=plot_height, plot_width=plot_width, title=title, toolbar_location=None, x_range=list(df.index))
grps = list(df2.columns)
palette = PASTEL1 * (len(grps) // len(PASTEL1) + 1)
if 'Others' in grps:
colours = palette[0:len(grps) - 1] + ('#636363',)
else:
colours = palette[0:len(grps)]
source = ColumnDataSource(data=df)
renderers = fig.vbar_stack(stackers=grps, x='index', width=0.9, source=source, line_width=1, color=colours)
grps = [grp[:14] + '...' if len(grp) > 15 else grp for grp in grps]
legend = Legend(items=[(grp, [rend]) for (grp, rend) in zip(grps, renderers)])
legend.label_text_font_size = '8pt'
fig.add_layout(legend, 'right')
if not timeunit:
formatter = CustomJSHover(args=dict(source=source), code="\n const cur_bar = special_vars.data_x - 0.5\n const name_cnt = special_vars.name + '_cnt'\n return source.data[name_cnt][cur_bar] + '';\n ")
for rend in renderers:
hover = HoverTool(tooltips=[(x, '@index'), (y, '$name'), ('Percentage', '@$name%'), ('Count', '@{%s}{custom}' % rend.name)], formatters={'@{%s}' % rend.name: formatter}, renderers=[rend])
fig.add_tools(hover)
fig.yaxis.axis_label = 'Percent'
else:
formatter = CustomJSHover(args=dict(source=source), code="\n const columns = Object.keys(source.data)\n const cur_bar = special_vars.data_x - 0.5\n var ttl_bar = 0\n for (let i = 0; i < columns.length; i++) {\n if (columns[i] != 'index'){\n ttl_bar = ttl_bar + source.data[columns[i]][cur_bar]\n }\n }\n const cur_val = source.data[special_vars.name][cur_bar]\n return (cur_val/ttl_bar * 100).toFixed(2)+'%';\n ")
for rend in renderers:
hover = HoverTool(tooltips=[(y, '$name'), (timeunit, '@index'), ('Count', '@$name'), ('Percent', '@{%s}{custom}' % rend.name)], formatters={'@{%s}' % rend.name: formatter}, renderers=[rend])
fig.add_tools(hover)
fig.yaxis.axis_label = 'Count'
_format_axis(fig, 0, df.sum(axis=1).max(), 'y')
fig.xaxis.axis_label = x
if timeunit == 'Week of':
fig.xaxis.axis_label = x + ', the week of'
tweak_figure(fig, 'stacked')
return Panel(child=fig, title='Stacked Bar Chart')
|
def stacked_viz(df: pd.DataFrame, x: str, y: str, grp_cnt_stats: Dict[str, int], plot_width: int, plot_height: int, timeunit: Optional[str]=None) -> Panel:
"""
Render a stacked bar chart
"""
df2 = df.div(df.sum(axis=1), axis=0) * 100
df.columns = [f'{col}_cnt' for col in df.columns]
df = pd.concat([df2, df], axis=1)
(x_ttl, y_ttl) = (None, None)
if f'{x}_ttl' in grp_cnt_stats:
x_ttl = grp_cnt_stats[f'{x}_ttl']
x_shw = grp_cnt_stats[f'{x}_shw']
if f'{y}_ttl' in grp_cnt_stats:
y_ttl = grp_cnt_stats[f'{y}_ttl']
y_shw = grp_cnt_stats[f'{y}_shw']
if x_ttl and y_ttl:
if x_ttl > x_shw and y_ttl > y_shw:
title = f'(top {y_shw} out of {y_ttl}) {y} by (top {x_shw} out of {x_ttl}) {x}'
elif x_ttl:
if x_ttl > x_shw:
title = f'{y} by (top {x_shw} out of {x_ttl}) {x}'
elif y_ttl:
if y_ttl > y_shw:
title = f'(top {y_shw} out of {y_ttl}) {y} by {x}'
title = f'{y} by {x}'
if not timeunit:
if grp_cnt_stats[f'{x}_shw'] > 30:
plot_width = 32 * grp_cnt_stats[f'{x}_shw']
elif len(df) > 30:
plot_width = 32 * len(df)
fig = figure(plot_height=plot_height, plot_width=plot_width, title=title, toolbar_location=None, x_range=list(df.index))
grps = list(df2.columns)
palette = PASTEL1 * (len(grps) // len(PASTEL1) + 1)
if 'Others' in grps:
colours = palette[0:len(grps) - 1] + ('#636363',)
else:
colours = palette[0:len(grps)]
source = ColumnDataSource(data=df)
renderers = fig.vbar_stack(stackers=grps, x='index', width=0.9, source=source, line_width=1, color=colours)
grps = [grp[:14] + '...' if len(grp) > 15 else grp for grp in grps]
legend = Legend(items=[(grp, [rend]) for (grp, rend) in zip(grps, renderers)])
legend.label_text_font_size = '8pt'
fig.add_layout(legend, 'right')
if not timeunit:
formatter = CustomJSHover(args=dict(source=source), code="\n const cur_bar = special_vars.data_x - 0.5\n const name_cnt = special_vars.name + '_cnt'\n return source.data[name_cnt][cur_bar] + '';\n ")
for rend in renderers:
hover = HoverTool(tooltips=[(x, '@index'), (y, '$name'), ('Percentage', '@$name%'), ('Count', '@{%s}{custom}' % rend.name)], formatters={'@{%s}' % rend.name: formatter}, renderers=[rend])
fig.add_tools(hover)
fig.yaxis.axis_label = 'Percent'
else:
formatter = CustomJSHover(args=dict(source=source), code="\n const columns = Object.keys(source.data)\n const cur_bar = special_vars.data_x - 0.5\n var ttl_bar = 0\n for (let i = 0; i < columns.length; i++) {\n if (columns[i] != 'index'){\n ttl_bar = ttl_bar + source.data[columns[i]][cur_bar]\n }\n }\n const cur_val = source.data[special_vars.name][cur_bar]\n return (cur_val/ttl_bar * 100).toFixed(2)+'%';\n ")
for rend in renderers:
hover = HoverTool(tooltips=[(y, '$name'), (timeunit, '@index'), ('Count', '@$name'), ('Percent', '@{%s}{custom}' % rend.name)], formatters={'@{%s}' % rend.name: formatter}, renderers=[rend])
fig.add_tools(hover)
fig.yaxis.axis_label = 'Count'
_format_axis(fig, 0, df.sum(axis=1).max(), 'y')
fig.xaxis.axis_label = x
if timeunit == 'Week of':
fig.xaxis.axis_label = x + ', the week of'
tweak_figure(fig, 'stacked')
return Panel(child=fig, title='Stacked Bar Chart')
|
dataprep
|
positive
|
def dump(db, file, **options):
head_top = ['ID', 'Frame Name', 'Cycle Time [ms]', 'Launch Type', 'Launch Parameter', 'Signal Byte No.', 'Signal Bit No.', 'Signal Name', 'Signal Function', 'Signal Length [Bit]', 'Signal Default', ' Signal Not Available', 'Byteorder']
head_tail = ['Value', 'Name / Phys. Range', 'Function / Increment Unit']
if len(options.get('additionalSignalAttributes', '')) > 0:
additional_signal_columns = options.get('additionalSignalAttributes').split(',')
else:
additional_signal_columns = []
if len(options.get('additionalFrameAttributes', '')) > 0:
additional_frame_columns = options.get('additionalFrameAttributes').split(',')
else:
additional_frame_columns = []
motorola_bit_format = options.get('xlsMotorolaBitFormat', 'msbreverse')
workbook = xlwt.Workbook(encoding='utf8')
worksheet = workbook.add_sheet('K-Matrix ')
row_array = []
col = 0
ecu_list = [ecu.name for ecu in db.ecus]
row_array += head_top
head_start = len(row_array)
row_array += ecu_list
for col in range(len(row_array)):
worksheet.col(col).width = 1111
tail_start = len(row_array)
row_array += head_tail
additional_frame_start = len(row_array)
for col in range(tail_start, len(row_array)):
worksheet.col(col).width = 3333
for additionalCol in additional_frame_columns:
row_array.append('frame.' + additionalCol)
col += 1
for additionalCol in additional_signal_columns:
row_array.append('signal.' + additionalCol)
col += 1
<DeepExtract>
for item in row_array:
worksheet.write(0, 0, label=item, style=sty_header)
0 += 1
return 0
</DeepExtract>
worksheet.col(1).width = 5555
worksheet.col(3).width = 3333
worksheet.col(7).width = 5555
worksheet.col(8).width = 7777
worksheet.col(head_start).width = 1111
worksheet.col(head_start + 1).width = 5555
frame_hash = {}
if db.type == canmatrix.matrix_class.CAN:
logger.debug('Length of db.frames is %d', len(db.frames))
for frame in db.frames:
if frame.is_complex_multiplexed:
logger.error('export complex multiplexers is not supported - ignoring frame %s', frame.name)
continue
frame_hash[int(frame.arbitration_id.id)] = frame
else:
frame_hash = {a.name: a for a in db.frames}
row = 1
for idx in sorted(frame_hash.keys()):
frame = frame_hash[idx]
frame_style = sty_first_frame
sig_hash = {'{:02d}{}'.format(sig.get_startbit(), sig.name): sig for sig in frame.signals}
sig_style = sty_first_frame
additional_frame_info = [frame.attribute(frameInfo, default='') for frameInfo in additional_frame_columns]
row_array = []
if len(sig_hash) == 0:
row_array += canmatrix.formats.xls_common.get_frame_info(db, frame)
for _ in range(5, head_start):
row_array.append('')
<DeepExtract>
for item in row_array:
worksheet.write(row, 0, label=item, style=frame_style)
0 += 1
temp_col = 0
</DeepExtract>
<DeepExtract>
if frame_style == sty_first_frame:
norm = sty_first_frame
sender = sty_sender_first_frame
norm_green = sty_green_first_frame
sender_green = sty_sender_green_first_frame
else:
norm = sty_norm
sender = sty_sender
norm_green = sty_green
sender_green = sty_sender_green
for ecu_name in ecu_list:
if temp_col % 2 == 0:
loc_style = norm
loc_style_sender = sender
else:
loc_style = norm_green
loc_style_sender = sender_green
if None and ecu_name in None.receivers and (ecu_name in frame.transmitters):
worksheet.write(row, temp_col, label='r/s', style=loc_style_sender)
elif None and ecu_name in None.receivers:
worksheet.write(row, temp_col, label='r', style=loc_style)
elif ecu_name in frame.transmitters:
worksheet.write(row, temp_col, label='s', style=loc_style_sender)
else:
worksheet.write(row, temp_col, label='', style=loc_style)
temp_col += 1
temp_col = temp_col
</DeepExtract>
row_array = []
for col in range(temp_col, additional_frame_start):
row_array.append('')
row_array += additional_frame_info
for _ in additional_signal_columns:
row_array.append('')
<DeepExtract>
for item in row_array:
worksheet.write(row, temp_col, label=item, style=frame_style)
temp_col += 1
return temp_col
</DeepExtract>
row += 1
continue
for sig_idx in sorted(sig_hash.keys()):
sig = sig_hash[sig_idx]
if sig_style != sty_first_frame:
sig_style = sty_norm
if sig.values.__len__() > 0:
val_style = sig_style
for val in sorted(sig.values.keys()):
row_array = canmatrix.formats.xls_common.get_frame_info(db, frame)
<DeepExtract>
for item in row_array:
worksheet.write(row, 0, label=item, style=frame_style)
0 += 1
front_col = 0
</DeepExtract>
if frame_style != sty_first_frame:
worksheet.row(row).level = 1
col = head_start
<DeepExtract>
if frame_style == sty_first_frame:
norm = sty_first_frame
sender = sty_sender_first_frame
norm_green = sty_green_first_frame
sender_green = sty_sender_green_first_frame
else:
norm = sty_norm
sender = sty_sender
norm_green = sty_green
sender_green = sty_sender_green
for ecu_name in ecu_list:
if col % 2 == 0:
loc_style = norm
loc_style_sender = sender
else:
loc_style = norm_green
loc_style_sender = sender_green
if sig and ecu_name in sig.receivers and (ecu_name in frame.transmitters):
worksheet.write(row, col, label='r/s', style=loc_style_sender)
elif sig and ecu_name in sig.receivers:
worksheet.write(row, col, label='r', style=loc_style)
elif ecu_name in frame.transmitters:
worksheet.write(row, col, label='s', style=loc_style_sender)
else:
worksheet.write(row, col, label='', style=loc_style)
col += 1
col = col
</DeepExtract>
(frontRow, backRow) = canmatrix.formats.xls_common.get_signal(db, frame, sig, motorola_bit_format)
<DeepExtract>
for item in frontRow:
worksheet.write(row, front_col, label=item, style=sig_style)
front_col += 1
return front_col
</DeepExtract>
backRow += additional_frame_info
for item in additional_signal_columns:
temp = getattr(sig, item, '')
backRow.append(temp)
<DeepExtract>
for item in backRow:
worksheet.write(row, col + 2, label=item, style=sig_style)
col + 2 += 1
return col + 2
</DeepExtract>
<DeepExtract>
for item in [val, sig.values[val]]:
worksheet.write(row, col, label=item, style=val_style)
col += 1
return col
</DeepExtract>
row += 1
sig_style = sty_white
frame_style = sty_white
val_style = sty_norm
else:
row_array = canmatrix.formats.xls_common.get_frame_info(db, frame)
<DeepExtract>
for item in row_array:
worksheet.write(row, 0, label=item, style=frame_style)
0 += 1
front_col = 0
</DeepExtract>
if frame_style != sty_first_frame:
worksheet.row(row).level = 1
col = head_start
<DeepExtract>
if frame_style == sty_first_frame:
norm = sty_first_frame
sender = sty_sender_first_frame
norm_green = sty_green_first_frame
sender_green = sty_sender_green_first_frame
else:
norm = sty_norm
sender = sty_sender
norm_green = sty_green
sender_green = sty_sender_green
for ecu_name in ecu_list:
if col % 2 == 0:
loc_style = norm
loc_style_sender = sender
else:
loc_style = norm_green
loc_style_sender = sender_green
if sig and ecu_name in sig.receivers and (ecu_name in frame.transmitters):
worksheet.write(row, col, label='r/s', style=loc_style_sender)
elif sig and ecu_name in sig.receivers:
worksheet.write(row, col, label='r', style=loc_style)
elif ecu_name in frame.transmitters:
worksheet.write(row, col, label='s', style=loc_style_sender)
else:
worksheet.write(row, col, label='', style=loc_style)
col += 1
col = col
</DeepExtract>
(frontRow, backRow) = canmatrix.formats.xls_common.get_signal(db, frame, sig, motorola_bit_format)
<DeepExtract>
for item in frontRow:
worksheet.write(row, front_col, label=item, style=sig_style)
front_col += 1
return front_col
</DeepExtract>
if float(sig.min) != 0 or float(sig.max) != 1.0:
backRow.insert(0, str('%g..%g' % (sig.min, sig.max)))
else:
backRow.insert(0, '')
backRow.insert(0, '')
backRow += additional_frame_info
for item in additional_signal_columns:
temp = getattr(sig, item, '')
backRow.append(temp)
<DeepExtract>
for item in backRow:
worksheet.write(row, col, label=item, style=sig_style)
col += 1
return col
</DeepExtract>
row += 1
sig_style = sty_white
frame_style = sty_white
worksheet.set_panes_frozen(True)
worksheet.set_horz_split_pos(1)
worksheet.set_remove_splits(True)
workbook.save(file)
|
def dump(db, file, **options):
head_top = ['ID', 'Frame Name', 'Cycle Time [ms]', 'Launch Type', 'Launch Parameter', 'Signal Byte No.', 'Signal Bit No.', 'Signal Name', 'Signal Function', 'Signal Length [Bit]', 'Signal Default', ' Signal Not Available', 'Byteorder']
head_tail = ['Value', 'Name / Phys. Range', 'Function / Increment Unit']
if len(options.get('additionalSignalAttributes', '')) > 0:
additional_signal_columns = options.get('additionalSignalAttributes').split(',')
else:
additional_signal_columns = []
if len(options.get('additionalFrameAttributes', '')) > 0:
additional_frame_columns = options.get('additionalFrameAttributes').split(',')
else:
additional_frame_columns = []
motorola_bit_format = options.get('xlsMotorolaBitFormat', 'msbreverse')
workbook = xlwt.Workbook(encoding='utf8')
worksheet = workbook.add_sheet('K-Matrix ')
row_array = []
col = 0
ecu_list = [ecu.name for ecu in db.ecus]
row_array += head_top
head_start = len(row_array)
row_array += ecu_list
for col in range(len(row_array)):
worksheet.col(col).width = 1111
tail_start = len(row_array)
row_array += head_tail
additional_frame_start = len(row_array)
for col in range(tail_start, len(row_array)):
worksheet.col(col).width = 3333
for additionalCol in additional_frame_columns:
row_array.append('frame.' + additionalCol)
col += 1
for additionalCol in additional_signal_columns:
row_array.append('signal.' + additionalCol)
col += 1
for item in row_array:
worksheet.write(0, 0, label=item, style=sty_header)
0 += 1
return 0
worksheet.col(1).width = 5555
worksheet.col(3).width = 3333
worksheet.col(7).width = 5555
worksheet.col(8).width = 7777
worksheet.col(head_start).width = 1111
worksheet.col(head_start + 1).width = 5555
frame_hash = {}
if db.type == canmatrix.matrix_class.CAN:
logger.debug('Length of db.frames is %d', len(db.frames))
for frame in db.frames:
if frame.is_complex_multiplexed:
logger.error('export complex multiplexers is not supported - ignoring frame %s', frame.name)
continue
frame_hash[int(frame.arbitration_id.id)] = frame
else:
frame_hash = {a.name: a for a in db.frames}
row = 1
for idx in sorted(frame_hash.keys()):
frame = frame_hash[idx]
frame_style = sty_first_frame
sig_hash = {'{:02d}{}'.format(sig.get_startbit(), sig.name): sig for sig in frame.signals}
sig_style = sty_first_frame
additional_frame_info = [frame.attribute(frameInfo, default='') for frameInfo in additional_frame_columns]
row_array = []
if len(sig_hash) == 0:
row_array += canmatrix.formats.xls_common.get_frame_info(db, frame)
for _ in range(5, head_start):
row_array.append('')
for item in row_array:
worksheet.write(row, 0, label=item, style=frame_style)
0 += 1
temp_col = 0
if frame_style == sty_first_frame:
norm = sty_first_frame
sender = sty_sender_first_frame
norm_green = sty_green_first_frame
sender_green = sty_sender_green_first_frame
else:
norm = sty_norm
sender = sty_sender
norm_green = sty_green
sender_green = sty_sender_green
for ecu_name in ecu_list:
if temp_col % 2 == 0:
loc_style = norm
loc_style_sender = sender
else:
loc_style = norm_green
loc_style_sender = sender_green
if None and ecu_name in None.receivers and (ecu_name in frame.transmitters):
worksheet.write(row, temp_col, label='r/s', style=loc_style_sender)
elif None and ecu_name in None.receivers:
worksheet.write(row, temp_col, label='r', style=loc_style)
elif ecu_name in frame.transmitters:
worksheet.write(row, temp_col, label='s', style=loc_style_sender)
else:
worksheet.write(row, temp_col, label='', style=loc_style)
temp_col += 1
temp_col = temp_col
row_array = []
for col in range(temp_col, additional_frame_start):
row_array.append('')
row_array += additional_frame_info
for _ in additional_signal_columns:
row_array.append('')
for item in row_array:
worksheet.write(row, temp_col, label=item, style=frame_style)
temp_col += 1
return temp_col
row += 1
continue
for sig_idx in sorted(sig_hash.keys()):
sig = sig_hash[sig_idx]
if sig_style != sty_first_frame:
sig_style = sty_norm
if sig.values.__len__() > 0:
val_style = sig_style
for val in sorted(sig.values.keys()):
row_array = canmatrix.formats.xls_common.get_frame_info(db, frame)
for item in row_array:
worksheet.write(row, 0, label=item, style=frame_style)
0 += 1
front_col = 0
if frame_style != sty_first_frame:
worksheet.row(row).level = 1
col = head_start
if frame_style == sty_first_frame:
norm = sty_first_frame
sender = sty_sender_first_frame
norm_green = sty_green_first_frame
sender_green = sty_sender_green_first_frame
else:
norm = sty_norm
sender = sty_sender
norm_green = sty_green
sender_green = sty_sender_green
for ecu_name in ecu_list:
if col % 2 == 0:
loc_style = norm
loc_style_sender = sender
else:
loc_style = norm_green
loc_style_sender = sender_green
if sig and ecu_name in sig.receivers and (ecu_name in frame.transmitters):
worksheet.write(row, col, label='r/s', style=loc_style_sender)
elif sig and ecu_name in sig.receivers:
worksheet.write(row, col, label='r', style=loc_style)
elif ecu_name in frame.transmitters:
worksheet.write(row, col, label='s', style=loc_style_sender)
else:
worksheet.write(row, col, label='', style=loc_style)
col += 1
col = col
(frontRow, backRow) = canmatrix.formats.xls_common.get_signal(db, frame, sig, motorola_bit_format)
for item in frontRow:
worksheet.write(row, front_col, label=item, style=sig_style)
front_col += 1
return front_col
backRow += additional_frame_info
for item in additional_signal_columns:
temp = getattr(sig, item, '')
backRow.append(temp)
for item in backRow:
worksheet.write(row, col + 2, label=item, style=sig_style)
col + 2 += 1
return col + 2
for item in [val, sig.values[val]]:
worksheet.write(row, col, label=item, style=val_style)
col += 1
return col
row += 1
sig_style = sty_white
frame_style = sty_white
val_style = sty_norm
else:
row_array = canmatrix.formats.xls_common.get_frame_info(db, frame)
for item in row_array:
worksheet.write(row, 0, label=item, style=frame_style)
0 += 1
front_col = 0
if frame_style != sty_first_frame:
worksheet.row(row).level = 1
col = head_start
if frame_style == sty_first_frame:
norm = sty_first_frame
sender = sty_sender_first_frame
norm_green = sty_green_first_frame
sender_green = sty_sender_green_first_frame
else:
norm = sty_norm
sender = sty_sender
norm_green = sty_green
sender_green = sty_sender_green
for ecu_name in ecu_list:
if col % 2 == 0:
loc_style = norm
loc_style_sender = sender
else:
loc_style = norm_green
loc_style_sender = sender_green
if sig and ecu_name in sig.receivers and (ecu_name in frame.transmitters):
worksheet.write(row, col, label='r/s', style=loc_style_sender)
elif sig and ecu_name in sig.receivers:
worksheet.write(row, col, label='r', style=loc_style)
elif ecu_name in frame.transmitters:
worksheet.write(row, col, label='s', style=loc_style_sender)
else:
worksheet.write(row, col, label='', style=loc_style)
col += 1
col = col
(frontRow, backRow) = canmatrix.formats.xls_common.get_signal(db, frame, sig, motorola_bit_format)
for item in frontRow:
worksheet.write(row, front_col, label=item, style=sig_style)
front_col += 1
return front_col
if float(sig.min) != 0 or float(sig.max) != 1.0:
backRow.insert(0, str('%g..%g' % (sig.min, sig.max)))
else:
backRow.insert(0, '')
backRow.insert(0, '')
backRow += additional_frame_info
for item in additional_signal_columns:
temp = getattr(sig, item, '')
backRow.append(temp)
for item in backRow:
worksheet.write(row, col, label=item, style=sig_style)
col += 1
return col
row += 1
sig_style = sty_white
frame_style = sty_white
worksheet.set_panes_frozen(True)
worksheet.set_horz_split_pos(1)
worksheet.set_remove_splits(True)
workbook.save(file)
|
canmatrix
|
positive
|
def add_data_to_plot(gui: Interface, target_graph: PlotWidget, plot_index: int, y: float, timestamp: float):
"""
Adds data to plot in provided graph.
:param gui: Graphical user interface in which to set up graphs.
:param target_graph: Graph to use for plot to add data to.
:param plot_index: Index of plot in target graph's list of plots.
:param y: Y value to add.
:param timestamp: Timestamp value to add.
"""
<DeepExtract>
for graph in gui.graphs:
if graph['graph'] == target_graph:
graph_dict = graph
</DeepExtract>
plot = graph_dict['plots'][plot_index]
seconds_in_day = 86400
if len(plot['x']) >= seconds_in_day:
plot['x'] = [0]
plot['y'] = [y]
plot['z'] = [timestamp]
else:
plot['x'].append(plot['x'][-1] + 1)
plot['y'].append(y)
plot['z'].append(timestamp)
plot['plot'].setData(plot['x'], plot['y'])
|
def add_data_to_plot(gui: Interface, target_graph: PlotWidget, plot_index: int, y: float, timestamp: float):
"""
Adds data to plot in provided graph.
:param gui: Graphical user interface in which to set up graphs.
:param target_graph: Graph to use for plot to add data to.
:param plot_index: Index of plot in target graph's list of plots.
:param y: Y value to add.
:param timestamp: Timestamp value to add.
"""
for graph in gui.graphs:
if graph['graph'] == target_graph:
graph_dict = graph
plot = graph_dict['plots'][plot_index]
seconds_in_day = 86400
if len(plot['x']) >= seconds_in_day:
plot['x'] = [0]
plot['y'] = [y]
plot['z'] = [timestamp]
else:
plot['x'].append(plot['x'][-1] + 1)
plot['y'].append(y)
plot['z'].append(timestamp)
plot['plot'].setData(plot['x'], plot['y'])
|
algobot
|
positive
|
def parse(self, model, **kwargs):
"""General parse function.
Responsible for calling the sub-parsers and logging progress.
:param model: DiTTo model
:type model: DiTTo model
:param verbose: Set verbose mode. Optional. Default=False
:type verbose: bool
:returns: 1 for success, -1 for failure
:rtype: int
"""
start = time.time()
self.source_name = 'Sourcebus'
if not self.is_opendssdirect_built:
<DeepExtract>
logger.info('Reading DSS file {name}...'.format(name=self.DSS_file_names['master']))
try:
self.function('redirect {master_file}'.format(master_file=self.DSS_file_names['master']))
except:
logger.error('Unable to redirect master file: {filename}'.format(filename=self.DSS_file_names['master']))
return -1
self.is_opendssdirect_built = True
logger.info('build_opendssdirect successful')
return 1
</DeepExtract>
end = time.time()
logger.debug('Build OpenDSSdirect= {}'.format(end - start))
if 'feeder_file' in kwargs:
self.feeder_file = kwargs['feeder_file']
<DeepExtract>
with open(self.feeder_file, 'r') as f:
lines = f.readlines()
feeders = {}
substations = {}
substation_transformers = {}
for line in lines[1:]:
(node, sub, feed, sub_trans) = list(map(lambda x: x.strip(), line.split(' ')))
if not feed in feeders:
feeders[feed] = [node.lower().replace('.', '')]
else:
feeders[feed].append(node.lower().replace('.', ''))
if not feed in substations:
substations[feed] = sub.lower().replace('.', '')
if not feed in substation_transformers:
substation_transformers[feed] = sub.lower().replace('.', '')
for (f_name, f_data) in feeders.items():
api_feeder_metadata = Feeder_metadata(model)
api_feeder_metadata.name = f_name
if f_name in substation_transformers:
api_feeder_metadata.transformer = substation_transformers[f_name]
if f_name in substations:
api_feeder_metadata.substation = substations[f_name]
</DeepExtract>
read_power_source = True
if 'read_power_source' in kwargs:
read_power_source = kwargs['read_power_source']
if read_power_source:
<DeepExtract>
sources = _dss_class_to_dict('Vsource')
for (source_name, source_data) in sources.items():
if not source_data['enabled'] == 'Yes':
continue
try:
api_power_source = PowerSource(model)
except:
continue
try:
api_power_source.name = source_name
except:
pass
try:
api_power_source.nominal_voltage = float(source_data['basekv']) * 10 ** 3
except:
pass
try:
api_power_source.per_unit = float(source_data['pu'])
except:
pass
try:
api_power_source.is_sourcebus = True
except:
pass
try:
api_power_source.rated_power = float(source_data['baseMVA']) * 10 ** 6
except:
pass
try:
api_power_source.emergency_power = float(source_data['MVAsc3']) * 10 ** 6
except:
pass
if 'Z0' in source_data and isinstance(source_data['Z0'], list):
if len(source_data['Z0']) == 2:
try:
api_power_source.zero_sequence_impedance = complex(float(source_data['Z0'][0]), float(source_data['Z0'][1]))
except:
pass
elif 'R0' in source_data and source_data['R0'] != '' and ('X0' in source_data) and (source_data['X0'] != ''):
try:
api_power_source.zero_sequence_impedance = complex(float(source_data['R0']), float(source_data['X0']))
except:
pass
if 'Z1' in source_data and isinstance(source_data['Z1'], list):
if len(source_data['Z1']) == 2:
try:
api_power_source.positive_sequence_impedance = complex(float(source_data['Z1'][0]), float(source_data['Z1'][1]))
except:
pass
elif 'R1' in source_data and source_data['R1'] != '' and ('X1' in source_data) and (source_data['X1'] != ''):
try:
api_power_source.positive_sequence_impedance = complex(float(source_data['R1']), float(source_data['X1']))
except:
pass
try:
api_power_source.phase_angle = float(source_data['angle'])
except:
pass
api_power_source.phases = list(map(lambda x: Unicode(self.phase_mapping(x)), [1, 2, 3]))
self.bus_coord_file = self.DSS_file_names['Nodes']
skip_coordinate_parsing = False
try:
with open(self.bus_coord_file, 'r') as g:
coordinates = g.readlines()
except IOError:
skip_coordinate_parsing = True
(X, Y) = (None, None)
if not skip_coordinate_parsing:
for line in coordinates:
if line.strip() == '':
continue
try:
(name, X, Y) = list(map(lambda x: x.strip(), line.split(self.coordinates_delimiter)))
name = name.lower()
except:
logger.warning('Could not parse: ' + str(line))
name = None
X = None
Y = None
pass
try:
X = float(X)
Y = float(Y)
except:
logger.warning('Could not cast coordinates {X}, {Y} for bus {name}'.format(X=X, Y=Y, name=name))
pass
powersource_pos = Position(model)
powersource_pos.long = X
powersource_pos.lat = Y
api_power_source.positions.append(powersource_pos)
try:
if '.' in source_data['bus1']:
api_power_source.connecting_element = source_data['bus1'].split('.')[0]
else:
api_power_source.connecting_element = source_data['bus1']
self.source_name = api_power_source.connecting_element + '_src'
api_feeder_metadata = Feeder_metadata(model)
api_feeder_metadata.name = self.source_name
api_feeder_metadata.headnode = api_power_source.connecting_element
api_feeder_metadata.substation = api_power_source.connecting_element
api_feeder_metadata.nominal_voltage = api_power_source.nominal_voltage
except:
pass
return 1
</DeepExtract>
super(Reader, self).parse(model, **kwargs)
<DeepExtract>
storages = _dss_class_to_dict('storage')
for (name, data) in storages.items():
if not data['enabled'] == 'Yes':
continue
api_storage = Storage(model)
api_storage.feeder_name = self.source_name
try:
api_storage.name = name
except:
pass
try:
api_storage.connecting_element = data['bus1']
except:
pass
try:
api_storage.nominal_voltage = float(data['kv']) * 10 ** 3
except:
pass
try:
api_storage.rated_power = float(data['kWrated']) * 10 ** 3
except:
pass
try:
api_storage.rated_kWh = float(data['kWhrated'])
except:
pass
try:
api_storage.stored_kWh = float(data['kWhstored'])
except:
pass
try:
api_storage.reserve = float(data['%reserve'])
except:
pass
try:
api_storage.state = data['State']
except:
pass
try:
api_storage.discharge_rate = float(data['%Discharge'])
except:
pass
try:
api_storage.charge_rate = float(data['%Charge'])
except:
pass
try:
api_storage.charging_efficiency = float(data['%EffCharge'])
except:
pass
try:
api_storage.discharging_efficiency = float(data['%EffDischarge'])
except:
pass
try:
api_storage.resistance = float(data['%R'])
except:
pass
try:
api_storage.reactance = float(data['%X'])
except:
pass
try:
api_storage.model_ = int(data['model'])
except:
pass
try:
api_storage.yearly = data['yearly']
except:
pass
try:
api_storage.daily = data['daily']
except:
pass
try:
api_storage.duty = data['duty']
except:
pass
try:
api_storage.discharge_trigger = float(data['DischargeTrigger'])
except:
pass
try:
api_storage.charge_trigger = float(data['ChargeTrigger'])
except:
pass
N_phases = int(data['phases'])
for phase in range(N_phases):
try:
api_phase_storage = PhaseStorage(model)
except:
pass
try:
api_phase_storage.p = float(data['kW']) / float(N_phases)
except:
pass
try:
api_phase_storage.q = float(data['kvar']) / float(N_phases)
except:
pass
api_storage.phase_storages.append(api_phase_storage)
</DeepExtract>
<DeepExtract>
model.set_names()
AllBusNames = dss.Circuit.AllBusNames()
for bus_name in AllBusNames:
dss.Circuit.SetActiveBus(bus_name)
try:
model[bus_name.lower()].nominal_voltage = dss.Bus.kVBase() * math.sqrt(3) * 10 ** 3
except:
print('Could not set nominal voltage for bus {b}'.format(b=bus_name))
pass
for obj in model.models:
if hasattr(obj, 'nominal_voltage') and obj.nominal_voltage is None:
if hasattr(obj, 'connecting_element'):
try:
obj.nominal_voltage = model[obj.connecting_element].nominal_voltage
except:
pass
elif hasattr(obj, 'from_element'):
try:
obj.nominal_voltage = model[obj.from_element].nominal_voltage
except:
pass
elif isinstance(obj, PowerTransformer) or isinstance(obj, Regulator):
_from = obj.from_element
_to = obj.to_element
mapp = {0: _from, 1: _to, 2: _to}
for x in range(3):
if len(obj.windings) > x and obj.windings[x].nominal_voltage is None:
try:
obj.windings[x].nominal_voltage = model[mapp[x]].nominal_voltage
except:
pass
</DeepExtract>
return 1
|
def parse(self, model, **kwargs):
"""General parse function.
Responsible for calling the sub-parsers and logging progress.
:param model: DiTTo model
:type model: DiTTo model
:param verbose: Set verbose mode. Optional. Default=False
:type verbose: bool
:returns: 1 for success, -1 for failure
:rtype: int
"""
start = time.time()
self.source_name = 'Sourcebus'
if not self.is_opendssdirect_built:
logger.info('Reading DSS file {name}...'.format(name=self.DSS_file_names['master']))
try:
self.function('redirect {master_file}'.format(master_file=self.DSS_file_names['master']))
except:
logger.error('Unable to redirect master file: {filename}'.format(filename=self.DSS_file_names['master']))
return -1
self.is_opendssdirect_built = True
logger.info('build_opendssdirect successful')
return 1
end = time.time()
logger.debug('Build OpenDSSdirect= {}'.format(end - start))
if 'feeder_file' in kwargs:
self.feeder_file = kwargs['feeder_file']
with open(self.feeder_file, 'r') as f:
lines = f.readlines()
feeders = {}
substations = {}
substation_transformers = {}
for line in lines[1:]:
(node, sub, feed, sub_trans) = list(map(lambda x: x.strip(), line.split(' ')))
if not feed in feeders:
feeders[feed] = [node.lower().replace('.', '')]
else:
feeders[feed].append(node.lower().replace('.', ''))
if not feed in substations:
substations[feed] = sub.lower().replace('.', '')
if not feed in substation_transformers:
substation_transformers[feed] = sub.lower().replace('.', '')
for (f_name, f_data) in feeders.items():
api_feeder_metadata = Feeder_metadata(model)
api_feeder_metadata.name = f_name
if f_name in substation_transformers:
api_feeder_metadata.transformer = substation_transformers[f_name]
if f_name in substations:
api_feeder_metadata.substation = substations[f_name]
read_power_source = True
if 'read_power_source' in kwargs:
read_power_source = kwargs['read_power_source']
if read_power_source:
sources = _dss_class_to_dict('Vsource')
for (source_name, source_data) in sources.items():
if not source_data['enabled'] == 'Yes':
continue
try:
api_power_source = PowerSource(model)
except:
continue
try:
api_power_source.name = source_name
except:
pass
try:
api_power_source.nominal_voltage = float(source_data['basekv']) * 10 ** 3
except:
pass
try:
api_power_source.per_unit = float(source_data['pu'])
except:
pass
try:
api_power_source.is_sourcebus = True
except:
pass
try:
api_power_source.rated_power = float(source_data['baseMVA']) * 10 ** 6
except:
pass
try:
api_power_source.emergency_power = float(source_data['MVAsc3']) * 10 ** 6
except:
pass
if 'Z0' in source_data and isinstance(source_data['Z0'], list):
if len(source_data['Z0']) == 2:
try:
api_power_source.zero_sequence_impedance = complex(float(source_data['Z0'][0]), float(source_data['Z0'][1]))
except:
pass
elif 'R0' in source_data and source_data['R0'] != '' and ('X0' in source_data) and (source_data['X0'] != ''):
try:
api_power_source.zero_sequence_impedance = complex(float(source_data['R0']), float(source_data['X0']))
except:
pass
if 'Z1' in source_data and isinstance(source_data['Z1'], list):
if len(source_data['Z1']) == 2:
try:
api_power_source.positive_sequence_impedance = complex(float(source_data['Z1'][0]), float(source_data['Z1'][1]))
except:
pass
elif 'R1' in source_data and source_data['R1'] != '' and ('X1' in source_data) and (source_data['X1'] != ''):
try:
api_power_source.positive_sequence_impedance = complex(float(source_data['R1']), float(source_data['X1']))
except:
pass
try:
api_power_source.phase_angle = float(source_data['angle'])
except:
pass
api_power_source.phases = list(map(lambda x: Unicode(self.phase_mapping(x)), [1, 2, 3]))
self.bus_coord_file = self.DSS_file_names['Nodes']
skip_coordinate_parsing = False
try:
with open(self.bus_coord_file, 'r') as g:
coordinates = g.readlines()
except IOError:
skip_coordinate_parsing = True
(X, Y) = (None, None)
if not skip_coordinate_parsing:
for line in coordinates:
if line.strip() == '':
continue
try:
(name, X, Y) = list(map(lambda x: x.strip(), line.split(self.coordinates_delimiter)))
name = name.lower()
except:
logger.warning('Could not parse: ' + str(line))
name = None
X = None
Y = None
pass
try:
X = float(X)
Y = float(Y)
except:
logger.warning('Could not cast coordinates {X}, {Y} for bus {name}'.format(X=X, Y=Y, name=name))
pass
powersource_pos = Position(model)
powersource_pos.long = X
powersource_pos.lat = Y
api_power_source.positions.append(powersource_pos)
try:
if '.' in source_data['bus1']:
api_power_source.connecting_element = source_data['bus1'].split('.')[0]
else:
api_power_source.connecting_element = source_data['bus1']
self.source_name = api_power_source.connecting_element + '_src'
api_feeder_metadata = Feeder_metadata(model)
api_feeder_metadata.name = self.source_name
api_feeder_metadata.headnode = api_power_source.connecting_element
api_feeder_metadata.substation = api_power_source.connecting_element
api_feeder_metadata.nominal_voltage = api_power_source.nominal_voltage
except:
pass
return 1
super(Reader, self).parse(model, **kwargs)
storages = _dss_class_to_dict('storage')
for (name, data) in storages.items():
if not data['enabled'] == 'Yes':
continue
api_storage = Storage(model)
api_storage.feeder_name = self.source_name
try:
api_storage.name = name
except:
pass
try:
api_storage.connecting_element = data['bus1']
except:
pass
try:
api_storage.nominal_voltage = float(data['kv']) * 10 ** 3
except:
pass
try:
api_storage.rated_power = float(data['kWrated']) * 10 ** 3
except:
pass
try:
api_storage.rated_kWh = float(data['kWhrated'])
except:
pass
try:
api_storage.stored_kWh = float(data['kWhstored'])
except:
pass
try:
api_storage.reserve = float(data['%reserve'])
except:
pass
try:
api_storage.state = data['State']
except:
pass
try:
api_storage.discharge_rate = float(data['%Discharge'])
except:
pass
try:
api_storage.charge_rate = float(data['%Charge'])
except:
pass
try:
api_storage.charging_efficiency = float(data['%EffCharge'])
except:
pass
try:
api_storage.discharging_efficiency = float(data['%EffDischarge'])
except:
pass
try:
api_storage.resistance = float(data['%R'])
except:
pass
try:
api_storage.reactance = float(data['%X'])
except:
pass
try:
api_storage.model_ = int(data['model'])
except:
pass
try:
api_storage.yearly = data['yearly']
except:
pass
try:
api_storage.daily = data['daily']
except:
pass
try:
api_storage.duty = data['duty']
except:
pass
try:
api_storage.discharge_trigger = float(data['DischargeTrigger'])
except:
pass
try:
api_storage.charge_trigger = float(data['ChargeTrigger'])
except:
pass
N_phases = int(data['phases'])
for phase in range(N_phases):
try:
api_phase_storage = PhaseStorage(model)
except:
pass
try:
api_phase_storage.p = float(data['kW']) / float(N_phases)
except:
pass
try:
api_phase_storage.q = float(data['kvar']) / float(N_phases)
except:
pass
api_storage.phase_storages.append(api_phase_storage)
model.set_names()
AllBusNames = dss.Circuit.AllBusNames()
for bus_name in AllBusNames:
dss.Circuit.SetActiveBus(bus_name)
try:
model[bus_name.lower()].nominal_voltage = dss.Bus.kVBase() * math.sqrt(3) * 10 ** 3
except:
print('Could not set nominal voltage for bus {b}'.format(b=bus_name))
pass
for obj in model.models:
if hasattr(obj, 'nominal_voltage') and obj.nominal_voltage is None:
if hasattr(obj, 'connecting_element'):
try:
obj.nominal_voltage = model[obj.connecting_element].nominal_voltage
except:
pass
elif hasattr(obj, 'from_element'):
try:
obj.nominal_voltage = model[obj.from_element].nominal_voltage
except:
pass
elif isinstance(obj, PowerTransformer) or isinstance(obj, Regulator):
_from = obj.from_element
_to = obj.to_element
mapp = {0: _from, 1: _to, 2: _to}
for x in range(3):
if len(obj.windings) > x and obj.windings[x].nominal_voltage is None:
try:
obj.windings[x].nominal_voltage = model[mapp[x]].nominal_voltage
except:
pass
return 1
|
ditto
|
positive
|
def next(self, states, **runopts):
betas = self.betas
if betas is None:
betas = [state.beta for state in states]
for i in range(len(states) - 1):
<DeepExtract>
beta_diff = betas[i] - betas[i + 1]
energy_diff = states[i].samples.first.energy - states[i + 1].samples.first.energy
w = math.exp(min(0, beta_diff * energy_diff))
p = self.random.uniform(0, 1)
if w > p:
(states[i].samples, states[i + 1].samples) = (states[i + 1].samples, states[i].samples)
states = states
</DeepExtract>
return states
|
def next(self, states, **runopts):
betas = self.betas
if betas is None:
betas = [state.beta for state in states]
for i in range(len(states) - 1):
beta_diff = betas[i] - betas[i + 1]
energy_diff = states[i].samples.first.energy - states[i + 1].samples.first.energy
w = math.exp(min(0, beta_diff * energy_diff))
p = self.random.uniform(0, 1)
if w > p:
(states[i].samples, states[i + 1].samples) = (states[i + 1].samples, states[i].samples)
states = states
return states
|
dwave-hybrid
|
positive
|
def monkey_patch(state_path: Optional[str]) -> None:
"""
Apply all monkey patches to swap in high performance implementations.
This function must be called before any of the ethereum modules are
imported anywhere.
"""
<DeepExtract>
import ethereum.frontier.state as slow_state
from . import state_db as fast_state
optimized_state_db_patches = {'State': fast_state.State, 'get_account_optional': fast_state.get_account_optional, 'set_account': fast_state.set_account, 'destroy_storage': fast_state.destroy_storage, 'get_storage': fast_state.get_storage, 'get_storage_original': fast_state.get_storage_original, 'set_storage': fast_state.set_storage, 'state_root': fast_state.state_root, 'storage_root': fast_state.storage_root, 'begin_transaction': fast_state.begin_transaction, 'rollback_transaction': fast_state.rollback_transaction, 'commit_transaction': fast_state.commit_transaction, 'close_state': fast_state.close_state}
for (name, value) in optimized_state_db_patches.items():
setattr(slow_state, name, value)
if state_path is not None:
fast_state.State.default_path = state_path
</DeepExtract>
<DeepExtract>
import ethereum.frontier.fork as slow_spec
from . import fork as fast_spec
slow_spec.validate_proof_of_work = fast_spec.validate_proof_of_work
</DeepExtract>
|
def monkey_patch(state_path: Optional[str]) -> None:
"""
Apply all monkey patches to swap in high performance implementations.
This function must be called before any of the ethereum modules are
imported anywhere.
"""
import ethereum.frontier.state as slow_state
from . import state_db as fast_state
optimized_state_db_patches = {'State': fast_state.State, 'get_account_optional': fast_state.get_account_optional, 'set_account': fast_state.set_account, 'destroy_storage': fast_state.destroy_storage, 'get_storage': fast_state.get_storage, 'get_storage_original': fast_state.get_storage_original, 'set_storage': fast_state.set_storage, 'state_root': fast_state.state_root, 'storage_root': fast_state.storage_root, 'begin_transaction': fast_state.begin_transaction, 'rollback_transaction': fast_state.rollback_transaction, 'commit_transaction': fast_state.commit_transaction, 'close_state': fast_state.close_state}
for (name, value) in optimized_state_db_patches.items():
setattr(slow_state, name, value)
if state_path is not None:
fast_state.State.default_path = state_path
import ethereum.frontier.fork as slow_spec
from . import fork as fast_spec
slow_spec.validate_proof_of_work = fast_spec.validate_proof_of_work
|
eth1.0-specs
|
positive
|
def test_exception_raised_if_reset_not_called_between_last_and_first(self):
sequence = list('fmmmmmlfm')
with self.assertRaisesRegex(RuntimeError, 'reset'):
for (i, step_type_str) in enumerate(sequence, start=1):
<DeepExtract>
if step_type_str == 'f':
timestep = dm_env.restart(observation=i)
elif step_type_str == 'm':
timestep = dm_env.transition(reward=0, observation=i)
elif step_type_str == 'l':
timestep = dm_env.termination(reward=0, observation=i)
else:
raise ValueError('Unknown step type string %s.' % step_type_str)
</DeepExtract>
self.processor(timestep)
|
def test_exception_raised_if_reset_not_called_between_last_and_first(self):
sequence = list('fmmmmmlfm')
with self.assertRaisesRegex(RuntimeError, 'reset'):
for (i, step_type_str) in enumerate(sequence, start=1):
if step_type_str == 'f':
timestep = dm_env.restart(observation=i)
elif step_type_str == 'm':
timestep = dm_env.transition(reward=0, observation=i)
elif step_type_str == 'l':
timestep = dm_env.termination(reward=0, observation=i)
else:
raise ValueError('Unknown step type string %s.' % step_type_str)
self.processor(timestep)
|
dqn_zoo
|
positive
|
def delete_model(self):
<DeepExtract>
try:
portal_pos = UserSettings.objects.get(user=self.obj.user, key='dashboard:%s:pos' % self.obj.page_id)
except UserSettings.DoesNotExist:
return
pos = [[w for w in col.split(',') if w != str(self.obj.id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
</DeepExtract>
super(UserWidgetAdmin, self).delete_model()
|
def delete_model(self):
try:
portal_pos = UserSettings.objects.get(user=self.obj.user, key='dashboard:%s:pos' % self.obj.page_id)
except UserSettings.DoesNotExist:
return
pos = [[w for w in col.split(',') if w != str(self.obj.id)] for col in portal_pos.value.split('|')]
portal_pos.value = '|'.join([','.join(col) for col in pos])
portal_pos.save()
super(UserWidgetAdmin, self).delete_model()
|
book
|
positive
|
def voc_eval(detpath, annopath, imagesetfile, classname, cachedir, ovthresh=0.5, use_07_metric=False, difficult=1):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
dif = difficult
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile)
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
print('classname', detpath)
print('classname', classname)
print('difficulty:', dif)
if not os.path.isfile(cachefile):
recs = {}
for (i, imagename) in enumerate(imagenames):
<DeepExtract>
tree = ET.parse(annopath.format(imagename))
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = float(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(np.floor(float(bbox.find('xmin').text))), int(np.floor(float(bbox.find('ymin').text))), int(np.floor(float(bbox.find('xmax').text))), int(np.floor(float(bbox.find('ymax').text)))]
objects.append(obj_struct)
recs[imagename] = objects
</DeepExtract>
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(i + 1, len(imagenames)))
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
with open(cachefile, 'rb') as f:
try:
recs = pickle.load(f)
except:
recs = pickle.load(f, encoding='bytes')
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] != dif for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox, 'difficult': difficult, 'det': det}
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
if BB.shape[0] > 0:
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
uni = (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.0
R['det'][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / (float(npos) + 1e-05)
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
<DeepExtract>
if use_07_metric:
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
ap = ap
</DeepExtract>
print('ap:', ap)
return (rec, prec, ap)
|
def voc_eval(detpath, annopath, imagesetfile, classname, cachedir, ovthresh=0.5, use_07_metric=False, difficult=1):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
dif = difficult
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile)
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
print('classname', detpath)
print('classname', classname)
print('difficulty:', dif)
if not os.path.isfile(cachefile):
recs = {}
for (i, imagename) in enumerate(imagenames):
tree = ET.parse(annopath.format(imagename))
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = float(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(np.floor(float(bbox.find('xmin').text))), int(np.floor(float(bbox.find('ymin').text))), int(np.floor(float(bbox.find('xmax').text))), int(np.floor(float(bbox.find('ymax').text)))]
objects.append(obj_struct)
recs[imagename] = objects
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(i + 1, len(imagenames)))
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
with open(cachefile, 'rb') as f:
try:
recs = pickle.load(f)
except:
recs = pickle.load(f, encoding='bytes')
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] != dif for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox, 'difficult': difficult, 'det': det}
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
if BB.shape[0] > 0:
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
uni = (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.0
R['det'][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / (float(npos) + 1e-05)
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
if use_07_metric:
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
ap = ap
print('ap:', ap)
return (rec, prec, ap)
|
DatasetCulling
|
positive
|
def get_user_inline_policies(user, **kwargs):
<DeepExtract>
marker = {}
inline_policies = []
while True:
response = client.list_user_policies(UserName=user['UserName'], **marker)
inline_policies.extend(response['PolicyNames'])
if response['IsTruncated']:
marker['Marker'] = response['Marker']
else:
policy_names = inline_policies
</DeepExtract>
policies = {}
for policy_name in policy_names:
<DeepExtract>
response = client.get_user_policy(UserName=user['UserName'], PolicyName=policy_name)
policies[policy_name] = response.get('PolicyDocument')
</DeepExtract>
return policies
|
def get_user_inline_policies(user, **kwargs):
marker = {}
inline_policies = []
while True:
response = client.list_user_policies(UserName=user['UserName'], **marker)
inline_policies.extend(response['PolicyNames'])
if response['IsTruncated']:
marker['Marker'] = response['Marker']
else:
policy_names = inline_policies
policies = {}
for policy_name in policy_names:
response = client.get_user_policy(UserName=user['UserName'], PolicyName=policy_name)
policies[policy_name] = response.get('PolicyDocument')
return policies
|
cloudaux
|
positive
|
def _set_reversesort(self, val):
<DeepExtract>
if 'reversesort' in 'field_names':
self._validate_field_names(val)
elif 'reversesort' in ('start', 'end', 'max_width', 'padding_width', 'left_padding_width', 'right_padding_width', 'format'):
self._validate_nonnegative_int('reversesort', val)
elif 'reversesort' in 'sortby':
self._validate_field_name('reversesort', val)
elif 'reversesort' in 'sort_key':
self._validate_function('reversesort', val)
elif 'reversesort' in 'hrules':
self._validate_hrules('reversesort', val)
elif 'reversesort' in 'vrules':
self._validate_vrules('reversesort', val)
elif 'reversesort' in 'fields':
self._validate_all_field_names('reversesort', val)
elif 'reversesort' in ('header', 'border', 'reversesort', 'xhtml', 'print_empty'):
self._validate_true_or_false('reversesort', val)
elif 'reversesort' in 'header_style':
self._validate_header_style(val)
elif 'reversesort' in 'int_format':
self._validate_int_format('reversesort', val)
elif 'reversesort' in 'float_format':
self._validate_float_format('reversesort', val)
elif 'reversesort' in ('vertical_char', 'horizontal_char', 'junction_char'):
self._validate_single_char('reversesort', val)
elif 'reversesort' in 'attributes':
self._validate_attributes('reversesort', val)
else:
raise Exception('Unrecognised option: %s!' % 'reversesort')
</DeepExtract>
self._reversesort = val
|
def _set_reversesort(self, val):
if 'reversesort' in 'field_names':
self._validate_field_names(val)
elif 'reversesort' in ('start', 'end', 'max_width', 'padding_width', 'left_padding_width', 'right_padding_width', 'format'):
self._validate_nonnegative_int('reversesort', val)
elif 'reversesort' in 'sortby':
self._validate_field_name('reversesort', val)
elif 'reversesort' in 'sort_key':
self._validate_function('reversesort', val)
elif 'reversesort' in 'hrules':
self._validate_hrules('reversesort', val)
elif 'reversesort' in 'vrules':
self._validate_vrules('reversesort', val)
elif 'reversesort' in 'fields':
self._validate_all_field_names('reversesort', val)
elif 'reversesort' in ('header', 'border', 'reversesort', 'xhtml', 'print_empty'):
self._validate_true_or_false('reversesort', val)
elif 'reversesort' in 'header_style':
self._validate_header_style(val)
elif 'reversesort' in 'int_format':
self._validate_int_format('reversesort', val)
elif 'reversesort' in 'float_format':
self._validate_float_format('reversesort', val)
elif 'reversesort' in ('vertical_char', 'horizontal_char', 'junction_char'):
self._validate_single_char('reversesort', val)
elif 'reversesort' in 'attributes':
self._validate_attributes('reversesort', val)
else:
raise Exception('Unrecognised option: %s!' % 'reversesort')
self._reversesort = val
|
C--Compiler
|
positive
|
def forward(self, ys):
n = len(ys)
(inputs, zs) = ([None] * n, [None] * n)
for i in range(n):
inputs[i] = concatenate([ones(1), ys[i]])
<DeepExtract>
zs[i] = 1.0 / (1.0 + exp(-dot(self.W2, inputs[i])))
</DeepExtract>
self.state = (inputs, zs)
return zs
|
def forward(self, ys):
n = len(ys)
(inputs, zs) = ([None] * n, [None] * n)
for i in range(n):
inputs[i] = concatenate([ones(1), ys[i]])
zs[i] = 1.0 / (1.0 + exp(-dot(self.W2, inputs[i])))
self.state = (inputs, zs)
return zs
|
deep_ocr
|
positive
|
def insertDirectly(self, pair, low):
(tag, x) = pair
d = self.lookup[tag]
<DeepExtract>
if self.space() < _width(tag):
raise error.ClassfileLimitExceeded()
temp = len(self.vals)
self.vals += [None] * _width(tag)
d[x] = index = temp
</DeepExtract>
self.vals[index] = pair
|
def insertDirectly(self, pair, low):
(tag, x) = pair
d = self.lookup[tag]
if self.space() < _width(tag):
raise error.ClassfileLimitExceeded()
temp = len(self.vals)
self.vals += [None] * _width(tag)
d[x] = index = temp
self.vals[index] = pair
|
Apk-Changer
|
positive
|
@pytest.mark.skipif(not faiss_knn_wrapper.is_available(), reason='requires the faiss library')
def test_faiss_knne():
<DeepExtract>
X = np.tile(np.arange(15).reshape(-1, 1), 3)
y = np.array(5 * [0] + 5 * [1] + 5 * [2])
knne = KNNE(n_neighbors=6, knn_classifier='faiss')
knne.fit(X, y)
(X, y, knne) = (X, y, knne)
</DeepExtract>
y_pred = knne.predict(X)
assert np.allclose(y, y_pred)
|
@pytest.mark.skipif(not faiss_knn_wrapper.is_available(), reason='requires the faiss library')
def test_faiss_knne():
X = np.tile(np.arange(15).reshape(-1, 1), 3)
y = np.array(5 * [0] + 5 * [1] + 5 * [2])
knne = KNNE(n_neighbors=6, knn_classifier='faiss')
knne.fit(X, y)
(X, y, knne) = (X, y, knne)
y_pred = knne.predict(X)
assert np.allclose(y, y_pred)
|
DESlib
|
positive
|
def test_crud_chart(self):
"""
Ensure we can register a new chart with cloudman.
Only staff are allowed to directly manipulate charts.
Other users must go through a project in projman.
"""
<DeepExtract>
url = reverse('helmsman:charts-list')
response = self.client.post(url, self.CHART_DATA, format='json')
</DeepExtract>
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
with self.assertRaises(ChartExistsException):
<DeepExtract>
url = reverse('helmsman:charts-list')
return self.client.post(url, self.CHART_DATA, format='json')
</DeepExtract>
<DeepExtract>
url = reverse('helmsman:charts-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertDictContainsSubset(self.CHART_DATA, response.data['results'][1])
chart_id = response.data['results'][1]['id']
</DeepExtract>
self.assertEquals(response.data['id'], chart_id)
<DeepExtract>
url = reverse('helmsman:charts-detail', args=[chart_id])
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertDictContainsSubset(self.CHART_DATA, response.data)
chart_id = response.data['id']
</DeepExtract>
<DeepExtract>
url = reverse('helmsman:charts-detail', args=[chart_id])
response = self.client.put(url, self.CHART_DATA_UPDATE, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = response
</DeepExtract>
self.assertDictContainsSubset(self.CHART_DATA_UPDATE, response.data)
<DeepExtract>
url = reverse('helmsman:charts-detail', args=[chart_id])
response = self.client.delete(url)
</DeepExtract>
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)
<DeepExtract>
url = reverse('helmsman:charts-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data['results']), 1)
</DeepExtract>
|
def test_crud_chart(self):
"""
Ensure we can register a new chart with cloudman.
Only staff are allowed to directly manipulate charts.
Other users must go through a project in projman.
"""
url = reverse('helmsman:charts-list')
response = self.client.post(url, self.CHART_DATA, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
with self.assertRaises(ChartExistsException):
url = reverse('helmsman:charts-list')
return self.client.post(url, self.CHART_DATA, format='json')
url = reverse('helmsman:charts-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertDictContainsSubset(self.CHART_DATA, response.data['results'][1])
chart_id = response.data['results'][1]['id']
self.assertEquals(response.data['id'], chart_id)
url = reverse('helmsman:charts-detail', args=[chart_id])
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertDictContainsSubset(self.CHART_DATA, response.data)
chart_id = response.data['id']
url = reverse('helmsman:charts-detail', args=[chart_id])
response = self.client.put(url, self.CHART_DATA_UPDATE, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = response
self.assertDictContainsSubset(self.CHART_DATA_UPDATE, response.data)
url = reverse('helmsman:charts-detail', args=[chart_id])
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, response.data)
url = reverse('helmsman:charts-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data['results']), 1)
|
cloudman
|
positive
|
def test_has_dependencies_remaining_with_no_dependencies(self):
<DeepExtract>
task_to_run = dict(task_reference=task_to_run_reference, dependencies_by_reference=[])
all_tasks = dict()
all_tasks[task_to_run_reference] = task_to_run
(task_to_run, all_tasks, _) = (task_to_run, all_tasks, list(all_tasks.keys()))
</DeepExtract>
expected_is_currently_blocked = False
expected_is_permanently_blocked = False
(actual_is_currently_blocked, actual_is_permanently_blocked) = self.sut.has_dependencies_remaining(task_to_run, all_tasks)
self.assertEqual(expected_is_currently_blocked, actual_is_currently_blocked)
self.assertEqual(expected_is_permanently_blocked, actual_is_permanently_blocked)
|
def test_has_dependencies_remaining_with_no_dependencies(self):
task_to_run = dict(task_reference=task_to_run_reference, dependencies_by_reference=[])
all_tasks = dict()
all_tasks[task_to_run_reference] = task_to_run
(task_to_run, all_tasks, _) = (task_to_run, all_tasks, list(all_tasks.keys()))
expected_is_currently_blocked = False
expected_is_permanently_blocked = False
(actual_is_currently_blocked, actual_is_permanently_blocked) = self.sut.has_dependencies_remaining(task_to_run, all_tasks)
self.assertEqual(expected_is_currently_blocked, actual_is_currently_blocked)
self.assertEqual(expected_is_permanently_blocked, actual_is_permanently_blocked)
|
aws-service-catalog-puppet
|
positive
|
def example_gradient(x):
<DeepExtract>
if hasattr(x, '__len__'):
assert len(x) == 1
if isinstance(x, pd.DataFrame):
res = x['value'].to_numpy()[0]
elif isinstance(x, pd.Series):
res = x.to_numpy()[0]
elif isinstance(x, (np.ndarray, list, tuple)):
res = x[0]
else:
res = float(x)
x = res
</DeepExtract>
exponents = np.arange(len(WEIGHTS))
return WEIGHTS * exponents @ x ** (exponents - 1).clip(0)
|
def example_gradient(x):
if hasattr(x, '__len__'):
assert len(x) == 1
if isinstance(x, pd.DataFrame):
res = x['value'].to_numpy()[0]
elif isinstance(x, pd.Series):
res = x.to_numpy()[0]
elif isinstance(x, (np.ndarray, list, tuple)):
res = x[0]
else:
res = float(x)
x = res
exponents = np.arange(len(WEIGHTS))
return WEIGHTS * exponents @ x ** (exponents - 1).clip(0)
|
estimagic
|
positive
|
@mock.patch('urllib.request.Request')
@mock.patch('urllib.request.urlopen')
def test_not_found(urlopen, Request, mirrorurl):
excp = HTTPError('https://archlinux.org/404.txt', 404, 'Not Found', '', None)
<DeepExtract>
urlopen.return_value.read.side_effect = excp
Request.get_host.return_value = HOSTNAME
Request.type.return_value = PROTOCOL
</DeepExtract>
call_command('mirrorcheck')
mirrorlog = MirrorLog.objects.first()
assert mirrorlog.error == str(excp)
assert not mirrorlog.is_success
|
@mock.patch('urllib.request.Request')
@mock.patch('urllib.request.urlopen')
def test_not_found(urlopen, Request, mirrorurl):
excp = HTTPError('https://archlinux.org/404.txt', 404, 'Not Found', '', None)
urlopen.return_value.read.side_effect = excp
Request.get_host.return_value = HOSTNAME
Request.type.return_value = PROTOCOL
call_command('mirrorcheck')
mirrorlog = MirrorLog.objects.first()
assert mirrorlog.error == str(excp)
assert not mirrorlog.is_success
|
archweb
|
positive
|
@cli.command()
@config_file_options()
@endpoint_options
@solver_options
@click.option('--sampling-params', '-m', default=None, help='Sampling parameters (JSON encoded)')
@click.option('--request-timeout', default=None, type=float, help='Connection and read timeouts (in seconds) for all API requests')
@click.option('--polling-timeout', default=None, type=float, help='Problem polling timeout in seconds (time-to-solution timeout)')
@click.option('--label', default='dwave ping', type=str, help='Problem label')
@click.option('--json', 'json_output', default=False, is_flag=True, help='JSON output')
@standardized_output
def ping(*, config_file, profile, endpoint, region, client_type, solver_def, sampling_params, request_timeout, polling_timeout, label, json_output, output):
"""Ping the QPU by submitting a single-qubit problem."""
params = {}
if sampling_params is not None:
try:
params = json.loads(sampling_params)
assert isinstance(params, dict)
except:
raise CLIError('sampling parameters required as JSON-encoded map of param names to values', code=99)
if label:
params.update(label=label)
config = dict(config_file=config_file, profile=profile, endpoint=endpoint, region=region, client=client_type, solver=solver_def, request_timeout=request_timeout, polling_timeout=polling_timeout)
t0 = timer()
<DeepExtract>
if output is None:
output = click.echo
try:
client = Client.from_config(**config)
except Exception as e:
raise CLIError('Invalid configuration: {}'.format(e), code=1)
config_file = config.get('config_file')
if config_file:
output('Using configuration file: {config_file}', config_file=config_file)
profile = config.get('profile')
if profile:
output('Using profile: {profile}', profile=profile)
output('Using endpoint: {endpoint}', endpoint=client.endpoint)
output('Using region: {region}', region=client.region)
try:
solver = client.get_solver()
except SolverAuthenticationError:
raise CLIError('Authentication error. Check credentials in your configuration file.', 2)
except SolverNotFoundError:
raise CLIError('Solver not available.', 6)
except (InvalidAPIResponseError, UnsupportedSolverError):
raise CLIError('Invalid or unexpected API response.', 3)
except RequestTimeout:
raise CLIError('API connection timed out.', 4)
except requests.exceptions.SSLError as e:
if 'CERTIFICATE_VERIFY_FAILED' in str(e):
raise CLIError('Certificate verification failed. Please check that your API endpoint is correct. If you are connecting to a private or third-party D-Wave system that uses self-signed certificate(s), please see https://support.dwavesys.com/hc/en-us/community/posts/360018930954.', 5)
raise CLIError('Unexpected SSL error while fetching solver: {!r}'.format(e), 5)
except Exception as e:
raise CLIError('Unexpected error while fetching solver: {!r}'.format(e), 5)
output('Using solver: {solver_id}', solver_id=solver.id)
(client, solver) = (client, solver)
</DeepExtract>
if hasattr(solver, 'nodes'):
problem = ({min(solver.nodes): 0}, {})
else:
problem = ({0: 1}, {})
t1 = timer()
<DeepExtract>
try:
response = solver.sample_ising(*problem, **params)
problem_id = response.wait_id()
output('Submitted problem ID: {problem_id}', problem_id=problem_id)
response.result()
except RequestTimeout:
raise CLIError('API connection timed out.', 8)
except PollingTimeout:
raise CLIError('Polling timeout exceeded.', 9)
except Exception as e:
raise CLIError('Sampling error: {!r}'.format(e), 10)
response = response
</DeepExtract>
t2 = timer()
<DeepExtract>
info.update(params)
if not json_output:
msg = '\nWall clock time:'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
</DeepExtract>
<DeepExtract>
info.update(params)
if not json_output:
msg = ' * Solver definition fetch: {wallclock_solver_definition:.3f} ms'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
</DeepExtract>
<DeepExtract>
info.update(params)
if not json_output:
msg = ' * Problem submit and results fetch: {wallclock_sampling:.3f} ms'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
</DeepExtract>
<DeepExtract>
info.update(params)
if not json_output:
msg = ' * Total: {wallclock_total:.3f} ms'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
</DeepExtract>
if response.timing:
<DeepExtract>
info.update(params)
if not json_output:
msg = '\nQPU timing:'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
</DeepExtract>
for (component, duration) in sorted(response.timing.items()):
<DeepExtract>
info.update(params)
if not json_output:
msg = ' * %(name)s = {%(name)s} us' % {'name': component}.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
</DeepExtract>
else:
<DeepExtract>
info.update(params)
if not json_output:
msg = '\nQPU timing data not available.'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
</DeepExtract>
|
@cli.command()
@config_file_options()
@endpoint_options
@solver_options
@click.option('--sampling-params', '-m', default=None, help='Sampling parameters (JSON encoded)')
@click.option('--request-timeout', default=None, type=float, help='Connection and read timeouts (in seconds) for all API requests')
@click.option('--polling-timeout', default=None, type=float, help='Problem polling timeout in seconds (time-to-solution timeout)')
@click.option('--label', default='dwave ping', type=str, help='Problem label')
@click.option('--json', 'json_output', default=False, is_flag=True, help='JSON output')
@standardized_output
def ping(*, config_file, profile, endpoint, region, client_type, solver_def, sampling_params, request_timeout, polling_timeout, label, json_output, output):
"""Ping the QPU by submitting a single-qubit problem."""
params = {}
if sampling_params is not None:
try:
params = json.loads(sampling_params)
assert isinstance(params, dict)
except:
raise CLIError('sampling parameters required as JSON-encoded map of param names to values', code=99)
if label:
params.update(label=label)
config = dict(config_file=config_file, profile=profile, endpoint=endpoint, region=region, client=client_type, solver=solver_def, request_timeout=request_timeout, polling_timeout=polling_timeout)
t0 = timer()
if output is None:
output = click.echo
try:
client = Client.from_config(**config)
except Exception as e:
raise CLIError('Invalid configuration: {}'.format(e), code=1)
config_file = config.get('config_file')
if config_file:
output('Using configuration file: {config_file}', config_file=config_file)
profile = config.get('profile')
if profile:
output('Using profile: {profile}', profile=profile)
output('Using endpoint: {endpoint}', endpoint=client.endpoint)
output('Using region: {region}', region=client.region)
try:
solver = client.get_solver()
except SolverAuthenticationError:
raise CLIError('Authentication error. Check credentials in your configuration file.', 2)
except SolverNotFoundError:
raise CLIError('Solver not available.', 6)
except (InvalidAPIResponseError, UnsupportedSolverError):
raise CLIError('Invalid or unexpected API response.', 3)
except RequestTimeout:
raise CLIError('API connection timed out.', 4)
except requests.exceptions.SSLError as e:
if 'CERTIFICATE_VERIFY_FAILED' in str(e):
raise CLIError('Certificate verification failed. Please check that your API endpoint is correct. If you are connecting to a private or third-party D-Wave system that uses self-signed certificate(s), please see https://support.dwavesys.com/hc/en-us/community/posts/360018930954.', 5)
raise CLIError('Unexpected SSL error while fetching solver: {!r}'.format(e), 5)
except Exception as e:
raise CLIError('Unexpected error while fetching solver: {!r}'.format(e), 5)
output('Using solver: {solver_id}', solver_id=solver.id)
(client, solver) = (client, solver)
if hasattr(solver, 'nodes'):
problem = ({min(solver.nodes): 0}, {})
else:
problem = ({0: 1}, {})
t1 = timer()
try:
response = solver.sample_ising(*problem, **params)
problem_id = response.wait_id()
output('Submitted problem ID: {problem_id}', problem_id=problem_id)
response.result()
except RequestTimeout:
raise CLIError('API connection timed out.', 8)
except PollingTimeout:
raise CLIError('Polling timeout exceeded.', 9)
except Exception as e:
raise CLIError('Sampling error: {!r}'.format(e), 10)
response = response
t2 = timer()
info.update(params)
if not json_output:
msg = '\nWall clock time:'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
info.update(params)
if not json_output:
msg = ' * Solver definition fetch: {wallclock_solver_definition:.3f} ms'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
info.update(params)
if not json_output:
msg = ' * Problem submit and results fetch: {wallclock_sampling:.3f} ms'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
info.update(params)
if not json_output:
msg = ' * Total: {wallclock_total:.3f} ms'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
if response.timing:
info.update(params)
if not json_output:
msg = '\nQPU timing:'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
for (component, duration) in sorted(response.timing.items()):
info.update(params)
if not json_output:
msg = ' * %(name)s = {%(name)s} us' % {'name': component}.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
else:
info.update(params)
if not json_output:
msg = '\nQPU timing data not available.'.format(**params)
if maxlen is not None:
msg = strtrunc(msg, maxlen)
click.echo(msg)
</DeepExtract>
|
dwave-cloud-client
|
positive
|
def forward(self, src: torch.Tensor, src_mask: Optional[torch.Tensor]=None, src_key_padding_mask: Optional[torch.Tensor]=None, query_stream: Optional[torch.Tensor]=None, attention_mask_query: Optional[torch.Tensor]=None, target_mapping: Optional[torch.Tensor]=None, relative_distances: Optional[Tuple[torch.Tensor]]=None, token_distances: Optional[torch.Tensor]=None, edge_indices: Optional[torch.Tensor]=None, edge_embeddings: Optional[torch.Tensor]=None, need_weights: bool=False, mems: Optional[torch.tensor]=None, asserts=True) -> TransformerLayerOutput:
"""
Forward step of the code transformer layer.
Parameters
----------
src: torch.Tensor, dim [seq_len, batch_size, d_model]
The content stream embeddings.
src_mask: torch.Tensor, dim [seq_len, seq_len, batch_size, 1]
Attention mask for the content stream.
src_key_padding_mask: torch.Tensor (currently not used)
query_stream: torch.tensor, dim [num_predict, batch_size, d_model]
The query stream embeddings.
attention_mask_query: torch.Tensor, dim [seq_len, seq_len, batch_size, 1]
Attention mask for the content stream (target nodes cannot see themselves).
target_mapping: torch.Tensor, dim [num_predict, seq_len, batch_size]
Mapping indicating which tokens are being predicted during pre-training.
Entry [i,j,k] is 1 if token [j,k] is the i-th target.
relative_distances: Tuple[torch.Tensor].
Tuple containing the relative distances.
The first element is the [num_distances, batch_size, seq_len, seq_len] dimensional index tensor, indexing
the second element, which is the [num_distances, num_bins, batch_size, d_model] dimensional tensor
containing the encoded distance bins.
That is, the value at [d, b, i, j] of the index tensor is the index of the distance bin of the d-th distance
of tokens i and j in batch b.
token_distances: torch.Tensor, dim [num_token_distances, batch_size, d_model]
The distances between tokens.
edge_indices: torch.Tensor, dim [3, num_edges_in_batch]
edge_embeddings: torch.Tensor, dim [num_edges_in_batch, d_model]
need_weights: bool
Whether to also return the attention probabilities.
mems: torch.tensor (currently not used)
asserts: bool
Whether to verify dimensions via asserts.
Returns
-------
outputs: tuple containing
* the new content stream embeddings, dim [seq_len, batch_size, d_model]
* the new query stream embeddings (or None if no query stream input is provided),
dim [num_predict, batch_size, d_model]
* (Optional) if need_weights=True, tuple containing
- content stream attention probabilities, dim [seq_len, seq_len, batch_size, num_head]
- query stream attention probabilities, or None if no query stream input provided,
dim [seq_len, seq_len, batch_size, num_head]
"""
if mems is not None:
raise NotImplementedError('memory currently not implemented')
content_stream_cat = src
(seq_len, bsz) = src.shape[:2]
if asserts:
assert src.shape == (seq_len, bsz, self.d_model)
if relative_distances is not None and len(relative_distances) > 0:
assert relative_distances[0].shape == (self.self_attn.num_relative_distances - int(self.self_attn.use_token_distances) - int(self.self_attn.use_edge_embeddings), bsz, seq_len, seq_len)
assert relative_distances[1].shape[0] == self.self_attn.num_relative_distances - int(self.self_attn.use_token_distances) - int(self.self_attn.use_edge_embeddings)
assert relative_distances[1].shape[-2:] == (bsz, self.d_model)
if src_mask is not None:
assert src_mask.shape == (seq_len, seq_len, bsz)
if query_stream is not None:
num_predict = query_stream.shape[0]
assert query_stream.shape == (num_predict, bsz, self.d_model)
if attention_mask_query is not None:
assert attention_mask_query.shape == (seq_len, seq_len, bsz)
k_content_stream = F.linear(content_stream_cat, self.self_attn.get_k_proj_weight(), self.self_attn.get_k_proj_bias())
v_content_stream = F.linear(content_stream_cat, self.self_attn.get_v_proj_weight(), self.self_attn.get_v_proj_bias())
q_content_stream = F.linear(src, self.self_attn.get_q_proj_weight(), self.self_attn.get_q_proj_bias())
k_position_encoding = None
dist_ixs = None
if relative_distances is not None:
dist_ixs = relative_distances[0]
encoded_distances = relative_distances[1]
k_position_encoding = torch.einsum('rkbd,rhd->rkbh', encoded_distances, self.self_attn.get_r_proj_weight())
k_position_encoding = k_position_encoding + self.self_attn.get_r_proj_bias()[:, None, None]
k_token_pos_encoding = None
if token_distances is not None:
k_token_pos_encoding = torch.einsum('kbd,hd->kbh', token_distances, self.self_attn.get_r_token_proj_weight())
k_token_pos_encoding = k_token_pos_encoding + self.self_attn.get_r_token_proj_bias()[None, None]
k_edge_type_encoding = None
if edge_embeddings is not None and self.self_attn.use_edge_embeddings:
assert edge_indices is not None
k_edge_type_encoding = torch.einsum('kd,hd->kh', edge_embeddings, self.self_attn.get_r_edge_proj_weight())
k_edge_type_encoding = k_edge_type_encoding + self.self_attn.get_r_edge_proj_bias()[None]
else:
edge_indices = None
att_out_content = self.self_attn.forward(q_content_stream, k_content_stream, v_content_stream, position_keys=k_position_encoding, token_pos_keys=k_token_pos_encoding, attn_mask=src_mask, distance_indices=dist_ixs, need_weights=need_weights, edge_embeddings=k_edge_type_encoding, edge_indices=edge_indices, key_padding_mask=src_key_padding_mask)
if need_weights:
(att_out_content, att_probs_content) = att_out_content
<DeepExtract>
src = src + self.dropout1(att_out_content)
src = self.norm1(src)
att_out_content = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(att_out_content)
src = self.norm2(src)
att_out_content = src
</DeepExtract>
if query_stream is not None:
q_query_stream = F.linear(query_stream, self.self_attn.get_q_proj_weight(), self.self_attn.get_q_proj_bias())
if target_mapping is not None:
q_query_stream = torch.einsum('mbk,mlb->lbk', q_query_stream, target_mapping)
att_out_query = self.self_attn.forward(q_query_stream, k_content_stream, v_content_stream, position_keys=k_position_encoding, token_pos_keys=k_token_pos_encoding, attn_mask=attention_mask_query, distance_indices=dist_ixs, need_weights=need_weights, edge_embeddings=k_edge_type_encoding, edge_indices=edge_indices, key_padding_mask=src_key_padding_mask)
if need_weights:
(att_out_query, att_probs_query) = att_out_query
att_out_query = torch.einsum('lbk,mlb->mbk', att_out_query, target_mapping)
else:
att_out_query = self.self_attn.forward(q_query_stream, k_content_stream, v_content_stream, position_keys=k_position_encoding, token_pos_keys=k_token_pos_encoding, attn_mask=attention_mask_query, distance_indices=dist_ixs, need_weights=need_weights, edge_embeddings=k_edge_type_encoding, edge_indices=edge_indices, key_padding_mask=src_key_padding_mask)
if need_weights:
(att_out_query, att_probs_query) = att_out_query
<DeepExtract>
query_stream = query_stream + self.dropout1(att_out_query)
query_stream = self.norm1(query_stream)
att_out_query = self.linear2(self.dropout(self.activation(self.linear1(query_stream))))
query_stream = query_stream + self.dropout2(att_out_query)
query_stream = self.norm2(query_stream)
att_out_query = query_stream
</DeepExtract>
else:
att_out_query = None
att_probs_query = None
if not need_weights:
att_probs_content = None
att_probs_query = None
outputs = TransformerLayerOutput(content_stream_out=att_out_content, query_stream_out=att_out_query, attentions=Attentions(content_attention=att_probs_content, query_attention=att_probs_query))
return outputs
|
def forward(self, src: torch.Tensor, src_mask: Optional[torch.Tensor]=None, src_key_padding_mask: Optional[torch.Tensor]=None, query_stream: Optional[torch.Tensor]=None, attention_mask_query: Optional[torch.Tensor]=None, target_mapping: Optional[torch.Tensor]=None, relative_distances: Optional[Tuple[torch.Tensor]]=None, token_distances: Optional[torch.Tensor]=None, edge_indices: Optional[torch.Tensor]=None, edge_embeddings: Optional[torch.Tensor]=None, need_weights: bool=False, mems: Optional[torch.tensor]=None, asserts=True) -> TransformerLayerOutput:
"""
Forward step of the code transformer layer.
Parameters
----------
src: torch.Tensor, dim [seq_len, batch_size, d_model]
The content stream embeddings.
src_mask: torch.Tensor, dim [seq_len, seq_len, batch_size, 1]
Attention mask for the content stream.
src_key_padding_mask: torch.Tensor (currently not used)
query_stream: torch.tensor, dim [num_predict, batch_size, d_model]
The query stream embeddings.
attention_mask_query: torch.Tensor, dim [seq_len, seq_len, batch_size, 1]
Attention mask for the content stream (target nodes cannot see themselves).
target_mapping: torch.Tensor, dim [num_predict, seq_len, batch_size]
Mapping indicating which tokens are being predicted during pre-training.
Entry [i,j,k] is 1 if token [j,k] is the i-th target.
relative_distances: Tuple[torch.Tensor].
Tuple containing the relative distances.
The first element is the [num_distances, batch_size, seq_len, seq_len] dimensional index tensor, indexing
the second element, which is the [num_distances, num_bins, batch_size, d_model] dimensional tensor
containing the encoded distance bins.
That is, the value at [d, b, i, j] of the index tensor is the index of the distance bin of the d-th distance
of tokens i and j in batch b.
token_distances: torch.Tensor, dim [num_token_distances, batch_size, d_model]
The distances between tokens.
edge_indices: torch.Tensor, dim [3, num_edges_in_batch]
edge_embeddings: torch.Tensor, dim [num_edges_in_batch, d_model]
need_weights: bool
Whether to also return the attention probabilities.
mems: torch.tensor (currently not used)
asserts: bool
Whether to verify dimensions via asserts.
Returns
-------
outputs: tuple containing
* the new content stream embeddings, dim [seq_len, batch_size, d_model]
* the new query stream embeddings (or None if no query stream input is provided),
dim [num_predict, batch_size, d_model]
* (Optional) if need_weights=True, tuple containing
- content stream attention probabilities, dim [seq_len, seq_len, batch_size, num_head]
- query stream attention probabilities, or None if no query stream input provided,
dim [seq_len, seq_len, batch_size, num_head]
"""
if mems is not None:
raise NotImplementedError('memory currently not implemented')
content_stream_cat = src
(seq_len, bsz) = src.shape[:2]
if asserts:
assert src.shape == (seq_len, bsz, self.d_model)
if relative_distances is not None and len(relative_distances) > 0:
assert relative_distances[0].shape == (self.self_attn.num_relative_distances - int(self.self_attn.use_token_distances) - int(self.self_attn.use_edge_embeddings), bsz, seq_len, seq_len)
assert relative_distances[1].shape[0] == self.self_attn.num_relative_distances - int(self.self_attn.use_token_distances) - int(self.self_attn.use_edge_embeddings)
assert relative_distances[1].shape[-2:] == (bsz, self.d_model)
if src_mask is not None:
assert src_mask.shape == (seq_len, seq_len, bsz)
if query_stream is not None:
num_predict = query_stream.shape[0]
assert query_stream.shape == (num_predict, bsz, self.d_model)
if attention_mask_query is not None:
assert attention_mask_query.shape == (seq_len, seq_len, bsz)
k_content_stream = F.linear(content_stream_cat, self.self_attn.get_k_proj_weight(), self.self_attn.get_k_proj_bias())
v_content_stream = F.linear(content_stream_cat, self.self_attn.get_v_proj_weight(), self.self_attn.get_v_proj_bias())
q_content_stream = F.linear(src, self.self_attn.get_q_proj_weight(), self.self_attn.get_q_proj_bias())
k_position_encoding = None
dist_ixs = None
if relative_distances is not None:
dist_ixs = relative_distances[0]
encoded_distances = relative_distances[1]
k_position_encoding = torch.einsum('rkbd,rhd->rkbh', encoded_distances, self.self_attn.get_r_proj_weight())
k_position_encoding = k_position_encoding + self.self_attn.get_r_proj_bias()[:, None, None]
k_token_pos_encoding = None
if token_distances is not None:
k_token_pos_encoding = torch.einsum('kbd,hd->kbh', token_distances, self.self_attn.get_r_token_proj_weight())
k_token_pos_encoding = k_token_pos_encoding + self.self_attn.get_r_token_proj_bias()[None, None]
k_edge_type_encoding = None
if edge_embeddings is not None and self.self_attn.use_edge_embeddings:
assert edge_indices is not None
k_edge_type_encoding = torch.einsum('kd,hd->kh', edge_embeddings, self.self_attn.get_r_edge_proj_weight())
k_edge_type_encoding = k_edge_type_encoding + self.self_attn.get_r_edge_proj_bias()[None]
else:
edge_indices = None
att_out_content = self.self_attn.forward(q_content_stream, k_content_stream, v_content_stream, position_keys=k_position_encoding, token_pos_keys=k_token_pos_encoding, attn_mask=src_mask, distance_indices=dist_ixs, need_weights=need_weights, edge_embeddings=k_edge_type_encoding, edge_indices=edge_indices, key_padding_mask=src_key_padding_mask)
if need_weights:
(att_out_content, att_probs_content) = att_out_content
src = src + self.dropout1(att_out_content)
src = self.norm1(src)
att_out_content = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(att_out_content)
src = self.norm2(src)
att_out_content = src
if query_stream is not None:
q_query_stream = F.linear(query_stream, self.self_attn.get_q_proj_weight(), self.self_attn.get_q_proj_bias())
if target_mapping is not None:
q_query_stream = torch.einsum('mbk,mlb->lbk', q_query_stream, target_mapping)
att_out_query = self.self_attn.forward(q_query_stream, k_content_stream, v_content_stream, position_keys=k_position_encoding, token_pos_keys=k_token_pos_encoding, attn_mask=attention_mask_query, distance_indices=dist_ixs, need_weights=need_weights, edge_embeddings=k_edge_type_encoding, edge_indices=edge_indices, key_padding_mask=src_key_padding_mask)
if need_weights:
(att_out_query, att_probs_query) = att_out_query
att_out_query = torch.einsum('lbk,mlb->mbk', att_out_query, target_mapping)
else:
att_out_query = self.self_attn.forward(q_query_stream, k_content_stream, v_content_stream, position_keys=k_position_encoding, token_pos_keys=k_token_pos_encoding, attn_mask=attention_mask_query, distance_indices=dist_ixs, need_weights=need_weights, edge_embeddings=k_edge_type_encoding, edge_indices=edge_indices, key_padding_mask=src_key_padding_mask)
if need_weights:
(att_out_query, att_probs_query) = att_out_query
query_stream = query_stream + self.dropout1(att_out_query)
query_stream = self.norm1(query_stream)
att_out_query = self.linear2(self.dropout(self.activation(self.linear1(query_stream))))
query_stream = query_stream + self.dropout2(att_out_query)
query_stream = self.norm2(query_stream)
att_out_query = query_stream
else:
att_out_query = None
att_probs_query = None
if not need_weights:
att_probs_content = None
att_probs_query = None
outputs = TransformerLayerOutput(content_stream_out=att_out_content, query_stream_out=att_out_query, attentions=Attentions(content_attention=att_probs_content, query_attention=att_probs_query))
return outputs
|
code-transformer
|
positive
|
def get_mounts():
"""Returns a list of mounted volume paths as reported by diskutil."""
<DeepExtract>
p = subprocess.Popen(['/usr/sbin/diskutil', 'list', '-plist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=False)
(out, err) = p.communicate()
if err:
self.log.debug(f"WARNING: errors from command '{', '.join(['/usr/sbin/diskutil', 'list', '-plist'])}':")
self.log.debug(err.decode())
(out, err) = (out, err)
</DeepExtract>
try:
du_list = plistlib.loads(out)
except ExpatError:
self.log.debug('WARNING: Error parsing diskutil output.')
self.log.debug(err)
return []
vols = set()
if 'AllDisksAndPartitions' in du_list:
for disk in du_list['AllDisksAndPartitions']:
if 'MountPoint' in disk:
vols.add(disk['MountPoint'])
if 'Partitions' in disk:
for part in disk['Partitions']:
if 'MountPoint' in part:
vols.add(part['MountPoint'])
else:
self.log.debug('Missing AllDisksAndPartitions key in diskutil output')
return list(vols)
|
def get_mounts():
"""Returns a list of mounted volume paths as reported by diskutil."""
p = subprocess.Popen(['/usr/sbin/diskutil', 'list', '-plist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=False)
(out, err) = p.communicate()
if err:
self.log.debug(f"WARNING: errors from command '{', '.join(['/usr/sbin/diskutil', 'list', '-plist'])}':")
self.log.debug(err.decode())
(out, err) = (out, err)
try:
du_list = plistlib.loads(out)
except ExpatError:
self.log.debug('WARNING: Error parsing diskutil output.')
self.log.debug(err)
return []
vols = set()
if 'AllDisksAndPartitions' in du_list:
for disk in du_list['AllDisksAndPartitions']:
if 'MountPoint' in disk:
vols.add(disk['MountPoint'])
if 'Partitions' in disk:
for part in disk['Partitions']:
if 'MountPoint' in part:
vols.add(part['MountPoint'])
else:
self.log.debug('Missing AllDisksAndPartitions key in diskutil output')
return list(vols)
|
autopkg
|
positive
|
def load_run(run_dir, device):
run_dir = Path(run_dir)
with open(run_dir / 'config.json', 'r') as f:
config = json.load(f)
<DeepExtract>
(train_loader, valid_loader, test_loader) = get_loaders(dataset=config['dataset'], device=device, data_root=config['data_root'], make_valid_loader=config['early_stopping'], train_batch_size=config['train_batch_size'], valid_batch_size=config['valid_batch_size'], test_batch_size=config['test_batch_size'])
density = get_density(schema=get_schema(config=config), x_train=train_loader.dataset.x)
density.to(device)
(density, train_loader, valid_loader, test_loader) = (density, train_loader, valid_loader, test_loader)
</DeepExtract>
try:
checkpoint = torch.load(run_dir / 'checkpoints' / 'best_valid.pt', map_location=device)
except FileNotFoundError:
checkpoint = torch.load(run_dir / 'checkpoints' / 'latest.pt', map_location=device)
print('Loaded checkpoint after epoch', checkpoint['epoch'])
density.load_state_dict(checkpoint['module_state_dict'])
return (density, train_loader, valid_loader, test_loader, config, checkpoint)
|
def load_run(run_dir, device):
run_dir = Path(run_dir)
with open(run_dir / 'config.json', 'r') as f:
config = json.load(f)
(train_loader, valid_loader, test_loader) = get_loaders(dataset=config['dataset'], device=device, data_root=config['data_root'], make_valid_loader=config['early_stopping'], train_batch_size=config['train_batch_size'], valid_batch_size=config['valid_batch_size'], test_batch_size=config['test_batch_size'])
density = get_density(schema=get_schema(config=config), x_train=train_loader.dataset.x)
density.to(device)
(density, train_loader, valid_loader, test_loader) = (density, train_loader, valid_loader, test_loader)
try:
checkpoint = torch.load(run_dir / 'checkpoints' / 'best_valid.pt', map_location=device)
except FileNotFoundError:
checkpoint = torch.load(run_dir / 'checkpoints' / 'latest.pt', map_location=device)
print('Loaded checkpoint after epoch', checkpoint['epoch'])
density.load_state_dict(checkpoint['module_state_dict'])
return (density, train_loader, valid_loader, test_loader, config, checkpoint)
|
cif
|
positive
|
def test_node_to_node(node_params):
"""
:class:`.Net_Node` s can directly send messages to each other with ``ROUTER``/``DEALER`` pairs.
.. code-block:: python
>>> node_1 = Net_Node(id='a', router_port=5000)
>>> node_2 = Net_Node(id='b', upstream='a', port=5000)
>>> node_2.send('a', 'KEY', 'VALUE')
>>> node_2.send('b', 'KEY', 'VALUE')
"""
global node1_received
global node2_received
node1_received = False
node2_received = False
def l_gotit(value):
global node1_received
global node2_received
if value == 'node1':
globals()['node1_received'] = True
elif value == 'node2':
globals()['node2_received'] = True
<DeepExtract>
def _node_params(**kwargs) -> dict:
paramdict = {'id': '', 'upstream': '', 'port': np.random.randint(*PORTRANGE), 'listens': {}, 'instance': False, 'upstream_ip': 'localhost', 'router_port': None, 'daemon': True, 'expand_on_receive': True}
paramdict.update(kwargs)
node_1_params = paramdict
node_1_params = _node_params
</DeepExtract>
<DeepExtract>
def _node_params(**kwargs) -> dict:
paramdict = {'id': '', 'upstream': '', 'port': np.random.randint(*PORTRANGE), 'listens': {}, 'instance': False, 'upstream_ip': 'localhost', 'router_port': None, 'daemon': True, 'expand_on_receive': True}
paramdict.update(kwargs)
node_2_params = paramdict
node_2_params = _node_params
</DeepExtract>
node_1 = Net_Node(**node_1_params)
node_2 = Net_Node(**node_2_params)
time.sleep(0.1)
node_2.send(to='a', key='GOTIT', value='node1')
time.sleep(0.1)
node_1.send(to='b', key='GOTIT', value='node2')
time.sleep(0.1)
assert node1_received
assert node2_received
node_1.release()
node_2.release()
|
def test_node_to_node(node_params):
"""
:class:`.Net_Node` s can directly send messages to each other with ``ROUTER``/``DEALER`` pairs.
.. code-block:: python
>>> node_1 = Net_Node(id='a', router_port=5000)
>>> node_2 = Net_Node(id='b', upstream='a', port=5000)
>>> node_2.send('a', 'KEY', 'VALUE')
>>> node_2.send('b', 'KEY', 'VALUE')
"""
global node1_received
global node2_received
node1_received = False
node2_received = False
def l_gotit(value):
global node1_received
global node2_received
if value == 'node1':
globals()['node1_received'] = True
elif value == 'node2':
globals()['node2_received'] = True
def _node_params(**kwargs) -> dict:
paramdict = {'id': '', 'upstream': '', 'port': np.random.randint(*PORTRANGE), 'listens': {}, 'instance': False, 'upstream_ip': 'localhost', 'router_port': None, 'daemon': True, 'expand_on_receive': True}
paramdict.update(kwargs)
node_1_params = paramdict
node_1_params = _node_params
def _node_params(**kwargs) -> dict:
paramdict = {'id': '', 'upstream': '', 'port': np.random.randint(*PORTRANGE), 'listens': {}, 'instance': False, 'upstream_ip': 'localhost', 'router_port': None, 'daemon': True, 'expand_on_receive': True}
paramdict.update(kwargs)
node_2_params = paramdict
node_2_params = _node_params
node_1 = Net_Node(**node_1_params)
node_2 = Net_Node(**node_2_params)
time.sleep(0.1)
node_2.send(to='a', key='GOTIT', value='node1')
time.sleep(0.1)
node_1.send(to='b', key='GOTIT', value='node2')
time.sleep(0.1)
assert node1_received
assert node2_received
node_1.release()
node_2.release()
|
autopilot
|
positive
|
def do_POST(self):
"""
Handle incoming HTTP POST request
"""
<DeepExtract>
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
</DeepExtract>
self.data_string = self.rfile.read(int(self.headers['Content-Length']))
self.send_response(200)
self.end_headers()
json_data = json.loads(self.data_string)
b64_data = json_data.get('data')
ftype = json_data.get('type')
data = base64.b64decode(b64_data)
output_dir = 'output'
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
fname = str().join([random.choice(string.lowercase + string.digits) for _ in range(3)]) + '.' + ftype
output_path = os.path.join(output_dir, fname)
with open(output_path, 'wb') as fp:
fp.write(data)
|
def do_POST(self):
"""
Handle incoming HTTP POST request
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.data_string = self.rfile.read(int(self.headers['Content-Length']))
self.send_response(200)
self.end_headers()
json_data = json.loads(self.data_string)
b64_data = json_data.get('data')
ftype = json_data.get('type')
data = base64.b64decode(b64_data)
output_dir = 'output'
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
fname = str().join([random.choice(string.lowercase + string.digits) for _ in range(3)]) + '.' + ftype
output_path = os.path.join(output_dir, fname)
with open(output_path, 'wb') as fp:
fp.write(data)
|
byob
|
positive
|
def create(self, data, program_version=None):
def is_block(__data):
return 'content_type' not in __data['data']
if program_version is not None:
if not isinstance(program_version, ProgramVersion):
raise NodeTreeCreatorException('Invalid program_version argument type')
program_interface = program_version.program.program_interface
external_variable_definitions = VariableDefinition.objects.filter(Q(program_argument__program_interface=program_interface) | Q(program_argument_field__program_argument__program_interface=program_interface)).order_by('name').distinct()
else:
external_variable_definitions = None
<DeepExtract>
variable_definitions = []
variable_by_name = {}
if external_variable_definitions is not None:
for variable_definition in external_variable_definitions:
if not isinstance(variable_definition, VariableDefinition):
raise NodeTreeCreatorException('Invalid variable_definition argument type')
variable_by_name[variable_definition.name] = dict(variables=[], variable_definition=variable_definition.id)
variables = self.collect_objects(data, get_content_type_id(Variable))
for variable in variables:
variable_name = variable['data']['name']
if variable_name not in variable_by_name:
variable_definition_id = VariableDefinition.objects.create(name=variable_name).id
variable_definition = {'data': {'content_type': get_content_type_id(VariableDefinition), 'object_id': variable_definition_id}}
variable_definitions.append(variable_definition)
variable_by_name[variable_name] = dict(variables=[], variable_definition=variable_definition_id)
else:
variable_definition_id = variable_by_name[variable_name]['variable_definition']
variable_by_name[variable_name]['variables'].append(variable)
del variable['data']['name']
variable['data']['definition_id'] = variable_definition_id
variable_definitions = variable_definitions
</DeepExtract>
if not is_block(data):
data = {'data': {}, 'children': [data]}
data['children'] = variable_definitions + data['children']
def create_content_object(_data):
if not is_block(_data):
<DeepExtract>
content_type = ContentType.objects.get(id=_data['data']['content_type'])
model_class = content_type.model_class()
if 'object_id' in _data['data']:
return
if model_class == VariableDefinition:
return
node_kwargs = [x.name for x in Node._meta.get_fields()]
content_object_kwargs = dict(((k, v) for (k, v) in _data['data'].items() if k not in node_kwargs))
content_object = model_class.objects.create(**content_object_kwargs)
_data['data']['object_id'] = content_object.id
for kwarg in content_object_kwargs.keys():
del _data['data'][kwarg]
return _data['data']
</DeepExtract>
for child in _data.get('children', []):
<DeepExtract>
content_type = ContentType.objects.get(id=child['content_type'])
model_class = content_type.model_class()
if 'object_id' in child:
return
if model_class == VariableDefinition:
return
node_kwargs = [x.name for x in Node._meta.get_fields()]
content_object_kwargs = dict(((k, v) for (k, v) in child.items() if k not in node_kwargs))
content_object = model_class.objects.create(**content_object_kwargs)
child['object_id'] = content_object.id
for kwarg in content_object_kwargs.keys():
del child[kwarg]
return child
</DeepExtract>
<DeepExtract>
content_type = ContentType.objects.get(id=data['content_type'])
model_class = content_type.model_class()
if 'object_id' in data:
return
if model_class == VariableDefinition:
return
node_kwargs = [x.name for x in Node._meta.get_fields()]
content_object_kwargs = dict(((k, v) for (k, v) in data.items() if k not in node_kwargs))
content_object = model_class.objects.create(**content_object_kwargs)
data['object_id'] = content_object.id
for kwarg in content_object_kwargs.keys():
del data[kwarg]
return data
</DeepExtract>
return Node.objects.get(id=Node.load_bulk([data])[0])
|
def create(self, data, program_version=None):
def is_block(__data):
return 'content_type' not in __data['data']
if program_version is not None:
if not isinstance(program_version, ProgramVersion):
raise NodeTreeCreatorException('Invalid program_version argument type')
program_interface = program_version.program.program_interface
external_variable_definitions = VariableDefinition.objects.filter(Q(program_argument__program_interface=program_interface) | Q(program_argument_field__program_argument__program_interface=program_interface)).order_by('name').distinct()
else:
external_variable_definitions = None
variable_definitions = []
variable_by_name = {}
if external_variable_definitions is not None:
for variable_definition in external_variable_definitions:
if not isinstance(variable_definition, VariableDefinition):
raise NodeTreeCreatorException('Invalid variable_definition argument type')
variable_by_name[variable_definition.name] = dict(variables=[], variable_definition=variable_definition.id)
variables = self.collect_objects(data, get_content_type_id(Variable))
for variable in variables:
variable_name = variable['data']['name']
if variable_name not in variable_by_name:
variable_definition_id = VariableDefinition.objects.create(name=variable_name).id
variable_definition = {'data': {'content_type': get_content_type_id(VariableDefinition), 'object_id': variable_definition_id}}
variable_definitions.append(variable_definition)
variable_by_name[variable_name] = dict(variables=[], variable_definition=variable_definition_id)
else:
variable_definition_id = variable_by_name[variable_name]['variable_definition']
variable_by_name[variable_name]['variables'].append(variable)
del variable['data']['name']
variable['data']['definition_id'] = variable_definition_id
variable_definitions = variable_definitions
if not is_block(data):
data = {'data': {}, 'children': [data]}
data['children'] = variable_definitions + data['children']
def create_content_object(_data):
if not is_block(_data):
content_type = ContentType.objects.get(id=_data['data']['content_type'])
model_class = content_type.model_class()
if 'object_id' in _data['data']:
return
if model_class == VariableDefinition:
return
node_kwargs = [x.name for x in Node._meta.get_fields()]
content_object_kwargs = dict(((k, v) for (k, v) in _data['data'].items() if k not in node_kwargs))
content_object = model_class.objects.create(**content_object_kwargs)
_data['data']['object_id'] = content_object.id
for kwarg in content_object_kwargs.keys():
del _data['data'][kwarg]
return _data['data']
for child in _data.get('children', []):
content_type = ContentType.objects.get(id=child['content_type'])
model_class = content_type.model_class()
if 'object_id' in child:
return
if model_class == VariableDefinition:
return
node_kwargs = [x.name for x in Node._meta.get_fields()]
content_object_kwargs = dict(((k, v) for (k, v) in child.items() if k not in node_kwargs))
content_object = model_class.objects.create(**content_object_kwargs)
child['object_id'] = content_object.id
for kwarg in content_object_kwargs.keys():
del child[kwarg]
return child
content_type = ContentType.objects.get(id=data['content_type'])
model_class = content_type.model_class()
if 'object_id' in data:
return
if model_class == VariableDefinition:
return
node_kwargs = [x.name for x in Node._meta.get_fields()]
content_object_kwargs = dict(((k, v) for (k, v) in data.items() if k not in node_kwargs))
content_object = model_class.objects.create(**content_object_kwargs)
data['object_id'] = content_object.id
for kwarg in content_object_kwargs.keys():
del data[kwarg]
return data
return Node.objects.get(id=Node.load_bulk([data])[0])
|
django-business-logic
|
positive
|
def _get_blobs(im, rois, target_scale, target_max_size):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
(blobs['data'], im_scale, blobs['im_info']) = blob_utils.get_image_blob(im, target_scale, target_max_size)
if rois is not None:
<DeepExtract>
(rois, levels) = _project_im_rois(rois, im_scale)
rois_blob = np.hstack((levels, rois))
blobs['rois'] = rois_blob.astype(np.float32, copy=False)
</DeepExtract>
return (blobs, im_scale)
|
def _get_blobs(im, rois, target_scale, target_max_size):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
(blobs['data'], im_scale, blobs['im_info']) = blob_utils.get_image_blob(im, target_scale, target_max_size)
if rois is not None:
(rois, levels) = _project_im_rois(rois, im_scale)
rois_blob = np.hstack((levels, rois))
blobs['rois'] = rois_blob.astype(np.float32, copy=False)
return (blobs, im_scale)
|
Context-aware-ZSR
|
positive
|
def consume(self):
if self.p == -1:
<DeepExtract>
self._fillBuffer(self.root)
self.p = 0
</DeepExtract>
self.p += 1
|
def consume(self):
if self.p == -1:
self._fillBuffer(self.root)
self.p = 0
self.p += 1
|
cpy
|
positive
|
def test_datetime_translators(self):
<DeepExtract>
async def create():
port = find_unused_port()
server = await aiozmq.rpc.serve_rpc(MyHandler(self.loop), bind='tcp://127.0.0.1:{}'.format(port), loop=self.loop if use_loop else None, log_exceptions=log_exceptions, exclude_log_exceptions=exclude_log_exceptions)
client = await aiozmq.rpc.connect_rpc(connect='tcp://127.0.0.1:{}'.format(port), loop=self.loop if use_loop else None, error_table=error_table, timeout=timeout)
(client, server) = (client, server)
(self.client, self.server) = self.loop.run_until_complete(create())
(client, server) = (self.client, self.server)
</DeepExtract>
async def communicate():
ret = await client.call.add(datetime.date(2014, 3, 21), datetime.timedelta(days=2))
self.assertEqual(datetime.date(2014, 3, 23), ret)
self.loop.run_until_complete(communicate())
|
def test_datetime_translators(self):
async def create():
port = find_unused_port()
server = await aiozmq.rpc.serve_rpc(MyHandler(self.loop), bind='tcp://127.0.0.1:{}'.format(port), loop=self.loop if use_loop else None, log_exceptions=log_exceptions, exclude_log_exceptions=exclude_log_exceptions)
client = await aiozmq.rpc.connect_rpc(connect='tcp://127.0.0.1:{}'.format(port), loop=self.loop if use_loop else None, error_table=error_table, timeout=timeout)
(client, server) = (client, server)
(self.client, self.server) = self.loop.run_until_complete(create())
(client, server) = (self.client, self.server)
async def communicate():
ret = await client.call.add(datetime.date(2014, 3, 21), datetime.timedelta(days=2))
self.assertEqual(datetime.date(2014, 3, 23), ret)
self.loop.run_until_complete(communicate())
|
aiozmq
|
positive
|
def _block_tri(c, p):
<DeepExtract>
ngram_set = set()
text_length = len(c.split())
max_index_ngram_start = text_length - 3
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(c.split()[i:i + 3]))
tri_c = ngram_set
</DeepExtract>
for s in p:
<DeepExtract>
ngram_set = set()
text_length = len(s.split())
max_index_ngram_start = text_length - 3
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(s.split()[i:i + 3]))
tri_s = ngram_set
</DeepExtract>
if len(tri_c.intersection(tri_s)) > 0:
return True
return False
|
def _block_tri(c, p):
ngram_set = set()
text_length = len(c.split())
max_index_ngram_start = text_length - 3
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(c.split()[i:i + 3]))
tri_c = ngram_set
for s in p:
ngram_set = set()
text_length = len(s.split())
max_index_ngram_start = text_length - 3
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(s.split()[i:i + 3]))
tri_s = ngram_set
if len(tri_c.intersection(tri_s)) > 0:
return True
return False
|
EssayKiller_V2
|
positive
|
def argument_preprocessor(args):
"""Perform processing of argument patterns which are not captured by
argparse, before being passed to argparse
:param args: system arguments from which special arguments need to be extracted
:type args: list
:returns: a tuple containing a list of the arguments which can be handled
by argparse and a dict of the extra arguments which this function has
extracted
:rtype: tuple
"""
args = sys.argv[1:] if args is None else args
extract_make_args = extract_cmake_and_make_and_catkin_make_arguments
(args, cmake_args, make_args, catkin_make_args) = extract_make_args(args)
<DeepExtract>
if not ' '.join(args):
jobs_args = None
regex = '(?:^|\\s)(-j\\s*\\d*)(?=$|\\s)|' + '(?:^|\\s)(-l\\s*\\d*\\.?\\d*)(?=$|\\s)|' + '(?:^|\\s)(--jobs(?:(?:=|\\s+)\\d+)?)(?=$|\\s)|' + '(?:^|\\s)(--load-average(?:(?:=|\\s+)\\d*\\.?\\d+)?)(?=$|\\s)'
filtered_flags = []
for match in re.findall(regex, ' '.join(args)):
filtered_flags.extend([m.strip() for m in match if m])
jobs_args = filtered_flags or None
</DeepExtract>
if jobs_args:
args = [arg for arg in args if arg not in jobs_args]
elif make_args is not None:
<DeepExtract>
if not ' '.join(make_args):
jobs_args = None
regex = '(?:^|\\s)(-j\\s*\\d*)(?=$|\\s)|' + '(?:^|\\s)(-l\\s*\\d*\\.?\\d*)(?=$|\\s)|' + '(?:^|\\s)(--jobs(?:(?:=|\\s+)\\d+)?)(?=$|\\s)|' + '(?:^|\\s)(--load-average(?:(?:=|\\s+)\\d*\\.?\\d+)?)(?=$|\\s)'
filtered_flags = []
for match in re.findall(regex, ' '.join(make_args)):
filtered_flags.extend([m.strip() for m in match if m])
jobs_args = filtered_flags or None
</DeepExtract>
if jobs_args:
make_args = [arg for arg in make_args if arg not in jobs_args]
extras = {'cmake_args': cmake_args, 'make_args': make_args, 'jobs_args': jobs_args, 'catkin_make_args': catkin_make_args}
return (args, extras)
|
def argument_preprocessor(args):
"""Perform processing of argument patterns which are not captured by
argparse, before being passed to argparse
:param args: system arguments from which special arguments need to be extracted
:type args: list
:returns: a tuple containing a list of the arguments which can be handled
by argparse and a dict of the extra arguments which this function has
extracted
:rtype: tuple
"""
args = sys.argv[1:] if args is None else args
extract_make_args = extract_cmake_and_make_and_catkin_make_arguments
(args, cmake_args, make_args, catkin_make_args) = extract_make_args(args)
if not ' '.join(args):
jobs_args = None
regex = '(?:^|\\s)(-j\\s*\\d*)(?=$|\\s)|' + '(?:^|\\s)(-l\\s*\\d*\\.?\\d*)(?=$|\\s)|' + '(?:^|\\s)(--jobs(?:(?:=|\\s+)\\d+)?)(?=$|\\s)|' + '(?:^|\\s)(--load-average(?:(?:=|\\s+)\\d*\\.?\\d+)?)(?=$|\\s)'
filtered_flags = []
for match in re.findall(regex, ' '.join(args)):
filtered_flags.extend([m.strip() for m in match if m])
jobs_args = filtered_flags or None
if jobs_args:
args = [arg for arg in args if arg not in jobs_args]
elif make_args is not None:
if not ' '.join(make_args):
jobs_args = None
regex = '(?:^|\\s)(-j\\s*\\d*)(?=$|\\s)|' + '(?:^|\\s)(-l\\s*\\d*\\.?\\d*)(?=$|\\s)|' + '(?:^|\\s)(--jobs(?:(?:=|\\s+)\\d+)?)(?=$|\\s)|' + '(?:^|\\s)(--load-average(?:(?:=|\\s+)\\d*\\.?\\d+)?)(?=$|\\s)'
filtered_flags = []
for match in re.findall(regex, ' '.join(make_args)):
filtered_flags.extend([m.strip() for m in match if m])
jobs_args = filtered_flags or None
if jobs_args:
make_args = [arg for arg in make_args if arg not in jobs_args]
extras = {'cmake_args': cmake_args, 'make_args': make_args, 'jobs_args': jobs_args, 'catkin_make_args': catkin_make_args}
return (args, extras)
|
catkin_tools
|
positive
|
def replace_isotime(subfilter):
(op, value) = list(subfilter.items())[0]
if op.lower() not in self.complex_operators:
field = list(value.keys())[0]
if field in self.timestamp_fields:
<DeepExtract>
try:
date_time = timeutils.parse_isotime(subfilter[op][field])
date_time = date_time.replace(tzinfo=None)
date_time = date_time
except ValueError:
LOG.exception('String %s is not a valid isotime', subfilter[op][field])
msg = _('Failed to parse the timestamp value %s') % subfilter[op][field]
raise base.ClientSideError(msg)
</DeepExtract>
subfilter[op][field] = date_time
|
def replace_isotime(subfilter):
(op, value) = list(subfilter.items())[0]
if op.lower() not in self.complex_operators:
field = list(value.keys())[0]
if field in self.timestamp_fields:
try:
date_time = timeutils.parse_isotime(subfilter[op][field])
date_time = date_time.replace(tzinfo=None)
date_time = date_time
except ValueError:
LOG.exception('String %s is not a valid isotime', subfilter[op][field])
msg = _('Failed to parse the timestamp value %s') % subfilter[op][field]
raise base.ClientSideError(msg)
subfilter[op][field] = date_time
|
aodh
|
positive
|
def calculate_skeletons(peaks: dict, animals_number: int) -> list:
"""
Creating skeletons from given peaks
There could be no more skeletons than animals_number
Only unique skeletons output
adaptive to chosen model origin
"""
if MODEL_ORIGIN == 'DLC':
if USE_DLSTREAM_POSTURE_DETECTION:
<DeepExtract>
cartesian_p = product(*peaks.values(), repeat=1)
def calculate_closest_distances(dots_cluster: list) -> float:
"""
Calculating a sum of all distances between all dots in a cluster
"""
dots_coordinates = (dot[0] for dot in dots_cluster)
product_sum = sum((calculate_distance(*c) for c in combinations(dots_coordinates, 2)))
animal_skeletons = product_sum
sorted_product = sorted(cartesian_p, key=lambda c: calculate_closest_distances(c), reverse=False)
def compare_clusters(unique_clusters: list, new_cluster: tuple) -> bool:
"""
Compare some new cluster against every existing unique cluster to find if it is unique
:param unique_clusters: list of existing unique cluster
:param new_cluster: cluster with same dots
:return: if new cluster is unique
"""
compare = lambda cl1, cl2: not any([s1 == s2 for (s1, s2) in zip(cl1, cl2)])
comparison = [compare(u_cluster, new_cluster) for u_cluster in unique_clusters]
animal_skeletons = all(comparison)
def create_animal_skeleton(dots_cluster: tuple) -> dict:
"""
Creating an easy to read skeleton from dots cluster
Format for each joint:
{'joint_name': (x,y)}
"""
skeleton = {}
for dot in dots_cluster:
skeleton[dot[-1]] = dot[0]
animal_skeletons = skeleton
top_unique_clusters = []
animal_skeletons = []
if sorted_product:
top_unique_clusters.append(sorted_product[0])
for cluster in sorted_product[1:]:
if compare_clusters(top_unique_clusters, cluster) and len(top_unique_clusters) < animals_number:
top_unique_clusters.append(cluster)
elif len(top_unique_clusters) == animals_number:
break
for unique_cluster in top_unique_clusters:
animal_skeletons.append(create_animal_skeleton(unique_cluster))
animal_skeletons = animal_skeletons
</DeepExtract>
else:
if FILTER_LIKELIHOOD:
<DeepExtract>
filtered_pose = peaks.copy()
if MODEL_ORIGIN == 'DLC':
' DLC pose output is an np.array with [bp*[X,Y, Likelihood]]'
for (num, bp) in enumerate(filtered_pose):
if bp[2] < LIKELIHOOD_THRESHOLD:
filtered_pose[num] = np.array([np.NaN, np.NaN, 2])
peaks = filtered_pose
</DeepExtract>
<DeepExtract>
skeletons = [transform_2skeleton(peaks)]
animal_skeletons = skeletons
</DeepExtract>
if animals_number != 1 and SPLIT_MA:
<DeepExtract>
flat_skeletons = animal_skeletons[0]
split_skeletons = []
(bp_per_animal, remainder) = divmod(len(flat_skeletons), ANIMALS_NUMBER)
if remainder > 0:
raise SkeletonError(f'The number of body parts ({len(flat_skeletons)}) cannot be split equally into {ANIMALS_NUMBER} animals.')
else:
for animal in range(ANIMALS_NUMBER):
single_skeleton = list(flat_skeletons.keys())[bp_per_animal * animal:bp_per_animal * animal + bp_per_animal]
split_skeletons.append({x: flat_skeletons[x] for x in flat_skeletons if x in single_skeleton})
animal_skeletons = split_skeletons
</DeepExtract>
else:
pass
elif MODEL_ORIGIN == 'MADLC':
<DeepExtract>
def filter_mapredictions(pose):
detection = []
conf = np.array(peaks['confidence'])
coords = np.array(peaks['coordinates'])
for (num, bp) in enumerate(peaks['coordinates'][0]):
if len(bp) > 0:
conf_bp = conf[num].flatten()
fltred_bp = bp[conf_bp >= threshold, :]
detection.append(fltred_bp)
else:
detection.append(np.array([]))
animal_skeletons = detection
def extract_to_animal_skeleton(coords):
"""
Creating an easy to read skeleton from dots cluster
Format for each joint:
{'joint_name': (x,y)}
"""
bodyparts = np.array(coords)
skeletons = {}
for bp in range(len(bodyparts)):
for animal_num in range(animals_number):
if 'Mouse' + str(animal_num + 1) not in skeletons.keys():
skeletons['Mouse' + str(animal_num + 1)] = {}
if len(bodyparts[bp]) >= animals_number:
skeletons['Mouse' + str(animal_num + 1)]['bp' + str(bp + 1)] = bodyparts[bp][animal_num].astype(float)
elif animal_num < len(bodyparts[bp]):
skeletons['Mouse' + str(animal_num + 1)]['bp' + str(bp + 1)] = bodyparts[bp][animal_num].astype(float)
else:
skeletons['Mouse' + str(animal_num + 1)]['bp' + str(bp + 1)] = np.array([np.NaN, np.NaN])
animal_skeletons = skeletons
detections = filter_mapredictions(peaks)
animal_skeletons = extract_to_animal_skeleton(detections)
animal_skeletons = list(animal_skeletons.values())
animal_skeletons = animal_skeletons
</DeepExtract>
if FLATTEN_MA:
<DeepExtract>
flat_skeletons = dict()
for (num, skeleton) in enumerate(animal_skeletons):
for (bp, value) in skeleton.items():
flat_skeletons[f'{num}_{bp}'] = value
animal_skeletons = [flat_skeletons]
</DeepExtract>
else:
pass
elif MODEL_ORIGIN == 'DLC-LIVE' or MODEL_ORIGIN == 'DEEPPOSEKIT':
<DeepExtract>
skeletons = [transform_2skeleton(peaks)]
animal_skeletons = skeletons
</DeepExtract>
if animals_number != 1 and (not SPLIT_MA):
raise SkeletonError('Multiple animals are currently not supported by DLC-LIVE. If you are using differently colored animals, please refer to the bodyparts directly (as a flattened skeleton) or use SPLIT_MA in the advanced settings.')
elif SPLIT_MA:
<DeepExtract>
flat_skeletons = animal_skeletons[0]
split_skeletons = []
(bp_per_animal, remainder) = divmod(len(flat_skeletons), ANIMALS_NUMBER)
if remainder > 0:
raise SkeletonError(f'The number of body parts ({len(flat_skeletons)}) cannot be split equally into {ANIMALS_NUMBER} animals.')
else:
for animal in range(ANIMALS_NUMBER):
single_skeleton = list(flat_skeletons.keys())[bp_per_animal * animal:bp_per_animal * animal + bp_per_animal]
split_skeletons.append({x: flat_skeletons[x] for x in flat_skeletons if x in single_skeleton})
animal_skeletons = split_skeletons
</DeepExtract>
else:
pass
elif MODEL_ORIGIN == 'SLEAP':
<DeepExtract>
skeletons = []
for animal in range(peaks.shape[0]):
skeleton = transform_2skeleton(peaks[animal])
skeletons.append(skeleton)
animal_skeletons = skeletons
</DeepExtract>
if FLATTEN_MA:
<DeepExtract>
flat_skeletons = dict()
for (num, skeleton) in enumerate(animal_skeletons):
for (bp, value) in skeleton.items():
flat_skeletons[f'{num}_{bp}'] = value
animal_skeletons = [flat_skeletons]
</DeepExtract>
elif animals_number != 1 and SPLIT_MA:
<DeepExtract>
flat_skeletons = animal_skeletons[0]
split_skeletons = []
(bp_per_animal, remainder) = divmod(len(flat_skeletons), ANIMALS_NUMBER)
if remainder > 0:
raise SkeletonError(f'The number of body parts ({len(flat_skeletons)}) cannot be split equally into {ANIMALS_NUMBER} animals.')
else:
for animal in range(ANIMALS_NUMBER):
single_skeleton = list(flat_skeletons.keys())[bp_per_animal * animal:bp_per_animal * animal + bp_per_animal]
split_skeletons.append({x: flat_skeletons[x] for x in flat_skeletons if x in single_skeleton})
animal_skeletons = split_skeletons
</DeepExtract>
else:
pass
<DeepExtract>
for skeleton in animal_skeletons:
for (bodypart, coordinates) in skeleton.items():
np_coords = np.array(coordinates)
if any(np.isnan(np_coords)):
if HANDLE_MISSING == 'pass':
pass
elif HANDLE_MISSING == 'skip':
animal_skeletons.remove(skeleton)
break
elif HANDLE_MISSING == 'null':
new_coordinates = np.nan_to_num(np_coords, copy=True)
skeleton[bodypart] = tuple(new_coordinates)
elif HANDLE_MISSING == 'reset':
reset_skeleton = {bp: (np.NaN, np.NaN) for bp in skeleton}
animal_skeletons = [reset_skeleton if i == skeleton else i for i in animal_skeletons]
break
else:
animal_skeletons.remove(skeleton)
break
animal_skeletons = animal_skeletons
</DeepExtract>
return animal_skeletons
|
def calculate_skeletons(peaks: dict, animals_number: int) -> list:
"""
    Create skeletons from the given peaks.
    There can be no more skeletons than animals_number, and only unique skeletons are output.
    Adapts to the chosen model origin.
"""
if MODEL_ORIGIN == 'DLC':
if USE_DLSTREAM_POSTURE_DETECTION:
cartesian_p = product(*peaks.values(), repeat=1)
def calculate_closest_distances(dots_cluster: list) -> float:
"""
Calculating a sum of all distances between all dots in a cluster
"""
dots_coordinates = (dot[0] for dot in dots_cluster)
product_sum = sum((calculate_distance(*c) for c in combinations(dots_coordinates, 2)))
animal_skeletons = product_sum
sorted_product = sorted(cartesian_p, key=lambda c: calculate_closest_distances(c), reverse=False)
def compare_clusters(unique_clusters: list, new_cluster: tuple) -> bool:
"""
                Compare a new cluster against every existing unique cluster to determine whether it is unique
                :param unique_clusters: list of existing unique clusters
                :param new_cluster: cluster with the same dots
                :return: whether the new cluster is unique
"""
compare = lambda cl1, cl2: not any([s1 == s2 for (s1, s2) in zip(cl1, cl2)])
comparison = [compare(u_cluster, new_cluster) for u_cluster in unique_clusters]
animal_skeletons = all(comparison)
def create_animal_skeleton(dots_cluster: tuple) -> dict:
"""
                Creating an easy-to-read skeleton from a dots cluster
Format for each joint:
{'joint_name': (x,y)}
"""
skeleton = {}
for dot in dots_cluster:
skeleton[dot[-1]] = dot[0]
animal_skeletons = skeleton
top_unique_clusters = []
animal_skeletons = []
if sorted_product:
top_unique_clusters.append(sorted_product[0])
for cluster in sorted_product[1:]:
if compare_clusters(top_unique_clusters, cluster) and len(top_unique_clusters) < animals_number:
top_unique_clusters.append(cluster)
elif len(top_unique_clusters) == animals_number:
break
for unique_cluster in top_unique_clusters:
animal_skeletons.append(create_animal_skeleton(unique_cluster))
animal_skeletons = animal_skeletons
else:
if FILTER_LIKELIHOOD:
filtered_pose = peaks.copy()
if MODEL_ORIGIN == 'DLC':
' DLC pose output is an np.array with [bp*[X,Y, Likelihood]]'
for (num, bp) in enumerate(filtered_pose):
if bp[2] < LIKELIHOOD_THRESHOLD:
filtered_pose[num] = np.array([np.NaN, np.NaN, 2])
peaks = filtered_pose
skeletons = [transform_2skeleton(peaks)]
animal_skeletons = skeletons
if animals_number != 1 and SPLIT_MA:
flat_skeletons = animal_skeletons[0]
split_skeletons = []
(bp_per_animal, remainder) = divmod(len(flat_skeletons), ANIMALS_NUMBER)
if remainder > 0:
raise SkeletonError(f'The number of body parts ({len(flat_skeletons)}) cannot be split equally into {ANIMALS_NUMBER} animals.')
else:
for animal in range(ANIMALS_NUMBER):
single_skeleton = list(flat_skeletons.keys())[bp_per_animal * animal:bp_per_animal * animal + bp_per_animal]
split_skeletons.append({x: flat_skeletons[x] for x in flat_skeletons if x in single_skeleton})
animal_skeletons = split_skeletons
else:
pass
elif MODEL_ORIGIN == 'MADLC':
def filter_mapredictions(pose):
detection = []
conf = np.array(peaks['confidence'])
coords = np.array(peaks['coordinates'])
for (num, bp) in enumerate(peaks['coordinates'][0]):
if len(bp) > 0:
conf_bp = conf[num].flatten()
fltred_bp = bp[conf_bp >= threshold, :]
detection.append(fltred_bp)
else:
detection.append(np.array([]))
animal_skeletons = detection
def extract_to_animal_skeleton(coords):
"""
            Creating an easy-to-read skeleton from a dots cluster
Format for each joint:
{'joint_name': (x,y)}
"""
bodyparts = np.array(coords)
skeletons = {}
for bp in range(len(bodyparts)):
for animal_num in range(animals_number):
if 'Mouse' + str(animal_num + 1) not in skeletons.keys():
skeletons['Mouse' + str(animal_num + 1)] = {}
if len(bodyparts[bp]) >= animals_number:
skeletons['Mouse' + str(animal_num + 1)]['bp' + str(bp + 1)] = bodyparts[bp][animal_num].astype(float)
elif animal_num < len(bodyparts[bp]):
skeletons['Mouse' + str(animal_num + 1)]['bp' + str(bp + 1)] = bodyparts[bp][animal_num].astype(float)
else:
skeletons['Mouse' + str(animal_num + 1)]['bp' + str(bp + 1)] = np.array([np.NaN, np.NaN])
animal_skeletons = skeletons
detections = filter_mapredictions(peaks)
animal_skeletons = extract_to_animal_skeleton(detections)
animal_skeletons = list(animal_skeletons.values())
animal_skeletons = animal_skeletons
if FLATTEN_MA:
flat_skeletons = dict()
for (num, skeleton) in enumerate(animal_skeletons):
for (bp, value) in skeleton.items():
flat_skeletons[f'{num}_{bp}'] = value
animal_skeletons = [flat_skeletons]
else:
pass
elif MODEL_ORIGIN == 'DLC-LIVE' or MODEL_ORIGIN == 'DEEPPOSEKIT':
skeletons = [transform_2skeleton(peaks)]
animal_skeletons = skeletons
if animals_number != 1 and (not SPLIT_MA):
raise SkeletonError('Multiple animals are currently not supported by DLC-LIVE. If you are using differently colored animals, please refer to the bodyparts directly (as a flattened skeleton) or use SPLIT_MA in the advanced settings.')
elif SPLIT_MA:
flat_skeletons = animal_skeletons[0]
split_skeletons = []
(bp_per_animal, remainder) = divmod(len(flat_skeletons), ANIMALS_NUMBER)
if remainder > 0:
raise SkeletonError(f'The number of body parts ({len(flat_skeletons)}) cannot be split equally into {ANIMALS_NUMBER} animals.')
else:
for animal in range(ANIMALS_NUMBER):
single_skeleton = list(flat_skeletons.keys())[bp_per_animal * animal:bp_per_animal * animal + bp_per_animal]
split_skeletons.append({x: flat_skeletons[x] for x in flat_skeletons if x in single_skeleton})
animal_skeletons = split_skeletons
else:
pass
elif MODEL_ORIGIN == 'SLEAP':
skeletons = []
for animal in range(peaks.shape[0]):
skeleton = transform_2skeleton(peaks[animal])
skeletons.append(skeleton)
animal_skeletons = skeletons
if FLATTEN_MA:
flat_skeletons = dict()
for (num, skeleton) in enumerate(animal_skeletons):
for (bp, value) in skeleton.items():
flat_skeletons[f'{num}_{bp}'] = value
animal_skeletons = [flat_skeletons]
elif animals_number != 1 and SPLIT_MA:
flat_skeletons = animal_skeletons[0]
split_skeletons = []
(bp_per_animal, remainder) = divmod(len(flat_skeletons), ANIMALS_NUMBER)
if remainder > 0:
raise SkeletonError(f'The number of body parts ({len(flat_skeletons)}) cannot be split equally into {ANIMALS_NUMBER} animals.')
else:
for animal in range(ANIMALS_NUMBER):
single_skeleton = list(flat_skeletons.keys())[bp_per_animal * animal:bp_per_animal * animal + bp_per_animal]
split_skeletons.append({x: flat_skeletons[x] for x in flat_skeletons if x in single_skeleton})
animal_skeletons = split_skeletons
else:
pass
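    # Handle body parts with missing (NaN) coordinates according to HANDLE_MISSING:
    # 'pass' keeps them as NaN, 'skip' drops the affected skeleton, 'null' replaces NaN with 0,
    # and 'reset' sets every body part of that skeleton to NaN; any other value drops the skeleton.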
for skeleton in animal_skeletons:
for (bodypart, coordinates) in skeleton.items():
np_coords = np.array(coordinates)
if any(np.isnan(np_coords)):
if HANDLE_MISSING == 'pass':
pass
elif HANDLE_MISSING == 'skip':
animal_skeletons.remove(skeleton)
break
elif HANDLE_MISSING == 'null':
new_coordinates = np.nan_to_num(np_coords, copy=True)
skeleton[bodypart] = tuple(new_coordinates)
elif HANDLE_MISSING == 'reset':
reset_skeleton = {bp: (np.NaN, np.NaN) for bp in skeleton}
animal_skeletons = [reset_skeleton if i == skeleton else i for i in animal_skeletons]
break
else:
animal_skeletons.remove(skeleton)
break
animal_skeletons = animal_skeletons
return animal_skeletons
|
DeepLabStream
|
positive
|
def GetProfileCallBack(self, text):
def ProfileCallBack(event):
self.Manager.AddSpecificEntryToCurrent(text)
<DeepExtract>
pass
</DeepExtract>
<DeepExtract>
pass
</DeepExtract>
return ProfileCallBack
|
def GetProfileCallBack(self, text):
def ProfileCallBack(event):
self.Manager.AddSpecificEntryToCurrent(text)
pass
pass
return ProfileCallBack
|
CANFestivino
|
positive
|
def parse_app_metadata(xml: str, schema: str, pre_xslt: str, xslt: str) -> Dict:
"""
Parses, validates and maps the xml onto a dict
:argument xml the info.xml string to parse
:argument schema the schema xml as string
:argument pre_xslt xslt which is run before validation to ensure that
everything is in the correct order and that unknown elements are excluded
:argument xslt the xslt to transform it to a matching structure
:raises InvalidAppMetadataXmlException if the schema does not validate
:return the parsed xml as dict
"""
<DeepExtract>
parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True, remove_comments=True, load_dtd=False, remove_blank_text=True, dtd_validation=False)
</DeepExtract>
try:
doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser)
except lxml.etree.XMLSyntaxError as e:
msg = 'info.xml contains malformed xml: %s' % e
raise XMLSyntaxError(msg)
for _ in doc.iter(lxml.etree.Entity):
raise InvalidAppMetadataXmlException('Must not contain entities')
pre_transform = lxml.etree.XSLT(lxml.etree.XML(pre_xslt))
pre_transformed_doc = pre_transform(doc)
schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser)
schema = lxml.etree.XMLSchema(schema_doc)
try:
schema.assertValid(pre_transformed_doc)
except lxml.etree.DocumentInvalid as e:
msg = 'info.xml did not validate: %s' % e
raise InvalidAppMetadataXmlException(msg)
transform = lxml.etree.XSLT(lxml.etree.XML(xslt))
transformed_doc = transform(pre_transformed_doc)
<DeepExtract>
type = transformed_doc.getroot().get('type')
key = transformed_doc.getroot().tag.replace('-', '_')
if type == 'int' and transformed_doc.getroot().text is not None:
mapped = {key: int(transformed_doc.getroot().text)}
elif type == 'list':
mapped = {key: list(map(element_to_dict, transformed_doc.getroot().iterchildren()))}
elif type == 'min-version':
mapped = {key: pad_min_version(transformed_doc.getroot().text), 'raw_%s' % key: raw_version(transformed_doc.getroot().text)}
elif type == 'max-version':
mapped = {key: pad_max_version(transformed_doc.getroot().text), 'raw_%s' % key: raw_version(transformed_doc.getroot().text)}
elif len(list(transformed_doc.getroot())) > 0:
contents = {}
for child in transformed_doc.getroot().iterchildren():
contents.update(element_to_dict(child))
mapped = {key: contents}
else:
mapped = {key: transformed_doc.getroot().text}
</DeepExtract>
<DeepExtract>
app = mapped['app']
translated_fields = ['name', 'summary', 'description']
for field in translated_fields:
if 'en' not in app[field]:
msg = 'At least one element "%s" with lang "en" required' % field
raise InvalidAppMetadataXmlException(msg)
</DeepExtract>
<DeepExtract>
app = mapped['app']
trans_fields = ['name', 'summary', 'description']
fields = [field for field in trans_fields if field in app]
codes = set()
for field in fields:
codes |= set(app[field].keys())
for field in fields:
absent_codes = [code for code in codes if code not in app[field]]
for code in absent_codes:
app[field][code] = app[field]['en']
</DeepExtract>
return mapped
|
def parse_app_metadata(xml: str, schema: str, pre_xslt: str, xslt: str) -> Dict:
"""
Parses, validates and maps the xml onto a dict
:argument xml the info.xml string to parse
:argument schema the schema xml as string
:argument pre_xslt xslt which is run before validation to ensure that
everything is in the correct order and that unknown elements are excluded
:argument xslt the xslt to transform it to a matching structure
:raises InvalidAppMetadataXmlException if the schema does not validate
:return the parsed xml as dict
"""
parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True, remove_comments=True, load_dtd=False, remove_blank_text=True, dtd_validation=False)
try:
doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser)
except lxml.etree.XMLSyntaxError as e:
msg = 'info.xml contains malformed xml: %s' % e
raise XMLSyntaxError(msg)
for _ in doc.iter(lxml.etree.Entity):
raise InvalidAppMetadataXmlException('Must not contain entities')
pre_transform = lxml.etree.XSLT(lxml.etree.XML(pre_xslt))
pre_transformed_doc = pre_transform(doc)
schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser)
schema = lxml.etree.XMLSchema(schema_doc)
try:
schema.assertValid(pre_transformed_doc)
except lxml.etree.DocumentInvalid as e:
msg = 'info.xml did not validate: %s' % e
raise InvalidAppMetadataXmlException(msg)
transform = lxml.etree.XSLT(lxml.etree.XML(xslt))
transformed_doc = transform(pre_transformed_doc)
type = transformed_doc.getroot().get('type')
key = transformed_doc.getroot().tag.replace('-', '_')
if type == 'int' and transformed_doc.getroot().text is not None:
mapped = {key: int(transformed_doc.getroot().text)}
elif type == 'list':
mapped = {key: list(map(element_to_dict, transformed_doc.getroot().iterchildren()))}
elif type == 'min-version':
mapped = {key: pad_min_version(transformed_doc.getroot().text), 'raw_%s' % key: raw_version(transformed_doc.getroot().text)}
elif type == 'max-version':
mapped = {key: pad_max_version(transformed_doc.getroot().text), 'raw_%s' % key: raw_version(transformed_doc.getroot().text)}
elif len(list(transformed_doc.getroot())) > 0:
contents = {}
for child in transformed_doc.getroot().iterchildren():
contents.update(element_to_dict(child))
mapped = {key: contents}
else:
mapped = {key: transformed_doc.getroot().text}
app = mapped['app']
translated_fields = ['name', 'summary', 'description']
for field in translated_fields:
if 'en' not in app[field]:
msg = 'At least one element "%s" with lang "en" required' % field
raise InvalidAppMetadataXmlException(msg)
app = mapped['app']
trans_fields = ['name', 'summary', 'description']
fields = [field for field in trans_fields if field in app]
codes = set()
for field in fields:
codes |= set(app[field].keys())
for field in fields:
absent_codes = [code for code in codes if code not in app[field]]
for code in absent_codes:
app[field][code] = app[field]['en']
return mapped
|
appstore
|
positive
|
def run(self):
self.helper.log_info('Starting SentinelOne Threats Connector')
while True:
try:
current_state = self.helper.get_state()
last_created_at = None
if current_state is not None and 'last_created_at' in current_state:
last_created_at = current_state['last_created_at']
self.helper.log_info(f'Connector last processed a threat created at: {last_created_at}')
else:
self.helper.log_info('Connector has never run')
for threat_list in self.s1.get_threats(created_at_gt=last_created_at):
for threat_dict in threat_list['data']:
threat_info_dict = threat_dict.get('threatInfo')
threat_id = threat_info_dict.get('threatId')
threat_name = threat_info_dict.get('threatName')
sha1 = threat_info_dict.get('sha1')
classification = threat_info_dict.get('classification')
classification_source = threat_info_dict.get('classificationSource')
file_ext = threat_info_dict.get('fileExtension')
verdict = threat_info_dict.get('analystVerdict')
file_path = threat_info_dict.get('filePath', threat_name)
confidence = threat_info_dict.get('confidenceLevel')
created_at = threat_info_dict.get('createdAt')
self.helper.log_info(f'Processing threat name {threat_name} with sha1 {sha1} created at {created_at}')
self.helper.set_state({'last_created_at': created_at})
if self.skip_false_positives and verdict == 'false_positive':
self.helper.log_info('Skipping as it was a false positive.')
continue
if self.skip_suspicious and confidence == 'suspicious':
self.helper.log_info('Skipping as confidence level for the threat is suspicious.')
continue
if self.skip_pua and classification == 'PUA':
self.helper.log_info('Skipping as it was a PUA.')
continue
if self.include_file_extensions:
if file_ext not in self.include_file_extensions:
self.helper.log_info(f'Skipping as it did not match a file extension in the included list: {file_ext}')
continue
if sha1 and self.artifact_exists_opencti(sha1):
self.helper.log_info('Skipping Artifact as it already exists in OpenCTI.')
continue
file_contents = self.s1.download_threat(threat_id)
if not file_contents:
self.helper.log_info('Skipping as the download failed.')
continue
<DeepExtract>
mime_type = magic.from_buffer(file_contents, mime=True)
kwargs = {'file_name': file_path, 'data': file_contents, 'mime_type': mime_type, 'x_opencti_description': f'Threat detected by SentinelOne, classification source: {classification_source}, verdict: {verdict}.'}
response = self.helper.api.stix_cyber_observable.upload_artifact(**kwargs)
</DeepExtract>
for label_id in self.label_ids:
self.helper.api.stix_cyber_observable.add_label(id=response['id'], label_id=label_id)
if self.file_extension_label and file_ext:
label = self.helper.api.label.create(value=file_ext, color=self.file_extension_label_color)
self.helper.api.stix_cyber_observable.add_label(id=response['id'], label_id=label['id'])
if self.classification_label and classification:
label = self.helper.api.label.create(value=classification, color=self.classification_label_color)
self.helper.api.stix_cyber_observable.add_label(id=response['id'], label_id=label['id'])
except (KeyboardInterrupt, SystemExit):
self.helper.log_info('Connector stop')
sys.exit(0)
except Exception as e:
self.helper.log_error(f'Exception: {e}, traceback: {traceback.format_exc()}')
if self.helper.connect_run_and_terminate:
self.helper.log_info('Connector stop')
sys.exit(0)
self.helper.log_info(f'Re-checking for new threats in {self.cooldown_seconds} seconds...')
time.sleep(self.cooldown_seconds)
|
def run(self):
self.helper.log_info('Starting SentinelOne Threats Connector')
while True:
try:
current_state = self.helper.get_state()
last_created_at = None
if current_state is not None and 'last_created_at' in current_state:
last_created_at = current_state['last_created_at']
self.helper.log_info(f'Connector last processed a threat created at: {last_created_at}')
else:
self.helper.log_info('Connector has never run')
for threat_list in self.s1.get_threats(created_at_gt=last_created_at):
for threat_dict in threat_list['data']:
threat_info_dict = threat_dict.get('threatInfo')
threat_id = threat_info_dict.get('threatId')
threat_name = threat_info_dict.get('threatName')
sha1 = threat_info_dict.get('sha1')
classification = threat_info_dict.get('classification')
classification_source = threat_info_dict.get('classificationSource')
file_ext = threat_info_dict.get('fileExtension')
verdict = threat_info_dict.get('analystVerdict')
file_path = threat_info_dict.get('filePath', threat_name)
confidence = threat_info_dict.get('confidenceLevel')
created_at = threat_info_dict.get('createdAt')
self.helper.log_info(f'Processing threat name {threat_name} with sha1 {sha1} created at {created_at}')
self.helper.set_state({'last_created_at': created_at})
if self.skip_false_positives and verdict == 'false_positive':
self.helper.log_info('Skipping as it was a false positive.')
continue
if self.skip_suspicious and confidence == 'suspicious':
self.helper.log_info('Skipping as confidence level for the threat is suspicious.')
continue
if self.skip_pua and classification == 'PUA':
self.helper.log_info('Skipping as it was a PUA.')
continue
if self.include_file_extensions:
if file_ext not in self.include_file_extensions:
self.helper.log_info(f'Skipping as it did not match a file extension in the included list: {file_ext}')
continue
if sha1 and self.artifact_exists_opencti(sha1):
self.helper.log_info('Skipping Artifact as it already exists in OpenCTI.')
continue
file_contents = self.s1.download_threat(threat_id)
if not file_contents:
self.helper.log_info('Skipping as the download failed.')
continue
mime_type = magic.from_buffer(file_contents, mime=True)
kwargs = {'file_name': file_path, 'data': file_contents, 'mime_type': mime_type, 'x_opencti_description': f'Threat detected by SentinelOne, classification source: {classification_source}, verdict: {verdict}.'}
response = self.helper.api.stix_cyber_observable.upload_artifact(**kwargs)
for label_id in self.label_ids:
self.helper.api.stix_cyber_observable.add_label(id=response['id'], label_id=label_id)
if self.file_extension_label and file_ext:
label = self.helper.api.label.create(value=file_ext, color=self.file_extension_label_color)
self.helper.api.stix_cyber_observable.add_label(id=response['id'], label_id=label['id'])
if self.classification_label and classification:
label = self.helper.api.label.create(value=classification, color=self.classification_label_color)
self.helper.api.stix_cyber_observable.add_label(id=response['id'], label_id=label['id'])
except (KeyboardInterrupt, SystemExit):
self.helper.log_info('Connector stop')
sys.exit(0)
except Exception as e:
self.helper.log_error(f'Exception: {e}, traceback: {traceback.format_exc()}')
if self.helper.connect_run_and_terminate:
self.helper.log_info('Connector stop')
sys.exit(0)
self.helper.log_info(f'Re-checking for new threats in {self.cooldown_seconds} seconds...')
time.sleep(self.cooldown_seconds)
|
connectors
|
positive
|
def __init__(self, args=None):
"""
Initialize.
    :param args: optional arguments to use instead of sys.argv
:type args: [str]
"""
self.args = None
' Arguments of program. '
<DeepExtract>
parser = argparse.ArgumentParser(description='Test attacks on classifier.')
parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing dataset.', type=str)
parser.add_argument('-train_images_file', default=paths.train_images_file(), help='HDF5 file containing dataset.', type=str)
parser.add_argument('-test_theta_file', default=paths.results_file('test_theta'), help='HDF5 file containing dataset.', type=str)
parser.add_argument('-train_theta_file', default=paths.results_file('train_theta'), help='HDF5 file containing dataset.', type=str)
parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing labels.', type=str)
parser.add_argument('-decoder_files', default=paths.state_file('decoder'), help='Decoder files.', type=str)
parser.add_argument('-label_index', default=2, help='Column index in label file.', type=int)
parser.add_argument('-accuracy_file', default=paths.results_file('learned_decoder/accuracy'), help='Correctly classified test samples of classifier.', type=str)
parser.add_argument('-perturbations_file', default=paths.results_file('learned_decoder/perturbations'), help='HDF5 file containing perturbations.', type=str)
parser.add_argument('-success_file', default=paths.results_file('learned_decoder/success'), help='HDF5 file indicating attack success.', type=str)
parser.add_argument('-plot_directory', default=paths.experiment_dir('learned_decoder'), help='Path to PNG plot file for success rate.', type=str)
parser.add_argument('-results_file', default='', help='Path to pickled results file.', type=str)
parser.add_argument('-batch_size', default=128, help='Batch size of attack.', type=int)
parser.add_argument('-plot_manifolds', default=False, action='store_true', help='Whether to plot manifolds.')
parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')
parser.add_argument('-bound', default=2, help='Bound to consider for samples in latent space.', type=float)
parser.add_argument('-latent_space_size', default=10, help='Size of latent space.', type=int)
parser.add_argument('-decoder_architecture', default='standard', help='Architecture to use.', type=str)
parser.add_argument('-decoder_activation', default='relu', help='Activation function to use.', type=str)
parser.add_argument('-decoder_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
parser.add_argument('-decoder_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
parser.add_argument('-decoder_dropout', default=False, action='store_true', help='Whether to use dropout.')
parser.add_argument('-decoder_units', default='1024,1024,1024,1024', help='Units for MLP.')
parser = parser
</DeepExtract>
if args is not None:
self.args = parser.parse_args(args)
else:
self.args = parser.parse_args()
self.test_images = None
' (numpy.ndarray) Test images. '
self.train_images = None
' (numpy.ndarray) Train images. '
self.test_theta = None
' (numpy.ndarray) Transformation parameters. '
self.train_theta = None
    ' (numpy.ndarray) Transformation parameters, i.e. latent codes. '
self.perturbations = None
' (numpy.ndarray) Perturbations per test image. '
self.test_codes = None
' (numpy.ndarray) Test codes.'
self.success = None
' (numpy.ndarray) Success indicator for perturbations.'
self.norms = [1, 2, float('inf')]
' ([float]) Norms to evaluate. '
self.results = []
    ' ([dict]) List of dictionaries, one per norm, containing all statistics. '
for n in range(len(self.norms)):
self.results.append(dict())
self.N_font = None
' (int) Number of fonts. '
self.N_class = None
' (int) Number of classes. '
self.N_attempts = None
    ' (int) Number of attack attempts. '
self.theta_images = None
    ' (numpy.ndarray) Will hold images. '
self.perturbation_images = None
' (numpy.ndarray) Will hold images for perturbations. '
self.perturbation_codes = None
' (numpy.ndarray) Perturbation codes. '
self.perturbation_theta = None
' (numpy.ndarray) Perturbation transformations. '
self.model = None
' (Decoder) Decoder. '
self.pca = None
' (sklearn.decomposition.IncrementalPCA) PCA to make nearest neighbor more efficient. '
self.neighbors = None
' (sklearn.neighbors.NearestNeighbors) Nearest neighbor model. '
log('-- ' + self.__class__.__name__)
for key in vars(self.args):
log('[Testing] %s=%s' % (key, str(getattr(self.args, key))))
|
def __init__(self, args=None):
"""
Initialize.
    :param args: optional arguments to use instead of sys.argv
:type args: [str]
"""
self.args = None
' Arguments of program. '
parser = argparse.ArgumentParser(description='Test attacks on classifier.')
parser.add_argument('-test_images_file', default=paths.test_images_file(), help='HDF5 file containing dataset.', type=str)
parser.add_argument('-train_images_file', default=paths.train_images_file(), help='HDF5 file containing dataset.', type=str)
parser.add_argument('-test_theta_file', default=paths.results_file('test_theta'), help='HDF5 file containing dataset.', type=str)
parser.add_argument('-train_theta_file', default=paths.results_file('train_theta'), help='HDF5 file containing dataset.', type=str)
parser.add_argument('-test_codes_file', default=paths.test_codes_file(), help='HDF5 file containing labels.', type=str)
parser.add_argument('-decoder_files', default=paths.state_file('decoder'), help='Decoder files.', type=str)
parser.add_argument('-label_index', default=2, help='Column index in label file.', type=int)
parser.add_argument('-accuracy_file', default=paths.results_file('learned_decoder/accuracy'), help='Correctly classified test samples of classifier.', type=str)
parser.add_argument('-perturbations_file', default=paths.results_file('learned_decoder/perturbations'), help='HDF5 file containing perturbations.', type=str)
parser.add_argument('-success_file', default=paths.results_file('learned_decoder/success'), help='HDF5 file indicating attack success.', type=str)
parser.add_argument('-plot_directory', default=paths.experiment_dir('learned_decoder'), help='Path to PNG plot file for success rate.', type=str)
parser.add_argument('-results_file', default='', help='Path to pickled results file.', type=str)
parser.add_argument('-batch_size', default=128, help='Batch size of attack.', type=int)
parser.add_argument('-plot_manifolds', default=False, action='store_true', help='Whether to plot manifolds.')
parser.add_argument('-no_gpu', dest='use_gpu', action='store_false')
parser.add_argument('-bound', default=2, help='Bound to consider for samples in latent space.', type=float)
parser.add_argument('-latent_space_size', default=10, help='Size of latent space.', type=int)
parser.add_argument('-decoder_architecture', default='standard', help='Architecture to use.', type=str)
parser.add_argument('-decoder_activation', default='relu', help='Activation function to use.', type=str)
parser.add_argument('-decoder_no_batch_normalization', default=False, help='Do not use batch normalization.', action='store_true')
parser.add_argument('-decoder_channels', default=16, help='Channels of first convolutional layer, afterwards channels are doubled.', type=int)
parser.add_argument('-decoder_dropout', default=False, action='store_true', help='Whether to use dropout.')
parser.add_argument('-decoder_units', default='1024,1024,1024,1024', help='Units for MLP.')
parser = parser
if args is not None:
self.args = parser.parse_args(args)
else:
self.args = parser.parse_args()
self.test_images = None
' (numpy.ndarray) Test images. '
self.train_images = None
' (numpy.ndarray) Train images. '
self.test_theta = None
' (numpy.ndarray) Transformation parameters. '
self.train_theta = None
    ' (numpy.ndarray) Transformation parameters, i.e. latent codes. '
self.perturbations = None
' (numpy.ndarray) Perturbations per test image. '
self.test_codes = None
' (numpy.ndarray) Test codes.'
self.success = None
' (numpy.ndarray) Success indicator for perturbations.'
self.norms = [1, 2, float('inf')]
' ([float]) Norms to evaluate. '
self.results = []
    ' ([dict]) List of dictionaries, one per norm, containing all statistics. '
for n in range(len(self.norms)):
self.results.append(dict())
self.N_font = None
' (int) Number of fonts. '
self.N_class = None
' (int) Number of classes. '
self.N_attempts = None
    ' (int) Number of attack attempts. '
self.theta_images = None
    ' (numpy.ndarray) Will hold images. '
self.perturbation_images = None
' (numpy.ndarray) Will hold images for perturbations. '
self.perturbation_codes = None
' (numpy.ndarray) Perturbation codes. '
self.perturbation_theta = None
' (numpy.ndarray) Perturbation transformations. '
self.model = None
' (Decoder) Decoder. '
self.pca = None
' (sklearn.decomposition.IncrementalPCA) PCA to make nearest neighbor more efficient. '
self.neighbors = None
' (sklearn.neighbors.NearestNeighbors) Nearest neighbor model. '
log('-- ' + self.__class__.__name__)
for key in vars(self.args):
log('[Testing] %s=%s' % (key, str(getattr(self.args, key))))
|
disentangling-robustness-generalization
|
positive
|
def unflatten(self, node, paths, fstruct):
<DeepExtract>
if get_child is None:
get_child = node.__getitem__
if rewrite_subpath is None:
def rewrite_subpath(subpath):
mapstruct = subpath
node_name = node.name
if node_name:
prefix = node_name + '.'
else:
prefix = ''
prefix_len = len(prefix)
appstruct = {}
subfstruct = {}
subpaths = []
curname = None
for path in paths:
if path == node_name:
continue
assert path.startswith(prefix), 'Bad node: %s' % path
subpath = path[prefix_len:]
if '.' in subpath:
name = subpath[:subpath.index('.')]
else:
name = subpath
if curname is None:
curname = name
elif name != curname:
subnode = get_child(curname)
appstruct[curname] = subnode.typ.unflatten(subnode, subpaths, subfstruct)
subfstruct = {}
subpaths = []
curname = name
subpath = rewrite_subpath(subpath)
subfstruct[subpath] = fstruct[path]
subpaths.append(subpath)
if curname is not None:
subnode = get_child(curname)
appstruct[curname] = subnode.typ.unflatten(subnode, subpaths, subfstruct)
mapstruct = appstruct
</DeepExtract>
appstruct = []
for subnode in node.children:
appstruct.append(mapstruct[subnode.name])
return tuple(appstruct)
|
def unflatten(self, node, paths, fstruct):
if get_child is None:
get_child = node.__getitem__
if rewrite_subpath is None:
def rewrite_subpath(subpath):
mapstruct = subpath
node_name = node.name
if node_name:
prefix = node_name + '.'
else:
prefix = ''
prefix_len = len(prefix)
appstruct = {}
subfstruct = {}
subpaths = []
curname = None
for path in paths:
if path == node_name:
continue
assert path.startswith(prefix), 'Bad node: %s' % path
subpath = path[prefix_len:]
if '.' in subpath:
name = subpath[:subpath.index('.')]
else:
name = subpath
if curname is None:
curname = name
elif name != curname:
subnode = get_child(curname)
appstruct[curname] = subnode.typ.unflatten(subnode, subpaths, subfstruct)
subfstruct = {}
subpaths = []
curname = name
subpath = rewrite_subpath(subpath)
subfstruct[subpath] = fstruct[path]
subpaths.append(subpath)
if curname is not None:
subnode = get_child(curname)
appstruct[curname] = subnode.typ.unflatten(subnode, subpaths, subfstruct)
mapstruct = appstruct
appstruct = []
for subnode in node.children:
appstruct.append(mapstruct[subnode.name])
return tuple(appstruct)
|
colander
|
positive
|
def export(self, pipeline: Pipeline, pipeline_export_format: str, pipeline_export_path: str, overwrite: bool) -> str:
"""
Export pipeline as Airflow DAG
"""
self._verify_export_format(pipeline_export_format)
timestamp = datetime.now().strftime('%m%d%H%M%S')
pipeline_instance_id = f'{pipeline.name}-{timestamp}'
absolute_pipeline_export_path = get_absolute_path(self.root_dir, pipeline_export_path)
if os.path.exists(absolute_pipeline_export_path) and (not overwrite):
raise ValueError(f"File '{absolute_pipeline_export_path}' already exists.")
self.log_pipeline_info(pipeline.name, f'exporting pipeline as a .{pipeline_export_format} file')
<DeepExtract>
self.log.info(f"Creating pipeline definition as a .{'py'} file")
if 'py' == 'json':
with open(absolute_pipeline_export_path, 'w', encoding='utf-8') as file:
json.dump(absolute_pipeline_export_path, file, ensure_ascii=False, indent=4)
else:
loader = PackageLoader('elyra', 'templates/airflow')
template_env = Environment(loader=loader)
template_env.filters['regex_replace'] = lambda x: AirflowPipelineProcessor.scrub_invalid_characters(x)
template = template_env.get_template('airflow_template.jinja2')
ordered_ops = self._cc_pipeline(pipeline, pipeline.name, pipeline_instance_id)
runtime_configuration = self._get_metadata_configuration(schemaspace=Runtimes.RUNTIMES_SCHEMASPACE_ID, name=pipeline.runtime_config)
user_namespace = runtime_configuration.metadata.get('user_namespace', 'default')
cos_secret = runtime_configuration.metadata.get('cos_secret')
pipeline_description = pipeline.description
if pipeline_description is None:
pipeline_description = f'Created with Elyra {__version__} pipeline editor using `{pipeline.source}`.'
python_output = template.render(operations_list=ordered_ops, pipeline_name=pipeline_instance_id, user_namespace=user_namespace, cos_secret=cos_secret if any((op.get('is_generic_operator') for op in ordered_ops.values())) else None, kube_config_path=None, is_paused_upon_creation='False', in_cluster='True', pipeline_description=pipeline_description, processor=self)
with open(absolute_pipeline_export_path, 'w') as fh:
import black
autopep_output = autopep8.fix_code(python_output)
output_to_file = black.format_str(autopep_output, mode=black.FileMode())
fh.write(output_to_file)
new_pipeline_file_path = absolute_pipeline_export_path
</DeepExtract>
return new_pipeline_file_path
|
def export(self, pipeline: Pipeline, pipeline_export_format: str, pipeline_export_path: str, overwrite: bool) -> str:
"""
Export pipeline as Airflow DAG
"""
self._verify_export_format(pipeline_export_format)
timestamp = datetime.now().strftime('%m%d%H%M%S')
pipeline_instance_id = f'{pipeline.name}-{timestamp}'
absolute_pipeline_export_path = get_absolute_path(self.root_dir, pipeline_export_path)
if os.path.exists(absolute_pipeline_export_path) and (not overwrite):
raise ValueError(f"File '{absolute_pipeline_export_path}' already exists.")
self.log_pipeline_info(pipeline.name, f'exporting pipeline as a .{pipeline_export_format} file')
self.log.info(f"Creating pipeline definition as a .{'py'} file")
if 'py' == 'json':
with open(absolute_pipeline_export_path, 'w', encoding='utf-8') as file:
json.dump(absolute_pipeline_export_path, file, ensure_ascii=False, indent=4)
else:
loader = PackageLoader('elyra', 'templates/airflow')
template_env = Environment(loader=loader)
template_env.filters['regex_replace'] = lambda x: AirflowPipelineProcessor.scrub_invalid_characters(x)
template = template_env.get_template('airflow_template.jinja2')
ordered_ops = self._cc_pipeline(pipeline, pipeline.name, pipeline_instance_id)
runtime_configuration = self._get_metadata_configuration(schemaspace=Runtimes.RUNTIMES_SCHEMASPACE_ID, name=pipeline.runtime_config)
user_namespace = runtime_configuration.metadata.get('user_namespace', 'default')
cos_secret = runtime_configuration.metadata.get('cos_secret')
pipeline_description = pipeline.description
if pipeline_description is None:
pipeline_description = f'Created with Elyra {__version__} pipeline editor using `{pipeline.source}`.'
python_output = template.render(operations_list=ordered_ops, pipeline_name=pipeline_instance_id, user_namespace=user_namespace, cos_secret=cos_secret if any((op.get('is_generic_operator') for op in ordered_ops.values())) else None, kube_config_path=None, is_paused_upon_creation='False', in_cluster='True', pipeline_description=pipeline_description, processor=self)
with open(absolute_pipeline_export_path, 'w') as fh:
import black
autopep_output = autopep8.fix_code(python_output)
output_to_file = black.format_str(autopep_output, mode=black.FileMode())
fh.write(output_to_file)
new_pipeline_file_path = absolute_pipeline_export_path
return new_pipeline_file_path
|
elyra
|
positive
|
def url(self, name, expire=None, parameters=None):
<DeepExtract>
name = name.strip('./')
if len(name) > _AZURE_NAME_MAX_LEN:
raise ValueError('File name max len is %d' % _AZURE_NAME_MAX_LEN)
if not len(name):
raise ValueError('File name must contain one or more printable characters')
if name.count('/') > 256:
raise ValueError('File name must not contain more than 256 slashes')
name = name
</DeepExtract>
params = parameters or {}
if expire is None:
expire = self.expiration_secs
credential = None
if expire:
<DeepExtract>
expiry = datetime.utcnow() + timedelta(seconds=expire)
</DeepExtract>
<DeepExtract>
if self.token_credential is None:
user_delegation_key = None
if self._user_delegation_key is None or expiry > self._user_delegation_key_expiry:
now = datetime.utcnow()
key_expiry_time = now + timedelta(days=7)
self._user_delegation_key = self.custom_service_client.get_user_delegation_key(key_start_time=now, key_expiry_time=key_expiry_time)
self._user_delegation_key_expiry = key_expiry_time
user_delegation_key = self._user_delegation_key
</DeepExtract>
sas_token = generate_blob_sas(self.account_name, self.azure_container, name, account_key=self.account_key, user_delegation_key=user_delegation_key, permission=BlobSasPermissions(read=True), expiry=expiry, **params)
credential = sas_token
container_blob_url = self.custom_client.get_blob_client(name).url
return BlobClient.from_blob_url(container_blob_url, credential=credential).url
|
def url(self, name, expire=None, parameters=None):
name = name.strip('./')
if len(name) > _AZURE_NAME_MAX_LEN:
raise ValueError('File name max len is %d' % _AZURE_NAME_MAX_LEN)
if not len(name):
raise ValueError('File name must contain one or more printable characters')
if name.count('/') > 256:
raise ValueError('File name must not contain more than 256 slashes')
name = name
params = parameters or {}
if expire is None:
expire = self.expiration_secs
credential = None
if expire:
expiry = datetime.utcnow() + timedelta(seconds=expire)
if self.token_credential is None:
user_delegation_key = None
if self._user_delegation_key is None or expiry > self._user_delegation_key_expiry:
now = datetime.utcnow()
key_expiry_time = now + timedelta(days=7)
self._user_delegation_key = self.custom_service_client.get_user_delegation_key(key_start_time=now, key_expiry_time=key_expiry_time)
self._user_delegation_key_expiry = key_expiry_time
user_delegation_key = self._user_delegation_key
sas_token = generate_blob_sas(self.account_name, self.azure_container, name, account_key=self.account_key, user_delegation_key=user_delegation_key, permission=BlobSasPermissions(read=True), expiry=expiry, **params)
credential = sas_token
container_blob_url = self.custom_client.get_blob_client(name).url
return BlobClient.from_blob_url(container_blob_url, credential=credential).url
|
django-storages
|
positive
|
def set_mypy_path():
"""Put Coconut stubs in MYPYPATH."""
install_dir = install_mypy_stubs().replace(os.sep, '/')
original = os.getenv(mypy_path_env_var)
if original is None:
new_mypy_path = install_dir
elif not original.startswith(install_dir):
new_mypy_path = install_dir + ':' + original
else:
new_mypy_path = None
if new_mypy_path is not None:
<DeepExtract>
os.environ[py_str(mypy_path_env_var)] = py_str(new_mypy_path)
</DeepExtract>
logger.log_func(lambda : (mypy_path_env_var, '=', os.getenv(mypy_path_env_var)))
return install_dir
|
def set_mypy_path():
"""Put Coconut stubs in MYPYPATH."""
install_dir = install_mypy_stubs().replace(os.sep, '/')
original = os.getenv(mypy_path_env_var)
if original is None:
new_mypy_path = install_dir
elif not original.startswith(install_dir):
new_mypy_path = install_dir + ':' + original
else:
new_mypy_path = None
if new_mypy_path is not None:
os.environ[py_str(mypy_path_env_var)] = py_str(new_mypy_path)
logger.log_func(lambda : (mypy_path_env_var, '=', os.getenv(mypy_path_env_var)))
return install_dir
|
coconut
|
positive
|
def validate_isd_hourly_temp_data_cache(usaf_id, year):
<DeepExtract>
key = 'isd-hourly-{}-{}'.format(usaf_id, year)
</DeepExtract>
store = eeweather.connections.key_value_store_proxy.get_store()
if not store.key_exists(key):
return False
if cached_isd_hourly_temp_data_is_expired(usaf_id, year):
store.clear(key)
return False
return True
|
def validate_isd_hourly_temp_data_cache(usaf_id, year):
key = 'isd-hourly-{}-{}'.format(usaf_id, year)
store = eeweather.connections.key_value_store_proxy.get_store()
if not store.key_exists(key):
return False
if cached_isd_hourly_temp_data_is_expired(usaf_id, year):
store.clear(key)
return False
return True
|
eeweather
|
positive
|
def _ord_4(self, q1, p1, q2, p2, delta):
"""
Order 4 Integration Scheme
References
----------
.. [1] Yoshida, Haruo,
"Construction of higher order symplectic integrators";
Physics Letters A, vol. 150, no. 5-7, pp. 262-268, 1990.
`DOI: <https://doi.org/10.1016/0375-9601(90)90092-3>`__
"""
dl = delta
(Z0, Z1) = _Z(self.order)
<DeepExtract>
(dl, omg) = (dl * Z1, self.omega)
g = self.metric
g_prms = self.metric_params
HA1 = np.array([q1, _flow_A(g, g_prms, q1, p1, q2, p2, 0.5 * dl)[1], _flow_A(g, g_prms, q1, p1, q2, p2, 0.5 * dl)[0], p2])
HB1 = np.array([_flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[0], HA1[1], HA1[2], _flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[1]])
HC = _flow_mixed(HB1[0], HB1[1], HB1[2], HB1[3], dl, omg)
HB2 = np.array([_flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[0], HC[1], HC[2], _flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[1]])
HA2 = np.array([HB2[0], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[1], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[0], HB2[3]])
step1 = HA2
</DeepExtract>
<DeepExtract>
(dl, omg) = (dl * Z0, self.omega)
g = self.metric
g_prms = self.metric_params
HA1 = np.array([step1[0], _flow_A(g, g_prms, step1[0], step1[1], step1[2], step1[3], 0.5 * dl)[1], _flow_A(g, g_prms, step1[0], step1[1], step1[2], step1[3], 0.5 * dl)[0], step1[3]])
HB1 = np.array([_flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[0], HA1[1], HA1[2], _flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[1]])
HC = _flow_mixed(HB1[0], HB1[1], HB1[2], HB1[3], dl, omg)
HB2 = np.array([_flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[0], HC[1], HC[2], _flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[1]])
HA2 = np.array([HB2[0], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[1], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[0], HB2[3]])
step2 = HA2
</DeepExtract>
<DeepExtract>
(dl, omg) = (dl * Z1, self.omega)
g = self.metric
g_prms = self.metric_params
HA1 = np.array([step2[0], _flow_A(g, g_prms, step2[0], step2[1], step2[2], step2[3], 0.5 * dl)[1], _flow_A(g, g_prms, step2[0], step2[1], step2[2], step2[3], 0.5 * dl)[0], step2[3]])
HB1 = np.array([_flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[0], HA1[1], HA1[2], _flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[1]])
HC = _flow_mixed(HB1[0], HB1[1], HB1[2], HB1[3], dl, omg)
HB2 = np.array([_flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[0], HC[1], HC[2], _flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[1]])
HA2 = np.array([HB2[0], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[1], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[0], HB2[3]])
step3 = HA2
</DeepExtract>
return step3
|
def _ord_4(self, q1, p1, q2, p2, delta):
"""
Order 4 Integration Scheme
References
----------
.. [1] Yoshida, Haruo,
"Construction of higher order symplectic integrators";
Physics Letters A, vol. 150, no. 5-7, pp. 262-268, 1990.
`DOI: <https://doi.org/10.1016/0375-9601(90)90092-3>`__
"""
dl = delta
(Z0, Z1) = _Z(self.order)
(dl, omg) = (dl * Z1, self.omega)
g = self.metric
g_prms = self.metric_params
HA1 = np.array([q1, _flow_A(g, g_prms, q1, p1, q2, p2, 0.5 * dl)[1], _flow_A(g, g_prms, q1, p1, q2, p2, 0.5 * dl)[0], p2])
HB1 = np.array([_flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[0], HA1[1], HA1[2], _flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[1]])
HC = _flow_mixed(HB1[0], HB1[1], HB1[2], HB1[3], dl, omg)
HB2 = np.array([_flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[0], HC[1], HC[2], _flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[1]])
HA2 = np.array([HB2[0], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[1], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[0], HB2[3]])
step1 = HA2
(dl, omg) = (dl * Z0, self.omega)
g = self.metric
g_prms = self.metric_params
HA1 = np.array([step1[0], _flow_A(g, g_prms, step1[0], step1[1], step1[2], step1[3], 0.5 * dl)[1], _flow_A(g, g_prms, step1[0], step1[1], step1[2], step1[3], 0.5 * dl)[0], step1[3]])
HB1 = np.array([_flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[0], HA1[1], HA1[2], _flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[1]])
HC = _flow_mixed(HB1[0], HB1[1], HB1[2], HB1[3], dl, omg)
HB2 = np.array([_flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[0], HC[1], HC[2], _flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[1]])
HA2 = np.array([HB2[0], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[1], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[0], HB2[3]])
step2 = HA2
(dl, omg) = (dl * Z1, self.omega)
g = self.metric
g_prms = self.metric_params
HA1 = np.array([step2[0], _flow_A(g, g_prms, step2[0], step2[1], step2[2], step2[3], 0.5 * dl)[1], _flow_A(g, g_prms, step2[0], step2[1], step2[2], step2[3], 0.5 * dl)[0], step2[3]])
HB1 = np.array([_flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[0], HA1[1], HA1[2], _flow_B(g, g_prms, HA1[0], HA1[1], HA1[2], HA1[3], 0.5 * dl)[1]])
HC = _flow_mixed(HB1[0], HB1[1], HB1[2], HB1[3], dl, omg)
HB2 = np.array([_flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[0], HC[1], HC[2], _flow_B(g, g_prms, HC[0], HC[1], HC[2], HC[3], 0.5 * dl)[1]])
HA2 = np.array([HB2[0], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[1], _flow_A(g, g_prms, HB2[0], HB2[1], HB2[2], HB2[3], 0.5 * dl)[0], HB2[3]])
step3 = HA2
return step3
|
einsteinpy
|
positive
|
def create_streams_template(new_resources, settings):
"""
Function to create the root template for Kinesis streams
:param list<ecs_composex.kinesis.kinesis_stack.Stream> new_resources:
:param ecs_composex.common.settings.ComposeXSettings settings:
:return:
"""
root_template = build_template('Root stack for ecs_composex.kinesis')
for res in new_resources:
<DeepExtract>
props = import_record_properties(res.properties, Stream)
if not keyisset('ShardCount', res.properties):
LOG.warning('ShardCount must be set. Defaulting to 1')
props['ShardCount'] = 1
props['Tags'] = Tags(Name=res.logical_name, ComposeName=res.name)
res.cfn_resource = Stream(res.logical_name, **props)
res.init_outputs()
res.generate_outputs()
</DeepExtract>
add_resource(root_template, res.cfn_resource)
add_outputs(root_template, res.outputs)
return root_template
|
def create_streams_template(new_resources, settings):
"""
Function to create the root template for Kinesis streams
:param list<ecs_composex.kinesis.kinesis_stack.Stream> new_resources:
:param ecs_composex.common.settings.ComposeXSettings settings:
:return:
"""
root_template = build_template('Root stack for ecs_composex.kinesis')
for res in new_resources:
props = import_record_properties(res.properties, Stream)
if not keyisset('ShardCount', res.properties):
LOG.warning('ShardCount must be set. Defaulting to 1')
props['ShardCount'] = 1
props['Tags'] = Tags(Name=res.logical_name, ComposeName=res.name)
res.cfn_resource = Stream(res.logical_name, **props)
res.init_outputs()
res.generate_outputs()
add_resource(root_template, res.cfn_resource)
add_outputs(root_template, res.outputs)
return root_template
|
ecs_composex
|
positive
|
def __init__(self, method, full_space_poly=None, sample_points=None, sample_outputs=None, subspace_dimension=2, polynomial_degree=2, param_args=None, poly_args=None, dr_args=None):
self.full_space_poly = full_space_poly
self.sample_points = sample_points
self.Y = None
self.sample_outputs = sample_outputs
self.method = method
self.subspace_dimension = subspace_dimension
self.polynomial_degree = polynomial_degree
my_poly_args = {'method': 'least-squares', 'solver_args': {}}
if poly_args is not None:
my_poly_args.update(poly_args)
self.poly_args = my_poly_args
my_param_args = {'distribution': 'uniform', 'order': self.polynomial_degree, 'lower': -1, 'upper': 1}
if param_args is not None:
my_param_args.update(param_args)
bounded_distrs = ['analytical', 'beta', 'chebyshev', 'arcsine', 'truncated-gaussian', 'uniform']
unbounded_distrs = ['gaussian', 'normal', 'gumbel', 'logistic', 'students-t', 'studentst']
semi_bounded_distrs = ['chi', 'chi-squared', 'exponential', 'gamma', 'lognormal', 'log-normal', 'pareto', 'rayleigh', 'weibull']
if dr_args is not None:
if 'standardize' in dr_args:
dr_args['standardise'] = dr_args['standardize']
if self.method.lower() == 'active-subspace' or self.method.lower() == 'active-subspaces':
self.method = 'active-subspace'
if dr_args is not None:
self.standardise = getattr(dr_args, 'standardise', True)
else:
self.standardise = True
if self.full_space_poly is None:
(N, d) = self.sample_points.shape
if self.standardise:
self.data_scaler = scaler_minmax()
self.data_scaler.fit(self.sample_points)
self.std_sample_points = self.data_scaler.transform(self.sample_points)
else:
self.std_sample_points = self.sample_points.copy()
param = Parameter(**my_param_args)
if param_args is not None:
if (hasattr(dr_args, 'lower') or hasattr(dr_args, 'upper')) and self.standardise:
warnings.warn('Points standardised but parameter range provided. Overriding default ([-1,1])...', UserWarning)
myparameters = [param for _ in range(d)]
mybasis = Basis('total-order')
mypoly = Poly(myparameters, mybasis, sampling_args={'sample-points': self.std_sample_points, 'sample-outputs': self.sample_outputs}, **my_poly_args)
mypoly.set_model()
self.full_space_poly = mypoly
else:
user_params = self.full_space_poly.parameters
d = len(user_params)
self.sample_points = self.full_space_poly.get_points()
if self.standardise:
scale_factors = np.zeros(d)
centers = np.zeros(d)
for (dd, p) in enumerate(user_params):
if p.name.lower() in bounded_distrs:
scale_factors[dd] = (p.upper - p.lower) / 2.0
centers[dd] = (p.upper + p.lower) / 2.0
elif p.name.lower() in unbounded_distrs:
scale_factors[dd] = np.sqrt(p.variance)
centers[dd] = p.mean
else:
scale_factors[dd] = np.sqrt(p.variance)
centers[dd] = 0.0
self.param_scaler = scaler_custom(centers, scale_factors)
self.std_sample_points = self.param_scaler.transform(self.sample_points)
else:
self.std_sample_points = self.sample_points.copy()
if not hasattr(self.full_space_poly, 'coefficients'):
raise ValueError('Please call set_model() first on poly.')
self.sample_outputs = self.full_space_poly.get_model_evaluations()
as_args = {'grad_points': None}
if dr_args is not None:
as_args.update(dr_args)
<DeepExtract>
if grad_points is None:
X = self.full_space_poly.get_points()
elif hasattr(self, 'data_scaler'):
X = self.data_scaler.transform(grad_points)
else:
X = grad_points.copy()
(M, d) = X.shape
if d != self.sample_points.shape[1]:
raise ValueError('In _get_active_subspace: dimensions of gradient evaluation points mismatched with input dimension!')
alpha = 2.0
num_grad_lb = alpha * self.subspace_dimension * np.log(d)
if M < num_grad_lb:
warnings.warn('Number of gradient evaluation points is likely to be insufficient. Consider resampling!', UserWarning)
polygrad = self.full_space_poly.get_polyfit_grad(X)
if hasattr(self, 'param_scaler'):
polygrad = self.param_scaler.div[:, np.newaxis] * polygrad
weights = np.ones((M, 1)) / M
R = polygrad.transpose() * weights
C = np.dot(polygrad, R)
(e, W) = np.linalg.eigh(C)
idx = e.argsort()[::-1]
eigs = e[idx]
eigVecs = W[:, idx]
if hasattr(self, 'data_scaler'):
scale_factors = 2.0 / (self.data_scaler.Xmax - self.data_scaler.Xmin)
eigVecs = scale_factors[:, np.newaxis] * eigVecs
eigVecs = np.linalg.qr(eigVecs)[0]
self._subspace = eigVecs
self._eigenvalues = eigs
</DeepExtract>
elif self.method == 'variable-projection':
self.data_scaler = scaler_minmax()
self.data_scaler.fit(self.sample_points)
self.std_sample_points = self.data_scaler.transform(self.sample_points)
if dr_args is not None:
vp_args = {'gamma': 0.1, 'beta': 0.0001, 'tol': 1e-07, 'maxiter': 1000, 'U0': None, 'verbose': False}
vp_args.update(dr_args)
<DeepExtract>
(M, m) = self.std_sample_points.shape
if U0 is None:
Z = np.random.randn(m, self.subspace_dimension)
(U, _) = np.linalg.qr(Z)
else:
U = orth(U0)
y = np.dot(self.std_sample_points, U)
minmax = np.zeros((2, self.subspace_dimension))
minmax[0, :] = np.amin(y, axis=0)
minmax[1, :] = np.amax(y, axis=0)
eta = 2 * np.divide(y - minmax[0, :], minmax[1, :] - minmax[0, :]) - 1
(V, poly_obj) = vandermonde(eta, self.polynomial_degree)
V_plus = np.linalg.pinv(V)
coeff = np.dot(V_plus, self.sample_outputs)
res = self.sample_outputs - np.dot(V, coeff)
for iteration in range(0, maxiter):
J = jacobian_vp(V, V_plus, U, self.sample_outputs, poly_obj, eta, minmax, self.std_sample_points)
G = np.zeros((m, self.subspace_dimension))
for i in range(0, M):
G += res[i] * J[i, :, :]
vec_J = np.reshape(J, (M, m * self.subspace_dimension))
(Y, S, Z) = np.linalg.svd(vec_J, full_matrices=False)
delta = np.dot(Y[:, :-self.subspace_dimension ** 2].T, res)
delta = np.dot(np.diag(1 / S[:-self.subspace_dimension ** 2]), delta)
delta = -np.dot(Z[:-self.subspace_dimension ** 2, :].T, delta).reshape(U.shape)
vec_delta = delta.flatten()
vec_G = G.flatten()
alpha = np.dot(vec_G.T, vec_delta)
norm_G = np.dot(vec_G.T, vec_G)
if alpha >= 0:
delta = -G
alpha = -norm_G
(Y, S, Z) = np.linalg.svd(delta, full_matrices=False)
UZ = np.dot(U, Z.T)
t = 1
for iter2 in range(0, 20):
U_new = np.dot(UZ, np.diag(np.cos(S * t))) + np.dot(Y, np.diag(np.sin(S * t)))
U_new = orth(U_new)
y = np.dot(self.std_sample_points, U_new)
minmax[0, :] = np.amin(y, axis=0)
minmax[1, :] = np.amax(y, axis=0)
eta = 2 * np.divide(y - minmax[0, :], minmax[1, :] - minmax[0, :]) - 1
(V_new, poly_obj) = vandermonde(eta, self.polynomial_degree)
V_plus_new = np.linalg.pinv(V_new)
coeff_new = np.dot(V_plus_new, self.sample_outputs)
res_new = self.sample_outputs - np.dot(V_new, coeff_new)
R_new = np.linalg.norm(res_new)
if np.linalg.norm(res_new) <= np.linalg.norm(res) + alpha * beta * t or t < 1e-10:
break
t = t * gamma
dist_change = subspace_dist(U, U_new)
U = U_new
V = V_new
V_plus = V_plus_new
res = res_new
if dist_change < tol:
if verbose:
print('VP finished with %d iterations' % iteration)
break
if iteration == maxiter - 1 and verbose:
print('VP finished with %d iterations' % iteration)
active_subspace = U
inactive_subspace = _null_space(active_subspace.T)
self._subspace = np.hstack([active_subspace, inactive_subspace])
</DeepExtract>
else:
<DeepExtract>
(M, m) = self.std_sample_points.shape
if U0 is None:
Z = np.random.randn(m, self.subspace_dimension)
(U, _) = np.linalg.qr(Z)
else:
U = orth(U0)
y = np.dot(self.std_sample_points, U)
minmax = np.zeros((2, self.subspace_dimension))
minmax[0, :] = np.amin(y, axis=0)
minmax[1, :] = np.amax(y, axis=0)
eta = 2 * np.divide(y - minmax[0, :], minmax[1, :] - minmax[0, :]) - 1
(V, poly_obj) = vandermonde(eta, self.polynomial_degree)
V_plus = np.linalg.pinv(V)
coeff = np.dot(V_plus, self.sample_outputs)
res = self.sample_outputs - np.dot(V, coeff)
for iteration in range(0, maxiter):
J = jacobian_vp(V, V_plus, U, self.sample_outputs, poly_obj, eta, minmax, self.std_sample_points)
G = np.zeros((m, self.subspace_dimension))
for i in range(0, M):
G += res[i] * J[i, :, :]
vec_J = np.reshape(J, (M, m * self.subspace_dimension))
(Y, S, Z) = np.linalg.svd(vec_J, full_matrices=False)
delta = np.dot(Y[:, :-self.subspace_dimension ** 2].T, res)
delta = np.dot(np.diag(1 / S[:-self.subspace_dimension ** 2]), delta)
delta = -np.dot(Z[:-self.subspace_dimension ** 2, :].T, delta).reshape(U.shape)
vec_delta = delta.flatten()
vec_G = G.flatten()
alpha = np.dot(vec_G.T, vec_delta)
norm_G = np.dot(vec_G.T, vec_G)
if alpha >= 0:
delta = -G
alpha = -norm_G
(Y, S, Z) = np.linalg.svd(delta, full_matrices=False)
UZ = np.dot(U, Z.T)
t = 1
for iter2 in range(0, 20):
U_new = np.dot(UZ, np.diag(np.cos(S * t))) + np.dot(Y, np.diag(np.sin(S * t)))
U_new = orth(U_new)
y = np.dot(self.std_sample_points, U_new)
minmax[0, :] = np.amin(y, axis=0)
minmax[1, :] = np.amax(y, axis=0)
eta = 2 * np.divide(y - minmax[0, :], minmax[1, :] - minmax[0, :]) - 1
(V_new, poly_obj) = vandermonde(eta, self.polynomial_degree)
V_plus_new = np.linalg.pinv(V_new)
coeff_new = np.dot(V_plus_new, self.sample_outputs)
res_new = self.sample_outputs - np.dot(V_new, coeff_new)
R_new = np.linalg.norm(res_new)
if np.linalg.norm(res_new) <= np.linalg.norm(res) + alpha * beta * t or t < 1e-10:
break
t = t * gamma
dist_change = subspace_dist(U, U_new)
U = U_new
V = V_new
V_plus = V_plus_new
res = res_new
if dist_change < tol:
if verbose:
print('VP finished with %d iterations' % iteration)
break
if iteration == maxiter - 1 and verbose:
print('VP finished with %d iterations' % iteration)
active_subspace = U
inactive_subspace = _null_space(active_subspace.T)
self._subspace = np.hstack([active_subspace, inactive_subspace])
</DeepExtract>
|
def __init__(self, method, full_space_poly=None, sample_points=None, sample_outputs=None, subspace_dimension=2, polynomial_degree=2, param_args=None, poly_args=None, dr_args=None):
self.full_space_poly = full_space_poly
self.sample_points = sample_points
self.Y = None
self.sample_outputs = sample_outputs
self.method = method
self.subspace_dimension = subspace_dimension
self.polynomial_degree = polynomial_degree
my_poly_args = {'method': 'least-squares', 'solver_args': {}}
if poly_args is not None:
my_poly_args.update(poly_args)
self.poly_args = my_poly_args
my_param_args = {'distribution': 'uniform', 'order': self.polynomial_degree, 'lower': -1, 'upper': 1}
if param_args is not None:
my_param_args.update(param_args)
bounded_distrs = ['analytical', 'beta', 'chebyshev', 'arcsine', 'truncated-gaussian', 'uniform']
unbounded_distrs = ['gaussian', 'normal', 'gumbel', 'logistic', 'students-t', 'studentst']
semi_bounded_distrs = ['chi', 'chi-squared', 'exponential', 'gamma', 'lognormal', 'log-normal', 'pareto', 'rayleigh', 'weibull']
if dr_args is not None:
if 'standardize' in dr_args:
dr_args['standardise'] = dr_args['standardize']
if self.method.lower() == 'active-subspace' or self.method.lower() == 'active-subspaces':
self.method = 'active-subspace'
if dr_args is not None:
self.standardise = getattr(dr_args, 'standardise', True)
else:
self.standardise = True
if self.full_space_poly is None:
(N, d) = self.sample_points.shape
if self.standardise:
self.data_scaler = scaler_minmax()
self.data_scaler.fit(self.sample_points)
self.std_sample_points = self.data_scaler.transform(self.sample_points)
else:
self.std_sample_points = self.sample_points.copy()
param = Parameter(**my_param_args)
if param_args is not None:
if (hasattr(dr_args, 'lower') or hasattr(dr_args, 'upper')) and self.standardise:
warnings.warn('Points standardised but parameter range provided. Overriding default ([-1,1])...', UserWarning)
myparameters = [param for _ in range(d)]
mybasis = Basis('total-order')
mypoly = Poly(myparameters, mybasis, sampling_args={'sample-points': self.std_sample_points, 'sample-outputs': self.sample_outputs}, **my_poly_args)
mypoly.set_model()
self.full_space_poly = mypoly
else:
user_params = self.full_space_poly.parameters
d = len(user_params)
self.sample_points = self.full_space_poly.get_points()
if self.standardise:
scale_factors = np.zeros(d)
centers = np.zeros(d)
for (dd, p) in enumerate(user_params):
if p.name.lower() in bounded_distrs:
scale_factors[dd] = (p.upper - p.lower) / 2.0
centers[dd] = (p.upper + p.lower) / 2.0
elif p.name.lower() in unbounded_distrs:
scale_factors[dd] = np.sqrt(p.variance)
centers[dd] = p.mean
else:
scale_factors[dd] = np.sqrt(p.variance)
centers[dd] = 0.0
self.param_scaler = scaler_custom(centers, scale_factors)
self.std_sample_points = self.param_scaler.transform(self.sample_points)
else:
self.std_sample_points = self.sample_points.copy()
if not hasattr(self.full_space_poly, 'coefficients'):
raise ValueError('Please call set_model() first on poly.')
self.sample_outputs = self.full_space_poly.get_model_evaluations()
as_args = {'grad_points': None}
if dr_args is not None:
as_args.update(dr_args)
if grad_points is None:
X = self.full_space_poly.get_points()
elif hasattr(self, 'data_scaler'):
X = self.data_scaler.transform(grad_points)
else:
X = grad_points.copy()
(M, d) = X.shape
if d != self.sample_points.shape[1]:
raise ValueError('In _get_active_subspace: dimensions of gradient evaluation points mismatched with input dimension!')
alpha = 2.0
num_grad_lb = alpha * self.subspace_dimension * np.log(d)
if M < num_grad_lb:
warnings.warn('Number of gradient evaluation points is likely to be insufficient. Consider resampling!', UserWarning)
polygrad = self.full_space_poly.get_polyfit_grad(X)
if hasattr(self, 'param_scaler'):
polygrad = self.param_scaler.div[:, np.newaxis] * polygrad
weights = np.ones((M, 1)) / M
R = polygrad.transpose() * weights
C = np.dot(polygrad, R)
(e, W) = np.linalg.eigh(C)
idx = e.argsort()[::-1]
eigs = e[idx]
eigVecs = W[:, idx]
if hasattr(self, 'data_scaler'):
scale_factors = 2.0 / (self.data_scaler.Xmax - self.data_scaler.Xmin)
eigVecs = scale_factors[:, np.newaxis] * eigVecs
eigVecs = np.linalg.qr(eigVecs)[0]
self._subspace = eigVecs
self._eigenvalues = eigs
elif self.method == 'variable-projection':
self.data_scaler = scaler_minmax()
self.data_scaler.fit(self.sample_points)
self.std_sample_points = self.data_scaler.transform(self.sample_points)
if dr_args is not None:
vp_args = {'gamma': 0.1, 'beta': 0.0001, 'tol': 1e-07, 'maxiter': 1000, 'U0': None, 'verbose': False}
vp_args.update(dr_args)
(M, m) = self.std_sample_points.shape
if U0 is None:
Z = np.random.randn(m, self.subspace_dimension)
(U, _) = np.linalg.qr(Z)
else:
U = orth(U0)
y = np.dot(self.std_sample_points, U)
minmax = np.zeros((2, self.subspace_dimension))
minmax[0, :] = np.amin(y, axis=0)
minmax[1, :] = np.amax(y, axis=0)
eta = 2 * np.divide(y - minmax[0, :], minmax[1, :] - minmax[0, :]) - 1
(V, poly_obj) = vandermonde(eta, self.polynomial_degree)
V_plus = np.linalg.pinv(V)
coeff = np.dot(V_plus, self.sample_outputs)
res = self.sample_outputs - np.dot(V, coeff)
for iteration in range(0, maxiter):
J = jacobian_vp(V, V_plus, U, self.sample_outputs, poly_obj, eta, minmax, self.std_sample_points)
G = np.zeros((m, self.subspace_dimension))
for i in range(0, M):
G += res[i] * J[i, :, :]
vec_J = np.reshape(J, (M, m * self.subspace_dimension))
(Y, S, Z) = np.linalg.svd(vec_J, full_matrices=False)
delta = np.dot(Y[:, :-self.subspace_dimension ** 2].T, res)
delta = np.dot(np.diag(1 / S[:-self.subspace_dimension ** 2]), delta)
delta = -np.dot(Z[:-self.subspace_dimension ** 2, :].T, delta).reshape(U.shape)
vec_delta = delta.flatten()
vec_G = G.flatten()
alpha = np.dot(vec_G.T, vec_delta)
norm_G = np.dot(vec_G.T, vec_G)
if alpha >= 0:
delta = -G
alpha = -norm_G
(Y, S, Z) = np.linalg.svd(delta, full_matrices=False)
UZ = np.dot(U, Z.T)
t = 1
for iter2 in range(0, 20):
U_new = np.dot(UZ, np.diag(np.cos(S * t))) + np.dot(Y, np.diag(np.sin(S * t)))
U_new = orth(U_new)
y = np.dot(self.std_sample_points, U_new)
minmax[0, :] = np.amin(y, axis=0)
minmax[1, :] = np.amax(y, axis=0)
eta = 2 * np.divide(y - minmax[0, :], minmax[1, :] - minmax[0, :]) - 1
(V_new, poly_obj) = vandermonde(eta, self.polynomial_degree)
V_plus_new = np.linalg.pinv(V_new)
coeff_new = np.dot(V_plus_new, self.sample_outputs)
res_new = self.sample_outputs - np.dot(V_new, coeff_new)
R_new = np.linalg.norm(res_new)
if np.linalg.norm(res_new) <= np.linalg.norm(res) + alpha * beta * t or t < 1e-10:
break
t = t * gamma
dist_change = subspace_dist(U, U_new)
U = U_new
V = V_new
V_plus = V_plus_new
res = res_new
if dist_change < tol:
if verbose:
print('VP finished with %d iterations' % iteration)
break
if iteration == maxiter - 1 and verbose:
print('VP finished with %d iterations' % iteration)
active_subspace = U
inactive_subspace = _null_space(active_subspace.T)
self._subspace = np.hstack([active_subspace, inactive_subspace])
else:
(M, m) = self.std_sample_points.shape
if U0 is None:
Z = np.random.randn(m, self.subspace_dimension)
(U, _) = np.linalg.qr(Z)
else:
U = orth(U0)
y = np.dot(self.std_sample_points, U)
minmax = np.zeros((2, self.subspace_dimension))
minmax[0, :] = np.amin(y, axis=0)
minmax[1, :] = np.amax(y, axis=0)
eta = 2 * np.divide(y - minmax[0, :], minmax[1, :] - minmax[0, :]) - 1
(V, poly_obj) = vandermonde(eta, self.polynomial_degree)
V_plus = np.linalg.pinv(V)
coeff = np.dot(V_plus, self.sample_outputs)
res = self.sample_outputs - np.dot(V, coeff)
for iteration in range(0, maxiter):
J = jacobian_vp(V, V_plus, U, self.sample_outputs, poly_obj, eta, minmax, self.std_sample_points)
G = np.zeros((m, self.subspace_dimension))
for i in range(0, M):
G += res[i] * J[i, :, :]
vec_J = np.reshape(J, (M, m * self.subspace_dimension))
(Y, S, Z) = np.linalg.svd(vec_J, full_matrices=False)
delta = np.dot(Y[:, :-self.subspace_dimension ** 2].T, res)
delta = np.dot(np.diag(1 / S[:-self.subspace_dimension ** 2]), delta)
delta = -np.dot(Z[:-self.subspace_dimension ** 2, :].T, delta).reshape(U.shape)
vec_delta = delta.flatten()
vec_G = G.flatten()
alpha = np.dot(vec_G.T, vec_delta)
norm_G = np.dot(vec_G.T, vec_G)
if alpha >= 0:
delta = -G
alpha = -norm_G
(Y, S, Z) = np.linalg.svd(delta, full_matrices=False)
UZ = np.dot(U, Z.T)
t = 1
for iter2 in range(0, 20):
U_new = np.dot(UZ, np.diag(np.cos(S * t))) + np.dot(Y, np.diag(np.sin(S * t)))
U_new = orth(U_new)
y = np.dot(self.std_sample_points, U_new)
minmax[0, :] = np.amin(y, axis=0)
minmax[1, :] = np.amax(y, axis=0)
eta = 2 * np.divide(y - minmax[0, :], minmax[1, :] - minmax[0, :]) - 1
(V_new, poly_obj) = vandermonde(eta, self.polynomial_degree)
V_plus_new = np.linalg.pinv(V_new)
coeff_new = np.dot(V_plus_new, self.sample_outputs)
res_new = self.sample_outputs - np.dot(V_new, coeff_new)
R_new = np.linalg.norm(res_new)
if np.linalg.norm(res_new) <= np.linalg.norm(res) + alpha * beta * t or t < 1e-10:
break
t = t * gamma
dist_change = subspace_dist(U, U_new)
U = U_new
V = V_new
V_plus = V_plus_new
res = res_new
if dist_change < tol:
if verbose:
print('VP finished with %d iterations' % iteration)
break
if iteration == maxiter - 1 and verbose:
print('VP finished with %d iterations' % iteration)
active_subspace = U
inactive_subspace = _null_space(active_subspace.T)
self._subspace = np.hstack([active_subspace, inactive_subspace])
|
equadratures
|
positive
|
def rebuild(self):
self.thread = self.dbman.get_thread(self.tid)
self.widgets = []
self.structure = settings.get_threadline_theming(self.thread)
columns = []
def add_column(width, part):
width_tuple = self.structure[partname]['width']
if width_tuple[0] == 'weight':
columnentry = width_tuple + (part,)
else:
columnentry = ('fixed', width, part)
columns.append(columnentry)
for partname in self.structure['parts']:
if partname == 'tags':
<DeepExtract>
part_w = None
width = None
tag_widgets = []
cols = []
width = -1
tag_widgets = [TagWidget(t, self.structure['tags']['normal'], self.structure['tags']['focus']) for t in self.thread.get_tags()]
tag_widgets = sorted(tag_widgets)
for tag_widget in tag_widgets:
if not tag_widget.hidden:
wrapped_tagwidget = tag_widget
tag_width = tag_widget.width()
cols.append(('fixed', tag_width, wrapped_tagwidget))
width += tag_width + 1
if cols:
part_w = urwid.Columns(cols, dividechars=1)
(width, part) = (width, part_w)
</DeepExtract>
if part:
<DeepExtract>
width_tuple = self.structure[partname]['width']
if width_tuple[0] == 'weight':
columnentry = width_tuple + (part,)
else:
columnentry = ('fixed', width, part)
columns.append(columnentry)
</DeepExtract>
for w in part.widget_list:
self.widgets.append(w)
else:
<DeepExtract>
part_w = None
width = None
minw = 0
maxw = None
width_tuple = self.structure[partname]['width']
if width_tuple is not None:
if width_tuple[0] == 'fit':
(minw, maxw) = width_tuple[1:]
content = prepare_string(partname, self.thread, maxw)
if minw:
alignment = self.structure[partname]['alignment']
if alignment == 'left':
content = content.ljust(minw)
elif alignment == 'center':
content = content.center(minw)
else:
content = content.rjust(minw)
text = urwid.Text(content, wrap='clip')
width = text.pack((maxw or minw,))[0]
part_w = AttrFlipWidget(text, self.structure[partname])
(width, part) = (width, part_w)
</DeepExtract>
<DeepExtract>
width_tuple = self.structure[partname]['width']
if width_tuple[0] == 'weight':
columnentry = width_tuple + (part,)
else:
columnentry = ('fixed', width, part)
columns.append(columnentry)
</DeepExtract>
self.widgets.append(part)
self.columns = urwid.Columns(columns, dividechars=1)
self.original_widget = self.columns
|
def rebuild(self):
self.thread = self.dbman.get_thread(self.tid)
self.widgets = []
self.structure = settings.get_threadline_theming(self.thread)
columns = []
def add_column(width, part):
width_tuple = self.structure[partname]['width']
if width_tuple[0] == 'weight':
columnentry = width_tuple + (part,)
else:
columnentry = ('fixed', width, part)
columns.append(columnentry)
for partname in self.structure['parts']:
if partname == 'tags':
part_w = None
width = None
tag_widgets = []
cols = []
width = -1
tag_widgets = [TagWidget(t, self.structure['tags']['normal'], self.structure['tags']['focus']) for t in self.thread.get_tags()]
tag_widgets = sorted(tag_widgets)
for tag_widget in tag_widgets:
if not tag_widget.hidden:
wrapped_tagwidget = tag_widget
tag_width = tag_widget.width()
cols.append(('fixed', tag_width, wrapped_tagwidget))
width += tag_width + 1
if cols:
part_w = urwid.Columns(cols, dividechars=1)
(width, part) = (width, part_w)
if part:
width_tuple = self.structure[partname]['width']
if width_tuple[0] == 'weight':
columnentry = width_tuple + (part,)
else:
columnentry = ('fixed', width, part)
columns.append(columnentry)
for w in part.widget_list:
self.widgets.append(w)
else:
part_w = None
width = None
minw = 0
maxw = None
width_tuple = self.structure[partname]['width']
if width_tuple is not None:
if width_tuple[0] == 'fit':
(minw, maxw) = width_tuple[1:]
content = prepare_string(partname, self.thread, maxw)
if minw:
alignment = self.structure[partname]['alignment']
if alignment == 'left':
content = content.ljust(minw)
elif alignment == 'center':
content = content.center(minw)
else:
content = content.rjust(minw)
text = urwid.Text(content, wrap='clip')
width = text.pack((maxw or minw,))[0]
part_w = AttrFlipWidget(text, self.structure[partname])
(width, part) = (width, part_w)
width_tuple = self.structure[partname]['width']
if width_tuple[0] == 'weight':
columnentry = width_tuple + (part,)
else:
columnentry = ('fixed', width, part)
columns.append(columnentry)
self.widgets.append(part)
self.columns = urwid.Columns(columns, dividechars=1)
self.original_widget = self.columns
|
alot
|
positive
|
def walk_stack2(offset=0):
sp = pwndbg.regs.sp
if not sp:
return AUXV()
<DeepExtract>
sp = pwndbg.memory.page_align(int(sp))
try:
while True:
if b'\x7fELF' == pwndbg.memory.read(sp, 4):
break
sp += pwndbg.memory.PAGE_SIZE
except gdb.MemoryError:
pass
end = sp
</DeepExtract>
p = gdb.Value(end).cast(pwndbg.typeinfo.ulong.pointer())
p -= offset
p -= 2
while p.dereference() != 0 or (p + 1).dereference() != 0:
p -= 2
for i in range(1024):
if p.dereference() == AT_BASE:
break
p -= 2
else:
return AUXV()
while (p - 2).dereference() < 37:
p -= 2
auxv = AUXV()
while True:
const = int((p + 0).dereference()) & pwndbg.arch.ptrmask
value = int((p + 1).dereference()) & pwndbg.arch.ptrmask
if const == AT_NULL:
break
auxv.set(const, value)
p += 2
return auxv
|
def walk_stack2(offset=0):
sp = pwndbg.regs.sp
if not sp:
return AUXV()
sp = pwndbg.memory.page_align(int(sp))
try:
while True:
if b'\x7fELF' == pwndbg.memory.read(sp, 4):
break
sp += pwndbg.memory.PAGE_SIZE
except gdb.MemoryError:
pass
end = sp
p = gdb.Value(end).cast(pwndbg.typeinfo.ulong.pointer())
p -= offset
p -= 2
while p.dereference() != 0 or (p + 1).dereference() != 0:
p -= 2
for i in range(1024):
if p.dereference() == AT_BASE:
break
p -= 2
else:
return AUXV()
while (p - 2).dereference() < 37:
p -= 2
auxv = AUXV()
while True:
const = int((p + 0).dereference()) & pwndbg.arch.ptrmask
value = int((p + 1).dereference()) & pwndbg.arch.ptrmask
if const == AT_NULL:
break
auxv.set(const, value)
p += 2
return auxv
|
217gdb
|
positive
|
def test_empty_dict__custom(self):
d = CustomDict()
<DeepExtract>
return super(Iterable, self).assert_no_match(__unit__.Iterable(), d)
</DeepExtract>
<DeepExtract>
return super(Iterable, self).assert_no_match(__unit__.Iterable(), d)
</DeepExtract>
<DeepExtract>
return super(Iterable, self).assert_no_match(__unit__.Iterable(), d)
</DeepExtract>
<DeepExtract>
return super(Iterable, self).assert_no_match(__unit__.Iterable(), d)
</DeepExtract>
|
def test_empty_dict__custom(self):
d = CustomDict()
return super(Iterable, self).assert_no_match(__unit__.Iterable(), d)
return super(Iterable, self).assert_no_match(__unit__.Iterable(), d)
return super(Iterable, self).assert_no_match(__unit__.Iterable(), d)
return super(Iterable, self).assert_no_match(__unit__.Iterable(), d)
|
callee
|
positive
|
def collapse(nodes):
collapsed = []
for node in nodes:
<DeepExtract>
collapsed_node = _collapse_node(node)
if not _try_collapse(collapsed, collapsed_node):
collapsed.append(collapsed_node)
</DeepExtract>
return collapsed
|
def collapse(nodes):
collapsed = []
for node in nodes:
collapsed_node = _collapse_node(node)
if not _try_collapse(collapsed, collapsed_node):
collapsed.append(collapsed_node)
return collapsed
|
docximport-sigil-plugin
|
positive
|
def prepare_targets(self, proposals, targets):
labels = []
masks = []
for (proposals_per_image, targets_per_image) in zip(proposals, targets):
<DeepExtract>
match_quality_matrix = boxlist_iou(targets_per_image, proposals_per_image)
matched_idxs = self.proposal_matcher(match_quality_matrix)
targets_per_image = targets_per_image.copy_with_fields(['labels', 'masks'])
matched_targets = targets_per_image[matched_idxs.clamp(min=0)]
matched_targets.add_field('matched_idxs', matched_idxs)
matched_targets = matched_targets
</DeepExtract>
matched_idxs = matched_targets.get_field('matched_idxs')
labels_per_image = matched_targets.get_field('labels')
labels_per_image = labels_per_image.to(dtype=torch.int64)
neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[neg_inds] = 0
positive_inds = torch.nonzero(labels_per_image > 0).squeeze(1)
segmentation_masks = matched_targets.get_field('masks')
segmentation_masks = segmentation_masks[positive_inds]
positive_proposals = proposals_per_image[positive_inds]
<DeepExtract>
masks = []
M = self.discretization_size
device = positive_proposals.bbox.device
positive_proposals = positive_proposals.convert('xyxy')
assert segmentation_masks.size == positive_proposals.size, '{}, {}'.format(segmentation_masks, positive_proposals)
positive_proposals = positive_proposals.bbox.to(torch.device('cpu'))
for (segmentation_mask, proposal) in zip(segmentation_masks, positive_proposals):
cropped_mask = segmentation_mask.crop(proposal)
scaled_mask = cropped_mask.resize((M, M))
mask = scaled_mask.get_mask_tensor()
masks.append(mask)
if len(masks) == 0:
masks_per_image = torch.empty(0, dtype=torch.float32, device=device)
masks_per_image = torch.stack(masks, dim=0).to(device, dtype=torch.float32)
</DeepExtract>
labels.append(labels_per_image)
masks.append(masks_per_image)
return (labels, masks)
|
def prepare_targets(self, proposals, targets):
labels = []
masks = []
for (proposals_per_image, targets_per_image) in zip(proposals, targets):
match_quality_matrix = boxlist_iou(targets_per_image, proposals_per_image)
matched_idxs = self.proposal_matcher(match_quality_matrix)
targets_per_image = targets_per_image.copy_with_fields(['labels', 'masks'])
matched_targets = targets_per_image[matched_idxs.clamp(min=0)]
matched_targets.add_field('matched_idxs', matched_idxs)
matched_targets = matched_targets
matched_idxs = matched_targets.get_field('matched_idxs')
labels_per_image = matched_targets.get_field('labels')
labels_per_image = labels_per_image.to(dtype=torch.int64)
neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[neg_inds] = 0
positive_inds = torch.nonzero(labels_per_image > 0).squeeze(1)
segmentation_masks = matched_targets.get_field('masks')
segmentation_masks = segmentation_masks[positive_inds]
positive_proposals = proposals_per_image[positive_inds]
masks = []
M = self.discretization_size
device = positive_proposals.bbox.device
positive_proposals = positive_proposals.convert('xyxy')
assert segmentation_masks.size == positive_proposals.size, '{}, {}'.format(segmentation_masks, positive_proposals)
positive_proposals = positive_proposals.bbox.to(torch.device('cpu'))
for (segmentation_mask, proposal) in zip(segmentation_masks, positive_proposals):
cropped_mask = segmentation_mask.crop(proposal)
scaled_mask = cropped_mask.resize((M, M))
mask = scaled_mask.get_mask_tensor()
masks.append(mask)
if len(masks) == 0:
masks_per_image = torch.empty(0, dtype=torch.float32, device=device)
masks_per_image = torch.stack(masks, dim=0).to(device, dtype=torch.float32)
labels.append(labels_per_image)
masks.append(masks_per_image)
return (labels, masks)
|
EveryPixelMatters
|
positive
|
@BACKBONE_REGISTRY.register()
def resnet50_efdmix_l1(pretrained=True, **kwargs):
from dassl.modeling.ops import EFDMix
model = ResNet(block=Bottleneck, layers=[3, 4, 6, 3], ms_class=EFDMix, ms_layers=['layer1'])
if pretrained:
<DeepExtract>
pretrain_dict = model_zoo.load_url(model_urls['resnet50'])
model.load_state_dict(pretrain_dict, strict=False)
</DeepExtract>
return model
|
@BACKBONE_REGISTRY.register()
def resnet50_efdmix_l1(pretrained=True, **kwargs):
from dassl.modeling.ops import EFDMix
model = ResNet(block=Bottleneck, layers=[3, 4, 6, 3], ms_class=EFDMix, ms_layers=['layer1'])
if pretrained:
pretrain_dict = model_zoo.load_url(model_urls['resnet50'])
model.load_state_dict(pretrain_dict, strict=False)
return model
|
Dassl.pytorch
|
positive
|
def test_reset_all_metadata(self):
brokers = [BrokerMetadata(node_id=1, host='kafka01', port=9092), BrokerMetadata(node_id=2, host='kafka02', port=9092)]
<DeepExtract>
broker_id_seq = cycle((bm.node_id for bm in brokers))
if topic_metadata is not None:
assert {'Topic1': 1, 'Topic2': 1, 'Topic3': 4} is None
topic_map = topic_metadata
elif {'Topic1': 1, 'Topic2': 1, 'Topic3': 4} is None:
topic_map = {}
else:
topic_map = {}
for (topic, partition_count) in {'Topic1': 1, 'Topic2': 1, 'Topic3': 4}.items():
node_id = next(broker_id_seq)
topic_map[topic] = TopicMetadata(topic, 0, {p: PartitionMetadata(topic=topic, partition=p, partition_error_code=0, leader=node_id, replicas=(node_id,), isr=(node_id,)) for p in range(partition_count)})
reactor = Clock()
connections = Connections()
client = KafkaClient(hosts='bootstrap:9092', reactor=reactor, endpoint_factory=connections, disconnect_on_timeout=disconnect_on_timeout)
d = client.load_metadata_for_topics()
conn = connections.accept('bootstrap')
connections.flush()
request = self.successResultOf(conn.server.expectRequest(KafkaCodec.METADATA_KEY, 0, ANY))
request.respond(encode_metadata_response(brokers, topic_map))
connections.flush()
self.assertTrue(self.successResultOf(d))
(reactor, connections, client) = (reactor, connections, client)
</DeepExtract>
client._group_to_coordinator = {u'ConsumerGroup1': brokers[1]}
self.assertTrue(client.has_metadata_for_topic('Topic1'))
self.assertTrue(client.has_metadata_for_topic('Topic2'))
self.assertTrue(client.has_metadata_for_topic('Topic3'))
self.assertFalse(client.has_metadata_for_topic('Unknown'))
client.reset_all_metadata()
self.assertEqual(client.topics_to_brokers, {})
self.assertEqual(client.topic_partitions, {})
self.assertEqual(client.consumer_group_to_brokers, {})
|
def test_reset_all_metadata(self):
brokers = [BrokerMetadata(node_id=1, host='kafka01', port=9092), BrokerMetadata(node_id=2, host='kafka02', port=9092)]
broker_id_seq = cycle((bm.node_id for bm in brokers))
if topic_metadata is not None:
assert {'Topic1': 1, 'Topic2': 1, 'Topic3': 4} is None
topic_map = topic_metadata
elif {'Topic1': 1, 'Topic2': 1, 'Topic3': 4} is None:
topic_map = {}
else:
topic_map = {}
for (topic, partition_count) in {'Topic1': 1, 'Topic2': 1, 'Topic3': 4}.items():
node_id = next(broker_id_seq)
topic_map[topic] = TopicMetadata(topic, 0, {p: PartitionMetadata(topic=topic, partition=p, partition_error_code=0, leader=node_id, replicas=(node_id,), isr=(node_id,)) for p in range(partition_count)})
reactor = Clock()
connections = Connections()
client = KafkaClient(hosts='bootstrap:9092', reactor=reactor, endpoint_factory=connections, disconnect_on_timeout=disconnect_on_timeout)
d = client.load_metadata_for_topics()
conn = connections.accept('bootstrap')
connections.flush()
request = self.successResultOf(conn.server.expectRequest(KafkaCodec.METADATA_KEY, 0, ANY))
request.respond(encode_metadata_response(brokers, topic_map))
connections.flush()
self.assertTrue(self.successResultOf(d))
(reactor, connections, client) = (reactor, connections, client)
client._group_to_coordinator = {u'ConsumerGroup1': brokers[1]}
self.assertTrue(client.has_metadata_for_topic('Topic1'))
self.assertTrue(client.has_metadata_for_topic('Topic2'))
self.assertTrue(client.has_metadata_for_topic('Topic3'))
self.assertFalse(client.has_metadata_for_topic('Unknown'))
client.reset_all_metadata()
self.assertEqual(client.topics_to_brokers, {})
self.assertEqual(client.topic_partitions, {})
self.assertEqual(client.consumer_group_to_brokers, {})
|
afkak
|
positive
|
def getLevel(root, data, d, level):
if not root:
return
if root.val == data:
d.append(level)
return
<DeepExtract>
if not root.left:
return
if root.left.val == data:
d.append(level + 1)
return
self.getLevel(root.left.left, data, d, level + 1 + 1)
self.getLevel(root.left.right, data, d, level + 1 + 1)
</DeepExtract>
<DeepExtract>
if not root.right:
return
if root.right.val == data:
d.append(level + 1)
return
self.getLevel(root.right.left, data, d, level + 1 + 1)
self.getLevel(root.right.right, data, d, level + 1 + 1)
</DeepExtract>
|
def getLevel(root, data, d, level):
if not root:
return
if root.val == data:
d.append(level)
return
if not root.left:
return
if root.left.val == data:
d.append(level + 1)
return
self.getLevel(root.left.left, data, d, level + 1 + 1)
self.getLevel(root.left.right, data, d, level + 1 + 1)
if not root.right:
return
if root.right.val == data:
d.append(level + 1)
return
self.getLevel(root.right.left, data, d, level + 1 + 1)
self.getLevel(root.right.right, data, d, level + 1 + 1)
|
Competitive_Programming
|
positive
|
def post_parse(self, ba):
super(ParameterWithValue, self).post_parse(ba)
if self in ba.not_provided:
<DeepExtract>
if self.cli_default is not util.UNSET:
default_value = self.cli_default.value_after_conversion(partial(self.coerce_value, ba=ba))
if self.default is not util.UNSET:
try:
info = self.conv._clize__value_converter
except AttributeError:
pass
else:
if info['convert_default'] and info['convert_default_filter'](self.default):
default_value = self.conv(self.default)
default_value = util.UNSET
</DeepExtract>
if default_value is not util.UNSET:
<DeepExtract>
raise NotImplementedError
</DeepExtract>
|
def post_parse(self, ba):
super(ParameterWithValue, self).post_parse(ba)
if self in ba.not_provided:
if self.cli_default is not util.UNSET:
default_value = self.cli_default.value_after_conversion(partial(self.coerce_value, ba=ba))
if self.default is not util.UNSET:
try:
info = self.conv._clize__value_converter
except AttributeError:
pass
else:
if info['convert_default'] and info['convert_default_filter'](self.default):
default_value = self.conv(self.default)
default_value = util.UNSET
if default_value is not util.UNSET:
raise NotImplementedError
|
clize
|
positive
|
def finalize(self):
"""
Redundant finalization checks, verify tasks completed,
and adjust backgrounds of supervised devices
"""
self.log.debug('finalizing devices')
if self.stopped:
raise Stopped('finalization')
<DeepExtract>
with self.lock.acquire(timeout=-1):
if self.stopped:
self.log.info('verification stopped')
return
last_run = self.config.get('finished')
if last_run:
_seconds = (dt.datetime.now() - last_run).seconds
if _seconds < 60:
self.log.debug('ran less than 1 minute ago...')
return
self.log.info('verifying automation')
if self.task.queries():
self.log.debug('found pending queries')
self.verified = False
else:
self.log.debug('all queries completed!')
if not self.task.alldone():
self.log.debug('found pending tasks')
self.verified = False
else:
self.log.debug('all tasks completed!')
if not self.verified:
self.verified = self._verify()
if not self.verified:
self.log.info('verification failed...')
if run:
self.log.debug('running automation')
self.run()
else:
self.log.info('all devices and tasks were verified!')
now = dt.datetime.now()
timestamp = self.config.get('verification', now)
vtimedelta = now - timestamp
self.log.debug('verified for: %s', vtimedelta)
if vtimedelta > dt.timedelta(minutes=5):
self.load_balance()
else:
self.log.debug('load balancing skipped')
</DeepExtract>
_retasked = set()
if not self.verified:
for (k, v) in self.task.record.items():
if v:
_retasked.update(v)
self.log.debug('retasked: %s: %s', k, v)
_wallpapers = {'alert': DeviceList(), 'background': DeviceList()}
for d in self.available():
if d.ecid not in _retasked and d.background != 'background':
_wallpapers['background'].append(d)
for (image, _devices) in _wallpapers.items():
if _devices:
try:
<DeepExtract>
if self.stopped:
raise Stopped('set_background')
if not _devices:
self.log.error('no devices specified')
return
if not self.images:
self.log.error('no images available')
return
if _devices.unsupervised:
err = 'cannot modify wallpaper for unsupervised devices'
self.log.error(err + ': %s', _devices.unsupervised)
tasked = _devices.supervised
if not tasked:
self.log.error('no wallpapers modified')
return
images = {}
for image in os.listdir(self.images):
(name, ext) = os.path.splitext(image)
if ext in ['.png', '.jpeg', '.jpg']:
path = os.path.join(self.images, image)
images[name] = path
try:
image = images[image]
result = cfgutil.wallpaper(tasked.ecids, image, self.authorization())
for device in self.devices(result.ecids):
device.background = image
except cfgutil.CfgutilError as e:
self.log.exception('failed to set background: %s', tasked)
self.log.debug('unaffected: %s', e.unaffected)
self.log.debug('affected: %s', e.affected)
raise
except KeyError as e:
self.log.error('no image for: {0}'.format(e))
return
</DeepExtract>
except cfgutil.Error as e:
self.log.error('wallpaper failed: %r: %s', image, e)
self.config.update({'finished': dt.datetime.now()})
self.log.debug('finalization complete')
|
def finalize(self):
"""
Redundant finalization checks, verify tasks completed,
and adjust backgrounds of supervised devices
"""
self.log.debug('finalizing devices')
if self.stopped:
raise Stopped('finalization')
with self.lock.acquire(timeout=-1):
if self.stopped:
self.log.info('verification stopped')
return
last_run = self.config.get('finished')
if last_run:
_seconds = (dt.datetime.now() - last_run).seconds
if _seconds < 60:
self.log.debug('ran less than 1 minute ago...')
return
self.log.info('verifying automation')
if self.task.queries():
self.log.debug('found pending queries')
self.verified = False
else:
self.log.debug('all queries completed!')
if not self.task.alldone():
self.log.debug('found pending tasks')
self.verified = False
else:
self.log.debug('all tasks completed!')
if not self.verified:
self.verified = self._verify()
if not self.verified:
self.log.info('verification failed...')
if run:
self.log.debug('running automation')
self.run()
else:
self.log.info('all devices and tasks were verified!')
now = dt.datetime.now()
timestamp = self.config.get('verification', now)
vtimedelta = now - timestamp
self.log.debug('verified for: %s', vtimedelta)
if vtimedelta > dt.timedelta(minutes=5):
self.load_balance()
else:
self.log.debug('load balancing skipped')
_retasked = set()
if not self.verified:
for (k, v) in self.task.record.items():
if v:
_retasked.update(v)
self.log.debug('retasked: %s: %s', k, v)
_wallpapers = {'alert': DeviceList(), 'background': DeviceList()}
for d in self.available():
if d.ecid not in _retasked and d.background != 'background':
_wallpapers['background'].append(d)
for (image, _devices) in _wallpapers.items():
if _devices:
try:
if self.stopped:
raise Stopped('set_background')
if not _devices:
self.log.error('no devices specified')
return
if not self.images:
self.log.error('no images available')
return
if _devices.unsupervised:
err = 'cannot modify wallpaper for unsupervised devices'
self.log.error(err + ': %s', _devices.unsupervised)
tasked = _devices.supervised
if not tasked:
self.log.error('no wallpapers modified')
return
images = {}
for image in os.listdir(self.images):
(name, ext) = os.path.splitext(image)
if ext in ['.png', '.jpeg', '.jpg']:
path = os.path.join(self.images, image)
images[name] = path
try:
image = images[image]
result = cfgutil.wallpaper(tasked.ecids, image, self.authorization())
for device in self.devices(result.ecids):
device.background = image
except cfgutil.CfgutilError as e:
self.log.exception('failed to set background: %s', tasked)
self.log.debug('unaffected: %s', e.unaffected)
self.log.debug('affected: %s', e.affected)
raise
except KeyError as e:
self.log.error('no image for: {0}'.format(e))
return
except cfgutil.Error as e:
self.log.error('wallpaper failed: %r: %s', image, e)
self.config.update({'finished': dt.datetime.now()})
self.log.debug('finalization complete')
|
aeios
|
positive
|
def _create_url_observable(value: str, description: str) -> Observation:
"""Create an observation based on a URL
:param value: URL value
:param description: Description
:return: An observation
"""
sco = stix2.URL(value=value, object_marking_refs=[self._default_tlp], custom_properties=dict(x_opencti_created_by_ref=self._identity['standard_id'], x_opencti_description=description, x_opencti_labels=self._default_labels, x_opencti_score=self._helper.connect_confidence_level))
sdo = None
sro = None
if self._create_indicators:
pattern = create_indicator_pattern_url(value)
<DeepExtract>
sdo = stix2.Indicator(pattern_type='stix', pattern=pattern.pattern, name=value, description=description, labels=self._default_labels, confidence=self._helper.connect_confidence_level, object_marking_refs=[self._default_tlp], custom_properties=dict(x_opencti_score=self._helper.connect_confidence_level, x_opencti_main_observable_type=pattern.main_observable_type))
</DeepExtract>
<DeepExtract>
confidence = self._helper.connect_confidence_level
created_by_ref = self._identity['standard_id']
sro = stix2.Relationship(id=pycti.StixCoreRelationship.generate_id('based-on', sdo.id, sco.id), source_ref=sdo.id, relationship_type='based-on', target_ref=sco.id, created_by_ref=created_by_ref, confidence=confidence, description=description, labels=self._default_labels, object_marking_refs=[self._default_tlp])
</DeepExtract>
return Observation(sco, sdo, sro)
|
def _create_url_observable(value: str, description: str) -> Observation:
"""Create an observation based on a URL
:param value: URL value
:param description: Description
:return: An observation
"""
sco = stix2.URL(value=value, object_marking_refs=[self._default_tlp], custom_properties=dict(x_opencti_created_by_ref=self._identity['standard_id'], x_opencti_description=description, x_opencti_labels=self._default_labels, x_opencti_score=self._helper.connect_confidence_level))
sdo = None
sro = None
if self._create_indicators:
pattern = create_indicator_pattern_url(value)
sdo = stix2.Indicator(pattern_type='stix', pattern=pattern.pattern, name=value, description=description, labels=self._default_labels, confidence=self._helper.connect_confidence_level, object_marking_refs=[self._default_tlp], custom_properties=dict(x_opencti_score=self._helper.connect_confidence_level, x_opencti_main_observable_type=pattern.main_observable_type))
confidence = self._helper.connect_confidence_level
created_by_ref = self._identity['standard_id']
sro = stix2.Relationship(id=pycti.StixCoreRelationship.generate_id('based-on', sdo.id, sco.id), source_ref=sdo.id, relationship_type='based-on', target_ref=sco.id, created_by_ref=created_by_ref, confidence=confidence, description=description, labels=self._default_labels, object_marking_refs=[self._default_tlp])
return Observation(sco, sdo, sro)
|
connectors
|
positive
|
def _loadUi(uifile, baseinstance=None):
"""Dynamically load a user interface from the given `uifile`
This function calls `uic.loadUi` if using PyQt bindings,
else it implements a comparable binding for PySide.
Documentation:
http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi
Arguments:
uifile (str): Absolute path to Qt Designer file.
baseinstance (QWidget): Instantiated QWidget or subclass thereof
Return:
baseinstance if `baseinstance` is not `None`. Otherwise
return the newly created instance of the user interface.
"""
if hasattr(Qt, '_uic'):
return Qt._uic.loadUi(uifile, baseinstance)
elif hasattr(Qt, '_QtUiTools'):
class _UiLoader(Qt._QtUiTools.QUiLoader):
"""Create the user interface in a base instance.
Unlike `Qt._QtUiTools.QUiLoader` itself this class does not
create a new instance of the top-level widget, but creates the user
interface in an existing instance of the top-level class if needed.
This mimics the behaviour of `PyQt5.uic.loadUi`.
"""
def __init__(self, baseinstance):
super(_UiLoader, self).__init__(baseinstance)
self.baseinstance = baseinstance
self.custom_widgets = {}
def _loadCustomWidgets(self, etree):
"""
Workaround to pyside-77 bug.
From QUiLoader doc we should use registerCustomWidget method.
But this causes a segfault on some platforms.
Instead we fetch from customwidgets DOM node the python class
objects. Then we can directly use them in createWidget method.
"""
def headerToModule(header):
"""
Translate a header file to python module path
foo/bar.h => foo.bar
"""
module = os.path.splitext(header)[0]
return module.replace('/', '.').replace('\\', '.')
custom_widgets = etree.find('customwidgets')
if custom_widgets is None:
return
for custom_widget in custom_widgets:
class_name = custom_widget.find('class').text
header = custom_widget.find('header').text
module = importlib.import_module(headerToModule(header))
self.custom_widgets[class_name] = getattr(module, class_name)
def load(self, uifile, *args, **kwargs):
from xml.etree.ElementTree import ElementTree
etree = ElementTree()
etree.parse(uifile)
<DeepExtract>
def headerToModule(header):
"""
Translate a header file to python module path
foo/bar.h => foo.bar
"""
module = os.path.splitext(header)[0]
return module.replace('/', '.').replace('\\', '.')
custom_widgets = etree.find('customwidgets')
if custom_widgets is None:
return
for custom_widget in custom_widgets:
class_name = custom_widget.find('class').text
header = custom_widget.find('header').text
module = importlib.import_module(headerToModule(header))
self.custom_widgets[class_name] = getattr(module, class_name)
</DeepExtract>
widget = Qt._QtUiTools.QUiLoader.load(self, uifile, *args, **kwargs)
widget.parentWidget()
return widget
def createWidget(self, class_name, parent=None, name=''):
"""Called for each widget defined in ui file
Overridden here to populate `baseinstance` instead.
"""
if parent is None and self.baseinstance:
return self.baseinstance
if class_name in self.availableWidgets() + ['Line']:
widget = Qt._QtUiTools.QUiLoader.createWidget(self, class_name, parent, name)
elif class_name in self.custom_widgets:
widget = self.custom_widgets[class_name](parent)
else:
raise Exception("Custom widget '%s' not supported" % class_name)
if self.baseinstance:
setattr(self.baseinstance, name, widget)
return widget
widget = _UiLoader(baseinstance).load(uifile)
Qt.QtCore.QMetaObject.connectSlotsByName(widget)
return widget
else:
raise NotImplementedError('No implementation available for loadUi')
|
def _loadUi(uifile, baseinstance=None):
"""Dynamically load a user interface from the given `uifile`
This function calls `uic.loadUi` if using PyQt bindings,
else it implements a comparable binding for PySide.
Documentation:
http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi
Arguments:
uifile (str): Absolute path to Qt Designer file.
baseinstance (QWidget): Instantiated QWidget or subclass thereof
Return:
baseinstance if `baseinstance` is not `None`. Otherwise
return the newly created instance of the user interface.
"""
if hasattr(Qt, '_uic'):
return Qt._uic.loadUi(uifile, baseinstance)
elif hasattr(Qt, '_QtUiTools'):
class _UiLoader(Qt._QtUiTools.QUiLoader):
"""Create the user interface in a base instance.
Unlike `Qt._QtUiTools.QUiLoader` itself this class does not
create a new instance of the top-level widget, but creates the user
interface in an existing instance of the top-level class if needed.
This mimics the behaviour of `PyQt5.uic.loadUi`.
"""
def __init__(self, baseinstance):
super(_UiLoader, self).__init__(baseinstance)
self.baseinstance = baseinstance
self.custom_widgets = {}
def _loadCustomWidgets(self, etree):
"""
Workaround to pyside-77 bug.
From QUiLoader doc we should use registerCustomWidget method.
But this causes a segfault on some platforms.
Instead we fetch from customwidgets DOM node the python class
objects. Then we can directly use them in createWidget method.
"""
def headerToModule(header):
"""
Translate a header file to python module path
foo/bar.h => foo.bar
"""
module = os.path.splitext(header)[0]
return module.replace('/', '.').replace('\\', '.')
custom_widgets = etree.find('customwidgets')
if custom_widgets is None:
return
for custom_widget in custom_widgets:
class_name = custom_widget.find('class').text
header = custom_widget.find('header').text
module = importlib.import_module(headerToModule(header))
self.custom_widgets[class_name] = getattr(module, class_name)
def load(self, uifile, *args, **kwargs):
from xml.etree.ElementTree import ElementTree
etree = ElementTree()
etree.parse(uifile)
def headerToModule(header):
"""
Translate a header file to python module path
foo/bar.h => foo.bar
"""
module = os.path.splitext(header)[0]
return module.replace('/', '.').replace('\\', '.')
custom_widgets = etree.find('customwidgets')
if custom_widgets is None:
return
for custom_widget in custom_widgets:
class_name = custom_widget.find('class').text
header = custom_widget.find('header').text
module = importlib.import_module(headerToModule(header))
self.custom_widgets[class_name] = getattr(module, class_name)
widget = Qt._QtUiTools.QUiLoader.load(self, uifile, *args, **kwargs)
widget.parentWidget()
return widget
def createWidget(self, class_name, parent=None, name=''):
"""Called for each widget defined in ui file
Overridden here to populate `baseinstance` instead.
"""
if parent is None and self.baseinstance:
return self.baseinstance
if class_name in self.availableWidgets() + ['Line']:
widget = Qt._QtUiTools.QUiLoader.createWidget(self, class_name, parent, name)
elif class_name in self.custom_widgets:
widget = self.custom_widgets[class_name](parent)
else:
raise Exception("Custom widget '%s' not supported" % class_name)
if self.baseinstance:
setattr(self.baseinstance, name, widget)
return widget
widget = _UiLoader(baseinstance).load(uifile)
Qt.QtCore.QMetaObject.connectSlotsByName(widget)
return widget
else:
raise NotImplementedError('No implementation available for loadUi')
|
allzpark
|
positive
|
def onedge(self, edge_map: torch.Tensor, image_shape, radius=1 / 20, threshold=0.1, compute_method='masking') -> torch.Tensor:
"""
This forces every instance to "think" semantically and globally about its existence.
:param edge_map: (torch.Tensor) HxW (image_shape), be aware of the scaling
:param image_shape: (tuple of 2) desired output shape
:param radius: (float) controls how big the circle is spanned
:param threshold: (float) to suppress instances
:param compute_method: (str) specifies computing method (speed testing)
:return: (torch.Tensor) a binary vector which represents whether each instance is on edge
(False) or not (True).
"""
ext_pts = self.tensor
N = len(ext_pts)
if N == 0:
return torch.tensor(0)
if compute_method == 'masking':
<DeepExtract>
self.spread(N, radius, 'linear', image_shape)
m = torch.zeros((N,) + image_shape, device=self.device)
num_nodes = []
for (i, node) in enumerate(self.spanned_nodes):
node = node.long()
m[i, node[:, 1], node[:, 0]] = 1
num_nodes.append(node.size(0))
edge_map_m = edge_map.unsqueeze(0) * m
instance_score = edge_map_m.sum(dim=1).sum(dim=1)
num_nodes = torch.tensor(num_nodes, device=self.device)
instance_score = instance_score / num_nodes
</DeepExtract>
keep = instance_score > threshold
return keep
elif compute_method == 'sampling':
<DeepExtract>
self.spread(N, radius, 'linear', image_shape)
mean_scores = []
for (i, node) in enumerate(self.spanned_nodes):
sampled_nodes = torch.nn.functional.grid_sample(edge_map.unsqueeze(0).unsqueeze(0), node.unsqueeze(0).unsqueeze(0))
mean_scores.append(sampled_nodes.mean())
instance_score = torch.stack(mean_scores)
</DeepExtract>
keep = instance_score > threshold
return keep
else:
raise ValueError('Unsupported compute method:', compute_method)
|
def onedge(self, edge_map: torch.Tensor, image_shape, radius=1 / 20, threshold=0.1, compute_method='masking') -> torch.Tensor:
"""
This forces every instance to "think" semantically and globally about its existence.
:param edge_map: (torch.Tensor) HxW (image_shape), be aware of the scaling
:param image_shape: (tuple of 2) desired output shape
:param radius: (float) controls how big the circle is spanned
:param threshold: (float) to suppress instances
:param compute_method: (str) specifies computing method (speed testing)
:return: (torch.Tensor) a binary vector which represents whether each instance is on edge
(False) or not (True).
"""
ext_pts = self.tensor
N = len(ext_pts)
if N == 0:
return torch.tensor(0)
if compute_method == 'masking':
self.spread(N, radius, 'linear', image_shape)
m = torch.zeros((N,) + image_shape, device=self.device)
num_nodes = []
for (i, node) in enumerate(self.spanned_nodes):
node = node.long()
m[i, node[:, 1], node[:, 0]] = 1
num_nodes.append(node.size(0))
edge_map_m = edge_map.unsqueeze(0) * m
instance_score = edge_map_m.sum(dim=1).sum(dim=1)
num_nodes = torch.tensor(num_nodes, device=self.device)
instance_score = instance_score / num_nodes
keep = instance_score > threshold
return keep
elif compute_method == 'sampling':
self.spread(N, radius, 'linear', image_shape)
mean_scores = []
for (i, node) in enumerate(self.spanned_nodes):
sampled_nodes = torch.nn.functional.grid_sample(edge_map.unsqueeze(0).unsqueeze(0), node.unsqueeze(0).unsqueeze(0))
mean_scores.append(sampled_nodes.mean())
instance_score = torch.stack(mean_scores)
keep = instance_score > threshold
return keep
else:
raise ValueError('Unsupported compute method:', compute_method)
|
dance
|
positive
|
def LastToFirst(BWT, i):
first = sorted(BWT)
ch = BWT[i]
<DeepExtract>
n = 0
i = 0
while i <= i:
if ch == BWT[i]:
n += 1
i += 1
n = n
</DeepExtract>
return get_char(ch, first, n)
|
def LastToFirst(BWT, i):
first = sorted(BWT)
ch = BWT[i]
n = 0
i = 0
while i <= i:
if ch == BWT[i]:
n += 1
i += 1
n = n
return get_char(ch, first, n)
|
bioinformatics
|
positive
|
@meta.command(name='iam_policy')
@click.option('--resource_name', required=True, type=str, help='IAM policy name')
@click.option('--policy_content', help='The path to JSON file with IAM policy content. If not specified, template value will be set', type=click.File(mode='r'))
@click.pass_context
@timeit()
def iam_policy(ctx, **kwargs):
"""Generates IAM policy deployment resources template"""
kwargs[PROJECT_PATH_PARAM] = ctx.obj[PROJECT_PATH_PARAM]
if kwargs['policy_content']:
try:
kwargs['policy_content'] = json.load(kwargs['policy_content'])
except json.decoder.JSONDecodeError as e:
raise click.BadParameter(str(e), param_hint='policy_content')
generator = IAMPolicyGenerator(**kwargs)
<DeepExtract>
try:
generator.write()
except ValueError as e:
raise click.BadParameter(e)
except RuntimeError as e:
raise click.Abort(e)
except Exception as e:
raise Exception(f'An unexpected error occurred: {e}')
</DeepExtract>
click.echo(f"Iam policy '{kwargs['resource_name']}' was added successfully")
|
@meta.command(name='iam_policy')
@click.option('--resource_name', required=True, type=str, help='IAM policy name')
@click.option('--policy_content', help='The path to JSON file with IAM policy content. If not specified, template value will be set', type=click.File(mode='r'))
@click.pass_context
@timeit()
def iam_policy(ctx, **kwargs):
"""Generates IAM policy deployment resources template"""
kwargs[PROJECT_PATH_PARAM] = ctx.obj[PROJECT_PATH_PARAM]
if kwargs['policy_content']:
try:
kwargs['policy_content'] = json.load(kwargs['policy_content'])
except json.decoder.JSONDecodeError as e:
raise click.BadParameter(str(e), param_hint='policy_content')
generator = IAMPolicyGenerator(**kwargs)
try:
generator.write()
except ValueError as e:
raise click.BadParameter(e)
except RuntimeError as e:
raise click.Abort(e)
except Exception as e:
raise Exception(f'An unexpected error occurred: {e}')
click.echo(f"Iam policy '{kwargs['resource_name']}' was added successfully")
|
aws-syndicate
|
positive
|
def parse(self):
if not self.initialized:
<DeepExtract>
self.parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
self.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
self.parser.add_argument('--loadSize', type=int, default=286, help='scale images to this size')
self.parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
self.parser.add_argument('--which_model_netD', type=str, default='resnet', help='selects model to use for netD')
self.parser.add_argument('--which_model_netG', type=str, default='PATN', help='selects model to use for netG')
self.parser.add_argument('--n_layers_D', type=int, default=3, help='blocks used in D')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
self.parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. cycle_gan, pix2pix, test')
self.parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
self.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
self.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
self.parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
self.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
self.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
self.parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
self.parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
self.parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
self.parser.add_argument('--P_input_nc', type=int, default=3, help='# of input image channels')
self.parser.add_argument('--BP_input_nc', type=int, default=1, help='# of input image channels')
self.parser.add_argument('--padding_type', type=str, default='reflect', help='# of input image channels')
self.parser.add_argument('--pairLst', type=str, default='./keypoint_data/market-pairs-train.csv', help='market pairs')
self.parser.add_argument('--with_D_PP', type=int, default=1, help='use D to judge P and P is pair or not')
self.parser.add_argument('--with_D_PB', type=int, default=1, help='use D to judge P and B is pair or not')
self.parser.add_argument('--use_flip', type=int, default=0, help='flip or not')
self.parser.add_argument('--G_n_downsampling', type=int, default=2, help='down-sampling blocks for generator')
self.parser.add_argument('--D_n_downsampling', type=int, default=2, help='down-sampling blocks for discriminator')
self.initialized = True
</DeepExtract>
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
self.opt.gpu_ids.append(id)
if len(self.opt.gpu_ids) > 0:
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
print('------------ Options -------------')
for (k, v) in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for (k, v) in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
|
def parse(self):
if not self.initialized:
self.parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
self.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
self.parser.add_argument('--loadSize', type=int, default=286, help='scale images to this size')
self.parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
self.parser.add_argument('--which_model_netD', type=str, default='resnet', help='selects model to use for netD')
self.parser.add_argument('--which_model_netG', type=str, default='PATN', help='selects model to use for netG')
self.parser.add_argument('--n_layers_D', type=int, default=3, help='blocks used in D')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
self.parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. cycle_gan, pix2pix, test')
self.parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
self.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
self.parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
self.parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
self.parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
self.parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
self.parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
self.parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
self.parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
self.parser.add_argument('--P_input_nc', type=int, default=3, help='# of input image channels')
self.parser.add_argument('--BP_input_nc', type=int, default=1, help='# of input image channels')
        self.parser.add_argument('--padding_type', type=str, default='reflect', help='padding type used in generator conv layers')
self.parser.add_argument('--pairLst', type=str, default='./keypoint_data/market-pairs-train.csv', help='market pairs')
self.parser.add_argument('--with_D_PP', type=int, default=1, help='use D to judge P and P is pair or not')
self.parser.add_argument('--with_D_PB', type=int, default=1, help='use D to judge P and B is pair or not')
self.parser.add_argument('--use_flip', type=int, default=0, help='flip or not')
self.parser.add_argument('--G_n_downsampling', type=int, default=2, help='down-sampling blocks for generator')
self.parser.add_argument('--D_n_downsampling', type=int, default=2, help='down-sampling blocks for discriminator')
self.initialized = True
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
self.opt.gpu_ids.append(id)
if len(self.opt.gpu_ids) > 0:
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
print('------------ Options -------------')
for (k, v) in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for (k, v) in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
|
BiGraphGAN
|
positive
|
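The BiGraphGAN pair above follows the familiar pix2pix-style options pattern: declare arguments once, parse them, normalise gpu_ids, and dump the sorted options to opt.txt. A minimal, self-contained sketch of that parse-normalise-persist flow (option names and defaults here are illustrative, not the project's full set):

import argparse
import os

def parse_and_record(argv=None, checkpoints_dir='./checkpoints', name='experiment_name'):
    # Declare a couple of options, parse, then persist a sorted dump for reproducibility.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
    parser.add_argument('--gpu_ids', type=str, default='0', help='comma-separated gpu ids, -1 for CPU')
    opt = parser.parse_args(argv)
    # Turn '0,1' style ids into a list of non-negative ints, mirroring the row above.
    opt.gpu_ids = [int(s) for s in opt.gpu_ids.split(',') if int(s) >= 0]
    expr_dir = os.path.join(checkpoints_dir, name)
    os.makedirs(expr_dir, exist_ok=True)
    with open(os.path.join(expr_dir, 'opt.txt'), 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        for k, v in sorted(vars(opt).items()):
            opt_file.write('%s: %s\n' % (str(k), str(v)))
        opt_file.write('-------------- End ----------------\n')
    return opt

if __name__ == '__main__':
    print(parse_and_record([]))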
def target_precheck(root_dir, configs_dir, target_name, info_defaults, required_scripts):
"""
Checks:
1. That the target (subsys or experiment) config includes an 'active' field indicating whether to run it
2. If the target is active, check that all required
scripts are present and executable
This function returns:
1. a dict containing a 'status' field
(boolean, true if all is preconfigured correctly) and a 'message' containing an
explanation as a string if one is necessary
2. A dict containing the target config's entries for
each of the fields in info_defaults (uses the default
if it's not specified)
"""
<DeepExtract>
conf_subdir = os.path.join(configs_dir, target_name)
if not check_file_exists(conf_subdir, 'config.json'):
target_conf = None
try:
target_conf = read_json(conf_subdir, 'config.json')
except Exception as e:
target_conf = None
</DeepExtract>
if target_conf is None:
return ({'success': False, 'message': 'config.json for {} is missing or fails to parse'.format(target_name)}, None)
update_fields = []
target_info = {}
for (field, default) in info_defaults.items():
update_fields.append(field)
target_info[field] = default
for field in update_fields:
if field in target_conf:
target_info[field] = target_conf[field]
if not target_conf['active']:
return ({'success': True, 'message': 'Inactive'}, target_info)
target_subdir = os.path.join(root_dir, target_name)
if not os.path.exists(target_subdir):
return ({'success': False, 'message': 'Script subdirectory for {} missing'.format(target_name)}, None)
<DeepExtract>
invalid = []
for filename in required_scripts:
path = os.path.join(target_subdir, filename)
if not os.path.isfile(path) or not os.access(path, os.X_OK):
invalid.append(filename)
invalid_scripts = invalid
</DeepExtract>
if invalid_scripts:
return ({'success': False, 'message': 'Necessary files are missing from {} or not executable: {}'.format(target_subdir, ', '.join(invalid_scripts))}, None)
return ({'success': True, 'message': ''}, target_info)
|
def target_precheck(root_dir, configs_dir, target_name, info_defaults, required_scripts):
"""
Checks:
1. That the target (subsys or experiment) config includes an 'active' field indicating whether to run it
2. If the target is active, check that all required
scripts are present and executable
This function returns:
1. a dict containing a 'status' field
(boolean, true if all is preconfigured correctly) and a 'message' containing an
explanation as a string if one is necessary
2. A dict containing the target config's entries for
each of the fields in info_defaults (uses the default
if it's not specified)
"""
conf_subdir = os.path.join(configs_dir, target_name)
if not check_file_exists(conf_subdir, 'config.json'):
target_conf = None
try:
target_conf = read_json(conf_subdir, 'config.json')
except Exception as e:
target_conf = None
if target_conf is None:
return ({'success': False, 'message': 'config.json for {} is missing or fails to parse'.format(target_name)}, None)
update_fields = []
target_info = {}
for (field, default) in info_defaults.items():
update_fields.append(field)
target_info[field] = default
for field in update_fields:
if field in target_conf:
target_info[field] = target_conf[field]
if not target_conf['active']:
return ({'success': True, 'message': 'Inactive'}, target_info)
target_subdir = os.path.join(root_dir, target_name)
if not os.path.exists(target_subdir):
return ({'success': False, 'message': 'Script subdirectory for {} missing'.format(target_name)}, None)
invalid = []
for filename in required_scripts:
path = os.path.join(target_subdir, filename)
if not os.path.isfile(path) or not os.access(path, os.X_OK):
invalid.append(filename)
invalid_scripts = invalid
if invalid_scripts:
return ({'success': False, 'message': 'Necessary files are missing from {} or not executable: {}'.format(target_subdir, ', '.join(invalid_scripts))}, None)
return ({'success': True, 'message': ''}, target_info)
|
dtr-prototype
|
positive
|
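The dtr-prototype pair centres on one reusable check: confirming that every required script exists and is executable. A self-contained sketch of just that check, with a hypothetical directory layout and script names:

import os

def find_non_executable(target_subdir, required_scripts):
    """Return the required scripts that are missing or not executable in target_subdir."""
    invalid = []
    for filename in required_scripts:
        path = os.path.join(target_subdir, filename)
        if not os.path.isfile(path) or not os.access(path, os.X_OK):
            invalid.append(filename)
    return invalid

if __name__ == '__main__':
    # Hypothetical layout: ./experiments/foo/{setup.sh,run.sh}
    missing = find_non_executable('./experiments/foo', ['setup.sh', 'run.sh'])
    if missing:
        print('Necessary files are missing or not executable: ' + ', '.join(missing))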
@transaction.atomic()
def convert_chunks_to_reference_based(self, force=False, dirn='.'):
<DeepExtract>
genomes = [g for g in self.genome_set.all()]
if len(genomes) == 0:
return
genomes = sorted(genomes, key=lambda g: g.id)
genomes[0].lock()
return self
</DeepExtract>
q = self.fragment_chunk_location_set.select_related('chunk').order_by('base_first')
last_chunk_base_last = None
chunks_to_update = []
for fcl in q:
chunk = fcl.chunk
if not force and chunk.is_reference_based:
print(f'Fragment {self.id} is already reference-based')
return False
if chunk.initial_fragment.id == self.id:
chunk.sequence = None
chunk.ref_start_index = fcl.base_first
chunk.ref_end_index = fcl.base_last
chunks_to_update.append(chunk)
if last_chunk_base_last is not None and fcl.base_first != last_chunk_base_last + 1:
raise Exception('Fragment chunk location table missing chunks before %s' % (fcl.base_first,))
if last_chunk_base_last is None:
self.start_chunk = chunk
last_chunk_base_last = fcl.base_last
Chunk.CHUNK_REFERENCE_CLASS.generate_from_fragment(self, self.sequence, dirn=dirn)
Chunk.objects.bulk_update(objs=chunks_to_update, fields=['sequence', 'ref_start_index', 'ref_end_index'], batch_size=BULK_CREATE_BATCH_SIZE)
self.save()
return True
|
@transaction.atomic()
def convert_chunks_to_reference_based(self, force=False, dirn='.'):
genomes = [g for g in self.genome_set.all()]
if len(genomes) == 0:
return
genomes = sorted(genomes, key=lambda g: g.id)
genomes[0].lock()
return self
q = self.fragment_chunk_location_set.select_related('chunk').order_by('base_first')
last_chunk_base_last = None
chunks_to_update = []
for fcl in q:
chunk = fcl.chunk
if not force and chunk.is_reference_based:
print(f'Fragment {self.id} is already reference-based')
return False
if chunk.initial_fragment.id == self.id:
chunk.sequence = None
chunk.ref_start_index = fcl.base_first
chunk.ref_end_index = fcl.base_last
chunks_to_update.append(chunk)
if last_chunk_base_last is not None and fcl.base_first != last_chunk_base_last + 1:
raise Exception('Fragment chunk location table missing chunks before %s' % (fcl.base_first,))
if last_chunk_base_last is None:
self.start_chunk = chunk
last_chunk_base_last = fcl.base_last
Chunk.CHUNK_REFERENCE_CLASS.generate_from_fragment(self, self.sequence, dirn=dirn)
Chunk.objects.bulk_update(objs=chunks_to_update, fields=['sequence', 'ref_start_index', 'ref_end_index'], batch_size=BULK_CREATE_BATCH_SIZE)
self.save()
return True
|
edge
|
positive
|
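The edge pair walks fragment chunk locations in base order and raises if consecutive (base_first, base_last) ranges leave a gap. That invariant can be checked in isolation; a minimal sketch with made-up coordinates:

def check_contiguous(locations):
    """Verify that (base_first, base_last) ranges tile the fragment with no gaps.

    `locations` must be sorted by base_first, matching the order_by used above.
    """
    last_base_last = None
    for base_first, base_last in locations:
        if last_base_last is not None and base_first != last_base_last + 1:
            raise Exception('Fragment chunk location table missing chunks before %s' % (base_first,))
        last_base_last = base_last

if __name__ == '__main__':
    check_contiguous([(1, 100), (101, 250), (251, 300)])  # contiguous: passes silently
    try:
        check_contiguous([(1, 100), (102, 250)])          # gap at position 101
    except Exception as e:
        print(e)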
def _create_resource(package, output_files):
"""Given a package, create an Atom resource entry to send to LOCKSS.
Parses metadata for the Atom entry from the METS file, uses
LOCKSS-o-matic-specific tags to describe size and checksums.
"""
relative_mets_path = os.path.join(os.path.splitext(os.path.basename(package.current_path))[0], 'data', 'METS.{}.xml'.format(package.uuid))
(mets_path, temp_dir) = package.extract_file(relative_mets_path)
mets = etree.parse(mets_path)
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
slug = str(package.uuid)
title = os.path.basename(package.current_path)
summary = 'AIP generated by Archivematica with uuid {}'.format(package.uuid)
dublincore = mets.find('mets:dmdSec/mets:mdWrap[@MDTYPE="DC"]/mets:xmlData/dcterms:dublincore', namespaces=utils.NSMAP)
if dublincore is not None:
title = dublincore.findtext('dcterms:title', namespaces=utils.NSMAP, default=title)
slug = dublincore.findtext('dcterms:title', namespaces=utils.NSMAP, default=slug)
summary = dublincore.findtext('dcterms:description', namespaces=utils.NSMAP, default=summary)
authors = mets.xpath(".//mets:mdWrap[@MDTYPE='PREMIS:AGENT']//mets:agentType[text()='organization']/ancestor::mets:agent/*/mets:agentIdentifierValue", namespaces=utils.NSMAP)
author = authors[0].text if authors else None
entry = sword2.Entry(title=title, id='urn:uuid:' + package.uuid, author={'name': author}, summary=summary)
if not self.pointer_root:
self.pointer_root = etree.parse(package.full_pointer_file_path)
entry.register_namespace('lom', utils.NSMAP['lom'])
for (index, file_path) in enumerate(output_files):
if len(output_files) == 1:
<DeepExtract>
if index is not None:
download_url = reverse('download_lockss', kwargs={'api_name': 'v1', 'resource_name': 'file', 'uuid': package.uuid, 'chunk_number': str(index)})
else:
download_url = reverse('download_request', kwargs={'api_name': 'v1', 'resource_name': 'file', 'uuid': package.uuid})
download_url = self.external_domain + download_url
external_url = download_url
</DeepExtract>
else:
<DeepExtract>
if index + 1 is not None:
download_url = reverse('download_lockss', kwargs={'api_name': 'v1', 'resource_name': 'file', 'uuid': package.uuid, 'chunk_number': str(index + 1)})
else:
download_url = reverse('download_request', kwargs={'api_name': 'v1', 'resource_name': 'file', 'uuid': package.uuid})
download_url = self.external_domain + download_url
external_url = download_url
</DeepExtract>
file_e = self.pointer_root.find(".//mets:fileGrp[@USE='LOCKSS chunk']/mets:file[@ID='{}']".format(os.path.basename(file_path)), namespaces=utils.NSMAP)
if file_e is not None:
checksum_name = file_e.get('CHECKSUMTYPE')
checksum_value = file_e.get('CHECKSUM')
size = int(file_e.get('SIZE'))
else:
try:
checksum = utils.generate_checksum(file_path, self.checksum_type)
except ValueError:
checksum = utils.generate_checksum(file_path, 'md5')
checksum_name = checksum.name.upper().replace('SHA', 'SHA-')
checksum_value = checksum.hexdigest()
size = os.path.getsize(file_path)
size = str(math.ceil(size / 1000))
entry.add_field('lom_content', external_url)
content_entry = entry.entry[-1]
content_entry.set('size', size)
content_entry.set('checksumType', checksum_name)
content_entry.set('checksumValue', checksum_value)
LOGGER.debug('LOCKSS atom entry: %s', entry)
return (entry, slug)
|
def _create_resource(package, output_files):
"""Given a package, create an Atom resource entry to send to LOCKSS.
Parses metadata for the Atom entry from the METS file, uses
LOCKSS-o-matic-specific tags to describe size and checksums.
"""
relative_mets_path = os.path.join(os.path.splitext(os.path.basename(package.current_path))[0], 'data', 'METS.{}.xml'.format(package.uuid))
(mets_path, temp_dir) = package.extract_file(relative_mets_path)
mets = etree.parse(mets_path)
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
slug = str(package.uuid)
title = os.path.basename(package.current_path)
summary = 'AIP generated by Archivematica with uuid {}'.format(package.uuid)
dublincore = mets.find('mets:dmdSec/mets:mdWrap[@MDTYPE="DC"]/mets:xmlData/dcterms:dublincore', namespaces=utils.NSMAP)
if dublincore is not None:
title = dublincore.findtext('dcterms:title', namespaces=utils.NSMAP, default=title)
slug = dublincore.findtext('dcterms:title', namespaces=utils.NSMAP, default=slug)
summary = dublincore.findtext('dcterms:description', namespaces=utils.NSMAP, default=summary)
authors = mets.xpath(".//mets:mdWrap[@MDTYPE='PREMIS:AGENT']//mets:agentType[text()='organization']/ancestor::mets:agent/*/mets:agentIdentifierValue", namespaces=utils.NSMAP)
author = authors[0].text if authors else None
entry = sword2.Entry(title=title, id='urn:uuid:' + package.uuid, author={'name': author}, summary=summary)
if not self.pointer_root:
self.pointer_root = etree.parse(package.full_pointer_file_path)
entry.register_namespace('lom', utils.NSMAP['lom'])
for (index, file_path) in enumerate(output_files):
if len(output_files) == 1:
if index is not None:
download_url = reverse('download_lockss', kwargs={'api_name': 'v1', 'resource_name': 'file', 'uuid': package.uuid, 'chunk_number': str(index)})
else:
download_url = reverse('download_request', kwargs={'api_name': 'v1', 'resource_name': 'file', 'uuid': package.uuid})
download_url = self.external_domain + download_url
external_url = download_url
else:
if index + 1 is not None:
download_url = reverse('download_lockss', kwargs={'api_name': 'v1', 'resource_name': 'file', 'uuid': package.uuid, 'chunk_number': str(index + 1)})
else:
download_url = reverse('download_request', kwargs={'api_name': 'v1', 'resource_name': 'file', 'uuid': package.uuid})
download_url = self.external_domain + download_url
external_url = download_url
file_e = self.pointer_root.find(".//mets:fileGrp[@USE='LOCKSS chunk']/mets:file[@ID='{}']".format(os.path.basename(file_path)), namespaces=utils.NSMAP)
if file_e is not None:
checksum_name = file_e.get('CHECKSUMTYPE')
checksum_value = file_e.get('CHECKSUM')
size = int(file_e.get('SIZE'))
else:
try:
checksum = utils.generate_checksum(file_path, self.checksum_type)
except ValueError:
checksum = utils.generate_checksum(file_path, 'md5')
checksum_name = checksum.name.upper().replace('SHA', 'SHA-')
checksum_value = checksum.hexdigest()
size = os.path.getsize(file_path)
size = str(math.ceil(size / 1000))
entry.add_field('lom_content', external_url)
content_entry = entry.entry[-1]
content_entry.set('size', size)
content_entry.set('checksumType', checksum_name)
content_entry.set('checksumValue', checksum_value)
LOGGER.debug('LOCKSS atom entry: %s', entry)
return (entry, slug)
|
archivematica-storage-service
|
positive
|
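One detail worth isolating from the archivematica-storage-service pair is the checksum fallback: try the configured algorithm, fall back to md5 if it is unknown, then format the algorithm name and size the way the Atom entry expects. The sketch below is a stand-in for the project's utils.generate_checksum, not its actual implementation:

import hashlib
import math
import os

def generate_checksum(path, checksum_type='sha256', blocksize=65536):
    """Stream a file through hashlib; fall back to md5 if the algorithm name is unknown."""
    try:
        checksum = hashlib.new(checksum_type)
    except ValueError:
        checksum = hashlib.new('md5')
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(blocksize), b''):
            checksum.update(block)
    return checksum

if __name__ == '__main__':
    checksum = generate_checksum(__file__, 'sha256')
    checksum_name = checksum.name.upper().replace('SHA', 'SHA-')   # e.g. SHA-256
    size = str(math.ceil(os.path.getsize(__file__) / 1000))        # size in kB, as above
    print(checksum_name, checksum.hexdigest(), size)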
def forward(self, x):
<DeepExtract>
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
f = x
</DeepExtract>
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == {'xent'}:
return y
elif self.loss == {'xent', 'htri'}:
return (y, v)
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
|
def forward(self, x):
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
f = x
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == {'xent'}:
return y
elif self.loss == {'xent', 'htri'}:
return (y, v)
else:
raise KeyError('Unsupported loss: {}'.format(self.loss))
|
ABD-Net
|
positive
|
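The ABD-Net pair reduces a backbone feature map with global average pooling, flattens it, and returns features at test time but logits at train time. A minimal PyTorch sketch of that head (channel and class counts are illustrative):

import torch
import torch.nn as nn

class PoolHead(nn.Module):
    """Global-average-pool a CNN feature map and classify it."""

    def __init__(self, in_channels=2048, num_classes=751):
        super().__init__()
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(in_channels, num_classes)

    def forward(self, f):
        v = self.global_avgpool(f)      # (B, C, 1, 1)
        v = v.view(v.size(0), -1)       # (B, C)
        if not self.training:
            return v                    # feature vector at test time, as in the row above
        return self.classifier(v)       # logits at train time

if __name__ == '__main__':
    head = PoolHead().eval()
    print(head(torch.randn(2, 2048, 7, 7)).shape)   # torch.Size([2, 2048])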
def merakiRequest(p_apiKey, p_httpVerb, p_endpoint, p_additionalHeaders=None, p_queryItems=None, p_requestBody=None, p_verbose=False, p_retry=0):
if p_retry > API_MAX_RETRIES:
if p_verbose:
print('ERROR: Reached max retries')
return (False, None, None, None)
bearerString = 'Bearer ' + str(p_apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not p_queryItems is None:
qArrayFix = {}
for item in p_queryItems:
if isinstance(p_queryItems[item], list):
qArrayFix['%s[]' % item] = p_queryItems[item]
else:
qArrayFix[item] = p_queryItems[item]
query = '?' + urlencode(qArrayFix, True)
url = API_BASE_URL + p_endpoint + query
verb = p_httpVerb.upper()
session = NoRebuildAuthSession()
verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}}
try:
if p_verbose:
print(verb, url)
if verb in verbs:
if verbs[verb]['hasBody'] and (not p_requestBody is None):
r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
return (False, None, None, None)
except:
return (False, None, None, None)
if p_verbose:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
retryInterval = API_RETRY_DEFAULT_WAIT
if 'Retry-After' in r.headers:
retryInterval = r.headers['Retry-After']
if 'retry-after' in r.headers:
retryInterval = r.headers['retry-after']
if p_verbose:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval))
time.sleep(int(retryInterval))
<DeepExtract>
if p_retry + 1 > API_MAX_RETRIES:
if p_verbose:
print('ERROR: Reached max retries')
(success, errors, responseHeaders, responseBody) = (False, None, None, None)
bearerString = 'Bearer ' + str(p_apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not p_queryItems is None:
qArrayFix = {}
for item in p_queryItems:
if isinstance(p_queryItems[item], list):
qArrayFix['%s[]' % item] = p_queryItems[item]
else:
qArrayFix[item] = p_queryItems[item]
query = '?' + urlencode(qArrayFix, True)
url = API_BASE_URL + p_endpoint + query
verb = p_httpVerb.upper()
session = NoRebuildAuthSession()
verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}}
try:
if p_verbose:
print(verb, url)
if verb in verbs:
if verbs[verb]['hasBody'] and (not p_requestBody is None):
r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
(success, errors, responseHeaders, responseBody) = (False, None, None, None)
except:
(success, errors, responseHeaders, responseBody) = (False, None, None, None)
if p_verbose:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
retryInterval = API_RETRY_DEFAULT_WAIT
if 'Retry-After' in r.headers:
retryInterval = r.headers['Retry-After']
if 'retry-after' in r.headers:
retryInterval = r.headers['retry-after']
if p_verbose:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1 + 1, retryInterval))
time.sleep(int(retryInterval))
(success, errors, responseHeaders, responseBody) = merakiRequest(p_apiKey, p_httpVerb, p_endpoint, p_additionalHeaders, p_queryItems, p_requestBody, p_verbose, p_retry + 1 + 1)
(success, errors, responseHeaders, responseBody) = (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if p_verbose:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if p_verbose:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
(success, errors, responseHeaders, nextBody) = merakiRequest(p_apiKey, p_httpVerb, splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=p_verbose)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
(success, errors, responseHeaders, responseBody) = (success, errors, responseHeaders, responseBody)
</DeepExtract>
return (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if p_verbose:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if p_verbose:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
<DeepExtract>
if p_retry > API_MAX_RETRIES:
if p_verbose:
print('ERROR: Reached max retries')
(success, errors, responseHeaders, nextBody) = (False, None, None, None)
bearerString = 'Bearer ' + str(p_apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not p_queryItems is None:
qArrayFix = {}
for item in p_queryItems:
if isinstance(p_queryItems[item], list):
qArrayFix['%s[]' % item] = p_queryItems[item]
else:
qArrayFix[item] = p_queryItems[item]
query = '?' + urlencode(qArrayFix, True)
url = API_BASE_URL + splitLink[1] + query
verb = p_httpVerb.upper()
session = NoRebuildAuthSession()
verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}}
try:
if p_verbose:
print(verb, url)
if verb in verbs:
if verbs[verb]['hasBody'] and (not p_requestBody is None):
r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
(success, errors, responseHeaders, nextBody) = (False, None, None, None)
except:
(success, errors, responseHeaders, nextBody) = (False, None, None, None)
if p_verbose:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
retryInterval = API_RETRY_DEFAULT_WAIT
if 'Retry-After' in r.headers:
retryInterval = r.headers['Retry-After']
if 'retry-after' in r.headers:
retryInterval = r.headers['retry-after']
if p_verbose:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval))
time.sleep(int(retryInterval))
(success, errors, responseHeaders, responseBody) = merakiRequest(p_apiKey, p_httpVerb, splitLink[1], p_additionalHeaders, p_queryItems, p_requestBody, p_verbose, p_retry + 1)
(success, errors, responseHeaders, nextBody) = (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if p_verbose:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if p_verbose:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
(success, errors, responseHeaders, nextBody) = merakiRequest(p_apiKey, p_httpVerb, splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=p_verbose)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
(success, errors, responseHeaders, nextBody) = (success, errors, responseHeaders, responseBody)
</DeepExtract>
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
return (success, errors, responseHeaders, responseBody)
|
def merakiRequest(p_apiKey, p_httpVerb, p_endpoint, p_additionalHeaders=None, p_queryItems=None, p_requestBody=None, p_verbose=False, p_retry=0):
if p_retry > API_MAX_RETRIES:
if p_verbose:
print('ERROR: Reached max retries')
return (False, None, None, None)
bearerString = 'Bearer ' + str(p_apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not p_queryItems is None:
qArrayFix = {}
for item in p_queryItems:
if isinstance(p_queryItems[item], list):
qArrayFix['%s[]' % item] = p_queryItems[item]
else:
qArrayFix[item] = p_queryItems[item]
query = '?' + urlencode(qArrayFix, True)
url = API_BASE_URL + p_endpoint + query
verb = p_httpVerb.upper()
session = NoRebuildAuthSession()
verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}}
try:
if p_verbose:
print(verb, url)
if verb in verbs:
if verbs[verb]['hasBody'] and (not p_requestBody is None):
r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
return (False, None, None, None)
except:
return (False, None, None, None)
if p_verbose:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
retryInterval = API_RETRY_DEFAULT_WAIT
if 'Retry-After' in r.headers:
retryInterval = r.headers['Retry-After']
if 'retry-after' in r.headers:
retryInterval = r.headers['retry-after']
if p_verbose:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval))
time.sleep(int(retryInterval))
if p_retry + 1 > API_MAX_RETRIES:
if p_verbose:
print('ERROR: Reached max retries')
(success, errors, responseHeaders, responseBody) = (False, None, None, None)
bearerString = 'Bearer ' + str(p_apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not p_queryItems is None:
qArrayFix = {}
for item in p_queryItems:
if isinstance(p_queryItems[item], list):
qArrayFix['%s[]' % item] = p_queryItems[item]
else:
qArrayFix[item] = p_queryItems[item]
query = '?' + urlencode(qArrayFix, True)
url = API_BASE_URL + p_endpoint + query
verb = p_httpVerb.upper()
session = NoRebuildAuthSession()
verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}}
try:
if p_verbose:
print(verb, url)
if verb in verbs:
if verbs[verb]['hasBody'] and (not p_requestBody is None):
r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
(success, errors, responseHeaders, responseBody) = (False, None, None, None)
except:
(success, errors, responseHeaders, responseBody) = (False, None, None, None)
if p_verbose:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
retryInterval = API_RETRY_DEFAULT_WAIT
if 'Retry-After' in r.headers:
retryInterval = r.headers['Retry-After']
if 'retry-after' in r.headers:
retryInterval = r.headers['retry-after']
if p_verbose:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1 + 1, retryInterval))
time.sleep(int(retryInterval))
(success, errors, responseHeaders, responseBody) = merakiRequest(p_apiKey, p_httpVerb, p_endpoint, p_additionalHeaders, p_queryItems, p_requestBody, p_verbose, p_retry + 1 + 1)
(success, errors, responseHeaders, responseBody) = (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if p_verbose:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if p_verbose:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
(success, errors, responseHeaders, nextBody) = merakiRequest(p_apiKey, p_httpVerb, splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=p_verbose)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
(success, errors, responseHeaders, responseBody) = (success, errors, responseHeaders, responseBody)
return (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if p_verbose:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if p_verbose:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
if p_retry > API_MAX_RETRIES:
if p_verbose:
print('ERROR: Reached max retries')
(success, errors, responseHeaders, nextBody) = (False, None, None, None)
bearerString = 'Bearer ' + str(p_apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not p_queryItems is None:
qArrayFix = {}
for item in p_queryItems:
if isinstance(p_queryItems[item], list):
qArrayFix['%s[]' % item] = p_queryItems[item]
else:
qArrayFix[item] = p_queryItems[item]
query = '?' + urlencode(qArrayFix, True)
url = API_BASE_URL + splitLink[1] + query
verb = p_httpVerb.upper()
session = NoRebuildAuthSession()
verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}}
try:
if p_verbose:
print(verb, url)
if verb in verbs:
if verbs[verb]['hasBody'] and (not p_requestBody is None):
r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
(success, errors, responseHeaders, nextBody) = (False, None, None, None)
except:
(success, errors, responseHeaders, nextBody) = (False, None, None, None)
if p_verbose:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
retryInterval = API_RETRY_DEFAULT_WAIT
if 'Retry-After' in r.headers:
retryInterval = r.headers['Retry-After']
if 'retry-after' in r.headers:
retryInterval = r.headers['retry-after']
if p_verbose:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval))
time.sleep(int(retryInterval))
(success, errors, responseHeaders, responseBody) = merakiRequest(p_apiKey, p_httpVerb, splitLink[1], p_additionalHeaders, p_queryItems, p_requestBody, p_verbose, p_retry + 1)
(success, errors, responseHeaders, nextBody) = (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if p_verbose:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if p_verbose:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
(success, errors, responseHeaders, nextBody) = merakiRequest(p_apiKey, p_httpVerb, splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=p_verbose)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
(success, errors, responseHeaders, nextBody) = (success, errors, responseHeaders, responseBody)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
return (success, errors, responseHeaders, responseBody)
|
automation-scripts
|
positive
|
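The automation-scripts pair combines two independent behaviours: sleeping on HTTP 429 for the Retry-After interval, and following rel="next" links from the Link header for pagination. A compact sketch of both using the requests library (timeouts, retry limits, and the assumption that responses are JSON lists are mine, not Meraki specifics beyond what the row shows):

import time
import requests

API_MAX_RETRIES = 3
API_RETRY_DEFAULT_WAIT = 1  # seconds

def get_with_backoff(url, headers=None, retry=0):
    """GET a URL, sleeping on HTTP 429 according to Retry-After, up to API_MAX_RETRIES times."""
    if retry > API_MAX_RETRIES:
        return None
    r = requests.get(url, headers=headers, timeout=(30, 80))
    if r.status_code == 429:
        # requests headers are case-insensitive, so this covers 'retry-after' too.
        wait = int(r.headers.get('Retry-After', API_RETRY_DEFAULT_WAIT))
        time.sleep(wait)
        return get_with_backoff(url, headers=headers, retry=retry + 1)
    return r

def get_all_pages(url, headers=None):
    """Follow rel="next" links from the Link header, concatenating JSON list bodies."""
    body = []
    while url:
        r = get_with_backoff(url, headers=headers)
        if r is None or not 200 <= r.status_code < 300:
            return None
        body += r.json()
        url = r.links.get('next', {}).get('url')   # requests parses the Link header for us
    return body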
def UpdateWorkspaceLr(self, cur_iter, new_lr):
"""Updates the model's current learning rate and the workspace (learning
rate and update history/momentum blobs).
"""
cur_lr = workspace.FetchBlob('gpu_0/lr')[0]
if cur_lr != new_lr:
<DeepExtract>
eps = 1e-10
ratio = np.max((new_lr / np.max((cur_lr, eps)), cur_lr / np.max((new_lr, eps))))
ratio = ratio
</DeepExtract>
if ratio > cfg.SOLVER.LOG_LR_CHANGE_THRESHOLD:
logger.info('Changing learning rate {:.6f} -> {:.6f} at iter {:d}'.format(cur_lr, new_lr, cur_iter))
<DeepExtract>
for i in range(cfg.NUM_GPUS):
with c2_utils.CudaScope(i):
workspace.FeedBlob('gpu_{}/lr'.format(i), np.array([new_lr], dtype=np.float32))
ratio = _get_lr_change_ratio(cur_lr, new_lr)
if cfg.SOLVER.SCALE_MOMENTUM and cur_lr > 1e-07 and (ratio > cfg.SOLVER.SCALE_MOMENTUM_THRESHOLD):
self._CorrectMomentum(new_lr / cur_lr)
</DeepExtract>
return new_lr
|
def UpdateWorkspaceLr(self, cur_iter, new_lr):
"""Updates the model's current learning rate and the workspace (learning
rate and update history/momentum blobs).
"""
cur_lr = workspace.FetchBlob('gpu_0/lr')[0]
if cur_lr != new_lr:
eps = 1e-10
ratio = np.max((new_lr / np.max((cur_lr, eps)), cur_lr / np.max((new_lr, eps))))
ratio = ratio
if ratio > cfg.SOLVER.LOG_LR_CHANGE_THRESHOLD:
logger.info('Changing learning rate {:.6f} -> {:.6f} at iter {:d}'.format(cur_lr, new_lr, cur_iter))
for i in range(cfg.NUM_GPUS):
with c2_utils.CudaScope(i):
workspace.FeedBlob('gpu_{}/lr'.format(i), np.array([new_lr], dtype=np.float32))
ratio = _get_lr_change_ratio(cur_lr, new_lr)
if cfg.SOLVER.SCALE_MOMENTUM and cur_lr > 1e-07 and (ratio > cfg.SOLVER.SCALE_MOMENTUM_THRESHOLD):
self._CorrectMomentum(new_lr / cur_lr)
return new_lr
|
CBNet
|
positive
|
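The CBNet pair inlines a small helper that computes a symmetric ratio between the old and new learning rate before deciding whether the change is worth logging. A standalone sketch (the threshold value is illustrative):

import numpy as np

def lr_change_ratio(cur_lr, new_lr, eps=1e-10):
    """Symmetric ratio between two learning rates, guarded against division by zero."""
    return np.max((new_lr / np.max((cur_lr, eps)), cur_lr / np.max((new_lr, eps))))

if __name__ == '__main__':
    LOG_LR_CHANGE_THRESHOLD = 1.1   # assumed threshold
    cur_lr, new_lr = 0.01, 0.001
    if lr_change_ratio(cur_lr, new_lr) > LOG_LR_CHANGE_THRESHOLD:
        print('Changing learning rate {:.6f} -> {:.6f}'.format(cur_lr, new_lr))   # ratio is 10x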
def test_count_submissions_to_date(self):
"""If we add some items, we can count them"""
date1 = datetime.datetime(2016, 1, 7, 10, 20, 39, tzinfo=pytz.utc)
date2 = datetime.datetime(2016, 1, 7, 12, 30, 20, tzinfo=pytz.utc)
date3 = datetime.datetime(2016, 1, 8, 1, 0, 0, tzinfo=pytz.utc)
date4 = datetime.datetime(2016, 1, 10, 1, 0, 0, tzinfo=pytz.utc)
<DeepExtract>
build_sha1 = 'sha1'
data_sha256 = str(True) + str(uuid.uuid4()) + str(date1)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(uuid.uuid4())})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': uuid.uuid4(), 'build_sha1': build_sha1, 'lossless': True, 'submitted': date1, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
</DeepExtract>
two_uuid = uuid.uuid4()
<DeepExtract>
build_sha1 = 'sha1'
data_sha256 = str(True) + str(two_uuid) + str(date2)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(two_uuid)})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': two_uuid, 'build_sha1': build_sha1, 'lossless': True, 'submitted': date2, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
</DeepExtract>
three_uuid = uuid.uuid4()
<DeepExtract>
build_sha1 = 'sha1'
data_sha256 = str(False) + str(three_uuid) + str(date3)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(three_uuid)})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': three_uuid, 'build_sha1': build_sha1, 'lossless': False, 'submitted': date3, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
</DeepExtract>
<DeepExtract>
build_sha1 = 'sha1'
data_sha256 = str(False) + str(three_uuid) + str(date4)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(three_uuid)})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': three_uuid, 'build_sha1': build_sha1, 'lossless': False, 'submitted': date4, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
</DeepExtract>
<DeepExtract>
build_sha1 = 'sha1'
data_sha256 = str(True) + str(three_uuid) + str(date4)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(three_uuid)})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': three_uuid, 'build_sha1': build_sha1, 'lossless': True, 'submitted': date4, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
</DeepExtract>
<DeepExtract>
build_sha1 = 'sha1'
data_sha256 = str(True) + str(two_uuid) + str(date4)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(two_uuid)})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': two_uuid, 'build_sha1': build_sha1, 'lossless': True, 'submitted': date4, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
</DeepExtract>
with db.engine.connect() as connection:
two_submissions_date = datetime.datetime(2016, 1, 7, 15, 0, 0, tzinfo=pytz.utc)
three_submissions_date = datetime.datetime(2016, 1, 9, 15, 0, 0, tzinfo=pytz.utc)
five_submissions_date = datetime.datetime(2016, 1, 10, 15, 0, 0, tzinfo=pytz.utc)
ret = db.submission_stats._count_submissions_to_date(connection, two_submissions_date)
self.assertEqual({'lowlevel-lossless': 2, 'lowlevel-lossless-unique': 2, 'lowlevel-lossy': 0, 'lowlevel-lossy-unique': 0, 'lowlevel-total': 2, 'lowlevel-total-unique': 2}, ret)
ret = db.submission_stats._count_submissions_to_date(connection, three_submissions_date)
self.assertEqual({'lowlevel-lossless': 2, 'lowlevel-lossless-unique': 2, 'lowlevel-lossy': 1, 'lowlevel-lossy-unique': 1, 'lowlevel-total': 3, 'lowlevel-total-unique': 3}, ret)
ret = db.submission_stats._count_submissions_to_date(connection, five_submissions_date)
self.assertEqual({'lowlevel-lossless': 4, 'lowlevel-lossless-unique': 3, 'lowlevel-lossy': 2, 'lowlevel-lossy-unique': 1, 'lowlevel-total': 6, 'lowlevel-total-unique': 3}, ret)
|
def test_count_submissions_to_date(self):
"""If we add some items, we can count them"""
date1 = datetime.datetime(2016, 1, 7, 10, 20, 39, tzinfo=pytz.utc)
date2 = datetime.datetime(2016, 1, 7, 12, 30, 20, tzinfo=pytz.utc)
date3 = datetime.datetime(2016, 1, 8, 1, 0, 0, tzinfo=pytz.utc)
date4 = datetime.datetime(2016, 1, 10, 1, 0, 0, tzinfo=pytz.utc)
build_sha1 = 'sha1'
data_sha256 = str(True) + str(uuid.uuid4()) + str(date1)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(uuid.uuid4())})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': uuid.uuid4(), 'build_sha1': build_sha1, 'lossless': True, 'submitted': date1, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
two_uuid = uuid.uuid4()
build_sha1 = 'sha1'
data_sha256 = str(True) + str(two_uuid) + str(date2)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(two_uuid)})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': two_uuid, 'build_sha1': build_sha1, 'lossless': True, 'submitted': date2, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
three_uuid = uuid.uuid4()
build_sha1 = 'sha1'
data_sha256 = str(False) + str(three_uuid) + str(date3)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(three_uuid)})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': three_uuid, 'build_sha1': build_sha1, 'lossless': False, 'submitted': date3, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
build_sha1 = 'sha1'
data_sha256 = str(False) + str(three_uuid) + str(date4)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(three_uuid)})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': three_uuid, 'build_sha1': build_sha1, 'lossless': False, 'submitted': date4, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
build_sha1 = 'sha1'
data_sha256 = str(True) + str(three_uuid) + str(date4)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(three_uuid)})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': three_uuid, 'build_sha1': build_sha1, 'lossless': True, 'submitted': date4, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
build_sha1 = 'sha1'
data_sha256 = str(True) + str(two_uuid) + str(date4)
data_sha256 = data_sha256[:64]
data_json = '{}'
gid_type = gid_types.GID_TYPE_MSID
with db.engine.connect() as connection:
query = text('\n SELECT MAX(submission_offset)\n FROM lowlevel\n WHERE gid::text = :mbid\n ')
result = connection.execute(query, {'mbid': str(two_uuid)})
row = result.fetchone()
if row[0] is not None:
submission_offset = row[0] + 1
else:
submission_offset = 0
query = text('\n INSERT INTO lowlevel (gid, build_sha1, lossless, submitted, gid_type, submission_offset)\n VALUES (:mbid, :build_sha1, :lossless, :submitted, :gid_type, :submission_offset)\n RETURNING id\n ')
result = connection.execute(query, {'mbid': two_uuid, 'build_sha1': build_sha1, 'lossless': True, 'submitted': date4, 'gid_type': gid_types.GID_TYPE_MSID, 'submission_offset': submission_offset})
id = result.fetchone()[0]
version_id = db.data.insert_version(connection, {}, db.data.VERSION_TYPE_LOWLEVEL)
query = text('\n INSERT INTO lowlevel_json (id, data, data_sha256, version)\n VALUES (:id, :data, :data_sha256, :version)\n ')
connection.execute(query, {'id': id, 'data': data_json, 'data_sha256': data_sha256, 'version': version_id})
with db.engine.connect() as connection:
two_submissions_date = datetime.datetime(2016, 1, 7, 15, 0, 0, tzinfo=pytz.utc)
three_submissions_date = datetime.datetime(2016, 1, 9, 15, 0, 0, tzinfo=pytz.utc)
five_submissions_date = datetime.datetime(2016, 1, 10, 15, 0, 0, tzinfo=pytz.utc)
ret = db.submission_stats._count_submissions_to_date(connection, two_submissions_date)
self.assertEqual({'lowlevel-lossless': 2, 'lowlevel-lossless-unique': 2, 'lowlevel-lossy': 0, 'lowlevel-lossy-unique': 0, 'lowlevel-total': 2, 'lowlevel-total-unique': 2}, ret)
ret = db.submission_stats._count_submissions_to_date(connection, three_submissions_date)
self.assertEqual({'lowlevel-lossless': 2, 'lowlevel-lossless-unique': 2, 'lowlevel-lossy': 1, 'lowlevel-lossy-unique': 1, 'lowlevel-total': 3, 'lowlevel-total-unique': 3}, ret)
ret = db.submission_stats._count_submissions_to_date(connection, five_submissions_date)
self.assertEqual({'lowlevel-lossless': 4, 'lowlevel-lossless-unique': 3, 'lowlevel-lossy': 2, 'lowlevel-lossy-unique': 1, 'lowlevel-total': 6, 'lowlevel-total-unique': 3}, ret)
|
acousticbrainz-server
|
positive
|
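The acousticbrainz-server test repeats one pattern six times: look up MAX(submission_offset) for a given MBID and use MAX + 1 (or 0 for the first submission) when inserting the next row. A sqlite-backed sketch of that counter with a deliberately simplified schema, not the project's actual table definition:

import sqlite3
import uuid

def next_submission_offset(conn, mbid):
    """Per-MBID submission counter: 0 for the first submission, MAX + 1 afterwards."""
    row = conn.execute(
        'SELECT MAX(submission_offset) FROM lowlevel WHERE gid = ?', (str(mbid),)
    ).fetchone()
    return row[0] + 1 if row[0] is not None else 0

if __name__ == '__main__':
    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE lowlevel (gid TEXT, submission_offset INTEGER)')
    mbid = uuid.uuid4()
    for _ in range(3):
        offset = next_submission_offset(conn, mbid)
        conn.execute('INSERT INTO lowlevel (gid, submission_offset) VALUES (?, ?)',
                     (str(mbid), offset))
        print(offset)   # prints 0, 1, 2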
def plotcur(im_step):
global nit
<DeepExtract>
init0 = xtuple[0]
init1 = xtuple[1]
init2 = xtuple[2]
imct = 0
if pol_solve[0] == 0:
im0 = init0
else:
im0 = im_step[imct * nimage:(imct + 1) * nimage]
imct += 1
if pol_solve[1] == 0:
im1 = init1
else:
im1 = im_step[imct * nimage:(imct + 1) * nimage]
imct += 1
if pol_solve[2] == 0:
im2 = init2
else:
im2 = im_step[imct * nimage:(imct + 1) * nimage]
imct += 1
cvtuple = np.array((im0, im1, im2))
</DeepExtract>
if pol_prim == 'amp_phase':
<DeepExtract>
iimage = cvtuple[0]
mimage = cvtuple[1]
chiimage = cvtuple[2]
mtrans = 0.5 + np.arctan(mimage / B) / np.pi
out = np.array((iimage, mtrans, chiimage))
cvtuple = out
</DeepExtract>
else:
raise Exception()
if show_updates:
<DeepExtract>
chi2_1 = polchisq(imtuple, A1, data1, sigma1, d1, ttype=ttype, mask=embed_mask, pol_prim=pol_prim)
</DeepExtract>
<DeepExtract>
chi2_2 = polchisq(imtuple, A2, data2, sigma2, d2, ttype=ttype, mask=embed_mask, pol_prim=pol_prim)
</DeepExtract>
<DeepExtract>
s_1 = polregularizer(imtuple, embed_mask, flux, Prior.xdim, Prior.ydim, Prior.psize, s1, **kwargs)
s_1 = polregularizer(imtuple, embed_mask, Prior.xdim, Prior.ydim, Prior.psize, s1, pol_prim=pol_prim, norm_reg=norm_reg)
</DeepExtract>
<DeepExtract>
s_2 = polregularizer(imtuple, embed_mask, flux, Prior.xdim, Prior.ydim, Prior.psize, s2, **kwargs)
</DeepExtract>
if np.any(np.invert(embed_mask)):
<DeepExtract>
out0 = np.zeros(len(embed_mask))
out1 = np.zeros(len(embed_mask))
out2 = np.zeros(len(embed_mask))
out0[embed_mask.nonzero()] = imtuple[0]
out1[embed_mask.nonzero()] = imtuple[1]
out2[embed_mask.nonzero()] = imtuple[2]
if clipfloor != 0.0:
if randomfloor:
out0[(embed_mask - 1).nonzero()] = clipfloor * np.abs(np.random.normal(size=len((embed_mask - 1).nonzero())))
out1[(embed_mask - 1).nonzero()] = 0
out2[(embed_mask - 1).nonzero()] = 0
else:
out0[(embed_mask - 1).nonzero()] = clipfloor
out1[(embed_mask - 1).nonzero()] = 0
out2[(embed_mask - 1).nonzero()] = 0
imtuple = (out0, out1, out2)
</DeepExtract>
<DeepExtract>
cmap = kwargs.get('cmap', 'afmhot')
interpolation = kwargs.get('interpolation', 'gaussian')
pcut = kwargs.get('pcut', 0.05)
nvec = kwargs.get('nvec', 15)
scale = kwargs.get('scale', None)
dynamic_range = kwargs.get('dynamic_range', 100000.0)
gamma = kwargs.get('dynamic_range', 0.5)
plt.ion()
plt.pause(1e-06)
plt.clf()
im = imtuple[0]
mim = imtuple[1]
chiim = imtuple[2]
imarr = im.reshape(Prior.ydim, Prior.xdim)
if scale == 'log':
if (imarr < 0.0).any():
print('clipping values less than 0')
imarr[imarr < 0.0] = 0.0
imarr = np.log(imarr + np.max(imarr) / dynamic_range)
if scale == 'gamma':
if (imarr < 0.0).any():
print('clipping values less than 0')
imarr[imarr < 0.0] = 0.0
imarr = (imarr + np.max(imarr) / dynamic_range) ** gamma
thin = int(round(Prior.xdim / nvec))
mask = imarr > pcut * np.max(im)
mask2 = mask[::thin, ::thin]
x = np.array([[i for i in range(Prior.xdim)] for j in range(Prior.ydim)])[::thin, ::thin][mask2]
y = np.array([[j for i in range(Prior.xdim)] for j in range(Prior.ydim)])[::thin, ::thin][mask2]
q = qimage(im, mim, chiim)
u = uimage(im, mim, chiim)
a = -np.sin(np.angle(q + 1j * u) / 2).reshape(Prior.ydim, Prior.xdim)[::thin, ::thin][mask2]
b = np.cos(np.angle(q + 1j * u) / 2).reshape(Prior.ydim, Prior.xdim)[::thin, ::thin][mask2]
m = (np.abs(q + 1j * u) / im).reshape(Prior.ydim, Prior.xdim)
m[~mask] = 0
plt.subplot(121)
plt.imshow(imarr, cmap=plt.get_cmap('afmhot'), interpolation='gaussian')
plt.quiver(x, y, a, b, headaxislength=20, headwidth=1, headlength=0.01, minlength=0, minshaft=1, width=0.01 * Prior.xdim, units='x', pivot='mid', color='k', angles='uv', scale=1.0 / thin)
plt.quiver(x, y, a, b, headaxislength=20, headwidth=1, headlength=0.01, minlength=0, minshaft=1, width=0.005 * Prior.xdim, units='x', pivot='mid', color='w', angles='uv', scale=1.1 / thin)
xticks = ticks(Prior.xdim, Prior.psize / RADPERAS / 1e-06)
yticks = ticks(Prior.ydim, Prior.psize / RADPERAS / 1e-06)
plt.xticks(xticks[0], xticks[1])
plt.yticks(yticks[0], yticks[1])
plt.xlabel('Relative RA ($\\mu$as)')
plt.ylabel('Relative Dec ($\\mu$as)')
plt.title('Stokes I')
plt.subplot(122)
plt.imshow(m, cmap=plt.get_cmap('winter'), interpolation='gaussian', vmin=0, vmax=1)
plt.quiver(x, y, a, b, headaxislength=20, headwidth=1, headlength=0.01, minlength=0, minshaft=1, width=0.01 * Prior.xdim, units='x', pivot='mid', color='k', angles='uv', scale=1.0 / thin)
plt.quiver(x, y, a, b, headaxislength=20, headwidth=1, headlength=0.01, minlength=0, minshaft=1, width=0.005 * Prior.xdim, units='x', pivot='mid', color='w', angles='uv', scale=1.1 / thin)
plt.xticks(xticks[0], xticks[1])
plt.yticks(yticks[0], yticks[1])
plt.xlabel('Relative RA ($\\mu$as)')
plt.ylabel('Relative Dec ($\\mu$as)')
plt.title('m (above %i %% max flux)' % int(pcut * 100))
plotstr = 'step: %i ' % nit
for key in {d1: chi2_1, d2: chi2_2}.keys():
plotstr += '$\\chi^2_{%s}$: %0.2f ' % (key, {d1: chi2_1, d2: chi2_2}[key])
plt.suptitle(plotstr, fontsize=18)
</DeepExtract>
print('i: %d chi2_1: %0.2f chi2_2: %0.2f s_1: %0.2f s_2: %0.2f' % (nit, chi2_1, chi2_2, s_1, s_2))
nit += 1
|
def plotcur(im_step):
global nit
init0 = xtuple[0]
init1 = xtuple[1]
init2 = xtuple[2]
imct = 0
if pol_solve[0] == 0:
im0 = init0
else:
im0 = im_step[imct * nimage:(imct + 1) * nimage]
imct += 1
if pol_solve[1] == 0:
im1 = init1
else:
im1 = im_step[imct * nimage:(imct + 1) * nimage]
imct += 1
if pol_solve[2] == 0:
im2 = init2
else:
im2 = im_step[imct * nimage:(imct + 1) * nimage]
imct += 1
cvtuple = np.array((im0, im1, im2))
if pol_prim == 'amp_phase':
iimage = cvtuple[0]
mimage = cvtuple[1]
chiimage = cvtuple[2]
mtrans = 0.5 + np.arctan(mimage / B) / np.pi
out = np.array((iimage, mtrans, chiimage))
cvtuple = out
else:
raise Exception()
if show_updates:
chi2_1 = polchisq(imtuple, A1, data1, sigma1, d1, ttype=ttype, mask=embed_mask, pol_prim=pol_prim)
chi2_2 = polchisq(imtuple, A2, data2, sigma2, d2, ttype=ttype, mask=embed_mask, pol_prim=pol_prim)
s_1 = polregularizer(imtuple, embed_mask, flux, Prior.xdim, Prior.ydim, Prior.psize, s1, **kwargs)
s_1 = polregularizer(imtuple, embed_mask, Prior.xdim, Prior.ydim, Prior.psize, s1, pol_prim=pol_prim, norm_reg=norm_reg)
s_2 = polregularizer(imtuple, embed_mask, flux, Prior.xdim, Prior.ydim, Prior.psize, s2, **kwargs)
if np.any(np.invert(embed_mask)):
out0 = np.zeros(len(embed_mask))
out1 = np.zeros(len(embed_mask))
out2 = np.zeros(len(embed_mask))
out0[embed_mask.nonzero()] = imtuple[0]
out1[embed_mask.nonzero()] = imtuple[1]
out2[embed_mask.nonzero()] = imtuple[2]
if clipfloor != 0.0:
if randomfloor:
out0[(embed_mask - 1).nonzero()] = clipfloor * np.abs(np.random.normal(size=len((embed_mask - 1).nonzero())))
out1[(embed_mask - 1).nonzero()] = 0
out2[(embed_mask - 1).nonzero()] = 0
else:
out0[(embed_mask - 1).nonzero()] = clipfloor
out1[(embed_mask - 1).nonzero()] = 0
out2[(embed_mask - 1).nonzero()] = 0
imtuple = (out0, out1, out2)
cmap = kwargs.get('cmap', 'afmhot')
interpolation = kwargs.get('interpolation', 'gaussian')
pcut = kwargs.get('pcut', 0.05)
nvec = kwargs.get('nvec', 15)
scale = kwargs.get('scale', None)
dynamic_range = kwargs.get('dynamic_range', 100000.0)
gamma = kwargs.get('dynamic_range', 0.5)
plt.ion()
plt.pause(1e-06)
plt.clf()
im = imtuple[0]
mim = imtuple[1]
chiim = imtuple[2]
imarr = im.reshape(Prior.ydim, Prior.xdim)
if scale == 'log':
if (imarr < 0.0).any():
print('clipping values less than 0')
imarr[imarr < 0.0] = 0.0
imarr = np.log(imarr + np.max(imarr) / dynamic_range)
if scale == 'gamma':
if (imarr < 0.0).any():
print('clipping values less than 0')
imarr[imarr < 0.0] = 0.0
imarr = (imarr + np.max(imarr) / dynamic_range) ** gamma
thin = int(round(Prior.xdim / nvec))
mask = imarr > pcut * np.max(im)
mask2 = mask[::thin, ::thin]
x = np.array([[i for i in range(Prior.xdim)] for j in range(Prior.ydim)])[::thin, ::thin][mask2]
y = np.array([[j for i in range(Prior.xdim)] for j in range(Prior.ydim)])[::thin, ::thin][mask2]
q = qimage(im, mim, chiim)
u = uimage(im, mim, chiim)
a = -np.sin(np.angle(q + 1j * u) / 2).reshape(Prior.ydim, Prior.xdim)[::thin, ::thin][mask2]
b = np.cos(np.angle(q + 1j * u) / 2).reshape(Prior.ydim, Prior.xdim)[::thin, ::thin][mask2]
m = (np.abs(q + 1j * u) / im).reshape(Prior.ydim, Prior.xdim)
m[~mask] = 0
plt.subplot(121)
plt.imshow(imarr, cmap=plt.get_cmap('afmhot'), interpolation='gaussian')
plt.quiver(x, y, a, b, headaxislength=20, headwidth=1, headlength=0.01, minlength=0, minshaft=1, width=0.01 * Prior.xdim, units='x', pivot='mid', color='k', angles='uv', scale=1.0 / thin)
plt.quiver(x, y, a, b, headaxislength=20, headwidth=1, headlength=0.01, minlength=0, minshaft=1, width=0.005 * Prior.xdim, units='x', pivot='mid', color='w', angles='uv', scale=1.1 / thin)
xticks = ticks(Prior.xdim, Prior.psize / RADPERAS / 1e-06)
yticks = ticks(Prior.ydim, Prior.psize / RADPERAS / 1e-06)
plt.xticks(xticks[0], xticks[1])
plt.yticks(yticks[0], yticks[1])
plt.xlabel('Relative RA ($\\mu$as)')
plt.ylabel('Relative Dec ($\\mu$as)')
plt.title('Stokes I')
plt.subplot(122)
plt.imshow(m, cmap=plt.get_cmap('winter'), interpolation='gaussian', vmin=0, vmax=1)
plt.quiver(x, y, a, b, headaxislength=20, headwidth=1, headlength=0.01, minlength=0, minshaft=1, width=0.01 * Prior.xdim, units='x', pivot='mid', color='k', angles='uv', scale=1.0 / thin)
plt.quiver(x, y, a, b, headaxislength=20, headwidth=1, headlength=0.01, minlength=0, minshaft=1, width=0.005 * Prior.xdim, units='x', pivot='mid', color='w', angles='uv', scale=1.1 / thin)
plt.xticks(xticks[0], xticks[1])
plt.yticks(yticks[0], yticks[1])
plt.xlabel('Relative RA ($\\mu$as)')
plt.ylabel('Relative Dec ($\\mu$as)')
plt.title('m (above %i %% max flux)' % int(pcut * 100))
plotstr = 'step: %i ' % nit
for key in {d1: chi2_1, d2: chi2_2}.keys():
plotstr += '$\\chi^2_{%s}$: %0.2f ' % (key, {d1: chi2_1, d2: chi2_2}[key])
plt.suptitle(plotstr, fontsize=18)
print('i: %d chi2_1: %0.2f chi2_2: %0.2f s_1: %0.2f s_2: %0.2f' % (nit, chi2_1, chi2_2, s_1, s_2))
nit += 1
|
eht-imaging
|
positive
|
def split_over_variables_and_labels(split_profile, type, groups, span):
"""
Inner function that calculates actual conditional profiles for one variable only. Iterated over each variable and group.
:param split_profile: pandas.DataFrame, one group of the dataset (with only one variable)
:param groups: str, name of grouping variable
:return: pd.DataFrame, dataframe with calculated conditional profile for only one variable
"""
if split_profile.shape[0] == 0:
return None
if pd.api.types.is_numeric_dtype(split_profile['_x_']):
split_profile['_original_'] = split_profile['_original_'].astype('float')
range_x = split_profile['_x_'].max() - split_profile['_x_'].min()
if range_x == 0:
range_x = 1
diffs = (split_profile['_original_'] - split_profile['_x_']) / range_x
<DeepExtract>
split_profile['_w_'] = np.exp(-1 * ((diffs - 0) / span) ** 2 / 2) / np.pi / np.sqrt(2) / span
</DeepExtract>
else:
split_profile['_w_'] = split_profile['_original_'] == split_profile['_x_']
if type == 'accumulated':
split_profile['_yhat_'] = split_profile.groupby('_ids_')['_yhat_'].transform(lambda column: column.diff())
split_profile.loc[np.isnan(split_profile['_yhat_']), '_yhat_'] = 0
par_profile = split_profile.groupby(['_x_'] + groups, sort=False).apply(lambda point: (point['_yhat_'] * point['_w_']).sum() / point['_w_'].sum() if point['_w_'].sum() != 0 else 0)
par_profile.name = '_yhat_'
par_profile = par_profile.reset_index()
if type == 'accumulated':
if len(groups) == 0:
par_profile['_yhat_'] = par_profile['_yhat_'].cumsum()
else:
par_profile['_yhat_'] = par_profile.groupby(groups, sort=False)['_yhat_'].transform(lambda column: column.cumsum())
return par_profile
|
def split_over_variables_and_labels(split_profile, type, groups, span):
"""
Inner function that calculates actual conditional profiles for one variable only. Iterated over each variable and group.
:param split_profile: pandas.DataFrame, one group of the dataset (with only one variable)
:param groups: str, name of grouping variable
:return: pd.DataFrame, dataframe with calculated conditional profile for only one variable
"""
if split_profile.shape[0] == 0:
return None
if pd.api.types.is_numeric_dtype(split_profile['_x_']):
split_profile['_original_'] = split_profile['_original_'].astype('float')
range_x = split_profile['_x_'].max() - split_profile['_x_'].min()
if range_x == 0:
range_x = 1
diffs = (split_profile['_original_'] - split_profile['_x_']) / range_x
split_profile['_w_'] = np.exp(-1 * ((diffs - 0) / span) ** 2 / 2) / np.pi / np.sqrt(2) / span
else:
split_profile['_w_'] = split_profile['_original_'] == split_profile['_x_']
if type == 'accumulated':
split_profile['_yhat_'] = split_profile.groupby('_ids_')['_yhat_'].transform(lambda column: column.diff())
split_profile.loc[np.isnan(split_profile['_yhat_']), '_yhat_'] = 0
par_profile = split_profile.groupby(['_x_'] + groups, sort=False).apply(lambda point: (point['_yhat_'] * point['_w_']).sum() / point['_w_'].sum() if point['_w_'].sum() != 0 else 0)
par_profile.name = '_yhat_'
par_profile = par_profile.reset_index()
if type == 'accumulated':
if len(groups) == 0:
par_profile['_yhat_'] = par_profile['_yhat_'].cumsum()
else:
par_profile['_yhat_'] = par_profile.groupby(groups, sort=False)['_yhat_'].transform(lambda column: column.cumsum())
return par_profile
|
DALEX
|
positive
|
def merge_subject_medial_wall_with_atlas_template(subject_id, high_res_mesh, meshes, reg_sphere, temp_dir):
"""resample the atlas medial wall roi into subjects native space then
merge with native roi"""
native_settings = meshes['AtlasSpaceNative']
high_res_settings = meshes['HighResMesh']
for hemisphere in ['L', 'R']:
atlas_roi_native_gii = metric_file(subject_id, 'atlasroi', hemisphere, native_settings)
native_roi = medial_wall_roi_file(subject_id, hemisphere, native_settings)
<DeepExtract>
global DRYRUN
dryrun = DRYRUN or dryrun
if FS_LICENSE:
run_env = {'OMP_NUM_THREADS': str(N_CPUS), 'FS_LICENSE': FS_LICENSE}
else:
run_env = {'OMP_NUM_THREADS': str(N_CPUS)}
returncode = ciftify.utils.run(['wb_command', '-metric-resample', medial_wall_roi_file(subject_id, hemisphere, high_res_settings), surf_file(subject_id, 'sphere', hemisphere, high_res_settings), surf_file(subject_id, reg_sphere, hemisphere, native_settings), 'BARYCENTRIC', atlas_roi_native_gii, '-largest'], dryrun=dryrun, suppress_stdout=suppress_stdout, suppress_stderr=suppress_stderr, env=run_env)
if returncode:
sys.exit(1)
return returncode
</DeepExtract>
<DeepExtract>
global DRYRUN
dryrun = DRYRUN or dryrun
if FS_LICENSE:
run_env = {'OMP_NUM_THREADS': str(N_CPUS), 'FS_LICENSE': FS_LICENSE}
else:
run_env = {'OMP_NUM_THREADS': str(N_CPUS)}
returncode = ciftify.utils.run(['wb_command', '-metric-math', '"(atlas + individual) > 0"', native_roi, '-var', 'atlas', atlas_roi_native_gii, '-var', 'individual', native_roi], dryrun=dryrun, suppress_stdout=suppress_stdout, suppress_stderr=suppress_stderr, env=run_env)
if returncode:
sys.exit(1)
return returncode
</DeepExtract>
|
def merge_subject_medial_wall_with_atlas_template(subject_id, high_res_mesh, meshes, reg_sphere, temp_dir):
"""resample the atlas medial wall roi into subjects native space then
merge with native roi"""
native_settings = meshes['AtlasSpaceNative']
high_res_settings = meshes['HighResMesh']
for hemisphere in ['L', 'R']:
atlas_roi_native_gii = metric_file(subject_id, 'atlasroi', hemisphere, native_settings)
native_roi = medial_wall_roi_file(subject_id, hemisphere, native_settings)
global DRYRUN
dryrun = DRYRUN or dryrun
if FS_LICENSE:
run_env = {'OMP_NUM_THREADS': str(N_CPUS), 'FS_LICENSE': FS_LICENSE}
else:
run_env = {'OMP_NUM_THREADS': str(N_CPUS)}
returncode = ciftify.utils.run(['wb_command', '-metric-resample', medial_wall_roi_file(subject_id, hemisphere, high_res_settings), surf_file(subject_id, 'sphere', hemisphere, high_res_settings), surf_file(subject_id, reg_sphere, hemisphere, native_settings), 'BARYCENTRIC', atlas_roi_native_gii, '-largest'], dryrun=dryrun, suppress_stdout=suppress_stdout, suppress_stderr=suppress_stderr, env=run_env)
if returncode:
sys.exit(1)
return returncode
global DRYRUN
dryrun = DRYRUN or dryrun
if FS_LICENSE:
run_env = {'OMP_NUM_THREADS': str(N_CPUS), 'FS_LICENSE': FS_LICENSE}
else:
run_env = {'OMP_NUM_THREADS': str(N_CPUS)}
returncode = ciftify.utils.run(['wb_command', '-metric-math', '"(atlas + individual) > 0"', native_roi, '-var', 'atlas', atlas_roi_native_gii, '-var', 'individual', native_roi], dryrun=dryrun, suppress_stdout=suppress_stdout, suppress_stderr=suppress_stderr, env=run_env)
if returncode:
sys.exit(1)
return returncode
|
ciftify
|
positive
|
def main(argv):
arg_apikey = None
arg_org = '/all'
try:
(opts, args) = getopt.getopt(argv, 'hk:o:')
except getopt.GetoptError:
<DeepExtract>
if reason is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % reason)
sys.exit()
</DeepExtract>
for (opt, arg) in opts:
if opt == '-h':
<DeepExtract>
if reason is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % reason)
sys.exit()
</DeepExtract>
elif opt == '-k':
arg_apikey = str(arg)
elif opt == '-o':
arg_org = arg
<DeepExtract>
if not arg_apikey is None:
apiKey = str(arg_apikey)
apiKey = os.environ.get(API_KEY_ENV_VAR_NAME, None)
</DeepExtract>
if apiKey is None:
<DeepExtract>
if reason is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % reason)
sys.exit()
</DeepExtract>
<DeepExtract>
url = '/organizations'
(success, errors, headers, response) = merakiRequest(apiKey, 'get', url, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, rawOrganizations) = (success, errors, response)
</DeepExtract>
if rawOrganizations is None:
<DeepExtract>
if "Unable to fetch organizations' list" is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % "Unable to fetch organizations' list")
sys.exit()
</DeepExtract>
organizations = []
for org in rawOrganizations:
if arg_org == '/all' or org['name'] == arg_org:
organizations.append(org)
outputBuffer = []
for org in organizations:
<DeepExtract>
url = '/organizations/' + str(org['id']) + '/networks'
(success, errors, headers, response) = merakiRequest(apiKey, 'get', url, p_queryItems=query, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, networks) = (success, errors, response)
</DeepExtract>
if networks is None:
continue
for net in networks:
<DeepExtract>
url = '/networks/' + str(net['id']) + '/clients'
(success, errors, headers, response) = merakiRequest(apiKey, 'get', url, p_queryItems={'timespan': 2678400}, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, networkClients) = (success, errors, response)
</DeepExtract>
if networkClients is None:
continue
for client in networkClients:
record = [org['name'], org['id'], net['name'], net['id'], client['description'], client['id'], client['mac'], client['ip'], client['ip6'], client['ip6Local'], client['user'], client['firstSeen'], client['lastSeen'], client['manufacturer'], client['os'], client['deviceTypePrediction'], client['recentDeviceSerial'], client['recentDeviceName'], client['recentDeviceMac'], client['recentDeviceConnection'], client['ssid'], client['vlan'], client['switchport'], int(client['usage']['sent']), int(client['usage']['recv']), int(client['usage']['total']), client['status'], client['notes'], client['smInstalled'], client['groupPolicy8021x'], client['adaptivePolicyGroup']]
strRecord = []
for item in record:
if item is None:
strRecord.append('')
else:
strRecord.append(str(item))
outputBuffer.append(','.join(strRecord))
if len(outputBuffer) == 0:
<DeepExtract>
if 'No clients in scope' is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % 'No clients in scope')
sys.exit()
</DeepExtract>
csvHeader = ','.join(['organizationName', 'organizationId', 'networkName', 'networkId', 'clientDescription', 'clientId', 'clientMac', 'clientIpv4Address', 'clientIpv6Address', 'clientIpv6LocalAddress', 'user', 'firstSeen', 'lastSeen', 'manufacturer', 'os', 'deviceTypePrediction', 'lastConnectedNetworkDeviceSerial', 'lastConnectedNetworkDeviceName', 'lastConnectedNetworkDeviceMac', 'lastConnectedNetworkConnectionType', 'ssid', 'vlan', 'switchport', 'usageSentKBytes', 'usageRecvKBytes', 'usageTotalKBytes', 'status', 'notes', 'systemsManagerInstalled', 'groupPolicy8021x', 'adaptivePolicyGroup'])
reportFileName = 'clients_' + '{:%Y-%m-%d_%H.%M.%S}'.format(datetime.datetime.now())[:19] + '.csv'
<DeepExtract>
logString = '%s -- %s' % (str(datetime.datetime.now())[:19], 'Writing output to file "%s"...' % reportFileName)
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
</DeepExtract>
try:
f = open(reportFileName, 'w')
f.write('%s\n' % csvHeader)
except:
<DeepExtract>
if 'Unable to open file for writing' is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % 'Unable to open file for writing')
sys.exit()
</DeepExtract>
for line in outputBuffer:
try:
f.write('%s\n' % line)
except:
<DeepExtract>
if 'Write failed' is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % 'Write failed')
sys.exit()
</DeepExtract>
try:
f.close()
except:
<DeepExtract>
if 'Failed to close file' is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % 'Failed to close file')
sys.exit()
</DeepExtract>
<DeepExtract>
logString = '%s -- %s' % (str(datetime.datetime.now())[:19], 'End of script.')
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
</DeepExtract>
|
def main(argv):
arg_apikey = None
arg_org = '/all'
try:
(opts, args) = getopt.getopt(argv, 'hk:o:')
except getopt.GetoptError:
if reason is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % reason)
sys.exit()
for (opt, arg) in opts:
if opt == '-h':
if reason is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % reason)
sys.exit()
elif opt == '-k':
arg_apikey = str(arg)
elif opt == '-o':
arg_org = arg
if not arg_apikey is None:
apiKey = str(arg_apikey)
apiKey = os.environ.get(API_KEY_ENV_VAR_NAME, None)
if apiKey is None:
if reason is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % reason)
sys.exit()
url = '/organizations'
(success, errors, headers, response) = merakiRequest(apiKey, 'get', url, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, rawOrganizations) = (success, errors, response)
if rawOrganizations is None:
if "Unable to fetch organizations' list" is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % "Unable to fetch organizations' list")
sys.exit()
organizations = []
for org in rawOrganizations:
if arg_org == '/all' or org['name'] == arg_org:
organizations.append(org)
outputBuffer = []
for org in organizations:
url = '/organizations/' + str(org['id']) + '/networks'
(success, errors, headers, response) = merakiRequest(apiKey, 'get', url, p_queryItems=query, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, networks) = (success, errors, response)
if networks is None:
continue
for net in networks:
url = '/networks/' + str(net['id']) + '/clients'
(success, errors, headers, response) = merakiRequest(apiKey, 'get', url, p_queryItems={'timespan': 2678400}, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, networkClients) = (success, errors, response)
if networkClients is None:
continue
for client in networkClients:
record = [org['name'], org['id'], net['name'], net['id'], client['description'], client['id'], client['mac'], client['ip'], client['ip6'], client['ip6Local'], client['user'], client['firstSeen'], client['lastSeen'], client['manufacturer'], client['os'], client['deviceTypePrediction'], client['recentDeviceSerial'], client['recentDeviceName'], client['recentDeviceMac'], client['recentDeviceConnection'], client['ssid'], client['vlan'], client['switchport'], int(client['usage']['sent']), int(client['usage']['recv']), int(client['usage']['total']), client['status'], client['notes'], client['smInstalled'], client['groupPolicy8021x'], client['adaptivePolicyGroup']]
strRecord = []
for item in record:
if item is None:
strRecord.append('')
else:
strRecord.append(str(item))
outputBuffer.append(','.join(strRecord))
if len(outputBuffer) == 0:
if 'No clients in scope' is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % 'No clients in scope')
sys.exit()
csvHeader = ','.join(['organizationName', 'organizationId', 'networkName', 'networkId', 'clientDescription', 'clientId', 'clientMac', 'clientIpv4Address', 'clientIpv6Address', 'clientIpv6LocalAddress', 'user', 'firstSeen', 'lastSeen', 'manufacturer', 'os', 'deviceTypePrediction', 'lastConnectedNetworkDeviceSerial', 'lastConnectedNetworkDeviceName', 'lastConnectedNetworkDeviceMac', 'lastConnectedNetworkConnectionType', 'ssid', 'vlan', 'switchport', 'usageSentKBytes', 'usageRecvKBytes', 'usageTotalKBytes', 'status', 'notes', 'systemsManagerInstalled', 'groupPolicy8021x', 'adaptivePolicyGroup'])
reportFileName = 'clients_' + '{:%Y-%m-%d_%H.%M.%S}'.format(datetime.datetime.now())[:19] + '.csv'
logString = '%s -- %s' % (str(datetime.datetime.now())[:19], 'Writing output to file "%s"...' % reportFileName)
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
try:
f = open(reportFileName, 'w')
f.write('%s\n' % csvHeader)
except:
if 'Unable to open file for writing' is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % 'Unable to open file for writing')
sys.exit()
for line in outputBuffer:
try:
f.write('%s\n' % line)
except:
if 'Write failed' is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % 'Write failed')
sys.exit()
try:
f.close()
except:
if 'Failed to close file' is None:
print(readMe)
sys.exit()
else:
log('ERROR: %s' % 'Failed to close file')
sys.exit()
logString = '%s -- %s' % (str(datetime.datetime.now())[:19], 'End of script.')
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
|
automation-scripts
|
positive
|
def draw_score_parallel_coord(col, results, type_filter='all', metadata=None, x_sort_by='name', ylabel=None, filename=None, **kwargs):
res_group = results.groupby(['type', 'task', 'framework'])
df = res_group[col].mean().unstack(['type', 'task'])
df = df if type_filter == 'all' else df.iloc[:, df.columns.get_loc(type_filter)]
sort_by = x_sort_by if callable(x_sort_by) else None if not metadata or not isinstance(x_sort_by, str) else lambda cols: getattr(metadata[cols[1]], x_sort_by)
df = sort_dataframe(df, by=sort_by, axis=1)
df.reset_index(inplace=True)
<DeepExtract>
colormap = config.colormap if colormap is None else colormap
with sb.axes_style('ticks', rc={'grid.linestyle': 'dotted'}), sb.plotting_context('paper'):
parallel_fig = mp.pyplot.figure(dpi=120, figsize=size or (10, df.shape[0]))
colors = mp.cm.get_cmap(colormap).colors[:len(df['framework'].unique())]
axes = pd.plotting.parallel_coordinates(df, class_column='framework', color=colors, axvlines=False)
set_scales(axes, yscale=yscale)
(handles, labels) = axes.get_legend_handles_labels()
axes.legend(handles, labels, loc=legend_loc, title='Framework')
set_labels(axes, title=title, xlabel=xlabel, ylabel=ylabel or 'Score', x_labels=task_labels(df.columns.drop('framework')), x_tick_params=dict(labelrotation=90))
fig = parallel_fig
</DeepExtract>
if filename:
savefig(fig, create_file('graphics', config.results_group, filename))
return fig
|
def draw_score_parallel_coord(col, results, type_filter='all', metadata=None, x_sort_by='name', ylabel=None, filename=None, **kwargs):
res_group = results.groupby(['type', 'task', 'framework'])
df = res_group[col].mean().unstack(['type', 'task'])
df = df if type_filter == 'all' else df.iloc[:, df.columns.get_loc(type_filter)]
sort_by = x_sort_by if callable(x_sort_by) else None if not metadata or not isinstance(x_sort_by, str) else lambda cols: getattr(metadata[cols[1]], x_sort_by)
df = sort_dataframe(df, by=sort_by, axis=1)
df.reset_index(inplace=True)
colormap = config.colormap if colormap is None else colormap
with sb.axes_style('ticks', rc={'grid.linestyle': 'dotted'}), sb.plotting_context('paper'):
parallel_fig = mp.pyplot.figure(dpi=120, figsize=size or (10, df.shape[0]))
colors = mp.cm.get_cmap(colormap).colors[:len(df['framework'].unique())]
axes = pd.plotting.parallel_coordinates(df, class_column='framework', color=colors, axvlines=False)
set_scales(axes, yscale=yscale)
(handles, labels) = axes.get_legend_handles_labels()
axes.legend(handles, labels, loc=legend_loc, title='Framework')
set_labels(axes, title=title, xlabel=xlabel, ylabel=ylabel or 'Score', x_labels=task_labels(df.columns.drop('framework')), x_tick_params=dict(labelrotation=90))
fig = parallel_fig
if filename:
savefig(fig, create_file('graphics', config.results_group, filename))
return fig
|
automlbenchmark
|
positive
|
def parse_r_description(self, filename, packages):
"""Update build/runtime requirements according to the R package description."""
deps = []
with util.open_auto(filename, 'r') as desc:
content = desc.read()
<DeepExtract>
val = []
pat = re.compile('^' + 'Depends' + ':(.*?)((?=\\n\\S+:)|\\Z)', re.MULTILINE | re.DOTALL)
match = pat.search(content)
if match:
joined = match.group(1).replace('\n', ' ').strip(' ,')
if joined:
val = re.split('\\s*,\\s*', joined)
val = [re.split('\\s*\\(', v)[0] for v in val]
deps = val
</DeepExtract>
deps.extend(_get_desc_field('Imports', content))
deps.extend(_get_desc_field('LinkingTo', content))
<DeepExtract>
provides = ['KernSmooth', 'MASS', 'Matrix', 'base', 'boot', 'class', 'cluster', 'codetools', 'compiler', 'datasets', 'foreign', 'grDevices', 'graphics', 'grid', 'lattice', 'methods', 'mgcv', 'nlme', 'nnet', 'parallel', 'rpart', 'spatial', 'splines', 'stats', 'stats4', 'survival', 'tcltk', 'tools', 'translations', 'utils']
r_provides = set(provides)
</DeepExtract>
for dep in deps:
if dep == 'R':
continue
if dep in r_provides:
continue
pkg = 'R-' + dep
<DeepExtract>
new = True
if not pkg:
return False
pkg.strip()
if pkg in self.banned_buildreqs:
return False
if pkg in self.buildreqs:
new = False
if self.verbose and new:
print(' Adding buildreq:', pkg)
self.buildreqs.add(pkg)
if cache and new:
self.buildreqs_cache.add(pkg)
return new
</DeepExtract>
<DeepExtract>
new = True
pkg = pkg.strip()
if (requires := self.requires.get(subpkg)) is None:
requires = self.requires[subpkg] = set()
if pkg in requires:
new = False
if (banned_requires := self.banned_requires.get(subpkg)) is None:
banned_requires = self.banned_requires[subpkg] = set()
if pkg in banned_requires:
return False
if pkg not in self.buildreqs and pkg not in packages and (not override):
if pkg:
print("requirement '{}' not found in buildreqs or os_packages, skipping".format(pkg))
return False
if new:
requires.add(pkg)
return new
</DeepExtract>
|
def parse_r_description(self, filename, packages):
"""Update build/runtime requirements according to the R package description."""
deps = []
with util.open_auto(filename, 'r') as desc:
content = desc.read()
val = []
pat = re.compile('^' + 'Depends' + ':(.*?)((?=\\n\\S+:)|\\Z)', re.MULTILINE | re.DOTALL)
match = pat.search(content)
if match:
joined = match.group(1).replace('\n', ' ').strip(' ,')
if joined:
val = re.split('\\s*,\\s*', joined)
val = [re.split('\\s*\\(', v)[0] for v in val]
deps = val
deps.extend(_get_desc_field('Imports', content))
deps.extend(_get_desc_field('LinkingTo', content))
provides = ['KernSmooth', 'MASS', 'Matrix', 'base', 'boot', 'class', 'cluster', 'codetools', 'compiler', 'datasets', 'foreign', 'grDevices', 'graphics', 'grid', 'lattice', 'methods', 'mgcv', 'nlme', 'nnet', 'parallel', 'rpart', 'spatial', 'splines', 'stats', 'stats4', 'survival', 'tcltk', 'tools', 'translations', 'utils']
r_provides = set(provides)
for dep in deps:
if dep == 'R':
continue
if dep in r_provides:
continue
pkg = 'R-' + dep
new = True
if not pkg:
return False
pkg.strip()
if pkg in self.banned_buildreqs:
return False
if pkg in self.buildreqs:
new = False
if self.verbose and new:
print(' Adding buildreq:', pkg)
self.buildreqs.add(pkg)
if cache and new:
self.buildreqs_cache.add(pkg)
return new
new = True
pkg = pkg.strip()
if (requires := self.requires.get(subpkg)) is None:
requires = self.requires[subpkg] = set()
if pkg in requires:
new = False
if (banned_requires := self.banned_requires.get(subpkg)) is None:
banned_requires = self.banned_requires[subpkg] = set()
if pkg in banned_requires:
return False
if pkg not in self.buildreqs and pkg not in packages and (not override):
if pkg:
print("requirement '{}' not found in buildreqs or os_packages, skipping".format(pkg))
return False
if new:
requires.add(pkg)
return new
|
autospec
|
positive
|
def search(path):
real_path = os.path.realpath(path)
res = cache.get(real_path)
if res is None:
<DeepExtract>
cfgpath = dirContainsTestSuite(path, litConfig)
if not cfgpath:
(parent, base) = os.path.split(path)
if parent == path:
cache[real_path] = res = (None, ())
(ts, relative) = search(parent)
cache[real_path] = res = (ts, relative + (base,))
config_map = litConfig.params.get('config_map')
if config_map:
cfgpath = os.path.realpath(cfgpath)
cfgpath = os.path.normcase(cfgpath)
target = config_map.get(cfgpath)
if target:
cfgpath = target
if litConfig.debug:
litConfig.note('loading suite config %r' % cfgpath)
cfg = TestingConfig.fromdefaults(litConfig)
cfg.load_from_path(cfgpath, litConfig)
source_root = os.path.realpath(cfg.test_source_root or path)
exec_root = os.path.realpath(cfg.test_exec_root or path)
cache[real_path] = res = (Test.TestSuite(cfg.name, source_root, exec_root, cfg), ())
</DeepExtract>
return res
|
def search(path):
real_path = os.path.realpath(path)
res = cache.get(real_path)
if res is None:
cfgpath = dirContainsTestSuite(path, litConfig)
if not cfgpath:
(parent, base) = os.path.split(path)
if parent == path:
cache[real_path] = res = (None, ())
(ts, relative) = search(parent)
cache[real_path] = res = (ts, relative + (base,))
config_map = litConfig.params.get('config_map')
if config_map:
cfgpath = os.path.realpath(cfgpath)
cfgpath = os.path.normcase(cfgpath)
target = config_map.get(cfgpath)
if target:
cfgpath = target
if litConfig.debug:
litConfig.note('loading suite config %r' % cfgpath)
cfg = TestingConfig.fromdefaults(litConfig)
cfg.load_from_path(cfgpath, litConfig)
source_root = os.path.realpath(cfg.test_source_root or path)
exec_root = os.path.realpath(cfg.test_exec_root or path)
cache[real_path] = res = (Test.TestSuite(cfg.name, source_root, exec_root, cfg), ())
return res
|
alive
|
positive
|
def build_secnumber(self, node):
if not self.add_secnumbers:
return ''
<DeepExtract>
if node.get('secnumber'):
secnumber = node['secnumber']
if isinstance(node.parent, nodes.section):
if self.builder.name == 'singleconfluence':
docname = self._docnames[-1]
raw_anchor = node.parent['ids'][0]
anchorname = '{}/#{}'.format(docname, node.parent['ids'][0])
if anchorname not in self.builder.secnumbers:
anchorname = '%s/' % raw_anchor
else:
anchorname = '#' + node.parent['ids'][0]
if anchorname not in self.builder.secnumbers:
anchorname = ''
if self.builder.secnumbers.get(anchorname):
secnumber = self.builder.secnumbers[anchorname]
secnumber = None
</DeepExtract>
if not secnumber:
return ''
return '.'.join(map(str, secnumber)) + self.secnumber_suffix
|
def build_secnumber(self, node):
if not self.add_secnumbers:
return ''
if node.get('secnumber'):
secnumber = node['secnumber']
if isinstance(node.parent, nodes.section):
if self.builder.name == 'singleconfluence':
docname = self._docnames[-1]
raw_anchor = node.parent['ids'][0]
anchorname = '{}/#{}'.format(docname, node.parent['ids'][0])
if anchorname not in self.builder.secnumbers:
anchorname = '%s/' % raw_anchor
else:
anchorname = '#' + node.parent['ids'][0]
if anchorname not in self.builder.secnumbers:
anchorname = ''
if self.builder.secnumbers.get(anchorname):
secnumber = self.builder.secnumbers[anchorname]
secnumber = None
if not secnumber:
return ''
return '.'.join(map(str, secnumber)) + self.secnumber_suffix
|
confluencebuilder
|
positive
|
def set_common_values(self, event_btn, event_x, event_y):
self.x_press = event_x
self.y_press = event_y
if event_btn != self._last_btn:
<DeepExtract>
self._use_antialias = self.get_option_value('antialias')
self.tool_width = self.window.options_manager.get_tool_width()
left_c = self.window.options_manager.get_left_color()
right_c = self.window.options_manager.get_right_color()
if event_btn == 1:
self.main_color = utilities_gdk_rgba_to_normalized_array(left_c)
self.secondary_color = utilities_gdk_rgba_to_normalized_array(right_c)
if event_btn == 3:
self.main_color = utilities_gdk_rgba_to_normalized_array(right_c)
self.secondary_color = utilities_gdk_rgba_to_normalized_array(left_c)
self._operator = self.window.options_manager.get_operator()[0]
</DeepExtract>
self._last_btn = event_btn
|
def set_common_values(self, event_btn, event_x, event_y):
self.x_press = event_x
self.y_press = event_y
if event_btn != self._last_btn:
self._use_antialias = self.get_option_value('antialias')
self.tool_width = self.window.options_manager.get_tool_width()
left_c = self.window.options_manager.get_left_color()
right_c = self.window.options_manager.get_right_color()
if event_btn == 1:
self.main_color = utilities_gdk_rgba_to_normalized_array(left_c)
self.secondary_color = utilities_gdk_rgba_to_normalized_array(right_c)
if event_btn == 3:
self.main_color = utilities_gdk_rgba_to_normalized_array(right_c)
self.secondary_color = utilities_gdk_rgba_to_normalized_array(left_c)
self._operator = self.window.options_manager.get_operator()[0]
self._last_btn = event_btn
|
drawing
|
positive
|
@router.get('/agenda', include_in_schema=False)
def agenda(request: Request, db: Session=Depends(get_db), start_date: Optional[date]=None, end_date: Optional[date]=None, days: Optional[int]=None) -> _TemplateResponse:
"""Route for the agenda page, using dates range or exact amount of days."""
user_id = 1
<DeepExtract>
if days is not None:
start_date = date.today()
end_date = start_date + timedelta(days=days)
elif start_date is None or end_date is None:
start_date = date.today()
end_date = date.today()
(start_date, end_date) = (start_date, end_date)
</DeepExtract>
events_objects = agenda_events.get_events_per_dates(db, user_id, start_date, end_date)
events = defaultdict(list)
for event_obj in events_objects:
event_duration = agenda_events.get_time_delta_string(event_obj.start, event_obj.end)
events[event_obj.start.date()].append((event_obj, event_duration))
return templates.TemplateResponse('agenda.html', {'request': request, 'events': events, 'start_date': start_date, 'end_date': end_date})
|
@router.get('/agenda', include_in_schema=False)
def agenda(request: Request, db: Session=Depends(get_db), start_date: Optional[date]=None, end_date: Optional[date]=None, days: Optional[int]=None) -> _TemplateResponse:
"""Route for the agenda page, using dates range or exact amount of days."""
user_id = 1
if days is not None:
start_date = date.today()
end_date = start_date + timedelta(days=days)
elif start_date is None or end_date is None:
start_date = date.today()
end_date = date.today()
(start_date, end_date) = (start_date, end_date)
events_objects = agenda_events.get_events_per_dates(db, user_id, start_date, end_date)
events = defaultdict(list)
for event_obj in events_objects:
event_duration = agenda_events.get_time_delta_string(event_obj.start, event_obj.end)
events[event_obj.start.date()].append((event_obj, event_duration))
return templates.TemplateResponse('agenda.html', {'request': request, 'events': events, 'start_date': start_date, 'end_date': end_date})
|
calendar
|
positive
|
def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name='token_type_embeddings', use_position_embeddings=True, position_embedding_name='position_embeddings', initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
<DeepExtract>
if name is None:
name = input_tensor.name
if 3 is not None:
assert_rank(input_tensor, 3, name)
shape = input_tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
input_shape = shape
dyn_shape = tf.shape(input_tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
input_shape = shape
</DeepExtract>
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError('`token_type_ids` must be specified if`use_token_type` is True.')
token_type_table = tf.get_variable(name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range))
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range))
position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1])
num_dims = len(output.shape.as_list())
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape)
output += position_embeddings
<DeepExtract>
output_tensor = layer_norm(output, name)
output_tensor = dropout(output_tensor, dropout_prob)
output = output_tensor
</DeepExtract>
return output
|
def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name='token_type_embeddings', use_position_embeddings=True, position_embedding_name='position_embeddings', initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
if name is None:
name = input_tensor.name
if 3 is not None:
assert_rank(input_tensor, 3, name)
shape = input_tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
input_shape = shape
dyn_shape = tf.shape(input_tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
input_shape = shape
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError('`token_type_ids` must be specified if`use_token_type` is True.')
token_type_table = tf.get_variable(name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range))
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range))
position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1])
num_dims = len(output.shape.as_list())
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape)
output += position_embeddings
output_tensor = layer_norm(output, name)
output_tensor = dropout(output_tensor, dropout_prob)
output = output_tensor
return output
|
BERT-for-Sequence-Labeling-and-Text-Classification
|
positive
|
def iou(self, bound_box):
<DeepExtract>
b1 = np.array([self.x, self.y, self.w, self.h])
</DeepExtract>
b2 = bound_box.as_centroid()
return centroid_box_iou(b1, b2)
|
def iou(self, bound_box):
b1 = np.array([self.x, self.y, self.w, self.h])
b2 = bound_box.as_centroid()
return centroid_box_iou(b1, b2)
|
aXeleRate
|
positive
|
def get_pid(self):
if not self._pid:
<DeepExtract>
pidfile = self._config.get_pidfile()
try:
with open(pidfile, 'r') as pf:
self._pid = int(pf.read().strip())
except IOError as e:
if e.errno != errno.ENOENT:
logger.warn('Cannot read pidfile: %s' % e)
self._pid = None
except ValueError as e:
logger.warn('Cannot read pidfile: %s' % e)
self._pid = None
</DeepExtract>
return self._pid
|
def get_pid(self):
if not self._pid:
pidfile = self._config.get_pidfile()
try:
with open(pidfile, 'r') as pf:
self._pid = int(pf.read().strip())
except IOError as e:
if e.errno != errno.ENOENT:
logger.warn('Cannot read pidfile: %s' % e)
self._pid = None
except ValueError as e:
logger.warn('Cannot read pidfile: %s' % e)
self._pid = None
return self._pid
|
cf-mendix-buildpack
|
positive
|
def __init__(self, root, transform=None, return_paths=False, loader=default_loader):
<DeepExtract>
images = []
assert os.path.isdir(root), '%s is not a valid directory' % root
for (root, _, fnames) in sorted(os.walk(root)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
imgs = images[:min(max_dataset_size, len(images))]
</DeepExtract>
if len(imgs) == 0:
raise RuntimeError('Found 0 images in: ' + root + '\nSupported image extensions are: ' + ','.join(IMG_EXTENSIONS))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
|
def __init__(self, root, transform=None, return_paths=False, loader=default_loader):
images = []
assert os.path.isdir(root), '%s is not a valid directory' % root
for (root, _, fnames) in sorted(os.walk(root)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
imgs = images[:min(max_dataset_size, len(images))]
if len(imgs) == 0:
raise RuntimeError('Found 0 images in: ' + root + '\nSupported image extensions are: ' + ','.join(IMG_EXTENSIONS))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
|
DMAD
|
positive
|
def create(self, xpath, xast, node, context):
<DeepExtract>
matches = node.xpath(xpath, **context)
if matches and isinstance(matches, list):
match = matches[0]
elif matches:
match = matches
</DeepExtract>
if match is not None:
return match
return _create_xml_node(xast, node, context)
|
def create(self, xpath, xast, node, context):
matches = node.xpath(xpath, **context)
if matches and isinstance(matches, list):
match = matches[0]
elif matches:
match = matches
if match is not None:
return match
return _create_xml_node(xast, node, context)
|
eulxml
|
positive
|
def load_config(config_file=None, profile=None, **kwargs):
"""Load D-Wave Cloud Client configuration based on a configuration file.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`load_config()`. These values replace
values read from a configuration file, and therefore must be **strings**, including float
values for timeouts, boolean flags, and solver feature
constraints (a dictionary encoded as JSON).
2. Values specified as environment variables.
3. Values specified in the configuration file.
4. Values specified as :class:`~dwave.cloud.client.Client` instance defaults.
5. Values specified in :class:`~dwave.cloud.client.Client` class
:attr:`~dwave.cloud.client.Client.DEFAULTS`.
Configuration-file format is described in :mod:`dwave.cloud.config`.
Available configuration-file options are identical to
:class:`~dwave.cloud.client.Client` constructor argument names.
If the location of the configuration file is not specified, auto-detection
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
If a configuration file explicitly specified, via an argument or
environment variable, does not exist or is unreadable, loading fails with
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails
with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is
readable but invalid as a configuration file.
Similarly, if a profile explicitly specified, via an argument or
environment variable, is not present in the loaded configuration, loading fails
with :exc:`ValueError`. Explicit profile selection also fails if the configuration
file is not explicitly specified, detected on the system, or defined via
an environment variable.
Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``,
``DWAVE_API_CLIENT``, ``DWAVE_API_REGION``, ``DWAVE_API_ENDPOINT``,
``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``,
``DWAVE_API_HEADERS``, ``DWAVE_METADATA_API_ENDPOINT``.
Environment variables are described in :mod:`dwave.cloud.config`.
Args:
config_file (str/[str]/None/False/True, default=None):
Path to configuration file(s).
If `None`, the value is taken from `DWAVE_CONFIG_FILE` environment
variable if defined. If the environment variable is undefined or empty,
auto-detection searches for existing configuration files in the standard
directories of :func:`get_configfile_paths`.
If `False`, loading from file(s) is skipped; if `True`, forces auto-detection
(regardless of the `DWAVE_CONFIG_FILE` environment variable).
profile (str, default=None):
Profile name (name of the profile section in the configuration file).
If undefined, inferred from `DWAVE_PROFILE` environment variable if
defined. If the environment variable is undefined or empty, a profile is
selected in the following order:
1. From the default section if it includes a profile key.
2. The first section (after the default section).
3. If no other section is defined besides `[defaults]`, the defaults
section is promoted and selected.
**kwargs (dict, optional):
:class:`~dwave.cloud.client.Client` constructor arguments.
Returns:
dict:
Mapping of configuration keys to values for the profile (section),
as read from the configuration file and optionally overridden by
environment values and specified keyword arguments.
Raises:
:exc:`ValueError`:
Invalid (non-existing) profile name.
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Note:
Prior to 0.8.0, some keyword arguments did not overwrite config
variables when their value was ``None``. Now we consistently do
:meth:`dict.update` on the config read from file/env for all ``kwargs``.
Examples:
This example loads the configuration from an auto-detected configuration file
in the home directory of a Windows system user.
>>> from dwave.cloud import config
>>> config.load_config() # doctest: +SKIP
{'client': 'qpu',
'endpoint': 'https://url.of.some.dwavesystem.com/sapi',
'proxy': None,
'solver': 'EXAMPLE_2000Q_SYSTEM_A',
'token': 'DEF-987654321987654321987654321',
'headers': None}
... # See which configuration file was loaded
>>> config.get_configfile_paths() # doctest: +SKIP
['C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
Additional examples are given in :mod:`dwave.cloud.config`.
"""
logger.trace('load_config(config_file=%r, profile=%r, kwargs=%r)', config_file, profile, kwargs)
if profile is None:
profile = os.getenv('DWAVE_PROFILE')
if config_file == False:
section = {}
elif config_file == True:
<DeepExtract>
config = load_config_from_files(None)
first_section = next(iter(config.sections() + [None]))
config_defaults = config.defaults()
if not profile:
profile = config_defaults.get('profile', first_section)
if profile:
try:
section = dict(config[profile])
except KeyError:
raise ValueError('Config profile {!r} not found'.format(profile))
elif config_defaults:
section = config_defaults
else:
section = {}
section = section
</DeepExtract>
else:
if config_file is None:
config_file = os.getenv('DWAVE_CONFIG_FILE')
filenames = None
if config_file:
if isinstance(config_file, str):
filenames = [config_file]
else:
filenames = config_file
<DeepExtract>
config = load_config_from_files(filenames)
first_section = next(iter(config.sections() + [None]))
config_defaults = config.defaults()
if not profile:
profile = config_defaults.get('profile', first_section)
if profile:
try:
section = dict(config[profile])
except KeyError:
raise ValueError('Config profile {!r} not found'.format(profile))
elif config_defaults:
section = config_defaults
else:
section = {}
section = section
</DeepExtract>
logger.trace('config (from files) = %r', section)
<DeepExtract>
envopts = {opt: os.getenv(env) for (env, opt) in ENV_OPTION_MAP.items()}
update_config(section, envopts)
</DeepExtract>
logger.trace('config (from files+env) = %r', section)
<DeepExtract>
updates = {k: v for (k, v) in kwargs.items() if v is not None and v != ''}
for (group, optionset) in MUTUALLY_EXCLUSIVE_OPTIONS.items():
if updates.keys() & optionset:
for excluded in optionset:
section.pop(excluded, None)
section.update(updates)
</DeepExtract>
logger.trace('config (from files+env+kwargs) = %r', section)
return section
|
def load_config(config_file=None, profile=None, **kwargs):
"""Load D-Wave Cloud Client configuration based on a configuration file.
Configuration values can be specified in multiple ways, ranked in the following
order (with 1 the highest ranked):
1. Values specified as keyword arguments in :func:`load_config()`. These values replace
values read from a configuration file, and therefore must be **strings**, including float
values for timeouts, boolean flags, and solver feature
constraints (a dictionary encoded as JSON).
2. Values specified as environment variables.
3. Values specified in the configuration file.
4. Values specified as :class:`~dwave.cloud.client.Client` instance defaults.
5. Values specified in :class:`~dwave.cloud.client.Client` class
:attr:`~dwave.cloud.client.Client.DEFAULTS`.
Configuration-file format is described in :mod:`dwave.cloud.config`.
Available configuration-file options are identical to
:class:`~dwave.cloud.client.Client` constructor argument names.
If the location of the configuration file is not specified, auto-detection
searches for existing configuration files in the standard directories
of :func:`get_configfile_paths`.
If a configuration file explicitly specified, via an argument or
environment variable, does not exist or is unreadable, loading fails with
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`. Loading fails
with :exc:`~dwave.cloud.exceptions.ConfigFileParseError` if the file is
readable but invalid as a configuration file.
Similarly, if a profile explicitly specified, via an argument or
environment variable, is not present in the loaded configuration, loading fails
with :exc:`ValueError`. Explicit profile selection also fails if the configuration
file is not explicitly specified, detected on the system, or defined via
an environment variable.
Environment variables: ``DWAVE_CONFIG_FILE``, ``DWAVE_PROFILE``,
``DWAVE_API_CLIENT``, ``DWAVE_API_REGION``, ``DWAVE_API_ENDPOINT``,
``DWAVE_API_TOKEN``, ``DWAVE_API_SOLVER``, ``DWAVE_API_PROXY``,
``DWAVE_API_HEADERS``, ``DWAVE_METADATA_API_ENDPOINT``.
Environment variables are described in :mod:`dwave.cloud.config`.
Args:
config_file (str/[str]/None/False/True, default=None):
Path to configuration file(s).
If `None`, the value is taken from `DWAVE_CONFIG_FILE` environment
variable if defined. If the environment variable is undefined or empty,
auto-detection searches for existing configuration files in the standard
directories of :func:`get_configfile_paths`.
If `False`, loading from file(s) is skipped; if `True`, forces auto-detection
(regardless of the `DWAVE_CONFIG_FILE` environment variable).
profile (str, default=None):
Profile name (name of the profile section in the configuration file).
If undefined, inferred from `DWAVE_PROFILE` environment variable if
defined. If the environment variable is undefined or empty, a profile is
selected in the following order:
1. From the default section if it includes a profile key.
2. The first section (after the default section).
3. If no other section is defined besides `[defaults]`, the defaults
section is promoted and selected.
**kwargs (dict, optional):
:class:`~dwave.cloud.client.Client` constructor arguments.
Returns:
dict:
Mapping of configuration keys to values for the profile (section),
as read from the configuration file and optionally overridden by
environment values and specified keyword arguments.
Raises:
:exc:`ValueError`:
Invalid (non-existing) profile name.
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Note:
Prior to 0.8.0, some keyword arguments did not overwrite config
variables when their value was ``None``. Now we consistently do
:meth:`dict.update` on the config read from file/env for all ``kwargs``.
Examples:
This example loads the configuration from an auto-detected configuration file
in the home directory of a Windows system user.
>>> from dwave.cloud import config
>>> config.load_config() # doctest: +SKIP
{'client': 'qpu',
'endpoint': 'https://url.of.some.dwavesystem.com/sapi',
'proxy': None,
'solver': 'EXAMPLE_2000Q_SYSTEM_A',
'token': 'DEF-987654321987654321987654321',
'headers': None}
... # See which configuration file was loaded
>>> config.get_configfile_paths() # doctest: +SKIP
['C:\\Users\\jane\\AppData\\Local\\dwavesystem\\dwave\\dwave.conf']
Additional examples are given in :mod:`dwave.cloud.config`.
"""
logger.trace('load_config(config_file=%r, profile=%r, kwargs=%r)', config_file, profile, kwargs)
if profile is None:
profile = os.getenv('DWAVE_PROFILE')
if config_file == False:
section = {}
elif config_file == True:
config = load_config_from_files(None)
first_section = next(iter(config.sections() + [None]))
config_defaults = config.defaults()
if not profile:
profile = config_defaults.get('profile', first_section)
if profile:
try:
section = dict(config[profile])
except KeyError:
raise ValueError('Config profile {!r} not found'.format(profile))
elif config_defaults:
section = config_defaults
else:
section = {}
section = section
else:
if config_file is None:
config_file = os.getenv('DWAVE_CONFIG_FILE')
filenames = None
if config_file:
if isinstance(config_file, str):
filenames = [config_file]
else:
filenames = config_file
config = load_config_from_files(filenames)
first_section = next(iter(config.sections() + [None]))
config_defaults = config.defaults()
if not profile:
profile = config_defaults.get('profile', first_section)
if profile:
try:
section = dict(config[profile])
except KeyError:
raise ValueError('Config profile {!r} not found'.format(profile))
elif config_defaults:
section = config_defaults
else:
section = {}
section = section
logger.trace('config (from files) = %r', section)
envopts = {opt: os.getenv(env) for (env, opt) in ENV_OPTION_MAP.items()}
update_config(section, envopts)
logger.trace('config (from files+env) = %r', section)
updates = {k: v for (k, v) in kwargs.items() if v is not None and v != ''}
for (group, optionset) in MUTUALLY_EXCLUSIVE_OPTIONS.items():
if updates.keys() & optionset:
for excluded in optionset:
section.pop(excluded, None)
section.update(updates)
logger.trace('config (from files+env+kwargs) = %r', section)
return section
|
dwave-cloud-client
|
positive
|
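A minimal usage sketch for the load_config entry above, assuming dwave-cloud-client is installed and an auto-detectable configuration file exists; the profile name and token below are hypothetical placeholders. It exercises the documented precedence, where keyword arguments outrank environment variables, which in turn outrank the configuration file:
from dwave.cloud import config

# The token keyword argument ranks highest, so it overrides any value set via
# DWAVE_API_TOKEN or stored in the configuration file; the 'prod' profile is a
# placeholder and must exist in the loaded file for this call to succeed.
conf = config.load_config(profile='prod', token='ABC-hypothetical-token')
print(conf['token'])       # -> 'ABC-hypothetical-token'
print(conf.get('solver'))  # whatever the profile, environment, or defaults supply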
def calc_ari(group_idx_list_1, group_idx_list_2):
from collections import defaultdict
def make_set_dict(list):
set_dict = defaultdict(set)
add_element = lambda idx_group: set_dict[idx_group[1]].add(idx_group[0])
map(add_element, enumerate(list))
return set_dict
def check_short_circuit(set_dict_1, list_1, set_dict_2, list_2):
both_all_apart = len(set_dict_1) == len(list_1) and len(set_dict_2) == len(list_2)
both_all_together = len(set_dict_1) == 1 and len(set_dict_2) == 1
return both_all_apart or both_all_together
def gen_contingency_data(set_dict_1, set_dict_2):
array_dim = (len(set_dict_1), len(set_dict_2))
Ns = numpy.ndarray(array_dim)
for (idx_1, value1) in enumerate(set_dict_1.values()):
for (idx_2, value2) in enumerate(set_dict_2.values()):
Ns[idx_1, idx_2] = len(value1.intersection(value2))
As = Ns.sum(axis=1)
Bs = Ns.sum(axis=0)
return (Ns, As, Bs)
def choose_2_sum(x):
return sum(x * (x - 1) / 2.0)
<DeepExtract>
set_dict = defaultdict(set)
add_element = lambda idx_group: set_dict[idx_group[1]].add(idx_group[0])
map(add_element, enumerate(group_idx_list_1))
group_idx_dict_1 = set_dict
</DeepExtract>
<DeepExtract>
set_dict = defaultdict(set)
add_element = lambda idx_group: set_dict[idx_group[1]].add(idx_group[0])
map(add_element, enumerate(group_idx_list_2))
group_idx_dict_2 = set_dict
</DeepExtract>
if check_short_circuit(group_idx_dict_1, group_idx_list_1, group_idx_dict_2, group_idx_list_2):
return 1.0
<DeepExtract>
array_dim = (len(group_idx_dict_1), len(group_idx_dict_2))
Ns = numpy.ndarray(array_dim)
for (idx_1, value1) in enumerate(group_idx_dict_1.values()):
for (idx_2, value2) in enumerate(group_idx_dict_2.values()):
Ns[idx_1, idx_2] = len(value1.intersection(value2))
As = Ns.sum(axis=1)
Bs = Ns.sum(axis=0)
(Ns, As, Bs) = (Ns, As, Bs)
</DeepExtract>
<DeepExtract>
n_choose_2 = sum(numpy.array([len(group_idx_list_1)]) * (numpy.array([len(group_idx_list_1)]) - 1) / 2.0)
</DeepExtract>
<DeepExtract>
cross_sums = sum(Ns[Ns > 1] * (Ns[Ns > 1] - 1) / 2.0)
</DeepExtract>
<DeepExtract>
a_sums = sum(As * (As - 1) / 2.0)
</DeepExtract>
<DeepExtract>
b_sums = sum(Bs * (Bs - 1) / 2.0)
</DeepExtract>
numerator = n_choose_2 * cross_sums - a_sums * b_sums
denominator = 0.5 * n_choose_2 * (a_sums + b_sums) - a_sums * b_sums
return numerator / denominator
|
def calc_ari(group_idx_list_1, group_idx_list_2):
from collections import defaultdict
def make_set_dict(list):
set_dict = defaultdict(set)
add_element = lambda idx_group: set_dict[idx_group[1]].add(idx_group[0])
map(add_element, enumerate(list))
return set_dict
def check_short_circuit(set_dict_1, list_1, set_dict_2, list_2):
both_all_apart = len(set_dict_1) == len(list_1) and len(set_dict_2) == len(list_2)
both_all_together = len(set_dict_1) == 1 and len(set_dict_2) == 1
return both_all_apart or both_all_together
def gen_contingency_data(set_dict_1, set_dict_2):
array_dim = (len(set_dict_1), len(set_dict_2))
Ns = numpy.ndarray(array_dim)
for (idx_1, value1) in enumerate(set_dict_1.values()):
for (idx_2, value2) in enumerate(set_dict_2.values()):
Ns[idx_1, idx_2] = len(value1.intersection(value2))
As = Ns.sum(axis=1)
Bs = Ns.sum(axis=0)
return (Ns, As, Bs)
def choose_2_sum(x):
return sum(x * (x - 1) / 2.0)
set_dict = defaultdict(set)
add_element = lambda idx_group: set_dict[idx_group[1]].add(idx_group[0])
map(add_element, enumerate(group_idx_list_1))
group_idx_dict_1 = set_dict
set_dict = defaultdict(set)
add_element = lambda idx_group: set_dict[idx_group[1]].add(idx_group[0])
map(add_element, enumerate(group_idx_list_2))
group_idx_dict_2 = set_dict
if check_short_circuit(group_idx_dict_1, group_idx_list_1, group_idx_dict_2, group_idx_list_2):
return 1.0
array_dim = (len(group_idx_dict_1), len(group_idx_dict_2))
Ns = numpy.ndarray(array_dim)
for (idx_1, value1) in enumerate(group_idx_dict_1.values()):
for (idx_2, value2) in enumerate(group_idx_dict_2.values()):
Ns[idx_1, idx_2] = len(value1.intersection(value2))
As = Ns.sum(axis=1)
Bs = Ns.sum(axis=0)
(Ns, As, Bs) = (Ns, As, Bs)
n_choose_2 = sum(numpy.array([len(group_idx_list_1)]) * (numpy.array([len(group_idx_list_1)]) - 1) / 2.0)
cross_sums = sum(Ns[Ns > 1] * (Ns[Ns > 1] - 1) / 2.0)
a_sums = sum(As * (As - 1) / 2.0)
b_sums = sum(Bs * (Bs - 1) / 2.0)
numerator = n_choose_2 * cross_sums - a_sums * b_sums
denominator = 0.5 * n_choose_2 * (a_sums + b_sums) - a_sums * b_sums
return numerator / denominator
|
crosscat
|
positive
|
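The calc_ari pair above implements the Adjusted Rand Index, ARI = (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex), computed from a contingency table of the two labelings; note that it builds its set dictionaries by calling map() for its side effect, which relies on Python 2's eager map (under Python 3 a plain loop would be needed). A quick cross-check against scikit-learn's reference implementation, assuming scikit-learn is available and using two arbitrary example labelings:
from sklearn.metrics import adjusted_rand_score

# Two labelings of six items: the first forms three pairs, the second keeps the
# first pair, isolates one item, and groups the remaining three together.
labels_a = [0, 0, 1, 1, 2, 2]
labels_b = [0, 0, 1, 2, 2, 2]

# Contingency counts give sum C(n_ij,2) = 2, sum C(a_i,2) = 3, sum C(b_j,2) = 4,
# and C(6,2) = 15, so ARI = (2 - 3*4/15) / (0.5*(3+4) - 3*4/15) = 1.2/2.7 ≈ 0.444.
print(adjusted_rand_score(labels_a, labels_b))  # ≈ 0.444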
def _generate(self, bits):
try:
<DeepExtract>
self._key = rsalib.generate(bits)
self._cipher = Cipher.new(self._key)
self._signer = Signer.new(self._key)
</DeepExtract>
finally:
self.genlock.release()
|
def _generate(self, bits):
try:
self._key = rsalib.generate(bits)
self._cipher = Cipher.new(self._key)
self._signer = Signer.new(self._key)
finally:
self.genlock.release()
|
EJTP-lib-python
|
positive
|
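The _generate pair above releases self.genlock in a finally clause, implying the lock was acquired before (presumably background) key generation began and must be released even if generation raises. A generic sketch of that pattern using the standard threading module; the key-generation call below is a stand-in, not EJTP-lib-python's rsalib:
import threading

genlock = threading.Lock()

def generate_key(bits):
    # Stand-in for an expensive key-generation call such as RSA keygen.
    return ('dummy-key', bits)

def _generate(bits):
    try:
        # Work happens while the lock is held by this worker...
        return generate_key(bits)
    finally:
        # ...and the finally clause guarantees waiters are unblocked
        # even if key generation fails.
        genlock.release()

genlock.acquire()    # acquired before the worker runs
print(_generate(2048))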
def fit(self, desc):
"""Fit KPCA on the precomputed descriptor matrix
Parameters
----------
desc: array-like, shape=[n_descriptors, n_samples]
design matrix
Returns
-------
"""
if self._fitted:
raise RuntimeError('PCA already fitted before, please reinitialise the object!')
<DeepExtract>
n_sample = len(desc)
if self.n_sparse is None:
self.n_sparse = max(10, n_sample // 20)
if self.n_sparse > 0:
self.sbs = self.sparsifier.sparsify(desc, self.n_sparse)
else:
print('Not using any sparsification')
self.sbs = range(n_sample)
self.desc_sbs = desc[self.sbs]
</DeepExtract>
kNN = self.k_transform.compute(self.desc_sbs)
self.kpca.fit(kNN)
self._fitted = True
|
def fit(self, desc):
"""Fit KPCA on the precomputed descriptor matrix
Parameters
----------
desc: array-like, shape=[n_descriptors, n_samples]
design matrix
Returns
-------
"""
if self._fitted:
raise RuntimeError('PCA already fitted before, please reinitialise the object!')
n_sample = len(desc)
if self.n_sparse is None:
self.n_sparse = max(10, n_sample // 20)
if self.n_sparse > 0:
self.sbs = self.sparsifier.sparsify(desc, self.n_sparse)
else:
print('Not using any sparsification')
self.sbs = range(n_sample)
self.desc_sbs = desc[self.sbs]
kNN = self.k_transform.compute(self.desc_sbs)
self.kpca.fit(kNN)
self._fitted = True
|
ASAP
|
positive
|
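The fit pair above (the ASAP KPCA wrapper) defaults its sparsification size to max(10, n_sample // 20), roughly 5% of the samples but never fewer than 10, and then fits on a kernel computed over the selected subset; note the code indexes samples along the first axis of desc, so the docstring's shape=[n_descriptors, n_samples] appears to have the axes swapped. A minimal numpy sketch of the default subset size, with a random selection standing in for the sparsifier (the kernel and KPCA steps are not reproduced here):
import numpy as np

desc = np.random.rand(500, 32)       # 500 samples, 32-dimensional descriptors
n_sample = len(desc)
n_sparse = max(10, n_sample // 20)   # -> 25 for 500 samples
sbs = np.random.choice(n_sample, n_sparse, replace=False)
desc_sbs = desc[sbs]                 # subset that would feed the kernel/KPCA fit
print(desc_sbs.shape)                # (25, 32)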