before: stringlengths (0 – 955k)
after: stringlengths (0 – 877k)
repo: stringlengths (1 – 74)
type: stringclasses (1 value)
def basic_protocol(): <DeepExtract> a = random.randrange(1, p - 1) A = pow(g, a, p) (a, A) = (a, A) </DeepExtract> <DeepExtract> a = random.randrange(1, p - 1) A = pow(g, a, p) (b, B) = (a, A) </DeepExtract> s_A = pow(B, a, p) <DeepExtract> aeskey_A = sha1(hex(long(s_A))).digest()[0:16] </DeepExtract> msg_A = 'hello world' cipher_A = aes.encryptData(aeskey_A, msg_A) s_B = pow(A, b, p) <DeepExtract> aeskey_B = sha1(hex(long(s_B))).digest()[0:16] </DeepExtract> msg_B = aes.decryptData(aeskey_B, cipher_A) cipher_B = aes.encryptData(aeskey_B, msg_B) check_A = aes.decryptData(aeskey_A, cipher_B) assert msg_A == check_A and msg_B == msg_A
def basic_protocol(): a = random.randrange(1, p - 1) A = pow(g, a, p) (a, A) = (a, A) a = random.randrange(1, p - 1) A = pow(g, a, p) (b, B) = (a, A) s_A = pow(B, a, p) aeskey_A = sha1(hex(long(s_A))).digest()[0:16] msg_A = 'hello world' cipher_A = aes.encryptData(aeskey_A, msg_A) s_B = pow(A, b, p) aeskey_B = sha1(hex(long(s_B))).digest()[0:16] msg_B = aes.decryptData(aeskey_B, cipher_A) cipher_B = aes.encryptData(aeskey_B, msg_B) check_A = aes.decryptData(aeskey_A, cipher_B) assert msg_A == check_A and msg_B == msg_A
cryptopals
positive
def before_seghead_process(ref_frame_embedding=None, previous_frame_embedding=None, current_frame_embedding=None, ref_scribble_label=None, previous_frame_mask=None, normalize_nearest_neighbor_distances=True, use_local_map=True, seq_names=None, gt_ids=None, k_nearest_neighbors=1, global_map_tmp_dic=None, local_map_dics=None, interaction_num=None, start_annotated_frame=None, frame_num=None, dynamic_seghead=None): """return: feature_embedding,global_match_map,local_match_map,previous_frame_mask""" global_map_tmp_dic = global_map_tmp_dic dic_tmp = {} (bs, c, h, w) = current_frame_embedding.size() scale_ref_scribble_label = torch.nn.functional.interpolate(ref_scribble_label.float(), size=(h, w), mode='nearest') scale_ref_scribble_label = scale_ref_scribble_label.int() scale_previous_frame_label = torch.nn.functional.interpolate(previous_frame_mask.float(), size=(h, w), mode='nearest') scale_previous_frame_label = scale_previous_frame_label.int() if USE_CORRELATION_COST: n_chunks = 20 else: n_chunks = 500 for n in range(bs): seq_current_frame_embedding = current_frame_embedding[n] seq_ref_frame_embedding = ref_frame_embedding[n] seq_prev_frame_embedding = previous_frame_embedding[n] seq_ref_frame_embedding = seq_ref_frame_embedding.permute(1, 2, 0) seq_current_frame_embedding = seq_current_frame_embedding.permute(1, 2, 0) seq_ref_scribble_label = scale_ref_scribble_label[n].permute(1, 2, 0) t2 = time.time() <DeepExtract> assert seq_ref_frame_embedding.size()[:2] == seq_ref_scribble_label.size()[:2] (h, w, _) = seq_current_frame_embedding.size() reference_labels_flat = seq_ref_scribble_label.view(-1) if gt_ids[n] is None: ref_obj_ids = torch.unique(reference_labels_flat)[-1] ref_obj_ids = np.arange(0, ref_obj_ids.cpu() + 1) gt_ids[n] = torch.from_numpy(ref_obj_ids) gt_ids[n] = gt_ids[n].int() if torch.cuda.is_available(): gt_ids[n] = gt_ids[n].cuda() else: gt_ids[n] = gt_ids[n].cpu() gt_ids[n] = np.arange(0, gt_ids[n] + 1) gt_ids[n] = torch.from_numpy(gt_ids[n]) gt_ids[n] = gt_ids[n].int() if torch.cuda.is_available(): gt_ids[n] = gt_ids[n].cuda() embedding_dim = seq_current_frame_embedding.size()[-1] query_embeddings_flat = seq_current_frame_embedding.view(-1, embedding_dim) reference_embeddings_flat = seq_ref_frame_embedding.view(-1, embedding_dim) nn_features = _nearest_neighbor_features_per_object_in_chunks(reference_embeddings_flat, query_embeddings_flat, reference_labels_flat, gt_ids[n], k_nearest_neighbors, 10) nn_features_dim = nn_features.size()[-1] nn_features_reshape = nn_features.view(1, h, w, gt_ids[n].size(0), nn_features_dim) (nn_features_n, ref_obj_ids) = (nn_features_reshape, gt_ids[n]) </DeepExtract> if normalize_nearest_neighbor_distances: nn_features_n = (torch.sigmoid(nn_features_n) - 0.5) * 2 t3 = time.time() if seq_names[n] not in global_map_tmp_dic: global_map_tmp_dic[seq_names[n]] = torch.ones_like(nn_features_n).repeat(104, 1, 1, 1, 1) if torch.cuda.is_available(): global_map_tmp_dic[seq_names[n]] = global_map_tmp_dic[seq_names[n]].cuda() nn_features_n = torch.where(nn_features_n <= global_map_tmp_dic[seq_names[n]][frame_num[n]].unsqueeze(0), nn_features_n, global_map_tmp_dic[seq_names[n]][frame_num[n]].unsqueeze(0)) global_map_tmp_dic[seq_names[n]][frame_num[n]] = nn_features_n.detach() t4 = time.time() seq_prev_frame_embedding = seq_prev_frame_embedding.permute(1, 2, 0) seq_previous_frame_label = scale_previous_frame_label[n].permute(1, 2, 0) if use_local_map: <DeepExtract> if USE_CORRELATION_COST: d = local_pairwise_distances(seq_current_frame_embedding, 
seq_prev_frame_embedding, max_distance=cfg.MODEL_MAX_LOCAL_DISTANCE) else: d = local_pairwise_distances2(seq_current_frame_embedding, seq_prev_frame_embedding, max_distance=cfg.MODEL_MAX_LOCAL_DISTANCE) (height, width) = seq_prev_frame_embedding.size()[:2] if USE_CORRELATION_COST: corr_op = SpatialCorrelationSampler(kernel_size=1, patch_size=2 * cfg.MODEL_MAX_LOCAL_DISTANCE + 1, stride=1, dilation_patch=1, padding=0) tmp_prev_frame_labels = (seq_previous_frame_label + 1).float().permute(2, 0, 1) tmp_prev_frame_labels = torch.unsqueeze(tmp_prev_frame_labels, 0) ones_ = torch.ones_like(tmp_prev_frame_labels) offset_labels = corr_op(ones_, tmp_prev_frame_labels) (bs, _, _, hh, ww) = offset_labels.size() offset_labels = offset_labels.view(bs, -1, hh, ww) offset_labels = torch.squeeze(offset_labels, 0) offset_labels = offset_labels.permute(1, 2, 0) offset_labels = torch.unsqueeze(offset_labels, 3) offset_labels = torch.round(offset_labels - 1) offset_masks = torch.eq(offset_labels, ref_obj_ids.float().unsqueeze(0).unsqueeze(0).unsqueeze(0)) elif MODEL_UNFOLD: labels = seq_previous_frame_label.float().permute(2, 0, 1).unsqueeze(0) padded_labels = F.pad(labels, (2 * cfg.MODEL_MAX_LOCAL_DISTANCE, 2 * cfg.MODEL_MAX_LOCAL_DISTANCE, 2 * cfg.MODEL_MAX_LOCAL_DISTANCE, 2 * cfg.MODEL_MAX_LOCAL_DISTANCE)) offset_labels = F.unfold(padded_labels, kernel_size=(height, width), stride=(2, 2)).view(height, width, -1, 1) offset_masks = torch.eq(offset_labels, ref_obj_ids.float().unsqueeze(0).unsqueeze(0).unsqueeze(0)) else: masks = torch.eq(seq_previous_frame_label, ref_obj_ids.unsqueeze(0).unsqueeze(0)) padded_masks = nn.functional.pad(masks, (0, 0, cfg.MODEL_MAX_LOCAL_DISTANCE, cfg.MODEL_MAX_LOCAL_DISTANCE, cfg.MODEL_MAX_LOCAL_DISTANCE, cfg.MODEL_MAX_LOCAL_DISTANCE)) offset_masks = [] for y_start in range(2 * cfg.MODEL_MAX_LOCAL_DISTANCE + 1): y_end = y_start + height masks_slice = padded_masks[y_start:y_end] for x_start in range(2 * cfg.MODEL_MAX_LOCAL_DISTANCE + 1): x_end = x_start + width offset_mask = masks_slice[:, x_start:x_end] offset_masks.append(offset_mask) offset_masks = torch.stack(offset_masks, dim=2) d_tiled = d.unsqueeze(-1).repeat((1, 1, 1, ref_obj_ids.size(0))) pad = torch.ones_like(d_tiled) d_masked = torch.where(offset_masks, d_tiled, pad) (dists, _) = torch.min(d_masked, dim=2) dists = dists.view(1, height, width, ref_obj_ids.size(0), 1) prev_frame_nn_features_n = dists </DeepExtract> else: <DeepExtract> assert seq_prev_frame_embedding.size()[:2] == seq_previous_frame_label.size()[:2] (h, w, _) = seq_current_frame_embedding.size() reference_labels_flat = seq_previous_frame_label.view(-1) if gt_ids[n] is None: ref_obj_ids = torch.unique(reference_labels_flat)[-1] ref_obj_ids = np.arange(0, ref_obj_ids.cpu() + 1) gt_ids[n] = torch.from_numpy(ref_obj_ids) gt_ids[n] = gt_ids[n].int() if torch.cuda.is_available(): gt_ids[n] = gt_ids[n].cuda() else: gt_ids[n] = gt_ids[n].cpu() gt_ids[n] = np.arange(0, gt_ids[n] + 1) gt_ids[n] = torch.from_numpy(gt_ids[n]) gt_ids[n] = gt_ids[n].int() if torch.cuda.is_available(): gt_ids[n] = gt_ids[n].cuda() embedding_dim = seq_current_frame_embedding.size()[-1] query_embeddings_flat = seq_current_frame_embedding.view(-1, embedding_dim) reference_embeddings_flat = seq_prev_frame_embedding.view(-1, embedding_dim) nn_features = _nearest_neighbor_features_per_object_in_chunks(reference_embeddings_flat, query_embeddings_flat, reference_labels_flat, gt_ids[n], k_nearest_neighbors, 20) nn_features_dim = nn_features.size()[-1] nn_features_reshape = nn_features.view(1, h, 
w, gt_ids[n].size(0), nn_features_dim) (prev_frame_nn_features_n, _) = (nn_features_reshape, gt_ids[n]) </DeepExtract> prev_frame_nn_features_n = (torch.sigmoid(prev_frame_nn_features_n) - 0.5) * 2 t5 = time.time() if local_map_dics is not None: (local_map_tmp_dic, local_map_dist_dic) = local_map_dics if seq_names[n] not in local_map_dist_dic: local_map_dist_dic[seq_names[n]] = torch.zeros(104, 9) if torch.cuda.is_available(): local_map_dist_dic[seq_names[n]] = local_map_dist_dic[seq_names[n]].cuda() if seq_names[n] not in local_map_tmp_dic: local_map_tmp_dic[seq_names[n]] = torch.zeros_like(prev_frame_nn_features_n).unsqueeze(0).repeat(104, 9, 1, 1, 1, 1) if torch.cuda.is_available(): local_map_tmp_dic[seq_names[n]] = local_map_tmp_dic[seq_names[n]].cuda() local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 1] = 1.0 / abs(frame_num[n] - start_annotated_frame) local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] = prev_frame_nn_features_n.squeeze(0).detach() if interaction_num == 1: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) elif local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 1] > local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 2]: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) else: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 2] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) local_map_dics = (local_map_tmp_dic, local_map_dist_dic) previous_frame_to_cat = seq_previous_frame_label.float() == ref_obj_ids.float() to_cat_current_frame_embedding = current_frame_embedding[n].unsqueeze(0).repeat((ref_obj_ids.size(0), 1, 1, 1)) to_cat_nn_feature_n = nn_features_n.squeeze(0).permute(2, 3, 0, 1) to_cat_previous_frame = previous_frame_to_cat.unsqueeze(-1).permute(2, 3, 0, 1).float() to_cat_prev_frame_nn_feature_n = prev_frame_nn_features_n.squeeze(0).permute(2, 3, 0, 1) to_refine = torch.cat((to_cat_current_frame_embedding, to_cat_prev_frame_nn_feature_n), 1) to_cat_refined_prev_frame_nn_feature_n = self.localrefine(to_refine) to_cat = torch.cat((to_cat_current_frame_embedding, to_cat_nn_feature_n, to_cat_refined_prev_frame_nn_feature_n, to_cat_previous_frame), 1) if cfg.MODEL_GLOBAL_ATTEN: cat_global_ = torch.cat((to_cat_current_frame_embedding, to_cat_nn_feature_n), 1) atten_maps = self.global_atten(cat_global_) atten_maps = torch.nn.functional.softmax(atten_maps, 0) pred_ = dynamic_seghead(to_cat, atten_maps) else: pred_ = dynamic_seghead(to_cat) pred_ = pred_.permute(1, 0, 2, 3) dic_tmp[seq_names[n]] = pred_ if local_map_dics is None: return (dic_tmp, global_map_tmp_dic) else: return (dic_tmp, global_map_tmp_dic, local_map_dics)
def before_seghead_process(ref_frame_embedding=None, previous_frame_embedding=None, current_frame_embedding=None, ref_scribble_label=None, previous_frame_mask=None, normalize_nearest_neighbor_distances=True, use_local_map=True, seq_names=None, gt_ids=None, k_nearest_neighbors=1, global_map_tmp_dic=None, local_map_dics=None, interaction_num=None, start_annotated_frame=None, frame_num=None, dynamic_seghead=None): """return: feature_embedding,global_match_map,local_match_map,previous_frame_mask""" global_map_tmp_dic = global_map_tmp_dic dic_tmp = {} (bs, c, h, w) = current_frame_embedding.size() scale_ref_scribble_label = torch.nn.functional.interpolate(ref_scribble_label.float(), size=(h, w), mode='nearest') scale_ref_scribble_label = scale_ref_scribble_label.int() scale_previous_frame_label = torch.nn.functional.interpolate(previous_frame_mask.float(), size=(h, w), mode='nearest') scale_previous_frame_label = scale_previous_frame_label.int() if USE_CORRELATION_COST: n_chunks = 20 else: n_chunks = 500 for n in range(bs): seq_current_frame_embedding = current_frame_embedding[n] seq_ref_frame_embedding = ref_frame_embedding[n] seq_prev_frame_embedding = previous_frame_embedding[n] seq_ref_frame_embedding = seq_ref_frame_embedding.permute(1, 2, 0) seq_current_frame_embedding = seq_current_frame_embedding.permute(1, 2, 0) seq_ref_scribble_label = scale_ref_scribble_label[n].permute(1, 2, 0) t2 = time.time() assert seq_ref_frame_embedding.size()[:2] == seq_ref_scribble_label.size()[:2] (h, w, _) = seq_current_frame_embedding.size() reference_labels_flat = seq_ref_scribble_label.view(-1) if gt_ids[n] is None: ref_obj_ids = torch.unique(reference_labels_flat)[-1] ref_obj_ids = np.arange(0, ref_obj_ids.cpu() + 1) gt_ids[n] = torch.from_numpy(ref_obj_ids) gt_ids[n] = gt_ids[n].int() if torch.cuda.is_available(): gt_ids[n] = gt_ids[n].cuda() else: gt_ids[n] = gt_ids[n].cpu() gt_ids[n] = np.arange(0, gt_ids[n] + 1) gt_ids[n] = torch.from_numpy(gt_ids[n]) gt_ids[n] = gt_ids[n].int() if torch.cuda.is_available(): gt_ids[n] = gt_ids[n].cuda() embedding_dim = seq_current_frame_embedding.size()[-1] query_embeddings_flat = seq_current_frame_embedding.view(-1, embedding_dim) reference_embeddings_flat = seq_ref_frame_embedding.view(-1, embedding_dim) nn_features = _nearest_neighbor_features_per_object_in_chunks(reference_embeddings_flat, query_embeddings_flat, reference_labels_flat, gt_ids[n], k_nearest_neighbors, 10) nn_features_dim = nn_features.size()[-1] nn_features_reshape = nn_features.view(1, h, w, gt_ids[n].size(0), nn_features_dim) (nn_features_n, ref_obj_ids) = (nn_features_reshape, gt_ids[n]) if normalize_nearest_neighbor_distances: nn_features_n = (torch.sigmoid(nn_features_n) - 0.5) * 2 t3 = time.time() if seq_names[n] not in global_map_tmp_dic: global_map_tmp_dic[seq_names[n]] = torch.ones_like(nn_features_n).repeat(104, 1, 1, 1, 1) if torch.cuda.is_available(): global_map_tmp_dic[seq_names[n]] = global_map_tmp_dic[seq_names[n]].cuda() nn_features_n = torch.where(nn_features_n <= global_map_tmp_dic[seq_names[n]][frame_num[n]].unsqueeze(0), nn_features_n, global_map_tmp_dic[seq_names[n]][frame_num[n]].unsqueeze(0)) global_map_tmp_dic[seq_names[n]][frame_num[n]] = nn_features_n.detach() t4 = time.time() seq_prev_frame_embedding = seq_prev_frame_embedding.permute(1, 2, 0) seq_previous_frame_label = scale_previous_frame_label[n].permute(1, 2, 0) if use_local_map: if USE_CORRELATION_COST: d = local_pairwise_distances(seq_current_frame_embedding, seq_prev_frame_embedding, 
max_distance=cfg.MODEL_MAX_LOCAL_DISTANCE) else: d = local_pairwise_distances2(seq_current_frame_embedding, seq_prev_frame_embedding, max_distance=cfg.MODEL_MAX_LOCAL_DISTANCE) (height, width) = seq_prev_frame_embedding.size()[:2] if USE_CORRELATION_COST: corr_op = SpatialCorrelationSampler(kernel_size=1, patch_size=2 * cfg.MODEL_MAX_LOCAL_DISTANCE + 1, stride=1, dilation_patch=1, padding=0) tmp_prev_frame_labels = (seq_previous_frame_label + 1).float().permute(2, 0, 1) tmp_prev_frame_labels = torch.unsqueeze(tmp_prev_frame_labels, 0) ones_ = torch.ones_like(tmp_prev_frame_labels) offset_labels = corr_op(ones_, tmp_prev_frame_labels) (bs, _, _, hh, ww) = offset_labels.size() offset_labels = offset_labels.view(bs, -1, hh, ww) offset_labels = torch.squeeze(offset_labels, 0) offset_labels = offset_labels.permute(1, 2, 0) offset_labels = torch.unsqueeze(offset_labels, 3) offset_labels = torch.round(offset_labels - 1) offset_masks = torch.eq(offset_labels, ref_obj_ids.float().unsqueeze(0).unsqueeze(0).unsqueeze(0)) elif MODEL_UNFOLD: labels = seq_previous_frame_label.float().permute(2, 0, 1).unsqueeze(0) padded_labels = F.pad(labels, (2 * cfg.MODEL_MAX_LOCAL_DISTANCE, 2 * cfg.MODEL_MAX_LOCAL_DISTANCE, 2 * cfg.MODEL_MAX_LOCAL_DISTANCE, 2 * cfg.MODEL_MAX_LOCAL_DISTANCE)) offset_labels = F.unfold(padded_labels, kernel_size=(height, width), stride=(2, 2)).view(height, width, -1, 1) offset_masks = torch.eq(offset_labels, ref_obj_ids.float().unsqueeze(0).unsqueeze(0).unsqueeze(0)) else: masks = torch.eq(seq_previous_frame_label, ref_obj_ids.unsqueeze(0).unsqueeze(0)) padded_masks = nn.functional.pad(masks, (0, 0, cfg.MODEL_MAX_LOCAL_DISTANCE, cfg.MODEL_MAX_LOCAL_DISTANCE, cfg.MODEL_MAX_LOCAL_DISTANCE, cfg.MODEL_MAX_LOCAL_DISTANCE)) offset_masks = [] for y_start in range(2 * cfg.MODEL_MAX_LOCAL_DISTANCE + 1): y_end = y_start + height masks_slice = padded_masks[y_start:y_end] for x_start in range(2 * cfg.MODEL_MAX_LOCAL_DISTANCE + 1): x_end = x_start + width offset_mask = masks_slice[:, x_start:x_end] offset_masks.append(offset_mask) offset_masks = torch.stack(offset_masks, dim=2) d_tiled = d.unsqueeze(-1).repeat((1, 1, 1, ref_obj_ids.size(0))) pad = torch.ones_like(d_tiled) d_masked = torch.where(offset_masks, d_tiled, pad) (dists, _) = torch.min(d_masked, dim=2) dists = dists.view(1, height, width, ref_obj_ids.size(0), 1) prev_frame_nn_features_n = dists else: assert seq_prev_frame_embedding.size()[:2] == seq_previous_frame_label.size()[:2] (h, w, _) = seq_current_frame_embedding.size() reference_labels_flat = seq_previous_frame_label.view(-1) if gt_ids[n] is None: ref_obj_ids = torch.unique(reference_labels_flat)[-1] ref_obj_ids = np.arange(0, ref_obj_ids.cpu() + 1) gt_ids[n] = torch.from_numpy(ref_obj_ids) gt_ids[n] = gt_ids[n].int() if torch.cuda.is_available(): gt_ids[n] = gt_ids[n].cuda() else: gt_ids[n] = gt_ids[n].cpu() gt_ids[n] = np.arange(0, gt_ids[n] + 1) gt_ids[n] = torch.from_numpy(gt_ids[n]) gt_ids[n] = gt_ids[n].int() if torch.cuda.is_available(): gt_ids[n] = gt_ids[n].cuda() embedding_dim = seq_current_frame_embedding.size()[-1] query_embeddings_flat = seq_current_frame_embedding.view(-1, embedding_dim) reference_embeddings_flat = seq_prev_frame_embedding.view(-1, embedding_dim) nn_features = _nearest_neighbor_features_per_object_in_chunks(reference_embeddings_flat, query_embeddings_flat, reference_labels_flat, gt_ids[n], k_nearest_neighbors, 20) nn_features_dim = nn_features.size()[-1] nn_features_reshape = nn_features.view(1, h, w, gt_ids[n].size(0), nn_features_dim) 
(prev_frame_nn_features_n, _) = (nn_features_reshape, gt_ids[n]) prev_frame_nn_features_n = (torch.sigmoid(prev_frame_nn_features_n) - 0.5) * 2 t5 = time.time() if local_map_dics is not None: (local_map_tmp_dic, local_map_dist_dic) = local_map_dics if seq_names[n] not in local_map_dist_dic: local_map_dist_dic[seq_names[n]] = torch.zeros(104, 9) if torch.cuda.is_available(): local_map_dist_dic[seq_names[n]] = local_map_dist_dic[seq_names[n]].cuda() if seq_names[n] not in local_map_tmp_dic: local_map_tmp_dic[seq_names[n]] = torch.zeros_like(prev_frame_nn_features_n).unsqueeze(0).repeat(104, 9, 1, 1, 1, 1) if torch.cuda.is_available(): local_map_tmp_dic[seq_names[n]] = local_map_tmp_dic[seq_names[n]].cuda() local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 1] = 1.0 / abs(frame_num[n] - start_annotated_frame) local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] = prev_frame_nn_features_n.squeeze(0).detach() if interaction_num == 1: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) elif local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 1] > local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 2]: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) else: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 2] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) local_map_dics = (local_map_tmp_dic, local_map_dist_dic) previous_frame_to_cat = seq_previous_frame_label.float() == ref_obj_ids.float() to_cat_current_frame_embedding = current_frame_embedding[n].unsqueeze(0).repeat((ref_obj_ids.size(0), 1, 1, 1)) to_cat_nn_feature_n = nn_features_n.squeeze(0).permute(2, 3, 0, 1) to_cat_previous_frame = previous_frame_to_cat.unsqueeze(-1).permute(2, 3, 0, 1).float() to_cat_prev_frame_nn_feature_n = prev_frame_nn_features_n.squeeze(0).permute(2, 3, 0, 1) to_refine = torch.cat((to_cat_current_frame_embedding, to_cat_prev_frame_nn_feature_n), 1) to_cat_refined_prev_frame_nn_feature_n = self.localrefine(to_refine) to_cat = torch.cat((to_cat_current_frame_embedding, to_cat_nn_feature_n, to_cat_refined_prev_frame_nn_feature_n, to_cat_previous_frame), 1) if cfg.MODEL_GLOBAL_ATTEN: cat_global_ = torch.cat((to_cat_current_frame_embedding, to_cat_nn_feature_n), 1) atten_maps = self.global_atten(cat_global_) atten_maps = torch.nn.functional.softmax(atten_maps, 0) pred_ = dynamic_seghead(to_cat, atten_maps) else: pred_ = dynamic_seghead(to_cat) pred_ = pred_.permute(1, 0, 2, 3) dic_tmp[seq_names[n]] = pred_ if local_map_dics is None: return (dic_tmp, global_map_tmp_dic) else: return (dic_tmp, global_map_tmp_dic, local_map_dics)
CVPR2020_MANet
positive
def test_21_insert_transactions(self): print('\n-----', sys._getframe().f_code.co_name, '-----') clients[2]['app'].insert_transaction(transactions[4]) time.sleep(1) print(' ----> wait for notification') for j in range(client_num - 2): <DeepExtract> msg = msg_processor[j].synchronize() if msg[KeyType.command] == bbclib.MsgType.RESPONSE_INSERT: assert msg[KeyType.transaction_id] == transactions[4].transaction_id print('[%d] inserted' % j) elif msg[KeyType.command] == bbclib.MsgType.NOTIFY_INSERTED: assert KeyType.asset_group_ids in msg print('[%d] notification txid=%s, asset_group=%s' % (j, binascii.b2a_hex(msg[KeyType.transaction_id]), [binascii.b2a_hex(a) for a in msg[KeyType.asset_group_ids]])) </DeepExtract> if j == 2: <DeepExtract> msg = msg_processor[j].synchronize() if msg[KeyType.command] == bbclib.MsgType.RESPONSE_INSERT: assert msg[KeyType.transaction_id] == transactions[4].transaction_id print('[%d] inserted' % j) elif msg[KeyType.command] == bbclib.MsgType.NOTIFY_INSERTED: assert KeyType.asset_group_ids in msg print('[%d] notification txid=%s, asset_group=%s' % (j, binascii.b2a_hex(msg[KeyType.transaction_id]), [binascii.b2a_hex(a) for a in msg[KeyType.asset_group_ids]])) </DeepExtract> time.sleep(1) clients[1]['app'].insert_transaction(transactions[5]) time.sleep(1) print(' ----> wait for notification') <DeepExtract> msg = msg_processor[1].synchronize() if msg[KeyType.command] == bbclib.MsgType.RESPONSE_INSERT: assert msg[KeyType.transaction_id] == transactions[5].transaction_id print('[%d] inserted' % 1) elif msg[KeyType.command] == bbclib.MsgType.NOTIFY_INSERTED: assert KeyType.asset_group_ids in msg print('[%d] notification txid=%s, asset_group=%s' % (1, binascii.b2a_hex(msg[KeyType.transaction_id]), [binascii.b2a_hex(a) for a in msg[KeyType.asset_group_ids]])) </DeepExtract> for i in range(core_num): diff_notify = get_stat_diffs(i)['data_handler'].get('NOTIFY_INSERTED', 0) print('[%d] received num of NOTIFY_INSERTED = %d' % (i, diff_notify)) if i in [2, 4]: assert diff_notify == 0 else: assert diff_notify == 1
def test_21_insert_transactions(self): print('\n-----', sys._getframe().f_code.co_name, '-----') clients[2]['app'].insert_transaction(transactions[4]) time.sleep(1) print(' ----> wait for notification') for j in range(client_num - 2): msg = msg_processor[j].synchronize() if msg[KeyType.command] == bbclib.MsgType.RESPONSE_INSERT: assert msg[KeyType.transaction_id] == transactions[4].transaction_id print('[%d] inserted' % j) elif msg[KeyType.command] == bbclib.MsgType.NOTIFY_INSERTED: assert KeyType.asset_group_ids in msg print('[%d] notification txid=%s, asset_group=%s' % (j, binascii.b2a_hex(msg[KeyType.transaction_id]), [binascii.b2a_hex(a) for a in msg[KeyType.asset_group_ids]])) if j == 2: msg = msg_processor[j].synchronize() if msg[KeyType.command] == bbclib.MsgType.RESPONSE_INSERT: assert msg[KeyType.transaction_id] == transactions[4].transaction_id print('[%d] inserted' % j) elif msg[KeyType.command] == bbclib.MsgType.NOTIFY_INSERTED: assert KeyType.asset_group_ids in msg print('[%d] notification txid=%s, asset_group=%s' % (j, binascii.b2a_hex(msg[KeyType.transaction_id]), [binascii.b2a_hex(a) for a in msg[KeyType.asset_group_ids]])) time.sleep(1) clients[1]['app'].insert_transaction(transactions[5]) time.sleep(1) print(' ----> wait for notification') msg = msg_processor[1].synchronize() if msg[KeyType.command] == bbclib.MsgType.RESPONSE_INSERT: assert msg[KeyType.transaction_id] == transactions[5].transaction_id print('[%d] inserted' % 1) elif msg[KeyType.command] == bbclib.MsgType.NOTIFY_INSERTED: assert KeyType.asset_group_ids in msg print('[%d] notification txid=%s, asset_group=%s' % (1, binascii.b2a_hex(msg[KeyType.transaction_id]), [binascii.b2a_hex(a) for a in msg[KeyType.asset_group_ids]])) for i in range(core_num): diff_notify = get_stat_diffs(i)['data_handler'].get('NOTIFY_INSERTED', 0) print('[%d] received num of NOTIFY_INSERTED = %d' % (i, diff_notify)) if i in [2, 4]: assert diff_notify == 0 else: assert diff_notify == 1
bbc1
positive
def multiStateResultTypeName(self): localctx = BraketPragmasParser.MultiStateResultTypeNameContext(self, self._ctx, self.state) <DeepExtract> if hasattr(localctx, 'enterBraketPragma'): localctx.enterBraketPragma(self) </DeepExtract> try: self.enterOuterAlt(localctx, 1) self.state = 248 self.match(BraketPragmasParser.AMPLITUDE) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: <DeepExtract> if hasattr(listener, 'exitBraketPragma'): listener.exitBraketPragma(self) </DeepExtract> return localctx
def multiStateResultTypeName(self): localctx = BraketPragmasParser.MultiStateResultTypeNameContext(self, self._ctx, self.state) if hasattr(localctx, 'enterBraketPragma'): localctx.enterBraketPragma(self) try: self.enterOuterAlt(localctx, 1) self.state = 248 self.match(BraketPragmasParser.AMPLITUDE) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: if hasattr(listener, 'exitBraketPragma'): listener.exitBraketPragma(self) return localctx
amazon-braket-default-simulator-python
positive
def expand_uniform_by_cells_count(self, count, renumber_division=True): """Expand blockMeshDict boundingbox for n cells from all sides. This method will increase the number of divisions by 2 to keep the size of the cells unchanged unless renumber_division is set to False. Use a negative count to shrink the bounding box. """ (x, y, z) = self.n_div_xyz <DeepExtract> _x_axis = self.x_axis for i in (0, 3, 7, 4): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_x_axis, -self.width / float(x) * count)) for i in (1, 2, 6, 5): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_x_axis, self.width / float(x) * count)) </DeepExtract> <DeepExtract> _y_axis = self.y_axis for i in (0, 1, 5, 4): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_y_axis, -self.length / float(y) * count)) for i in (3, 2, 6, 7): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_y_axis, self.length / float(y) * count)) </DeepExtract> <DeepExtract> _z_axis = (0, 0, 1) for i in (0, 1, 2, 3): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_z_axis, -self.height / float(z) * count)) for i in (4, 5, 6, 7): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_z_axis, self.height / float(z) * count)) </DeepExtract> if renumber_division: self.n_div_xyz = (x + 2 * count, y + 2 * count, z + 2 * count)
def expand_uniform_by_cells_count(self, count, renumber_division=True): """Expand blockMeshDict boundingbox for n cells from all sides. This method will increase the number of divisions by 2 to keep the size of the cells unchanged unless renumber_division is set to False. Use a negative count to shrink the bounding box. """ (x, y, z) = self.n_div_xyz _x_axis = self.x_axis for i in (0, 3, 7, 4): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_x_axis, -self.width / float(x) * count)) for i in (1, 2, 6, 5): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_x_axis, self.width / float(x) * count)) _y_axis = self.y_axis for i in (0, 1, 5, 4): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_y_axis, -self.length / float(y) * count)) for i in (3, 2, 6, 7): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_y_axis, self.length / float(y) * count)) _z_axis = (0, 0, 1) for i in (0, 1, 2, 3): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_z_axis, -self.height / float(z) * count)) for i in (4, 5, 6, 7): self.vertices[i] = vectormath.move(self.vertices[i], vectormath.scale(_z_axis, self.height / float(z) * count)) if renumber_division: self.n_div_xyz = (x + 2 * count, y + 2 * count, z + 2 * count)
butterfly
positive
def delete_targets(self): err_msg = None if self.hostname not in self.config.config['gateways']: return self.logger.info('Removing iSCSI target from LIO') for (target_iqn, target_config) in self.config.config['targets'].items(): try: <DeepExtract> target = GWTarget(self.logger, target_iqn, {}) if target.error: raise CephiSCSIError('Could not initialize target: {}'.format(target.error_msg)) target.load_config() if target.error: self.logger.debug('Could not find target {}: {}'.format(target_iqn, target.error_msg)) return try: target.delete(self.config) except RTSLibError as err: err_msg = 'Could not remove target {}: {}'.format(target_iqn, err) raise CephiSCSIError(err_msg) </DeepExtract> except CephiSCSIError as err: if err_msg is None: err_msg = err continue if err_msg: raise CephiSCSIError(err_msg)
def delete_targets(self): err_msg = None if self.hostname not in self.config.config['gateways']: return self.logger.info('Removing iSCSI target from LIO') for (target_iqn, target_config) in self.config.config['targets'].items(): try: target = GWTarget(self.logger, target_iqn, {}) if target.error: raise CephiSCSIError('Could not initialize target: {}'.format(target.error_msg)) target.load_config() if target.error: self.logger.debug('Could not find target {}: {}'.format(target_iqn, target.error_msg)) return try: target.delete(self.config) except RTSLibError as err: err_msg = 'Could not remove target {}: {}'.format(target_iqn, err) raise CephiSCSIError(err_msg) except CephiSCSIError as err: if err_msg is None: err_msg = err continue if err_msg: raise CephiSCSIError(err_msg)
ceph-iscsi
positive
def bind(family, type, proto=0): """Create (or recreate) the actual socket object.""" <DeepExtract> sock = socket.socket(family, type, proto) connections.prevent_socket_inheritance(sock) (host, port) = self.bind_addr[:2] IS_EPHEMERAL_PORT = port == 0 if not (IS_WINDOWS or IS_EPHEMERAL_PORT): 'Enable SO_REUSEADDR for the current socket.\n\n Skip for Windows (has different semantics)\n or ephemeral ports (can steal ports from others).\n\n Refs:\n * https://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx\n * https://github.com/cherrypy/cheroot/issues/114\n * https://gavv.github.io/blog/ephemeral-port-reuse/\n ' sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if self.nodelay and (not isinstance(self.bind_addr, (str, bytes))): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if self.ssl_adapter is not None: sock = self.ssl_adapter.bind(sock) listening_ipv6 = hasattr(socket, 'AF_INET6') and family == socket.AF_INET6 and (host in ('::', '::0', '::0.0.0.0')) if listening_ipv6: try: sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) except (AttributeError, socket.error): pass sock = sock </DeepExtract> <DeepExtract> sock.bind(self.bind_addr) sock = self.socket = sock </DeepExtract> <DeepExtract> bind_addr = sock.getsockname() if sock.family in (socket.AF_INET, socket.AF_INET6): "UNIX domain sockets are strings or bytes.\n\n In case of bytes with a leading null-byte it's an abstract socket.\n " self.bind_addr = bind_addr[:2] if isinstance(bind_addr, bytes): bind_addr = bton(bind_addr) self.bind_addr = bind_addr </DeepExtract> return sock
def bind(family, type, proto=0): """Create (or recreate) the actual socket object.""" sock = socket.socket(family, type, proto) connections.prevent_socket_inheritance(sock) (host, port) = self.bind_addr[:2] IS_EPHEMERAL_PORT = port == 0 if not (IS_WINDOWS or IS_EPHEMERAL_PORT): 'Enable SO_REUSEADDR for the current socket.\n\n Skip for Windows (has different semantics)\n or ephemeral ports (can steal ports from others).\n\n Refs:\n * https://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx\n * https://github.com/cherrypy/cheroot/issues/114\n * https://gavv.github.io/blog/ephemeral-port-reuse/\n ' sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if self.nodelay and (not isinstance(self.bind_addr, (str, bytes))): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if self.ssl_adapter is not None: sock = self.ssl_adapter.bind(sock) listening_ipv6 = hasattr(socket, 'AF_INET6') and family == socket.AF_INET6 and (host in ('::', '::0', '::0.0.0.0')) if listening_ipv6: try: sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) except (AttributeError, socket.error): pass sock = sock sock.bind(self.bind_addr) sock = self.socket = sock bind_addr = sock.getsockname() if sock.family in (socket.AF_INET, socket.AF_INET6): "UNIX domain sockets are strings or bytes.\n\n In case of bytes with a leading null-byte it's an abstract socket.\n " self.bind_addr = bind_addr[:2] if isinstance(bind_addr, bytes): bind_addr = bton(bind_addr) self.bind_addr = bind_addr return sock
cheroot
positive
def pgm(x, preds, loss_fn, y=None, eps=None, model=None, steps=16, **kwargs): raise DeprecationWarning if eps is None: eps = 3.27090588 if y is None: preds_max = preds.data.max(1)[1] y = torch.equal(preds, preds_max).float() x_adv = x.clone() x_adv.requires_grad = True for t in range(steps): loss_adv0 = loss_fn(model(x_adv), y, reduction='sum') grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0] scale = float(1.0 / np.sqrt(t + 1)) * 3.0 x_adv.data.add_(scale * grad0.data) <DeepExtract> if not True: res = x_adv.clone() else: res = x_adv direction = x_adv - x dist = direction.view(direction.size(0), -1).norm(p=2, dim=1, keepdim=True) direction.view(direction.size(0), -1).div_(dist) dist[dist > eps] = eps direction.view(direction.size(0), -1).mul_(dist) res.data.copy_(x + direction) return res </DeepExtract> return x_adv
def pgm(x, preds, loss_fn, y=None, eps=None, model=None, steps=16, **kwargs): raise DeprecationWarning if eps is None: eps = 3.27090588 if y is None: preds_max = preds.data.max(1)[1] y = torch.equal(preds, preds_max).float() x_adv = x.clone() x_adv.requires_grad = True for t in range(steps): loss_adv0 = loss_fn(model(x_adv), y, reduction='sum') grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0] scale = float(1.0 / np.sqrt(t + 1)) * 3.0 x_adv.data.add_(scale * grad0.data) if not True: res = x_adv.clone() else: res = x_adv direction = x_adv - x dist = direction.view(direction.size(0), -1).norm(p=2, dim=1, keepdim=True) direction.view(direction.size(0), -1).div_(dist) dist[dist > eps] = eps direction.view(direction.size(0), -1).mul_(dist) res.data.copy_(x + direction) return res return x_adv
ATMC
positive
def test_config_is_not_required(self): """ If the '--config' option is not provided, it defaults to ROOT_DIR/conf/carbon.conf. """ root_dir = mkdtemp() self.addCleanup(rmtree, root_dir) conf_dir = join(root_dir, 'conf') makedirs(conf_dir) <DeepExtract> if 'carbon.conf' is not None: path = join(conf_dir, 'carbon.conf') else: (fd, path) = mkstemp(dir=conf_dir) os.close(fd) self.addCleanup(os.unlink, path) if '[foo]' is not None: with open(path, 'w') as f: f.write('[foo]') return path </DeepExtract> options = FakeOptions(config=None, instance=None, pidfile=None, logdir=None) read_config('carbon-foo', options, ROOT_DIR=root_dir) self.assertEqual(join(root_dir, 'conf', 'carbon.conf'), options['config'])
def test_config_is_not_required(self): """ If the '--config' option is not provided, it defaults to ROOT_DIR/conf/carbon.conf. """ root_dir = mkdtemp() self.addCleanup(rmtree, root_dir) conf_dir = join(root_dir, 'conf') makedirs(conf_dir) if 'carbon.conf' is not None: path = join(conf_dir, 'carbon.conf') else: (fd, path) = mkstemp(dir=conf_dir) os.close(fd) self.addCleanup(os.unlink, path) if '[foo]' is not None: with open(path, 'w') as f: f.write('[foo]') return path options = FakeOptions(config=None, instance=None, pidfile=None, logdir=None) read_config('carbon-foo', options, ROOT_DIR=root_dir) self.assertEqual(join(root_dir, 'conf', 'carbon.conf'), options['config'])
carbon
positive
def parse_nexus(tree_path, treestring_regex='tree [A-Za-z\\_]+([0-9]+)', verbose=False): """ Parses the BEAST MCC tree (NEXUS format) Parameters ---------- tree_path : string or file handle open for reading The nexus tree file treestring_regex : string The regex to match the tree string in the nexus file (the really long string which typically starts with "tree" and looks similar to a newick tree) verbose : bool, optional (default: False) Should output be printed? Raises ------ AssertionError If the tree was not correctly parsed Returns ------- <class 'Bio.Phylo.BaseTree.Tree'> A tree with BEAST attrs set on each node (as applicable) Author: Gytis Dudas """ tipFlag = False tips = {} tipNum = 0 tree = None if isinstance(tree_path, str): try: handle = open(tree_path, 'r', encoding='utf-8') except FileNotFoundError: print('FATAL: No such file {}'.format(tree_path)) sys.exit(2) else: handle = tree_path for line in handle: l = line.strip('\n') nTaxa = re.search('dimensions ntax=([0-9]+);', l.lower()) if nTaxa is not None: tipNum = int(nTaxa.group(1)) if verbose: print('File should contain %d taxa' % tipNum) treeString = re.search(treestring_regex, l) if treeString is not None: treeString_start = l.index('(') <DeepExtract> i = 0 stored_i = None cur_node = Phylo.Newick.Clade() cur_node.name = 'root' cur_node.clades = [] node_count = 0 while i != len(l[treeString_start:]): if stored_i == i and verbose == True: print('%d >%s<' % (i, l[treeString_start:][i])) assert stored_i != i, '\nTree string unparseable\nStopped at >>%s<<\nstring region looks like this: %s' % (l[treeString_start:][i], l[treeString_start:][i:i + 5000]) stored_i = i if l[treeString_start:][i] == '(': node = Phylo.Newick.Clade() node.name = 'NODE_%07d' % node_count if verbose == True: print('%d adding node %s' % (i, node.name)) node.branch = 0.0 node.up = cur_node node.clades = [] node.attrs = {} cur_node.clades.append(node) cur_node = node node_count += 1 i += 1 numericalTip = re.match('(\\(|,)([0-9]+)(\\[|\\:)', l[treeString_start:][i - 1:i + 100]) if numericalTip is not None: node = Phylo.Newick.Clade() if tips: node.name = tips[numericalTip.group(2)] else: node.name = str(numericalTip.group(2)) if verbose == True: print('%d adding leaf (BEAST) %s (%s)' % (i, numericalTip.group(2), node.name)) node.up = cur_node node.attrs = {} cur_node.clades.append(node) cur_node = node i += len(numericalTip.group(2)) alphaTip = re.match('(\\(|,)(\\\'|\\")*([A-Za-z\\_\\-\\|\\.0-9\\?\\/]+)(\\\'|\\"|)(\\[)*', l[treeString_start:][i - 1:i + 200]) if alphaTip is not None: if verbose == True: print('%d adding leaf (non-BEAST) %s' % (i, alphaTip.group(3))) node = Phylo.Newick.Clade() node.name = alphaTip.group(3) node.up = cur_node node.attrs = {} cur_node.clades.append(node) cur_node = node i += len(alphaTip.group(3)) + alphaTip.group().count("'") + alphaTip.group().count('"') multitypeNode = re.match('\\)([0-9]+)\\[', l[treeString_start:][i - 1:i + 100]) if multitypeNode is not None: if verbose == True: print('%d adding multitype node %s' % (i, multitypeNode.group(1))) i += len(multitypeNode.group(1)) commentBlock = re.match('(\\:)*\\[(&[A-Za-z\\_\\-{}\\,0-9\\.\\%=\\"\\\'\\+!#]+)\\]', l[treeString_start:][i:]) if commentBlock is not None: if verbose == True: print('%d comment: %s' % (i, commentBlock.group(2))) comment = commentBlock.group(2) numerics = re.findall('[,&][A-Za-z\\_\\.0-9]+=[0-9\\-Ee\\.]+', comment) strings = re.findall('[,&][A-Za-z\\_\\.0-9]+=["|\\\']*[A-Za-z\\_0-9\\.\\+]+["|\\\']*', comment) treelist = 
re.findall('[,&][A-Za-z\\_\\.0-9]+={[A-Za-z\\_,{}0-9\\.]+}', comment) sets = re.findall('[,&][A-Za-z\\_\\.0-9\\%]+={[A-Za-z\\.\\-0-9eE,\\"\\_]+}', comment) figtree = re.findall('\\![A-Za-z]+=[A-Za-z0-9#]+', comment) for vals in strings: (tr, val) = vals.split('=') tr = tr[1:] if re.search('.*[^0-9\\.eE].*', val) is not None: if '+' in val: equiprobable = val.split('+') val = equiprobable[np.random.randint(len(equiprobable))] cur_node.attrs[tr] = val.strip('"') for vals in numerics: (tr, val) = vals.split('=') tr = tr[1:] if 'prob' not in tr: cur_node.attrs[tr] = float(val) states = {} for vals in sorted(sets, key=lambda s: '.set.prob' in s.split('=')[0]): (tr, val) = vals.split('=') tr = tr[1:] if 'set' in tr: trait = tr.split('.set')[0] if '.prob' not in tr: states[trait] = [v.strip('"') for v in val[1:-1].split(',')] elif '.prob' in tr: probs = map(float, val[1:-1].split(',')) cur_node.attrs['%s_confidence' % trait] = {t: p for (t, p) in zip(states[trait], probs)} elif 'range' in tr: pass elif 'HPD' in tr: cur_node.attrs[tr.replace('95%_HPD', 'confidence')] = list(map(float, val[1:-1].split(','))) if len(figtree) > 0: print('FigTree comment found, ignoring') i += len(commentBlock.group()) nodeLabel = re.match('([A-Za-z\\_\\-0-9\\.]+)(\\:|\\;)', l[treeString_start:][i:]) if nodeLabel is not None: if verbose == True: print('old school comment found: %s' % nodeLabel.group(1)) cur_node.name = nodeLabel.group(1) i += len(nodeLabel.group(1)) branchLength = re.match('(\\:)*([0-9\\.\\-Ee]+)', l[treeString_start:][i:i + 100]) if branchLength is not None: if verbose == True: print('adding branch length (%d) %.6f' % (i, float(branchLength.group(2)))) setattr(cur_node, 'branch_length', float(branchLength.group(2))) i += len(branchLength.group()) if l[treeString_start:][i] == ',' or l[treeString_start:][i] == ')': i += 1 cur_node = cur_node.up if l[treeString_start:][i] == ';': tree = cur_node break </DeepExtract> if verbose: print('Identified tree string') if tipFlag == True: tipEncoding = re.search("([0-9]+) ([A-Za-z\\-\\_\\/\\.\\'0-9 \\|?]+)", l) if tipEncoding is not None: tips[tipEncoding.group(1)] = tipEncoding.group(2).strip('"').strip("'") if verbose == True: print('Identified tip translation %s: %s' % (tipEncoding.group(1), tips[tipEncoding.group(1)])) elif ';' not in l: print('tip not captured by regex:', l.replace('\t', '')) if 'translate' in l.lower(): tipFlag = True if ';' in l: tipFlag = False assert tree, 'Tree not captured by regex' assert tree.count_terminals() == tipNum, 'Not all tips have been parsed.' print('Success parsing BEAST nexus') try: return Phylo.BaseTree.Tree.from_clade(tree) except RecursionError as err: print('FATAL ERROR') print('Recursion limit reached. You can try raising this with the `--recursion-limit` option') print('(Be careful with this). Your current limit is set to {}'.format(sys.getrecursionlimit())) sys.exit(2)
def parse_nexus(tree_path, treestring_regex='tree [A-Za-z\\_]+([0-9]+)', verbose=False): """ Parses the BEAST MCC tree (NEXUS format) Parameters ---------- tree_path : string or file handle open for reading The nexus tree file treestring_regex : string The regex to match the tree string in the nexus file (the really long string which typically starts with "tree" and looks similar to a newick tree) verbose : bool, optional (default: False) Should output be printed? Raises ------ AssertionError If the tree was not correctly parsed Returns ------- <class 'Bio.Phylo.BaseTree.Tree'> A tree with BEAST attrs set on each node (as applicable) Author: Gytis Dudas """ tipFlag = False tips = {} tipNum = 0 tree = None if isinstance(tree_path, str): try: handle = open(tree_path, 'r', encoding='utf-8') except FileNotFoundError: print('FATAL: No such file {}'.format(tree_path)) sys.exit(2) else: handle = tree_path for line in handle: l = line.strip('\n') nTaxa = re.search('dimensions ntax=([0-9]+);', l.lower()) if nTaxa is not None: tipNum = int(nTaxa.group(1)) if verbose: print('File should contain %d taxa' % tipNum) treeString = re.search(treestring_regex, l) if treeString is not None: treeString_start = l.index('(') i = 0 stored_i = None cur_node = Phylo.Newick.Clade() cur_node.name = 'root' cur_node.clades = [] node_count = 0 while i != len(l[treeString_start:]): if stored_i == i and verbose == True: print('%d >%s<' % (i, l[treeString_start:][i])) assert stored_i != i, '\nTree string unparseable\nStopped at >>%s<<\nstring region looks like this: %s' % (l[treeString_start:][i], l[treeString_start:][i:i + 5000]) stored_i = i if l[treeString_start:][i] == '(': node = Phylo.Newick.Clade() node.name = 'NODE_%07d' % node_count if verbose == True: print('%d adding node %s' % (i, node.name)) node.branch = 0.0 node.up = cur_node node.clades = [] node.attrs = {} cur_node.clades.append(node) cur_node = node node_count += 1 i += 1 numericalTip = re.match('(\\(|,)([0-9]+)(\\[|\\:)', l[treeString_start:][i - 1:i + 100]) if numericalTip is not None: node = Phylo.Newick.Clade() if tips: node.name = tips[numericalTip.group(2)] else: node.name = str(numericalTip.group(2)) if verbose == True: print('%d adding leaf (BEAST) %s (%s)' % (i, numericalTip.group(2), node.name)) node.up = cur_node node.attrs = {} cur_node.clades.append(node) cur_node = node i += len(numericalTip.group(2)) alphaTip = re.match('(\\(|,)(\\\'|\\")*([A-Za-z\\_\\-\\|\\.0-9\\?\\/]+)(\\\'|\\"|)(\\[)*', l[treeString_start:][i - 1:i + 200]) if alphaTip is not None: if verbose == True: print('%d adding leaf (non-BEAST) %s' % (i, alphaTip.group(3))) node = Phylo.Newick.Clade() node.name = alphaTip.group(3) node.up = cur_node node.attrs = {} cur_node.clades.append(node) cur_node = node i += len(alphaTip.group(3)) + alphaTip.group().count("'") + alphaTip.group().count('"') multitypeNode = re.match('\\)([0-9]+)\\[', l[treeString_start:][i - 1:i + 100]) if multitypeNode is not None: if verbose == True: print('%d adding multitype node %s' % (i, multitypeNode.group(1))) i += len(multitypeNode.group(1)) commentBlock = re.match('(\\:)*\\[(&[A-Za-z\\_\\-{}\\,0-9\\.\\%=\\"\\\'\\+!#]+)\\]', l[treeString_start:][i:]) if commentBlock is not None: if verbose == True: print('%d comment: %s' % (i, commentBlock.group(2))) comment = commentBlock.group(2) numerics = re.findall('[,&][A-Za-z\\_\\.0-9]+=[0-9\\-Ee\\.]+', comment) strings = re.findall('[,&][A-Za-z\\_\\.0-9]+=["|\\\']*[A-Za-z\\_0-9\\.\\+]+["|\\\']*', comment) treelist = 
re.findall('[,&][A-Za-z\\_\\.0-9]+={[A-Za-z\\_,{}0-9\\.]+}', comment) sets = re.findall('[,&][A-Za-z\\_\\.0-9\\%]+={[A-Za-z\\.\\-0-9eE,\\"\\_]+}', comment) figtree = re.findall('\\![A-Za-z]+=[A-Za-z0-9#]+', comment) for vals in strings: (tr, val) = vals.split('=') tr = tr[1:] if re.search('.*[^0-9\\.eE].*', val) is not None: if '+' in val: equiprobable = val.split('+') val = equiprobable[np.random.randint(len(equiprobable))] cur_node.attrs[tr] = val.strip('"') for vals in numerics: (tr, val) = vals.split('=') tr = tr[1:] if 'prob' not in tr: cur_node.attrs[tr] = float(val) states = {} for vals in sorted(sets, key=lambda s: '.set.prob' in s.split('=')[0]): (tr, val) = vals.split('=') tr = tr[1:] if 'set' in tr: trait = tr.split('.set')[0] if '.prob' not in tr: states[trait] = [v.strip('"') for v in val[1:-1].split(',')] elif '.prob' in tr: probs = map(float, val[1:-1].split(',')) cur_node.attrs['%s_confidence' % trait] = {t: p for (t, p) in zip(states[trait], probs)} elif 'range' in tr: pass elif 'HPD' in tr: cur_node.attrs[tr.replace('95%_HPD', 'confidence')] = list(map(float, val[1:-1].split(','))) if len(figtree) > 0: print('FigTree comment found, ignoring') i += len(commentBlock.group()) nodeLabel = re.match('([A-Za-z\\_\\-0-9\\.]+)(\\:|\\;)', l[treeString_start:][i:]) if nodeLabel is not None: if verbose == True: print('old school comment found: %s' % nodeLabel.group(1)) cur_node.name = nodeLabel.group(1) i += len(nodeLabel.group(1)) branchLength = re.match('(\\:)*([0-9\\.\\-Ee]+)', l[treeString_start:][i:i + 100]) if branchLength is not None: if verbose == True: print('adding branch length (%d) %.6f' % (i, float(branchLength.group(2)))) setattr(cur_node, 'branch_length', float(branchLength.group(2))) i += len(branchLength.group()) if l[treeString_start:][i] == ',' or l[treeString_start:][i] == ')': i += 1 cur_node = cur_node.up if l[treeString_start:][i] == ';': tree = cur_node break if verbose: print('Identified tree string') if tipFlag == True: tipEncoding = re.search("([0-9]+) ([A-Za-z\\-\\_\\/\\.\\'0-9 \\|?]+)", l) if tipEncoding is not None: tips[tipEncoding.group(1)] = tipEncoding.group(2).strip('"').strip("'") if verbose == True: print('Identified tip translation %s: %s' % (tipEncoding.group(1), tips[tipEncoding.group(1)])) elif ';' not in l: print('tip not captured by regex:', l.replace('\t', '')) if 'translate' in l.lower(): tipFlag = True if ';' in l: tipFlag = False assert tree, 'Tree not captured by regex' assert tree.count_terminals() == tipNum, 'Not all tips have been parsed.' print('Success parsing BEAST nexus') try: return Phylo.BaseTree.Tree.from_clade(tree) except RecursionError as err: print('FATAL ERROR') print('Recursion limit reached. You can try raising this with the `--recursion-limit` option') print('(Be careful with this). Your current limit is set to {}'.format(sys.getrecursionlimit())) sys.exit(2)
augur
positive
def testRandomJitterBoxes(self): preprocessing_options = [] preprocessing_options.append((preprocessor.random_jitter_boxes, {})) <DeepExtract> boxes = tf.constant([[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) boxes = boxes </DeepExtract> boxes_shape = tf.shape(boxes) tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] distorted_boxes_shape = tf.shape(distorted_boxes) with self.test_session() as sess: (boxes_shape_, distorted_boxes_shape_) = sess.run([boxes_shape, distorted_boxes_shape]) self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
def testRandomJitterBoxes(self): preprocessing_options = [] preprocessing_options.append((preprocessor.random_jitter_boxes, {})) boxes = tf.constant([[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) boxes = boxes boxes_shape = tf.shape(boxes) tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] distorted_boxes_shape = tf.shape(distorted_boxes) with self.test_session() as sess: (boxes_shape_, distorted_boxes_shape_) = sess.run([boxes_shape, distorted_boxes_shape]) self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
Accident-Detection-on-Indian-Roads
positive
def forward(self, x, activate=True, norm=True): for layer in self.order: if layer == 'conv': x = self.conv(x) elif layer == 'norm' and norm and self.with_norm: <DeepExtract> x = getattr(self, self.norm_name) </DeepExtract> elif layer == 'act' and activate and self.with_activatation: x = self.activate(x) return x
def forward(self, x, activate=True, norm=True): for layer in self.order: if layer == 'conv': x = self.conv(x) elif layer == 'norm' and norm and self.with_norm: x = getattr(self, self.norm_name) elif layer == 'act' and activate and self.with_activatation: x = self.activate(x) return x
AE_TextSpotter
positive
def fetch_url_headers(host, url): """Return just the headers from fetch_url(host, url), as a dictionary.""" <DeepExtract> s = socket.create_connection((host, 80)).makefile() s.write('GET %s HTTP/1.0\n\n' % url) s.flush() response = s.read() s.close() response = response </DeepExtract> n = response.index('\r\n\r\n') headers_list = response[:n].split('\r\n') headers_dict = {'STATUS': headers_list[0].split(' ', 2)[1]} for header in headers_list: try: (k, v) = header.split(':', 1) headers_dict[k.strip()] = v.strip() except: pass return headers_dict
def fetch_url_headers(host, url): """Return just the headers from fetch_url(host, url), as a dictionary.""" s = socket.create_connection((host, 80)).makefile() s.write('GET %s HTTP/1.0\n\n' % url) s.flush() response = s.read() s.close() response = response n = response.index('\r\n\r\n') headers_list = response[:n].split('\r\n') headers_dict = {'STATUS': headers_list[0].split(' ', 2)[1]} for header in headers_list: try: (k, v) = header.split(':', 1) headers_dict[k.strip()] = v.strip() except: pass return headers_dict
djangy
positive
def sync_and_save(self): <DeepExtract> self.score = self.compute_score() self.update_levels(commit=False) return self </DeepExtract> self.save() return self
def sync_and_save(self): self.score = self.compute_score() self.update_levels(commit=False) return self self.save() return self
ej-server
positive
def close(self): <DeepExtract> if hasattr(self, 'content'): del self.content </DeepExtract> resp = self.browser.response() if resp is not None: resp.close() self.browser.clear_history()
def close(self): if hasattr(self, 'content'): del self.content resp = self.browser.response() if resp is not None: resp.close() self.browser.clear_history()
cola
positive
def __init__(self, table_schema, simple=False): if simple: <DeepExtract> self.table_schema = table_schema column_names = table_schema['column_names'] column_names_original = table_schema['column_names_original'] table_names = table_schema['table_names'] table_names_original = table_schema['table_names_original'] assert len(column_names) == len(column_names_original) and len(table_names) == len(table_names_original) column_keep_index = [] self.column_names_surface_form = [] self.column_names_surface_form_to_id = {} for (i, (table_id, column_name)) in enumerate(column_names_original): column_name_surface_form = column_name column_name_surface_form = column_name_surface_form.lower() if column_name_surface_form not in self.column_names_surface_form_to_id: self.column_names_surface_form.append(column_name_surface_form) self.column_names_surface_form_to_id[column_name_surface_form] = len(self.column_names_surface_form) - 1 column_keep_index.append(i) column_keep_index_2 = [] for (i, table_name) in enumerate(table_names_original): column_name_surface_form = table_name.lower() if column_name_surface_form not in self.column_names_surface_form_to_id: self.column_names_surface_form.append(column_name_surface_form) self.column_names_surface_form_to_id[column_name_surface_form] = len(self.column_names_surface_form) - 1 column_keep_index_2.append(i) self.column_names_embedder_input = [] self.column_names_embedder_input_to_id = {} for (i, (table_id, column_name)) in enumerate(column_names): column_name_embedder_input = column_name if i in column_keep_index: self.column_names_embedder_input.append(column_name_embedder_input) self.column_names_embedder_input_to_id[column_name_embedder_input] = len(self.column_names_embedder_input) - 1 for (i, table_name) in enumerate(table_names): column_name_embedder_input = table_name if i in column_keep_index_2: self.column_names_embedder_input.append(column_name_embedder_input) self.column_names_embedder_input_to_id[column_name_embedder_input] = len(self.column_names_embedder_input) - 1 max_id_1 = max((v for (k, v) in self.column_names_surface_form_to_id.items())) max_id_2 = max((v for (k, v) in self.column_names_embedder_input_to_id.items())) assert len(self.column_names_surface_form) - 1 == max_id_2 == max_id_1 self.num_col = len(self.column_names_surface_form) </DeepExtract> else: <DeepExtract> self.table_schema = table_schema column_names = table_schema['column_names'] column_names_original = table_schema['column_names_original'] table_names = table_schema['table_names'] table_names_original = table_schema['table_names_original'] assert len(column_names) == len(column_names_original) and len(table_names) == len(table_names_original) column_keep_index = [] self.column_names_surface_form = [] self.column_names_surface_form_to_id = {} for (i, (table_id, column_name)) in enumerate(column_names_original): if table_id >= 0: table_name = table_names_original[table_id] column_name_surface_form = '{}.{}'.format(table_name, column_name) else: column_name_surface_form = column_name column_name_surface_form = column_name_surface_form.lower() if column_name_surface_form not in self.column_names_surface_form_to_id: self.column_names_surface_form.append(column_name_surface_form) self.column_names_surface_form_to_id[column_name_surface_form] = len(self.column_names_surface_form) - 1 column_keep_index.append(i) start_i = len(self.column_names_surface_form_to_id) for (i, table_name) in enumerate(table_names_original): column_name_surface_form = '{}.*'.format(table_name.lower()) 
self.column_names_surface_form.append(column_name_surface_form) self.column_names_surface_form_to_id[column_name_surface_form] = i + start_i self.column_names_embedder_input = [] self.column_names_embedder_input_to_id = {} for (i, (table_id, column_name)) in enumerate(column_names): if table_id >= 0: table_name = table_names[table_id] column_name_embedder_input = table_name + ' . ' + column_name else: column_name_embedder_input = column_name if i in column_keep_index: self.column_names_embedder_input.append(column_name_embedder_input) self.column_names_embedder_input_to_id[column_name_embedder_input] = len(self.column_names_embedder_input) - 1 start_i = len(self.column_names_embedder_input_to_id) for (i, table_name) in enumerate(table_names): column_name_embedder_input = table_name + ' . *' self.column_names_embedder_input.append(column_name_embedder_input) self.column_names_embedder_input_to_id[column_name_embedder_input] = i + start_i assert len(self.column_names_surface_form) == len(self.column_names_surface_form_to_id) == len(self.column_names_embedder_input) == len(self.column_names_embedder_input_to_id) max_id_1 = max((v for (k, v) in self.column_names_surface_form_to_id.items())) max_id_2 = max((v for (k, v) in self.column_names_embedder_input_to_id.items())) assert len(self.column_names_surface_form) - 1 == max_id_2 == max_id_1 self.num_col = len(self.column_names_surface_form) </DeepExtract>
def __init__(self, table_schema, simple=False): if simple: self.table_schema = table_schema column_names = table_schema['column_names'] column_names_original = table_schema['column_names_original'] table_names = table_schema['table_names'] table_names_original = table_schema['table_names_original'] assert len(column_names) == len(column_names_original) and len(table_names) == len(table_names_original) column_keep_index = [] self.column_names_surface_form = [] self.column_names_surface_form_to_id = {} for (i, (table_id, column_name)) in enumerate(column_names_original): column_name_surface_form = column_name column_name_surface_form = column_name_surface_form.lower() if column_name_surface_form not in self.column_names_surface_form_to_id: self.column_names_surface_form.append(column_name_surface_form) self.column_names_surface_form_to_id[column_name_surface_form] = len(self.column_names_surface_form) - 1 column_keep_index.append(i) column_keep_index_2 = [] for (i, table_name) in enumerate(table_names_original): column_name_surface_form = table_name.lower() if column_name_surface_form not in self.column_names_surface_form_to_id: self.column_names_surface_form.append(column_name_surface_form) self.column_names_surface_form_to_id[column_name_surface_form] = len(self.column_names_surface_form) - 1 column_keep_index_2.append(i) self.column_names_embedder_input = [] self.column_names_embedder_input_to_id = {} for (i, (table_id, column_name)) in enumerate(column_names): column_name_embedder_input = column_name if i in column_keep_index: self.column_names_embedder_input.append(column_name_embedder_input) self.column_names_embedder_input_to_id[column_name_embedder_input] = len(self.column_names_embedder_input) - 1 for (i, table_name) in enumerate(table_names): column_name_embedder_input = table_name if i in column_keep_index_2: self.column_names_embedder_input.append(column_name_embedder_input) self.column_names_embedder_input_to_id[column_name_embedder_input] = len(self.column_names_embedder_input) - 1 max_id_1 = max((v for (k, v) in self.column_names_surface_form_to_id.items())) max_id_2 = max((v for (k, v) in self.column_names_embedder_input_to_id.items())) assert len(self.column_names_surface_form) - 1 == max_id_2 == max_id_1 self.num_col = len(self.column_names_surface_form) else: self.table_schema = table_schema column_names = table_schema['column_names'] column_names_original = table_schema['column_names_original'] table_names = table_schema['table_names'] table_names_original = table_schema['table_names_original'] assert len(column_names) == len(column_names_original) and len(table_names) == len(table_names_original) column_keep_index = [] self.column_names_surface_form = [] self.column_names_surface_form_to_id = {} for (i, (table_id, column_name)) in enumerate(column_names_original): if table_id >= 0: table_name = table_names_original[table_id] column_name_surface_form = '{}.{}'.format(table_name, column_name) else: column_name_surface_form = column_name column_name_surface_form = column_name_surface_form.lower() if column_name_surface_form not in self.column_names_surface_form_to_id: self.column_names_surface_form.append(column_name_surface_form) self.column_names_surface_form_to_id[column_name_surface_form] = len(self.column_names_surface_form) - 1 column_keep_index.append(i) start_i = len(self.column_names_surface_form_to_id) for (i, table_name) in enumerate(table_names_original): column_name_surface_form = '{}.*'.format(table_name.lower()) 
self.column_names_surface_form.append(column_name_surface_form) self.column_names_surface_form_to_id[column_name_surface_form] = i + start_i self.column_names_embedder_input = [] self.column_names_embedder_input_to_id = {} for (i, (table_id, column_name)) in enumerate(column_names): if table_id >= 0: table_name = table_names[table_id] column_name_embedder_input = table_name + ' . ' + column_name else: column_name_embedder_input = column_name if i in column_keep_index: self.column_names_embedder_input.append(column_name_embedder_input) self.column_names_embedder_input_to_id[column_name_embedder_input] = len(self.column_names_embedder_input) - 1 start_i = len(self.column_names_embedder_input_to_id) for (i, table_name) in enumerate(table_names): column_name_embedder_input = table_name + ' . *' self.column_names_embedder_input.append(column_name_embedder_input) self.column_names_embedder_input_to_id[column_name_embedder_input] = i + start_i assert len(self.column_names_surface_form) == len(self.column_names_surface_form_to_id) == len(self.column_names_embedder_input) == len(self.column_names_embedder_input_to_id) max_id_1 = max((v for (k, v) in self.column_names_surface_form_to_id.items())) max_id_2 = max((v for (k, v) in self.column_names_embedder_input_to_id.items())) assert len(self.column_names_surface_form) - 1 == max_id_2 == max_id_1 self.num_col = len(self.column_names_surface_form)
editsql
positive
@staticmethod def predict_batches(X: np.ndarray, predictor: Callable, batch_size: int) -> np.ndarray: """ Predict the classification labels of the input dataset. This is performed in batches. Parameters ---------- X Input to be classified. predictor Prediction function. batch_size Maximum batch size to be used during each inference step. Returns ------- Classification labels. """ n_minibatch = int(np.ceil(X.shape[0] / batch_size)) Y_m = [] for i in range(n_minibatch): (istart, istop) = (i * batch_size, min((i + 1) * batch_size, X.shape[0])) preds = predictor(X[istart:istop]) if len(preds.shape) == 2 and preds.shape[-1] > 1: <DeepExtract> if len(preds.shape) == 1 or (len(preds.shape) == 2 and preds.shape[1] == 1): if num_classes is None: raise ValueError('Number of classes has to be specified to transform the labels into one-hot encoding.') preds = preds.reshape(-1).astype(np.int32) Y_ohe = np.zeros((preds.shape[0], num_classes)) Y_ohe[np.arange(preds.shape[0]), preds] = 1 preds = Y_ohe if len(preds.shape) != 2: raise ValueError(f'Expected a 2D array, but the input array has a dimension of {len(preds.shape)}') Y_ohe = np.zeros_like(preds) Y_ohe[np.arange(preds.shape[0]), np.argmax(preds, axis=1)] = 1 preds = Y_ohe </DeepExtract> Y_m.append(preds) return np.concatenate(Y_m, axis=0)
@staticmethod def predict_batches(X: np.ndarray, predictor: Callable, batch_size: int) -> np.ndarray: """ Predict the classification labels of the input dataset. This is performed in batches. Parameters ---------- X Input to be classified. predictor Prediction function. batch_size Maximum batch size to be used during each inference step. Returns ------- Classification labels. """ n_minibatch = int(np.ceil(X.shape[0] / batch_size)) Y_m = [] for i in range(n_minibatch): (istart, istop) = (i * batch_size, min((i + 1) * batch_size, X.shape[0])) preds = predictor(X[istart:istop]) if len(preds.shape) == 2 and preds.shape[-1] > 1: if len(preds.shape) == 1 or (len(preds.shape) == 2 and preds.shape[1] == 1): if num_classes is None: raise ValueError('Number of classes has to be specified to transform the labels into one-hot encoding.') preds = preds.reshape(-1).astype(np.int32) Y_ohe = np.zeros((preds.shape[0], num_classes)) Y_ohe[np.arange(preds.shape[0]), preds] = 1 preds = Y_ohe if len(preds.shape) != 2: raise ValueError(f'Expected a 2D array, but the input array has a dimension of {len(preds.shape)}') Y_ohe = np.zeros_like(preds) Y_ohe[np.arange(preds.shape[0]), np.argmax(preds, axis=1)] = 1 preds = Y_ohe Y_m.append(preds) return np.concatenate(Y_m, axis=0)
alibi
positive
def plot_prior(self, figsize=None, xlim=None, **kwargs): <DeepExtract> if self.prior_type == 'deep': prior = self._prior elif self.prior_type[:4] in ('gsm-', 'gmm-', 'lsm-', 'lmm-'): cls = tfpd.Normal if self.prior_type.startswith('g') else tfpd.Logistic prior = tfpd.MixtureSameFamily(mixture_distribution=tfpd.Categorical(logits=self.logits), components_distribution=cls(loc=self.loc, scale=tf.math.exp(self.log_scale))) if soft_round: if alpha is None: alpha = self.alpha prior = tfc.SoftRoundAdapter(prior, alpha) if skip_noise: prior = prior prior = tfc.UniformNoiseAdapter(prior) </DeepExtract> assert not tuple(prior.event_shape) if not xlim: xlim = (tf.reduce_min(tfc.lower_tail(prior, 1e-05)), tf.reduce_max(tfc.upper_tail(prior, 1e-05))) y = tf.linspace(xlim[0], xlim[1], 1000)[:, None] prob = prior.prob(y) plt.figure(figsize=figsize or (16, 8)) plt.plot(y, prob) plt.grid() plt.xlabel('latent space') plt.ylabel('$p$') plt.figure(figsize=figsize or (16, 8)) plt.imshow(tf.transpose(prob), aspect='auto', extent=(xlim[0], xlim[1], -0.5, self.ndim_latent - 0.5)) plt.xlabel('latent space') plt.ylabel('latent dimension')
def plot_prior(self, figsize=None, xlim=None, **kwargs): if self.prior_type == 'deep': prior = self._prior elif self.prior_type[:4] in ('gsm-', 'gmm-', 'lsm-', 'lmm-'): cls = tfpd.Normal if self.prior_type.startswith('g') else tfpd.Logistic prior = tfpd.MixtureSameFamily(mixture_distribution=tfpd.Categorical(logits=self.logits), components_distribution=cls(loc=self.loc, scale=tf.math.exp(self.log_scale))) if soft_round: if alpha is None: alpha = self.alpha prior = tfc.SoftRoundAdapter(prior, alpha) if skip_noise: prior = prior prior = tfc.UniformNoiseAdapter(prior) assert not tuple(prior.event_shape) if not xlim: xlim = (tf.reduce_min(tfc.lower_tail(prior, 1e-05)), tf.reduce_max(tfc.upper_tail(prior, 1e-05))) y = tf.linspace(xlim[0], xlim[1], 1000)[:, None] prob = prior.prob(y) plt.figure(figsize=figsize or (16, 8)) plt.plot(y, prob) plt.grid() plt.xlabel('latent space') plt.ylabel('$p$') plt.figure(figsize=figsize or (16, 8)) plt.imshow(tf.transpose(prob), aspect='auto', extent=(xlim[0], xlim[1], -0.5, self.ndim_latent - 0.5)) plt.xlabel('latent space') plt.ylabel('latent dimension')
compression
positive
def __call__(self, results):
    for key in self.keys:
        <DeepExtract>
        if isinstance(results[key], torch.Tensor):
            results[key] = results[key]
        elif isinstance(results[key], np.ndarray):
            results[key] = torch.from_numpy(results[key])
        elif isinstance(results[key], Sequence) and (not mmcv.is_str(results[key])):
            results[key] = torch.tensor(results[key])
        elif isinstance(results[key], int):
            results[key] = torch.LongTensor([results[key]])
        elif isinstance(results[key], float):
            results[key] = torch.FloatTensor([results[key]])
        else:
            raise TypeError('type {} cannot be converted to tensor.'.format(type(results[key])))
        </DeepExtract>
    return results
def __call__(self, results):
    for key in self.keys:
        if isinstance(results[key], torch.Tensor):
            results[key] = results[key]
        elif isinstance(results[key], np.ndarray):
            results[key] = torch.from_numpy(results[key])
        elif isinstance(results[key], Sequence) and (not mmcv.is_str(results[key])):
            results[key] = torch.tensor(results[key])
        elif isinstance(results[key], int):
            results[key] = torch.LongTensor([results[key]])
        elif isinstance(results[key], float):
            results[key] = torch.FloatTensor([results[key]])
        else:
            raise TypeError('type {} cannot be converted to tensor.'.format(type(results[key])))
    return results
D2Det
positive
def test_dirfrag_limit(self): """ That the directory fragment size cannot exceed mds_bal_fragment_size_max (using a limit of 50 in all configurations). That fragmentation (forced) will allow more entries to be created. That unlinking fails when the stray directory fragment becomes too large and that unlinking may continue once those strays are purged. """ self.fs.mon_manager.raw_cluster_cmd('mds', 'set', 'allow_dirfrags', 'true', '--yes-i-really-mean-it') LOW_LIMIT = 50 for mds in self.fs.get_daemon_names(): self.fs.mds_asok(['config', 'set', 'mds_bal_fragment_size_max', str(LOW_LIMIT)], mds) try: self.mount_a.run_python(dedent('\n import os\n path = os.path.join("{path}", "subdir")\n os.mkdir(path)\n for n in range(0, {file_count}):\n open(os.path.join(path, "%s" % n), \'w\').write("%s" % n)\n '.format(path=self.mount_a.mountpoint, file_count=LOW_LIMIT + 1))) except CommandFailedError: pass else: raise RuntimeError('fragment size exceeded') self.mount_a.run_python(dedent('\n import os\n path = os.path.join("{path}", "subdir2")\n os.mkdir(path)\n for n in range(0, {file_count}):\n open(os.path.join(path, "%s" % n), \'w\').write("%s" % n)\n dfd = os.open(path, os.O_DIRECTORY)\n os.fsync(dfd)\n '.format(path=self.mount_a.mountpoint, file_count=LOW_LIMIT))) mds_id = self.fs.get_active_names()[0] self.fs.mds_asok(['dirfrag', 'split', '/subdir2', '0/0', '1'], mds_id) self.mount_a.umount_wait() self.fs.mds_asok(['flush', 'journal'], mds_id) self.mount_a.mount() self.mount_a.wait_until_mounted() self.mount_a.run_python(dedent('\n import os\n path = os.path.join("{path}", "subdir2")\n for n in range({file_count}, ({file_count}*3)//2):\n open(os.path.join(path, "%s" % n), \'w\').write("%s" % n)\n '.format(path=self.mount_a.mountpoint, file_count=LOW_LIMIT))) <DeepExtract> strays_before = self.fs.mds_asok(['perf', 'dump', 'mds_cache', 'strays_created'], mds_id=mds_id)['mds_cache']['strays_created'] </DeepExtract> try: self.mount_a.run_python(dedent('\n import os\n path = os.path.join("{path}", "subdir3")\n os.mkdir(path)\n for n in range({file_count}):\n fpath = os.path.join(path, "%s" % n)\n f = open(fpath, \'w\')\n f.write("%s" % n)\n f.close()\n os.unlink(fpath)\n '.format(path=self.mount_a.mountpoint, file_count=LOW_LIMIT * 10))) except CommandFailedError: pass else: raise RuntimeError('fragment size exceeded') <DeepExtract> strays_after = self.fs.mds_asok(['perf', 'dump', 'mds_cache', 'strays_created'], mds_id=mds_id)['mds_cache']['strays_created'] </DeepExtract> self.assertGreaterEqual(strays_after - strays_before, LOW_LIMIT) self.wait_until_equal(lambda : self.get_mdc_stat('strays_purged'), strays_after, timeout=600) self.mount_a.run_python(dedent('\n import os\n path = os.path.join("{path}", "subdir4")\n os.mkdir(path)\n for n in range({file_count}):\n fpath = os.path.join(path, "%s" % n)\n f = open(fpath, \'w\')\n f.write("%s" % n)\n f.close()\n os.unlink(fpath)\n '.format(path=self.mount_a.mountpoint, file_count=LOW_LIMIT)))
def test_dirfrag_limit(self): """ That the directory fragment size cannot exceed mds_bal_fragment_size_max (using a limit of 50 in all configurations). That fragmentation (forced) will allow more entries to be created. That unlinking fails when the stray directory fragment becomes too large and that unlinking may continue once those strays are purged. """ self.fs.mon_manager.raw_cluster_cmd('mds', 'set', 'allow_dirfrags', 'true', '--yes-i-really-mean-it') LOW_LIMIT = 50 for mds in self.fs.get_daemon_names(): self.fs.mds_asok(['config', 'set', 'mds_bal_fragment_size_max', str(LOW_LIMIT)], mds) try: self.mount_a.run_python(dedent('\n import os\n path = os.path.join("{path}", "subdir")\n os.mkdir(path)\n for n in range(0, {file_count}):\n open(os.path.join(path, "%s" % n), \'w\').write("%s" % n)\n '.format(path=self.mount_a.mountpoint, file_count=LOW_LIMIT + 1))) except CommandFailedError: pass else: raise RuntimeError('fragment size exceeded') self.mount_a.run_python(dedent('\n import os\n path = os.path.join("{path}", "subdir2")\n os.mkdir(path)\n for n in range(0, {file_count}):\n open(os.path.join(path, "%s" % n), \'w\').write("%s" % n)\n dfd = os.open(path, os.O_DIRECTORY)\n os.fsync(dfd)\n '.format(path=self.mount_a.mountpoint, file_count=LOW_LIMIT))) mds_id = self.fs.get_active_names()[0] self.fs.mds_asok(['dirfrag', 'split', '/subdir2', '0/0', '1'], mds_id) self.mount_a.umount_wait() self.fs.mds_asok(['flush', 'journal'], mds_id) self.mount_a.mount() self.mount_a.wait_until_mounted() self.mount_a.run_python(dedent('\n import os\n path = os.path.join("{path}", "subdir2")\n for n in range({file_count}, ({file_count}*3)//2):\n open(os.path.join(path, "%s" % n), \'w\').write("%s" % n)\n '.format(path=self.mount_a.mountpoint, file_count=LOW_LIMIT))) strays_before = self.fs.mds_asok(['perf', 'dump', 'mds_cache', 'strays_created'], mds_id=mds_id)['mds_cache']['strays_created'] try: self.mount_a.run_python(dedent('\n import os\n path = os.path.join("{path}", "subdir3")\n os.mkdir(path)\n for n in range({file_count}):\n fpath = os.path.join(path, "%s" % n)\n f = open(fpath, \'w\')\n f.write("%s" % n)\n f.close()\n os.unlink(fpath)\n '.format(path=self.mount_a.mountpoint, file_count=LOW_LIMIT * 10))) except CommandFailedError: pass else: raise RuntimeError('fragment size exceeded') strays_after = self.fs.mds_asok(['perf', 'dump', 'mds_cache', 'strays_created'], mds_id=mds_id)['mds_cache']['strays_created'] self.assertGreaterEqual(strays_after - strays_before, LOW_LIMIT) self.wait_until_equal(lambda : self.get_mdc_stat('strays_purged'), strays_after, timeout=600) self.mount_a.run_python(dedent('\n import os\n path = os.path.join("{path}", "subdir4")\n os.mkdir(path)\n for n in range({file_count}):\n fpath = os.path.join(path, "%s" % n)\n f = open(fpath, \'w\')\n f.write("%s" % n)\n f.close()\n os.unlink(fpath)\n '.format(path=self.mount_a.mountpoint, file_count=LOW_LIMIT)))
ceph-qa-suite
positive
def test_generic_url_fails(self):
    <DeepExtract>
    form = MatchingRequiredForm({'facebook_identifier': 'https://www.facebook.com/messages/10601427'}, view=self.mock_view)
    self.assertFalse(form.is_valid())
    </DeepExtract>
    <DeepExtract>
    form = MatchingRequiredForm({'facebook_identifier': 'https://www.facebook.com/hashtag/funny?source=feed_text&story_id=858583977551613'}, view=self.mock_view)
    self.assertFalse(form.is_valid())
    </DeepExtract>
    <DeepExtract>
    form = MatchingRequiredForm({'facebook_identifier': 'https://www.facebook.com/'}, view=self.mock_view)
    self.assertFalse(form.is_valid())
    </DeepExtract>
def test_generic_url_fails(self):
    form = MatchingRequiredForm({'facebook_identifier': 'https://www.facebook.com/messages/10601427'}, view=self.mock_view)
    self.assertFalse(form.is_valid())
    form = MatchingRequiredForm({'facebook_identifier': 'https://www.facebook.com/hashtag/funny?source=feed_text&story_id=858583977551613'}, view=self.mock_view)
    self.assertFalse(form.is_valid())
    form = MatchingRequiredForm({'facebook_identifier': 'https://www.facebook.com/'}, view=self.mock_view)
    self.assertFalse(form.is_valid())
callisto-core
positive
def killScript(reason=None):
    if reason is None:
        print(readMe)
        sys.exit()
    else:
        <DeepExtract>
        logString = '%s -- %s' % (datetime.datetime.now(), 'ERROR: %s' % reason)
        print(logString)
        if not filePath is None:
            try:
                with open(filePath, 'a') as logFile:
                    logFile.write('%s\n' % logString)
            except:
                log('ERROR: Unable to append to log file')
        </DeepExtract>
        sys.exit()
def killScript(reason=None):
    if reason is None:
        print(readMe)
        sys.exit()
    else:
        logString = '%s -- %s' % (datetime.datetime.now(), 'ERROR: %s' % reason)
        print(logString)
        if not filePath is None:
            try:
                with open(filePath, 'a') as logFile:
                    logFile.write('%s\n' % logString)
            except:
                log('ERROR: Unable to append to log file')
        sys.exit()
automation-scripts
positive
def __exit__(self, exc_type, exc_val, exc_tb):
    <DeepExtract>
    LOG.info('Releasing lock')
    return self.lock_config.delete_lock()
    </DeepExtract>
    return False
def __exit__(self, exc_type, exc_val, exc_tb):
    LOG.info('Releasing lock')
    return self.lock_config.delete_lock()
    return False
armada
positive
def write_data_labels(num_processes, merged_entity_emb_file, merged_storage_type, sent_idx2row, cache_folder, out_file, entity_dump, train_in_candidates, max_candidates, trie_candidate_map_folder=None, trie_qid2eid_file=None): """Take the flattened data from merge_sentences and write out predictions. Args: num_processes: number of processes merged_entity_emb_file: input memmap file after merge sentences merged_storage_type: input file storage type sent_idx2row: Dict of sentence idx to row relevant to this subbatch cache_folder: folder to save temporary outputs out_file: final output file for predictions entity_dump: entity dump train_in_candidates: whether NC entities are not in candidate lists max_candidates: maximum number of candidates trie_candidate_map_folder: folder where trie of alias->candidate map is stored for parallel proccessing trie_qid2eid_file: file where trie of qid->eid map is stored for parallel proccessing """ st = time.time() <DeepExtract> filt_emb_data = np.memmap(merged_entity_emb_file, dtype=merged_storage_type, mode='r') sental2embid = {} for (i, row) in tqdm(enumerate(filt_emb_data), total=len(filt_emb_data), desc='Getting setnal2emb map'): sent_idx = row['sent_idx'] alias_idx = row['alias_list_pos'] assert sent_idx != -1 and alias_idx != -1, f'{i} {row} Has Sent {sent_idx}, Al {alias_idx}' sental2embid[f'{sent_idx}_{alias_idx}'] = i sental2embid = sental2embid </DeepExtract> log_rank_0_debug(logger, f'Finished getting sentence map {time.time() - st}s') total_input = len(sent_idx2row) if num_processes == 1: filt_emb_data = np.memmap(merged_entity_emb_file, dtype=merged_storage_type, mode='r') <DeepExtract> with open(out_file, 'w') as f_out: for sent_idx in sent_idx2row: line = sent_idx2row[sent_idx] aliases = line['aliases'] char_spans = line['char_spans'] assert sent_idx == str(line['sent_idx_unq']) qids = [] ctx_emb_ids = [] entity_ids = [] probs = [] cands = [] cand_probs = [] entity_cands_qid = map_aliases_to_candidates(train_in_candidates, max_candidates, entity_dump.get_alias2qids_dict(), aliases) entity_cands_eid = map_candidate_qids_to_eid(entity_cands_qid, entity_dump.get_qid2eid_dict()) for (al_idx, alias) in enumerate(aliases): sent_idx_key = f'{sent_idx}_{al_idx}' assert sent_idx_key in sental2embid, f'Dumped prediction data does not match data file. 
Can not find {sent_idx} - {al_idx}' if isinstance(sental2embid, dict): emb_idx = sental2embid[sent_idx_key] else: emb_idx = sental2embid[sent_idx_key][0][0] ctx_emb_ids.append(emb_idx) prob = filt_emb_data[emb_idx]['final_loss_prob'] prob = prob if not math.isnan(prob) else None cand_prob = strip_nan(filt_emb_data[emb_idx]['final_loss_cand_probs']) pred_cand = filt_emb_data[emb_idx]['final_loss_pred'] eid = entity_cands_eid[al_idx][pred_cand] qid = entity_cands_qid[al_idx][pred_cand] qids.append(qid) probs.append(prob) cands.append(list(entity_cands_qid[al_idx])) cand_probs.append(list(cand_prob)) entity_ids.append(eid) line['qids'] = qids line['probs'] = probs line['cands'] = cands line['cand_probs'] = cand_probs line['entity_ids'] = entity_ids line['char_spans'] = char_spans f_out.write(ujson.dumps(line, ensure_ascii=False) + '\n') </DeepExtract> else: assert trie_candidate_map_folder is not None, 'trie_candidate_map_folder is None and you have parallel turned on' assert trie_qid2eid_file is not None, 'trie_qid2eid_file is None and you have parallel turned on' trie_folder = os.path.join(cache_folder, 'bootleg_sental2embid') utils.ensure_dir(trie_folder) trie_file = os.path.join(trie_folder, 'sentidx.marisa') utils.create_single_item_trie(sental2embid, out_file=trie_file) create_ex_indir = os.path.join(cache_folder, '_bootleg_eval_temp_indir') utils.ensure_dir(create_ex_indir) create_ex_outdir = os.path.join(cache_folder, '_bootleg_eval_temp_outdir') utils.ensure_dir(create_ex_outdir) chunk_input = int(np.ceil(total_input / num_processes)) logger.debug(f'Chunking up {total_input} lines into subfiles of size {chunk_input} lines') input_files = [] i = 0 cur_lines = 0 file_split = os.path.join(create_ex_indir, f'out{i}.jsonl') open_file = open(file_split, 'w') for s_idx in sent_idx2row: if cur_lines >= chunk_input: open_file.close() input_files.append(file_split) cur_lines = 0 i += 1 file_split = os.path.join(create_ex_indir, f'out{i}.jsonl') open_file = open(file_split, 'w') line = sent_idx2row[s_idx] open_file.write(ujson.dumps(line, ensure_ascii=False) + '\n') cur_lines += 1 open_file.close() input_files.append(file_split) output_files = [in_file_name.replace(create_ex_indir, create_ex_outdir) for in_file_name in input_files] log_rank_0_debug(logger, 'Done chunking files. Starting pool') pool = multiprocessing.Pool(processes=num_processes, initializer=write_data_labels_initializer, initargs=[merged_entity_emb_file, merged_storage_type, trie_file, train_in_candidates, max_candidates, trie_candidate_map_folder, trie_qid2eid_file]) input_args = list(zip(input_files, output_files)) total = 0 for res in pool.imap(write_data_labels_hlp, input_args, chunksize=1): total += 1 pool.close() pool.join() log_rank_0_debug(logger, 'Merging output files') with open(out_file, 'wb') as outfile: for filename in glob.glob(os.path.join(create_ex_outdir, '*')): if filename == out_file: continue with open(filename, 'rb') as readfile: shutil.copyfileobj(readfile, outfile)
def write_data_labels(num_processes, merged_entity_emb_file, merged_storage_type, sent_idx2row, cache_folder, out_file, entity_dump, train_in_candidates, max_candidates, trie_candidate_map_folder=None, trie_qid2eid_file=None): """Take the flattened data from merge_sentences and write out predictions. Args: num_processes: number of processes merged_entity_emb_file: input memmap file after merge sentences merged_storage_type: input file storage type sent_idx2row: Dict of sentence idx to row relevant to this subbatch cache_folder: folder to save temporary outputs out_file: final output file for predictions entity_dump: entity dump train_in_candidates: whether NC entities are not in candidate lists max_candidates: maximum number of candidates trie_candidate_map_folder: folder where trie of alias->candidate map is stored for parallel proccessing trie_qid2eid_file: file where trie of qid->eid map is stored for parallel proccessing """ st = time.time() filt_emb_data = np.memmap(merged_entity_emb_file, dtype=merged_storage_type, mode='r') sental2embid = {} for (i, row) in tqdm(enumerate(filt_emb_data), total=len(filt_emb_data), desc='Getting setnal2emb map'): sent_idx = row['sent_idx'] alias_idx = row['alias_list_pos'] assert sent_idx != -1 and alias_idx != -1, f'{i} {row} Has Sent {sent_idx}, Al {alias_idx}' sental2embid[f'{sent_idx}_{alias_idx}'] = i sental2embid = sental2embid log_rank_0_debug(logger, f'Finished getting sentence map {time.time() - st}s') total_input = len(sent_idx2row) if num_processes == 1: filt_emb_data = np.memmap(merged_entity_emb_file, dtype=merged_storage_type, mode='r') with open(out_file, 'w') as f_out: for sent_idx in sent_idx2row: line = sent_idx2row[sent_idx] aliases = line['aliases'] char_spans = line['char_spans'] assert sent_idx == str(line['sent_idx_unq']) qids = [] ctx_emb_ids = [] entity_ids = [] probs = [] cands = [] cand_probs = [] entity_cands_qid = map_aliases_to_candidates(train_in_candidates, max_candidates, entity_dump.get_alias2qids_dict(), aliases) entity_cands_eid = map_candidate_qids_to_eid(entity_cands_qid, entity_dump.get_qid2eid_dict()) for (al_idx, alias) in enumerate(aliases): sent_idx_key = f'{sent_idx}_{al_idx}' assert sent_idx_key in sental2embid, f'Dumped prediction data does not match data file. 
Can not find {sent_idx} - {al_idx}' if isinstance(sental2embid, dict): emb_idx = sental2embid[sent_idx_key] else: emb_idx = sental2embid[sent_idx_key][0][0] ctx_emb_ids.append(emb_idx) prob = filt_emb_data[emb_idx]['final_loss_prob'] prob = prob if not math.isnan(prob) else None cand_prob = strip_nan(filt_emb_data[emb_idx]['final_loss_cand_probs']) pred_cand = filt_emb_data[emb_idx]['final_loss_pred'] eid = entity_cands_eid[al_idx][pred_cand] qid = entity_cands_qid[al_idx][pred_cand] qids.append(qid) probs.append(prob) cands.append(list(entity_cands_qid[al_idx])) cand_probs.append(list(cand_prob)) entity_ids.append(eid) line['qids'] = qids line['probs'] = probs line['cands'] = cands line['cand_probs'] = cand_probs line['entity_ids'] = entity_ids line['char_spans'] = char_spans f_out.write(ujson.dumps(line, ensure_ascii=False) + '\n') else: assert trie_candidate_map_folder is not None, 'trie_candidate_map_folder is None and you have parallel turned on' assert trie_qid2eid_file is not None, 'trie_qid2eid_file is None and you have parallel turned on' trie_folder = os.path.join(cache_folder, 'bootleg_sental2embid') utils.ensure_dir(trie_folder) trie_file = os.path.join(trie_folder, 'sentidx.marisa') utils.create_single_item_trie(sental2embid, out_file=trie_file) create_ex_indir = os.path.join(cache_folder, '_bootleg_eval_temp_indir') utils.ensure_dir(create_ex_indir) create_ex_outdir = os.path.join(cache_folder, '_bootleg_eval_temp_outdir') utils.ensure_dir(create_ex_outdir) chunk_input = int(np.ceil(total_input / num_processes)) logger.debug(f'Chunking up {total_input} lines into subfiles of size {chunk_input} lines') input_files = [] i = 0 cur_lines = 0 file_split = os.path.join(create_ex_indir, f'out{i}.jsonl') open_file = open(file_split, 'w') for s_idx in sent_idx2row: if cur_lines >= chunk_input: open_file.close() input_files.append(file_split) cur_lines = 0 i += 1 file_split = os.path.join(create_ex_indir, f'out{i}.jsonl') open_file = open(file_split, 'w') line = sent_idx2row[s_idx] open_file.write(ujson.dumps(line, ensure_ascii=False) + '\n') cur_lines += 1 open_file.close() input_files.append(file_split) output_files = [in_file_name.replace(create_ex_indir, create_ex_outdir) for in_file_name in input_files] log_rank_0_debug(logger, 'Done chunking files. Starting pool') pool = multiprocessing.Pool(processes=num_processes, initializer=write_data_labels_initializer, initargs=[merged_entity_emb_file, merged_storage_type, trie_file, train_in_candidates, max_candidates, trie_candidate_map_folder, trie_qid2eid_file]) input_args = list(zip(input_files, output_files)) total = 0 for res in pool.imap(write_data_labels_hlp, input_args, chunksize=1): total += 1 pool.close() pool.join() log_rank_0_debug(logger, 'Merging output files') with open(out_file, 'wb') as outfile: for filename in glob.glob(os.path.join(create_ex_outdir, '*')): if filename == out_file: continue with open(filename, 'rb') as readfile: shutil.copyfileobj(readfile, outfile)
bootleg
positive
def system_is_system_running(self): if _wait: for attempt in xrange(DefaultMaximumTimeout): <DeepExtract> conf = self.sysinit_target() status_file = self.status_file_from(conf) if not os.path.isfile(status_file): time.sleep(EpsilonTime) if not os.path.isfile(status_file): state = 'offline' status = self.read_status_from(conf) state = status.get('SubState', 'unknown') </DeepExtract> logg.debug('[%i] wait %s', attempt, state) if state not in ['initializing', 'starting']: break time.sleep(1) <DeepExtract> conf = self.sysinit_target() status_file = self.status_file_from(conf) if not os.path.isfile(status_file): time.sleep(EpsilonTime) if not os.path.isfile(status_file): state = 'offline' status = self.read_status_from(conf) state = status.get('SubState', 'unknown') </DeepExtract> logg.debug('is-system-running %s', state) if self._quiet: return state in ['running'] elif state in ['running']: return (True, state) else: return (False, state)
def system_is_system_running(self): if _wait: for attempt in xrange(DefaultMaximumTimeout): conf = self.sysinit_target() status_file = self.status_file_from(conf) if not os.path.isfile(status_file): time.sleep(EpsilonTime) if not os.path.isfile(status_file): state = 'offline' status = self.read_status_from(conf) state = status.get('SubState', 'unknown') logg.debug('[%i] wait %s', attempt, state) if state not in ['initializing', 'starting']: break time.sleep(1) conf = self.sysinit_target() status_file = self.status_file_from(conf) if not os.path.isfile(status_file): time.sleep(EpsilonTime) if not os.path.isfile(status_file): state = 'offline' status = self.read_status_from(conf) state = status.get('SubState', 'unknown') logg.debug('is-system-running %s', state) if self._quiet: return state in ['running'] elif state in ['running']: return (True, state) else: return (False, state)
deployment
positive
def evaluation_metrics(prediction_file, testset_path):
    """
    Args:
        prediction_file: str
        testset_path: str
    Returns:
        acc: float
            top-1 accuracy.
    """
    <DeepExtract>
    with open(prediction_file) as f:
        lines = f.readlines()
    dictionary = dict([l.replace('\n', '').split(' ') for l in lines])
    top1_reference_ids = dictionary
    </DeepExtract>
    <DeepExtract>
    with open(testset_path) as f:
        lines = f.readlines()
    dictionary = dict([l.replace('\n', '').split(' ') for l in lines])
    gt_labels = dictionary
    </DeepExtract>
    return evaluate(top1_reference_ids, gt_labels)
def evaluation_metrics(prediction_file, testset_path):
    """
    Args:
        prediction_file: str
        testset_path: str
    Returns:
        acc: float
            top-1 accuracy.
    """
    with open(prediction_file) as f:
        lines = f.readlines()
    dictionary = dict([l.replace('\n', '').split(' ') for l in lines])
    top1_reference_ids = dictionary
    with open(testset_path) as f:
        lines = f.readlines()
    dictionary = dict([l.replace('\n', '').split(' ') for l in lines])
    gt_labels = dictionary
    return evaluate(top1_reference_ids, gt_labels)
AI_Starthon2019
positive
def main(): argument_spec = dict(instance=dict(), id=dict(), name=dict(), volume_size=dict(type='int'), volume_type=dict(default='standard', choices=['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2']), iops=dict(type='int'), encrypted=dict(default=False, type='bool'), kms_key_id=dict(), device_name=dict(), delete_on_termination=dict(default=False, type='bool'), zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), snapshot=dict(), state=dict(default='present', choices=['absent', 'present']), tags=dict(type='dict', aliases=['resource_tags']), modify_volume=dict(default=False, type='bool'), throughput=dict(type='int'), outpost_arn=dict(type='str'), purge_tags=dict(type='bool', default=True), multi_attach=dict(type='bool')) module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[['volume_type', 'io1', ['iops']], ['volume_type', 'io2', ['iops']]], supports_check_mode=True) param_id = module.params.get('id') name = module.params.get('name') instance = module.params.get('instance') volume_size = module.params.get('volume_size') device_name = module.params.get('device_name') zone = module.params.get('zone') snapshot = module.params.get('snapshot') state = module.params.get('state') tags = module.params.get('tags') iops = module.params.get('iops') volume_type = module.params.get('volume_type') throughput = module.params.get('throughput') multi_attach = module.params.get('multi_attach') if instance is None and zone is None and (state == 'present'): module.fail_json(msg='You must specify either instance or zone') if instance == 'None' or instance == '': instance = None detach_vol_flag = True else: detach_vol_flag = False if iops: if volume_type in ('gp2', 'st1', 'sc1', 'standard'): module.fail_json(msg='IOPS is not supported for gp2, st1, sc1, or standard volumes.') if volume_type == 'gp3' and (int(iops) < 3000 or int(iops) > 16000): module.fail_json(msg='For a gp3 volume type, IOPS values must be between 3000 and 16000.') if volume_type in ('io1', 'io2') and (int(iops) < 100 or int(iops) > 64000): module.fail_json(msg='For io1 and io2 volume types, IOPS values must be between 100 and 64000.') if throughput: if volume_type != 'gp3': module.fail_json(msg='Throughput is only supported for gp3 volume.') if throughput < 125 or throughput > 1000: module.fail_json(msg='Throughput values must be between 125 and 1000.') if multi_attach is True and volume_type not in ('io1', 'io2'): module.fail_json(msg='multi_attach is only supported for io1 and io2 volumes.') changed = False ec2_conn = module.client('ec2', AWSRetry.jittered_backoff()) inst = None if not volume_size and (not (param_id or name or snapshot)): module.fail_json(msg='You must specify volume_size or identify an existing volume by id, name, or snapshot') <DeepExtract> name = module.params.get('name') param_id = module.params.get('id') zone = module.params.get('zone') if not vol_id: vol_id = param_id if vol_id is None and name is None: volume = None find_params = dict() vols = [] if vol_id: find_params['VolumeIds'] = [vol_id] elif name: find_params['Filters'] = ansible_dict_to_boto3_filter_list({'tag:Name': name}) elif zone: find_params['Filters'] = ansible_dict_to_boto3_filter_list({'availability-zone': zone}) try: paginator = ec2_conn.get_paginator('describe_volumes') vols_response = paginator.paginate(**find_params) vols = list(vols_response)[0].get('Volumes') except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: if is_boto3_error_code('InvalidVolume.NotFound'): module.exit_json(msg='Volume 
{0} does not exist'.format(vol_id), changed=False) module.fail_json_aws(e, msg='Error while getting EBS volumes with the parameters {0}'.format(find_params)) if not vols: if False and vol_id: msg = 'Could not find volume with id: {0}'.format(vol_id) if name: msg += ' and name: {0}'.format(name) module.fail_json(msg=msg) else: volume = None if len(vols) > 1: module.fail_json(msg='Found more than one volume in zone (if specified) with name: {0}'.format(name), found=[v['VolumeId'] for v in vols]) vol = camel_dict_to_snake_dict(vols[0]) volume = vol </DeepExtract> if state == 'present': if instance: <DeepExtract> instance = None if not instance: inst = instance try: reservation_response = ec2_conn.describe_instances(aws_retry=True, InstanceIds=[instance]) instance = camel_dict_to_snake_dict(reservation_response['Reservations'][0]['Instances'][0]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Error while getting instance_id with id {0}'.format(instance)) inst = instance </DeepExtract> zone = inst['placement']['availability_zone'] if device_name is None: if inst.get('platform', '') == 'Windows': device_name = '/dev/xvdf' else: device_name = '/dev/sdf' <DeepExtract> mapped_block_device = None if not inst: mapped_device = mapped_block_device if not device_name: mapped_device = mapped_block_device for device in inst.get('block_device_mappings', []): if device['device_name'] == device_name: mapped_block_device = device break mapped_device = mapped_block_device </DeepExtract> if mapped_device: other_volume_mapped = False if volume: if volume['volume_id'] != mapped_device['ebs']['volume_id']: other_volume_mapped = True else: other_volume_mapped = True if other_volume_mapped: module.exit_json(msg='Volume mapping for {0} already exists on instance {1}'.format(device_name, instance), volume_id=mapped_device['ebs']['volume_id'], found_volume=volume, device=device_name, changed=False) final_tags = None tags_changed = False if volume: <DeepExtract> changed = False req_obj = {'VolumeId': volume['volume_id']} if module.params.get('modify_volume'): target_type = module.params.get('volume_type') original_type = None type_changed = False if target_type: original_type = volume['volume_type'] if target_type != original_type: type_changed = True req_obj['VolumeType'] = target_type iops_changed = False target_iops = module.params.get('iops') original_iops = volume.get('iops') if target_iops: if target_iops != original_iops: iops_changed = True req_obj['Iops'] = target_iops else: req_obj['Iops'] = original_iops elif type_changed and target_type == 'gp3': if original_iops and (int(original_iops) < 3000 or int(original_iops) > 16000) or not original_iops: req_obj['Iops'] = 3000 iops_changed = True target_size = module.params.get('volume_size') size_changed = False if target_size: original_size = volume['size'] if target_size != original_size: size_changed = True req_obj['Size'] = target_size target_type = module.params.get('volume_type') original_type = None type_changed = False if target_type: original_type = volume['volume_type'] if target_type != original_type: type_changed = True req_obj['VolumeType'] = target_type target_throughput = module.params.get('throughput') throughput_changed = False if target_throughput: original_throughput = volume.get('throughput') if target_throughput != original_throughput: throughput_changed = True req_obj['Throughput'] = target_throughput target_multi_attach = module.params.get('multi_attach') multi_attach_changed = 
False if target_multi_attach is not None: original_multi_attach = volume['multi_attach_enabled'] if target_multi_attach != original_multi_attach: multi_attach_changed = True req_obj['MultiAttachEnabled'] = target_multi_attach changed = iops_changed or size_changed or type_changed or throughput_changed or multi_attach_changed if changed: if module.check_mode: module.exit_json(changed=True, msg='Would have updated volume if not in check mode.') response = ec2_conn.modify_volume(**req_obj) volume['size'] = response.get('VolumeModification').get('TargetSize') volume['volume_type'] = response.get('VolumeModification').get('TargetVolumeType') volume['iops'] = response.get('VolumeModification').get('TargetIops') volume['multi_attach_enabled'] = response.get('VolumeModification').get('TargetMultiAttachEnabled') volume['throughput'] = response.get('VolumeModification').get('TargetThroughput') (volume, changed) = (volume, changed) </DeepExtract> if name: if not tags: tags = boto3_tag_list_to_ansible_dict(volume.get('tags')) tags['Name'] = name <DeepExtract> if module.check_mode: (final_tags, tags_changed) = ({}, True) changed = ensure_ec2_tags(ec2_conn, module, volume['volume_id'], 'volume', tags, module.params.get('purge_tags'), ['InvalidVolume.NotFound']) final_tags = describe_ec2_tags(ec2_conn, module, volume['volume_id'], 'volume') (final_tags, tags_changed) = (final_tags, changed) </DeepExtract> else: <DeepExtract> changed = False iops = module.params.get('iops') encrypted = module.params.get('encrypted') kms_key_id = module.params.get('kms_key_id') volume_size = module.params.get('volume_size') volume_type = module.params.get('volume_type') snapshot = module.params.get('snapshot') throughput = module.params.get('throughput') multi_attach = module.params.get('multi_attach') outpost_arn = module.params.get('outpost_arn') tags = module.params.get('tags') or {} name = module.params.get('name') volume = get_volume(module, ec2_conn) if module.check_mode: module.exit_json(changed=True, msg='Would have created a volume if not in check mode.') if volume is None: try: changed = True additional_params = dict() if volume_size: additional_params['Size'] = int(volume_size) if kms_key_id: additional_params['KmsKeyId'] = kms_key_id if snapshot: additional_params['SnapshotId'] = snapshot if iops: additional_params['Iops'] = int(iops) if volume_type == 'gp3' and (not iops): additional_params['Iops'] = 3000 if throughput: additional_params['Throughput'] = int(throughput) if multi_attach: additional_params['MultiAttachEnabled'] = True if outpost_arn: if is_outpost_arn(outpost_arn): additional_params['OutpostArn'] = outpost_arn else: module.fail_json('OutpostArn does not match the pattern specified in API specifications.') if name: tags['Name'] = name if tags: additional_params['TagSpecifications'] = boto3_tag_specifications(tags, types=['volume']) create_vol_response = ec2_conn.create_volume(aws_retry=True, AvailabilityZone=zone, Encrypted=encrypted, VolumeType=volume_type, **additional_params) waiter = ec2_conn.get_waiter('volume_available') waiter.wait(VolumeIds=[create_vol_response['VolumeId']]) volume = get_volume(module, ec2_conn, vol_id=create_vol_response['VolumeId']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Error while creating EBS volume') (volume, changed) = (volume, changed) </DeepExtract> if detach_vol_flag: <DeepExtract> changed = False attachment_data = get_attachment_data(volume, wanted_state='attached') for attachment in 
attachment_data: if module.check_mode: module.exit_json(changed=True, msg='Would have detached volume if not in check mode.') ec2_conn.detach_volume(aws_retry=True, InstanceId=attachment['instance_id'], VolumeId=volume['volume_id']) waiter = ec2_conn.get_waiter('volume_available') waiter.wait(VolumeIds=[volume['volume_id']]) changed = True volume = get_volume(module, ec2_conn, vol_id=volume['volume_id']) (volume, attach_changed) = (volume, changed) </DeepExtract> elif inst is not None: <DeepExtract> changed = False attachment_data = get_attachment_data(volume, wanted_state='attached') if attachment_data: if module.check_mode: if attachment_data[0].get('status') in ['attached', 'attaching']: module.exit_json(changed=False, msg='IN CHECK MODE - volume already attached to instance: {0}.'.format(attachment_data[0].get('instance_id', None))) if not volume['multi_attach_enabled']: if attachment_data[0].get('instance_id', None) != inst['instance_id']: module.fail_json(msg='Volume {0} is already attached to another instance: {1}.'.format(volume['volume_id'], attachment_data[0].get('instance_id', None))) else: (volume, attach_changed) = (volume, changed) try: if module.check_mode: module.exit_json(changed=True, msg='Would have attached volume if not in check mode.') attach_response = ec2_conn.attach_volume(aws_retry=True, Device=device_name, InstanceId=inst['instance_id'], VolumeId=volume['volume_id']) waiter = ec2_conn.get_waiter('volume_in_use') waiter.wait(VolumeIds=[attach_response['VolumeId']]) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Error while attaching EBS volume') modify_dot_attribute(module, ec2_conn, inst, device_name) volume = get_volume(module, ec2_conn, vol_id=volume['volume_id']) (volume, attach_changed) = (volume, changed) </DeepExtract> else: attach_changed = False <DeepExtract> if not final_tags: final_tags = boto3_tag_list_to_ansible_dict(volume.get('tags')) attachment_data = get_attachment_data(volume) volume_info = {'create_time': volume.get('create_time'), 'encrypted': volume.get('encrypted'), 'id': volume.get('volume_id'), 'iops': volume.get('iops'), 'size': volume.get('size'), 'snapshot_id': volume.get('snapshot_id'), 'status': volume.get('state'), 'type': volume.get('volume_type'), 'zone': volume.get('availability_zone'), 'attachment_set': attachment_data, 'multi_attach_enabled': volume.get('multi_attach_enabled'), 'tags': final_tags} volume_info['throughput'] = volume.get('throughput') volume_info = volume_info </DeepExtract> if tags_changed or attach_changed: changed = True module.exit_json(changed=changed, volume=volume_info, device=device_name, volume_id=volume_info['id'], volume_type=volume_info['type']) elif state == 'absent': if not name and (not param_id): module.fail_json('A volume name or id is required for deletion') if volume: if module.check_mode: module.exit_json(changed=True, msg='Would have deleted volume if not in check mode.') <DeepExtract> changed = False attachment_data = get_attachment_data(volume, wanted_state='attached') for attachment in attachment_data: if module.check_mode: module.exit_json(changed=True, msg='Would have detached volume if not in check mode.') ec2_conn.detach_volume(aws_retry=True, InstanceId=attachment['instance_id'], VolumeId=volume['volume_id']) waiter = ec2_conn.get_waiter('volume_available') waiter.wait(VolumeIds=[volume['volume_id']]) changed = True volume = get_volume(module, ec2_conn, vol_id=volume['volume_id']) return (volume, changed) 
</DeepExtract> <DeepExtract> changed = False if volume['volume_id']: try: ec2_conn.delete_volume(aws_retry=True, VolumeId=volume['volume_id']) changed = True except is_boto3_error_code('InvalidVolume.NotFound'): module.exit_json(changed=False) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Error while deleting volume') changed = changed </DeepExtract> module.exit_json(changed=changed)
def main(): argument_spec = dict(instance=dict(), id=dict(), name=dict(), volume_size=dict(type='int'), volume_type=dict(default='standard', choices=['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2']), iops=dict(type='int'), encrypted=dict(default=False, type='bool'), kms_key_id=dict(), device_name=dict(), delete_on_termination=dict(default=False, type='bool'), zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), snapshot=dict(), state=dict(default='present', choices=['absent', 'present']), tags=dict(type='dict', aliases=['resource_tags']), modify_volume=dict(default=False, type='bool'), throughput=dict(type='int'), outpost_arn=dict(type='str'), purge_tags=dict(type='bool', default=True), multi_attach=dict(type='bool')) module = AnsibleAWSModule(argument_spec=argument_spec, required_if=[['volume_type', 'io1', ['iops']], ['volume_type', 'io2', ['iops']]], supports_check_mode=True) param_id = module.params.get('id') name = module.params.get('name') instance = module.params.get('instance') volume_size = module.params.get('volume_size') device_name = module.params.get('device_name') zone = module.params.get('zone') snapshot = module.params.get('snapshot') state = module.params.get('state') tags = module.params.get('tags') iops = module.params.get('iops') volume_type = module.params.get('volume_type') throughput = module.params.get('throughput') multi_attach = module.params.get('multi_attach') if instance is None and zone is None and (state == 'present'): module.fail_json(msg='You must specify either instance or zone') if instance == 'None' or instance == '': instance = None detach_vol_flag = True else: detach_vol_flag = False if iops: if volume_type in ('gp2', 'st1', 'sc1', 'standard'): module.fail_json(msg='IOPS is not supported for gp2, st1, sc1, or standard volumes.') if volume_type == 'gp3' and (int(iops) < 3000 or int(iops) > 16000): module.fail_json(msg='For a gp3 volume type, IOPS values must be between 3000 and 16000.') if volume_type in ('io1', 'io2') and (int(iops) < 100 or int(iops) > 64000): module.fail_json(msg='For io1 and io2 volume types, IOPS values must be between 100 and 64000.') if throughput: if volume_type != 'gp3': module.fail_json(msg='Throughput is only supported for gp3 volume.') if throughput < 125 or throughput > 1000: module.fail_json(msg='Throughput values must be between 125 and 1000.') if multi_attach is True and volume_type not in ('io1', 'io2'): module.fail_json(msg='multi_attach is only supported for io1 and io2 volumes.') changed = False ec2_conn = module.client('ec2', AWSRetry.jittered_backoff()) inst = None if not volume_size and (not (param_id or name or snapshot)): module.fail_json(msg='You must specify volume_size or identify an existing volume by id, name, or snapshot') name = module.params.get('name') param_id = module.params.get('id') zone = module.params.get('zone') if not vol_id: vol_id = param_id if vol_id is None and name is None: volume = None find_params = dict() vols = [] if vol_id: find_params['VolumeIds'] = [vol_id] elif name: find_params['Filters'] = ansible_dict_to_boto3_filter_list({'tag:Name': name}) elif zone: find_params['Filters'] = ansible_dict_to_boto3_filter_list({'availability-zone': zone}) try: paginator = ec2_conn.get_paginator('describe_volumes') vols_response = paginator.paginate(**find_params) vols = list(vols_response)[0].get('Volumes') except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: if is_boto3_error_code('InvalidVolume.NotFound'): module.exit_json(msg='Volume {0} does not 
exist'.format(vol_id), changed=False) module.fail_json_aws(e, msg='Error while getting EBS volumes with the parameters {0}'.format(find_params)) if not vols: if False and vol_id: msg = 'Could not find volume with id: {0}'.format(vol_id) if name: msg += ' and name: {0}'.format(name) module.fail_json(msg=msg) else: volume = None if len(vols) > 1: module.fail_json(msg='Found more than one volume in zone (if specified) with name: {0}'.format(name), found=[v['VolumeId'] for v in vols]) vol = camel_dict_to_snake_dict(vols[0]) volume = vol if state == 'present': if instance: instance = None if not instance: inst = instance try: reservation_response = ec2_conn.describe_instances(aws_retry=True, InstanceIds=[instance]) instance = camel_dict_to_snake_dict(reservation_response['Reservations'][0]['Instances'][0]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Error while getting instance_id with id {0}'.format(instance)) inst = instance zone = inst['placement']['availability_zone'] if device_name is None: if inst.get('platform', '') == 'Windows': device_name = '/dev/xvdf' else: device_name = '/dev/sdf' mapped_block_device = None if not inst: mapped_device = mapped_block_device if not device_name: mapped_device = mapped_block_device for device in inst.get('block_device_mappings', []): if device['device_name'] == device_name: mapped_block_device = device break mapped_device = mapped_block_device if mapped_device: other_volume_mapped = False if volume: if volume['volume_id'] != mapped_device['ebs']['volume_id']: other_volume_mapped = True else: other_volume_mapped = True if other_volume_mapped: module.exit_json(msg='Volume mapping for {0} already exists on instance {1}'.format(device_name, instance), volume_id=mapped_device['ebs']['volume_id'], found_volume=volume, device=device_name, changed=False) final_tags = None tags_changed = False if volume: changed = False req_obj = {'VolumeId': volume['volume_id']} if module.params.get('modify_volume'): target_type = module.params.get('volume_type') original_type = None type_changed = False if target_type: original_type = volume['volume_type'] if target_type != original_type: type_changed = True req_obj['VolumeType'] = target_type iops_changed = False target_iops = module.params.get('iops') original_iops = volume.get('iops') if target_iops: if target_iops != original_iops: iops_changed = True req_obj['Iops'] = target_iops else: req_obj['Iops'] = original_iops elif type_changed and target_type == 'gp3': if original_iops and (int(original_iops) < 3000 or int(original_iops) > 16000) or not original_iops: req_obj['Iops'] = 3000 iops_changed = True target_size = module.params.get('volume_size') size_changed = False if target_size: original_size = volume['size'] if target_size != original_size: size_changed = True req_obj['Size'] = target_size target_type = module.params.get('volume_type') original_type = None type_changed = False if target_type: original_type = volume['volume_type'] if target_type != original_type: type_changed = True req_obj['VolumeType'] = target_type target_throughput = module.params.get('throughput') throughput_changed = False if target_throughput: original_throughput = volume.get('throughput') if target_throughput != original_throughput: throughput_changed = True req_obj['Throughput'] = target_throughput target_multi_attach = module.params.get('multi_attach') multi_attach_changed = False if target_multi_attach is not None: original_multi_attach = volume['multi_attach_enabled'] if 
target_multi_attach != original_multi_attach: multi_attach_changed = True req_obj['MultiAttachEnabled'] = target_multi_attach changed = iops_changed or size_changed or type_changed or throughput_changed or multi_attach_changed if changed: if module.check_mode: module.exit_json(changed=True, msg='Would have updated volume if not in check mode.') response = ec2_conn.modify_volume(**req_obj) volume['size'] = response.get('VolumeModification').get('TargetSize') volume['volume_type'] = response.get('VolumeModification').get('TargetVolumeType') volume['iops'] = response.get('VolumeModification').get('TargetIops') volume['multi_attach_enabled'] = response.get('VolumeModification').get('TargetMultiAttachEnabled') volume['throughput'] = response.get('VolumeModification').get('TargetThroughput') (volume, changed) = (volume, changed) if name: if not tags: tags = boto3_tag_list_to_ansible_dict(volume.get('tags')) tags['Name'] = name if module.check_mode: (final_tags, tags_changed) = ({}, True) changed = ensure_ec2_tags(ec2_conn, module, volume['volume_id'], 'volume', tags, module.params.get('purge_tags'), ['InvalidVolume.NotFound']) final_tags = describe_ec2_tags(ec2_conn, module, volume['volume_id'], 'volume') (final_tags, tags_changed) = (final_tags, changed) else: changed = False iops = module.params.get('iops') encrypted = module.params.get('encrypted') kms_key_id = module.params.get('kms_key_id') volume_size = module.params.get('volume_size') volume_type = module.params.get('volume_type') snapshot = module.params.get('snapshot') throughput = module.params.get('throughput') multi_attach = module.params.get('multi_attach') outpost_arn = module.params.get('outpost_arn') tags = module.params.get('tags') or {} name = module.params.get('name') volume = get_volume(module, ec2_conn) if module.check_mode: module.exit_json(changed=True, msg='Would have created a volume if not in check mode.') if volume is None: try: changed = True additional_params = dict() if volume_size: additional_params['Size'] = int(volume_size) if kms_key_id: additional_params['KmsKeyId'] = kms_key_id if snapshot: additional_params['SnapshotId'] = snapshot if iops: additional_params['Iops'] = int(iops) if volume_type == 'gp3' and (not iops): additional_params['Iops'] = 3000 if throughput: additional_params['Throughput'] = int(throughput) if multi_attach: additional_params['MultiAttachEnabled'] = True if outpost_arn: if is_outpost_arn(outpost_arn): additional_params['OutpostArn'] = outpost_arn else: module.fail_json('OutpostArn does not match the pattern specified in API specifications.') if name: tags['Name'] = name if tags: additional_params['TagSpecifications'] = boto3_tag_specifications(tags, types=['volume']) create_vol_response = ec2_conn.create_volume(aws_retry=True, AvailabilityZone=zone, Encrypted=encrypted, VolumeType=volume_type, **additional_params) waiter = ec2_conn.get_waiter('volume_available') waiter.wait(VolumeIds=[create_vol_response['VolumeId']]) volume = get_volume(module, ec2_conn, vol_id=create_vol_response['VolumeId']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Error while creating EBS volume') (volume, changed) = (volume, changed) if detach_vol_flag: changed = False attachment_data = get_attachment_data(volume, wanted_state='attached') for attachment in attachment_data: if module.check_mode: module.exit_json(changed=True, msg='Would have detached volume if not in check mode.') ec2_conn.detach_volume(aws_retry=True, 
InstanceId=attachment['instance_id'], VolumeId=volume['volume_id']) waiter = ec2_conn.get_waiter('volume_available') waiter.wait(VolumeIds=[volume['volume_id']]) changed = True volume = get_volume(module, ec2_conn, vol_id=volume['volume_id']) (volume, attach_changed) = (volume, changed) elif inst is not None: changed = False attachment_data = get_attachment_data(volume, wanted_state='attached') if attachment_data: if module.check_mode: if attachment_data[0].get('status') in ['attached', 'attaching']: module.exit_json(changed=False, msg='IN CHECK MODE - volume already attached to instance: {0}.'.format(attachment_data[0].get('instance_id', None))) if not volume['multi_attach_enabled']: if attachment_data[0].get('instance_id', None) != inst['instance_id']: module.fail_json(msg='Volume {0} is already attached to another instance: {1}.'.format(volume['volume_id'], attachment_data[0].get('instance_id', None))) else: (volume, attach_changed) = (volume, changed) try: if module.check_mode: module.exit_json(changed=True, msg='Would have attached volume if not in check mode.') attach_response = ec2_conn.attach_volume(aws_retry=True, Device=device_name, InstanceId=inst['instance_id'], VolumeId=volume['volume_id']) waiter = ec2_conn.get_waiter('volume_in_use') waiter.wait(VolumeIds=[attach_response['VolumeId']]) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Error while attaching EBS volume') modify_dot_attribute(module, ec2_conn, inst, device_name) volume = get_volume(module, ec2_conn, vol_id=volume['volume_id']) (volume, attach_changed) = (volume, changed) else: attach_changed = False if not final_tags: final_tags = boto3_tag_list_to_ansible_dict(volume.get('tags')) attachment_data = get_attachment_data(volume) volume_info = {'create_time': volume.get('create_time'), 'encrypted': volume.get('encrypted'), 'id': volume.get('volume_id'), 'iops': volume.get('iops'), 'size': volume.get('size'), 'snapshot_id': volume.get('snapshot_id'), 'status': volume.get('state'), 'type': volume.get('volume_type'), 'zone': volume.get('availability_zone'), 'attachment_set': attachment_data, 'multi_attach_enabled': volume.get('multi_attach_enabled'), 'tags': final_tags} volume_info['throughput'] = volume.get('throughput') volume_info = volume_info if tags_changed or attach_changed: changed = True module.exit_json(changed=changed, volume=volume_info, device=device_name, volume_id=volume_info['id'], volume_type=volume_info['type']) elif state == 'absent': if not name and (not param_id): module.fail_json('A volume name or id is required for deletion') if volume: if module.check_mode: module.exit_json(changed=True, msg='Would have deleted volume if not in check mode.') changed = False attachment_data = get_attachment_data(volume, wanted_state='attached') for attachment in attachment_data: if module.check_mode: module.exit_json(changed=True, msg='Would have detached volume if not in check mode.') ec2_conn.detach_volume(aws_retry=True, InstanceId=attachment['instance_id'], VolumeId=volume['volume_id']) waiter = ec2_conn.get_waiter('volume_available') waiter.wait(VolumeIds=[volume['volume_id']]) changed = True volume = get_volume(module, ec2_conn, vol_id=volume['volume_id']) return (volume, changed) changed = False if volume['volume_id']: try: ec2_conn.delete_volume(aws_retry=True, VolumeId=volume['volume_id']) changed = True except is_boto3_error_code('InvalidVolume.NotFound'): module.exit_json(changed=False) except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Error while deleting volume') changed = changed module.exit_json(changed=changed)
amazon.aws
positive
def __exit__(self, exc_type, exc_val, exc_tb): <DeepExtract> self.pools.clear() </DeepExtract> return False
def __exit__(self, exc_type, exc_val, exc_tb): self.pools.clear() return False
alexa-sky-hd
positive
def toc(self, average=True): <DeepExtract> self.diff = time.time() - self.start_time self.total_time += self.diff self.calls += 1 </DeepExtract> if average: return self.average_time else: return self.diff
def toc(self, average=True): self.diff = time.time() - self.start_time self.total_time += self.diff self.calls += 1 if average: return self.average_time else: return self.diff
ACNet
positive
def _process_protocol_v2(argv, ifile, ofile): """ Processes records on the `input stream optionally writing records to the output stream. :param ifile: Input file object. :type ifile: file or InputType :param ofile: Output file object. :type ofile: file or OutputType :return: :const:`None` """ debug = environment.splunklib_logger.debug class_name = self.__class__.__name__ debug('%s.process started under protocol_version=2', class_name) self._protocol_version = 2 try: debug('Reading metadata') <DeepExtract> try: header = ifile.readline() except Exception as error: raise RuntimeError('Failed to read transport header: {}'.format(error)) if not header: (metadata, body) = None match = SearchCommand._header.match(header) if match is None: raise RuntimeError('Failed to parse transport header: {}'.format(header)) (metadata_length, body_length) = match.groups() metadata_length = int(metadata_length) body_length = int(body_length) try: metadata = ifile.read(metadata_length) except Exception as error: raise RuntimeError('Failed to read metadata of length {}: {}'.format(metadata_length, error)) decoder = MetadataDecoder() try: metadata = decoder.decode(metadata) except Exception as error: raise RuntimeError('Failed to parse metadata of length {}: {}'.format(metadata_length, error)) body = '' try: if body_length > 0: body = ifile.read(body_length) except Exception as error: raise RuntimeError('Failed to read body of length {}: {}'.format(body_length, error)) (metadata, body) = (metadata, body) </DeepExtract> action = getattr(metadata, 'action', None) if action != 'getinfo': raise RuntimeError('Expected getinfo action, not {}'.format(action)) if len(body) > 0: raise RuntimeError('Did not expect data for getinfo action') self._metadata = deepcopy(metadata) searchinfo = self._metadata.searchinfo searchinfo.earliest_time = float(searchinfo.earliest_time) searchinfo.latest_time = float(searchinfo.latest_time) searchinfo.search = unquote(searchinfo.search) <DeepExtract> metadata = self._metadata searchinfo = metadata.searchinfo self._input_header.update(allowStream=None, infoPath=os.path.join(searchinfo.dispatch_dir, 'info.csv'), keywords=None, preview=metadata.preview, realtime=searchinfo.earliest_time != 0 and searchinfo.latest_time != 0, search=searchinfo.search, sid=searchinfo.sid, splunkVersion=searchinfo.splunk_version, truncated=None) </DeepExtract> debug(' metadata=%r, input_header=%r', self._metadata, self._input_header) try: tempfile.tempdir = self._metadata.searchinfo.dispatch_dir except AttributeError: raise RuntimeError('%s.metadata.searchinfo.dispatch_dir is undefined'.format(class_name)) debug(' tempfile.tempdir=%r', tempfile.tempdir) except: self._record_writer = RecordWriterV2(ofile) <DeepExtract> (error_type, error, tb) = sys.exc_info() origin = tb while origin.tb_next is not None: origin = origin.tb_next filename = origin.tb_frame.f_code.co_filename lineno = origin.tb_lineno message = '{0} at "{1}", line {2:d} : {3}'.format(error_type.__name__, filename, lineno, error) environment.splunklib_logger.error(message + '\nTraceback:\n' + ''.join(traceback.format_tb(tb))) self.write_error(message) </DeepExtract> <DeepExtract> self._record_writer.flush(finished=True) </DeepExtract> exit(1) try: self._record_writer = RecordWriterV2(ofile, getattr(self._metadata.searchinfo, 'maxresultrows', None)) self.fieldnames = [] self.options.reset() args = self.metadata.searchinfo.args error_count = 0 debug('Parsing arguments') if args and type(args) == list: for arg in args: result = arg.split('=', 1) if 
len(result) == 1: self.fieldnames.append(str(result[0])) else: (name, value) = result name = str(name) try: option = self.options[name] except KeyError: <DeepExtract> self._record_writer.write_message('ERROR', 'Unrecognized option: {}={}'.format(name, value), *args) </DeepExtract> error_count += 1 continue try: option.value = value except ValueError: <DeepExtract> self._record_writer.write_message('ERROR', 'Illegal value: {}={}'.format(name, value), *args) </DeepExtract> error_count += 1 continue missing = self.options.get_missing() if missing is not None: if len(missing) == 1: <DeepExtract> self._record_writer.write_message('ERROR', 'A value for "{}" is required'.format(missing[0]), *args) </DeepExtract> else: <DeepExtract> self._record_writer.write_message('ERROR', 'Values for these required options are missing: {}'.format(', '.join(missing)), *args) </DeepExtract> error_count += 1 if error_count > 0: exit(1) debug(' command: %s', six.text_type(self)) debug('Preparing for execution') <DeepExtract> pass </DeepExtract> if self.record: <DeepExtract> recordings = os.path.join(environment.splunk_home, 'var', 'run', 'splunklib.searchcommands', 'recordings') if not os.path.isdir(recordings): os.makedirs(recordings) recording = os.path.join(recordings, self.__class__.__name__ + '-' + repr(time()) + '.' + self._metadata.action) ifile = Recorder(recording + '.input', ifile) ofile = Recorder(recording + '.output', ofile) dispatch_dir = self._metadata.searchinfo.dispatch_dir if dispatch_dir is not None: (root_dir, base_dir) = os.path.split(dispatch_dir) make_archive(recording + '.dispatch_dir', 'gztar', root_dir, base_dir, logger=self.logger) with open(recording + '.splunk_cmd', 'wb') as f: f.write('splunk cmd python '.encode()) f.write(os.path.basename(argv[0]).encode()) for arg in islice(argv, 1, len(argv)): f.write(' '.encode()) f.write(arg.encode()) (ifile, ofile) = (ifile, ofile) </DeepExtract> self._record_writer.ofile = ofile info = self._metadata.searchinfo for attr in ('args', 'raw_args'): setattr(info, attr, [arg for arg in getattr(info, attr) if not arg.startswith('record=')]) metadata = MetadataEncoder().encode(self._metadata) ifile.record('chunked 1.0,', six.text_type(len(metadata)), ',0\n', metadata) if self.show_configuration: <DeepExtract> self._record_writer.write_message('INFO', self.name + ' command configuration: ' + str(self._configuration), *args) </DeepExtract> debug(' command configuration: %s', self._configuration) except SystemExit: self._record_writer.write_metadata(self._configuration) <DeepExtract> self._record_writer.flush(finished=True) </DeepExtract> raise except: self._record_writer.write_metadata(self._configuration) <DeepExtract> (error_type, error, tb) = sys.exc_info() origin = tb while origin.tb_next is not None: origin = origin.tb_next filename = origin.tb_frame.f_code.co_filename lineno = origin.tb_lineno message = '{0} at "{1}", line {2:d} : {3}'.format(error_type.__name__, filename, lineno, error) environment.splunklib_logger.error(message + '\nTraceback:\n' + ''.join(traceback.format_tb(tb))) self.write_error(message) </DeepExtract> <DeepExtract> self._record_writer.flush(finished=True) </DeepExtract> exit(1) self._record_writer.write_metadata(self._configuration) try: debug('Executing under protocol_version=2') self._records = self._records_protocol_v2 self._metadata.action = 'execute' <DeepExtract> self._record_writer.write_records(None(self._records(ifile))) self.finish() </DeepExtract> except SystemExit: <DeepExtract> 
self._record_writer.flush(finished=True) </DeepExtract> raise except: <DeepExtract> (error_type, error, tb) = sys.exc_info() origin = tb while origin.tb_next is not None: origin = origin.tb_next filename = origin.tb_frame.f_code.co_filename lineno = origin.tb_lineno message = '{0} at "{1}", line {2:d} : {3}'.format(error_type.__name__, filename, lineno, error) environment.splunklib_logger.error(message + '\nTraceback:\n' + ''.join(traceback.format_tb(tb))) self.write_error(message) </DeepExtract> <DeepExtract> self._record_writer.flush(finished=True) </DeepExtract> exit(1) debug('%s.process completed', class_name)
def _process_protocol_v2(argv, ifile, ofile): """ Processes records on the `input stream optionally writing records to the output stream. :param ifile: Input file object. :type ifile: file or InputType :param ofile: Output file object. :type ofile: file or OutputType :return: :const:`None` """ debug = environment.splunklib_logger.debug class_name = self.__class__.__name__ debug('%s.process started under protocol_version=2', class_name) self._protocol_version = 2 try: debug('Reading metadata') try: header = ifile.readline() except Exception as error: raise RuntimeError('Failed to read transport header: {}'.format(error)) if not header: (metadata, body) = None match = SearchCommand._header.match(header) if match is None: raise RuntimeError('Failed to parse transport header: {}'.format(header)) (metadata_length, body_length) = match.groups() metadata_length = int(metadata_length) body_length = int(body_length) try: metadata = ifile.read(metadata_length) except Exception as error: raise RuntimeError('Failed to read metadata of length {}: {}'.format(metadata_length, error)) decoder = MetadataDecoder() try: metadata = decoder.decode(metadata) except Exception as error: raise RuntimeError('Failed to parse metadata of length {}: {}'.format(metadata_length, error)) body = '' try: if body_length > 0: body = ifile.read(body_length) except Exception as error: raise RuntimeError('Failed to read body of length {}: {}'.format(body_length, error)) (metadata, body) = (metadata, body) action = getattr(metadata, 'action', None) if action != 'getinfo': raise RuntimeError('Expected getinfo action, not {}'.format(action)) if len(body) > 0: raise RuntimeError('Did not expect data for getinfo action') self._metadata = deepcopy(metadata) searchinfo = self._metadata.searchinfo searchinfo.earliest_time = float(searchinfo.earliest_time) searchinfo.latest_time = float(searchinfo.latest_time) searchinfo.search = unquote(searchinfo.search) metadata = self._metadata searchinfo = metadata.searchinfo self._input_header.update(allowStream=None, infoPath=os.path.join(searchinfo.dispatch_dir, 'info.csv'), keywords=None, preview=metadata.preview, realtime=searchinfo.earliest_time != 0 and searchinfo.latest_time != 0, search=searchinfo.search, sid=searchinfo.sid, splunkVersion=searchinfo.splunk_version, truncated=None) debug(' metadata=%r, input_header=%r', self._metadata, self._input_header) try: tempfile.tempdir = self._metadata.searchinfo.dispatch_dir except AttributeError: raise RuntimeError('%s.metadata.searchinfo.dispatch_dir is undefined'.format(class_name)) debug(' tempfile.tempdir=%r', tempfile.tempdir) except: self._record_writer = RecordWriterV2(ofile) (error_type, error, tb) = sys.exc_info() origin = tb while origin.tb_next is not None: origin = origin.tb_next filename = origin.tb_frame.f_code.co_filename lineno = origin.tb_lineno message = '{0} at "{1}", line {2:d} : {3}'.format(error_type.__name__, filename, lineno, error) environment.splunklib_logger.error(message + '\nTraceback:\n' + ''.join(traceback.format_tb(tb))) self.write_error(message) self._record_writer.flush(finished=True) exit(1) try: self._record_writer = RecordWriterV2(ofile, getattr(self._metadata.searchinfo, 'maxresultrows', None)) self.fieldnames = [] self.options.reset() args = self.metadata.searchinfo.args error_count = 0 debug('Parsing arguments') if args and type(args) == list: for arg in args: result = arg.split('=', 1) if len(result) == 1: self.fieldnames.append(str(result[0])) else: (name, value) = result name = str(name) try: option = 
self.options[name] except KeyError: self._record_writer.write_message('ERROR', 'Unrecognized option: {}={}'.format(name, value), *args) error_count += 1 continue try: option.value = value except ValueError: self._record_writer.write_message('ERROR', 'Illegal value: {}={}'.format(name, value), *args) error_count += 1 continue missing = self.options.get_missing() if missing is not None: if len(missing) == 1: self._record_writer.write_message('ERROR', 'A value for "{}" is required'.format(missing[0]), *args) else: self._record_writer.write_message('ERROR', 'Values for these required options are missing: {}'.format(', '.join(missing)), *args) error_count += 1 if error_count > 0: exit(1) debug(' command: %s', six.text_type(self)) debug('Preparing for execution') pass if self.record: recordings = os.path.join(environment.splunk_home, 'var', 'run', 'splunklib.searchcommands', 'recordings') if not os.path.isdir(recordings): os.makedirs(recordings) recording = os.path.join(recordings, self.__class__.__name__ + '-' + repr(time()) + '.' + self._metadata.action) ifile = Recorder(recording + '.input', ifile) ofile = Recorder(recording + '.output', ofile) dispatch_dir = self._metadata.searchinfo.dispatch_dir if dispatch_dir is not None: (root_dir, base_dir) = os.path.split(dispatch_dir) make_archive(recording + '.dispatch_dir', 'gztar', root_dir, base_dir, logger=self.logger) with open(recording + '.splunk_cmd', 'wb') as f: f.write('splunk cmd python '.encode()) f.write(os.path.basename(argv[0]).encode()) for arg in islice(argv, 1, len(argv)): f.write(' '.encode()) f.write(arg.encode()) (ifile, ofile) = (ifile, ofile) self._record_writer.ofile = ofile info = self._metadata.searchinfo for attr in ('args', 'raw_args'): setattr(info, attr, [arg for arg in getattr(info, attr) if not arg.startswith('record=')]) metadata = MetadataEncoder().encode(self._metadata) ifile.record('chunked 1.0,', six.text_type(len(metadata)), ',0\n', metadata) if self.show_configuration: self._record_writer.write_message('INFO', self.name + ' command configuration: ' + str(self._configuration), *args) debug(' command configuration: %s', self._configuration) except SystemExit: self._record_writer.write_metadata(self._configuration) self._record_writer.flush(finished=True) raise except: self._record_writer.write_metadata(self._configuration) (error_type, error, tb) = sys.exc_info() origin = tb while origin.tb_next is not None: origin = origin.tb_next filename = origin.tb_frame.f_code.co_filename lineno = origin.tb_lineno message = '{0} at "{1}", line {2:d} : {3}'.format(error_type.__name__, filename, lineno, error) environment.splunklib_logger.error(message + '\nTraceback:\n' + ''.join(traceback.format_tb(tb))) self.write_error(message) self._record_writer.flush(finished=True) exit(1) self._record_writer.write_metadata(self._configuration) try: debug('Executing under protocol_version=2') self._records = self._records_protocol_v2 self._metadata.action = 'execute' self._record_writer.write_records(None(self._records(ifile))) self.finish() except SystemExit: self._record_writer.flush(finished=True) raise except: (error_type, error, tb) = sys.exc_info() origin = tb while origin.tb_next is not None: origin = origin.tb_next filename = origin.tb_frame.f_code.co_filename lineno = origin.tb_lineno message = '{0} at "{1}", line {2:d} : {3}'.format(error_type.__name__, filename, lineno, error) environment.splunklib_logger.error(message + '\nTraceback:\n' + ''.join(traceback.format_tb(tb))) self.write_error(message) 
self._record_writer.flush(finished=True) exit(1) debug('%s.process completed', class_name)
CobaltSplunk
positive
def matchCallback(self, path): self.index += 1 if path.status is not None: if path.status not in self.excludeStatusCodes and (self.blacklists.get(path.status) is None or path.path not in self.blacklists.get(path.status)): self.output.statusReport(path.path, path.response) <DeepExtract> if not self.recursive: return False if path.path.endswith('/'): if path.path in [directory + '/' for directory in self.excludeSubdirs]: return False self.directories.put(self.currentDirectory + path.path) return True else: return False </DeepExtract> self.reportManager.addPath(self.currentDirectory + path.path, path.status, path.response) self.reportManager.save() del path
def matchCallback(self, path): self.index += 1 if path.status is not None: if path.status not in self.excludeStatusCodes and (self.blacklists.get(path.status) is None or path.path not in self.blacklists.get(path.status)): self.output.statusReport(path.path, path.response) if not self.recursive: return False if path.path.endswith('/'): if path.path in [directory + '/' for directory in self.excludeSubdirs]: return False self.directories.put(self.currentDirectory + path.path) return True else: return False self.reportManager.addPath(self.currentDirectory + path.path, path.status, path.response) self.reportManager.save() del path
BruteSploit
positive
def test_dtypes_are_correct_with_mixed_precision(self): tf.keras.mixed_precision.set_global_policy('mixed_float16') try: <DeepExtract> em = continuous_indexed.ContinuousIndexedEntropyModel(prior_fn, (2, 3, 5), dict(loc=lambda i: i[..., :2] - [0.0, 1.5], scale=lambda _: scale, weight=lambda i: tf.nn.softmax((i[..., 2:] - 2.0) * [-1.0, 1.0])), coding_rank, **kwargs) </DeepExtract> self.assertIsInstance(em.prior, uniform_noise.NoisyLogisticMixture) self.assertEqual(em.bottleneck_dtype, tf.float16) self.assertEqual(em.prior.dtype, tf.float64) self.assertEqual(em.prior_dtype, tf.float64) <DeepExtract> x = tf.random.stateless_uniform((2, 5), minval=0.0, maxval=1.0, seed=(0, 1), dtype=tf.float16) s = tf.random.stateless_uniform((2, 5), minval=-1.0, maxval=1.0, seed=(1, 2), dtype=tf.float16) u = tf.random.stateless_uniform((2, 5), minval=-0.5, maxval=0.5, seed=(3, 4), dtype=tf.float16) x = (tf.math.log(x) * tf.math.sign(s) + u) * scale indexes = tf.random.stateless_uniform(tuple((2, 5)) + (3,), minval=-0.4, maxval=(2.4, 3.4, 5), seed=(5, 6), dtype=tf.float32) (x, indexes) = (x, indexes) </DeepExtract> (x_tilde, bits) = em(x, indexes) bitstring = em.compress(x, indexes) x_hat = em.decompress(bitstring, indexes) self.assertEqual(x_hat.dtype, tf.float16) self.assertAllClose(x, x_hat, rtol=0, atol=0.5) self.assertEqual(x_tilde.dtype, tf.float16) self.assertAllClose(x, x_tilde, rtol=0, atol=0.5) self.assertEqual(bits.dtype, tf.float64) self.assertEqual(bits.shape, (2,)) self.assertAllGreaterEqual(bits, 0.0) finally: tf.keras.mixed_precision.set_global_policy(None)
def test_dtypes_are_correct_with_mixed_precision(self): tf.keras.mixed_precision.set_global_policy('mixed_float16') try: em = continuous_indexed.ContinuousIndexedEntropyModel(prior_fn, (2, 3, 5), dict(loc=lambda i: i[..., :2] - [0.0, 1.5], scale=lambda _: scale, weight=lambda i: tf.nn.softmax((i[..., 2:] - 2.0) * [-1.0, 1.0])), coding_rank, **kwargs) self.assertIsInstance(em.prior, uniform_noise.NoisyLogisticMixture) self.assertEqual(em.bottleneck_dtype, tf.float16) self.assertEqual(em.prior.dtype, tf.float64) self.assertEqual(em.prior_dtype, tf.float64) x = tf.random.stateless_uniform((2, 5), minval=0.0, maxval=1.0, seed=(0, 1), dtype=tf.float16) s = tf.random.stateless_uniform((2, 5), minval=-1.0, maxval=1.0, seed=(1, 2), dtype=tf.float16) u = tf.random.stateless_uniform((2, 5), minval=-0.5, maxval=0.5, seed=(3, 4), dtype=tf.float16) x = (tf.math.log(x) * tf.math.sign(s) + u) * scale indexes = tf.random.stateless_uniform(tuple((2, 5)) + (3,), minval=-0.4, maxval=(2.4, 3.4, 5), seed=(5, 6), dtype=tf.float32) (x, indexes) = (x, indexes) (x_tilde, bits) = em(x, indexes) bitstring = em.compress(x, indexes) x_hat = em.decompress(bitstring, indexes) self.assertEqual(x_hat.dtype, tf.float16) self.assertAllClose(x, x_hat, rtol=0, atol=0.5) self.assertEqual(x_tilde.dtype, tf.float16) self.assertAllClose(x, x_tilde, rtol=0, atol=0.5) self.assertEqual(bits.dtype, tf.float64) self.assertEqual(bits.shape, (2,)) self.assertAllGreaterEqual(bits, 0.0) finally: tf.keras.mixed_precision.set_global_policy(None)
compression
positive
def __init__(self, num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs): super(SqueezeNet, self).__init__() self.loss = loss self.feature_dim = 512 if version not in [1.0, 1.1]: raise ValueError('Unsupported SqueezeNet version {version}:1.0 or 1.1 expected'.format(version=version)) if version == 1.0: self.features = nn.Sequential(nn.Conv2d(3, 96, kernel_size=7, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(96, 16, 64, 64), Fire(128, 16, 64, 64), Fire(128, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 32, 128, 128), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(512, 64, 256, 256)) else: self.features = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(64, 16, 64, 64), Fire(128, 16, 64, 64), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(128, 32, 128, 128), Fire(256, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), Fire(512, 64, 256, 256)) self.global_avgpool = nn.AdaptiveAvgPool2d(1) <DeepExtract> if fc_dims is None: self.feature_dim = 512 self.fc = None assert isinstance(fc_dims, (list, tuple)), 'fc_dims must be either list or tuple, but got {}'.format(type(fc_dims)) layers = [] for dim in fc_dims: layers.append(nn.Linear(512, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) 512 = dim self.feature_dim = fc_dims[-1] self.fc = nn.Sequential(*layers) </DeepExtract> self.classifier = nn.Linear(self.feature_dim, num_classes) <DeepExtract> for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0) </DeepExtract>
def __init__(self, num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs): super(SqueezeNet, self).__init__() self.loss = loss self.feature_dim = 512 if version not in [1.0, 1.1]: raise ValueError('Unsupported SqueezeNet version {version}:1.0 or 1.1 expected'.format(version=version)) if version == 1.0: self.features = nn.Sequential(nn.Conv2d(3, 96, kernel_size=7, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(96, 16, 64, 64), Fire(128, 16, 64, 64), Fire(128, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 32, 128, 128), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(512, 64, 256, 256)) else: self.features = nn.Sequential(nn.Conv2d(3, 64, kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(64, 16, 64, 64), Fire(128, 16, 64, 64), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(128, 32, 128, 128), Fire(256, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), Fire(512, 64, 256, 256)) self.global_avgpool = nn.AdaptiveAvgPool2d(1) if fc_dims is None: self.feature_dim = 512 self.fc = None assert isinstance(fc_dims, (list, tuple)), 'fc_dims must be either list or tuple, but got {}'.format(type(fc_dims)) layers = [] for dim in fc_dims: layers.append(nn.Linear(512, dim)) layers.append(nn.BatchNorm1d(dim)) layers.append(nn.ReLU(inplace=True)) if dropout_p is not None: layers.append(nn.Dropout(p=dropout_p)) 512 = dim self.feature_dim = fc_dims[-1] self.fc = nn.Sequential(*layers) self.classifier = nn.Linear(self.feature_dim, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) if m.bias is not None: nn.init.constant_(m.bias, 0)
ABD-Net
positive
def create_js_files(): js_file_dec = open('mysite/static/Emu86/sample_functions.js', 'w') <DeepExtract> file_code = '' for file_name in function_names: function_code = create_function_def(file_name, function_names, DEC) count = 0 function_code += "(flavor) {\n\tlet codeString = '';" for dire in ALL_TEST_DIRS: if count < WASM: function_code += create_cond_line(count) else: if file_name != 'sum_test.asm' and file_name != 'area.asm': break function_code += '\n\telse{\n' sample_test = open(dire + file_name, 'r') function_code += '\t\tcodeString += ' if DEC == DEC and count in DEC_LANGS or (DEC == HEX and count != 0 and (count != 1) and (count != 5)) or count == 3: function_code += repr(sample_test.read()) else: sample_conv = '' for line in sample_test: if line.strip() == '': sample_conv += line elif line.strip()[0] == ';': sample_conv += line elif DEC == DEC: sample_conv += convert_line_hex_to_dec(line) else: sample_conv += convert_line_dec_to_hex(line) function_code += repr(sample_conv) sample_test.close() function_code += ';\n\t}' count += 1 function_code += "\n\tdocument.getElementById('id_code')" function_code += '.value = codeString;\n}' file_code += function_code + '\n' file_code = file_code </DeepExtract> file_code += sample_dir(intel_function_names, INTEL_TEST_DIRS, DEC) js_file_dec.write(file_code) js_file_dec.close() js_file_hex = open('mysite/static/Emu86/sample_functions_hex.js', 'w') <DeepExtract> file_code = '' for file_name in function_names: function_code = create_function_def(file_name, function_names, HEX) count = 0 function_code += "(flavor) {\n\tlet codeString = '';" for dire in ALL_TEST_DIRS: if count < WASM: function_code += create_cond_line(count) else: if file_name != 'sum_test.asm' and file_name != 'area.asm': break function_code += '\n\telse{\n' sample_test = open(dire + file_name, 'r') function_code += '\t\tcodeString += ' if HEX == DEC and count in DEC_LANGS or (HEX == HEX and count != 0 and (count != 1) and (count != 5)) or count == 3: function_code += repr(sample_test.read()) else: sample_conv = '' for line in sample_test: if line.strip() == '': sample_conv += line elif line.strip()[0] == ';': sample_conv += line elif HEX == DEC: sample_conv += convert_line_hex_to_dec(line) else: sample_conv += convert_line_dec_to_hex(line) function_code += repr(sample_conv) sample_test.close() function_code += ';\n\t}' count += 1 function_code += "\n\tdocument.getElementById('id_code')" function_code += '.value = codeString;\n}' file_code += function_code + '\n' file_code = file_code </DeepExtract> file_code += sample_dir(intel_function_names, INTEL_TEST_DIRS, HEX) js_file_hex.write(file_code) js_file_hex.close() js_file_fp = open('mysite/static/Emu86/sample_functions_fp.js', 'w') <DeepExtract> file_code = '' for file_name in fp_function_names: count = 0 function_code = f'function {fp_function_names[file_name]}(flavor)' function_code += "{\n\tlet codeString = '';" for dire in FP_DIRS: sample_test = open(dire + file_name, 'r') if count == 0: function_code += "\n\tif (flavor === 'intel'){" elif count == 1: function_code += "\n\telse if (flavor == 'att'){" else: function_code += "\n\telse if (flavor == 'mips_asm'){" function_code += '\n\t\tcodeString += ' function_code += repr(sample_test.read()) sample_test.close() function_code += ';\n\t}' count += 1 function_code += "\n\tdocument.getElementById('id_code')" function_code += '.value = codeString;\n}' file_code += function_code + '\n' file_code = file_code </DeepExtract> file_code += function_directory_fp(intel_function_names, 
INTEL_TEST_DIRS) js_file_fp.write(file_code) js_file_fp.close()
def create_js_files(): js_file_dec = open('mysite/static/Emu86/sample_functions.js', 'w') file_code = '' for file_name in function_names: function_code = create_function_def(file_name, function_names, DEC) count = 0 function_code += "(flavor) {\n\tlet codeString = '';" for dire in ALL_TEST_DIRS: if count < WASM: function_code += create_cond_line(count) else: if file_name != 'sum_test.asm' and file_name != 'area.asm': break function_code += '\n\telse{\n' sample_test = open(dire + file_name, 'r') function_code += '\t\tcodeString += ' if DEC == DEC and count in DEC_LANGS or (DEC == HEX and count != 0 and (count != 1) and (count != 5)) or count == 3: function_code += repr(sample_test.read()) else: sample_conv = '' for line in sample_test: if line.strip() == '': sample_conv += line elif line.strip()[0] == ';': sample_conv += line elif DEC == DEC: sample_conv += convert_line_hex_to_dec(line) else: sample_conv += convert_line_dec_to_hex(line) function_code += repr(sample_conv) sample_test.close() function_code += ';\n\t}' count += 1 function_code += "\n\tdocument.getElementById('id_code')" function_code += '.value = codeString;\n}' file_code += function_code + '\n' file_code = file_code file_code += sample_dir(intel_function_names, INTEL_TEST_DIRS, DEC) js_file_dec.write(file_code) js_file_dec.close() js_file_hex = open('mysite/static/Emu86/sample_functions_hex.js', 'w') file_code = '' for file_name in function_names: function_code = create_function_def(file_name, function_names, HEX) count = 0 function_code += "(flavor) {\n\tlet codeString = '';" for dire in ALL_TEST_DIRS: if count < WASM: function_code += create_cond_line(count) else: if file_name != 'sum_test.asm' and file_name != 'area.asm': break function_code += '\n\telse{\n' sample_test = open(dire + file_name, 'r') function_code += '\t\tcodeString += ' if HEX == DEC and count in DEC_LANGS or (HEX == HEX and count != 0 and (count != 1) and (count != 5)) or count == 3: function_code += repr(sample_test.read()) else: sample_conv = '' for line in sample_test: if line.strip() == '': sample_conv += line elif line.strip()[0] == ';': sample_conv += line elif HEX == DEC: sample_conv += convert_line_hex_to_dec(line) else: sample_conv += convert_line_dec_to_hex(line) function_code += repr(sample_conv) sample_test.close() function_code += ';\n\t}' count += 1 function_code += "\n\tdocument.getElementById('id_code')" function_code += '.value = codeString;\n}' file_code += function_code + '\n' file_code = file_code file_code += sample_dir(intel_function_names, INTEL_TEST_DIRS, HEX) js_file_hex.write(file_code) js_file_hex.close() js_file_fp = open('mysite/static/Emu86/sample_functions_fp.js', 'w') file_code = '' for file_name in fp_function_names: count = 0 function_code = f'function {fp_function_names[file_name]}(flavor)' function_code += "{\n\tlet codeString = '';" for dire in FP_DIRS: sample_test = open(dire + file_name, 'r') if count == 0: function_code += "\n\tif (flavor === 'intel'){" elif count == 1: function_code += "\n\telse if (flavor == 'att'){" else: function_code += "\n\telse if (flavor == 'mips_asm'){" function_code += '\n\t\tcodeString += ' function_code += repr(sample_test.read()) sample_test.close() function_code += ';\n\t}' count += 1 function_code += "\n\tdocument.getElementById('id_code')" function_code += '.value = codeString;\n}' file_code += function_code + '\n' file_code = file_code file_code += function_directory_fp(intel_function_names, INTEL_TEST_DIRS) js_file_fp.write(file_code) js_file_fp.close()
Emu86
positive
def set_l2s_fp(file: BufferedIOBase): <DeepExtract> tmp_l2s_path = self.path.joinpath('tmp_l2s.txt') with tmp_l2s_path.open(mode='wb') as fout: fout.write(file.read()) with tmp_l2s_path.open(mode='r') as file_raw, self.l2s_path.open(mode='w', encoding='utf-8') as file_translated: file_translated.write(file_raw.read().replace('\r\n', '\n')) if os.path.exists(tmp_l2s_path): os.remove(tmp_l2s_path) self.config['l2s'] = True </DeepExtract> self.config['l2s'] = True
def set_l2s_fp(file: BufferedIOBase): tmp_l2s_path = self.path.joinpath('tmp_l2s.txt') with tmp_l2s_path.open(mode='wb') as fout: fout.write(file.read()) with tmp_l2s_path.open(mode='r') as file_raw, self.l2s_path.open(mode='w', encoding='utf-8') as file_translated: file_translated.write(file_raw.read().replace('\r\n', '\n')) if os.path.exists(tmp_l2s_path): os.remove(tmp_l2s_path) self.config['l2s'] = True self.config['l2s'] = True
elpis
positive
def test_script_not_js(self): markup = '\n <script type="text/x-jquery-tmpl">foo</script>\n ' <DeepExtract> self.setup_err() self.err.save_resource('app_type', app_type) markuptester.MarkupParser(self.err, debug=True).process('', markup) </DeepExtract> self.assert_silent() <DeepExtract> self.setup_err() self.err.save_resource('app_type', 'privileged') markuptester.MarkupParser(self.err, debug=True).process('', markup) </DeepExtract> self.assert_silent()
def test_script_not_js(self): markup = '\n <script type="text/x-jquery-tmpl">foo</script>\n ' self.setup_err() self.err.save_resource('app_type', app_type) markuptester.MarkupParser(self.err, debug=True).process('', markup) self.assert_silent() self.setup_err() self.err.save_resource('app_type', 'privileged') markuptester.MarkupParser(self.err, debug=True).process('', markup) self.assert_silent()
app-validator
positive
def test_list_interfaces_tcp(self): with client.connector(host=self.enip_server_tcp.addr, port=self.enip_server_tcp.port, timeout=4.0, udp=False, broadcast=False) as conn: conn.list_interfaces() conn.shutdown() <DeepExtract> (response, _) = client.await_response(conn, timeout=4.0) response = response['enip']['CIP']['list_interfaces']['CPF'] </DeepExtract> self.assertDictEqual({'count': 0}, response)
def test_list_interfaces_tcp(self): with client.connector(host=self.enip_server_tcp.addr, port=self.enip_server_tcp.port, timeout=4.0, udp=False, broadcast=False) as conn: conn.list_interfaces() conn.shutdown() (response, _) = client.await_response(conn, timeout=4.0) response = response['enip']['CIP']['list_interfaces']['CPF'] self.assertDictEqual({'count': 0}, response)
conpot
positive
def SetPolys(self, polys): """Set polys. Parameters ---------- polys : 1D or 2D ndarray If 2D, shape = (n_points, n), and n is the number of points per poly. All polys must use the same number of points. """ if isinstance(polys, np.ndarray): <DeepExtract> if polys.ndim == 1: offset = 0 n_cells = 0 while offset < polys.size: offset += polys[offset] + 1 n_cells += 1 vtk_cells = polys else: (n_cells, n_points_cell) = polys.shape vtk_cells = np.empty((n_cells, n_points_cell + 1), dtype=np.uintp) vtk_cells[:, 0] = n_points_cell vtk_cells[:, 1:] = polys vtk_cells = vtk_cells.ravel() ca = BSCellArray() ca.SetCells(n_cells, vtk_cells) polys = ca.VTKObject </DeepExtract> self.VTKObject.SetPolys(polys)
def SetPolys(self, polys): """Set polys. Parameters ---------- polys : 1D or 2D ndarray If 2D, shape = (n_points, n), and n is the number of points per poly. All polys must use the same number of points. """ if isinstance(polys, np.ndarray): if polys.ndim == 1: offset = 0 n_cells = 0 while offset < polys.size: offset += polys[offset] + 1 n_cells += 1 vtk_cells = polys else: (n_cells, n_points_cell) = polys.shape vtk_cells = np.empty((n_cells, n_points_cell + 1), dtype=np.uintp) vtk_cells[:, 0] = n_points_cell vtk_cells[:, 1:] = polys vtk_cells = vtk_cells.ravel() ca = BSCellArray() ca.SetCells(n_cells, vtk_cells) polys = ca.VTKObject self.VTKObject.SetPolys(polys)
BrainSpace
positive
def print_top3_scores(filename): <DeepExtract> with open(filename, 'r') as f: row = list(csv.DictReader(f, delimiter=';'))[0] sorted_items = sorted(row.items(), key=lambda x: x[1]) top3 = list(reversed(sorted_items[-4:-1])) top3 = [(topic_map[k], float(v)) for (k, v) in top3 if topic_map[k] != 'noise'] top3 = top3 </DeepExtract> for (k, v) in top3: print('{}\t{}\t{}'.format(topic_map[k], k, v))
def print_top3_scores(filename): with open(filename, 'r') as f: row = list(csv.DictReader(f, delimiter=';'))[0] sorted_items = sorted(row.items(), key=lambda x: x[1]) top3 = list(reversed(sorted_items[-4:-1])) top3 = [(topic_map[k], float(v)) for (k, v) in top3 if topic_map[k] != 'noise'] top3 = top3 for (k, v) in top3: print('{}\t{}\t{}'.format(topic_map[k], k, v))
convai-bot-1337
positive
def add_default_params(self, params): params['SignatureVersion'] = '2' params['SignatureMethod'] = 'HmacSHA256' params['AWSAccessKeyId'] = self.user_id params['Version'] = API_VERSION params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) <DeepExtract> keys = params.keys() keys.sort() pairs = [] for key in keys: pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(params[key], safe='-_~')) qs = '&'.join(pairs) string_to_sign = '\n'.join(('GET', self.host, path, qs)) b64_hmac = base64.b64encode(hmac.new(self.key, string_to_sign, digestmod=sha256).digest()) params['Signature'] = b64_hmac </DeepExtract> return params
def add_default_params(self, params): params['SignatureVersion'] = '2' params['SignatureMethod'] = 'HmacSHA256' params['AWSAccessKeyId'] = self.user_id params['Version'] = API_VERSION params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) keys = params.keys() keys.sort() pairs = [] for key in keys: pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(params[key], safe='-_~')) qs = '&'.join(pairs) string_to_sign = '\n'.join(('GET', self.host, path, qs)) b64_hmac = base64.b64encode(hmac.new(self.key, string_to_sign, digestmod=sha256).digest()) params['Signature'] = b64_hmac return params
AEServmon
positive
def handle(self, *args, **options): if 'tut_md' not in options or len(options['tut_md']) == 0: raise CommandError('Please specify a tutorial.md file to ingest') for md in options['tut_md']: print('\n\nProcessing: %s' % md) <DeepExtract> if not os.path.exists(md): raise CommandError("File doesn't exist: %s" % md) with open(md, 'r', encoding='utf-8') as fp: lines = fp.readlines() first = True second = False counter = 0 frontmatter = {} for line in lines: if '---' in line and first: first = False second = True continue if '---' in line and second: second = False break if not first and second: data = line.split(':', 1) key = data[0].strip() value = data[1].strip().strip('"') frontmatter[key] = value counter += 1 if 'date' in frontmatter: frontmatter['date'] = datetime.strptime(frontmatter['date'], '%Y-%m-%d %H:%M:%S') if 'featured' in frontmatter: frontmatter['featured'] = True if frontmatter['featured'] == 'true' else False else: frontmatter['featured'] = False if 'part' in frontmatter: frontmatter['part'] = int(frontmatter['part']) if 'track_part' in frontmatter: frontmatter['track_part'] = int(frontmatter['track_part']) if 'draft' in frontmatter: frontmatter['draft'] = frontmatter['draft'].lower() == 'true' counter += 1 content_lines = ''.join(lines[counter + 1:]) html = markdown.markdown(content_lines, extensions=get_markdown_extensions()) (frontmatter, content, content_md) = (frontmatter, html, md) </DeepExtract> <DeepExtract> for key in ['title', 'post_image', 'excerpt', 'date', 'category']: if key not in frontmatter: raise CommandError("No '%s' found in frontmatter" % key) </DeepExtract> <DeepExtract> for (key, value) in frontmatter.items(): self.stdout.write('%10s: %s' % (key, value)) </DeepExtract> self.stdout.write('Parsing markdown successful!') if 'draft' in frontmatter and frontmatter['draft']: from django.conf import settings if settings.DEBUG: self.stdout.write(' [DRAFT] This tutorial is not complete and is currently') self.stdout.write(' being worked on. Ingesting in DEBUG mode.') else: self.stdout.write(' [DRAFT] This tutorial is not complete and is currently') self.stdout.write(' being worked on. 
Skipping.') return if frontmatter['category'] == 'Uncategorized': raise CommandError('No category specified') try: category = Category.objects.get(title=frontmatter['category']) except Category.DoesNotExist as e: raise CommandError("Category doesn't exist: %s" % frontmatter['category']) try: user = User.objects.get(email=frontmatter['author']) except User.DoesNotExist as e: raise CommandError('User does not exist: %s' % frontmatter['author']) <DeepExtract> ret = os.path.basename(md).lower() ret = ret.split('.')[0] ret = re.sub('[\\?\\!_+:()]', '', ret) parts = ret.split('-') for word in stop_words: if word in parts: parts = list(filter(lambda x: x != word, parts)) ret = '-'.join(parts) ret = ret.replace('/', '-') slug = ret </DeepExtract> print('Slug: \t%s' % slug) try: tutorial = Tutorial.objects.get(slug=slug) self.stdout.write('Tutorial already exists') tutorial.title = frontmatter['title'] tutorial.date = frontmatter['date'] tutorial.excerpt = frontmatter['excerpt'] tutorial.category = category tutorial.slug = slug tutorial.post_image = frontmatter['post_image'] tutorial.content = content tutorial.content_md = content_md tutorial.author = user tutorial.featured = frontmatter['featured'] tutorial.series = None except Tutorial.DoesNotExist as e: self.stdout.write('Tutorial does not exist - trying to create it') tutorial = Tutorial(title=frontmatter['title'], date=frontmatter['date'], excerpt=frontmatter['excerpt'], category=category, author=user, slug=slug, post_image=frontmatter['post_image'], content=content, featured=frontmatter['featured']) if frontmatter['post_image']: from django.conf import settings fp = settings.STATIC_ROOT.replace(settings.STATIC_URL, '') filepath = os.path.join(fp, frontmatter['post_image'][1:]) im = Image.open(filepath) im = im.convert(mode='RGB') (width, height) = im.size upper = 0 lower = height left = 0 right = 0 if 'thumb_pull' in frontmatter and frontmatter['thumb_pull'] == 'right': left = width - height right = width elif 'thumb_pull' in frontmatter and frontmatter['thumb_pull'] == 'left': left = 0 right = height else: left = (width - height) / 2 right = (width + height) / 2 thumb = im.crop((left, upper, right, lower)) parts = filepath.split('/')[-1].split('.') small_filepath = '%s%s/%s' % (settings.STATIC_ROOT, 'thumb', '.'.join(parts)) thumb_big = thumb.resize((200, 200), Image.BICUBIC).convert(mode='RGB') thumb_big.save(small_filepath) tutorial.post_thumb = '%s%s' % (settings.STATIC_URL, small_filepath.split(settings.STATIC_URL)[1]) tutorial.save() self.stdout.write('Tutorial update successful!') if 'series' in frontmatter: try: series = TutorialSeries.objects.get(name=frontmatter['series']) except TutorialSeries.DoesNotExist as e: self.stdout.write('Series "%s" does not exist' % frontmatter['series']) series = TutorialSeries(name=frontmatter['series']) series.save() tuts = series.tutorial_list() if tutorial not in tuts: order = TutorialSeriesOrder(series=series, tutorial=tutorial, order=frontmatter['part']) order.save() tutorial.series = series tutorial.save() if 'track' in frontmatter: try: track = Track.objects.get(title=frontmatter['track']) except Track.DoesNotExist as e: self.stdout.write('Track "%s" does not exist' % frontmatter['track']) track = Track(title=frontmatter['track']) track.save() tuts = track.tutorial_list() if tutorial not in tuts: order = TrackTutorials(track=track, tutorial=tutorial, order=frontmatter['track_part']) order.save() self.stdout.write('Next steps:') self.stdout.write(' * python mange.py parse_series') 
self.stdout.write(' * python mange.py update_related_tutorials')
def handle(self, *args, **options): if 'tut_md' not in options or len(options['tut_md']) == 0: raise CommandError('Please specify a tutorial.md file to ingest') for md in options['tut_md']: print('\n\nProcessing: %s' % md) if not os.path.exists(md): raise CommandError("File doesn't exist: %s" % md) with open(md, 'r', encoding='utf-8') as fp: lines = fp.readlines() first = True second = False counter = 0 frontmatter = {} for line in lines: if '---' in line and first: first = False second = True continue if '---' in line and second: second = False break if not first and second: data = line.split(':', 1) key = data[0].strip() value = data[1].strip().strip('"') frontmatter[key] = value counter += 1 if 'date' in frontmatter: frontmatter['date'] = datetime.strptime(frontmatter['date'], '%Y-%m-%d %H:%M:%S') if 'featured' in frontmatter: frontmatter['featured'] = True if frontmatter['featured'] == 'true' else False else: frontmatter['featured'] = False if 'part' in frontmatter: frontmatter['part'] = int(frontmatter['part']) if 'track_part' in frontmatter: frontmatter['track_part'] = int(frontmatter['track_part']) if 'draft' in frontmatter: frontmatter['draft'] = frontmatter['draft'].lower() == 'true' counter += 1 content_lines = ''.join(lines[counter + 1:]) html = markdown.markdown(content_lines, extensions=get_markdown_extensions()) (frontmatter, content, content_md) = (frontmatter, html, md) for key in ['title', 'post_image', 'excerpt', 'date', 'category']: if key not in frontmatter: raise CommandError("No '%s' found in frontmatter" % key) for (key, value) in frontmatter.items(): self.stdout.write('%10s: %s' % (key, value)) self.stdout.write('Parsing markdown successful!') if 'draft' in frontmatter and frontmatter['draft']: from django.conf import settings if settings.DEBUG: self.stdout.write(' [DRAFT] This tutorial is not complete and is currently') self.stdout.write(' being worked on. Ingesting in DEBUG mode.') else: self.stdout.write(' [DRAFT] This tutorial is not complete and is currently') self.stdout.write(' being worked on. 
Skipping.') return if frontmatter['category'] == 'Uncategorized': raise CommandError('No category specified') try: category = Category.objects.get(title=frontmatter['category']) except Category.DoesNotExist as e: raise CommandError("Category doesn't exist: %s" % frontmatter['category']) try: user = User.objects.get(email=frontmatter['author']) except User.DoesNotExist as e: raise CommandError('User does not exist: %s' % frontmatter['author']) ret = os.path.basename(md).lower() ret = ret.split('.')[0] ret = re.sub('[\\?\\!_+:()]', '', ret) parts = ret.split('-') for word in stop_words: if word in parts: parts = list(filter(lambda x: x != word, parts)) ret = '-'.join(parts) ret = ret.replace('/', '-') slug = ret print('Slug: \t%s' % slug) try: tutorial = Tutorial.objects.get(slug=slug) self.stdout.write('Tutorial already exists') tutorial.title = frontmatter['title'] tutorial.date = frontmatter['date'] tutorial.excerpt = frontmatter['excerpt'] tutorial.category = category tutorial.slug = slug tutorial.post_image = frontmatter['post_image'] tutorial.content = content tutorial.content_md = content_md tutorial.author = user tutorial.featured = frontmatter['featured'] tutorial.series = None except Tutorial.DoesNotExist as e: self.stdout.write('Tutorial does not exist - trying to create it') tutorial = Tutorial(title=frontmatter['title'], date=frontmatter['date'], excerpt=frontmatter['excerpt'], category=category, author=user, slug=slug, post_image=frontmatter['post_image'], content=content, featured=frontmatter['featured']) if frontmatter['post_image']: from django.conf import settings fp = settings.STATIC_ROOT.replace(settings.STATIC_URL, '') filepath = os.path.join(fp, frontmatter['post_image'][1:]) im = Image.open(filepath) im = im.convert(mode='RGB') (width, height) = im.size upper = 0 lower = height left = 0 right = 0 if 'thumb_pull' in frontmatter and frontmatter['thumb_pull'] == 'right': left = width - height right = width elif 'thumb_pull' in frontmatter and frontmatter['thumb_pull'] == 'left': left = 0 right = height else: left = (width - height) / 2 right = (width + height) / 2 thumb = im.crop((left, upper, right, lower)) parts = filepath.split('/')[-1].split('.') small_filepath = '%s%s/%s' % (settings.STATIC_ROOT, 'thumb', '.'.join(parts)) thumb_big = thumb.resize((200, 200), Image.BICUBIC).convert(mode='RGB') thumb_big.save(small_filepath) tutorial.post_thumb = '%s%s' % (settings.STATIC_URL, small_filepath.split(settings.STATIC_URL)[1]) tutorial.save() self.stdout.write('Tutorial update successful!') if 'series' in frontmatter: try: series = TutorialSeries.objects.get(name=frontmatter['series']) except TutorialSeries.DoesNotExist as e: self.stdout.write('Series "%s" does not exist' % frontmatter['series']) series = TutorialSeries(name=frontmatter['series']) series.save() tuts = series.tutorial_list() if tutorial not in tuts: order = TutorialSeriesOrder(series=series, tutorial=tutorial, order=frontmatter['part']) order.save() tutorial.series = series tutorial.save() if 'track' in frontmatter: try: track = Track.objects.get(title=frontmatter['track']) except Track.DoesNotExist as e: self.stdout.write('Track "%s" does not exist' % frontmatter['track']) track = Track(title=frontmatter['track']) track.save() tuts = track.tutorial_list() if tutorial not in tuts: order = TrackTutorials(track=track, tutorial=tutorial, order=frontmatter['track_part']) order.save() self.stdout.write('Next steps:') self.stdout.write(' * python mange.py parse_series') self.stdout.write(' * python mange.py 
update_related_tutorials')
aishack
positive
def __init__(self, config): raise NotImplementedError('BertViz does not currently support XLM') super(XLMModel, self).__init__(config) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.is_encoder = config.is_encoder self.is_decoder = not config.is_encoder if self.is_decoder: raise NotImplementedError('Currently XLM can only be used as an encoder') self.causal = config.causal self.n_langs = config.n_langs self.n_words = config.n_words self.eos_index = config.eos_index self.pad_index = config.pad_index self.dim = config.emb_dim self.hidden_dim = self.dim * 4 self.n_heads = config.n_heads self.n_layers = config.n_layers self.dropout = config.dropout self.attention_dropout = config.attention_dropout assert self.dim % self.n_heads == 0, 'transformer dim must be a multiple of n_heads' self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim) if config.sinusoidal_embeddings: <DeepExtract> position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / self.dim) for j in range(self.dim)] for pos in range(config.max_position_embeddings)]) self.position_embeddings.weight[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) self.position_embeddings.weight[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) self.position_embeddings.weight.detach_() self.position_embeddings.weight.requires_grad = False </DeepExtract> if config.n_langs > 1: self.lang_embeddings = nn.Embedding(self.n_langs, self.dim) self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index) self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps) self.attentions = nn.ModuleList() self.layer_norm1 = nn.ModuleList() self.ffns = nn.ModuleList() self.layer_norm2 = nn.ModuleList() for _ in range(self.n_layers): self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config)) self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config)) self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) self.apply(self.init_weights)
def __init__(self, config): raise NotImplementedError('BertViz does not currently support XLM') super(XLMModel, self).__init__(config) self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.is_encoder = config.is_encoder self.is_decoder = not config.is_encoder if self.is_decoder: raise NotImplementedError('Currently XLM can only be used as an encoder') self.causal = config.causal self.n_langs = config.n_langs self.n_words = config.n_words self.eos_index = config.eos_index self.pad_index = config.pad_index self.dim = config.emb_dim self.hidden_dim = self.dim * 4 self.n_heads = config.n_heads self.n_layers = config.n_layers self.dropout = config.dropout self.attention_dropout = config.attention_dropout assert self.dim % self.n_heads == 0, 'transformer dim must be a multiple of n_heads' self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim) if config.sinusoidal_embeddings: position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / self.dim) for j in range(self.dim)] for pos in range(config.max_position_embeddings)]) self.position_embeddings.weight[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) self.position_embeddings.weight[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) self.position_embeddings.weight.detach_() self.position_embeddings.weight.requires_grad = False if config.n_langs > 1: self.lang_embeddings = nn.Embedding(self.n_langs, self.dim) self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index) self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps) self.attentions = nn.ModuleList() self.layer_norm1 = nn.ModuleList() self.ffns = nn.ModuleList() self.layer_norm2 = nn.ModuleList() for _ in range(self.n_layers): self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config)) self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config)) self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) self.apply(self.init_weights)
bertviz
positive
def smart_update(graph_dict: Dict[str, Any]): """ Smartly update the legend based on the graph dictionary provided. If the graph dictionary doesn't have a hover-line enabled, then just update the legend with the latest data. If the hover-line is enabled, show what the graph dictionary holds at the hover-line's X position. :param graph_dict: Graph dictionary to use to smart update. """ if graph_dict.get('line') is None: <DeepExtract> legend = graph_dict['graph'].plotItem.legend.items date_object = datetime.utcfromtimestamp(graph_dict['plots'][0]['z'][-1]) total = f"X: {-1} Datetime in UTC: {date_object.strftime('%m/%d/%Y, %H:%M:%S')}" for (index, plot_dict) in enumerate(graph_dict['plots']): info = f" {plot_dict['name']}: {plot_dict['y'][-1]}" total += info legend[index][1].setText(info) graph_dict['label'].setText(total) </DeepExtract> else: x_pos = graph_dict['line'].getXPos() if x_pos == -1: <DeepExtract> legend = graph_dict['graph'].plotItem.legend.items date_object = datetime.utcfromtimestamp(graph_dict['plots'][0]['z'][-1]) total = f"X: {-1} Datetime in UTC: {date_object.strftime('%m/%d/%Y, %H:%M:%S')}" for (index, plot_dict) in enumerate(graph_dict['plots']): info = f" {plot_dict['name']}: {plot_dict['y'][-1]}" total += info legend[index][1].setText(info) graph_dict['label'].setText(total) </DeepExtract>
def smart_update(graph_dict: Dict[str, Any]): """ Smartly update the legend based on the graph dictionary provided. If the graph dictionary doesn't have a hover-line enabled, then just update the legend with the latest data. If the hover-line is enabled, show what the graph dictionary holds at the hover-line's X position. :param graph_dict: Graph dictionary to use to smart update. """ if graph_dict.get('line') is None: legend = graph_dict['graph'].plotItem.legend.items date_object = datetime.utcfromtimestamp(graph_dict['plots'][0]['z'][-1]) total = f"X: {-1} Datetime in UTC: {date_object.strftime('%m/%d/%Y, %H:%M:%S')}" for (index, plot_dict) in enumerate(graph_dict['plots']): info = f" {plot_dict['name']}: {plot_dict['y'][-1]}" total += info legend[index][1].setText(info) graph_dict['label'].setText(total) else: x_pos = graph_dict['line'].getXPos() if x_pos == -1: legend = graph_dict['graph'].plotItem.legend.items date_object = datetime.utcfromtimestamp(graph_dict['plots'][0]['z'][-1]) total = f"X: {-1} Datetime in UTC: {date_object.strftime('%m/%d/%Y, %H:%M:%S')}" for (index, plot_dict) in enumerate(graph_dict['plots']): info = f" {plot_dict['name']}: {plot_dict['y'][-1]}" total += info legend[index][1].setText(info) graph_dict['label'].setText(total)
algobot
positive
def reconstructed_nested_args(args, names, parser_hierarchy, prefix): """Reconstruct the arguments and pass them to the necessary subparsers.""" for (key, sub_parser) in parser_hierarchy.items(): if isinstance(sub_parser, dict): names[key] = {} <DeepExtract> for (key, sub_parser) in sub_parser.items(): if isinstance(sub_parser, dict): names[key][key] = {} reconstructed_nested_args(args, names[key][key], sub_parser, f"{f'{prefix}{key}.'}{key}.") else: sub_options = [action.dest for action in sub_parser._group_actions] sub_names = {name: value for (name, value) in args._get_kwargs() if name in sub_options} temp = argparse.Namespace(**sub_names) for (k, v) in temp.__dict__.items(): names[key][k.replace(f"{f'{prefix}{key}.'}", '')] = v return </DeepExtract> else: sub_options = [action.dest for action in sub_parser._group_actions] sub_names = {name: value for (name, value) in args._get_kwargs() if name in sub_options} temp = argparse.Namespace(**sub_names) for (k, v) in temp.__dict__.items(): names[k.replace(f'{prefix}', '')] = v return
def reconstructed_nested_args(args, names, parser_hierarchy, prefix): """Reconstruct the arguments and pass them to the necessary subparsers.""" for (key, sub_parser) in parser_hierarchy.items(): if isinstance(sub_parser, dict): names[key] = {} for (key, sub_parser) in sub_parser.items(): if isinstance(sub_parser, dict): names[key][key] = {} reconstructed_nested_args(args, names[key][key], sub_parser, f"{f'{prefix}{key}.'}{key}.") else: sub_options = [action.dest for action in sub_parser._group_actions] sub_names = {name: value for (name, value) in args._get_kwargs() if name in sub_options} temp = argparse.Namespace(**sub_names) for (k, v) in temp.__dict__.items(): names[key][k.replace(f"{f'{prefix}{key}.'}", '')] = v return else: sub_options = [action.dest for action in sub_parser._group_actions] sub_names = {name: value for (name, value) in args._get_kwargs() if name in sub_options} temp = argparse.Namespace(**sub_names) for (k, v) in temp.__dict__.items(): names[k.replace(f'{prefix}', '')] = v return
bootleg
positive
def _build_graph(jobs: Union[Dict[Job, List[Job]], List[WorkflowJob]]) -> Dict[WorkflowJob, List[WorkflowJob]]: if isinstance(jobs, list): <DeepExtract> graph_as_dict = collections.OrderedDict() if len(jobs) == 1: graph_as_dict[jobs[0]] = [] else: for i in range(1, len(jobs)): graph_as_dict[jobs[i - 1]] = [jobs[i]] job_graph = graph_as_dict </DeepExtract> elif isinstance(jobs, dict): job_graph = {self._map_to_workflow_job(source_job): [self._map_to_workflow_job(tj) for tj in target_jobs] for (source_job, target_jobs) in jobs.items()} else: raise ValueError('Job graph has to be dict or list') JobGraphValidator(job_graph).validate() return job_graph
def _build_graph(jobs: Union[Dict[Job, List[Job]], List[WorkflowJob]]) -> Dict[WorkflowJob, List[WorkflowJob]]: if isinstance(jobs, list): graph_as_dict = collections.OrderedDict() if len(jobs) == 1: graph_as_dict[jobs[0]] = [] else: for i in range(1, len(jobs)): graph_as_dict[jobs[i - 1]] = [jobs[i]] job_graph = graph_as_dict elif isinstance(jobs, dict): job_graph = {self._map_to_workflow_job(source_job): [self._map_to_workflow_job(tj) for tj in target_jobs] for (source_job, target_jobs) in jobs.items()} else: raise ValueError('Job graph has to be dict or list') JobGraphValidator(job_graph).validate() return job_graph
bigflow
positive
def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)): """Evaluation in COCO protocol. Args: results (list): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. classwise (bool): Whether to evaluating the AP for each class. proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). iou_thrs (Sequence[float]): IoU threshold used for evaluating recalls. If set to a list, the average recall of all IoUs will also be computed. Default: 0.5. Returns: dict[str: float] """ metrics = metric if isinstance(metric, list) else [metric] allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] for metric in metrics: if metric not in allowed_metrics: raise KeyError('metric {} is not supported'.format(metric)) <DeepExtract> assert isinstance(results, list), 'results must be a list' assert len(results) == len(self), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self)) if jsonfile_prefix is None: tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None result_files = self.results2json(results, jsonfile_prefix) (result_files, tmp_dir) = (result_files, tmp_dir) </DeepExtract> eval_results = {} cocoGt = self.coco for metric in metrics: msg = 'Evaluating {}...'.format(metric) if logger is None: msg = '\n' + msg print_log(msg, logger=logger) if metric == 'proposal_fast': <DeepExtract> gt_bboxes = [] for i in range(len(self.img_ids)): ann_ids = self.coco.getAnnIds(imgIds=self.img_ids[i]) ann_info = self.coco.loadAnns(ann_ids) if len(ann_info) == 0: gt_bboxes.append(np.zeros((0, 4))) continue bboxes = [] for ann in ann_info: if ann.get('ignore', False) or ann['iscrowd']: continue (x1, y1, w, h) = ann['bbox'] bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1]) bboxes = np.array(bboxes, dtype=np.float32) if bboxes.shape[0] == 0: bboxes = np.zeros((0, 4)) gt_bboxes.append(bboxes) recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thrs, logger='silent') ar = recalls.mean(axis=1) ar = ar </DeepExtract> log_msg = [] for (i, num) in enumerate(proposal_nums): eval_results['AR@{}'.format(num)] = ar[i] log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i])) log_msg = ''.join(log_msg) print_log(log_msg, logger=logger) continue if metric not in result_files: raise KeyError('{} is not in results'.format(metric)) try: cocoDt = cocoGt.loadRes(result_files[metric]) except IndexError: print_log('The testing results of the whole dataset is empty.', logger=logger, level=logging.ERROR) break iou_type = 'bbox' if metric == 'proposal' else metric cocoEval = COCOeval(cocoGt, cocoDt, iou_type) cocoEval.params.imgIds = self.img_ids if metric == 'proposal': cocoEval.params.useCats = 0 cocoEval.params.maxDets = list(proposal_nums) cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() metric_items = ['AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000'] for (i, item) in enumerate(metric_items): val = float('{:.3f}'.format(cocoEval.stats[i + 6])) eval_results[item] = val else: 
cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() if classwise: pass metric_items = ['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'] for i in range(len(metric_items)): key = '{}_{}'.format(metric, metric_items[i]) val = float('{:.3f}'.format(cocoEval.stats[i])) eval_results[key] = val eval_results['{}_mAP_copypaste'.format(metric)] = '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} {ap[4]:.3f} {ap[5]:.3f}'.format(ap=cocoEval.stats[:6]) if tmp_dir is not None: tmp_dir.cleanup() return eval_results
def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)): """Evaluation in COCO protocol. Args: results (list): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. classwise (bool): Whether to evaluating the AP for each class. proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). iou_thrs (Sequence[float]): IoU threshold used for evaluating recalls. If set to a list, the average recall of all IoUs will also be computed. Default: 0.5. Returns: dict[str: float] """ metrics = metric if isinstance(metric, list) else [metric] allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] for metric in metrics: if metric not in allowed_metrics: raise KeyError('metric {} is not supported'.format(metric)) assert isinstance(results, list), 'results must be a list' assert len(results) == len(self), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self)) if jsonfile_prefix is None: tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None result_files = self.results2json(results, jsonfile_prefix) (result_files, tmp_dir) = (result_files, tmp_dir) eval_results = {} cocoGt = self.coco for metric in metrics: msg = 'Evaluating {}...'.format(metric) if logger is None: msg = '\n' + msg print_log(msg, logger=logger) if metric == 'proposal_fast': gt_bboxes = [] for i in range(len(self.img_ids)): ann_ids = self.coco.getAnnIds(imgIds=self.img_ids[i]) ann_info = self.coco.loadAnns(ann_ids) if len(ann_info) == 0: gt_bboxes.append(np.zeros((0, 4))) continue bboxes = [] for ann in ann_info: if ann.get('ignore', False) or ann['iscrowd']: continue (x1, y1, w, h) = ann['bbox'] bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1]) bboxes = np.array(bboxes, dtype=np.float32) if bboxes.shape[0] == 0: bboxes = np.zeros((0, 4)) gt_bboxes.append(bboxes) recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thrs, logger='silent') ar = recalls.mean(axis=1) ar = ar log_msg = [] for (i, num) in enumerate(proposal_nums): eval_results['AR@{}'.format(num)] = ar[i] log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i])) log_msg = ''.join(log_msg) print_log(log_msg, logger=logger) continue if metric not in result_files: raise KeyError('{} is not in results'.format(metric)) try: cocoDt = cocoGt.loadRes(result_files[metric]) except IndexError: print_log('The testing results of the whole dataset is empty.', logger=logger, level=logging.ERROR) break iou_type = 'bbox' if metric == 'proposal' else metric cocoEval = COCOeval(cocoGt, cocoDt, iou_type) cocoEval.params.imgIds = self.img_ids if metric == 'proposal': cocoEval.params.useCats = 0 cocoEval.params.maxDets = list(proposal_nums) cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() metric_items = ['AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000'] for (i, item) in enumerate(metric_items): val = float('{:.3f}'.format(cocoEval.stats[i + 6])) eval_results[item] = val else: cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() if 
classwise: pass metric_items = ['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'] for i in range(len(metric_items)): key = '{}_{}'.format(metric, metric_items[i]) val = float('{:.3f}'.format(cocoEval.stats[i])) eval_results[key] = val eval_results['{}_mAP_copypaste'.format(metric)] = '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} {ap[4]:.3f} {ap[5]:.3f}'.format(ap=cocoEval.stats[:6]) if tmp_dir is not None: tmp_dir.cleanup() return eval_results
EfficientDet-bifpn
positive
@edit_action('comment selection', final=True) @clear_selection def toggle_comment_selection(self, prefix: str) -> None: <DeepExtract> ((s_y, _), (e_y, _)) = self.selection.get() e_y = min(e_y + 1, len(self.buf) - 1) if self.buf[e_y - 1] == '': e_y -= 1 (s_y, e_y) = (s_y, e_y) </DeepExtract> <DeepExtract> commented = self.buf[s_y].lstrip().startswith(prefix) </DeepExtract> <DeepExtract> (s_y, e_y) = self._selection_lines() minimum_indent = min((len(self._indent(lineno)) for lineno in range(s_y, e_y))) </DeepExtract> for lineno in range(s_y, e_y): if commented: <DeepExtract> line = self.buf[lineno] indent = self._indent(lineno) ws_len = len(indent) if line.startswith(f'{prefix} ', ws_len): self.buf[lineno] = f'{indent}{line[ws_len + len(prefix) + 1:]}' elif line.startswith(prefix, ws_len): self.buf[lineno] = f'{indent}{line[ws_len + len(prefix):]}' if self.buf.y == lineno and self.buf.x > ws_len: self.buf.x -= len(line) - len(self.buf[lineno]) </DeepExtract> else: <DeepExtract> line = self.buf[lineno] if not line: self.buf[lineno] = f'{prefix}' else: self.buf[lineno] = f'{line[:minimum_indent]}{prefix} {line[minimum_indent:]}' if lineno == self.buf.y and self.buf.x > minimum_indent: self.buf.x += len(self.buf[lineno]) - len(line) </DeepExtract>
@edit_action('comment selection', final=True) @clear_selection def toggle_comment_selection(self, prefix: str) -> None: ((s_y, _), (e_y, _)) = self.selection.get() e_y = min(e_y + 1, len(self.buf) - 1) if self.buf[e_y - 1] == '': e_y -= 1 (s_y, e_y) = (s_y, e_y) commented = self.buf[s_y].lstrip().startswith(prefix) (s_y, e_y) = self._selection_lines() minimum_indent = min((len(self._indent(lineno)) for lineno in range(s_y, e_y))) for lineno in range(s_y, e_y): if commented: line = self.buf[lineno] indent = self._indent(lineno) ws_len = len(indent) if line.startswith(f'{prefix} ', ws_len): self.buf[lineno] = f'{indent}{line[ws_len + len(prefix) + 1:]}' elif line.startswith(prefix, ws_len): self.buf[lineno] = f'{indent}{line[ws_len + len(prefix):]}' if self.buf.y == lineno and self.buf.x > ws_len: self.buf.x -= len(line) - len(self.buf[lineno]) else: line = self.buf[lineno] if not line: self.buf[lineno] = f'{prefix}' else: self.buf[lineno] = f'{line[:minimum_indent]}{prefix} {line[minimum_indent:]}' if lineno == self.buf.y and self.buf.x > minimum_indent: self.buf.x += len(self.buf[lineno]) - len(line)
babi
positive
def nvmlDeviceSetEccMode(handle, mode): <DeepExtract> global nvmlLib if 'nvmlDeviceSetEccMode' in _nvmlGetFunctionPointer_cache: fn = _nvmlGetFunctionPointer_cache['nvmlDeviceSetEccMode'] libLoadLock.acquire() try: if nvmlLib == None: raise NVMLError(NVML_ERROR_UNINITIALIZED) try: _nvmlGetFunctionPointer_cache['nvmlDeviceSetEccMode'] = getattr(nvmlLib, 'nvmlDeviceSetEccMode') fn = _nvmlGetFunctionPointer_cache['nvmlDeviceSetEccMode'] except AttributeError: raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND) finally: libLoadLock.release() </DeepExtract> ret = fn(handle, _nvmlEnableState_t(mode)) <DeepExtract> if ret != NVML_SUCCESS: raise NVMLError(ret) return ret </DeepExtract> return None
def nvmlDeviceSetEccMode(handle, mode): global nvmlLib if 'nvmlDeviceSetEccMode' in _nvmlGetFunctionPointer_cache: fn = _nvmlGetFunctionPointer_cache['nvmlDeviceSetEccMode'] libLoadLock.acquire() try: if nvmlLib == None: raise NVMLError(NVML_ERROR_UNINITIALIZED) try: _nvmlGetFunctionPointer_cache['nvmlDeviceSetEccMode'] = getattr(nvmlLib, 'nvmlDeviceSetEccMode') fn = _nvmlGetFunctionPointer_cache['nvmlDeviceSetEccMode'] except AttributeError: raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND) finally: libLoadLock.release() ret = fn(handle, _nvmlEnableState_t(mode)) if ret != NVML_SUCCESS: raise NVMLError(ret) return ret return None
DeepFaceLab_Linux
positive
def query(self, url, method, query_parameters, header_parameters, body, expected_status_codes, polling_timeout, polling_interval): operation_config = {} request = None if header_parameters is None: header_parameters = {} header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if method == 'GET': request = self._client.get(url, query_parameters) elif method == 'PUT': request = self._client.put(url, query_parameters) elif method == 'POST': request = self._client.post(url, query_parameters) elif method == 'HEAD': request = self._client.head(url, query_parameters) elif method == 'PATCH': request = self._client.patch(url, query_parameters) elif method == 'DELETE': request = self._client.delete(url, query_parameters) elif method == 'MERGE': request = self._client.merge(url, query_parameters) response = self._client.send(request, header_parameters, body, **operation_config) if response.status_code not in expected_status_codes: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp elif response.status_code == 202 and polling_timeout > 0: def get_long_running_output(response): return response poller = LROPoller(self._client, ClientRawResponse(None, response), get_long_running_output, ARMPolling(polling_interval, **operation_config)) <DeepExtract> try: poller.wait(timeout=polling_timeout) response = poller.result() except Exception as exc: raise </DeepExtract> return response
def query(self, url, method, query_parameters, header_parameters, body, expected_status_codes, polling_timeout, polling_interval): operation_config = {} request = None if header_parameters is None: header_parameters = {} header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if method == 'GET': request = self._client.get(url, query_parameters) elif method == 'PUT': request = self._client.put(url, query_parameters) elif method == 'POST': request = self._client.post(url, query_parameters) elif method == 'HEAD': request = self._client.head(url, query_parameters) elif method == 'PATCH': request = self._client.patch(url, query_parameters) elif method == 'DELETE': request = self._client.delete(url, query_parameters) elif method == 'MERGE': request = self._client.merge(url, query_parameters) response = self._client.send(request, header_parameters, body, **operation_config) if response.status_code not in expected_status_codes: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp elif response.status_code == 202 and polling_timeout > 0: def get_long_running_output(response): return response poller = LROPoller(self._client, ClientRawResponse(None, response), get_long_running_output, ARMPolling(polling_interval, **operation_config)) try: poller.wait(timeout=polling_timeout) response = poller.result() except Exception as exc: raise return response
AnsibleLabs
positive
def test_stac_search_limits(stac_client: FlaskClient): large_limit = OUR_DATASET_LIMIT + 1 rv: Response = stac_client.get(f'/stac/search?&limit={large_limit}') assert rv.status_code == 400 assert b'Max page size' in rv.data <DeepExtract> with DebugContext(f"Requested {repr('/stac/search?&bbox=[114, -33, 153, -10]&datetime=2017-04-16T01:12:16/2017-05-10T00:24:21')}"): data = get_geojson(stac_client, '/stac/search?&bbox=[114, -33, 153, -10]&datetime=2017-04-16T01:12:16/2017-05-10T00:24:21') assert_item_collection(data) geojson = data </DeepExtract> assert len(geojson.get('features')) == OUR_PAGE_SIZE
def test_stac_search_limits(stac_client: FlaskClient): large_limit = OUR_DATASET_LIMIT + 1 rv: Response = stac_client.get(f'/stac/search?&limit={large_limit}') assert rv.status_code == 400 assert b'Max page size' in rv.data with DebugContext(f"Requested {repr('/stac/search?&bbox=[114, -33, 153, -10]&datetime=2017-04-16T01:12:16/2017-05-10T00:24:21')}"): data = get_geojson(stac_client, '/stac/search?&bbox=[114, -33, 153, -10]&datetime=2017-04-16T01:12:16/2017-05-10T00:24:21') assert_item_collection(data) geojson = data assert len(geojson.get('features')) == OUR_PAGE_SIZE
datacube-explorer
positive
def report_simple_failure(self, scene, reference_filepath, output_filepath, log_filepath, error_message): self.failures += 1 <DeepExtract> if os.name == 'nt': command = 'copy /Y "{0}" "{1}"'.format(output_filepath, reference_filepath) else: command = 'cp "{0}" "{1}"'.format(output_filepath, reference_filepath) </DeepExtract> self.all_commands.append(command) self.file.write(self.__render(self.simple_failure_template, {'project-path': scene, 'ref-image-url': urllib.pathname2url(reference_filepath), 'output-image-url': urllib.pathname2url(output_filepath), 'failure-reason': error_message, 'log-file-url': urllib.pathname2url(log_filepath), 'log-file-path': os.path.basename(log_filepath), 'update-command': command})) self.file.flush()
def report_simple_failure(self, scene, reference_filepath, output_filepath, log_filepath, error_message): self.failures += 1 if os.name == 'nt': command = 'copy /Y "{0}" "{1}"'.format(output_filepath, reference_filepath) else: command = 'cp "{0}" "{1}"'.format(output_filepath, reference_filepath) self.all_commands.append(command) self.file.write(self.__render(self.simple_failure_template, {'project-path': scene, 'ref-image-url': urllib.pathname2url(reference_filepath), 'output-image-url': urllib.pathname2url(output_filepath), 'failure-reason': error_message, 'log-file-url': urllib.pathname2url(log_filepath), 'log-file-path': os.path.basename(log_filepath), 'update-command': command})) self.file.flush()
blenderseed
positive
def test_review_delete(self): <DeepExtract> review = db_review.create(**self.review) </DeepExtract> resp = self.client.delete('/review/%s' % review['id'], headers=self.header(self.user)) self.assert200(resp)
def test_review_delete(self): review = db_review.create(**self.review) resp = self.client.delete('/review/%s' % review['id'], headers=self.header(self.user)) self.assert200(resp)
critiquebrainz
positive
def word_transformer(source, target, words): def state(word1): res = [] for word2 in words: if word2 != word1 and len(word2) == len(word1): n_unequal_chars = 0 for i in range(len(word2)): if word1[i] != word2[i]: n_unequal_chars += 1 if n_unequal_chars == 1: res.append(word2) return res def reconstruct_path(parents, target): res = [target] temp = target while temp: temp = parents[temp] res.append(temp) return res[::-1] q = collections.deque() visited = set() q.append(source) level = 0 parents = collections.defaultdict(str) parents[source] = None while q: for i in range(len(q)): word = q.popleft() if word == target: <DeepExtract> res = [target] temp = target while temp: temp = parents[temp] res.append(temp) path = res[::-1] </DeepExtract> return path[1:] visited.add(word) <DeepExtract> res = [] for word2 in words: if word2 != word and len(word2) == len(word): n_unequal_chars = 0 for i in range(len(word2)): if word[i] != word2[i]: n_unequal_chars += 1 if n_unequal_chars == 1: res.append(word2) neighs = res </DeepExtract> for neigh in neighs: if neigh not in visited: parents[neigh] = word q.append(neigh) level += 1 return -1
def word_transformer(source, target, words): def state(word1): res = [] for word2 in words: if word2 != word1 and len(word2) == len(word1): n_unequal_chars = 0 for i in range(len(word2)): if word1[i] != word2[i]: n_unequal_chars += 1 if n_unequal_chars == 1: res.append(word2) return res def reconstruct_path(parents, target): res = [target] temp = target while temp: temp = parents[temp] res.append(temp) return res[::-1] q = collections.deque() visited = set() q.append(source) level = 0 parents = collections.defaultdict(str) parents[source] = None while q: for i in range(len(q)): word = q.popleft() if word == target: res = [target] temp = target while temp: temp = parents[temp] res.append(temp) path = res[::-1] return path[1:] visited.add(word) res = [] for word2 in words: if word2 != word and len(word2) == len(word): n_unequal_chars = 0 for i in range(len(word2)): if word[i] != word2[i]: n_unequal_chars += 1 if n_unequal_chars == 1: res.append(word2) neighs = res for neigh in neighs: if neigh not in visited: parents[neigh] = word q.append(neigh) level += 1 return -1
CtCI-6th-Edition-Python
positive
@pytest.mark.parametrize('det_arch, reco_arch', [['db_mobilenet_v3_large', 'crnn_mobilenet_v3_large']]) def test_zoo_models(det_arch, reco_arch): predictor = models.ocr_predictor(det_arch, reco_arch, pretrained=True) <DeepExtract> assert isinstance(predictor, OCRPredictor) doc = [np.zeros((512, 512, 3), dtype=np.uint8)] out = predictor(doc) assert isinstance(out, Document) assert len(out.pages) == 1 with pytest.raises(ValueError): input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8) _ = predictor([input_page]) </DeepExtract> det_model = detection.__dict__[det_arch](pretrained=True) reco_model = recognition.__dict__[reco_arch](pretrained=True) predictor = models.ocr_predictor(det_model, reco_model) <DeepExtract> assert isinstance(predictor, OCRPredictor) doc = [np.zeros((512, 512, 3), dtype=np.uint8)] out = predictor(doc) assert isinstance(out, Document) assert len(out.pages) == 1 with pytest.raises(ValueError): input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8) _ = predictor([input_page]) </DeepExtract> with pytest.raises(ValueError): models.ocr_predictor(det_arch=reco_model, pretrained=True) with pytest.raises(ValueError): models.ocr_predictor(reco_arch=det_model, pretrained=True) predictor = models.kie_predictor(det_arch, reco_arch, pretrained=True) <DeepExtract> assert isinstance(predictor, KIEPredictor) doc = [np.zeros((512, 512, 3), dtype=np.uint8)] out = predictor(doc) assert isinstance(out, KIEDocument) assert len(out.pages) == 1 with pytest.raises(ValueError): input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8) _ = predictor([input_page]) </DeepExtract> det_model = detection.__dict__[det_arch](pretrained=True) reco_model = recognition.__dict__[reco_arch](pretrained=True) predictor = models.kie_predictor(det_model, reco_model) <DeepExtract> assert isinstance(predictor, KIEPredictor) doc = [np.zeros((512, 512, 3), dtype=np.uint8)] out = predictor(doc) assert isinstance(out, KIEDocument) assert len(out.pages) == 1 with pytest.raises(ValueError): input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8) _ = predictor([input_page]) </DeepExtract> with pytest.raises(ValueError): models.kie_predictor(det_arch=reco_model, pretrained=True) with pytest.raises(ValueError): models.kie_predictor(reco_arch=det_model, pretrained=True)
@pytest.mark.parametrize('det_arch, reco_arch', [['db_mobilenet_v3_large', 'crnn_mobilenet_v3_large']]) def test_zoo_models(det_arch, reco_arch): predictor = models.ocr_predictor(det_arch, reco_arch, pretrained=True) assert isinstance(predictor, OCRPredictor) doc = [np.zeros((512, 512, 3), dtype=np.uint8)] out = predictor(doc) assert isinstance(out, Document) assert len(out.pages) == 1 with pytest.raises(ValueError): input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8) _ = predictor([input_page]) det_model = detection.__dict__[det_arch](pretrained=True) reco_model = recognition.__dict__[reco_arch](pretrained=True) predictor = models.ocr_predictor(det_model, reco_model) assert isinstance(predictor, OCRPredictor) doc = [np.zeros((512, 512, 3), dtype=np.uint8)] out = predictor(doc) assert isinstance(out, Document) assert len(out.pages) == 1 with pytest.raises(ValueError): input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8) _ = predictor([input_page]) with pytest.raises(ValueError): models.ocr_predictor(det_arch=reco_model, pretrained=True) with pytest.raises(ValueError): models.ocr_predictor(reco_arch=det_model, pretrained=True) predictor = models.kie_predictor(det_arch, reco_arch, pretrained=True) assert isinstance(predictor, KIEPredictor) doc = [np.zeros((512, 512, 3), dtype=np.uint8)] out = predictor(doc) assert isinstance(out, KIEDocument) assert len(out.pages) == 1 with pytest.raises(ValueError): input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8) _ = predictor([input_page]) det_model = detection.__dict__[det_arch](pretrained=True) reco_model = recognition.__dict__[reco_arch](pretrained=True) predictor = models.kie_predictor(det_model, reco_model) assert isinstance(predictor, KIEPredictor) doc = [np.zeros((512, 512, 3), dtype=np.uint8)] out = predictor(doc) assert isinstance(out, KIEDocument) assert len(out.pages) == 1 with pytest.raises(ValueError): input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8) _ = predictor([input_page]) with pytest.raises(ValueError): models.kie_predictor(det_arch=reco_model, pretrained=True) with pytest.raises(ValueError): models.kie_predictor(reco_arch=det_model, pretrained=True)
doctr
positive
def remove(self, key): if key == self.key: if self.left is None: return self.right if self.right is None: return self.left <DeepExtract> (self.key, self.left.maxnode().key) = (self.left.maxnode().key, self.key) (self.value, self.left.maxnode().value) = (self.left.maxnode().value, self.value) </DeepExtract> self.left = self.left.remove(key) elif key < self.key and self.left: self.left = self.left.remove(key) elif key > self.key and self.right: self.right = self.right.remove(key) else: raise KeyError <DeepExtract> len_left = len(self.left) if self.left else 0 len_right = len(self.right) if self.right else 0 self._length = 1 + len_left + len_right </DeepExtract> return self
def remove(self, key): if key == self.key: if self.left is None: return self.right if self.right is None: return self.left (self.key, self.left.maxnode().key) = (self.left.maxnode().key, self.key) (self.value, self.left.maxnode().value) = (self.left.maxnode().value, self.value) self.left = self.left.remove(key) elif key < self.key and self.left: self.left = self.left.remove(key) elif key > self.key and self.right: self.right = self.right.remove(key) else: raise KeyError len_left = len(self.left) if self.left else 0 len_right = len(self.right) if self.right else 0 self._length = 1 + len_left + len_right return self
datastructures
positive
def render(self): <DeepExtract> self.authenticating.set() view = InterstitialView(title='VSphere Login Wait', message='Logging in to VSphere. Please wait.', event=self.authenticating) view.show() </DeepExtract> app.loop.create_task(self.login_to_vsphere())
def render(self): self.authenticating.set() view = InterstitialView(title='VSphere Login Wait', message='Logging in to VSphere. Please wait.', event=self.authenticating) view.show() app.loop.create_task(self.login_to_vsphere())
conjure-up
positive
def gen_key_iv(password, method): method = method.lower() (key_len, iv_len, m) = method_supported[method] if key_len > 0: <DeepExtract> cached_key = '%s-%d-%d' % (password, key_len, iv_len) r = cached_keys.get(cached_key, None) if r: (key, _) = r m = [] i = 0 while len(b''.join(m)) < key_len + iv_len: md5 = hashlib.md5() data = password if i > 0: data = m[i - 1] + password md5.update(data) m.append(md5.digest()) i += 1 ms = b''.join(m) key = ms[:key_len] iv = ms[key_len:key_len + iv_len] cached_keys[cached_key] = (key, iv) (key, _) = (key, iv) </DeepExtract> else: key = password <DeepExtract> iv = os.urandom(iv_len) </DeepExtract> return (key, iv, m)
def gen_key_iv(password, method): method = method.lower() (key_len, iv_len, m) = method_supported[method] if key_len > 0: cached_key = '%s-%d-%d' % (password, key_len, iv_len) r = cached_keys.get(cached_key, None) if r: (key, _) = r m = [] i = 0 while len(b''.join(m)) < key_len + iv_len: md5 = hashlib.md5() data = password if i > 0: data = m[i - 1] + password md5.update(data) m.append(md5.digest()) i += 1 ms = b''.join(m) key = ms[:key_len] iv = ms[key_len:key_len + iv_len] cached_keys[cached_key] = (key, iv) (key, _) = (key, iv) else: key = password iv = os.urandom(iv_len) return (key, iv, m)
EasyLogin
positive
def test_read_data(self): <DeepExtract> trigger = self.create_triggerservice(consumer_name='ServiceGithub') name = 'github' repo = 'foobar' project = 'barfoo' status = True return Github.objects.create(trigger=trigger, name=name, status=status, repo=repo, project=project) </DeepExtract> kwargs = dict({'date_triggered': '2013-05-11 13:23:58+00:00', 'trigger_id': self.trigger_id, 'model_name': 'Github'}) trigger_id = kwargs.get('trigger_id') data = [] cache.set('th_github_' + str(trigger_id), data) with patch('github3.GitHub.ratelimit_remaining', new_callable=PropertyMock) as mock_read_data: mock_read_data.return_value = 2 with patch.object(GitHub, 'issues_on') as mock_read_data2: se = ServiceGithub(self.token) se.read_data(**kwargs) mock_read_data2.assert_called_once() mock_read_data.assert_called_once()
def test_read_data(self): trigger = self.create_triggerservice(consumer_name='ServiceGithub') name = 'github' repo = 'foobar' project = 'barfoo' status = True return Github.objects.create(trigger=trigger, name=name, status=status, repo=repo, project=project) kwargs = dict({'date_triggered': '2013-05-11 13:23:58+00:00', 'trigger_id': self.trigger_id, 'model_name': 'Github'}) trigger_id = kwargs.get('trigger_id') data = [] cache.set('th_github_' + str(trigger_id), data) with patch('github3.GitHub.ratelimit_remaining', new_callable=PropertyMock) as mock_read_data: mock_read_data.return_value = 2 with patch.object(GitHub, 'issues_on') as mock_read_data2: se = ServiceGithub(self.token) se.read_data(**kwargs) mock_read_data2.assert_called_once() mock_read_data.assert_called_once()
django-th
positive
def _build_encoder(self, hparams): """Build a GNMT encoder.""" if hparams.encoder_type == 'uni' or hparams.encoder_type == 'bi': return super(GNMTModel, self)._build_encoder(hparams) if hparams.encoder_type != 'gnmt': raise ValueError('Unknown encoder_type %s' % hparams.encoder_type) num_bi_layers = 1 num_uni_layers = self.num_encoder_layers - num_bi_layers utils.print_out('# Build a GNMT encoder') utils.print_out(' num_bi_layers = %d' % num_bi_layers) utils.print_out(' num_uni_layers = %d' % num_uni_layers) iterator = self.iterator source = iterator.source if self.time_major: source = tf.transpose(source) with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE) as scope: dtype = scope.dtype DAPPLE_TEST = hparams.dapple_test devices = cluster_utils.get_pipeline_devices(hparams.pipeline_device_num) encoder_device = devices[0] self.encoder_emb_inp = self.encoder_emb_lookup_fn(self.embedding_encoder, source) if DAPPLE_TEST: fw_cell = model_helper._single_cell_dapple(hparams, self.mode, 0, residual_connection=False, device_str=encoder_device) bw_cell = model_helper._single_cell_dapple(hparams, self.mode, 0, residual_connection=False, device_str=encoder_device) (bi_outputs, bi_encoder_state) = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, self.encoder_emb_inp, dtype=dtype, sequence_length=iterator.source_sequence_length, time_major=self.time_major, swap_memory=True) bi_encoder_outputs = tf.concat(bi_outputs, -1) else: (bi_encoder_outputs, bi_encoder_state) = self._build_bidirectional_rnn(inputs=self.encoder_emb_inp, sequence_length=iterator.source_sequence_length, dtype=dtype, hparams=hparams, num_bi_layers=num_bi_layers, num_bi_residual_layers=0, base_gpu=0) if self.extract_encoder_layers: <DeepExtract> uni_cell_lists = model_helper._cell_list(unit_type=hparams.unit_type, num_units=hparams.num_units, num_layers=num_uni_layers, num_residual_layers=self.num_encoder_residual_layers, forget_bias=hparams.forget_bias, dropout=hparams.dropout, num_gpus=self.num_gpus, base_gpu=1, mode=self.mode, single_cell_fn=self.single_cell_fn) encoder_inp = bi_encoder_outputs encoder_states = [] self.encoder_state_list = [bi_encoder_outputs[:, :, :hparams.num_units], bi_encoder_outputs[:, :, hparams.num_units:]] with tf.variable_scope('rnn/multi_rnn_cell'): for (i, cell) in enumerate(uni_cell_lists): with tf.variable_scope('cell_%d' % i) as scope: (encoder_inp, encoder_state) = tf.nn.dynamic_rnn(cell, encoder_inp, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major, scope=scope) encoder_states.append(encoder_state) self.encoder_state_list.append(encoder_inp) encoder_state = tuple(encoder_states) encoder_outputs = self.encoder_state_list[-1] (encoder_state, encoder_outputs) = (encoder_state, encoder_outputs) </DeepExtract> elif DAPPLE_TEST: self.encoder_state_list = [bi_encoder_outputs[:, :, :hparams.num_units], bi_encoder_outputs[:, :, hparams.num_units:]] encoder_state = [] with tf.variable_scope('rnn/multi_rnn_cell'): uni_cell1 = model_helper._single_cell_dapple(hparams, self.mode, 1, residual_connection=False, device_str=encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 1) as scope: (uc1_outs, uc1_stat) = tf.nn.dynamic_rnn(uni_cell1, bi_encoder_outputs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc1_outs) encoder_state.append(uc1_stat) uc2 = model_helper._single_cell_dapple(hparams, self.mode, 2, True, encoder_device) <DeepExtract> with 
tf.variable_scope('cell_%d' % 2) as scope: (uc2_outs, uc2_stat) = tf.nn.dynamic_rnn(uc2, uc1_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc2_outs) encoder_state.append(uc2_stat) uc3 = model_helper._single_cell_dapple(hparams, self.mode, 3, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 3) as scope: (uc3_outs, uc3_stat) = tf.nn.dynamic_rnn(uc3, uc2_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc3_outs) encoder_state.append(uc3_stat) uc4 = model_helper._single_cell_dapple(hparams, self.mode, 4, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 4) as scope: (uc4_outs, uc4_stat) = tf.nn.dynamic_rnn(uc4, uc3_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc4_outs) encoder_state.append(uc4_stat) uc5 = model_helper._single_cell_dapple(hparams, self.mode, 5, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 5) as scope: (uc5_outs, uc5_stat) = tf.nn.dynamic_rnn(uc5, uc4_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc5_outs) encoder_state.append(uc5_stat) uc6 = model_helper._single_cell_dapple(hparams, self.mode, 6, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 6) as scope: (uc6_outs, uc6_stat) = tf.nn.dynamic_rnn(uc6, uc5_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc6_outs) encoder_state.append(uc6_stat) uc7 = model_helper._single_cell_dapple(hparams, self.mode, 7, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 7) as scope: (uc7_outs, uc7_stat) = tf.nn.dynamic_rnn(uc7, uc6_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc7_outs) encoder_state.append(uc7_stat) if hparams.gnmt16: uc8 = model_helper._single_cell_dapple(hparams, self.mode, 8, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 8) as scope: (uc8_outs, uc8_stat) = tf.nn.dynamic_rnn(uc8, uc7_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc8_outs) encoder_state.append(uc8_stat) uc9 = model_helper._single_cell_dapple(hparams, self.mode, 9, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 9) as scope: (uc9_outs, uc9_stat) = tf.nn.dynamic_rnn(uc9, uc8_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc9_outs) encoder_state.append(uc9_stat) uc10 = model_helper._single_cell_dapple(hparams, self.mode, 10, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 10) as scope: (uc10_outs, uc10_stat) = tf.nn.dynamic_rnn(uc10, uc9_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc10_outs) encoder_state.append(uc10_stat) uc11 = model_helper._single_cell_dapple(hparams, self.mode, 11, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 11) as scope: (uc11_outs, uc11_stat) = 
tf.nn.dynamic_rnn(uc11, uc10_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc11_outs) encoder_state.append(uc11_stat) uc12 = model_helper._single_cell_dapple(hparams, self.mode, 12, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 12) as scope: (uc12_outs, uc12_stat) = tf.nn.dynamic_rnn(uc12, uc11_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc12_outs) encoder_state.append(uc12_stat) uc13 = model_helper._single_cell_dapple(hparams, self.mode, 13, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 13) as scope: (uc13_outs, uc13_stat) = tf.nn.dynamic_rnn(uc13, uc12_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc13_outs) encoder_state.append(uc13_stat) uc14 = model_helper._single_cell_dapple(hparams, self.mode, 14, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 14) as scope: (uc14_outs, uc14_stat) = tf.nn.dynamic_rnn(uc14, uc13_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc14_outs) encoder_state.append(uc14_stat) uc15 = model_helper._single_cell_dapple(hparams, self.mode, 15, True, encoder_device) <DeepExtract> with tf.variable_scope('cell_%d' % 15) as scope: (uc15_outs, uc15_stat) = tf.nn.dynamic_rnn(uc15, uc14_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) </DeepExtract> self.encoder_state_list.append(uc15_outs) encoder_state.append(uc15_stat) encoder_outputs = uc7_outs if not hparams.gnmt16 else uc15_outs encoder_state = tuple(encoder_state) else: <DeepExtract> uni_cell = model_helper.create_rnn_cell(unit_type=hparams.unit_type, num_units=hparams.num_units, num_layers=num_uni_layers, num_residual_layers=self.num_encoder_residual_layers, forget_bias=hparams.forget_bias, dropout=hparams.dropout, num_gpus=self.num_gpus, base_gpu=0, mode=self.mode, single_cell_fn=self.single_cell_fn) (encoder_outputs, encoder_state) = tf.nn.dynamic_rnn(uni_cell, bi_encoder_outputs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list = [encoder_outputs] (encoder_state, encoder_outputs) = (encoder_state, encoder_outputs) </DeepExtract> encoder_state = (bi_encoder_state[1],) + ((encoder_state,) if num_uni_layers == 1 else encoder_state) return (encoder_outputs, encoder_state)
def _build_encoder(self, hparams): """Build a GNMT encoder.""" if hparams.encoder_type == 'uni' or hparams.encoder_type == 'bi': return super(GNMTModel, self)._build_encoder(hparams) if hparams.encoder_type != 'gnmt': raise ValueError('Unknown encoder_type %s' % hparams.encoder_type) num_bi_layers = 1 num_uni_layers = self.num_encoder_layers - num_bi_layers utils.print_out('# Build a GNMT encoder') utils.print_out(' num_bi_layers = %d' % num_bi_layers) utils.print_out(' num_uni_layers = %d' % num_uni_layers) iterator = self.iterator source = iterator.source if self.time_major: source = tf.transpose(source) with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE) as scope: dtype = scope.dtype DAPPLE_TEST = hparams.dapple_test devices = cluster_utils.get_pipeline_devices(hparams.pipeline_device_num) encoder_device = devices[0] self.encoder_emb_inp = self.encoder_emb_lookup_fn(self.embedding_encoder, source) if DAPPLE_TEST: fw_cell = model_helper._single_cell_dapple(hparams, self.mode, 0, residual_connection=False, device_str=encoder_device) bw_cell = model_helper._single_cell_dapple(hparams, self.mode, 0, residual_connection=False, device_str=encoder_device) (bi_outputs, bi_encoder_state) = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, self.encoder_emb_inp, dtype=dtype, sequence_length=iterator.source_sequence_length, time_major=self.time_major, swap_memory=True) bi_encoder_outputs = tf.concat(bi_outputs, -1) else: (bi_encoder_outputs, bi_encoder_state) = self._build_bidirectional_rnn(inputs=self.encoder_emb_inp, sequence_length=iterator.source_sequence_length, dtype=dtype, hparams=hparams, num_bi_layers=num_bi_layers, num_bi_residual_layers=0, base_gpu=0) if self.extract_encoder_layers: uni_cell_lists = model_helper._cell_list(unit_type=hparams.unit_type, num_units=hparams.num_units, num_layers=num_uni_layers, num_residual_layers=self.num_encoder_residual_layers, forget_bias=hparams.forget_bias, dropout=hparams.dropout, num_gpus=self.num_gpus, base_gpu=1, mode=self.mode, single_cell_fn=self.single_cell_fn) encoder_inp = bi_encoder_outputs encoder_states = [] self.encoder_state_list = [bi_encoder_outputs[:, :, :hparams.num_units], bi_encoder_outputs[:, :, hparams.num_units:]] with tf.variable_scope('rnn/multi_rnn_cell'): for (i, cell) in enumerate(uni_cell_lists): with tf.variable_scope('cell_%d' % i) as scope: (encoder_inp, encoder_state) = tf.nn.dynamic_rnn(cell, encoder_inp, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major, scope=scope) encoder_states.append(encoder_state) self.encoder_state_list.append(encoder_inp) encoder_state = tuple(encoder_states) encoder_outputs = self.encoder_state_list[-1] (encoder_state, encoder_outputs) = (encoder_state, encoder_outputs) elif DAPPLE_TEST: self.encoder_state_list = [bi_encoder_outputs[:, :, :hparams.num_units], bi_encoder_outputs[:, :, hparams.num_units:]] encoder_state = [] with tf.variable_scope('rnn/multi_rnn_cell'): uni_cell1 = model_helper._single_cell_dapple(hparams, self.mode, 1, residual_connection=False, device_str=encoder_device) with tf.variable_scope('cell_%d' % 1) as scope: (uc1_outs, uc1_stat) = tf.nn.dynamic_rnn(uni_cell1, bi_encoder_outputs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc1_outs) encoder_state.append(uc1_stat) uc2 = model_helper._single_cell_dapple(hparams, self.mode, 2, True, encoder_device) with tf.variable_scope('cell_%d' % 2) as scope: (uc2_outs, uc2_stat) = tf.nn.dynamic_rnn(uc2, 
uc1_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc2_outs) encoder_state.append(uc2_stat) uc3 = model_helper._single_cell_dapple(hparams, self.mode, 3, True, encoder_device) with tf.variable_scope('cell_%d' % 3) as scope: (uc3_outs, uc3_stat) = tf.nn.dynamic_rnn(uc3, uc2_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc3_outs) encoder_state.append(uc3_stat) uc4 = model_helper._single_cell_dapple(hparams, self.mode, 4, True, encoder_device) with tf.variable_scope('cell_%d' % 4) as scope: (uc4_outs, uc4_stat) = tf.nn.dynamic_rnn(uc4, uc3_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc4_outs) encoder_state.append(uc4_stat) uc5 = model_helper._single_cell_dapple(hparams, self.mode, 5, True, encoder_device) with tf.variable_scope('cell_%d' % 5) as scope: (uc5_outs, uc5_stat) = tf.nn.dynamic_rnn(uc5, uc4_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc5_outs) encoder_state.append(uc5_stat) uc6 = model_helper._single_cell_dapple(hparams, self.mode, 6, True, encoder_device) with tf.variable_scope('cell_%d' % 6) as scope: (uc6_outs, uc6_stat) = tf.nn.dynamic_rnn(uc6, uc5_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc6_outs) encoder_state.append(uc6_stat) uc7 = model_helper._single_cell_dapple(hparams, self.mode, 7, True, encoder_device) with tf.variable_scope('cell_%d' % 7) as scope: (uc7_outs, uc7_stat) = tf.nn.dynamic_rnn(uc7, uc6_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc7_outs) encoder_state.append(uc7_stat) if hparams.gnmt16: uc8 = model_helper._single_cell_dapple(hparams, self.mode, 8, True, encoder_device) with tf.variable_scope('cell_%d' % 8) as scope: (uc8_outs, uc8_stat) = tf.nn.dynamic_rnn(uc8, uc7_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc8_outs) encoder_state.append(uc8_stat) uc9 = model_helper._single_cell_dapple(hparams, self.mode, 9, True, encoder_device) with tf.variable_scope('cell_%d' % 9) as scope: (uc9_outs, uc9_stat) = tf.nn.dynamic_rnn(uc9, uc8_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc9_outs) encoder_state.append(uc9_stat) uc10 = model_helper._single_cell_dapple(hparams, self.mode, 10, True, encoder_device) with tf.variable_scope('cell_%d' % 10) as scope: (uc10_outs, uc10_stat) = tf.nn.dynamic_rnn(uc10, uc9_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc10_outs) encoder_state.append(uc10_stat) uc11 = model_helper._single_cell_dapple(hparams, self.mode, 11, True, encoder_device) with tf.variable_scope('cell_%d' % 11) as scope: (uc11_outs, uc11_stat) = tf.nn.dynamic_rnn(uc11, uc10_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc11_outs) encoder_state.append(uc11_stat) uc12 = model_helper._single_cell_dapple(hparams, self.mode, 12, True, encoder_device) with tf.variable_scope('cell_%d' % 12) as scope: (uc12_outs, 
uc12_stat) = tf.nn.dynamic_rnn(uc12, uc11_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc12_outs) encoder_state.append(uc12_stat) uc13 = model_helper._single_cell_dapple(hparams, self.mode, 13, True, encoder_device) with tf.variable_scope('cell_%d' % 13) as scope: (uc13_outs, uc13_stat) = tf.nn.dynamic_rnn(uc13, uc12_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc13_outs) encoder_state.append(uc13_stat) uc14 = model_helper._single_cell_dapple(hparams, self.mode, 14, True, encoder_device) with tf.variable_scope('cell_%d' % 14) as scope: (uc14_outs, uc14_stat) = tf.nn.dynamic_rnn(uc14, uc13_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc14_outs) encoder_state.append(uc14_stat) uc15 = model_helper._single_cell_dapple(hparams, self.mode, 15, True, encoder_device) with tf.variable_scope('cell_%d' % 15) as scope: (uc15_outs, uc15_stat) = tf.nn.dynamic_rnn(uc15, uc14_outs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list.append(uc15_outs) encoder_state.append(uc15_stat) encoder_outputs = uc7_outs if not hparams.gnmt16 else uc15_outs encoder_state = tuple(encoder_state) else: uni_cell = model_helper.create_rnn_cell(unit_type=hparams.unit_type, num_units=hparams.num_units, num_layers=num_uni_layers, num_residual_layers=self.num_encoder_residual_layers, forget_bias=hparams.forget_bias, dropout=hparams.dropout, num_gpus=self.num_gpus, base_gpu=0, mode=self.mode, single_cell_fn=self.single_cell_fn) (encoder_outputs, encoder_state) = tf.nn.dynamic_rnn(uni_cell, bi_encoder_outputs, dtype=dtype, sequence_length=self.iterator.source_sequence_length, time_major=self.time_major) self.encoder_state_list = [encoder_outputs] (encoder_state, encoder_outputs) = (encoder_state, encoder_outputs) encoder_state = (bi_encoder_state[1],) + ((encoder_state,) if num_uni_layers == 1 else encoder_state) return (encoder_outputs, encoder_state)
DAPPLE
positive
@njit(locals={'lower_mask': _QSMask}, nogil=True, parallel=True, fastmath=FASTMATH) def _rzgate(qubits: np.ndarray, n_qubits: _QSIdx, target: _QSIdx, ang: np.float64) -> None: ang *= 0.5 eit = cmath.exp(1j * ang) eitstar = eit.conjugate() lower_mask = (1 << _QSMask(target)) - 1 for i in prange(1 << _QSMask(n_qubits) - 1): <DeepExtract> lower = i & lower_mask higher = (i & ~lower_mask) << 1 i0 = higher | lower </DeepExtract> qubits[i0] *= eitstar qubits[i0 + (1 << target)] *= eit
@njit(locals={'lower_mask': _QSMask}, nogil=True, parallel=True, fastmath=FASTMATH) def _rzgate(qubits: np.ndarray, n_qubits: _QSIdx, target: _QSIdx, ang: np.float64) -> None: ang *= 0.5 eit = cmath.exp(1j * ang) eitstar = eit.conjugate() lower_mask = (1 << _QSMask(target)) - 1 for i in prange(1 << _QSMask(n_qubits) - 1): lower = i & lower_mask higher = (i & ~lower_mask) << 1 i0 = higher | lower qubits[i0] *= eitstar qubits[i0 + (1 << target)] *= eit
Blueqat
positive
def update_rating(review_id): review = db_review.get_by_id(review_id) <DeepExtract> with db.engine.connect() as connection: result = connection.execute(sqlalchemy.text('\n SELECT row_number\n FROM (\n SELECT row_number() over(order by timestamp),\n id\n FROM revision\n WHERE review_id = :review_id\n ) AS indexed_revisions\n WHERE id = :revision_id\n '), {'review_id': review['id'], 'revision_id': review['last_revision']['id']}) rev_num = result.fetchone() if not rev_num: raise db_exceptions.NoDataFoundException("Can't find the revision with id={} for specified review.".format(review['last_revision']['id'])) rev_num = rev_num[0] </DeepExtract> if rev_num > 1: <DeepExtract> with db.engine.begin() as connection: result = connection.execute(sqlalchemy.text("\n SELECT id,\n review_id,\n timestamp,\n text,\n rating,\n SUM(\n CASE WHEN vote='t' THEN 1 ELSE 0 END\n ) AS votes_positive,\n SUM(\n CASE WHEN vote='f' THEN 1 ELSE 0 END\n ) AS votes_negative\n FROM revision\n LEFT JOIN vote\n ON vote.revision_id = revision.id\n WHERE review_id = :review_id\n GROUP BY revision.id\n ORDER BY timestamp DESC\n OFFSET :offset\n LIMIT :limit\n "), {'review_id': review['id'], 'offset': 0, 'limit': 2}) rows = result.mappings().all() if not rows: raise db_exceptions.NoDataFoundException('Cannot find specified review.') rows = [dict(row) for row in rows] for row in rows: row['rating'] = RATING_SCALE_1_5.get(row['rating']) revisions = rows </DeepExtract> if revisions[0]['rating'] != revisions[1]['rating']: db_avg_rating.update(review['entity_id'], review['entity_type']) else: db_avg_rating.update(review['entity_id'], review['entity_type'])
def update_rating(review_id): review = db_review.get_by_id(review_id) with db.engine.connect() as connection: result = connection.execute(sqlalchemy.text('\n SELECT row_number\n FROM (\n SELECT row_number() over(order by timestamp),\n id\n FROM revision\n WHERE review_id = :review_id\n ) AS indexed_revisions\n WHERE id = :revision_id\n '), {'review_id': review['id'], 'revision_id': review['last_revision']['id']}) rev_num = result.fetchone() if not rev_num: raise db_exceptions.NoDataFoundException("Can't find the revision with id={} for specified review.".format(review['last_revision']['id'])) rev_num = rev_num[0] if rev_num > 1: with db.engine.begin() as connection: result = connection.execute(sqlalchemy.text("\n SELECT id,\n review_id,\n timestamp,\n text,\n rating,\n SUM(\n CASE WHEN vote='t' THEN 1 ELSE 0 END\n ) AS votes_positive,\n SUM(\n CASE WHEN vote='f' THEN 1 ELSE 0 END\n ) AS votes_negative\n FROM revision\n LEFT JOIN vote\n ON vote.revision_id = revision.id\n WHERE review_id = :review_id\n GROUP BY revision.id\n ORDER BY timestamp DESC\n OFFSET :offset\n LIMIT :limit\n "), {'review_id': review['id'], 'offset': 0, 'limit': 2}) rows = result.mappings().all() if not rows: raise db_exceptions.NoDataFoundException('Cannot find specified review.') rows = [dict(row) for row in rows] for row in rows: row['rating'] = RATING_SCALE_1_5.get(row['rating']) revisions = rows if revisions[0]['rating'] != revisions[1]['rating']: db_avg_rating.update(review['entity_id'], review['entity_type']) else: db_avg_rating.update(review['entity_id'], review['entity_type'])
critiquebrainz
positive
def _request(self, method, url, params=None, data=None, type=None): <DeepExtract> if data: data = dict(((key, value if value is not None else '') for (key, value) in data.iteritems())) </DeepExtract> <DeepExtract> if params: params = dict(((key, value if value is not None else '') for (key, value) in params.iteritems())) </DeepExtract> if DEBUG: print('<REQ> %s %r data=%r params=%r' % (method, url, data, params)) type = type or AttributeDict r = requests.request(method, str(url), headers=self.headers, params=params, data=data) data = None if DEBUG: print('<RES> %r' % (r.text,)) if r.ok: <DeepExtract> ret = anyjson.deserialize(r.text) </DeepExtract> if isinstance(ret, dict): return type(ret) return ret r.raise_for_status()
def _request(self, method, url, params=None, data=None, type=None): if data: data = dict(((key, value if value is not None else '') for (key, value) in data.iteritems())) if params: params = dict(((key, value if value is not None else '') for (key, value) in params.iteritems())) if DEBUG: print('<REQ> %s %r data=%r params=%r' % (method, url, data, params)) type = type or AttributeDict r = requests.request(method, str(url), headers=self.headers, params=params, data=data) data = None if DEBUG: print('<RES> %r' % (r.text,)) if r.ok: ret = anyjson.deserialize(r.text) if isinstance(ret, dict): return type(ret) return ret r.raise_for_status()
cyme
positive
@property def ok(self): """Returns True if :attr:`status_code` is less than 400, False if not. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. """ try: <DeepExtract> http_error_msg = '' if isinstance(self.reason, bytes): try: reason = self.reason.decode('utf-8') except UnicodeDecodeError: reason = self.reason.decode('iso-8859-1') else: reason = self.reason if 400 <= self.status_code < 500: http_error_msg = f'{self.status_code} Client Error: {reason} for url: {self.url}' elif 500 <= self.status_code < 600: http_error_msg = f'{self.status_code} Server Error: {reason} for url: {self.url}' if http_error_msg: raise HTTPError(http_error_msg, response=self) </DeepExtract> except HTTPError: return False return True
@property def ok(self): """Returns True if :attr:`status_code` is less than 400, False if not. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. """ try: http_error_msg = '' if isinstance(self.reason, bytes): try: reason = self.reason.decode('utf-8') except UnicodeDecodeError: reason = self.reason.decode('iso-8859-1') else: reason = self.reason if 400 <= self.status_code < 500: http_error_msg = f'{self.status_code} Client Error: {reason} for url: {self.url}' elif 500 <= self.status_code < 600: http_error_msg = f'{self.status_code} Server Error: {reason} for url: {self.url}' if http_error_msg: raise HTTPError(http_error_msg, response=self) except HTTPError: return False return True
BaiduPanFilesTransfers
positive
def process(self, sess, **kwargs): try: sess.run(self.sync_pi) <DeepExtract> new_trial = 0 if self.current_episode >= self.num_episodes_per_trial: self.current_episode = 0 new_trial = 1 if self.current_source_trial >= self.num_source_trials: self.current_trial_mode = 1 self.current_source_trial = 0 self.current_target_trial = 0 if self.current_target_trial >= self.num_target_trials: self.current_trial_mode = 0 self.current_source_trial = 0 self.current_target_trial = 0 if self.current_trial_mode: self.current_target_trial += 1 else: self.current_source_trial += 1 self.current_episode += 1 if self.task == 0: trial_sample_type = 1 else: trial_sample_type = self.current_trial_mode sample_config = dict(episode_config=dict(get_new=True, sample_type=1, timestamp=self.global_timestamp, b_alpha=self.episode_sample_params[0], b_beta=self.episode_sample_params[-1]), trial_config=dict(get_new=new_trial, sample_type=trial_sample_type, timestamp=self.global_timestamp, b_alpha=self.trial_sample_params[0], b_beta=self.trial_sample_params[-1])) data_config = sample_config </DeepExtract> is_test = data_config['trial_config']['sample_type'] and data_config['episode_config']['sample_type'] if is_test: if self.task == 0: <DeepExtract> data = {} done = False data_config['trial_config']['align_left'] = 1 self.log.info('test episode started...') while not done: data = self.get_data(policy=self.local_network, data_sample_config=data_config, policy_sync_op=self.sync_pi) done = np.asarray(data['terminal']).any() self.global_timestamp = data['on_policy'][0]['state']['metadata']['timestamp'][-1] self.log.info('test episode rollout done, global_time: {}, global_step: {}'.format(datetime.datetime.fromtimestamp(self.global_timestamp), sess.run(self.global_step))) self.log.notice('test episode finished, global_time set: {}'.format(datetime.datetime.fromtimestamp(self.global_timestamp))) self.log.notice('final value: {:8.2f} after {} steps @ {}'.format(data['on_policy'][0]['info']['broker_value'][-1], data['on_policy'][0]['info']['step'][-1], data['on_policy'][0]['info']['time'][-1])) data['ep_summary'] = [None] self.process_summary(sess, data) </DeepExtract> else: pass else: <DeepExtract> data = {} done = False data_config['trial_config']['align_left'] = 0 while not done: sess.run(self.sync_pi) wirte_model_summary = self.local_steps % self.model_summary_freq == 0 data = self.get_data(policy=self.local_network, data_sample_config=data_config) done = np.asarray(data['terminal']).any() feed_dict = self.process_data(sess, data, is_train=True, pi=self.local_network) if wirte_model_summary: fetches = [self.train_op, self.model_summary_op, self.inc_step] else: fetches = [self.train_op, self.inc_step] fetched = sess.run(fetches, feed_dict=feed_dict) if wirte_model_summary: model_summary = fetched[-2] else: model_summary = None self.process_summary(sess, data, model_summary) self.local_steps += 1 </DeepExtract> except: msg = 'process() exception occurred' + '\n\nPress `Ctrl-C` or jupyter:[Kernel]->[Interrupt] for clean exit.\n' self.log.exception(msg) raise RuntimeError(msg)
def process(self, sess, **kwargs): try: sess.run(self.sync_pi) new_trial = 0 if self.current_episode >= self.num_episodes_per_trial: self.current_episode = 0 new_trial = 1 if self.current_source_trial >= self.num_source_trials: self.current_trial_mode = 1 self.current_source_trial = 0 self.current_target_trial = 0 if self.current_target_trial >= self.num_target_trials: self.current_trial_mode = 0 self.current_source_trial = 0 self.current_target_trial = 0 if self.current_trial_mode: self.current_target_trial += 1 else: self.current_source_trial += 1 self.current_episode += 1 if self.task == 0: trial_sample_type = 1 else: trial_sample_type = self.current_trial_mode sample_config = dict(episode_config=dict(get_new=True, sample_type=1, timestamp=self.global_timestamp, b_alpha=self.episode_sample_params[0], b_beta=self.episode_sample_params[-1]), trial_config=dict(get_new=new_trial, sample_type=trial_sample_type, timestamp=self.global_timestamp, b_alpha=self.trial_sample_params[0], b_beta=self.trial_sample_params[-1])) data_config = sample_config is_test = data_config['trial_config']['sample_type'] and data_config['episode_config']['sample_type'] if is_test: if self.task == 0: data = {} done = False data_config['trial_config']['align_left'] = 1 self.log.info('test episode started...') while not done: data = self.get_data(policy=self.local_network, data_sample_config=data_config, policy_sync_op=self.sync_pi) done = np.asarray(data['terminal']).any() self.global_timestamp = data['on_policy'][0]['state']['metadata']['timestamp'][-1] self.log.info('test episode rollout done, global_time: {}, global_step: {}'.format(datetime.datetime.fromtimestamp(self.global_timestamp), sess.run(self.global_step))) self.log.notice('test episode finished, global_time set: {}'.format(datetime.datetime.fromtimestamp(self.global_timestamp))) self.log.notice('final value: {:8.2f} after {} steps @ {}'.format(data['on_policy'][0]['info']['broker_value'][-1], data['on_policy'][0]['info']['step'][-1], data['on_policy'][0]['info']['time'][-1])) data['ep_summary'] = [None] self.process_summary(sess, data) else: pass else: data = {} done = False data_config['trial_config']['align_left'] = 0 while not done: sess.run(self.sync_pi) wirte_model_summary = self.local_steps % self.model_summary_freq == 0 data = self.get_data(policy=self.local_network, data_sample_config=data_config) done = np.asarray(data['terminal']).any() feed_dict = self.process_data(sess, data, is_train=True, pi=self.local_network) if wirte_model_summary: fetches = [self.train_op, self.model_summary_op, self.inc_step] else: fetches = [self.train_op, self.inc_step] fetched = sess.run(fetches, feed_dict=feed_dict) if wirte_model_summary: model_summary = fetched[-2] else: model_summary = None self.process_summary(sess, data, model_summary) self.local_steps += 1 except: msg = 'process() exception occurred' + '\n\nPress `Ctrl-C` or jupyter:[Kernel]->[Interrupt] for clean exit.\n' self.log.exception(msg) raise RuntimeError(msg)
btgym
positive
def get(self, path_segment='', owner=None, app=None, sharing=None, **query): <DeepExtract> if owner is None and app is None and (sharing is None): if self._state is not None and 'access' in self._state: (owner, app, sharing) = (self._state.access.owner, self._state.access.app, self._state.access.sharing) else: (owner, app, sharing) = (self.service.namespace['owner'], self.service.namespace['app'], self.service.namespace['sharing']) else: (owner, app, sharing) = (owner, app, sharing) </DeepExtract> return super(Entity, self).get(path_segment, owner=owner, app=app, sharing=sharing, **query)
def get(self, path_segment='', owner=None, app=None, sharing=None, **query): if owner is None and app is None and (sharing is None): if self._state is not None and 'access' in self._state: (owner, app, sharing) = (self._state.access.owner, self._state.access.app, self._state.access.sharing) else: (owner, app, sharing) = (self.service.namespace['owner'], self.service.namespace['app'], self.service.namespace['sharing']) else: (owner, app, sharing) = (owner, app, sharing) return super(Entity, self).get(path_segment, owner=owner, app=app, sharing=sharing, **query)
CobaltSplunk
positive
def test_osbs_builder_with_fetch_artifacts_url_file_creation_5(tmpdir, mocker, caplog): """ Checks whether the fetch-artifacts-url.yaml file is generated with URL artifact with sha256 checksum. """ caplog.set_level(logging.DEBUG, logger='cekit') mocker.patch('cekit.tools.decision', return_value=True) mocker.patch('cekit.tools.urlopen') mocker.patch('cekit.builders.osbs.DistGit.push') tmpdir.mkdir('osbs').mkdir('repo') res = mocker.Mock() res.getcode.return_value = 200 res.read.side_effect = [b'test', None] res.getheader.return_value = 0 mocker.patch('cekit.tools.urlopen', return_value=res) cfgcontents = '\n[common]\nfetch_artifact_domains = https://foo.domain, http://another.domain/path/name\nssl_verify = False\n ' cfgfile = os.path.join(str(tmpdir), 'config') with open(cfgfile, 'w') as _file: _file.write(cfgcontents) descriptor = copy.deepcopy(image_descriptor) descriptor['artifacts'] = [{'name': 'artifact_name', 'sha256': '123456', 'url': 'https://foo.domain/bar.jar'}, {'name': 'another_artifact_name', 'sha256': '654321', 'url': 'http://another.domain/path/name/bar.jar'}, {'name': 'not_allowed_in_fetch', 'sha256': '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08', 'url': 'http://another.domain/wrong.jar'}] <DeepExtract> if build_command is None: build_command = ['build', 'osbs'] if general_command is None: general_command = ['--redhat'] if flag == OSBSTestFlags.NO_SKIP_COMMITTING: skip_committing = False else: skip_committing = True mocker.patch('cekit.builders.osbs.OSBSBuilder.dependencies', return_value={}) mocker.patch('cekit.builders.osbs.OSBSBuilder._wait_for_osbs_task') mocker.patch('cekit.builders.osbs.DistGit.prepare') side_affect = [subprocess.CompletedProcess('', 0, 'true'), subprocess.CompletedProcess('', 0, '/home/repos/path'), subprocess.CompletedProcess('', 0, 'branch'), subprocess.CompletedProcess('', 0, '3b9283cb26b35511517ff5c0c3e11f490cba8feb'), subprocess.CompletedProcess('ls-files', 0, '')] if flag == OSBSTestFlags.RM_FETCH_FILE: side_affect.append(subprocess.CompletedProcess('rm', 0, '')) side_affect.append(subprocess.CompletedProcess('add', 0, '')) if flag == OSBSTestFlags.MULTI_ADD: side_affect.append(subprocess.CompletedProcess('add', 0, '')) side_affect.extend([subprocess.CompletedProcess('diff-index', int(skip_committing), ''), subprocess.CompletedProcess('commit', 0, ''), subprocess.CompletedProcess('', 0, ''), subprocess.CompletedProcess('diff-files', 0, '')]) if flag == OSBSTestFlags.TRIGGER_GIT_FAILURE: side_affect.append(subprocess.CalledProcessError(1, 'git', output='A GIT ERROR')) else: side_affect.append(subprocess.CompletedProcess('push', 0, '')) side_affect.extend([subprocess.CompletedProcess('', 0, ''), subprocess.CompletedProcess('', 0, ''), subprocess.CompletedProcess('', 0, 'ssh://someuser@somehost.com/containers/somerepo'), subprocess.CompletedProcess('', 0, '3b9283cb26b35511517ff5c0c3e11f490cba8feb'), subprocess.CompletedProcess('', 0, '1234'), subprocess.CompletedProcess('', 0, 'UUU'), subprocess.CompletedProcess('', 0, '')]) patched_run = mocker.patch.object(subprocess, 'run', side_effect=side_affect) with open(os.path.join(str(tmpdir), 'image.yaml'), 'w') as fd: yaml.dump(descriptor, fd, default_flow_style=False) run_cekit(str(tmpdir), general_command + ['-v', '--work-dir', str(tmpdir), '--config', 'config'] + build_command, return_code=return_code) if flag == OSBSTestFlags.RM_FETCH_FILE: with contextlib.suppress(FileNotFoundError): os.remove(os.path.join(str(tmpdir), 'osbs', 'repo', 'fetch-artifacts-url.yaml')) with 
contextlib.suppress(FileNotFoundError): os.remove(os.path.join(str(tmpdir), 'osbs', 'repo', 'fetch-artifacts-pnc.yaml')) return patched_run </DeepExtract> with open(os.path.join(str(tmpdir), 'target', 'image', 'fetch-artifacts-url.yaml'), 'r') as _file: fetch_artifacts = yaml.safe_load(_file) assert len(fetch_artifacts) == 2 assert fetch_artifacts[0] == {'sha256': '123456', 'target': 'artifact_name', 'url': 'https://foo.domain/bar.jar'} assert fetch_artifacts[1] == {'sha256': '654321', 'target': 'another_artifact_name', 'url': 'http://another.domain/path/name/bar.jar'} assert "Ignoring http://another.domain/wrong.jar as restricted to ['https://foo.domain', 'http://another.domain/path/name']" in caplog.text assert "Executing 'rhpkg new-sources not_allowed_in_fetch'" in caplog.text assert "Artifact 'artifact_name' (as URL) added to fetch-artifacts-url.yaml" in caplog.text
def test_osbs_builder_with_fetch_artifacts_url_file_creation_5(tmpdir, mocker, caplog): """ Checks whether the fetch-artifacts-url.yaml file is generated with URL artifact with sha256 checksum. """ caplog.set_level(logging.DEBUG, logger='cekit') mocker.patch('cekit.tools.decision', return_value=True) mocker.patch('cekit.tools.urlopen') mocker.patch('cekit.builders.osbs.DistGit.push') tmpdir.mkdir('osbs').mkdir('repo') res = mocker.Mock() res.getcode.return_value = 200 res.read.side_effect = [b'test', None] res.getheader.return_value = 0 mocker.patch('cekit.tools.urlopen', return_value=res) cfgcontents = '\n[common]\nfetch_artifact_domains = https://foo.domain, http://another.domain/path/name\nssl_verify = False\n ' cfgfile = os.path.join(str(tmpdir), 'config') with open(cfgfile, 'w') as _file: _file.write(cfgcontents) descriptor = copy.deepcopy(image_descriptor) descriptor['artifacts'] = [{'name': 'artifact_name', 'sha256': '123456', 'url': 'https://foo.domain/bar.jar'}, {'name': 'another_artifact_name', 'sha256': '654321', 'url': 'http://another.domain/path/name/bar.jar'}, {'name': 'not_allowed_in_fetch', 'sha256': '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08', 'url': 'http://another.domain/wrong.jar'}] if build_command is None: build_command = ['build', 'osbs'] if general_command is None: general_command = ['--redhat'] if flag == OSBSTestFlags.NO_SKIP_COMMITTING: skip_committing = False else: skip_committing = True mocker.patch('cekit.builders.osbs.OSBSBuilder.dependencies', return_value={}) mocker.patch('cekit.builders.osbs.OSBSBuilder._wait_for_osbs_task') mocker.patch('cekit.builders.osbs.DistGit.prepare') side_affect = [subprocess.CompletedProcess('', 0, 'true'), subprocess.CompletedProcess('', 0, '/home/repos/path'), subprocess.CompletedProcess('', 0, 'branch'), subprocess.CompletedProcess('', 0, '3b9283cb26b35511517ff5c0c3e11f490cba8feb'), subprocess.CompletedProcess('ls-files', 0, '')] if flag == OSBSTestFlags.RM_FETCH_FILE: side_affect.append(subprocess.CompletedProcess('rm', 0, '')) side_affect.append(subprocess.CompletedProcess('add', 0, '')) if flag == OSBSTestFlags.MULTI_ADD: side_affect.append(subprocess.CompletedProcess('add', 0, '')) side_affect.extend([subprocess.CompletedProcess('diff-index', int(skip_committing), ''), subprocess.CompletedProcess('commit', 0, ''), subprocess.CompletedProcess('', 0, ''), subprocess.CompletedProcess('diff-files', 0, '')]) if flag == OSBSTestFlags.TRIGGER_GIT_FAILURE: side_affect.append(subprocess.CalledProcessError(1, 'git', output='A GIT ERROR')) else: side_affect.append(subprocess.CompletedProcess('push', 0, '')) side_affect.extend([subprocess.CompletedProcess('', 0, ''), subprocess.CompletedProcess('', 0, ''), subprocess.CompletedProcess('', 0, 'ssh://someuser@somehost.com/containers/somerepo'), subprocess.CompletedProcess('', 0, '3b9283cb26b35511517ff5c0c3e11f490cba8feb'), subprocess.CompletedProcess('', 0, '1234'), subprocess.CompletedProcess('', 0, 'UUU'), subprocess.CompletedProcess('', 0, '')]) patched_run = mocker.patch.object(subprocess, 'run', side_effect=side_affect) with open(os.path.join(str(tmpdir), 'image.yaml'), 'w') as fd: yaml.dump(descriptor, fd, default_flow_style=False) run_cekit(str(tmpdir), general_command + ['-v', '--work-dir', str(tmpdir), '--config', 'config'] + build_command, return_code=return_code) if flag == OSBSTestFlags.RM_FETCH_FILE: with contextlib.suppress(FileNotFoundError): os.remove(os.path.join(str(tmpdir), 'osbs', 'repo', 'fetch-artifacts-url.yaml')) with 
contextlib.suppress(FileNotFoundError): os.remove(os.path.join(str(tmpdir), 'osbs', 'repo', 'fetch-artifacts-pnc.yaml')) return patched_run with open(os.path.join(str(tmpdir), 'target', 'image', 'fetch-artifacts-url.yaml'), 'r') as _file: fetch_artifacts = yaml.safe_load(_file) assert len(fetch_artifacts) == 2 assert fetch_artifacts[0] == {'sha256': '123456', 'target': 'artifact_name', 'url': 'https://foo.domain/bar.jar'} assert fetch_artifacts[1] == {'sha256': '654321', 'target': 'another_artifact_name', 'url': 'http://another.domain/path/name/bar.jar'} assert "Ignoring http://another.domain/wrong.jar as restricted to ['https://foo.domain', 'http://another.domain/path/name']" in caplog.text assert "Executing 'rhpkg new-sources not_allowed_in_fetch'" in caplog.text assert "Artifact 'artifact_name' (as URL) added to fetch-artifacts-url.yaml" in caplog.text
cekit
positive
def add_simple_email_headers(message: Message, locale: str='en_US', use_short_email: bool=False) -> typing.Tuple[typing.Tuple[str, str], typing.Tuple[str, str]]: """ Adds the key email headers to a Mime part :param message: the Mime part to add headers to :param locale: change this to generate locale specific "real names" and subject :param use_short_email: produces a "To" or "From" that is only the email address if True """ <DeepExtract> fake = faker.Faker(locale=locale) first_name = fake.first_name() last_name = last_name_override or fake.last_name() real_name = None if use_short_email else real_name_format.format(first_name=first_name, last_name=last_name) email_address = '{}.{}@{}'.format(first_name.replace(' ', '').encode('ascii', 'ignore').lower().decode(), last_name.replace(' ', '').encode('ascii', 'ignore').lower().decode(), get_random_string(5) + fake.domain_name()) to_meta = (email.utils.formataddr((real_name, email_address)), email_address, first_name, last_name) </DeepExtract> <DeepExtract> fake = faker.Faker(locale=locale) first_name = fake.first_name() last_name = last_name_override or fake.last_name() real_name = None if use_short_email else real_name_format.format(first_name=first_name, last_name=last_name) email_address = '{}.{}@{}'.format(first_name.replace(' ', '').encode('ascii', 'ignore').lower().decode(), last_name.replace(' ', '').encode('ascii', 'ignore').lower().decode(), get_random_string(5) + fake.domain_name()) from_meta = (email.utils.formataddr((real_name, email_address)), email_address, first_name, last_name) </DeepExtract> <DeepExtract> string = factory.Faker('sentence').evaluate({}, None, {'locale': locale}) while len(string) < min_length: string += factory.Faker('sentence').evaluate({}, None, {'locale': locale}) message['Subject'] = string </DeepExtract> message['From'] = from_meta[0] message['To'] = to_meta[0] return (from_meta, to_meta)
def add_simple_email_headers(message: Message, locale: str='en_US', use_short_email: bool=False) -> typing.Tuple[typing.Tuple[str, str], typing.Tuple[str, str]]: """ Adds the key email headers to a Mime part :param message: the Mime part to add headers to :param locale: change this to generate locale specific "real names" and subject :param use_short_email: produces a "To" or "From" that is only the email address if True """ fake = faker.Faker(locale=locale) first_name = fake.first_name() last_name = last_name_override or fake.last_name() real_name = None if use_short_email else real_name_format.format(first_name=first_name, last_name=last_name) email_address = '{}.{}@{}'.format(first_name.replace(' ', '').encode('ascii', 'ignore').lower().decode(), last_name.replace(' ', '').encode('ascii', 'ignore').lower().decode(), get_random_string(5) + fake.domain_name()) to_meta = (email.utils.formataddr((real_name, email_address)), email_address, first_name, last_name) fake = faker.Faker(locale=locale) first_name = fake.first_name() last_name = last_name_override or fake.last_name() real_name = None if use_short_email else real_name_format.format(first_name=first_name, last_name=last_name) email_address = '{}.{}@{}'.format(first_name.replace(' ', '').encode('ascii', 'ignore').lower().decode(), last_name.replace(' ', '').encode('ascii', 'ignore').lower().decode(), get_random_string(5) + fake.domain_name()) from_meta = (email.utils.formataddr((real_name, email_address)), email_address, first_name, last_name) string = factory.Faker('sentence').evaluate({}, None, {'locale': locale}) while len(string) < min_length: string += factory.Faker('sentence').evaluate({}, None, {'locale': locale}) message['Subject'] = string message['From'] = from_meta[0] message['To'] = to_meta[0] return (from_meta, to_meta)
django-helpdesk
positive
def execute(self, context): try: keywords = self.as_keywords(ignore=('filter_glob',)) <DeepExtract> if not targets: targets = context.selected_objects if not targets: raise Fatal('No object selected') vgmap = json.load(open(filepath, 'r')) if reverse: vgmap = {int(v): int(k) for (k, v) in vgmap.items()} else: vgmap = {k: int(v) for (k, v) in vgmap.items()} for obj in targets: if commit: raise Fatal('commit not yet implemented') prop_name = '3DMigoto:VGMap:' + suffix obj[prop_name] = keys_to_strings(vgmap) if rename: for (k, v) in vgmap.items(): if str(k) in obj.vertex_groups.keys(): continue if str(v) in obj.vertex_groups.keys(): obj.vertex_groups[str(v)].name = k else: obj.vertex_groups.new(name=str(k)) if cleanup: for vg in obj.vertex_groups: if vg.name not in vgmap: obj.vertex_groups.remove(vg) if '3DMigoto:VBLayout' not in obj: self.report({'WARNING'}, '%s is not a 3DMigoto mesh. Vertex Group Map custom property applied anyway' % obj.name) else: self.report({'INFO'}, 'Applied vgmap to %s' % obj.name) </DeepExtract> except Fatal as e: self.report({'ERROR'}, str(e)) return {'FINISHED'}
def execute(self, context): try: keywords = self.as_keywords(ignore=('filter_glob',)) if not targets: targets = context.selected_objects if not targets: raise Fatal('No object selected') vgmap = json.load(open(filepath, 'r')) if reverse: vgmap = {int(v): int(k) for (k, v) in vgmap.items()} else: vgmap = {k: int(v) for (k, v) in vgmap.items()} for obj in targets: if commit: raise Fatal('commit not yet implemented') prop_name = '3DMigoto:VGMap:' + suffix obj[prop_name] = keys_to_strings(vgmap) if rename: for (k, v) in vgmap.items(): if str(k) in obj.vertex_groups.keys(): continue if str(v) in obj.vertex_groups.keys(): obj.vertex_groups[str(v)].name = k else: obj.vertex_groups.new(name=str(k)) if cleanup: for vg in obj.vertex_groups: if vg.name not in vgmap: obj.vertex_groups.remove(vg) if '3DMigoto:VBLayout' not in obj: self.report({'WARNING'}, '%s is not a 3DMigoto mesh. Vertex Group Map custom property applied anyway' % obj.name) else: self.report({'INFO'}, 'Applied vgmap to %s' % obj.name) except Fatal as e: self.report({'ERROR'}, str(e)) return {'FINISHED'}
3d-fixes
positive
def add_keypoint_rcnn_blobs(blobs, roidb, fg_rois_per_image, fg_inds, im_scale, batch_idx, fg_thresh): """Add Mask R-CNN keypoint specific blobs to the given blobs dictionary.""" gt_inds = np.where(roidb['gt_classes'] > 0)[0] max_overlaps = roidb['max_overlaps'] gt_keypoints = roidb['gt_keypoints'] ind_kp = gt_inds[roidb['box_to_gt_ind_map']] <DeepExtract> x_within = np.logical_and(gt_keypoints[ind_kp, :, :][:, 0, :] >= np.expand_dims(roidb['boxes'][:, 0], axis=1), gt_keypoints[ind_kp, :, :][:, 0, :] <= np.expand_dims(roidb['boxes'][:, 2], axis=1)) y_within = np.logical_and(gt_keypoints[ind_kp, :, :][:, 1, :] >= np.expand_dims(roidb['boxes'][:, 1], axis=1), gt_keypoints[ind_kp, :, :][:, 1, :] <= np.expand_dims(roidb['boxes'][:, 3], axis=1)) within_box = np.logical_and(x_within, y_within) </DeepExtract> vis_kp = gt_keypoints[ind_kp, 2, :] > 0 is_visible = np.sum(np.logical_and(vis_kp, within_box), axis=1) > 0 kp_fg_inds = np.where(np.logical_and(max_overlaps >= fg_thresh, is_visible))[0] kp_fg_rois_per_this_image = np.minimum(fg_rois_per_image, kp_fg_inds.size) if kp_fg_inds.size > kp_fg_rois_per_this_image: kp_fg_inds = np.random.choice(kp_fg_inds, size=kp_fg_rois_per_this_image, replace=False) sampled_fg_rois = roidb['boxes'][kp_fg_inds] box_to_gt_ind_map = roidb['box_to_gt_ind_map'][kp_fg_inds] num_keypoints = gt_keypoints.shape[2] sampled_keypoints = -np.ones((len(sampled_fg_rois), gt_keypoints.shape[1], num_keypoints), dtype=gt_keypoints.dtype) for ii in range(len(sampled_fg_rois)): ind = box_to_gt_ind_map[ii] if ind >= 0: sampled_keypoints[ii, :, :] = gt_keypoints[gt_inds[ind], :, :] assert np.sum(sampled_keypoints[ii, 2, :]) > 0 (heats, weights) = keypoint_utils.keypoints_to_heatmap_labels(sampled_keypoints, sampled_fg_rois) shape = (sampled_fg_rois.shape[0] * cfg.KRCNN.NUM_KEYPOINTS, 1) heats = heats.reshape(shape) weights = weights.reshape(shape) sampled_fg_rois *= im_scale repeated_batch_idx = batch_idx * blob_utils.ones((sampled_fg_rois.shape[0], 1)) sampled_fg_rois = np.hstack((repeated_batch_idx, sampled_fg_rois)) blobs['keypoint_rois'] = sampled_fg_rois blobs['keypoint_locations_int32'] = heats.astype(np.int32, copy=False) blobs['keypoint_weights'] = weights
def add_keypoint_rcnn_blobs(blobs, roidb, fg_rois_per_image, fg_inds, im_scale, batch_idx, fg_thresh): """Add Mask R-CNN keypoint specific blobs to the given blobs dictionary.""" gt_inds = np.where(roidb['gt_classes'] > 0)[0] max_overlaps = roidb['max_overlaps'] gt_keypoints = roidb['gt_keypoints'] ind_kp = gt_inds[roidb['box_to_gt_ind_map']] x_within = np.logical_and(gt_keypoints[ind_kp, :, :][:, 0, :] >= np.expand_dims(roidb['boxes'][:, 0], axis=1), gt_keypoints[ind_kp, :, :][:, 0, :] <= np.expand_dims(roidb['boxes'][:, 2], axis=1)) y_within = np.logical_and(gt_keypoints[ind_kp, :, :][:, 1, :] >= np.expand_dims(roidb['boxes'][:, 1], axis=1), gt_keypoints[ind_kp, :, :][:, 1, :] <= np.expand_dims(roidb['boxes'][:, 3], axis=1)) within_box = np.logical_and(x_within, y_within) vis_kp = gt_keypoints[ind_kp, 2, :] > 0 is_visible = np.sum(np.logical_and(vis_kp, within_box), axis=1) > 0 kp_fg_inds = np.where(np.logical_and(max_overlaps >= fg_thresh, is_visible))[0] kp_fg_rois_per_this_image = np.minimum(fg_rois_per_image, kp_fg_inds.size) if kp_fg_inds.size > kp_fg_rois_per_this_image: kp_fg_inds = np.random.choice(kp_fg_inds, size=kp_fg_rois_per_this_image, replace=False) sampled_fg_rois = roidb['boxes'][kp_fg_inds] box_to_gt_ind_map = roidb['box_to_gt_ind_map'][kp_fg_inds] num_keypoints = gt_keypoints.shape[2] sampled_keypoints = -np.ones((len(sampled_fg_rois), gt_keypoints.shape[1], num_keypoints), dtype=gt_keypoints.dtype) for ii in range(len(sampled_fg_rois)): ind = box_to_gt_ind_map[ii] if ind >= 0: sampled_keypoints[ii, :, :] = gt_keypoints[gt_inds[ind], :, :] assert np.sum(sampled_keypoints[ii, 2, :]) > 0 (heats, weights) = keypoint_utils.keypoints_to_heatmap_labels(sampled_keypoints, sampled_fg_rois) shape = (sampled_fg_rois.shape[0] * cfg.KRCNN.NUM_KEYPOINTS, 1) heats = heats.reshape(shape) weights = weights.reshape(shape) sampled_fg_rois *= im_scale repeated_batch_idx = batch_idx * blob_utils.ones((sampled_fg_rois.shape[0], 1)) sampled_fg_rois = np.hstack((repeated_batch_idx, sampled_fg_rois)) blobs['keypoint_rois'] = sampled_fg_rois blobs['keypoint_locations_int32'] = heats.astype(np.int32, copy=False) blobs['keypoint_weights'] = weights
CBNet
positive
def put_in_currentdata(key, value): """Adds a value in the current run data""" if key: <DeepExtract> runctx = framework_currentdata() curr_data = json_from_file(runctx) if not curr_data: curr_data = {} curr_data = curr_data </DeepExtract> if key in curr_data: val = curr_data[key] if isinstance(val, list): val.append(value) else: curr_data[key] = value else: curr_data[key] = value <DeepExtract> if not curr_data: curr_data = {} runctx = framework_currentdata() save_json_to_file(curr_data, runctx) </DeepExtract>
def put_in_currentdata(key, value): """Adds a value in the current run data""" if key: runctx = framework_currentdata() curr_data = json_from_file(runctx) if not curr_data: curr_data = {} curr_data = curr_data if key in curr_data: val = curr_data[key] if isinstance(val, list): val.append(value) else: curr_data[key] = value else: curr_data[key] = value if not curr_data: curr_data = {} runctx = framework_currentdata() save_json_to_file(curr_data, runctx)
cloud-validation-framework
positive
def __init__(self, num_layers, heads, head_convs, num_stacks=1, opt=None): super(GenericNetwork, self).__init__() print('Using generic model with backbone {} and neck {}'.format(opt.backbone, opt.neck)) if opt is not None and opt.head_kernel != 3: print('Using head kernel:', opt.head_kernel) head_kernel = opt.head_kernel else: head_kernel = 3 self.opt = opt self.backbone = backbone_factory[opt.backbone](opt=opt) channels = self.backbone.channels self.neck = neck_factory[opt.neck](opt=opt, channels=channels) last_channel = self.neck.out_channel self.num_stacks = num_stacks self.heads = heads for head in self.heads: classes = self.heads[head] head_conv = head_convs[head] if len(head_conv) > 0: out = nn.Conv2d(head_conv[-1], classes, kernel_size=1, stride=1, padding=0, bias=True) conv = nn.Conv2d(last_channel, head_conv[0], kernel_size=head_kernel, padding=head_kernel // 2, bias=True) convs = [conv] for k in range(1, len(head_conv)): convs.append(nn.Conv2d(head_conv[k - 1], head_conv[k], kernel_size=1, bias=True)) if len(convs) == 1: fc = nn.Sequential(conv, nn.ReLU(inplace=True), out) elif len(convs) == 2: fc = nn.Sequential(convs[0], nn.ReLU(inplace=True), convs[1], nn.ReLU(inplace=True), out) elif len(convs) == 3: fc = nn.Sequential(convs[0], nn.ReLU(inplace=True), convs[1], nn.ReLU(inplace=True), convs[2], nn.ReLU(inplace=True), out) elif len(convs) == 4: fc = nn.Sequential(convs[0], nn.ReLU(inplace=True), convs[1], nn.ReLU(inplace=True), convs[2], nn.ReLU(inplace=True), convs[3], nn.ReLU(inplace=True), out) if 'hm' in head: fc[-1].bias.data.fill_(opt.prior_bias) else: <DeepExtract> for m in fc.modules(): if isinstance(m, nn.Conv2d): if m.bias is not None: nn.init.constant_(m.bias, 0) </DeepExtract> else: fc = nn.Conv2d(last_channel, classes, kernel_size=1, stride=1, padding=0, bias=True) if 'hm' in head: fc.bias.data.fill_(opt.prior_bias) else: <DeepExtract> for m in fc.modules(): if isinstance(m, nn.Conv2d): if m.bias is not None: nn.init.constant_(m.bias, 0) </DeepExtract> self.__setattr__(head, fc)
def __init__(self, num_layers, heads, head_convs, num_stacks=1, opt=None): super(GenericNetwork, self).__init__() print('Using generic model with backbone {} and neck {}'.format(opt.backbone, opt.neck)) if opt is not None and opt.head_kernel != 3: print('Using head kernel:', opt.head_kernel) head_kernel = opt.head_kernel else: head_kernel = 3 self.opt = opt self.backbone = backbone_factory[opt.backbone](opt=opt) channels = self.backbone.channels self.neck = neck_factory[opt.neck](opt=opt, channels=channels) last_channel = self.neck.out_channel self.num_stacks = num_stacks self.heads = heads for head in self.heads: classes = self.heads[head] head_conv = head_convs[head] if len(head_conv) > 0: out = nn.Conv2d(head_conv[-1], classes, kernel_size=1, stride=1, padding=0, bias=True) conv = nn.Conv2d(last_channel, head_conv[0], kernel_size=head_kernel, padding=head_kernel // 2, bias=True) convs = [conv] for k in range(1, len(head_conv)): convs.append(nn.Conv2d(head_conv[k - 1], head_conv[k], kernel_size=1, bias=True)) if len(convs) == 1: fc = nn.Sequential(conv, nn.ReLU(inplace=True), out) elif len(convs) == 2: fc = nn.Sequential(convs[0], nn.ReLU(inplace=True), convs[1], nn.ReLU(inplace=True), out) elif len(convs) == 3: fc = nn.Sequential(convs[0], nn.ReLU(inplace=True), convs[1], nn.ReLU(inplace=True), convs[2], nn.ReLU(inplace=True), out) elif len(convs) == 4: fc = nn.Sequential(convs[0], nn.ReLU(inplace=True), convs[1], nn.ReLU(inplace=True), convs[2], nn.ReLU(inplace=True), convs[3], nn.ReLU(inplace=True), out) if 'hm' in head: fc[-1].bias.data.fill_(opt.prior_bias) else: for m in fc.modules(): if isinstance(m, nn.Conv2d): if m.bias is not None: nn.init.constant_(m.bias, 0) else: fc = nn.Conv2d(last_channel, classes, kernel_size=1, stride=1, padding=0, bias=True) if 'hm' in head: fc.bias.data.fill_(opt.prior_bias) else: for m in fc.modules(): if isinstance(m, nn.Conv2d): if m.bias is not None: nn.init.constant_(m.bias, 0) self.__setattr__(head, fc)
CenterFusion
positive
def __call__(self, *args, **kwargs): try: <DeepExtract> test_cases = [] for (name, data) in self.tests_settings().items(): data_accessor = Roamer(data) env = data_accessor.env() or [] if isinstance(env, dict): env = ['%s=%s' % (k, v) for (k, v) in env.items()] test = ExecutionTest(appdir=self.app_dir, name=name, image=data_accessor.image(), command=data_accessor.command(), before_command=data_accessor.before_command(), env=env) test_cases.append(test) test_cases = test_cases </DeepExtract> except DockerException as e: logging.error('Docker error : ' + str(e)) logging.error('(Is docker installed/started ?)') logging.error('Tests will be skipped') return try: for test in test_cases: test.run() except TestFailed as err: logging.error('test failed') logging.error(err) exit(1)
def __call__(self, *args, **kwargs): try: test_cases = [] for (name, data) in self.tests_settings().items(): data_accessor = Roamer(data) env = data_accessor.env() or [] if isinstance(env, dict): env = ['%s=%s' % (k, v) for (k, v) in env.items()] test = ExecutionTest(appdir=self.app_dir, name=name, image=data_accessor.image(), command=data_accessor.command(), before_command=data_accessor.before_command(), env=env) test_cases.append(test) test_cases = test_cases except DockerException as e: logging.error('Docker error : ' + str(e)) logging.error('(Is docker installed/started ?)') logging.error('Tests will be skipped') return try: for test in test_cases: test.run() except TestFailed as err: logging.error('test failed') logging.error(err) exit(1)
appimage-builder
positive
def get_all_response_headers(self): <DeepExtract> outputs = [] while True: try: outputs.append(self.get_output()) except asyncio.TimeoutError: break outputs = outputs </DeepExtract> response_start = next((o for o in outputs if o['type'] == 'http.response.start')) return response_start['headers']
def get_all_response_headers(self): outputs = [] while True: try: outputs.append(self.get_output()) except asyncio.TimeoutError: break outputs = outputs response_start = next((o for o in outputs if o['type'] == 'http.response.start')) return response_start['headers']
client_python
positive
def test_v_t_raises_CoordinateError(cartesian_differential, spherical_differential, bl_differential): M = 1e+24 * u.kg a = 0.0 * u.one ms = Schwarzschild(coords=spherical_differential, M=M) mk = Kerr(coords=bl_differential, M=M, a=a) (cd, sd, bd) = (cartesian_differential, spherical_differential, bl_differential) def cd_s(cd, ms): return cd.velocity(metric=ms) def bd_s(bd, ms): return bd.velocity(metric=ms) def cd_k(cd, mk): return cd.velocity(metric=mk) def sd_k(sd, mk): return sd.velocity(metric=mk) with pytest.raises(CoordinateError): <DeepExtract> return cd.velocity(metric=ms) </DeepExtract> with pytest.raises(CoordinateError): <DeepExtract> return bd.velocity(metric=ms) </DeepExtract> with pytest.raises(CoordinateError): <DeepExtract> return cd.velocity(metric=mk) </DeepExtract> with pytest.raises(CoordinateError): <DeepExtract> return sd.velocity(metric=mk) </DeepExtract>
def test_v_t_raises_CoordinateError(cartesian_differential, spherical_differential, bl_differential): M = 1e+24 * u.kg a = 0.0 * u.one ms = Schwarzschild(coords=spherical_differential, M=M) mk = Kerr(coords=bl_differential, M=M, a=a) (cd, sd, bd) = (cartesian_differential, spherical_differential, bl_differential) def cd_s(cd, ms): return cd.velocity(metric=ms) def bd_s(bd, ms): return bd.velocity(metric=ms) def cd_k(cd, mk): return cd.velocity(metric=mk) def sd_k(sd, mk): return sd.velocity(metric=mk) with pytest.raises(CoordinateError): return cd.velocity(metric=ms) with pytest.raises(CoordinateError): return bd.velocity(metric=ms) with pytest.raises(CoordinateError): return cd.velocity(metric=mk) with pytest.raises(CoordinateError): return sd.velocity(metric=mk)
einsteinpy
positive
def test_perspective_queue_get_active_menu(display): <DeepExtract> display._active_perspective = 2 perspective = display.perspectives[2] </DeepExtract> perspective._active_window = 0 assert perspective._get_active_menu() == perspective._queue_menu
def test_perspective_queue_get_active_menu(display): display._active_perspective = 2 perspective = display.perspectives[2] perspective._active_window = 0 assert perspective._get_active_menu() == perspective._queue_menu
castero
positive
def fit(self, X, y, y_error=1): kwds = {} if self.kwds is not None: kwds.update(self.kwds) kwds['fit_intercept'] = False <DeepExtract> model = self._regressors.get(self.regularization.lower(), None) if model is None: raise ValueError("regularization='{}' unrecognized".format(self.regularization)) model = model </DeepExtract> self.clf_ = model(**kwds) <DeepExtract> X = np.asarray(X) if self.fit_intercept: X = np.hstack([np.ones([X.shape[0], 1]), X]) X = X </DeepExtract> <DeepExtract> X = np.atleast_2d(X) y = np.asarray(y) y_error = np.asarray(y_error) assert X.ndim == 2 assert y.ndim == 1 assert X.shape[0] == y.shape[0] if y_error.ndim == 0: (X, y) = (X / y_error, y / y_error) elif y_error.ndim == 1: assert y_error.shape == y.shape (X_out, y_out) = (X / y_error[:, None], y / y_error) elif y_error.ndim == 2: assert y_error.shape == (y.size, y.size) (evals, evecs) = np.linalg.eigh(y_error) X_out = np.dot(evecs * evals ** (-0.5), np.dot(evecs.T, X)) y_out = np.dot(evecs * evals ** (-0.5), np.dot(evecs.T, y)) else: raise ValueError('shape of y_error does not match that of y') (X, y) = (X_out, y_out) </DeepExtract> self.clf_.fit(X, y) return self
def fit(self, X, y, y_error=1): kwds = {} if self.kwds is not None: kwds.update(self.kwds) kwds['fit_intercept'] = False model = self._regressors.get(self.regularization.lower(), None) if model is None: raise ValueError("regularization='{}' unrecognized".format(self.regularization)) model = model self.clf_ = model(**kwds) X = np.asarray(X) if self.fit_intercept: X = np.hstack([np.ones([X.shape[0], 1]), X]) X = X X = np.atleast_2d(X) y = np.asarray(y) y_error = np.asarray(y_error) assert X.ndim == 2 assert y.ndim == 1 assert X.shape[0] == y.shape[0] if y_error.ndim == 0: (X, y) = (X / y_error, y / y_error) elif y_error.ndim == 1: assert y_error.shape == y.shape (X_out, y_out) = (X / y_error[:, None], y / y_error) elif y_error.ndim == 2: assert y_error.shape == (y.size, y.size) (evals, evecs) = np.linalg.eigh(y_error) X_out = np.dot(evecs * evals ** (-0.5), np.dot(evecs.T, X)) y_out = np.dot(evecs * evals ** (-0.5), np.dot(evecs.T, y)) else: raise ValueError('shape of y_error does not match that of y') (X, y) = (X_out, y_out) self.clf_.fit(X, y) return self
astroML
positive
def create_socket(conf): unsupported = ['ListenUSBFunction', 'ListenMessageQueue', 'ListenNetlink'] unsupported += ['ListenSpecial', 'ListenFIFO', 'ListenSequentialPacket'] for item in unsupported: if conf.get(Socket, item, ''): logg.warning('%s: %s sockets are not implemented', conf.name(), item) self.error |= NOT_OK return None vListenDatagram = conf.get(Socket, 'ListenDatagram', '') vListenStream = conf.get(Socket, 'ListenStream', '') address = vListenStream or vListenDatagram m = re.match('(/.*)', address) if m: path = m.group(1) <DeepExtract> sock_stream = not vListenStream and socket.SOCK_DGRAM or socket.SOCK_STREAM sock = socket.socket(socket.AF_UNIX, sock_stream) try: dirmode = conf.get(Socket, 'DirectoryMode', '0755') mode = conf.get(Socket, 'SocketMode', '0666') user = conf.get(Socket, 'SocketUser', '') group = conf.get(Socket, 'SocketGroup', '') symlinks = conf.getlist(Socket, 'SymLinks', []) dirpath = os.path.dirname(path) if not os.path.isdir(dirpath): os.makedirs(dirpath, int(dirmode, 8)) if os.path.exists(path): os.unlink(path) sock.bind(path) os.fchmod(sock.fileno(), int(mode, 8)) shutil_fchown(sock.fileno(), user, group) if symlinks: logg.warning('%s: symlinks for socket not implemented (%s)', conf.name(), path) except Exception as e: logg.error('%s: create socket failed [%s]: %s', conf.name(), path, e) sock.close() sock = None sock = sock </DeepExtract> <DeepExtract> if conf.status is None: conf.status = self.read_status_from(conf) if path is None: try: del conf.status['path'] except KeyError: pass else: conf.status['path'] = path </DeepExtract> return sock m = re.match('(\\d+[.]\\d*[.]\\d*[.]\\d+):(\\d+)', address) if m: (addr, port) = (m.group(1), m.group(2)) <DeepExtract> inet = not vListenStream and socket.SOCK_DGRAM or socket.SOCK_STREAM sock = socket.socket(socket.AF_INET, inet) try: sock.bind((addr, int(port))) logg.info('%s: bound socket at %s %s:%s', conf.name(), strINET(inet), addr, port) except Exception as e: logg.error('%s: create socket failed (%s:%s): %s', conf.name(), addr, port, e) sock.close() sock = None sock = sock </DeepExtract> <DeepExtract> if conf.status is None: conf.status = self.read_status_from(conf) if port is None: try: del conf.status['port'] except KeyError: pass else: conf.status['port'] = port </DeepExtract> <DeepExtract> if conf.status is None: conf.status = self.read_status_from(conf) if addr is None: try: del conf.status['addr'] except KeyError: pass else: conf.status['addr'] = addr </DeepExtract> return sock m = re.match('\\[([0-9a-fA-F:]*)\\]:(\\d+)', address) if m: (addr, port) = (m.group(1), m.group(2)) <DeepExtract> inet = not vListenStream and socket.SOCK_DGRAM or socket.SOCK_STREAM sock = socket.socket(socket.AF_INET6, inet) try: sock.bind((addr, int(port))) logg.info('%s: bound socket at %s [%s]:%s', conf.name(), strINET(inet), addr, port) except Exception as e: logg.error('%s: create socket failed ([%s]:%s): %s', conf.name(), addr, port, e) sock.close() sock = None sock = sock </DeepExtract> <DeepExtract> if conf.status is None: conf.status = self.read_status_from(conf) if port is None: try: del conf.status['port'] except KeyError: pass else: conf.status['port'] = port </DeepExtract> <DeepExtract> if conf.status is None: conf.status = self.read_status_from(conf) if addr is None: try: del conf.status['addr'] except KeyError: pass else: conf.status['addr'] = addr </DeepExtract> return sock m = re.match('(\\d+)$', address) if m: port = m.group(1) <DeepExtract> inet = not vListenStream and socket.SOCK_DGRAM or socket.SOCK_STREAM 
sock = socket.socket(socket.AF_INET, inet) try: sock.bind(('', int(port))) logg.info('%s: bound socket at %s %s:%s', conf.name(), strINET(inet), '*', port) except Exception as e: logg.error('%s: create socket failed (%s:%s): %s', conf.name(), '*', port, e) sock.close() sock = None sock = sock </DeepExtract> <DeepExtract> if conf.status is None: conf.status = self.read_status_from(conf) if port is None: try: del conf.status['port'] except KeyError: pass else: conf.status['port'] = port </DeepExtract> return sock if re.match('@.*', address): logg.warning('%s: abstract namespace socket not implemented (%s)', conf.name(), address) return None if re.match('vsock:.*', address): logg.warning('%s: virtual machine socket not implemented (%s)', conf.name(), address) return None logg.error('%s: unknown socket address type (%s)', conf.name(), address) return None
def create_socket(conf): unsupported = ['ListenUSBFunction', 'ListenMessageQueue', 'ListenNetlink'] unsupported += ['ListenSpecial', 'ListenFIFO', 'ListenSequentialPacket'] for item in unsupported: if conf.get(Socket, item, ''): logg.warning('%s: %s sockets are not implemented', conf.name(), item) self.error |= NOT_OK return None vListenDatagram = conf.get(Socket, 'ListenDatagram', '') vListenStream = conf.get(Socket, 'ListenStream', '') address = vListenStream or vListenDatagram m = re.match('(/.*)', address) if m: path = m.group(1) sock_stream = not vListenStream and socket.SOCK_DGRAM or socket.SOCK_STREAM sock = socket.socket(socket.AF_UNIX, sock_stream) try: dirmode = conf.get(Socket, 'DirectoryMode', '0755') mode = conf.get(Socket, 'SocketMode', '0666') user = conf.get(Socket, 'SocketUser', '') group = conf.get(Socket, 'SocketGroup', '') symlinks = conf.getlist(Socket, 'SymLinks', []) dirpath = os.path.dirname(path) if not os.path.isdir(dirpath): os.makedirs(dirpath, int(dirmode, 8)) if os.path.exists(path): os.unlink(path) sock.bind(path) os.fchmod(sock.fileno(), int(mode, 8)) shutil_fchown(sock.fileno(), user, group) if symlinks: logg.warning('%s: symlinks for socket not implemented (%s)', conf.name(), path) except Exception as e: logg.error('%s: create socket failed [%s]: %s', conf.name(), path, e) sock.close() sock = None sock = sock if conf.status is None: conf.status = self.read_status_from(conf) if path is None: try: del conf.status['path'] except KeyError: pass else: conf.status['path'] = path return sock m = re.match('(\\d+[.]\\d*[.]\\d*[.]\\d+):(\\d+)', address) if m: (addr, port) = (m.group(1), m.group(2)) inet = not vListenStream and socket.SOCK_DGRAM or socket.SOCK_STREAM sock = socket.socket(socket.AF_INET, inet) try: sock.bind((addr, int(port))) logg.info('%s: bound socket at %s %s:%s', conf.name(), strINET(inet), addr, port) except Exception as e: logg.error('%s: create socket failed (%s:%s): %s', conf.name(), addr, port, e) sock.close() sock = None sock = sock if conf.status is None: conf.status = self.read_status_from(conf) if port is None: try: del conf.status['port'] except KeyError: pass else: conf.status['port'] = port if conf.status is None: conf.status = self.read_status_from(conf) if addr is None: try: del conf.status['addr'] except KeyError: pass else: conf.status['addr'] = addr return sock m = re.match('\\[([0-9a-fA-F:]*)\\]:(\\d+)', address) if m: (addr, port) = (m.group(1), m.group(2)) inet = not vListenStream and socket.SOCK_DGRAM or socket.SOCK_STREAM sock = socket.socket(socket.AF_INET6, inet) try: sock.bind((addr, int(port))) logg.info('%s: bound socket at %s [%s]:%s', conf.name(), strINET(inet), addr, port) except Exception as e: logg.error('%s: create socket failed ([%s]:%s): %s', conf.name(), addr, port, e) sock.close() sock = None sock = sock if conf.status is None: conf.status = self.read_status_from(conf) if port is None: try: del conf.status['port'] except KeyError: pass else: conf.status['port'] = port if conf.status is None: conf.status = self.read_status_from(conf) if addr is None: try: del conf.status['addr'] except KeyError: pass else: conf.status['addr'] = addr return sock m = re.match('(\\d+)$', address) if m: port = m.group(1) inet = not vListenStream and socket.SOCK_DGRAM or socket.SOCK_STREAM sock = socket.socket(socket.AF_INET, inet) try: sock.bind(('', int(port))) logg.info('%s: bound socket at %s %s:%s', conf.name(), strINET(inet), '*', port) except Exception as e: logg.error('%s: create socket failed (%s:%s): %s', conf.name(), '*', 
port, e) sock.close() sock = None sock = sock if conf.status is None: conf.status = self.read_status_from(conf) if port is None: try: del conf.status['port'] except KeyError: pass else: conf.status['port'] = port return sock if re.match('@.*', address): logg.warning('%s: abstract namespace socket not implemented (%s)', conf.name(), address) return None if re.match('vsock:.*', address): logg.warning('%s: virtual machine socket not implemented (%s)', conf.name(), address) return None logg.error('%s: unknown socket address type (%s)', conf.name(), address) return None
docker-systemctl-images
positive
def backward(self, retain_grad=False): if self.grad is None: self.grad = np.ones_like(self.data) funcs = [] seen_set = set() def add_func(f): if f not in seen_set: funcs.append(f) seen_set.add(f) funcs.sort(key=lambda x: x.generation) <DeepExtract> if self.creator not in seen_set: funcs.append(self.creator) seen_set.add(self.creator) funcs.sort(key=lambda x: x.generation) </DeepExtract> while funcs: f = funcs.pop() gys = [output().grad for output in f.outputs] gxs = f.backward(*gys) if not isinstance(gxs, tuple): gxs = (gxs,) for (x, gx) in zip(f.inputs, gxs): if x.grad is None: x.grad = gx else: x.grad = x.grad + gx if x.creator is not None: <DeepExtract> if x.creator not in seen_set: funcs.append(x.creator) seen_set.add(x.creator) funcs.sort(key=lambda x: x.generation) </DeepExtract> if not retain_grad: for y in f.outputs: y().grad = None
def backward(self, retain_grad=False): if self.grad is None: self.grad = np.ones_like(self.data) funcs = [] seen_set = set() def add_func(f): if f not in seen_set: funcs.append(f) seen_set.add(f) funcs.sort(key=lambda x: x.generation) if self.creator not in seen_set: funcs.append(self.creator) seen_set.add(self.creator) funcs.sort(key=lambda x: x.generation) while funcs: f = funcs.pop() gys = [output().grad for output in f.outputs] gxs = f.backward(*gys) if not isinstance(gxs, tuple): gxs = (gxs,) for (x, gx) in zip(f.inputs, gxs): if x.grad is None: x.grad = gx else: x.grad = x.grad + gx if x.creator is not None: if x.creator not in seen_set: funcs.append(x.creator) seen_set.add(x.creator) funcs.sort(key=lambda x: x.generation) if not retain_grad: for y in f.outputs: y().grad = None
deep-learning-from-scratch-3
positive
def main(file_paths): with open(file_paths[0]) as yaml_file: result = yaml.load(yaml_file, Loader=yaml.RoundTripLoader) for file_path in file_paths[1:]: with open(file_path) as yaml_file: <DeepExtract> for (k, v) in yaml.load(yaml_file, Loader=yaml.RoundTripLoader).items(): if isinstance(v, CommentedSeq): assert isinstance(result, CommentedSeq) result[k] = result.get(k, []) + v elif isinstance(v, CommentedMap): assert isinstance(result, CommentedMap) result[k] = dict_merge_recursive(result.get(k, CommentedMap()), v) elif k in result: raise NotImplementedError('I do not know how to merge key "{}": previous value "{}" - new value "{}"'.format(k, result[k], v)) else: result[k] = v return result </DeepExtract> yaml.dump(result, sys.stdout, Dumper=yaml.RoundTripDumper)
def main(file_paths): with open(file_paths[0]) as yaml_file: result = yaml.load(yaml_file, Loader=yaml.RoundTripLoader) for file_path in file_paths[1:]: with open(file_path) as yaml_file: for (k, v) in yaml.load(yaml_file, Loader=yaml.RoundTripLoader).items(): if isinstance(v, CommentedSeq): assert isinstance(result, CommentedSeq) result[k] = result.get(k, []) + v elif isinstance(v, CommentedMap): assert isinstance(result, CommentedMap) result[k] = dict_merge_recursive(result.get(k, CommentedMap()), v) elif k in result: raise NotImplementedError('I do not know how to merge key "{}": previous value "{}" - new value "{}"'.format(k, result[k], v)) else: result[k] = v return result yaml.dump(result, sys.stdout, Dumper=yaml.RoundTripDumper)
dotfiles_and_notes
positive
def operator_api(mesos_master_fqdn, mesos_secret_path): def execute_operator_api_request(**kwargs): <DeepExtract> def execute_request(method, endpoint, timeout=(3, 1), **kwargs): url = 'http://%s:%d%s' % (mesos_master_fqdn, MESOS_MASTER_PORT, endpoint) s = Session() s.auth = (get_principal(mesos_secret_path), get_secret(mesos_secret_path)) req = Request(method, url, **kwargs) prepared = s.prepare_request(req) try: resp = s.send(prepared, timeout=timeout) resp.raise_for_status() base_api_client = resp except HTTPError: raise HTTPError('Error executing API request calling %s.' % url) base_api_client = execute_request </DeepExtract> if 'headers' in kwargs: kwargs['headers']['Content-Type'] = 'application/json' else: kwargs['headers'] = {'Content-Type': 'application/json'} data = kwargs.pop('data') return base_api_client('POST', '/api/v1', data=json.dumps(data), **kwargs) return execute_operator_api_request
def operator_api(mesos_master_fqdn, mesos_secret_path): def execute_operator_api_request(**kwargs): def execute_request(method, endpoint, timeout=(3, 1), **kwargs): url = 'http://%s:%d%s' % (mesos_master_fqdn, MESOS_MASTER_PORT, endpoint) s = Session() s.auth = (get_principal(mesos_secret_path), get_secret(mesos_secret_path)) req = Request(method, url, **kwargs) prepared = s.prepare_request(req) try: resp = s.send(prepared, timeout=timeout) resp.raise_for_status() base_api_client = resp except HTTPError: raise HTTPError('Error executing API request calling %s.' % url) base_api_client = execute_request if 'headers' in kwargs: kwargs['headers']['Content-Type'] = 'application/json' else: kwargs['headers'] = {'Content-Type': 'application/json'} data = kwargs.pop('data') return base_api_client('POST', '/api/v1', data=json.dumps(data), **kwargs) return execute_operator_api_request
clusterman
positive
def receive_message(self, req, resp): ReceiveMessageValidator.validate(req) req_url = '/%s/%s/%s' % (URISEC_QUEUE, req.queue_name, URISEC_MESSAGE) if req.wait_seconds != -1: req_url += '?waitseconds=%s' % req.wait_seconds req_inter = RequestInternal(req.method, req_url) <DeepExtract> if req.request_id is not None: req_inter.header['x-mns-user-request-id'] = req.request_id if self.http.is_keep_alive(): req_inter.header['Connection'] = 'Keep-Alive' if req_inter.data != '': req_inter.header['content-md5'] = base64.b64encode(hashlib.md5(req_inter.data).hexdigest().encode('utf-8')).decode('utf-8') req_inter.header['content-type'] = 'text/xml;charset=UTF-8' req_inter.header['x-mns-version'] = self.version req_inter.header['host'] = self.host req_inter.header['date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime()) req_inter.header['user-agent'] = 'aliyun-sdk-python/%s(%s/%s;%s)' % (pkg_info.version, platform.system(), platform.release(), platform.python_version()) req_inter.header['Authorization'] = self.get_signature(req_inter.method, req_inter.header, req_inter.uri) if self.security_token != '': req_inter.header['security-token'] = self.security_token </DeepExtract> resp_inter = self.http.send_request(req_inter) resp.status = resp_inter.status resp.header = resp_inter.header <DeepExtract> if resp_inter.status >= 200 and resp_inter.status < 400: resp.error_data = '' else: resp.error_data = resp_inter.data if resp_inter.status >= 400 and resp_inter.status <= 600: (excType, excMessage, reqId, hostId, subErr) = decoder.decodeError(resp.error_data, req_inter.get_req_id()) if reqId is None: reqId = resp.header['x-mns-request-id'] raise MNSServerException(excType, excMessage, reqId, hostId, subErr) else: raise MNSClientNetworkException('UnkownError', resp_inter.data, req_inter.get_req_id()) </DeepExtract> if resp.error_data == '': data = RecvMessageDecoder.decode(resp_inter.data, req.base64decode, req_inter.get_req_id()) <DeepExtract> resp.dequeue_count = int(data['DequeueCount']) resp.enqueue_time = int(data['EnqueueTime']) resp.first_dequeue_time = int(data['FirstDequeueTime']) resp.message_body = data['MessageBody'] resp.message_id = data['MessageId'] resp.message_body_md5 = data['MessageBodyMD5'] resp.next_visible_time = int(data['NextVisibleTime']) resp.receipt_handle = data['ReceiptHandle'] resp.priority = int(data['Priority']) </DeepExtract> if self.logger: self.logger.info('ReceiveMessage RequestId:%s QueueName:%s WaitSeconds:%s MessageId:%s MessageBodyMD5:%s NextVisibilityTime:%s ReceiptHandle:%s EnqueueTime:%s DequeueCount:%s' % (resp.get_requestid(), req.queue_name, req.wait_seconds, resp.message_id, resp.message_body_md5, resp.next_visible_time, resp.receipt_handle, resp.enqueue_time, resp.dequeue_count))
def receive_message(self, req, resp): ReceiveMessageValidator.validate(req) req_url = '/%s/%s/%s' % (URISEC_QUEUE, req.queue_name, URISEC_MESSAGE) if req.wait_seconds != -1: req_url += '?waitseconds=%s' % req.wait_seconds req_inter = RequestInternal(req.method, req_url) if req.request_id is not None: req_inter.header['x-mns-user-request-id'] = req.request_id if self.http.is_keep_alive(): req_inter.header['Connection'] = 'Keep-Alive' if req_inter.data != '': req_inter.header['content-md5'] = base64.b64encode(hashlib.md5(req_inter.data).hexdigest().encode('utf-8')).decode('utf-8') req_inter.header['content-type'] = 'text/xml;charset=UTF-8' req_inter.header['x-mns-version'] = self.version req_inter.header['host'] = self.host req_inter.header['date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime()) req_inter.header['user-agent'] = 'aliyun-sdk-python/%s(%s/%s;%s)' % (pkg_info.version, platform.system(), platform.release(), platform.python_version()) req_inter.header['Authorization'] = self.get_signature(req_inter.method, req_inter.header, req_inter.uri) if self.security_token != '': req_inter.header['security-token'] = self.security_token resp_inter = self.http.send_request(req_inter) resp.status = resp_inter.status resp.header = resp_inter.header if resp_inter.status >= 200 and resp_inter.status < 400: resp.error_data = '' else: resp.error_data = resp_inter.data if resp_inter.status >= 400 and resp_inter.status <= 600: (excType, excMessage, reqId, hostId, subErr) = decoder.decodeError(resp.error_data, req_inter.get_req_id()) if reqId is None: reqId = resp.header['x-mns-request-id'] raise MNSServerException(excType, excMessage, reqId, hostId, subErr) else: raise MNSClientNetworkException('UnkownError', resp_inter.data, req_inter.get_req_id()) if resp.error_data == '': data = RecvMessageDecoder.decode(resp_inter.data, req.base64decode, req_inter.get_req_id()) resp.dequeue_count = int(data['DequeueCount']) resp.enqueue_time = int(data['EnqueueTime']) resp.first_dequeue_time = int(data['FirstDequeueTime']) resp.message_body = data['MessageBody'] resp.message_id = data['MessageId'] resp.message_body_md5 = data['MessageBodyMD5'] resp.next_visible_time = int(data['NextVisibleTime']) resp.receipt_handle = data['ReceiptHandle'] resp.priority = int(data['Priority']) if self.logger: self.logger.info('ReceiveMessage RequestId:%s QueueName:%s WaitSeconds:%s MessageId:%s MessageBodyMD5:%s NextVisibilityTime:%s ReceiptHandle:%s EnqueueTime:%s DequeueCount:%s' % (resp.get_requestid(), req.queue_name, req.wait_seconds, resp.message_id, resp.message_body_md5, resp.next_visible_time, resp.receipt_handle, resp.enqueue_time, resp.dequeue_count))
AutomationTest
positive
def __getitem__(self, i): <DeepExtract> if i < 0 or i >= self.size: raise IndexError('index out of range') </DeepExtract> return self.tokens_list[i]
def __getitem__(self, i): if i < 0 or i >= self.size: raise IndexError('index out of range') return self.tokens_list[i]
Abstractive-Text-Summarization
positive
def test_5_GETkey_errors_406(self): req = {'remote_user': 'test', 'query': {'type': 'complex'}, 'trail': ['test', 'key1']} rep = {'headers': {}} with self.assertRaises(HTTPError) as err: <DeepExtract> self.check_authz(req) self.secrets.GET(req, rep) </DeepExtract> self.assertEqual(err.exception.code, 406)
def test_5_GETkey_errors_406(self): req = {'remote_user': 'test', 'query': {'type': 'complex'}, 'trail': ['test', 'key1']} rep = {'headers': {}} with self.assertRaises(HTTPError) as err: self.check_authz(req) self.secrets.GET(req, rep) self.assertEqual(err.exception.code, 406)
custodia
positive
def get_xlsx_export(self, context): <DeepExtract> rows = context['results'] new_rows = [[self._format_value(o) for o in filter(lambda c: getattr(c, 'export', False), r.cells)] for r in rows] new_rows.insert(0, [force_text(c.text) for c in context['result_headers'].cells if c.export]) datas = new_rows </DeepExtract> output = io.BytesIO() export_header = self.request.GET.get('export_xlsx_header', 'off') == 'on' model_name = self.opts.verbose_name book = xlsxwriter.Workbook(output) sheet = book.add_worksheet(u'%s %s' % (_(u'Sheet'), force_text(model_name))) styles = {'datetime': book.add_format({'num_format': 'yyyy-mm-dd hh:mm:ss'}), 'date': book.add_format({'num_format': 'yyyy-mm-dd'}), 'time': book.add_format({'num_format': 'hh:mm:ss'}), 'header': book.add_format({'font': 'name Times New Roman', 'color': 'red', 'bold': 'on', 'num_format': '#,##0.00'}), 'default': book.add_format()} if not export_header: datas = datas[1:] for (rowx, row) in enumerate(datas): for (colx, value) in enumerate(row): if export_header and rowx == 0: cell_style = styles['header'] elif isinstance(value, datetime.datetime): cell_style = styles['datetime'] elif isinstance(value, datetime.date): cell_style = styles['date'] elif isinstance(value, datetime.time): cell_style = styles['time'] else: cell_style = styles['default'] sheet.write(rowx, colx, value, cell_style) book.close() output.seek(0) return output.getvalue()
def get_xlsx_export(self, context): rows = context['results'] new_rows = [[self._format_value(o) for o in filter(lambda c: getattr(c, 'export', False), r.cells)] for r in rows] new_rows.insert(0, [force_text(c.text) for c in context['result_headers'].cells if c.export]) datas = new_rows output = io.BytesIO() export_header = self.request.GET.get('export_xlsx_header', 'off') == 'on' model_name = self.opts.verbose_name book = xlsxwriter.Workbook(output) sheet = book.add_worksheet(u'%s %s' % (_(u'Sheet'), force_text(model_name))) styles = {'datetime': book.add_format({'num_format': 'yyyy-mm-dd hh:mm:ss'}), 'date': book.add_format({'num_format': 'yyyy-mm-dd'}), 'time': book.add_format({'num_format': 'hh:mm:ss'}), 'header': book.add_format({'font': 'name Times New Roman', 'color': 'red', 'bold': 'on', 'num_format': '#,##0.00'}), 'default': book.add_format()} if not export_header: datas = datas[1:] for (rowx, row) in enumerate(datas): for (colx, value) in enumerate(row): if export_header and rowx == 0: cell_style = styles['header'] elif isinstance(value, datetime.datetime): cell_style = styles['datetime'] elif isinstance(value, datetime.date): cell_style = styles['date'] elif isinstance(value, datetime.time): cell_style = styles['time'] else: cell_style = styles['default'] sheet.write(rowx, colx, value, cell_style) book.close() output.seek(0) return output.getvalue()
book
positive
@classmethod def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs): """ Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-large-cased` . `bert-base-multilingual-uncased` . `bert-base-multilingual-cased` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `model.chkpt` a TensorFlow checkpoint from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification) """ if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = pretrained_model_name_or_path try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) except EnvironmentError: logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find any file associated to this path or url.".format(pretrained_model_name_or_path, ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), archive_file)) return None if resolved_archive_file == archive_file: logger.info('loading archive file {}'.format(archive_file)) else: logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file)) tempdir = None if os.path.isdir(resolved_archive_file) or from_tf: serialization_dir = resolved_archive_file else: tempdir = tempfile.mkdtemp() logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) serialization_dir = tempdir config_file = os.path.join(serialization_dir, CONFIG_NAME) config = BertConfig.from_json_file(config_file) logger.info('Model config {}'.format(config)) model = cls(config, *inputs, **kwargs) if state_dict is None and (not from_tf): weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None) if tempdir: shutil.rmtree(tempdir) if from_tf: weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME) return load_tf_weights_in_bert(model, weights_path) old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for (old_key, new_key) in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = 
metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for (name, child) in module._modules.items(): if child is not None: <DeepExtract> local_metadata = {} if metadata is None else metadata.get(prefix + name + '.'[:-1], {}) child._load_from_state_dict(state_dict, prefix + name + '.', local_metadata, True, missing_keys, unexpected_keys, error_msgs) for (name, child) in child._modules.items(): if child is not None: load(child, prefix + name + '.' + name + '.') </DeepExtract> start_prefix = '' if not hasattr(model, 'bert') and any((s.startswith('bert.') for s in state_dict.keys())): start_prefix = 'bert.' <DeepExtract> local_metadata = {} if metadata is None else metadata.get(start_prefix[:-1], {}) model._load_from_state_dict(state_dict, start_prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for (name, child) in model._modules.items(): if child is not None: load(child, start_prefix + name + '.') </DeepExtract> if len(missing_keys) > 0: logger.info('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(model.__class__.__name__, '\n\t'.join(error_msgs))) return model
@classmethod def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs): """ Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-large-cased` . `bert-base-multilingual-uncased` . `bert-base-multilingual-cased` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `model.chkpt` a TensorFlow checkpoint from_tf: should we load the weights from a locally saved TensorFlow checkpoint cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification) """ if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] else: archive_file = pretrained_model_name_or_path try: resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) except EnvironmentError: logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find any file associated to this path or url.".format(pretrained_model_name_or_path, ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), archive_file)) return None if resolved_archive_file == archive_file: logger.info('loading archive file {}'.format(archive_file)) else: logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file)) tempdir = None if os.path.isdir(resolved_archive_file) or from_tf: serialization_dir = resolved_archive_file else: tempdir = tempfile.mkdtemp() logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) serialization_dir = tempdir config_file = os.path.join(serialization_dir, CONFIG_NAME) config = BertConfig.from_json_file(config_file) logger.info('Model config {}'.format(config)) model = cls(config, *inputs, **kwargs) if state_dict is None and (not from_tf): weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None) if tempdir: shutil.rmtree(tempdir) if from_tf: weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME) return load_tf_weights_in_bert(model, weights_path) old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for (old_key, new_key) in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) missing_keys = [] unexpected_keys = [] error_msgs = [] metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = 
metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for (name, child) in module._modules.items(): if child is not None: local_metadata = {} if metadata is None else metadata.get(prefix + name + '.'[:-1], {}) child._load_from_state_dict(state_dict, prefix + name + '.', local_metadata, True, missing_keys, unexpected_keys, error_msgs) for (name, child) in child._modules.items(): if child is not None: load(child, prefix + name + '.' + name + '.') start_prefix = '' if not hasattr(model, 'bert') and any((s.startswith('bert.') for s in state_dict.keys())): start_prefix = 'bert.' local_metadata = {} if metadata is None else metadata.get(start_prefix[:-1], {}) model._load_from_state_dict(state_dict, start_prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for (name, child) in model._modules.items(): if child is not None: load(child, start_prefix + name + '.') if len(missing_keys) > 0: logger.info('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(model.__class__.__name__, '\n\t'.join(error_msgs))) return model
bert-jointly-relation-entity-extraciton
positive
def run_benchmark_train_test(n_jobs=-1): print('Normal fit & Evaluation') model_dict = {'LSCDE': {'estimator': ['LSConditionalDensityEstimation'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'random_seed': SEEDS}, 'MDN w/0 noise': {'estimator': ['MixtureDensityNetwork'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_centers': [20], 'n_training_epochs': [1000], 'x_noise_std': [None], 'y_noise_std': [None], 'random_seed': SEEDS}, 'KMN w/0 noise': {'estimator': ['KernelMixtureNetwork'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_centers': [50], 'n_training_epochs': [1000], 'init_scales': [[0.7, 0.3]], 'x_noise_std': [None], 'y_noise_std': [None], 'random_seed': SEEDS}, 'NF w/0 noise': {'flows_type': [('affine', 'radial', 'radial', 'radial', 'radial')], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_training_epochs': [1000], 'hidden_sizes': [(16, 16)], 'x_noise_std': [None], 'y_noise_std': [None], 'random_seed': SEEDS}, 'MDN w/ noise': {'estimator': ['MixtureDensityNetwork'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_centers': [20], 'n_training_epochs': [1000], 'x_noise_std': [0.2], 'y_noise_std': [0.1], 'random_seed': SEEDS}, 'KMN w/ noise': {'estimator': ['KernelMixtureNetwork'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_centers': [50], 'n_training_epochs': [1000], 'init_scales': [[0.7, 0.3]], 'x_noise_std': [0.2], 'y_noise_std': [0.1], 'random_seed': SEEDS}, 'NF w/ noise': {'flows_type': [('affine', 'radial', 'radial', 'radial', 'radial')], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_training_epochs': [1000], 'hidden_sizes': [(16, 16)], 'x_noise_std': [0.1], 'y_noise_std': [0.1], 'random_seed': SEEDS}, 'NKDE': {'estimator': ['NeighborKernelDensityEstimation'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'param_selection': ['normal_reference'], 'random_seed': [None]}, 'CKDE': {'estimator': ['ConditionalKernelDensityEstimation'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'bandwidth': ['normal_reference'], 'random_seed': [None]}} <DeepExtract> model_configs = {} for (model_key, conf_dict) in model_dict.items(): model_configs[model_key] = [dict(zip(conf_dict.keys(), value_tuple)) for value_tuple in list(itertools.product(*list(conf_dict.values())))] configs_initialized = {} for (model_key, model_conf_list) in model_configs.items(): configs_initialized[model_key] = [] for (i, conf) in enumerate(model_conf_list): conf['name'] = model_name_prefix + model_key.replace(' ', '_') + '_%i' % i if VERBOSE: print('instantiating ', conf['name']) ' remove estimator entry from dict to instantiate it' estimator = conf.pop('estimator') configs_initialized[model_key].append(globals()[estimator](**conf)) model_dict = configs_initialized </DeepExtract> model_dict = OrderedDict(list(model_dict.items())) <DeepExtract> result_dict = {} manager = Manager() result_list_model = manager.list() if n_jobs == -1: n_jobs = len(SEEDS) if multiprocessing: executor = AsyncExecutor(n_jobs=n_jobs) eval = lambda est: result_list_model.append(empirical_evaluation(est, VALIDATION_PORTION, moment_r2=True, eval_by_fc=False, fit_by_cv=False)) for (model_name, models) in model_dict.items(): print('Running likelihood fit and validation for %s' % model_name) t = time.time() if multiprocessing: executor.run(eval, models) else: for est in models: eval(est) assert len(result_list_model) == len(models) (mean_logli_list, mu_rmse_list, std_rmse_list, std_intraday_rmse_list) = list(zip(*list(result_list_model))) for _ in range(len(result_list_model)): del result_list_model[0] assert len(result_list_model) == 0 (mean_logli, mean_logli_dev) = 
(np.mean(mean_logli_list), np.std(mean_logli_list)) (mu_rmse, mu_rmse_dev) = (np.mean(mu_rmse_list), np.std(mu_rmse_list)) (std_rmse, std_rmse_dev) = (np.mean(std_rmse_list), np.std(std_rmse_list)) (std_intraday_rmse, std_intraday_rmse_dev) = (np.mean(std_intraday_rmse_list), np.std(std_intraday_rmse_list)) result_dict[model_name] = (mean_logli, mean_logli_dev, mu_rmse, mu_rmse_dev, std_rmse, std_rmse_dev, std_intraday_rmse, std_intraday_rmse_dev) print('%s results:' % model_name, result_dict[model_name]) print('Duration of %s:' % model_name, time.time() - t) df = pd.DataFrame.from_dict(result_dict, 'index') df.columns = ['log_likelihood', 'log_likelihood_dev', 'rmse_mean', 'rmse_mean_dev', 'rmse_std', 'rmse_std_dev', 'rmse_std_intraday', 'rmse_std_intraday_dev'] result_df = df </DeepExtract> print(result_df.to_latex()) print(result_df)
def run_benchmark_train_test(n_jobs=-1): print('Normal fit & Evaluation') model_dict = {'LSCDE': {'estimator': ['LSConditionalDensityEstimation'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'random_seed': SEEDS}, 'MDN w/0 noise': {'estimator': ['MixtureDensityNetwork'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_centers': [20], 'n_training_epochs': [1000], 'x_noise_std': [None], 'y_noise_std': [None], 'random_seed': SEEDS}, 'KMN w/0 noise': {'estimator': ['KernelMixtureNetwork'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_centers': [50], 'n_training_epochs': [1000], 'init_scales': [[0.7, 0.3]], 'x_noise_std': [None], 'y_noise_std': [None], 'random_seed': SEEDS}, 'NF w/0 noise': {'flows_type': [('affine', 'radial', 'radial', 'radial', 'radial')], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_training_epochs': [1000], 'hidden_sizes': [(16, 16)], 'x_noise_std': [None], 'y_noise_std': [None], 'random_seed': SEEDS}, 'MDN w/ noise': {'estimator': ['MixtureDensityNetwork'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_centers': [20], 'n_training_epochs': [1000], 'x_noise_std': [0.2], 'y_noise_std': [0.1], 'random_seed': SEEDS}, 'KMN w/ noise': {'estimator': ['KernelMixtureNetwork'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_centers': [50], 'n_training_epochs': [1000], 'init_scales': [[0.7, 0.3]], 'x_noise_std': [0.2], 'y_noise_std': [0.1], 'random_seed': SEEDS}, 'NF w/ noise': {'flows_type': [('affine', 'radial', 'radial', 'radial', 'radial')], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'n_training_epochs': [1000], 'hidden_sizes': [(16, 16)], 'x_noise_std': [0.1], 'y_noise_std': [0.1], 'random_seed': SEEDS}, 'NKDE': {'estimator': ['NeighborKernelDensityEstimation'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'param_selection': ['normal_reference'], 'random_seed': [None]}, 'CKDE': {'estimator': ['ConditionalKernelDensityEstimation'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'bandwidth': ['normal_reference'], 'random_seed': [None]}} model_configs = {} for (model_key, conf_dict) in model_dict.items(): model_configs[model_key] = [dict(zip(conf_dict.keys(), value_tuple)) for value_tuple in list(itertools.product(*list(conf_dict.values())))] configs_initialized = {} for (model_key, model_conf_list) in model_configs.items(): configs_initialized[model_key] = [] for (i, conf) in enumerate(model_conf_list): conf['name'] = model_name_prefix + model_key.replace(' ', '_') + '_%i' % i if VERBOSE: print('instantiating ', conf['name']) ' remove estimator entry from dict to instantiate it' estimator = conf.pop('estimator') configs_initialized[model_key].append(globals()[estimator](**conf)) model_dict = configs_initialized model_dict = OrderedDict(list(model_dict.items())) result_dict = {} manager = Manager() result_list_model = manager.list() if n_jobs == -1: n_jobs = len(SEEDS) if multiprocessing: executor = AsyncExecutor(n_jobs=n_jobs) eval = lambda est: result_list_model.append(empirical_evaluation(est, VALIDATION_PORTION, moment_r2=True, eval_by_fc=False, fit_by_cv=False)) for (model_name, models) in model_dict.items(): print('Running likelihood fit and validation for %s' % model_name) t = time.time() if multiprocessing: executor.run(eval, models) else: for est in models: eval(est) assert len(result_list_model) == len(models) (mean_logli_list, mu_rmse_list, std_rmse_list, std_intraday_rmse_list) = list(zip(*list(result_list_model))) for _ in range(len(result_list_model)): del result_list_model[0] assert len(result_list_model) == 0 (mean_logli, mean_logli_dev) = (np.mean(mean_logli_list), np.std(mean_logli_list)) (mu_rmse, 
mu_rmse_dev) = (np.mean(mu_rmse_list), np.std(mu_rmse_list)) (std_rmse, std_rmse_dev) = (np.mean(std_rmse_list), np.std(std_rmse_list)) (std_intraday_rmse, std_intraday_rmse_dev) = (np.mean(std_intraday_rmse_list), np.std(std_intraday_rmse_list)) result_dict[model_name] = (mean_logli, mean_logli_dev, mu_rmse, mu_rmse_dev, std_rmse, std_rmse_dev, std_intraday_rmse, std_intraday_rmse_dev) print('%s results:' % model_name, result_dict[model_name]) print('Duration of %s:' % model_name, time.time() - t) df = pd.DataFrame.from_dict(result_dict, 'index') df.columns = ['log_likelihood', 'log_likelihood_dev', 'rmse_mean', 'rmse_mean_dev', 'rmse_std', 'rmse_std_dev', 'rmse_std_intraday', 'rmse_std_intraday_dev'] result_df = df print(result_df.to_latex()) print(result_df)
Conditional_Density_Estimation
positive
def generaldata(pids=None): if pids is None: <DeepExtract> if 'split' == 'split': pids = get_split_patients() else: pid_tracking = pd.read_csv(paths.derived_dir + 'pids_tracking.csv') pid_tracking.set_index('PatientID', inplace=True) pids = pid_tracking.index[pid_tracking['split']] pids = pids </DeepExtract> df = pd.read_csv(paths.csvs_0 + 'expot-generaldata.csv', sep=';', encoding='latin-1') df.set_index('PatientID', inplace=True) for pid in pids: assert pid in df.index df_subset = df.loc[pids] n_patients = len(pids) print(n_patients) if not n_patients == 36098: print('ERROR: WRONG NUMBER OF PATIENTS:', n_patients) return False assert df_subset.shape[0] == n_patients for characteristic in ['Country', 'MaritalState', 'Sex']: print(characteristic) print('#:', df_subset[characteristic].value_counts()) print('%:', 100 * df_subset[characteristic].value_counts() / n_patients) df_subset['AdmissionTime'] = pd.to_datetime(df_subset['AdmissionTime']) age = df_subset['AdmissionTime'].apply(lambda x: x.year) - df_subset['birthYear'] print('Age:', age.mean(), age.std(), age.median(), age.ptp(), age.min(), age.max()) return df_subset
def generaldata(pids=None): if pids is None: if 'split' == 'split': pids = get_split_patients() else: pid_tracking = pd.read_csv(paths.derived_dir + 'pids_tracking.csv') pid_tracking.set_index('PatientID', inplace=True) pids = pid_tracking.index[pid_tracking['split']] pids = pids df = pd.read_csv(paths.csvs_0 + 'expot-generaldata.csv', sep=';', encoding='latin-1') df.set_index('PatientID', inplace=True) for pid in pids: assert pid in df.index df_subset = df.loc[pids] n_patients = len(pids) print(n_patients) if not n_patients == 36098: print('ERROR: WRONG NUMBER OF PATIENTS:', n_patients) return False assert df_subset.shape[0] == n_patients for characteristic in ['Country', 'MaritalState', 'Sex']: print(characteristic) print('#:', df_subset[characteristic].value_counts()) print('%:', 100 * df_subset[characteristic].value_counts() / n_patients) df_subset['AdmissionTime'] = pd.to_datetime(df_subset['AdmissionTime']) age = df_subset['AdmissionTime'].apply(lambda x: x.year) - df_subset['birthYear'] print('Age:', age.mean(), age.std(), age.median(), age.ptp(), age.min(), age.max()) return df_subset
circEWS
positive
def run(self, interaction: Interaction, app: AppPublic) -> Interaction | None: """Execute the ``inventory`` request for mode interactive. :param interaction: The interaction from the user :param app: The app instance :returns: The pending :class:`~ansible_navigator.ui_framework.ui.Interaction` or :data:`None` """ self._logger.debug('inventory requested in interactive mode') self._prepare_to_run(app, interaction) args_updated = self._update_args([self._name] + shlex.split(self._interaction.action.match.groupdict()['params'] or '')) if not args_updated: self._prepare_to_exit(interaction) return None self.stdout = self._calling_app.stdout self._inventories = self._args.inventory <DeepExtract> modification_times = [] for inventory in self._inventories: if os.path.isdir(inventory): modification_times.append(max((os.path.getmtime(e) for e in glob.glob(os.path.join(inventory, '**'), recursive=True)))) elif os.path.isfile(inventory): modification_times.append(os.path.getmtime(inventory)) if modification_times: self._inventories_mtime = max(modification_times) else: self._inventories_mtime = None </DeepExtract> <DeepExtract> if isinstance(self._args.set_environment_variable, dict): set_env_vars = {**self._args.set_environment_variable} else: set_env_vars = {} if self._args.display_color is False: set_env_vars['ANSIBLE_NOCOLOR'] = '1' kwargs = {'container_engine': self._args.container_engine, 'host_cwd': os.getcwd(), 'execution_environment_image': self._args.execution_environment_image, 'execution_environment': self._args.execution_environment, 'navigator_mode': self._args.mode, 'pass_environment_variable': self._args.pass_environment_variable, 'set_environment_variable': set_env_vars, 'private_data_dir': self._args.ansible_runner_artifact_dir, 'rotate_artifacts': self._args.ansible_runner_rotate_artifacts_count, 'timeout': self._args.ansible_runner_timeout} if isinstance(self._args.execution_environment_volume_mounts, list): kwargs.update({'container_volume_mounts': self._args.execution_environment_volume_mounts}) if isinstance(self._args.container_options, list): kwargs.update({'container_options': self._args.container_options}) if self._args.mode == 'interactive': self._collect_inventory_details_interactive(kwargs) else: return self._collect_inventory_details_automated(kwargs) return (None, None, None) </DeepExtract> if not self._inventory: self._prepare_to_exit(interaction) return None if self._inventory_error: while True: interaction = self._interaction.ui.show(self._inventory_error, content_format=ContentFormat.ANSI) if interaction.name != 'refresh': break self._prepare_to_exit(interaction) return None self.steps.append(self._build_main_menu()) while True: <DeepExtract> self._calling_app.update() </DeepExtract> <DeepExtract> result = None if isinstance(self.steps.current, Interaction): result = run_action(self.steps.current.name, self.app, self.steps.current) elif isinstance(self.steps.current, Step): if self.steps.current.show_func: current_index = self.steps.current.index self.steps.current.show_func() self.steps.current.index = current_index if self.steps.current.type == 'menu': result = self._interaction.ui.show(obj=self.steps.current.value, columns=self.steps.current.columns, color_menu_item=color_menu) elif self.steps.current.type == 'content': result = self._interaction.ui.show(obj=self.steps.current.value, index=self.steps.current.index, content_heading=content_heading, filter_content_keys=filter_content_keys) if result is None: self.steps.back_one() else: self.steps.append(result) 
</DeepExtract> if not self.steps: break current_mtime = self._inventories_mtime <DeepExtract> modification_times = [] for inventory in self._inventories: if os.path.isdir(inventory): modification_times.append(max((os.path.getmtime(e) for e in glob.glob(os.path.join(inventory, '**'), recursive=True)))) elif os.path.isfile(inventory): modification_times.append(os.path.getmtime(inventory)) if modification_times: self._inventories_mtime = max(modification_times) else: self._inventories_mtime = None </DeepExtract> if current_mtime != self._inventories_mtime: self._logger.debug('inventory changed') <DeepExtract> modification_times = [] for inventory in self._inventories: if os.path.isdir(inventory): modification_times.append(max((os.path.getmtime(e) for e in glob.glob(os.path.join(inventory, '**'), recursive=True)))) elif os.path.isfile(inventory): modification_times.append(os.path.getmtime(inventory)) if modification_times: self._inventories_mtime = max(modification_times) else: self._inventories_mtime = None </DeepExtract> <DeepExtract> if isinstance(self._args.set_environment_variable, dict): set_env_vars = {**self._args.set_environment_variable} else: set_env_vars = {} if self._args.display_color is False: set_env_vars['ANSIBLE_NOCOLOR'] = '1' kwargs = {'container_engine': self._args.container_engine, 'host_cwd': os.getcwd(), 'execution_environment_image': self._args.execution_environment_image, 'execution_environment': self._args.execution_environment, 'navigator_mode': self._args.mode, 'pass_environment_variable': self._args.pass_environment_variable, 'set_environment_variable': set_env_vars, 'private_data_dir': self._args.ansible_runner_artifact_dir, 'rotate_artifacts': self._args.ansible_runner_rotate_artifacts_count, 'timeout': self._args.ansible_runner_timeout} if isinstance(self._args.execution_environment_volume_mounts, list): kwargs.update({'container_volume_mounts': self._args.execution_environment_volume_mounts}) if isinstance(self._args.container_options, list): kwargs.update({'container_options': self._args.container_options}) if self._args.mode == 'interactive': self._collect_inventory_details_interactive(kwargs) else: return self._collect_inventory_details_automated(kwargs) return (None, None, None) </DeepExtract> if not self._inventory: break if self._inventory_error: self._logger.error(self._inventory_error) break if self.steps.current.name == 'quit': return self.steps.current self._prepare_to_exit(interaction) return None
def run(self, interaction: Interaction, app: AppPublic) -> Interaction | None: """Execute the ``inventory`` request for mode interactive. :param interaction: The interaction from the user :param app: The app instance :returns: The pending :class:`~ansible_navigator.ui_framework.ui.Interaction` or :data:`None` """ self._logger.debug('inventory requested in interactive mode') self._prepare_to_run(app, interaction) args_updated = self._update_args([self._name] + shlex.split(self._interaction.action.match.groupdict()['params'] or '')) if not args_updated: self._prepare_to_exit(interaction) return None self.stdout = self._calling_app.stdout self._inventories = self._args.inventory modification_times = [] for inventory in self._inventories: if os.path.isdir(inventory): modification_times.append(max((os.path.getmtime(e) for e in glob.glob(os.path.join(inventory, '**'), recursive=True)))) elif os.path.isfile(inventory): modification_times.append(os.path.getmtime(inventory)) if modification_times: self._inventories_mtime = max(modification_times) else: self._inventories_mtime = None if isinstance(self._args.set_environment_variable, dict): set_env_vars = {**self._args.set_environment_variable} else: set_env_vars = {} if self._args.display_color is False: set_env_vars['ANSIBLE_NOCOLOR'] = '1' kwargs = {'container_engine': self._args.container_engine, 'host_cwd': os.getcwd(), 'execution_environment_image': self._args.execution_environment_image, 'execution_environment': self._args.execution_environment, 'navigator_mode': self._args.mode, 'pass_environment_variable': self._args.pass_environment_variable, 'set_environment_variable': set_env_vars, 'private_data_dir': self._args.ansible_runner_artifact_dir, 'rotate_artifacts': self._args.ansible_runner_rotate_artifacts_count, 'timeout': self._args.ansible_runner_timeout} if isinstance(self._args.execution_environment_volume_mounts, list): kwargs.update({'container_volume_mounts': self._args.execution_environment_volume_mounts}) if isinstance(self._args.container_options, list): kwargs.update({'container_options': self._args.container_options}) if self._args.mode == 'interactive': self._collect_inventory_details_interactive(kwargs) else: return self._collect_inventory_details_automated(kwargs) return (None, None, None) if not self._inventory: self._prepare_to_exit(interaction) return None if self._inventory_error: while True: interaction = self._interaction.ui.show(self._inventory_error, content_format=ContentFormat.ANSI) if interaction.name != 'refresh': break self._prepare_to_exit(interaction) return None self.steps.append(self._build_main_menu()) while True: self._calling_app.update() result = None if isinstance(self.steps.current, Interaction): result = run_action(self.steps.current.name, self.app, self.steps.current) elif isinstance(self.steps.current, Step): if self.steps.current.show_func: current_index = self.steps.current.index self.steps.current.show_func() self.steps.current.index = current_index if self.steps.current.type == 'menu': result = self._interaction.ui.show(obj=self.steps.current.value, columns=self.steps.current.columns, color_menu_item=color_menu) elif self.steps.current.type == 'content': result = self._interaction.ui.show(obj=self.steps.current.value, index=self.steps.current.index, content_heading=content_heading, filter_content_keys=filter_content_keys) if result is None: self.steps.back_one() else: self.steps.append(result) if not self.steps: break current_mtime = self._inventories_mtime modification_times = [] for inventory 
in self._inventories: if os.path.isdir(inventory): modification_times.append(max((os.path.getmtime(e) for e in glob.glob(os.path.join(inventory, '**'), recursive=True)))) elif os.path.isfile(inventory): modification_times.append(os.path.getmtime(inventory)) if modification_times: self._inventories_mtime = max(modification_times) else: self._inventories_mtime = None if current_mtime != self._inventories_mtime: self._logger.debug('inventory changed') modification_times = [] for inventory in self._inventories: if os.path.isdir(inventory): modification_times.append(max((os.path.getmtime(e) for e in glob.glob(os.path.join(inventory, '**'), recursive=True)))) elif os.path.isfile(inventory): modification_times.append(os.path.getmtime(inventory)) if modification_times: self._inventories_mtime = max(modification_times) else: self._inventories_mtime = None if isinstance(self._args.set_environment_variable, dict): set_env_vars = {**self._args.set_environment_variable} else: set_env_vars = {} if self._args.display_color is False: set_env_vars['ANSIBLE_NOCOLOR'] = '1' kwargs = {'container_engine': self._args.container_engine, 'host_cwd': os.getcwd(), 'execution_environment_image': self._args.execution_environment_image, 'execution_environment': self._args.execution_environment, 'navigator_mode': self._args.mode, 'pass_environment_variable': self._args.pass_environment_variable, 'set_environment_variable': set_env_vars, 'private_data_dir': self._args.ansible_runner_artifact_dir, 'rotate_artifacts': self._args.ansible_runner_rotate_artifacts_count, 'timeout': self._args.ansible_runner_timeout} if isinstance(self._args.execution_environment_volume_mounts, list): kwargs.update({'container_volume_mounts': self._args.execution_environment_volume_mounts}) if isinstance(self._args.container_options, list): kwargs.update({'container_options': self._args.container_options}) if self._args.mode == 'interactive': self._collect_inventory_details_interactive(kwargs) else: return self._collect_inventory_details_automated(kwargs) return (None, None, None) if not self._inventory: break if self._inventory_error: self._logger.error(self._inventory_error) break if self.steps.current.name == 'quit': return self.steps.current self._prepare_to_exit(interaction) return None
ansible-navigator
positive
def test_error_invalid_language_target(self): self.manifest['languages-provided'] = {'pt_BR': {}} <DeepExtract> locales.validate_locales(self.err, None) </DeepExtract> self.assert_failed(with_errors=True)
def test_error_invalid_language_target(self): self.manifest['languages-provided'] = {'pt_BR': {}} locales.validate_locales(self.err, None) self.assert_failed(with_errors=True)
app-validator
positive
def resnet152_fpn(input_shape, channels=1, activation='softmax'): img_input = Input(input_shape) resnet_base = ResNet152(img_input, include_top=True) resnet_base.load_weights(download_resnet_imagenet('resnet152')) conv1 = resnet_base.get_layer('conv1_relu').output conv2 = resnet_base.get_layer('res2c_relu').output conv3 = resnet_base.get_layer('res3b7_relu').output conv4 = resnet_base.get_layer('res4b35_relu').output conv5 = resnet_base.get_layer('res5c_relu').output <DeepExtract> P5 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='P5', kernel_initializer='he_normal')(conv5) P5_upsampled = UpSampling2D(name='P5_upsampled')(P5) P4 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C4_reduced', kernel_initializer='he_normal')(conv4) P4 = Add(name='P4_merged')([P5_upsampled, P4]) P4 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P4', kernel_initializer='he_normal')(P4) P4_upsampled = UpSampling2D(name='P4_upsampled')(P4) P3 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C3_reduced', kernel_initializer='he_normal')(conv3) P3 = Add(name='P3_merged')([P4_upsampled, P3]) P3 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P3', kernel_initializer='he_normal')(P3) P3_upsampled = UpSampling2D(name='P3_upsampled')(P3) P2 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C2_reduced', kernel_initializer='he_normal')(conv2) P2 = Add(name='P2_merged')([P3_upsampled, P2]) P2 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P2', kernel_initializer='he_normal')(P2) P2_upsampled = UpSampling2D(size=(2, 2), name='P2_upsampled')(P2) P1 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C1_reduced', kernel_initializer='he_normal')(conv1) P1 = Add(name='P1_merged')([P2_upsampled, P1]) P1 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P1', kernel_initializer='he_normal')(P1) (P1, P2, P3, P4, P5) = (P1, P2, P3, P4, P5) </DeepExtract> x = concatenate([prediction_fpn_block(P5, 'P5', (8, 8)), prediction_fpn_block(P4, 'P4', (4, 4)), prediction_fpn_block(P3, 'P3', (2, 2)), prediction_fpn_block(P2, 'P2')]) <DeepExtract> x = Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding=padding, kernel_initializer='he_normal', use_bias=use_bias, name='aggregation' + '_conv')(x) x = BatchNormalization(name='aggregation' + '_bn', scale=bn_scale, axis=bn_axis, momentum=bn_momentum, epsilon=1.001e-05)(x) x = Activation('relu', name='aggregation' + '_relu')(x) x = x </DeepExtract> <DeepExtract> x = UpSampling2D()(x) x = conv_relu(x, 128, 3, stride=1, padding='same', name='up4' + '_conv1', activation=activation) x = concatenate([x, conv1], axis=-1, name='up4' + '_concat') x = conv_relu(x, 128, 3, stride=1, padding='same', name='up4' + '_conv2', activation=activation) x = x </DeepExtract> x = UpSampling2D()(x) <DeepExtract> x = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding=padding, kernel_initializer='he_normal', use_bias=use_bias, name='up5_conv1' + '_conv')(x) x = Activation(activation, name='up5_conv1' + '_relu')(x) x = x </DeepExtract> <DeepExtract> x = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding=padding, kernel_initializer='he_normal', use_bias=use_bias, name='up5_conv2' + '_conv')(x) x = Activation(activation, name='up5_conv2' + '_relu')(x) x = x </DeepExtract> x = Conv2D(channels, (1, 1), name='mask', kernel_initializer='he_normal')(x) x = Activation(activation)(x) model = Model(img_input, 
x) return model
def resnet152_fpn(input_shape, channels=1, activation='softmax'): img_input = Input(input_shape) resnet_base = ResNet152(img_input, include_top=True) resnet_base.load_weights(download_resnet_imagenet('resnet152')) conv1 = resnet_base.get_layer('conv1_relu').output conv2 = resnet_base.get_layer('res2c_relu').output conv3 = resnet_base.get_layer('res3b7_relu').output conv4 = resnet_base.get_layer('res4b35_relu').output conv5 = resnet_base.get_layer('res5c_relu').output P5 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='P5', kernel_initializer='he_normal')(conv5) P5_upsampled = UpSampling2D(name='P5_upsampled')(P5) P4 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C4_reduced', kernel_initializer='he_normal')(conv4) P4 = Add(name='P4_merged')([P5_upsampled, P4]) P4 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P4', kernel_initializer='he_normal')(P4) P4_upsampled = UpSampling2D(name='P4_upsampled')(P4) P3 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C3_reduced', kernel_initializer='he_normal')(conv3) P3 = Add(name='P3_merged')([P4_upsampled, P3]) P3 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P3', kernel_initializer='he_normal')(P3) P3_upsampled = UpSampling2D(name='P3_upsampled')(P3) P2 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C2_reduced', kernel_initializer='he_normal')(conv2) P2 = Add(name='P2_merged')([P3_upsampled, P2]) P2 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P2', kernel_initializer='he_normal')(P2) P2_upsampled = UpSampling2D(size=(2, 2), name='P2_upsampled')(P2) P1 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C1_reduced', kernel_initializer='he_normal')(conv1) P1 = Add(name='P1_merged')([P2_upsampled, P1]) P1 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P1', kernel_initializer='he_normal')(P1) (P1, P2, P3, P4, P5) = (P1, P2, P3, P4, P5) x = concatenate([prediction_fpn_block(P5, 'P5', (8, 8)), prediction_fpn_block(P4, 'P4', (4, 4)), prediction_fpn_block(P3, 'P3', (2, 2)), prediction_fpn_block(P2, 'P2')]) x = Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), padding=padding, kernel_initializer='he_normal', use_bias=use_bias, name='aggregation' + '_conv')(x) x = BatchNormalization(name='aggregation' + '_bn', scale=bn_scale, axis=bn_axis, momentum=bn_momentum, epsilon=1.001e-05)(x) x = Activation('relu', name='aggregation' + '_relu')(x) x = x x = UpSampling2D()(x) x = conv_relu(x, 128, 3, stride=1, padding='same', name='up4' + '_conv1', activation=activation) x = concatenate([x, conv1], axis=-1, name='up4' + '_concat') x = conv_relu(x, 128, 3, stride=1, padding='same', name='up4' + '_conv2', activation=activation) x = x x = UpSampling2D()(x) x = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding=padding, kernel_initializer='he_normal', use_bias=use_bias, name='up5_conv1' + '_conv')(x) x = Activation(activation, name='up5_conv1' + '_relu')(x) x = x x = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding=padding, kernel_initializer='he_normal', use_bias=use_bias, name='up5_conv2' + '_conv')(x) x = Activation(activation, name='up5_conv2' + '_relu')(x) x = x x = Conv2D(channels, (1, 1), name='mask', kernel_initializer='he_normal')(x) x = Activation(activation)(x) model = Model(img_input, x) return model
dsb2018_topcoders
positive
def add_ps_texcoord_input(self, texcoord, components, format='float', comment=None): assert components == 1 <DeepExtract> search_str = textwrap.dedent('\n // Input signature:\n //\n // Name Index Mask Register SysValue Format Used\n // -------------------- ----- ------ -------- -------- ------- ------\n ') signature_pattern = re.compile('\n ^ \\/\\/\n \\s+ (?P<name>\\w+)\n \\s+ (?P<index>\\d+)\n \\s+ (?P<mask>[x ][y ][z ][w ])\n \\s+ (?P<register>\\d+)\n \\s+ (?P<sysvalue>\\w+)\n \\s+ (?P<format>\\w+)\n \\s* (?P<used>[xyzw ]*)\n $', re.VERBOSE) SignatureEntry = collections.namedtuple('Input', 'Name Index Mask Register SysValue Format Used end'.split()) self.isgn = [] pos = self.declarations_txt.find(search_str) if pos == -1: return pos = pos + len(search_str) while pos != -1: new_pos = self.declarations_txt.find('\n', pos) line = self.declarations_txt[pos:new_pos] match = signature_pattern.match(line) if match is None: return pos (name, index, mask, register, sysvalue, format, used) = match.groups() (index, register) = map(int, (index, register)) self.isgn.append(SignatureEntry(name, index, mask, register, sysvalue, format, used, new_pos + 1)) pos = new_pos + 1 assert False </DeepExtract> texcoords = filter(lambda x: x.Name == 'TEXCOORD', self.isgn) texcoords = sorted(texcoords, key=lambda x: x.Index) if list(filter(lambda x: x.Index == texcoord, texcoords)): raise KeyError('Shader already has TEXCOORD{}'.format(texcoord)) last_texcoord = texcoords[-1] assert last_texcoord.Mask == 'xyz ' assert last_texcoord.Index < texcoord reg_no = last_texcoord.Register mask = ' w' pos = last_texcoord.end <DeepExtract> x = '// {Name:<20} {Index:>5} {Mask:>4} {Register:>8} {SysValue:>8} {Format:>7} {Used:>4}\n'.format(Name='TEXCOORD', Index=texcoord, Mask=mask, Register=reg_no, SysValue='NONE', Format=format, Used=mask) self.declarations_txt = self.declarations_txt[:pos] + x + self.declarations_txt[pos:] </DeepExtract> <DeepExtract> if comment is not None: self.declarations.append(hlsltool.Comment('\n// %s' % comment)) if declaration is not None: d = Declaration('\n' + declaration) if d not in self.declarations: self.declarations.append(d) if comment is None and declaration is None: self.declarations.append(hlsltool.Comment('\n')) </DeepExtract> if comment: <DeepExtract> if comment is not None: self.declarations.append(hlsltool.Comment('\n// %s' % comment)) if '// ' + comment is not None: d = Declaration('\n' + '// ' + comment) if d not in self.declarations: self.declarations.append(d) if comment is None and '// ' + comment is None: self.declarations.append(hlsltool.Comment('\n')) </DeepExtract> reg = 'v{}.{}'.format(reg_no, mask.strip()) <DeepExtract> if comment is not None: self.declarations.append(hlsltool.Comment('\n// %s' % comment)) if 'dcl_input_ps linear ' + reg is not None: d = Declaration('\n' + 'dcl_input_ps linear ' + reg) if d not in self.declarations: self.declarations.append(d) if comment is None and 'dcl_input_ps linear ' + reg is None: self.declarations.append(hlsltool.Comment('\n')) </DeepExtract> return reg
def add_ps_texcoord_input(self, texcoord, components, format='float', comment=None): assert components == 1 search_str = textwrap.dedent('\n // Input signature:\n //\n // Name Index Mask Register SysValue Format Used\n // -------------------- ----- ------ -------- -------- ------- ------\n ') signature_pattern = re.compile('\n ^ \\/\\/\n \\s+ (?P<name>\\w+)\n \\s+ (?P<index>\\d+)\n \\s+ (?P<mask>[x ][y ][z ][w ])\n \\s+ (?P<register>\\d+)\n \\s+ (?P<sysvalue>\\w+)\n \\s+ (?P<format>\\w+)\n \\s* (?P<used>[xyzw ]*)\n $', re.VERBOSE) SignatureEntry = collections.namedtuple('Input', 'Name Index Mask Register SysValue Format Used end'.split()) self.isgn = [] pos = self.declarations_txt.find(search_str) if pos == -1: return pos = pos + len(search_str) while pos != -1: new_pos = self.declarations_txt.find('\n', pos) line = self.declarations_txt[pos:new_pos] match = signature_pattern.match(line) if match is None: return pos (name, index, mask, register, sysvalue, format, used) = match.groups() (index, register) = map(int, (index, register)) self.isgn.append(SignatureEntry(name, index, mask, register, sysvalue, format, used, new_pos + 1)) pos = new_pos + 1 assert False texcoords = filter(lambda x: x.Name == 'TEXCOORD', self.isgn) texcoords = sorted(texcoords, key=lambda x: x.Index) if list(filter(lambda x: x.Index == texcoord, texcoords)): raise KeyError('Shader already has TEXCOORD{}'.format(texcoord)) last_texcoord = texcoords[-1] assert last_texcoord.Mask == 'xyz ' assert last_texcoord.Index < texcoord reg_no = last_texcoord.Register mask = ' w' pos = last_texcoord.end x = '// {Name:<20} {Index:>5} {Mask:>4} {Register:>8} {SysValue:>8} {Format:>7} {Used:>4}\n'.format(Name='TEXCOORD', Index=texcoord, Mask=mask, Register=reg_no, SysValue='NONE', Format=format, Used=mask) self.declarations_txt = self.declarations_txt[:pos] + x + self.declarations_txt[pos:] if comment is not None: self.declarations.append(hlsltool.Comment('\n// %s' % comment)) if declaration is not None: d = Declaration('\n' + declaration) if d not in self.declarations: self.declarations.append(d) if comment is None and declaration is None: self.declarations.append(hlsltool.Comment('\n')) if comment: if comment is not None: self.declarations.append(hlsltool.Comment('\n// %s' % comment)) if '// ' + comment is not None: d = Declaration('\n' + '// ' + comment) if d not in self.declarations: self.declarations.append(d) if comment is None and '// ' + comment is None: self.declarations.append(hlsltool.Comment('\n')) reg = 'v{}.{}'.format(reg_no, mask.strip()) if comment is not None: self.declarations.append(hlsltool.Comment('\n// %s' % comment)) if 'dcl_input_ps linear ' + reg is not None: d = Declaration('\n' + 'dcl_input_ps linear ' + reg) if d not in self.declarations: self.declarations.append(d) if comment is None and 'dcl_input_ps linear ' + reg is None: self.declarations.append(hlsltool.Comment('\n')) return reg
3d-fixes
positive
def _add(prim): def addType(dict_, ret_type): if ret_type not in dict_: new_list = [] for (type_, list_) in dict_.items(): if issubclass(type_, ret_type): for item in list_: if item not in new_list: new_list.append(item) dict_[ret_type] = new_list <DeepExtract> if prim.ret not in self.primitives: new_list = [] for (type_, list_) in self.primitives.items(): if issubclass(type_, prim.ret): for item in list_: if item not in new_list: new_list.append(item) self.primitives[prim.ret] = new_list </DeepExtract> <DeepExtract> if prim.ret not in self.terminals: new_list = [] for (type_, list_) in self.terminals.items(): if issubclass(type_, prim.ret): for item in list_: if item not in new_list: new_list.append(item) self.terminals[prim.ret] = new_list </DeepExtract> self.mapping[prim.name] = prim if isinstance(prim, Primitive): for type_ in prim.args: <DeepExtract> if type_ not in self.primitives: new_list = [] for (type_, list_) in self.primitives.items(): if issubclass(type_, type_): for item in list_: if item not in new_list: new_list.append(item) self.primitives[type_] = new_list </DeepExtract> <DeepExtract> if type_ not in self.terminals: new_list = [] for (type_, list_) in self.terminals.items(): if issubclass(type_, type_): for item in list_: if item not in new_list: new_list.append(item) self.terminals[type_] = new_list </DeepExtract> dict_ = self.primitives else: dict_ = self.terminals for type_ in dict_: if issubclass(prim.ret, type_): dict_[type_].append(prim)
def _add(prim): def addType(dict_, ret_type): if ret_type not in dict_: new_list = [] for (type_, list_) in dict_.items(): if issubclass(type_, ret_type): for item in list_: if item not in new_list: new_list.append(item) dict_[ret_type] = new_list if prim.ret not in self.primitives: new_list = [] for (type_, list_) in self.primitives.items(): if issubclass(type_, prim.ret): for item in list_: if item not in new_list: new_list.append(item) self.primitives[prim.ret] = new_list if prim.ret not in self.terminals: new_list = [] for (type_, list_) in self.terminals.items(): if issubclass(type_, prim.ret): for item in list_: if item not in new_list: new_list.append(item) self.terminals[prim.ret] = new_list self.mapping[prim.name] = prim if isinstance(prim, Primitive): for type_ in prim.args: if type_ not in self.primitives: new_list = [] for (type_, list_) in self.primitives.items(): if issubclass(type_, type_): for item in list_: if item not in new_list: new_list.append(item) self.primitives[type_] = new_list if type_ not in self.terminals: new_list = [] for (type_, list_) in self.terminals.items(): if issubclass(type_, type_): for item in list_: if item not in new_list: new_list.append(item) self.terminals[type_] = new_list dict_ = self.primitives else: dict_ = self.terminals for type_ in dict_: if issubclass(prim.ret, type_): dict_[type_].append(prim)
deap
positive
@property def postgres_database(self): """Get the postgres database associated with the pipeline Note: This will create the object if it doesn't exist Returns: postgres_database(Object): lazily-constructed postgres database """ if not self._postgres_database: <DeepExtract> instance_count = sum([1 for o in self._base_objects.values() if isinstance(o, PostgresDatabase)]) object_id = PostgresDatabase.__name__ + str(instance_count) new_object = PostgresDatabase(object_id, **kwargs) self._base_objects[object_id] = new_object self._postgres_database = new_object </DeepExtract> return self._postgres_database
@property def postgres_database(self): """Get the postgres database associated with the pipeline Note: This will create the object if it doesn't exist Returns: postgres_database(Object): lazily-constructed postgres database """ if not self._postgres_database: instance_count = sum([1 for o in self._base_objects.values() if isinstance(o, PostgresDatabase)]) object_id = PostgresDatabase.__name__ + str(instance_count) new_object = PostgresDatabase(object_id, **kwargs) self._base_objects[object_id] = new_object self._postgres_database = new_object return self._postgres_database
dataduct
positive
def common_open(): <DeepExtract> global hwspi, swspi try: hwspi = SPI(2, baudrate=spi_freq, polarity=1, phase=1, bits=8, firstbit=SPI.MSB, sck=Pin(jtagpin.tck), mosi=Pin(jtagpin.tdi), miso=Pin(jtagpin.tdo)) except: hwspi = SPI(baudrate=spi_freq, polarity=1, phase=1, bits=8, firstbit=SPI.MSB, sck=Pin(jtagpin.tck), mosi=Pin(jtagpin.tdi), miso=Pin(jtagpin.tdo)) swspi = SoftSPI(baudrate=spi_freq, polarity=1, phase=1, bits=8, firstbit=SPI.MSB, sck=Pin(jtagpin.tck), mosi=Pin(jtagpin.tdi), miso=Pin(jtagpin.tdo)) </DeepExtract> hwspi.init(sck=Pin(jtagpin.tcknc)) <DeepExtract> global tck, tms, tdi, tdo, led led = Pin(jtagpin.led, Pin.OUT) tms = Pin(jtagpin.tms, Pin.OUT) tck = Pin(jtagpin.tck, Pin.OUT) tdi = Pin(jtagpin.tdi, Pin.OUT) tdo = Pin(jtagpin.tdo, Pin.IN) </DeepExtract> led.on() <DeepExtract> if 1: tms.on() else: tms.off() for i in range(6): tck.off() tck.on() </DeepExtract> <DeepExtract> leave = int(ticks_ms()) + 0 send_tms(0, 1) while int(ticks_ms()) - leave < 0: send_tms(0, 1) send_tms(1, 1) </DeepExtract> <DeepExtract> send_tms(1, 1) send_tms(0, 2) send_read_buf_lsb1st(b'\x1c', 1, 0) send_tms0111() </DeepExtract> <DeepExtract> send_tms(0, 2) send_read_buf_lsb1st(bytearray([255 for i in range(64)]), 1, 0) send_tms0111() </DeepExtract> <DeepExtract> send_tms(1, 1) send_tms(0, 2) send_read_buf_lsb1st(b'\xc6', 1, 0) send_tms0111() </DeepExtract> <DeepExtract> send_tms(0, 2) send_read_buf_lsb1st(b'\x00', 1, 0) send_tms(0, 1) send_tms(1, 2) runtest_idle(2 + 1, 10) </DeepExtract> <DeepExtract> send_tms(1, 1) send_tms(0, 2) send_read_buf_lsb1st(b'<', 1, 0) send_tms(0, 1) send_tms(1, 2) runtest_idle(2 + 1, 1) </DeepExtract> status = bytearray(4) <DeepExtract> send_tms(0, 2) send_read_buf_lsb1st(status, 1, addressof(status)) send_tms0111() </DeepExtract> <DeepExtract> if unpack('<I', status)[0] & 147520 != 0: print('0x%08X & 0x%08X != 0x%08X %s' % (unpack('<I', status)[0], 147520, 0, 'FAIL status')) </DeepExtract> <DeepExtract> send_tms(1, 1) send_tms(0, 2) send_read_buf_lsb1st(b'\x0e', 1, 0) send_tms0111() </DeepExtract> <DeepExtract> send_tms(0, 2) send_read_buf_lsb1st(b'\x01', 1, 0) send_tms(0, 1) send_tms(1, 2) runtest_idle(2 + 1, 10) </DeepExtract> <DeepExtract> send_tms(1, 1) send_tms(0, 2) send_read_buf_lsb1st(b'<', 1, 0) send_tms(0, 1) send_tms(1, 2) runtest_idle(2 + 1, 1) </DeepExtract> status = bytearray(4) <DeepExtract> send_tms(0, 2) send_read_buf_lsb1st(status, 1, addressof(status)) send_tms0111() </DeepExtract> <DeepExtract> if unpack('<I', status)[0] & 45056 != 0: print('0x%08X & 0x%08X != 0x%08X %s' % (unpack('<I', status)[0], 45056, 0, 'FAIL status')) </DeepExtract>
def common_open(): global hwspi, swspi try: hwspi = SPI(2, baudrate=spi_freq, polarity=1, phase=1, bits=8, firstbit=SPI.MSB, sck=Pin(jtagpin.tck), mosi=Pin(jtagpin.tdi), miso=Pin(jtagpin.tdo)) except: hwspi = SPI(baudrate=spi_freq, polarity=1, phase=1, bits=8, firstbit=SPI.MSB, sck=Pin(jtagpin.tck), mosi=Pin(jtagpin.tdi), miso=Pin(jtagpin.tdo)) swspi = SoftSPI(baudrate=spi_freq, polarity=1, phase=1, bits=8, firstbit=SPI.MSB, sck=Pin(jtagpin.tck), mosi=Pin(jtagpin.tdi), miso=Pin(jtagpin.tdo)) hwspi.init(sck=Pin(jtagpin.tcknc)) global tck, tms, tdi, tdo, led led = Pin(jtagpin.led, Pin.OUT) tms = Pin(jtagpin.tms, Pin.OUT) tck = Pin(jtagpin.tck, Pin.OUT) tdi = Pin(jtagpin.tdi, Pin.OUT) tdo = Pin(jtagpin.tdo, Pin.IN) led.on() if 1: tms.on() else: tms.off() for i in range(6): tck.off() tck.on() leave = int(ticks_ms()) + 0 send_tms(0, 1) while int(ticks_ms()) - leave < 0: send_tms(0, 1) send_tms(1, 1) send_tms(1, 1) send_tms(0, 2) send_read_buf_lsb1st(b'\x1c', 1, 0) send_tms0111() send_tms(0, 2) send_read_buf_lsb1st(bytearray([255 for i in range(64)]), 1, 0) send_tms0111() send_tms(1, 1) send_tms(0, 2) send_read_buf_lsb1st(b'\xc6', 1, 0) send_tms0111() send_tms(0, 2) send_read_buf_lsb1st(b'\x00', 1, 0) send_tms(0, 1) send_tms(1, 2) runtest_idle(2 + 1, 10) send_tms(1, 1) send_tms(0, 2) send_read_buf_lsb1st(b'<', 1, 0) send_tms(0, 1) send_tms(1, 2) runtest_idle(2 + 1, 1) status = bytearray(4) send_tms(0, 2) send_read_buf_lsb1st(status, 1, addressof(status)) send_tms0111() if unpack('<I', status)[0] & 147520 != 0: print('0x%08X & 0x%08X != 0x%08X %s' % (unpack('<I', status)[0], 147520, 0, 'FAIL status')) send_tms(1, 1) send_tms(0, 2) send_read_buf_lsb1st(b'\x0e', 1, 0) send_tms0111() send_tms(0, 2) send_read_buf_lsb1st(b'\x01', 1, 0) send_tms(0, 1) send_tms(1, 2) runtest_idle(2 + 1, 10) send_tms(1, 1) send_tms(0, 2) send_read_buf_lsb1st(b'<', 1, 0) send_tms(0, 1) send_tms(1, 2) runtest_idle(2 + 1, 1) status = bytearray(4) send_tms(0, 2) send_read_buf_lsb1st(status, 1, addressof(status)) send_tms0111() if unpack('<I', status)[0] & 45056 != 0: print('0x%08X & 0x%08X != 0x%08X %s' % (unpack('<I', status)[0], 45056, 0, 'FAIL status'))
esp32ecp5
positive
@StructuringDecorators.must_be_structured def capacities_to_cycles(self, thresh_max_cap=0.98, thresh_min_cap=0.78, interval_cap=0.03): """ Get cycles to reach set threshold capacities. Args: thresh_max_cap (float): Upper bound on capacity to compute cycles at. thresh_min_cap (float): Lower bound on capacity to compute cycles at. interval_cap (float): Interval/step size. Returns: pandas.DataFrame: """ threshold_list = np.around(np.arange(thresh_max_cap, thresh_min_cap, -interval_cap), 2) counter = 0 cycles = pd.DataFrame(np.zeros((1, len(threshold_list)))) for threshold in threshold_list: <DeepExtract> if len(self.structured_summary) > n_cycles_cutoff: max_capacity = np.median(self.structured_summary.discharge_capacity.iloc[0:n_cycles_cutoff]) else: max_capacity = 1.1 if self.structured_summary.discharge_capacity.iloc[-1] / max_capacity <= threshold: cycle_life = self.structured_summary[self.structured_summary.discharge_capacity < threshold * max_capacity].index[0] else: cycle_life = len(self.structured_summary) + 1 cycles[counter] = cycle_life </DeepExtract> counter = counter + 1 cycles.columns = np.core.defchararray.add('capacity_', threshold_list.astype(str)) return cycles
@StructuringDecorators.must_be_structured def capacities_to_cycles(self, thresh_max_cap=0.98, thresh_min_cap=0.78, interval_cap=0.03): """ Get cycles to reach set threshold capacities. Args: thresh_max_cap (float): Upper bound on capacity to compute cycles at. thresh_min_cap (float): Lower bound on capacity to compute cycles at. interval_cap (float): Interval/step size. Returns: pandas.DataFrame: """ threshold_list = np.around(np.arange(thresh_max_cap, thresh_min_cap, -interval_cap), 2) counter = 0 cycles = pd.DataFrame(np.zeros((1, len(threshold_list)))) for threshold in threshold_list: if len(self.structured_summary) > n_cycles_cutoff: max_capacity = np.median(self.structured_summary.discharge_capacity.iloc[0:n_cycles_cutoff]) else: max_capacity = 1.1 if self.structured_summary.discharge_capacity.iloc[-1] / max_capacity <= threshold: cycle_life = self.structured_summary[self.structured_summary.discharge_capacity < threshold * max_capacity].index[0] else: cycle_life = len(self.structured_summary) + 1 cycles[counter] = cycle_life counter = counter + 1 cycles.columns = np.core.defchararray.add('capacity_', threshold_list.astype(str)) return cycles
beep
positive
@no_warnings def test_fast_resource(self): <DeepExtract> request = aiocoap.Message(code=aiocoap.GET) request.unresolved_remote = self.servernetloc request = request </DeepExtract> request.opt.uri_path = ['empty'] response = self.fetch_response(request) self.assertEqual(response.code, aiocoap.CONTENT, 'Fast request did not succede') self.assertEqual(self._count_empty_acks(), 0, 'Fast resource had an empty ack')
@no_warnings def test_fast_resource(self): request = aiocoap.Message(code=aiocoap.GET) request.unresolved_remote = self.servernetloc request = request request.opt.uri_path = ['empty'] response = self.fetch_response(request) self.assertEqual(response.code, aiocoap.CONTENT, 'Fast request did not succede') self.assertEqual(self._count_empty_acks(), 0, 'Fast resource had an empty ack')
aiocoap
positive
def compute_losses(rpn_match, rpn_bbox, rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask): <DeepExtract> rpn_match = rpn_match.squeeze(2) anchor_class = (rpn_match == 1).long() indices = torch.nonzero(rpn_match != 0) rpn_class_logits = rpn_class_logits[indices.data[:, 0], indices.data[:, 1], :] anchor_class = anchor_class[indices.data[:, 0], indices.data[:, 1]] loss = F.cross_entropy(rpn_class_logits, anchor_class) rpn_class_loss = loss </DeepExtract> <DeepExtract> rpn_match = rpn_match.squeeze(2) indices = torch.nonzero(rpn_match == 1) rpn_pred_bbox = rpn_pred_bbox[indices.data[:, 0], indices.data[:, 1]] rpn_bbox = rpn_bbox[0, :rpn_pred_bbox.size()[0], :] loss = F.smooth_l1_loss(rpn_pred_bbox, rpn_bbox) rpn_bbox_loss = loss </DeepExtract> <DeepExtract> if target_class_ids.size(): loss = F.cross_entropy(mrcnn_class_logits, target_class_ids.long()) else: loss = Variable(torch.FloatTensor([0]), requires_grad=False) if target_class_ids.is_cuda: loss = loss.cuda() mrcnn_class_loss = loss </DeepExtract> <DeepExtract> if target_class_ids.size(): positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0] positive_roi_class_ids = target_class_ids[positive_roi_ix.data].long() indices = torch.stack((positive_roi_ix, positive_roi_class_ids), dim=1) target_deltas = target_deltas[indices[:, 0].data, :] mrcnn_bbox = mrcnn_bbox[indices[:, 0].data, indices[:, 1].data, :] loss = F.smooth_l1_loss(mrcnn_bbox, target_deltas) else: loss = Variable(torch.FloatTensor([0]), requires_grad=False) if target_class_ids.is_cuda: loss = loss.cuda() mrcnn_bbox_loss = loss </DeepExtract> <DeepExtract> if target_class_ids.size(): positive_ix = torch.nonzero(target_class_ids > 0)[:, 0] positive_class_ids = target_class_ids[positive_ix.data].long() indices = torch.stack((positive_ix, positive_class_ids), dim=1) y_true = target_mask[indices[:, 0].data, :, :] y_pred = mrcnn_mask[indices[:, 0].data, indices[:, 1].data, :, :] weights = y_true.data + 1 loss = F.binary_cross_entropy(y_pred, y_true, weight=weights) else: loss = Variable(torch.FloatTensor([0]), requires_grad=False) if target_class_ids.is_cuda: loss = loss.cuda() mrcnn_mask_loss = loss </DeepExtract> return [rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss]
def compute_losses(rpn_match, rpn_bbox, rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask): rpn_match = rpn_match.squeeze(2) anchor_class = (rpn_match == 1).long() indices = torch.nonzero(rpn_match != 0) rpn_class_logits = rpn_class_logits[indices.data[:, 0], indices.data[:, 1], :] anchor_class = anchor_class[indices.data[:, 0], indices.data[:, 1]] loss = F.cross_entropy(rpn_class_logits, anchor_class) rpn_class_loss = loss rpn_match = rpn_match.squeeze(2) indices = torch.nonzero(rpn_match == 1) rpn_pred_bbox = rpn_pred_bbox[indices.data[:, 0], indices.data[:, 1]] rpn_bbox = rpn_bbox[0, :rpn_pred_bbox.size()[0], :] loss = F.smooth_l1_loss(rpn_pred_bbox, rpn_bbox) rpn_bbox_loss = loss if target_class_ids.size(): loss = F.cross_entropy(mrcnn_class_logits, target_class_ids.long()) else: loss = Variable(torch.FloatTensor([0]), requires_grad=False) if target_class_ids.is_cuda: loss = loss.cuda() mrcnn_class_loss = loss if target_class_ids.size(): positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0] positive_roi_class_ids = target_class_ids[positive_roi_ix.data].long() indices = torch.stack((positive_roi_ix, positive_roi_class_ids), dim=1) target_deltas = target_deltas[indices[:, 0].data, :] mrcnn_bbox = mrcnn_bbox[indices[:, 0].data, indices[:, 1].data, :] loss = F.smooth_l1_loss(mrcnn_bbox, target_deltas) else: loss = Variable(torch.FloatTensor([0]), requires_grad=False) if target_class_ids.is_cuda: loss = loss.cuda() mrcnn_bbox_loss = loss if target_class_ids.size(): positive_ix = torch.nonzero(target_class_ids > 0)[:, 0] positive_class_ids = target_class_ids[positive_ix.data].long() indices = torch.stack((positive_ix, positive_class_ids), dim=1) y_true = target_mask[indices[:, 0].data, :, :] y_pred = mrcnn_mask[indices[:, 0].data, indices[:, 1].data, :, :] weights = y_true.data + 1 loss = F.binary_cross_entropy(y_pred, y_true, weight=weights) else: loss = Variable(torch.FloatTensor([0]), requires_grad=False) if target_class_ids.is_cuda: loss = loss.cuda() mrcnn_mask_loss = loss return [rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss]
cvpr-2018-autonomous-driving-autopilot-solution
positive
@property def state(self) -> Optional[str]: <DeepExtract> if self.is_state(): state_obj = us.states.lookup(self.fips) elif self.is_county(): state_obj = us.states.lookup(self.fips[:2]) state_obj = None </DeepExtract> if state_obj: return state_obj.abbr return None
@property def state(self) -> Optional[str]: if self.is_state(): state_obj = us.states.lookup(self.fips) elif self.is_county(): state_obj = us.states.lookup(self.fips[:2]) state_obj = None if state_obj: return state_obj.abbr return None
covid-data-model
positive
def fit_transform(self, kmat, copy=True): """Fit kernel PCA on the precomputed kernel matrix & project to lower dimensions Notes ----- - Keeps the kernel matrix intact - can be done only once on a given object, need to reinitialise if trying to run again Parameters ---------- kmat : numpy.ndarray, shape=(M,M) kerenl matrix between the observations needs to be square and real, symmetric copy : bool, optional, default=True copy the kernel matrix or overwrite it, passed to self.transform() nb. the kernel matrix will be left centered if this is False Returns ------- """ <DeepExtract> if self._fitted: raise RuntimeError('Kernel already fitted before, please reinitialise the object!') self._check_kmat(kmat, square=True) self._m = len(kmat) (self.colmean, self.mean, self.center_kmat) = self.center_square(kmat) (self._lambdas, self._alphas) = salg.eigh(self.center_kmat, eigvals=(self._m - self.n_components, self._m - 1)) self._lambdas = np.flipud(self._lambdas) self._alphas = np.fliplr(self._alphas) / np.sqrt(self._lambdas) self._fitted = True </DeepExtract> return self.transform(self.center_kmat, iscentered=True, copy=copy)
def fit_transform(self, kmat, copy=True): """Fit kernel PCA on the precomputed kernel matrix & project to lower dimensions Notes ----- - Keeps the kernel matrix intact - can be done only once on a given object, need to reinitialise if trying to run again Parameters ---------- kmat : numpy.ndarray, shape=(M,M) kerenl matrix between the observations needs to be square and real, symmetric copy : bool, optional, default=True copy the kernel matrix or overwrite it, passed to self.transform() nb. the kernel matrix will be left centered if this is False Returns ------- """ if self._fitted: raise RuntimeError('Kernel already fitted before, please reinitialise the object!') self._check_kmat(kmat, square=True) self._m = len(kmat) (self.colmean, self.mean, self.center_kmat) = self.center_square(kmat) (self._lambdas, self._alphas) = salg.eigh(self.center_kmat, eigvals=(self._m - self.n_components, self._m - 1)) self._lambdas = np.flipud(self._lambdas) self._alphas = np.fliplr(self._alphas) / np.sqrt(self._lambdas) self._fitted = True return self.transform(self.center_kmat, iscentered=True, copy=copy)
ASAP
positive
def __init__(self, mapped_matrix_int, order_num_dist, idle_driver_dist_time, idle_driver_location_mat, order_time_dist, order_price_dist, l_max, M, N, n_side, time_limit, probability=1.0 / 28, real_orders='', onoff_driver_location_mat='', global_flag='global', time_interval=10, fleet_rate=0.8, fleet_help=False): """ :param mapped_matrix_int: 2D matrix: each position is either -100 or grid id from order in real data. :param order_num_dist: 144 [{node_id1: [mu, std]}, {node_id2: [mu, std]}, ..., {node_idn: [mu, std]}] node_id1 is node the index in self.nodes :param idle_driver_dist_time: [[mu1, std1], [mu2, std2], ..., [mu144, std144]] mean and variance of idle drivers in the city at each time :param idle_driver_location_mat: 144 x num_valid_grids matrix. :param order_time_dist: [ 0.27380797,..., 0.00205766] The probs of order duration = 1 to 9 :param order_price_dist: [[10.17, 3.34], # mean and std of order's price, order durations = 10 minutes. [15.02, 6.90], # mean and std of order's price, order durations = 20 minutes. ...,] :param onoff_driver_location_mat: 144 x 504 x 2: 144 total time steps, num_valid_grids = 504. mean and std of online driver number - offline driver number onoff_driver_location_mat[t] = [[-0.625 2.92350389] <-- Corresponds to the grid in target_node_ids [ 0.09090909 1.46398452] [ 0.09090909 2.36596622] [-1.2 2.05588586]...] :param M: :param N: :param n_side: :param time_interval: :param l_max: The max-duration of an order :param fleet_help: trigger for joint order dispatching and fleet management :return: """ self.M = M self.N = N self.nodes = [Node(i) for i in range(M * N)] self.drivers = {} self.n_drivers = 0 self.n_offline_drivers = 0 self.n_fleet_drivers = 0 <DeepExtract> for (idx, current_node) in enumerate(self.nodes): if current_node is not None: (i, j) = ids_1dto2d(idx, M, N) current_node.set_neighbors(get_neighbor_list(i, j, M, N, n_side, self.nodes)) </DeepExtract> self.city_time = 0 self.n_intervals = 1440 // time_interval self.n_nodes = self.M * self.N self.n_side = n_side self.order_response_rate = 0.0 self.fake_response_rate = 0.0 self.fleet_response_rate = 0.0 self.ori_idle = 0 self.ori_fleet = 0 self.ori_order_num = 0 self.gmv = 0 self.fleet_rate = fleet_rate self.fleet_help = fleet_help self.RANDOM_SEED = RANDOM_SEED self.state_space = None self.feature_space = None self.l_max = l_max assert l_max <= M - 1 and l_max <= N - 1 assert 1 <= l_max <= 9 self.target_grids = [] self.n_valid_grids = 0 self.nodes = [None for _ in np.arange(self.M * self.N)] <DeepExtract> (row_inds, col_inds) = np.where(mapped_matrix_int >= 0) target_ids = [] for (x, y) in zip(row_inds, col_inds): node_id = ids_2dto1d(x, y, self.M, self.N) self.nodes[node_id] = Node(node_id) target_ids.append(node_id) for (x, y) in zip(row_inds, col_inds): node_id = ids_2dto1d(x, y, self.M, self.N) self.nodes[node_id].get_layers_neighbors(self.l_max, self.M, self.N, self) self.target_grids = target_ids self.n_valid_grids = len(target_ids) </DeepExtract> self.mapped_matrix_int = mapped_matrix_int <DeepExtract> for (idx, current_node) in enumerate(self.nodes): (i, j) = ids_1dto2d(idx, self.M, self.N) if current_node is not None: current_node.set_neighbors(get_neighbor_list(i, j, self.M, self.N, n_side, self.nodes)) </DeepExtract> self.order_num_dist = order_num_dist self.distribution_name = 'Poisson' self.idle_driver_dist_time = idle_driver_dist_time self.idle_driver_location_mat = idle_driver_location_mat self.order_time_dist = order_time_dist[:l_max] / np.sum(order_time_dist[:l_max]) 
self.order_price_dist = order_price_dist self._current_dist = None self._entropy = 0 self._global_entropy = 0 target_node_ids = [] target_grids_sorted = np.sort(mapped_matrix_int[np.where(mapped_matrix_int >= 0)]) for item in target_grids_sorted: (x, y) = np.where(mapped_matrix_int == item) target_node_ids.append(ids_2dto1d(x, y, M, N)[0]) self.target_node_ids = target_node_ids self.node_mapping = {} self.layer_neighborhood = dict() <DeepExtract> target_grid_id = self.mapped_matrix_int[np.where(self.mapped_matrix_int > 0)] for (g_id, n_id) in zip(target_grid_id, self.target_grids): self.node_mapping[g_id] = n_id </DeepExtract> <DeepExtract> vaild_node_index = [] for _row in range(self.M): for _column in range(self.N): if self.mapped_matrix_int[_row][_column] != -100: vaild_node_index.append(self.mapped_matrix_int[_row][_column]) (i, j) = ids_1dto2d(self.mapped_matrix_int[_row][_column], self.M, self.N) _layer_neighbors = dict() for _layer in range(self.l_max): _layer_neighbors[_layer] = [] for _layer in range(self.l_max): for _node in get_layers_neighbors(i, j, self.l_max, self.M, self.N)[_layer]: _index = ids_2dto1d(_node[0], _node[1], self.M, self.N) _layer_neighbors[_layer].append(_index) self.layer_neighborhood[self.mapped_matrix_int[_row][_column]] = _layer_neighbors for _layers in self.layer_neighborhood.values(): for _layer in _layers.values(): for _node in _layer: if _node not in vaild_node_index: _layer.remove(_node) </DeepExtract> self.real_orders = real_orders self.p = probability self.time_keys = [int(dt.strftime('%H%M')) for dt in datetime_range(datetime(2017, 9, 1, 0), datetime(2017, 9, 2, 0), timedelta(minutes=time_interval))] self.day_orders = [] self.onoff_driver_location_mat = onoff_driver_location_mat self.all_grids_on_number = 0 self.all_grids_off_number = 0 self.out_grid_in_orders = np.zeros((self.n_intervals, len(self.target_grids))) self.global_flag = global_flag self.weights_layers_neighbors = [1.0, np.exp(-1), np.exp(-2)] self._time_limit = time_limit
def __init__(self, mapped_matrix_int, order_num_dist, idle_driver_dist_time, idle_driver_location_mat, order_time_dist, order_price_dist, l_max, M, N, n_side, time_limit, probability=1.0 / 28, real_orders='', onoff_driver_location_mat='', global_flag='global', time_interval=10, fleet_rate=0.8, fleet_help=False): """ :param mapped_matrix_int: 2D matrix: each position is either -100 or grid id from order in real data. :param order_num_dist: 144 [{node_id1: [mu, std]}, {node_id2: [mu, std]}, ..., {node_idn: [mu, std]}] node_id1 is node the index in self.nodes :param idle_driver_dist_time: [[mu1, std1], [mu2, std2], ..., [mu144, std144]] mean and variance of idle drivers in the city at each time :param idle_driver_location_mat: 144 x num_valid_grids matrix. :param order_time_dist: [ 0.27380797,..., 0.00205766] The probs of order duration = 1 to 9 :param order_price_dist: [[10.17, 3.34], # mean and std of order's price, order durations = 10 minutes. [15.02, 6.90], # mean and std of order's price, order durations = 20 minutes. ...,] :param onoff_driver_location_mat: 144 x 504 x 2: 144 total time steps, num_valid_grids = 504. mean and std of online driver number - offline driver number onoff_driver_location_mat[t] = [[-0.625 2.92350389] <-- Corresponds to the grid in target_node_ids [ 0.09090909 1.46398452] [ 0.09090909 2.36596622] [-1.2 2.05588586]...] :param M: :param N: :param n_side: :param time_interval: :param l_max: The max-duration of an order :param fleet_help: trigger for joint order dispatching and fleet management :return: """ self.M = M self.N = N self.nodes = [Node(i) for i in range(M * N)] self.drivers = {} self.n_drivers = 0 self.n_offline_drivers = 0 self.n_fleet_drivers = 0 for (idx, current_node) in enumerate(self.nodes): if current_node is not None: (i, j) = ids_1dto2d(idx, M, N) current_node.set_neighbors(get_neighbor_list(i, j, M, N, n_side, self.nodes)) self.city_time = 0 self.n_intervals = 1440 // time_interval self.n_nodes = self.M * self.N self.n_side = n_side self.order_response_rate = 0.0 self.fake_response_rate = 0.0 self.fleet_response_rate = 0.0 self.ori_idle = 0 self.ori_fleet = 0 self.ori_order_num = 0 self.gmv = 0 self.fleet_rate = fleet_rate self.fleet_help = fleet_help self.RANDOM_SEED = RANDOM_SEED self.state_space = None self.feature_space = None self.l_max = l_max assert l_max <= M - 1 and l_max <= N - 1 assert 1 <= l_max <= 9 self.target_grids = [] self.n_valid_grids = 0 self.nodes = [None for _ in np.arange(self.M * self.N)] (row_inds, col_inds) = np.where(mapped_matrix_int >= 0) target_ids = [] for (x, y) in zip(row_inds, col_inds): node_id = ids_2dto1d(x, y, self.M, self.N) self.nodes[node_id] = Node(node_id) target_ids.append(node_id) for (x, y) in zip(row_inds, col_inds): node_id = ids_2dto1d(x, y, self.M, self.N) self.nodes[node_id].get_layers_neighbors(self.l_max, self.M, self.N, self) self.target_grids = target_ids self.n_valid_grids = len(target_ids) self.mapped_matrix_int = mapped_matrix_int for (idx, current_node) in enumerate(self.nodes): (i, j) = ids_1dto2d(idx, self.M, self.N) if current_node is not None: current_node.set_neighbors(get_neighbor_list(i, j, self.M, self.N, n_side, self.nodes)) self.order_num_dist = order_num_dist self.distribution_name = 'Poisson' self.idle_driver_dist_time = idle_driver_dist_time self.idle_driver_location_mat = idle_driver_location_mat self.order_time_dist = order_time_dist[:l_max] / np.sum(order_time_dist[:l_max]) self.order_price_dist = order_price_dist self._current_dist = None self._entropy = 0 
self._global_entropy = 0 target_node_ids = [] target_grids_sorted = np.sort(mapped_matrix_int[np.where(mapped_matrix_int >= 0)]) for item in target_grids_sorted: (x, y) = np.where(mapped_matrix_int == item) target_node_ids.append(ids_2dto1d(x, y, M, N)[0]) self.target_node_ids = target_node_ids self.node_mapping = {} self.layer_neighborhood = dict() target_grid_id = self.mapped_matrix_int[np.where(self.mapped_matrix_int > 0)] for (g_id, n_id) in zip(target_grid_id, self.target_grids): self.node_mapping[g_id] = n_id vaild_node_index = [] for _row in range(self.M): for _column in range(self.N): if self.mapped_matrix_int[_row][_column] != -100: vaild_node_index.append(self.mapped_matrix_int[_row][_column]) (i, j) = ids_1dto2d(self.mapped_matrix_int[_row][_column], self.M, self.N) _layer_neighbors = dict() for _layer in range(self.l_max): _layer_neighbors[_layer] = [] for _layer in range(self.l_max): for _node in get_layers_neighbors(i, j, self.l_max, self.M, self.N)[_layer]: _index = ids_2dto1d(_node[0], _node[1], self.M, self.N) _layer_neighbors[_layer].append(_index) self.layer_neighborhood[self.mapped_matrix_int[_row][_column]] = _layer_neighbors for _layers in self.layer_neighborhood.values(): for _layer in _layers.values(): for _node in _layer: if _node not in vaild_node_index: _layer.remove(_node) self.real_orders = real_orders self.p = probability self.time_keys = [int(dt.strftime('%H%M')) for dt in datetime_range(datetime(2017, 9, 1, 0), datetime(2017, 9, 2, 0), timedelta(minutes=time_interval))] self.day_orders = [] self.onoff_driver_location_mat = onoff_driver_location_mat self.all_grids_on_number = 0 self.all_grids_off_number = 0 self.out_grid_in_orders = np.zeros((self.n_intervals, len(self.target_grids))) self.global_flag = global_flag self.weights_layers_neighbors = [1.0, np.exp(-1), np.exp(-2)] self._time_limit = time_limit
CoRide
positive