text stringlengths 0 93.6k |
|---|
logger.info("final best loss test performance (at epoch %d)" % best_epoch_loss) |
print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % ( |
best_loss_test_err / test_inst, best_loss_test_corr, test_total, best_loss_test_corr * 100 / test_total) |
logger.info("final best acc test performance (at epoch %d)" % best_epoch_acc) |
print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % ( |
best_acc_test_err / test_inst, best_acc_test_corr, test_total, best_acc_test_corr * 100 / test_total) |
def test():
    """Smoke-test the CRF loss/accuracy ops on a tiny hand-crafted batch.

    Compiles a theano function over symbolic energies/targets/masks,
    feeds it a fixed (2, 2, 3, 3) energies tensor with one masked-out
    time step, and prints the resulting loss, predictions and accuracy.
    """
    # Symbolic inputs: 4-D energies, integer target matrix, float mask matrix.
    e_var = T.tensor4('energies', dtype=theano.config.floatX)
    t_var = T.imatrix('targets')
    m_var = T.matrix('masks', dtype=theano.config.floatX)

    # Identity pass-through layer so the CRF loss sees the raw energies.
    in_layer = lasagne.layers.InputLayer([2, 2, 3, 3], input_var=e_var)
    net_out = lasagne.layers.get_output(in_layer)

    loss = crf_loss(net_out, t_var, m_var)
    prediction, acc = crf_accuracy(e_var, t_var)
    fn = theano.function([e_var, t_var, m_var], [loss, prediction, acc])

    # Concrete batch: 2 sequences x 2 steps x (3 x 3) transition energies.
    sample_a = [[[10, 15, 20], [5, 10, 15], [3, 2, 0]],
                [[5, 10, 1], [5, 10, 1], [5, 10, 1]]]
    sample_b = [[[5, 6, 7], [2, 3, 4], [2, 1, 0]],
                [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]
    energies = np.array([sample_a, sample_b], dtype=np.float32)
    targets = np.array([[0, 1], [0, 2]], dtype=np.int32)
    # Second step of the second sequence is masked out.
    masks = np.array([[1, 1], [1, 0]], dtype=np.float32)

    l, p, a = fn(energies, targets, masks)
    print(l)
    print(p)
    print(a)
# Script entry point: run the training driver only when executed directly.
if __name__ == '__main__':
    main()
# <FILESEP> |
import warnings |
warnings.filterwarnings("ignore") |
# from apex import amp |
import numpy as np |
import torch.utils.data as data |
from torchvision import transforms |
import os |
import torch |
import argparse |
from data_preprocessing.dataset_raf import RafDataSet |
from data_preprocessing.dataset_affectnet import Affectdataset |
from data_preprocessing.dataset_affectnet_8class import Affectdataset_8class |
from sklearn.metrics import f1_score, confusion_matrix |
from time import time |
from utils import * |
from data_preprocessing.sam import SAM |
from models.emotion_hyp import pyramid_trans_expr |
def parse_args(argv=None):
    """Parse command-line options for training.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse. ``None`` (the default) falls back to
        ``sys.argv[1:]``, which preserves the original no-argument
        calling convention while making the function unit-testable.

    Returns
    -------
    argparse.Namespace
        Parsed options with the defaults below.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='rafdb', help='dataset')
    parser.add_argument('-c', '--checkpoint', type=str, default=None, help='Pytorch checkpoint file path')
    parser.add_argument('--batch_size', type=int, default=200, help='Batch size.')
    parser.add_argument('--val_batch_size', type=int, default=32, help='Batch size for validation.')
    parser.add_argument('--modeltype', type=str, default='large', help='small or base or large')
    parser.add_argument('--optimizer', type=str, default="adam", help='Optimizer, adam or sgd.')
    parser.add_argument('--lr', type=float, default=0.00004, help='Initial learning rate for sgd.')
    parser.add_argument('--momentum', default=0.9, type=float, help='Momentum for sgd')
    # Fix: help text previously claimed "(default: 4)" but the actual default is 2.
    parser.add_argument('--workers', default=2, type=int, help='Number of data loading workers (default: 2)')
    parser.add_argument('--epochs', type=int, default=300, help='Total training epochs.')
    parser.add_argument('--gpu', type=str, default='0,1', help='assign multi-gpus by comma concat')
    return parser.parse_args(argv)
def run_training(): |
args = parse_args() |
torch.manual_seed(123) |
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) |
print("Work on GPU: ", os.environ['CUDA_VISIBLE_DEVICES']) |
data_transforms = transforms.Compose([ |
transforms.ToPILImage(), |
transforms.RandomHorizontalFlip(), |
transforms.Resize((224, 224)), |
transforms.ToTensor(), |
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), |
transforms.RandomErasing(scale=(0.02, 0.1)), |
]) |
data_transforms_val = transforms.Compose([ |
transforms.ToPILImage(), |
transforms.Resize((224, 224)), |
transforms.ToTensor(), |
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) |
num_classes = 7 |
if args.dataset == "rafdb": |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.