update 10 NAS algs

others/GDAS/exps-cnn/cvpr-vis.py  (new file, 97 lines)
@@ -0,0 +1,97 @@
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
# python ./exps-cnn/cvpr-vis.py --save_dir ./snapshots/NAS-VIS/
import os, sys, time, glob, random, argparse
import numpy as np
from copy import deepcopy
import torch
from pathlib import Path
lib_dir = (Path(__file__).parent / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from nas import DMS_V1, DMS_F1
from nas_rnn import DARTS_V2, GDAS
from graphviz import Digraph

parser = argparse.ArgumentParser("Visualize the Networks")
parser.add_argument('--save_dir',   type=str,   help='The directory to save the network plot.')
args = parser.parse_args()


def plot_cnn(genotype, filename):
  g = Digraph(
      format='pdf',
      edge_attr=dict(fontsize='20', fontname="times"),
      node_attr=dict(style='filled', shape='rect', align='center', fontsize='20', height='0.5', width='0.5', penwidth='2', fontname="times"),
      engine='dot')
  g.body.extend(['rankdir=LR'])

  g.node("c_{k-2}", fillcolor='darkseagreen2')
  g.node("c_{k-1}", fillcolor='darkseagreen2')
  assert len(genotype) % 2 == 0, '{:}'.format(genotype)
  steps = len(genotype) // 2

  for i in range(steps):
    g.node(str(i), fillcolor='lightblue')

  for i in range(steps):
    for k in [2*i, 2*i + 1]:
      op, j, weight = genotype[k]
      if j == 0:
        u = "c_{k-2}"
      elif j == 1:
        u = "c_{k-1}"
      else:
        u = str(j-2)
      v = str(i)
      g.edge(u, v, label=op, fillcolor="gray")

  g.node("c_{k}", fillcolor='palegoldenrod')
  for i in range(steps):
    g.edge(str(i), "c_{k}", fillcolor="gray")

  g.render(filename, view=False)

def plot_rnn(genotype, filename):
  g = Digraph(
      format='pdf',
      edge_attr=dict(fontsize='20', fontname="times"),
      node_attr=dict(style='filled', shape='rect', align='center', fontsize='20', height='0.5', width='0.5', penwidth='2', fontname="times"),
      engine='dot')
  g.body.extend(['rankdir=LR'])

  g.node("x_{t}", fillcolor='darkseagreen2')
  g.node("h_{t-1}", fillcolor='darkseagreen2')
  g.node("0", fillcolor='lightblue')
  g.edge("x_{t}", "0", fillcolor="gray")
  g.edge("h_{t-1}", "0", fillcolor="gray")
  steps = len(genotype)

  for i in range(1, steps + 1):
    g.node(str(i), fillcolor='lightblue')

  for i, (op, j) in enumerate(genotype):
    g.edge(str(j), str(i + 1), label=op, fillcolor="gray")

  g.node("h_{t}", fillcolor='palegoldenrod')
  for i in range(1, steps + 1):
    g.edge(str(i), "h_{t}", fillcolor="gray")

  g.render(filename, view=False)


if __name__ == '__main__':
  save_dir   = Path(args.save_dir)

  save_path = str(save_dir / 'DMS_V1-normal')
  plot_cnn(DMS_V1.normal, save_path)
  save_path = str(save_dir / 'DMS_V1-reduce')
  plot_cnn(DMS_V1.reduce, save_path)
  save_path = str(save_dir / 'DMS_F1-normal')
  plot_cnn(DMS_F1.normal, save_path)

  save_path = str(save_dir / 'DARTS-V2-RNN')
  plot_rnn(DARTS_V2.recurrent, save_path)

  save_path = str(save_dir / 'GDAS-V1-RNN')
  plot_rnn(GDAS.recurrent, save_path)
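Note: plot_cnn and plot_rnn only assume a simple tuple layout for the genotypes imported above. A minimal sketch of that layout, with invented operation names and run with the two helpers from this file in scope (illustrative only, not taken from the nas / nas_rnn modules):

# Hypothetical genotypes, for illustration only.
# plot_cnn: two (op, input_index, weight) tuples per intermediate node;
#           index 0 -> c_{k-2}, index 1 -> c_{k-1}, index i+2 -> node i.
toy_cnn = [('sep_conv_3x3', 0, 0.9), ('skip_connect', 1, 0.8),   # node 0
           ('sep_conv_3x3', 2, 0.7), ('max_pool_3x3', 0, 0.6)]   # node 1
# plot_rnn: one (op, input_index) tuple per node; node 0 is fed by x_{t} and h_{t-1}.
toy_rnn = [('tanh', 0), ('relu', 1), ('sigmoid', 1)]
plot_cnn(toy_cnn, '/tmp/toy-cnn')   # renders /tmp/toy-cnn.pdf
plot_rnn(toy_rnn, '/tmp/toy-rnn')   # renders /tmp/toy-rnn.pdf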
others/GDAS/exps-cnn/evaluate.py  (new file, 53 lines)
@@ -0,0 +1,53 @@
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
# For evaluating the learned model
import os, sys, time, glob, random, argparse
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from pathlib import Path
lib_dir = (Path(__file__).parent / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from utils import AverageMeter, time_string, convert_secs2time
from utils import print_log, obtain_accuracy
from utils import Cutout, count_parameters_in_MB
from nas import model_types as models
from train_utils import main_procedure
from train_utils_imagenet import main_procedure_imagenet
from scheduler import load_config


parser = argparse.ArgumentParser("Evaluate-CNN")
parser.add_argument('--data_path',         type=str,   help='Path to dataset.')
parser.add_argument('--checkpoint',        type=str,   help='The path to the checkpoint of the trained model.')
args = parser.parse_args()

assert torch.cuda.is_available(), 'torch.cuda is not available'


def main():

  assert os.path.isdir( args.data_path ), 'invalid data-path : {:}'.format(args.data_path)
  assert os.path.isfile( args.checkpoint ), 'invalid checkpoint : {:}'.format(args.checkpoint)

  checkpoint = torch.load( args.checkpoint )
  xargs      = checkpoint['args']
  config     = load_config(xargs.model_config)
  genotype   = models[xargs.arch]

  # clear GPU cache
  torch.cuda.empty_cache()
  if xargs.dataset == 'imagenet':
    main_procedure_imagenet(config, args.data_path, xargs, genotype, xargs.init_channels, xargs.layers, checkpoint['state_dict'], None)
  else:
    main_procedure(config, xargs.dataset, args.data_path, xargs, genotype, xargs.init_channels, xargs.layers, checkpoint['state_dict'], None)


if __name__ == '__main__':
  main()
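The only keys this script reads from the loaded file are 'args' and 'state_dict', which matches the checkpoints written by train_utils.py later in this commit. A stripped-down, self-contained sketch of a compatible checkpoint; the architecture name, config path, and the Linear stand-in model are placeholders, not repository values:

import torch
import torch.nn as nn
from argparse import Namespace

# Illustrative only: the minimum structure evaluate.py needs from a checkpoint.
# Field names mirror the xargs.* attributes read above; values are placeholders.
train_args = Namespace(model_config='./configs/example.config', arch='GDAS_V1',
                       dataset='cifar10', init_channels=36, layers=20)
dummy = nn.Linear(8, 4)                              # stand-in for the real network
torch.save({'args': train_args, 'state_dict': dummy.state_dict()},
           '/tmp/example-checkpoint.pth')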
others/GDAS/exps-cnn/train_base.py  (new file, 89 lines)
@@ -0,0 +1,89 @@
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import os, sys, time, glob, random, argparse
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from pathlib import Path
lib_dir = (Path(__file__).parent / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from utils import AverageMeter, time_string, convert_secs2time
from utils import print_log, obtain_accuracy
from utils import Cutout, count_parameters_in_MB
from nas import model_types as models
from train_utils import main_procedure
from train_utils_imagenet import main_procedure_imagenet
from scheduler import load_config


parser = argparse.ArgumentParser("Train-CNN")
parser.add_argument('--data_path',         type=str,   help='Path to dataset')
parser.add_argument('--dataset',           type=str,   choices=['imagenet', 'cifar10', 'cifar100'], help='Choose between Cifar10/100 and ImageNet.')
parser.add_argument('--arch',              type=str,   choices=models.keys(), help='the searched model.')
#
parser.add_argument('--grad_clip',      type=float, help='gradient clipping')
parser.add_argument('--model_config',   type=str  , help='the model configuration')
parser.add_argument('--init_channels',  type=int  , help='the initial number of channels')
parser.add_argument('--layers',         type=int  , help='the number of layers.')

# log
parser.add_argument('--workers',       type=int, default=2, help='number of data loading workers (default: 2)')
parser.add_argument('--save_path',     type=str, help='Folder to save checkpoints and log.')
parser.add_argument('--print_freq',    type=int, help='print frequency (default: 200)')
parser.add_argument('--manualSeed',    type=int, help='manual seed')
args = parser.parse_args()

if 'CUDA_VISIBLE_DEVICES' not in os.environ: print('Can not find CUDA_VISIBLE_DEVICES in os.environ')
else                                       : print('Find CUDA_VISIBLE_DEVICES={:}'.format(os.environ['CUDA_VISIBLE_DEVICES']))

assert torch.cuda.is_available(), 'torch.cuda is not available'


if args.manualSeed is None or args.manualSeed < 0:
  args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
cudnn.benchmark = True
cudnn.enabled   = True
torch.manual_seed(args.manualSeed)
torch.cuda.manual_seed_all(args.manualSeed)


def main():

  # Init logger
  #args.save_path = os.path.join(args.save_path, 'seed-{:}'.format(args.manualSeed))
  if not os.path.isdir(args.save_path):
    os.makedirs(args.save_path)
  log = open(os.path.join(args.save_path, 'seed-{:}-log.txt'.format(args.manualSeed)), 'w')
  print_log('Save Path      : {:}'.format(args.save_path), log)
  state = {k: v for k, v in args._get_kwargs()}
  print_log(state, log)
  print_log("Random Seed    : {:}".format(args.manualSeed), log)
  print_log("Python version : {:}".format(sys.version.replace('\n', ' ')), log)
  print_log("Torch  version : {:}".format(torch.__version__), log)
  print_log("CUDA   version : {:}".format(torch.version.cuda), log)
  print_log("cuDNN  version : {:}".format(cudnn.version()), log)
  print_log("Num of GPUs    : {:}".format(torch.cuda.device_count()), log)
  args.dataset = args.dataset.lower()

  config = load_config(args.model_config)
  genotype = models[args.arch]
  print_log('configuration : {:}'.format(config), log)
  print_log('genotype      : {:}'.format(genotype), log)
  # clear GPU cache
  torch.cuda.empty_cache()
  if args.dataset == 'imagenet':
    main_procedure_imagenet(config, args.data_path, args, genotype, args.init_channels, args.layers, None, log)
  else:
    main_procedure(config, args.dataset, args.data_path, args, genotype, args.init_channels, args.layers, None, log)
  log.close()


if __name__ == '__main__':
  main()
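A hypothetical way to launch this script. Every value below (dataset path, config file, architecture name, channel/layer counts) is a placeholder for illustration, not a documented default of the repository:

import os, subprocess

# Illustrative launch only; '--arch' must name a key of nas.model_types and
# '--model_config' must point to a file that scheduler.load_config accepts.
env = dict(os.environ, CUDA_VISIBLE_DEVICES='0')
subprocess.run(['python', './exps-cnn/train_base.py',
                '--data_path', './data/cifar.python', '--dataset', 'cifar10',
                '--arch', 'GDAS_V1',
                '--model_config', './configs/example-cifar.config',
                '--init_channels', '36', '--layers', '20', '--grad_clip', '5',
                '--save_path', './snapshots/base-cifar10',
                '--print_freq', '200', '--manualSeed', '-1', '--workers', '4'],
               env=env, check=True)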
others/GDAS/exps-cnn/train_utils.py  (new file, 169 lines)
@@ -0,0 +1,169 @@
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import os, sys, time
from copy import deepcopy
import torch
import torchvision.transforms as transforms
from shutil import copyfile

from utils import print_log, obtain_accuracy, AverageMeter
from utils import time_string, convert_secs2time
from utils import count_parameters_in_MB
from utils import Cutout
from nas import NetworkCIFAR as Network
from datasets import get_datasets


def obtain_best(accuracies):
  if len(accuracies) == 0: return (0, 0)
  tops = [value for key, value in accuracies.items()]
  s2b = sorted( tops )
  return s2b[-1]


def main_procedure(config, dataset, data_path, args, genotype, init_channels, layers, pure_evaluate, log):

  train_data, test_data, class_num = get_datasets(dataset, data_path, config.cutout)

  print_log('-------------------------------------- main-procedure', log)
  print_log('config        : {:}'.format(config), log)
  print_log('genotype      : {:}'.format(genotype), log)
  print_log('init_channels : {:}'.format(init_channels), log)
  print_log('layers        : {:}'.format(layers), log)
  print_log('class_num     : {:}'.format(class_num), log)
  basemodel = Network(init_channels, class_num, layers, config.auxiliary, genotype)
  model     = torch.nn.DataParallel(basemodel).cuda()

  total_param, aux_param = count_parameters_in_MB(basemodel), count_parameters_in_MB(basemodel.auxiliary_param())
  print_log('Network =>\n{:}'.format(basemodel), log)
  print_log('Parameters : {:} - {:} = {:.3f} MB'.format(total_param, aux_param, total_param - aux_param), log)
  print_log('config        : {:}'.format(config), log)
  print_log('genotype      : {:}'.format(genotype), log)
  print_log('args          : {:}'.format(args), log)
  print_log('Train-Dataset : {:}'.format(train_data), log)
  print_log('Train-Trans   : {:}'.format(train_data.transform), log)
  print_log('Test--Dataset : {:}'.format(test_data ), log)
  print_log('Test--Trans   : {:}'.format(test_data.transform ), log)


  train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, shuffle=True,
                         num_workers=args.workers, pin_memory=True)
  test_loader  = torch.utils.data.DataLoader(test_data , batch_size=config.batch_size, shuffle=False,
                         num_workers=args.workers, pin_memory=True)

  criterion = torch.nn.CrossEntropyLoss().cuda()

  optimizer = torch.optim.SGD(model.parameters(), config.LR, momentum=config.momentum, weight_decay=config.decay)
  #optimizer = torch.optim.SGD(model.parameters(), config.LR, momentum=config.momentum, weight_decay=config.decay, nesterov=True)
  if config.type == 'cosine':
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(config.epochs), eta_min=float(config.LR_MIN))
  else:
    raise ValueError('Cannot find the scheduler type : {:}'.format(config.type))


  checkpoint_path = os.path.join(args.save_path, 'seed-{:}-checkpoint-{:}-model.pth'.format(args.manualSeed, dataset))
  checkpoint_best = os.path.join(args.save_path, 'seed-{:}-checkpoint-{:}-best.pth'.format(args.manualSeed, dataset))
  if pure_evaluate:
    print_log('-'*20 + 'Pure Evaluation' + '-'*20, log)
    basemodel.load_state_dict( pure_evaluate )
    with torch.no_grad():
      valid_acc1, valid_acc5, valid_los = _train(test_loader, model, criterion, optimizer, 'test', -1, config, args.print_freq, log)
    return (valid_acc1, valid_acc5)
  elif os.path.isfile(checkpoint_path):
    checkpoint  = torch.load( checkpoint_path )
    start_epoch = checkpoint['epoch']
    basemodel.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler.load_state_dict(checkpoint['scheduler'])
    accuracies  = checkpoint['accuracies']
    print_log('Load checkpoint from {:} with start-epoch = {:}'.format(checkpoint_path, start_epoch), log)
  else:
    start_epoch, accuracies = 0, {}
    print_log('Train model from scratch without pre-trained model or snapshot', log)


  # Main loop
  start_time, epoch_time = time.time(), AverageMeter()
  for epoch in range(start_epoch, config.epochs):
    scheduler.step()

    need_time = convert_secs2time(epoch_time.val * (config.epochs-epoch), True)
    print_log("\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} LR={:6.4f} ~ {:6.4f}, Batch={:d}".format(time_string(), epoch, config.epochs, need_time, min(scheduler.get_lr()), max(scheduler.get_lr()), config.batch_size), log)

    basemodel.update_drop_path(config.drop_path_prob * epoch / config.epochs)

    train_acc1, train_acc5, train_los = _train(train_loader, model, criterion, optimizer, 'train', epoch, config, args.print_freq, log)

    with torch.no_grad():
      valid_acc1, valid_acc5, valid_los = _train(test_loader, model, criterion, optimizer, 'test', epoch, config, args.print_freq, log)
    accuracies[epoch] = (valid_acc1, valid_acc5)

    torch.save({'epoch'     : epoch + 1,
                'args'      : deepcopy(args),
                'state_dict': basemodel.state_dict(),
                'optimizer' : optimizer.state_dict(),
                'scheduler' : scheduler.state_dict(),
                'accuracies': accuracies},
                checkpoint_path)
    best_acc = obtain_best( accuracies )
    if accuracies[epoch] == best_acc: copyfile(checkpoint_path, checkpoint_best)
    print_log('----> Best Accuracy : Acc@1={:.2f}, Acc@5={:.2f}, Error@1={:.2f}, Error@5={:.2f}'.format(best_acc[0], best_acc[1], 100-best_acc[0], 100-best_acc[1]), log)
    print_log('----> Save into {:}'.format(checkpoint_path), log)

    # measure elapsed time
    epoch_time.update(time.time() - start_time)
    start_time = time.time()
  return obtain_best( accuracies )


def _train(xloader, model, criterion, optimizer, mode, epoch, config, print_freq, log):
  data_time, batch_time, losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
  if mode == 'train':
    model.train()
  elif mode == 'test':
    model.eval()
  else: raise ValueError("The mode is not right : {:}".format(mode))

  end = time.time()
  for i, (inputs, targets) in enumerate(xloader):
    # measure data loading time
    data_time.update(time.time() - end)
    # calculate prediction and loss
    targets = targets.cuda(non_blocking=True)

    if mode == 'train': optimizer.zero_grad()

    if config.auxiliary and model.training:
      logits, logits_aux = model(inputs)
    else:
      logits = model(inputs)

    loss = criterion(logits, targets)
    if config.auxiliary and model.training:
      loss_aux = criterion(logits_aux, targets)
      loss += config.auxiliary_weight * loss_aux

    if mode == 'train':
      loss.backward()
      if config.grad_clip > 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
      optimizer.step()
    # record
    prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
    losses.update(loss.item(),  inputs.size(0))
    top1.update  (prec1.item(), inputs.size(0))
    top5.update  (prec5.item(), inputs.size(0))

    # measure elapsed time
    batch_time.update(time.time() - end)
    end = time.time()

    if i % print_freq == 0 or (i+1) == len(xloader):
      Sstr = ' {:5s}'.format(mode) + time_string() + ' Epoch: [{:03d}][{:03d}/{:03d}]'.format(epoch, i, len(xloader))
      Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
      Lstr = 'Loss {loss.val:.3f} ({loss.avg:.3f})  Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=losses, top1=top1, top5=top5)
      print_log(Sstr + ' ' + Tstr + ' ' + Lstr, log)

  print_log ('{TIME:} **{mode:}** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Loss:{loss:.3f}'.format(TIME=time_string(), mode=mode, top1=top1, top5=top5, error1=100-top1.avg, error5=100-top5.avg, loss=losses.avg), log)
  return top1.avg, top5.avg, losses.avg
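For orientation, these are the attributes that main_procedure and _train read from the object returned by load_config; the SimpleNamespace and the numeric values below are stand-ins for illustration, not the repository's actual configuration format:

from types import SimpleNamespace

# Placeholder values; the field names come from the config.* accesses above.
example_config = SimpleNamespace(
  cutout=16,                    # passed to get_datasets
  batch_size=96, epochs=600,    # loader size and schedule length
  LR=0.025, LR_MIN=0.0, momentum=0.9, decay=3e-4,
  type='cosine',                # the only scheduler type accepted in this file
  grad_clip=5.0,                # values <= 0 disable gradient clipping
  auxiliary=True, auxiliary_weight=0.4,
  drop_path_prob=0.2)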
others/GDAS/exps-cnn/train_utils_imagenet.py  (new file, 192 lines)
@@ -0,0 +1,192 @@
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import os, sys, time
from copy import deepcopy
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from shutil import copyfile

from utils import print_log, obtain_accuracy, AverageMeter
from utils import time_string, convert_secs2time
from utils import count_parameters_in_MB
from utils import print_FLOPs
from utils import Cutout
from nas import NetworkImageNet as Network
from datasets import get_datasets


def obtain_best(accuracies):
  if len(accuracies) == 0: return (0, 0)
  tops = [value for key, value in accuracies.items()]
  s2b = sorted( tops )
  return s2b[-1]


class CrossEntropyLabelSmooth(nn.Module):

  def __init__(self, num_classes, epsilon):
    super(CrossEntropyLabelSmooth, self).__init__()
    self.num_classes = num_classes
    self.epsilon = epsilon
    self.logsoftmax = nn.LogSoftmax(dim=1)

  def forward(self, inputs, targets):
    log_probs = self.logsoftmax(inputs)
    targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
    targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
    loss = (-targets * log_probs).mean(0).sum()
    return loss


def main_procedure_imagenet(config, data_path, args, genotype, init_channels, layers, pure_evaluate, log):

  # training data and testing data
  train_data, valid_data, class_num = get_datasets('imagenet-1k', data_path, -1)

  train_queue = torch.utils.data.DataLoader(
    train_data, batch_size=config.batch_size, shuffle= True, pin_memory=True, num_workers=args.workers)

  valid_queue = torch.utils.data.DataLoader(
    valid_data, batch_size=config.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers)

  print_log('-------------------------------------- main-procedure', log)
  print_log('config        : {:}'.format(config), log)
  print_log('genotype      : {:}'.format(genotype), log)
  print_log('init_channels : {:}'.format(init_channels), log)
  print_log('layers        : {:}'.format(layers), log)
  print_log('class_num     : {:}'.format(class_num), log)
  basemodel = Network(init_channels, class_num, layers, config.auxiliary, genotype)
  model     = torch.nn.DataParallel(basemodel).cuda()

  total_param, aux_param = count_parameters_in_MB(basemodel), count_parameters_in_MB(basemodel.auxiliary_param())
  print_log('Network =>\n{:}'.format(basemodel), log)
  print_FLOPs(basemodel, (1,3,224,224), [print_log, log])
  print_log('Parameters : {:} - {:} = {:.3f} MB'.format(total_param, aux_param, total_param - aux_param), log)
  print_log('config        : {:}'.format(config), log)
  print_log('genotype      : {:}'.format(genotype), log)
  print_log('Train-Dataset : {:}'.format(train_data), log)
  print_log('Valid--Dataset : {:}'.format(valid_data), log)
  print_log('Args          : {:}'.format(args), log)


  criterion = torch.nn.CrossEntropyLoss().cuda()
  criterion_smooth = CrossEntropyLabelSmooth(class_num, config.label_smooth).cuda()


  optimizer = torch.optim.SGD(model.parameters(), config.LR, momentum=config.momentum, weight_decay=config.decay, nesterov=True)
  if config.type == 'cosine':
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(config.epochs))
  elif config.type == 'steplr':
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, config.decay_period, gamma=config.gamma)
  else:
    raise ValueError('Cannot find the scheduler type : {:}'.format(config.type))


  checkpoint_path = os.path.join(args.save_path, 'seed-{:}-checkpoint-imagenet-model.pth'.format(args.manualSeed))
  checkpoint_best = os.path.join(args.save_path, 'seed-{:}-checkpoint-imagenet-best.pth'.format(args.manualSeed))

  if pure_evaluate:
    print_log('-'*20 + 'Pure Evaluation' + '-'*20, log)
    basemodel.load_state_dict( pure_evaluate )
    with torch.no_grad():
      valid_acc1, valid_acc5, valid_los = _train(valid_queue, model, criterion,           None, 'test' , -1, config, args.print_freq, log)
    return (valid_acc1, valid_acc5)
  elif os.path.isfile(checkpoint_path):
    checkpoint  = torch.load( checkpoint_path )
    start_epoch = checkpoint['epoch']
    basemodel.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler.load_state_dict(checkpoint['scheduler'])
    accuracies  = checkpoint['accuracies']
    print_log('Load checkpoint from {:} with start-epoch = {:}'.format(checkpoint_path, start_epoch), log)
  else:
    start_epoch, accuracies = 0, {}
    print_log('Train model from scratch without pre-trained model or snapshot', log)


  # Main loop
  start_time, epoch_time = time.time(), AverageMeter()
  for epoch in range(start_epoch, config.epochs):
    scheduler.step()

    basemodel.update_drop_path(config.drop_path_prob * epoch / config.epochs)

    need_time = convert_secs2time(epoch_time.val * (config.epochs-epoch), True)
    print_log("\n==>>{:s} [Epoch={:03d}/{:03d}] {:s} LR={:6.4f} ~ {:6.4f}, Batch={:d}, Drop-Path-Prob={:}".format(time_string(), epoch, config.epochs, need_time, min(scheduler.get_lr()), max(scheduler.get_lr()), config.batch_size, basemodel.get_drop_path()), log)

    train_acc1, train_acc5, train_los = _train(train_queue, model, criterion_smooth, optimizer, 'train', epoch, config, args.print_freq, log)

    with torch.no_grad():
      valid_acc1, valid_acc5, valid_los = _train(valid_queue, model, criterion,           None, 'test' , epoch, config, args.print_freq, log)
    accuracies[epoch] = (valid_acc1, valid_acc5)

    torch.save({'epoch'     : epoch + 1,
                'args'      : deepcopy(args),
                'state_dict': basemodel.state_dict(),
                'optimizer' : optimizer.state_dict(),
                'scheduler' : scheduler.state_dict(),
                'accuracies': accuracies},
                checkpoint_path)
    best_acc = obtain_best( accuracies )
    if accuracies[epoch] == best_acc: copyfile(checkpoint_path, checkpoint_best)
    print_log('----> Best Accuracy : Acc@1={:.2f}, Acc@5={:.2f}, Error@1={:.2f}, Error@5={:.2f}'.format(best_acc[0], best_acc[1], 100-best_acc[0], 100-best_acc[1]), log)
    print_log('----> Save into {:}'.format(checkpoint_path), log)

    # measure elapsed time
    epoch_time.update(time.time() - start_time)
    start_time = time.time()
  return obtain_best( accuracies )


def _train(xloader, model, criterion, optimizer, mode, epoch, config, print_freq, log):
  data_time, batch_time, losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
  if mode == 'train':
    model.train()
  elif mode == 'test':
    model.eval()
  else: raise ValueError("The mode is not right : {:}".format(mode))

  end = time.time()
  for i, (inputs, targets) in enumerate(xloader):
    # measure data loading time
    data_time.update(time.time() - end)
    # calculate prediction and loss
    targets = targets.cuda(non_blocking=True)

    if mode == 'train': optimizer.zero_grad()

    if config.auxiliary and model.training:
      logits, logits_aux = model(inputs)
    else:
      logits = model(inputs)

    loss = criterion(logits, targets)
    if config.auxiliary and model.training:
      loss_aux = criterion(logits_aux, targets)
      loss += config.auxiliary_weight * loss_aux

    if mode == 'train':
      loss.backward()
      if config.grad_clip > 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
      optimizer.step()
    # record
    prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
    losses.update(loss.item(),  inputs.size(0))
    top1.update  (prec1.item(), inputs.size(0))
    top5.update  (prec5.item(), inputs.size(0))

    # measure elapsed time
    batch_time.update(time.time() - end)
    end = time.time()

    if i % print_freq == 0 or (i+1) == len(xloader):
      Sstr = ' {:5s}'.format(mode) + time_string() + ' Epoch: [{:03d}][{:03d}/{:03d}]'.format(epoch, i, len(xloader))
      Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
      Lstr = 'Loss {loss.val:.3f} ({loss.avg:.3f})  Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=losses, top1=top1, top5=top5)
      print_log(Sstr + ' ' + Tstr + ' ' + Lstr, log)

  print_log ('{TIME:} **{mode:}** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Loss:{loss:.3f}'.format(TIME=time_string(), mode=mode, top1=top1, top5=top5, error1=100-top1.avg, error5=100-top5.avg, loss=losses.avg), log)
  return top1.avg, top5.avg, losses.avg
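CrossEntropyLabelSmooth above mixes the one-hot target with a uniform distribution over the classes before taking the cross entropy. A small self-contained check of the same computation, with toy shapes chosen purely for illustration:

import torch
import torch.nn.functional as F

def label_smooth_ce(logits, targets, num_classes, epsilon):
  # Same math as CrossEntropyLabelSmooth.forward: soft targets are
  # (1 - eps) * one_hot + eps / C, and the loss averages over the batch.
  log_probs = F.log_softmax(logits, dim=1)
  one_hot   = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
  soft      = (1 - epsilon) * one_hot + epsilon / num_classes
  return (-soft * log_probs).mean(0).sum()

logits  = torch.randn(4, 10)            # toy batch of 4 samples, 10 classes
targets = torch.tensor([1, 3, 5, 7])
print(label_smooth_ce(logits, targets, 10, 0.1))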
others/GDAS/exps-cnn/vis-arch.py  (new file, 69 lines)
@@ -0,0 +1,69 @@
import os, sys, time, glob, random, argparse
import numpy as np
from copy import deepcopy
import torch
from pathlib import Path
lib_dir = (Path(__file__).parent / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from graphviz import Digraph

parser = argparse.ArgumentParser("Visualize the Networks")
parser.add_argument('--checkpoint', type=str,   help='The path to the checkpoint.')
parser.add_argument('--save_dir',   type=str,   help='The directory to save the network plot.')
args = parser.parse_args()


def plot(genotype, filename):
  g = Digraph(
      format='pdf',
      edge_attr=dict(fontsize='20', fontname="times"),
      node_attr=dict(style='filled', shape='rect', align='center', fontsize='20', height='0.5', width='0.5', penwidth='2', fontname="times"),
      engine='dot')
  g.body.extend(['rankdir=LR'])

  g.node("c_{k-2}", fillcolor='darkseagreen2')
  g.node("c_{k-1}", fillcolor='darkseagreen2')
  assert len(genotype) % 2 == 0
  steps = len(genotype) // 2

  for i in range(steps):
    g.node(str(i), fillcolor='lightblue')

  for i in range(steps):
    for k in [2*i, 2*i + 1]:
      op, j, weight = genotype[k]
      if j == 0:
        u = "c_{k-2}"
      elif j == 1:
        u = "c_{k-1}"
      else:
        u = str(j-2)
      v = str(i)
      g.edge(u, v, label=op, fillcolor="gray")

  g.node("c_{k}", fillcolor='palegoldenrod')
  for i in range(steps):
    g.edge(str(i), "c_{k}", fillcolor="gray")

  g.render(filename, view=False)


if __name__ == '__main__':
  checkpoint = args.checkpoint
  assert os.path.isfile(checkpoint), 'Invalid path for checkpoint : {:}'.format(checkpoint)
  checkpoint = torch.load( checkpoint, map_location='cpu' )
  genotypes  = checkpoint['genotypes']
  save_dir   = Path(args.save_dir)
  subs       = ['normal', 'reduce']
  for sub in subs:
    if not (save_dir / sub).exists():
      (save_dir / sub).mkdir(parents=True, exist_ok=True)

  for key, network in genotypes.items():
    save_path = str(save_dir / 'normal' / 'epoch-{:03d}'.format( int(key) ))
    print('save into {:}'.format(save_path))
    plot(network.normal, save_path)

    save_path = str(save_dir / 'reduce' / 'epoch-{:03d}'.format( int(key) ))
    print('save into {:}'.format(save_path))
    plot(network.reduce, save_path)
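This script only assumes that checkpoint['genotypes'] maps an epoch (anything int() accepts) to an object exposing normal and reduce lists in the format plot expects. A toy stand-in, with an invented container class and operation names, purely for illustration:

import torch
from collections import namedtuple

# Hypothetical container; the actual search code may use a different class.
Genotype = namedtuple('Genotype', ['normal', 'reduce'])
toy = Genotype(
  normal=[('sep_conv_3x3', 0, 0.9), ('skip_connect', 1, 0.8),
          ('sep_conv_3x3', 2, 0.7), ('max_pool_3x3', 0, 0.6)],
  reduce=[('max_pool_3x3', 0, 0.9), ('max_pool_3x3', 1, 0.8),
          ('skip_connect', 2, 0.7), ('sep_conv_3x3', 0, 0.6)])
torch.save({'genotypes': {0: toy}}, '/tmp/toy-search.pth')   # a file vis-arch.py could read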