Update TAS and FBV2 for NAS-Bench
@@ -338,8 +338,7 @@ def main(xargs):
   else:
     extra_info = {'class_num': class_num, 'xshape': xshape, 'epochs': xargs.overwite_epochs}
   config = load_config(xargs.config_path, extra_info, logger)
-  search_loader, train_loader, valid_loader = get_nas_search_loaders(train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/', \
-                                        (config.batch_size, config.test_batch_size), xargs.workers)
+  search_loader, train_loader, valid_loader = get_nas_search_loaders(train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/', (config.batch_size, config.test_batch_size), xargs.workers)
   logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))
   logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
exps/algos-v2/search-size.py (new file, 334 lines)
@@ -0,0 +1,334 @@
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020 #
######################################################################################
# python ./exps/algos-v2/search-size.py --dataset cifar10  --data_path $TORCH_HOME/cifar.python --algo tas --rand_seed 777
# python ./exps/algos-v2/search-size.py --dataset cifar100 --data_path $TORCH_HOME/cifar.python --algo tas --rand_seed 777
# python ./exps/algos-v2/search-size.py --dataset ImageNet16-120 --data_path $TORCH_HOME/cifar.python/ImageNet16 --algo tas --rand_seed 777
####
# python ./exps/algos-v2/search-size.py --dataset cifar10  --data_path $TORCH_HOME/cifar.python --algo fbv2 --rand_seed 777
# python ./exps/algos-v2/search-size.py --dataset cifar100 --data_path $TORCH_HOME/cifar.python --algo fbv2 --rand_seed 777
# python ./exps/algos-v2/search-size.py --dataset ImageNet16-120 --data_path $TORCH_HOME/cifar.python/ImageNet16 --algo fbv2 --rand_seed 777
######################################################################################
import os, sys, time, random, argparse
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
from pathlib import Path
lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets     import get_datasets, get_nas_search_loaders
from procedures   import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from utils        import count_parameters_in_MB, obtain_accuracy
from log_utils    import AverageMeter, time_string, convert_secs2time
from models       import get_cell_based_tiny_net, get_search_spaces
from nas_201_api  import NASBench301API as API


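# Bi-level search: every batch from `search_loader` packs two mini-batches, one
# to update the network weights (w_optimizer) and one to update the
# architecture parameters (a_optimizer), in an alternating fashion.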
def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, logger):
  data_time, batch_time = AverageMeter(), AverageMeter()
  base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
  arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
  end = time.time()
  network.train()
  for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
    scheduler.update(None, 1.0 * step / len(xloader))
    base_inputs = base_inputs.cuda(non_blocking=True)
    arch_inputs = arch_inputs.cuda(non_blocking=True)
    base_targets = base_targets.cuda(non_blocking=True)
    arch_targets = arch_targets.cuda(non_blocking=True)
    # measure data loading time
    data_time.update(time.time() - end)

    # Update the weights
    network.zero_grad()
    _, logits = network(base_inputs)
    base_loss = criterion(logits, base_targets)
    base_loss.backward()
    w_optimizer.step()
    # record
    base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
    base_losses.update(base_loss.item(),  base_inputs.size(0))
    base_top1.update  (base_prec1.item(), base_inputs.size(0))
    base_top5.update  (base_prec5.item(), base_inputs.size(0))

    # update the architecture-weight
    network.zero_grad()
    _, logits = network(arch_inputs)
    arch_loss = criterion(logits, arch_targets)
    arch_loss.backward()
    a_optimizer.step()
    # record
    arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
    arch_losses.update(arch_loss.item(),  arch_inputs.size(0))
    arch_top1.update  (arch_prec1.item(), arch_inputs.size(0))
    arch_top5.update  (arch_prec5.item(), arch_inputs.size(0))

    # measure elapsed time
    batch_time.update(time.time() - end)
    end = time.time()

    if step % print_freq == 0 or step + 1 == len(xloader):
      Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))
      Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
      Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f})  Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)
      Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f})  Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)
      logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
  return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg


def train_controller(xloader, network, criterion, optimizer, prev_baseline, epoch_str, print_freq, logger):
  # prev_baseline: the baseline score (i.e., the average val_acc) from the previous epoch
  data_time, batch_time = AverageMeter(), AverageMeter()
  GradnormMeter, LossMeter, ValAccMeter, EntropyMeter, BaselineMeter, RewardMeter, xend = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), time.time()

  controller_num_aggregate = 20
  controller_train_steps = 50
  controller_bl_dec = 0.99
  controller_entropy_weight = 0.0001
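  # ENAS-style REINFORCE: run controller_train_steps aggregated updates; each
  # accumulates the gradient over controller_num_aggregate sampled architectures
  # before a single optimizer step (see below).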

  network.eval()
  network.controller.train()
  network.controller.zero_grad()
  loader_iter = iter(xloader)
  for step in range(controller_train_steps * controller_num_aggregate):
    try:
      inputs, targets = next(loader_iter)
    except StopIteration:
      loader_iter = iter(xloader)
      inputs, targets = next(loader_iter)
    inputs  = inputs.cuda(non_blocking=True)
    targets = targets.cuda(non_blocking=True)
    # measure data loading time
    data_time.update(time.time() - xend)

    log_prob, entropy, sampled_arch = network.controller()
    with torch.no_grad():
      network.set_cal_mode('dynamic', sampled_arch)
      _, logits = network(inputs)
      val_top1, val_top5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
      val_top1  = val_top1.view(-1) / 100
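    # the reward is the validation accuracy (scaled to [0, 1]) plus a small
    # entropy bonus that encourages the controller to keep exploring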
    reward = val_top1 + controller_entropy_weight * entropy
    if prev_baseline is None:
      baseline = val_top1
    else:
      baseline = prev_baseline - (1 - controller_bl_dec) * (prev_baseline - reward)
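    # the baseline is an exponential moving average of the reward:
    # prev - (1 - dec) * (prev - reward) == dec * prev + (1 - dec) * reward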
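    # REINFORCE policy-gradient loss: minimize -log_prob * (reward - baseline)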
    loss = -1 * log_prob * (reward - baseline)

    # record
    RewardMeter.update(reward.item())
    BaselineMeter.update(baseline.item())
    ValAccMeter.update(val_top1.item()*100)
    LossMeter.update(loss.item())
    EntropyMeter.update(entropy.item())

    # Average gradient over controller_num_aggregate samples
    loss = loss / controller_num_aggregate
    loss.backward(retain_graph=True)

    # measure elapsed time
    batch_time.update(time.time() - xend)
    xend = time.time()
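    # apply the accumulated gradient once per controller_num_aggregate samples,
    # after clipping its norm to 5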
    if (step+1) % controller_num_aggregate == 0:
      grad_norm = torch.nn.utils.clip_grad_norm_(network.controller.parameters(), 5.0)
      GradnormMeter.update(grad_norm)
      optimizer.step()
      network.controller.zero_grad()

    if step % print_freq == 0:
      Sstr = '*Train-Controller* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, controller_train_steps * controller_num_aggregate)
      Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
      Wstr = '[Loss {loss.val:.3f} ({loss.avg:.3f})  Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Reward {reward.val:.2f} ({reward.avg:.2f})] Baseline {basel.val:.2f} ({basel.avg:.2f})'.format(loss=LossMeter, top1=ValAccMeter, reward=RewardMeter, basel=BaselineMeter)
      Estr = 'Entropy={:.4f} ({:.4f})'.format(EntropyMeter.val, EntropyMeter.avg)
      logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Estr)

  return LossMeter.avg, ValAccMeter.avg, BaselineMeter.avg, RewardMeter.avg


def valid_func(xloader, network, criterion, logger):
  data_time, batch_time = AverageMeter(), AverageMeter()
  arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
  end = time.time()
  with torch.no_grad():
    network.eval()
    for step, (arch_inputs, arch_targets) in enumerate(xloader):
      arch_targets = arch_targets.cuda(non_blocking=True)
      # measure data loading time
      data_time.update(time.time() - end)
      # prediction
      _, logits = network(arch_inputs.cuda(non_blocking=True))
      arch_loss = criterion(logits, arch_targets)
      # record
      arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
      arch_losses.update(arch_loss.item(),  arch_inputs.size(0))
      arch_top1.update  (arch_prec1.item(), arch_inputs.size(0))
      arch_top5.update  (arch_prec5.item(), arch_inputs.size(0))
      # measure elapsed time
      batch_time.update(time.time() - end)
      end = time.time()
  return arch_losses.avg, arch_top1.avg, arch_top5.avg


def main(xargs):
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled   = True
  torch.backends.cudnn.benchmark = False
  torch.backends.cudnn.deterministic = True
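  # cudnn benchmarking is off and deterministic mode is on, so runs with the
  # same seed are reproducible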
  torch.set_num_threads( xargs.workers )
  prepare_seed(xargs.rand_seed)
  logger = prepare_logger(xargs)

  train_data, valid_data, xshape, class_num = get_datasets(xargs.dataset, xargs.data_path, -1)
  if xargs.overwite_epochs is None:
    extra_info = {'class_num': class_num, 'xshape': xshape}
  else:
    extra_info = {'class_num': class_num, 'xshape': xshape, 'epochs': xargs.overwite_epochs}
  config = load_config(xargs.config_path, extra_info, logger)
  search_loader, train_loader, valid_loader = get_nas_search_loaders(train_data, valid_data, xargs.dataset, 'configs/nas-benchmark/', (config.batch_size, config.test_batch_size), xargs.workers)
  logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))
  logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))

  search_space = get_search_spaces(xargs.search_space, 'nas-bench-301')

  model_config = dict2config(
      dict(name='generic', super_type='search-shape', candidate_Cs=search_space['candidates'], max_num_Cs=search_space['numbers'], num_classes=class_num,
           genotype=xargs.genotype, affine=bool(xargs.affine), track_running_stats=bool(xargs.track_running_stats)), None)
  logger.log('search space : {:}'.format(search_space))
  logger.log('model config : {:}'.format(model_config))
  search_model = get_cell_based_tiny_net(model_config)
  search_model.set_algo(xargs.algo)
  logger.log('{:}'.format(search_model))

  w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.weights, config)
  a_optimizer = torch.optim.Adam(search_model.alphas, lr=xargs.arch_learning_rate, betas=(0.5, 0.999), weight_decay=xargs.arch_weight_decay, eps=xargs.arch_eps)
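  # the weights use the optimizer/scheduler specified in the config, while the
  # architecture parameters get their own Adam optimizer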
  logger.log('w-optimizer : {:}'.format(w_optimizer))
  logger.log('a-optimizer : {:}'.format(a_optimizer))
  logger.log('w-scheduler : {:}'.format(w_scheduler))
  logger.log('criterion   : {:}'.format(criterion))
  params = count_parameters_in_MB(search_model)
  logger.log('The parameters of the search model = {:.2f} MB'.format(params))
  logger.log('search-space : {:}'.format(search_space))
  try:
    api = API(verbose=False)
  except Exception:
    api = None
  logger.log('{:} create API = {:} done'.format(time_string(), api))

  last_info, model_base_path, model_best_path = logger.path('info'), logger.path('model'), logger.path('best')
  network, criterion = search_model.cuda(), criterion.cuda()  # use a single GPU

  if last_info.exists(): # automatically resume from previous checkpoint
    logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
    last_info   = torch.load(last_info)
    start_epoch = last_info['epoch']
    checkpoint  = torch.load(last_info['last_checkpoint'])
    genotypes   = checkpoint['genotypes']
    valid_accuracies = checkpoint['valid_accuracies']
    search_model.load_state_dict( checkpoint['search_model'] )
    w_scheduler.load_state_dict ( checkpoint['w_scheduler'] )
    w_optimizer.load_state_dict ( checkpoint['w_optimizer'] )
    a_optimizer.load_state_dict ( checkpoint['a_optimizer'] )
    logger.log("=> loaded checkpoint of the last-info '{:}'; resume from the {:}-th epoch.".format(last_info, start_epoch))
  else:
    logger.log("=> did not find the last-info file : {:}".format(last_info))
    start_epoch, valid_accuracies, genotypes = 0, {'best': -1}, {-1: network.random}

  # start training
  start_time, search_time, epoch_time, total_epoch = time.time(), AverageMeter(), AverageMeter(), config.epochs + config.warmup
  for epoch in range(start_epoch, total_epoch):
    w_scheduler.update(epoch, 0.0)
    need_time = 'Time Left: {:}'.format(convert_secs2time(epoch_time.val * (total_epoch-epoch), True))
    epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
    logger.log('\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min(w_scheduler.get_lr())))

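    # anneal tau linearly from tau_max down to tau_min so that the
    # Gumbel-Softmax samples become increasingly discrete as search progresses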
    if xargs.algo == 'fbv2' or xargs.algo == 'tas':
      network.set_tau( xargs.tau_max - (xargs.tau_max-xargs.tau_min) * epoch / (total_epoch-1) )
      logger.log('[RESET tau as : {:}]'.format(network.tau))
    search_w_loss, search_w_top1, search_w_top5, search_a_loss, search_a_top1, search_a_top5 \
                = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str, xargs.print_freq, logger)
    search_time.update(time.time() - start_time)
    logger.log('[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5, search_time.sum))
    logger.log('[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, search_a_loss, search_a_top1, search_a_top5))

    genotype = network.genotype
    logger.log('[{:}] - [get_best_arch] : {:}'.format(epoch_str, genotype))
    valid_a_loss , valid_a_top1 , valid_a_top5  = valid_func(valid_loader, network, criterion, logger)
    logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5, genotype))
    valid_accuracies[epoch] = valid_a_top1

    genotypes[epoch] = genotype
    logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))
    # save checkpoint
    save_path = save_checkpoint({'epoch' : epoch + 1,
                'args'  : deepcopy(xargs),
                'search_model': search_model.state_dict(),
                'w_optimizer' : w_optimizer.state_dict(),
                'a_optimizer' : a_optimizer.state_dict(),
                'w_scheduler' : w_scheduler.state_dict(),
                'genotypes'   : genotypes,
                'valid_accuracies' : valid_accuracies},
                model_base_path, logger)
    last_info = save_checkpoint({
          'epoch': epoch + 1,
          'args' : deepcopy(xargs),
          'last_checkpoint': save_path,
          }, logger.path('info'), logger)
    with torch.no_grad():
      logger.log('{:}'.format(search_model.show_alphas()))
    if api is not None: logger.log('{:}'.format(api.query_by_arch(genotypes[epoch], '90')))
    # measure elapsed time
    epoch_time.update(time.time() - start_time)
    start_time = time.time()

  # the final post procedure : count the time
  start_time = time.time()
  genotype = network.genotype
  search_time.update(time.time() - start_time)

  valid_a_loss, valid_a_top1, valid_a_top5 = valid_func(valid_loader, network, criterion, logger)
  logger.log('Last : the genotype is : {:}, with the validation accuracy of {:.3f}%.'.format(genotype, valid_a_top1))

  logger.log('\n' + '-'*100)
  # check the performance from the architecture dataset
  logger.log('[{:}] run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(xargs.algo, total_epoch, search_time.sum, genotype))
  if api is not None: logger.log('{:}'.format(api.query_by_arch(genotype, '90') ))
  logger.close()

if __name__ == '__main__':
  parser = argparse.ArgumentParser("Weight sharing NAS methods to search for the size (channel) configuration.")
  parser.add_argument('--data_path'   ,       type=str,   help='Path to dataset')
  parser.add_argument('--dataset'     ,       type=str,   choices=['cifar10', 'cifar100', 'ImageNet16-120'], help='Choose between Cifar10/100 and ImageNet-16.')
  parser.add_argument('--search_space',       type=str,   default='sss', choices=['sss'], help='The search space name.')
  parser.add_argument('--algo'        ,       type=str,   choices=['tas', 'fbv2', 'enas'], help='The search algorithm.')
  parser.add_argument('--genotype'    ,       type=str,   default='|nor_conv_3x3~0|+|nor_conv_3x3~0|nor_conv_3x3~1|+|skip_connect~0|nor_conv_3x3~1|nor_conv_3x3~2|', help='The genotype.')
  # FOR the Gumbel-Softmax-based algorithms (tas and fbv2)
  parser.add_argument('--tau_min',            type=float, default=0.1,  help='The minimum tau for Gumbel Softmax.')
  parser.add_argument('--tau_max',            type=float, default=10,   help='The maximum tau for Gumbel Softmax.')
  #
  parser.add_argument('--track_running_stats',type=int,   default=0, choices=[0,1],help='Whether use track_running_stats or not in the BN layer.')
  parser.add_argument('--affine'      ,       type=int,   default=0, choices=[0,1],help='Whether use affine=True or False in the BN layer.')
  parser.add_argument('--config_path' ,       type=str,   default='./configs/nas-benchmark/algos/weight-sharing.config', help='The path of configuration.')
  parser.add_argument('--overwite_epochs',    type=int,   help='The number of epochs to overwrite the value in the config file.')
  # architecture learning rate
  parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
  parser.add_argument('--arch_weight_decay' , type=float, default=1e-3, help='weight decay for arch encoding')
  parser.add_argument('--arch_eps'          , type=float, default=1e-8, help='epsilon for the Adam optimizer of arch encoding')
  # log
  parser.add_argument('--workers',            type=int,   default=2,    help='number of data loading workers (default: 2)')
  parser.add_argument('--save_dir',           type=str,   default='./output/search', help='Folder to save checkpoints and log.')
  parser.add_argument('--print_freq',         type=int,   default=200,  help='print frequency (default: 200)')
  parser.add_argument('--rand_seed',          type=int,   help='manual seed')
  args = parser.parse_args()
  if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000)
  dirname = '{:}-affine{:}_BN{:}'.format(args.algo, args.affine, args.track_running_stats)
  if args.overwite_epochs is not None:
    dirname = dirname + '-E{:}'.format(args.overwite_epochs)
  args.save_dir = os.path.join('{:}-{:}'.format(args.save_dir, args.search_space), args.dataset, dirname)

  main(args)
@@ -33,6 +33,7 @@ def fetch_data(root_dir='./output/search', search_space='tss', dataset=None):
   alg2name['GDAS'] = 'gdas-affine0_BN0-None'
   alg2name['RSPS'] = 'random-affine0_BN0-None'
   alg2name['DARTS (1st)'] = 'darts-v1-affine0_BN0-None'
+  alg2name['ENAS'] = 'enas-affine0_BN0-None'
   """
   alg2name['DARTS (2nd)'] = 'darts-v2-affine1_BN0-None'
   alg2name['SETN'] = 'setn-affine1_BN0-None'