updates for beta

Commit 975fe4c385 (parent 34ba8053de)
Author: D-X-Y
Date:   2019-11-09 16:50:13 +11:00
9 changed files with 415 additions and 38 deletions

View File

@@ -9,5 +9,6 @@
   "momentum"  : ["float", "0.9"],
   "nesterov"  : ["bool",  "1"],
   "criterion" : ["str",   "Softmax"],
-  "batch_size": ["int",   "64"]
+  "batch_size": ["int",   "64"],
+  "test_batch_size": ["int", "512"]
 }
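
Each entry in this config maps a name to a ["type", "value"] pair; the hunk adds a separate test_batch_size (512) so that evaluation can use a larger batch than the training batch_size (64). For reference, a minimal stand-alone sketch of decoding such typed entries, under the assumption that the file is JSON in the shape shown above; the repository's own config_utils.load_config is the authoritative reader:

# Hypothetical reader for the ["type", "value"] config format shown above.
import json

_CASTS = {'int': int, 'float': float, 'str': str, 'bool': lambda v: bool(int(v))}

def read_typed_config(path):
  with open(path) as f:
    raw = json.load(f)
  # each value is a ["type", "value"] pair, e.g. "batch_size": ["int", "64"]
  return {key: _CASTS[typ](val) for key, (typ, val) in raw.items()}

# cfg = read_typed_config('some.config')  # example call
# cfg['batch_size'] -> 64, cfg['test_batch_size'] -> 512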

exps/AA-NAS-Bench-main.py (new file, +274 lines)
View File

@@ -0,0 +1,274 @@
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import os, sys, time, torch, random, argparse
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from copy import deepcopy
from pathlib import Path
lib_dir = (Path(__file__).parent / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from config_utils import load_config
from procedures import save_checkpoint, copy_checkpoint
from procedures import get_machine_info
from datasets import get_datasets
from log_utils import Logger, AverageMeter, time_string, convert_secs2time
from models import CellStructure, CellArchitectures, get_search_spaces
from AA_functions import evaluate_for_seed


def evaluate_all_datasets(arch, datasets, xpaths, splits, seed, arch_config, workers, logger):
  machine_info, arch_config = get_machine_info(), deepcopy(arch_config)
  all_infos = {'info': machine_info}
  all_dataset_keys = []
  # loop over all the datasets
  for dataset, xpath, split in zip(datasets, xpaths, splits):
    # train and valid data
    train_data, valid_data, xshape, class_num = get_datasets(dataset, xpath, -1)
    # load the configuration
    if dataset == 'cifar10' or dataset == 'cifar100':
      config_path = 'configs/nas-benchmark/CIFAR.config'
      split_info  = load_config('configs/nas-benchmark/cifar-split.txt', None, None)
    elif dataset.startswith('ImageNet16'):
      config_path = 'configs/nas-benchmark/ImageNet-16.config'
      split_info  = load_config('configs/nas-benchmark/{:}-split.txt'.format(dataset), None, None)
    else:
      raise ValueError('invalid dataset : {:}'.format(dataset))
    config = load_config(config_path, \
                         {'class_num': class_num,
                          'xshape'   : xshape}, \
                         logger)
    # check whether to use the split validation set
    if bool(split):
      assert len(train_data) == len(split_info.train) + len(split_info.valid), 'invalid length : {:} vs {:} + {:}'.format(len(train_data), len(split_info.train), len(split_info.valid))
      train_data_v2 = deepcopy(train_data)
      train_data_v2.transform = valid_data.transform
      valid_data = train_data_v2
      # data loader
      train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.train), num_workers=workers, pin_memory=True)
      valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.valid), num_workers=workers, pin_memory=True)
    else:
      # data loader
      train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, shuffle=True , num_workers=workers, pin_memory=True)
      valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, shuffle=False, num_workers=workers, pin_memory=True)
    dataset_key = '{:}'.format(dataset)
    if bool(split): dataset_key = dataset_key + '-valid'
    logger.log('Evaluate ||||||| {:10s} ||||||| Train-Num={:}, Valid-Num={:}, Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(dataset_key, len(train_data), len(valid_data), len(train_loader), len(valid_loader), config.batch_size))
    logger.log('Evaluate ||||||| {:10s} ||||||| Config={:}'.format(dataset_key, config))
    results = evaluate_for_seed(arch_config, config, arch, train_loader, valid_loader, seed, logger)
    all_infos[dataset_key] = results
    all_dataset_keys.append( dataset_key )
  all_infos['all_dataset_keys'] = all_dataset_keys
  return all_infos


def main(save_dir, workers, datasets, xpaths, splits, srange, arch_index, seeds, cover_mode, meta_info, arch_config):
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled = True
  #torch.backends.cudnn.benchmark = True
  torch.backends.cudnn.deterministic = True
  torch.set_num_threads( workers )

  assert len(srange) == 2 and 0 <= srange[0] <= srange[1], 'invalid srange : {:}'.format(srange)
  sub_dir = Path(save_dir) / '{:06d}-{:06d}-C{:}-N{:}'.format(srange[0], srange[1], arch_config['channel'], arch_config['num_cells'])
  logger  = Logger(str(sub_dir), 0, False)

  all_archs = meta_info['archs']
  assert srange[1] < meta_info['total'], 'invalid range : {:}-{:} vs. {:}'.format(srange[0], srange[1], meta_info['total'])
  assert arch_index == -1 or srange[0] <= arch_index <= srange[1], 'invalid range : {:} vs. {:} vs. {:}'.format(srange[0], arch_index, srange[1])
  if arch_index == -1:
    to_evaluate_indexes = list(range(srange[0], srange[1]+1))
  else:
    to_evaluate_indexes = [arch_index]
  logger.log('xargs : seeds      = {:}'.format(seeds))
  logger.log('xargs : arch_index = {:}'.format(arch_index))
  logger.log('xargs : cover_mode = {:}'.format(cover_mode))
  logger.log('-'*100)
  logger.log('Start evaluating range = {:06d} vs. {:06d} vs. {:06d} / {:06d} with cover-mode={:}'.format(srange[0], arch_index, srange[1], meta_info['total'], cover_mode))
  for i, (dataset, xpath, split) in enumerate(zip(datasets, xpaths, splits)):
    logger.log('--->>> Evaluate {:}/{:} : dataset={:9s}, path={:}, split={:}'.format(i, len(datasets), dataset, xpath, split))
  logger.log('--->>> architecture config : {:}'.format(arch_config))

  start_time, epoch_time = time.time(), AverageMeter()
  for i, index in enumerate(to_evaluate_indexes):
    arch = all_archs[index]
    logger.log('\n{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th architecture [seeds={:}] {:}'.format('-'*15, i, len(to_evaluate_indexes), index, meta_info['total'], seeds, '-'*15))
    #logger.log('{:} {:} {:}'.format('-'*15, arch.tostr(), '-'*15))
    logger.log('{:} {:} {:}'.format('-'*15, arch, '-'*15))
    # test this arch on different datasets with different seeds
    has_continue = False
    for seed in seeds:
      to_save_name = sub_dir / 'arch-{:06d}-seed-{:04d}.pth'.format(index, seed)
      if to_save_name.exists():
        if cover_mode:
          logger.log('Find existing file : {:}, remove it before evaluation'.format(to_save_name))
          os.remove(str(to_save_name))
        else:
          logger.log('Find existing file : {:}, skip this evaluation'.format(to_save_name))
          has_continue = True
          continue
      results = evaluate_all_datasets(CellStructure.str2structure(arch), \
                                      datasets, xpaths, splits, seed, \
                                      arch_config, workers, logger)
      torch.save(results, to_save_name)
      logger.log('{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th seed={:} done, save into {:}'.format('-'*15, i, len(to_evaluate_indexes), index, meta_info['total'], seed, to_save_name))
    # measure elapsed time
    if not has_continue: epoch_time.update(time.time() - start_time)
    start_time = time.time()
    need_time = 'Time Left: {:}'.format( convert_secs2time(epoch_time.avg * (len(to_evaluate_indexes)-i-1), True) )
    logger.log('This arch costs : {:}'.format( convert_secs2time(epoch_time.val, True) ))
    logger.log('{:}'.format('*'*100))
    logger.log('{:} {:74s} {:}'.format('*'*10, '{:06d}/{:06d} ({:06d}/{:06d})-th done, left {:}'.format(i, len(to_evaluate_indexes), index, meta_info['total'], need_time), '*'*10))
    logger.log('{:}'.format('*'*100))

  logger.close()


def train_single_model(save_dir, workers, datasets, xpaths, splits, seeds, model_str, arch_config):
  assert torch.cuda.is_available(), 'CUDA is not available.'
  torch.backends.cudnn.enabled = True
  torch.backends.cudnn.deterministic = True
  #torch.backends.cudnn.benchmark = True
  torch.set_num_threads( workers )

  save_dir = Path(save_dir) / 'specifics' / '{:}-{:}-{:}'.format(model_str, arch_config['channel'], arch_config['num_cells'])
  logger = Logger(str(save_dir), 0, False)
  if model_str in CellArchitectures:
    arch = CellArchitectures[model_str]
    logger.log('The model string is found in the pre-defined architecture dict : {:}'.format(model_str))
  else:
    try:
      arch = CellStructure.str2structure(model_str)
    except:
      raise ValueError('Invalid model string : {:}. It can not be found or parsed.'.format(model_str))
  assert arch.check_valid_op(get_search_spaces('cell', 'full')), '{:} contains an invalid op.'.format(arch)
  logger.log('Start train-evaluate {:}'.format(arch.tostr()))
  logger.log('arch_config : {:}'.format(arch_config))

  start_time, seed_time = time.time(), AverageMeter()
  for _is, seed in enumerate(seeds):
    logger.log('\nThe {:02d}/{:02d}-th seed is {:} ----------------------<.>----------------------'.format(_is, len(seeds), seed))
    to_save_name = save_dir / 'seed-{:04d}.pth'.format(seed)
    if to_save_name.exists():
      logger.log('Find the existing file {:}, directly load!'.format(to_save_name))
      checkpoint = torch.load(to_save_name)
    else:
      logger.log('Did not find the existing file {:}, train and evaluate!'.format(to_save_name))
      checkpoint = evaluate_all_datasets(arch, datasets, xpaths, splits, seed, arch_config, workers, logger)
      torch.save(checkpoint, to_save_name)
    # log information
    logger.log('{:}'.format(checkpoint['info']))
    all_dataset_keys = checkpoint['all_dataset_keys']
    for dataset_key in all_dataset_keys:
      logger.log('\n{:} dataset : {:} {:}'.format('-'*15, dataset_key, '-'*15))
      dataset_info = checkpoint[dataset_key]
      #logger.log('Network ==>\n{:}'.format( dataset_info['net_string'] ))
      logger.log('Flops = {:} MB, Params = {:} MB'.format(dataset_info['flop'], dataset_info['param']))
      logger.log('config : {:}'.format(dataset_info['config']))
      logger.log('Training State (finish) = {:}'.format(dataset_info['finish-train']))
      last_epoch = dataset_info['total_epoch'] - 1
      train_acc1es, train_acc5es = dataset_info['train_acc1es'], dataset_info['train_acc5es']
      valid_acc1es, valid_acc5es = dataset_info['valid_acc1es'], dataset_info['valid_acc5es']
      logger.log('Last Info : Train = Acc@1 {:.2f}% Acc@5 {:.2f}% Error@1 {:.2f}%, Test = Acc@1 {:.2f}% Acc@5 {:.2f}% Error@1 {:.2f}%'.format(train_acc1es[last_epoch], train_acc5es[last_epoch], 100-train_acc1es[last_epoch], valid_acc1es[last_epoch], valid_acc5es[last_epoch], 100-valid_acc1es[last_epoch]))
    # measure elapsed time
    seed_time.update(time.time() - start_time)
    start_time = time.time()
    need_time = 'Time Left: {:}'.format( convert_secs2time(seed_time.avg * (len(seeds)-_is-1), True) )
    logger.log('\n<<<***>>> The {:02d}/{:02d}-th seed is {:} <finish> other procedures need {:}'.format(_is, len(seeds), seed, need_time))
  logger.close()


def generate_meta_info(save_dir, max_node, divide=40):
  aa_nas_bench_ss = get_search_spaces('cell', 'aa-nas')
  archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False)
  print ('There are {:} archs vs {:}.'.format(len(archs), len(aa_nas_bench_ss) ** ((max_node-1)*max_node/2)))

  random.seed( 88 ) # please do not change this line for reproducibility
  random.shuffle( archs )
  # to test the fixed-random shuffle
  #print ('arch [0] : {:}\n---->>>> {:}'.format( archs[0], archs[0].tostr() ))
  #print ('arch [9] : {:}\n---->>>> {:}'.format( archs[9], archs[9].tostr() ))
  assert archs[0  ].tostr() == '|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|', 'please check the 0-th architecture : {:}'.format(archs[0])
  assert archs[9  ].tostr() == '|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|', 'please check the 9-th architecture : {:}'.format(archs[9])
  assert archs[123].tostr() == '|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|', 'please check the 123-th architecture : {:}'.format(archs[123])

  total_arch = len(archs)
  num = 50000
  indexes_5W = list(range(num))
  random.seed( 1021 )
  random.shuffle( indexes_5W )
  train_split = sorted( list(set(indexes_5W[:num//2])) )
  valid_split = sorted( list(set(indexes_5W[num//2:])) )
  assert len(train_split) + len(valid_split) == num
  assert train_split[0] == 0 and train_split[10] == 26 and train_split[111] == 203 and valid_split[0] == 1 and valid_split[10] == 18 and valid_split[111] == 242, '{:} {:} {:} - {:} {:} {:}'.format(train_split[0], train_split[10], train_split[111], valid_split[0], valid_split[10], valid_split[111])

  splits = {num: {'train': train_split, 'valid': valid_split} }

  info = {'archs'   : [x.tostr() for x in archs],
          'total'   : total_arch,
          'max_node': max_node,
          'splits'  : splits}

  save_dir = Path(save_dir)
  save_dir.mkdir(parents=True, exist_ok=True)
  save_name = save_dir / 'meta-node-{:}.pth'.format(max_node)
  assert not save_name.exists(), '{:} already exists'.format(save_name)
  torch.save(info, save_name)
  print ('save the meta file into {:}'.format(save_name))

  script_name = save_dir / 'meta-node-{:}.script.txt'.format(max_node)
  with open(str(script_name), 'w') as cfile:
    gaps = total_arch // divide
    for start in range(0, total_arch, gaps):
      xend = min(start+gaps, total_arch)
      cfile.write('bash ./scripts-search/AA-NAS-train-archs.sh {:5d} {:5d} -1 \'777 888 999\'\n'.format(start, xend-1))
  print ('save the training script into {:}'.format(script_name))


if __name__ == '__main__':
  #mode_choices = ['meta', 'new', 'cover'] + ['specific-{:}'.format(_) for _ in CellArchitectures.keys()]
  parser = argparse.ArgumentParser(description='Algorithm-Agnostic NAS Benchmark', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument('--mode' ,      type=str, required=True, help='The script mode.')
  parser.add_argument('--save_dir',   type=str, help='Folder to save checkpoints and log.')
  parser.add_argument('--max_node',   type=int, help='The maximum node in a cell.')
  # arguments for training the model
  parser.add_argument('--workers',    type=int, default=8, help='number of data loading workers (default: 8)')
  parser.add_argument('--srange' ,    type=int, nargs='+', help='The range of models to be evaluated.')
  parser.add_argument('--arch_index', type=int, default=-1, help='The architecture index to be evaluated (cover mode).')
  parser.add_argument('--datasets',   type=str, nargs='+', help='The applied datasets.')
  parser.add_argument('--xpaths',     type=str, nargs='+', help='The root path for each dataset.')
  parser.add_argument('--splits',     type=int, nargs='+', help='The split indicator for each dataset.')
  parser.add_argument('--seeds' ,     type=int, nargs='+', help='The seeds used for evaluation.')
  parser.add_argument('--channel',    type=int, help='The number of channels.')
  parser.add_argument('--num_cells',  type=int, help='The number of cells in one stage.')
  args = parser.parse_args()

  assert args.mode in ['meta', 'new', 'cover'] or args.mode.startswith('specific-'), 'invalid mode : {:}'.format(args.mode)

  if args.mode == 'meta':
    generate_meta_info(args.save_dir, args.max_node)
  elif args.mode.startswith('specific'):
    assert len(args.mode.split('-')) == 2, 'invalid mode : {:}'.format(args.mode)
    model_str = args.mode.split('-')[1]
    train_single_model(args.save_dir, args.workers, args.datasets, args.xpaths, args.splits, \
                       tuple(args.seeds), model_str, {'channel': args.channel, 'num_cells': args.num_cells})
  else:
    meta_path = Path(args.save_dir) / 'meta-node-{:}.pth'.format(args.max_node)
    assert meta_path.exists(), '{:} does not exist.'.format(meta_path)
    meta_info = torch.load( meta_path )
    # check whether the arguments are valid
    assert len(args.srange) == 2 and args.srange[0] <= args.srange[1], 'invalid length of srange args: {:}'.format(args.srange)
    assert len(args.seeds) > 0, 'invalid length of seeds args: {:}'.format(args.seeds)
    assert len(args.datasets) == len(args.xpaths) == len(args.splits), 'invalid infos : {:} vs {:} vs {:}'.format(len(args.datasets), len(args.xpaths), len(args.splits))
    assert args.workers > 0, 'invalid number of workers : {:}'.format(args.workers)

    main(args.save_dir, args.workers, args.datasets, args.xpaths, args.splits, \
         tuple(args.srange), args.arch_index, tuple(args.seeds), \
         args.mode == 'cover', meta_info, \
         {'channel': args.channel, 'num_cells': args.num_cells})
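
The meta file written by generate_meta_info is an ordinary torch pickle, so it can be loaded back for a quick sanity check. A short sketch (the field names follow the info dict above; the load path is only an example):

import torch

meta = torch.load('./output/AA-NAS-BENCH-4/meta-node-4.pth')   # example path
print('{:} architectures, max_node={:}'.format(meta['total'], meta['max_node']))
print('first architecture string :', meta['archs'][0])
train_split = meta['splits'][50000]['train']   # the fixed 25000/25000 train/valid split
print('train split size :', len(train_split))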

View File

@@ -62,7 +62,7 @@ def train_controller(xloader, shared_cnn, controller, criterion, optimizer, conf
   # config. (containing some necessary arg)
   # baseline: The baseline score (i.e. average val_acc) from the previous epoch
   data_time, batch_time = AverageMeter(), AverageMeter()
-  GradnormMeter, LossMeter, ValAccMeter, BaselineMeter, RewardMeter, xend = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), time.time()
+  GradnormMeter, LossMeter, ValAccMeter, EntropyMeter, BaselineMeter, RewardMeter, xend = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), time.time()

   shared_cnn.eval()
   controller.train()
@@ -96,8 +96,9 @@ def train_controller(xloader, shared_cnn, controller, criterion, optimizer, conf
     # account
     RewardMeter.update(reward.item())
     BaselineMeter.update(baseline.item())
-    ValAccMeter.update(val_top1.item())
+    ValAccMeter.update(val_top1.item()*100)
     LossMeter.update(loss.item())
+    EntropyMeter.update(entropy.item())

     # Average gradient over controller_num_aggregate samples
     loss = loss / config.ctl_num_aggre
@@ -116,7 +117,8 @@ def train_controller(xloader, shared_cnn, controller, criterion, optimizer, conf
       Sstr = '*Train-Controller* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, config.ctl_train_steps * config.ctl_num_aggre)
       Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
       Wstr = '[Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Reward {reward.val:.2f} ({reward.avg:.2f})] Baseline {basel.val:.2f} ({basel.avg:.2f})'.format(loss=LossMeter, top1=ValAccMeter, reward=RewardMeter, basel=BaselineMeter)
-      logger.log(Sstr + ' ' + Tstr + ' ' + Wstr)
+      Estr = 'Entropy={:.4f} ({:.4f})'.format(EntropyMeter.val, EntropyMeter.avg)
+      logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Estr)

   return LossMeter.avg, ValAccMeter.avg, BaselineMeter.avg, RewardMeter.avg, baseline.item()
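
The EntropyMeter introduced above tracks the entropy term that the controller returns with each sampled architecture. For reference, a self-contained sketch of the textbook quantity it corresponds to (the tensor names here are illustrative, not the controller's API):

import torch

logits  = torch.randn(6, 5)        # e.g., 6 sampling decisions over 5 candidate ops
probs   = torch.softmax(logits, dim=-1)
# mean categorical entropy across decisions
entropy = -(probs * torch.log(probs)).sum(dim=-1).mean()
print(entropy.item())
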
@@ -250,7 +252,7 @@ def main(xargs):
     w_scheduler.update(epoch, 0.0)
     need_time = 'Time Left: {:}'.format( convert_secs2time(epoch_time.val * (total_epoch-epoch), True) )
     epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
-    logger.log('\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min(w_scheduler.get_lr())))
+    logger.log('\n[Search the {:}-th epoch] {:}, LR={:}, baseline={:}'.format(epoch_str, need_time, min(w_scheduler.get_lr()), baseline))

     cnn_loss, cnn_top1, cnn_top5 = train_shared_cnn(train_loader, shared_cnn, controller, criterion, w_scheduler, w_optimizer, epoch_str, xargs.print_freq, logger)
     logger.log('[{:}] shared-cnn : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, cnn_loss, cnn_top1, cnn_top5))
@@ -264,7 +266,7 @@ def main(xargs):
     logger.log('[{:}] controller : loss={:.2f}, accuracy={:.2f}%, baseline={:.2f}, reward={:.2f}, current-baseline={:.4f}'.format(epoch_str, ctl_loss, ctl_acc, ctl_baseline, ctl_reward, baseline))
     best_arch, _ = get_best_arch(controller, shared_cnn, valid_loader)
     shared_cnn.module.update_arch(best_arch)
-    best_valid_acc = valid_func(valid_loader, shared_cnn, criterion)
+    _, best_valid_acc, _ = valid_func(valid_loader, shared_cnn, criterion)

     genotypes[epoch] = best_arch
     # check the best accuracy
@@ -301,6 +303,14 @@ def main(xargs):
     start_time = time.time()

   logger.log('\n' + '-'*100)
+  logger.log('During searching, the best architecture is {:}'.format(genotypes['best']))
+  logger.log('Its accuracy is {:.2f}%'.format(valid_accuracies['best']))
+  logger.log('Randomly select {:} architectures and select the best.'.format(xargs.controller_num_samples))
+  final_arch, _ = get_best_arch(controller, shared_cnn, valid_loader, xargs.controller_num_samples)
+  shared_cnn.module.update_arch(final_arch)
+  final_loss, final_top1, final_top5 = valid_func(valid_loader, shared_cnn, criterion)
+  logger.log('The Selected Final Architecture : {:}'.format(final_arch))
+  logger.log('Loss={:.3f}, Accuracy@1={:.2f}%, Accuracy@5={:.2f}%'.format(final_loss, final_top1, final_top5))
   # check the performance from the architecture dataset
   #if xargs.arch_nas_dataset is None or not os.path.isfile(xargs.arch_nas_dataset):
   #  logger.log('Can not find the architecture dataset : {:}.'.format(xargs.arch_nas_dataset))

View File

@@ -23,7 +23,6 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
   data_time, batch_time = AverageMeter(), AverageMeter()
   base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
   arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
-  network.train()
   end = time.time()
   for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
     scheduler.update(None, 1.0 * step / len(xloader))
@@ -33,9 +32,13 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
     data_time.update(time.time() - end)

     # update the weights
-    network.module.set_cal_mode( 'urs' )
-    w_optimizer.zero_grad()
-    _, logits = network(base_inputs)
+    network.train()
+    sampled_arch = network.module.dync_genotype(True)
+    network.module.set_cal_mode('dynamic', sampled_arch)
+    #network.module.set_cal_mode( 'urs' )
+    network.zero_grad()
+    _, logits = network( torch.cat((base_inputs, arch_inputs), dim=0) )
+    logits = logits[:base_inputs.size(0)]
     base_loss = criterion(logits, base_targets)
     base_loss.backward()
     w_optimizer.step()
@@ -46,8 +49,9 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
     base_top5.update (base_prec5.item(), base_inputs.size(0))

     # update the architecture-weight
+    network.eval()
     network.module.set_cal_mode( 'joint' )
-    a_optimizer.zero_grad()
+    network.zero_grad()
     _, logits = network(arch_inputs)
     arch_loss = criterion(logits, arch_targets)
     arch_loss.backward()
@@ -68,15 +72,42 @@ def search_func(xloader, network, criterion, scheduler, w_optimizer, a_optimizer
       Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)
       Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)
       logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
-  return base_losses.avg, base_top1.avg, base_top5.avg
+  #print (nn.functional.softmax(network.module.arch_parameters, dim=-1))
+  #print (network.module.arch_parameters)
+  return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg
+
+
+def get_best_arch(xloader, network, n_samples):
+  with torch.no_grad():
+    network.eval()
+    archs, valid_accs = [], []
+    loader_iter = iter(xloader)
+    for i in range(n_samples):
+      try:
+        inputs, targets = next(loader_iter)
+      except:
+        loader_iter = iter(xloader)
+        inputs, targets = next(loader_iter)
+      sampled_arch = network.module.dync_genotype(False)
+      network.module.set_cal_mode('dynamic', sampled_arch)
+      _, logits = network(inputs)
+      val_top1, val_top5 = obtain_accuracy(logits.cpu().data, targets.data, topk=(1, 5))
+      archs.append( sampled_arch )
+      valid_accs.append( val_top1.item() )
+    best_idx = np.argmax(valid_accs)
+    best_arch, best_valid_acc = archs[best_idx], valid_accs[best_idx]
+    return best_arch, best_valid_acc


 def valid_func(xloader, network, criterion):
   data_time, batch_time = AverageMeter(), AverageMeter()
   arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
-  network.train()
   end = time.time()
   with torch.no_grad():
+    network.eval()
     for step, (arch_inputs, arch_targets) in enumerate(xloader):
       arch_targets = arch_targets.cuda(non_blocking=True)
       # measure data loading time
@@ -117,8 +148,8 @@ def main(xargs):
     logger.log('Load split file from {:}'.format(split_Fpath))
   else:
     raise ValueError('invalid dataset : {:}'.format(xargs.dataset))
-  config_path = 'configs/nas-benchmark/algos/SETN.config'
-  config = load_config(config_path, {'class_num': class_num, 'xshape': xshape}, logger)
+  #config_path = 'configs/nas-benchmark/algos/SETN.config'
+  config = load_config(xargs.config_path, {'class_num': class_num, 'xshape': xshape}, logger)
   # To split data
   train_data_v2 = deepcopy(train_data)
   train_data_v2.transform = valid_data.transform
@@ -126,7 +157,7 @@ def main(xargs):
   search_data = SearchDataset(xargs.dataset, train_data, train_split, valid_split)
   # data loader
   search_loader = torch.utils.data.DataLoader(search_data, batch_size=config.batch_size, shuffle=True , num_workers=xargs.workers, pin_memory=True)
-  valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split), num_workers=xargs.workers, pin_memory=True)
+  valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.test_batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(valid_split), num_workers=xargs.workers, pin_memory=True)
   logger.log('||||||| {:10s} ||||||| Search-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(xargs.dataset, len(search_loader), len(valid_loader), config.batch_size))
   logger.log('||||||| {:10s} ||||||| Config={:}'.format(xargs.dataset, config))
@@ -134,6 +165,7 @@ def main(xargs):
   model_config = dict2config({'name': 'SETN', 'C': xargs.channel, 'N': xargs.num_cells,
                               'max_nodes': xargs.max_nodes, 'num_classes': class_num,
                               'space'    : search_space}, None)
+  logger.log('search space : {:}'.format(search_space))
   search_model = get_cell_based_tiny_net(model_config)

   w_optimizer, w_scheduler, criterion = get_optim_scheduler(search_model.get_weights(), config)
@@ -173,17 +205,24 @@ def main(xargs):
     epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
     logger.log('\n[Search the {:}-th epoch] {:}, LR={:}'.format(epoch_str, need_time, min(w_scheduler.get_lr())))

-    search_w_loss, search_w_top1, search_w_top5 = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str, xargs.print_freq, logger)
-    logger.log('[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5))
-    search_model.set_cal_mode('urs')
-    valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
-    logger.log('[{:}] URS---evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
-    search_model.set_cal_mode('joint')
-    valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
-    logger.log('[{:}] JOINT-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
-    search_model.set_cal_mode('select')
-    valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
-    logger.log('[{:}] Selec-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
+    search_w_loss, search_w_top1, search_w_top5, search_a_loss, search_a_top1, search_a_top5 \
+                 = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str, xargs.print_freq, logger)
+    logger.log('[{:}] search [base] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, search_w_loss, search_w_top1, search_w_top5))
+    logger.log('[{:}] search [arch] : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, search_a_loss, search_a_top1, search_a_top5))
+
+    genotype, temp_accuracy = get_best_arch(valid_loader, network, xargs.select_num)
+    network.module.set_cal_mode('dynamic', genotype)
+    valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
+    logger.log('[{:}] evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}% | {:}'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5, genotype))
+    #search_model.set_cal_mode('urs')
+    #valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
+    #logger.log('[{:}] URS---evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
+    #search_model.set_cal_mode('joint')
+    #valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
+    #logger.log('[{:}] JOINT-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
+    #search_model.set_cal_mode('select')
+    #valid_a_loss , valid_a_top1 , valid_a_top5 = valid_func(valid_loader, network, criterion)
+    #logger.log('[{:}] Selec-evaluate : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'.format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
     # check the best accuracy
     valid_accuracies[epoch] = valid_a_top1
     if valid_a_top1 > valid_accuracies['best']:
@@ -192,7 +231,7 @@ def main(xargs):
       find_best = True
     else: find_best = False

-    genotypes[epoch] = search_model.genotype()
+    genotypes[epoch] = genotype
     logger.log('<<<--->>> The {:}-th epoch : {:}'.format(epoch_str, genotypes[epoch]))
     # save checkpoint
     save_path = save_checkpoint({'epoch' : epoch + 1,
@@ -219,6 +258,7 @@ def main(xargs):
     start_time = time.time()

   # sampling
+  """
   with torch.no_grad():
     logger.log('arch-parameters :\n{:}'.format( nn.functional.softmax(search_model.arch_parameters, dim=-1).cpu() ))
     selected_archs = set()
@@ -238,6 +278,7 @@ def main(xargs):
       if best_arch is None or best_acc < valid_a_top1:
         best_arch, best_acc = arch, valid_a_top1
   logger.log('Find the best one : {:} with accuracy={:.2f}%'.format(best_arch, best_acc))
+  """

   logger.log('\n' + '-'*100)
   # check the performance from the architecture dataset
@@ -267,6 +308,7 @@ if __name__ == '__main__':
   parser.add_argument('--channel',    type=int, help='The number of channels.')
   parser.add_argument('--num_cells',  type=int, help='The number of cells in one stage.')
   parser.add_argument('--select_num', type=int, help='The number of selected architectures to evaluate.')
+  parser.add_argument('--config_path', type=str, help='.')
   # architecture learning rate
   parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
   parser.add_argument('--arch_weight_decay',  type=float, default=1e-3, help='weight decay for arch encoding')

View File

@@ -83,7 +83,8 @@ class SearchCell(nn.Module):
         for j in range(i):
           node_str = '{:}<-{:}'.format(i, j)
           weights = weightss[ self.edge2index[node_str] ]
-          aggregation = sum( layer(nodes[j]) * w for layer, w in zip(self.edges[node_str], weights) ) / weights.numel()
+          #aggregation = sum( layer(nodes[j]) * w for layer, w in zip(self.edges[node_str], weights) ) / weights.numel()
+          aggregation = sum( layer(nodes[j]) * w for layer, w in zip(self.edges[node_str], weights) )
           inter_nodes.append( aggregation )
       nodes.append( sum(inter_nodes) )
     return nodes[-1]
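
For intuition on this change: the mixed-edge weights come from a softmax and therefore already sum to one, so the extra division by weights.numel() shrank every edge output by the number of candidate operations (five in the aa-nas space, as the architecture strings earlier in this commit show). A small self-contained illustration:

import torch

weights = torch.softmax(torch.zeros(5), dim=0)     # uniform: each w = 0.2
outs    = [torch.ones(1) for _ in range(5)]        # stand-ins for layer(nodes[j])

old_agg = sum(o * w for o, w in zip(outs, weights)) / weights.numel()
new_agg = sum(o * w for o, w in zip(outs, weights))
print(old_agg.item(), new_agg.item())              # 0.2 vs. 1.0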

View File

@@ -3,7 +3,7 @@
 ######################################################################################
 # One-Shot Neural Architecture Search via Self-Evaluated Template Network, ICCV 2019 #
 ######################################################################################
-import torch
+import torch, random
 import torch.nn as nn
 from copy import deepcopy
 from ..cell_operations import ResNetBasicblock
@@ -87,7 +87,7 @@ class TinyNetworkSETN(nn.Module):
     return Structure( genotypes )

-  def dync_genotype(self):
+  def dync_genotype(self, use_random=False):
     genotypes = []
     with torch.no_grad():
       alphas_cpu = nn.functional.softmax(self.arch_parameters, dim=-1)
@@ -95,9 +95,12 @@ class TinyNetworkSETN(nn.Module):
       xlist = []
       for j in range(i):
         node_str = '{:}<-{:}'.format(i, j)
-        weights  = alphas_cpu[ self.edge2index[node_str] ]
-        op_index = torch.multinomial(weights, 1).item()
-        op_name  = self.op_names[ op_index ]
+        if use_random:
+          op_name = random.choice(self.op_names)
+        else:
+          weights  = alphas_cpu[ self.edge2index[node_str] ]
+          op_index = torch.multinomial(weights, 1).item()
+          op_name  = self.op_names[ op_index ]
         xlist.append((op_name, j))
       genotypes.append( tuple(xlist) )
     return Structure( genotypes )
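
With use_random=True, dync_genotype draws each edge's operation uniformly (used by the new search_func to train the shared weights), while use_random=False samples from softmax(arch_parameters) (used by get_best_arch for candidate selection). A self-contained sketch of the per-edge sampling logic, with op names taken from the architecture strings earlier in this commit:

import random, torch

op_names = ('none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3')
alphas   = torch.randn(len(op_names))              # stand-in for one edge's arch parameters

def sample_op(use_random=False):
  if use_random:
    return random.choice(op_names)                 # uniform random sampling
  weights = torch.softmax(alphas, dim=-1)          # sample from the learned distribution
  return op_names[torch.multinomial(weights, 1).item()]

print(sample_op(True), sample_op(False))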

View File

@@ -69,12 +69,15 @@ class CosineAnnealingLR(_LRScheduler):
   def get_lr(self):
     lrs = []
     for base_lr in self.base_lrs:
-      if self.current_epoch >= self.warmup_epochs:
+      if self.current_epoch >= self.warmup_epochs and self.current_epoch < self.max_epochs:
         last_epoch = self.current_epoch - self.warmup_epochs
-        if last_epoch < self.T_max:
-          lr = self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * last_epoch / self.T_max)) / 2
-        else:
-          lr = self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * (self.T_max-1.0) / self.T_max)) / 2
+        #if last_epoch < self.T_max:
+        #if last_epoch < self.max_epochs:
+        lr = self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * last_epoch / self.T_max)) / 2
+        #else:
+        #  lr = self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * (self.T_max-1.0) / self.T_max)) / 2
+      elif self.current_epoch >= self.max_epochs:
+        lr = self.eta_min
       else:
         lr = (self.current_epoch / self.warmup_epochs + self.current_iter / self.warmup_epochs) * base_lr
       lrs.append( lr )
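
After this change the schedule has three phases: linear warmup, a single cosine decay, and a constant eta_min tail once current_epoch reaches max_epochs. A self-contained sketch of the resulting curve, assuming max_epochs = warmup_epochs + T_max (the values below are hypothetical; the class's real bookkeeping lives in _LRScheduler):

import math

def lr_at(epoch, base_lr=0.1, eta_min=0.0, warmup_epochs=5, T_max=55, max_epochs=60):
  if warmup_epochs <= epoch < max_epochs:          # cosine-decay phase
    last_epoch = epoch - warmup_epochs
    return eta_min + (base_lr - eta_min) * (1 + math.cos(math.pi * last_epoch / T_max)) / 2
  elif epoch >= max_epochs:                        # constant tail at eta_min
    return eta_min
  return (epoch / warmup_epochs) * base_lr         # linear warmup (ignoring current_iter)

for e in (0, 2, 5, 30, 55, 60):
  print(e, round(lr_at(e), 4))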

View File

@@ -0,0 +1,42 @@
#!/bin/bash
# Efficient Neural Architecture Search via Parameter Sharing, ICML 2018
# bash ./scripts-search/scripts/algos/ENAS.sh cifar10 -1
echo script name: $0
echo $# arguments
if [ "$#" -ne 2 ]; then
  echo "Illegal number of parameters : $#"
  echo "Need 2 parameters for dataset and seed"
  exit 1
fi
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set the TORCH_HOME environment variable to indicate the data directory"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
seed=$2
channel=16
num_cells=5
max_nodes=4

if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
  data_path="$TORCH_HOME/cifar.python"
else
  data_path="$TORCH_HOME/cifar.python/ImageNet16"
fi

save_dir=./output/cell-search-tiny/ENAS-${dataset}

OMP_NUM_THREADS=4 python ./exps/algos/ENAS.py \
  --save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
  --dataset ${dataset} --data_path ${data_path} \
  --search_space_name aa-nas \
  --config_path ./configs/nas-benchmark/algos/ENAS.config \
  --controller_entropy_weight 0.0001 \
  --controller_bl_dec 0.99 \
  --controller_train_steps 50 \
  --controller_num_aggregate 20 \
  --controller_num_samples 100 \
  --workers 4 --print_freq 200 --rand_seed ${seed}

View File

@@ -33,6 +33,7 @@ OMP_NUM_THREADS=4 python ./exps/algos/SETN.py \
   --save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
   --dataset ${dataset} --data_path ${data_path} \
   --search_space_name aa-nas \
+  --config_path configs/nas-benchmark/algos/SETN.config \
   --arch_learning_rate 0.0003 --arch_weight_decay 0.001 \
   --select_num 100 \
   --workers 4 --print_freq 200 --rand_seed ${seed}