From fac556c176332dd37eb31646bb9ae45e0a062b79 Mon Sep 17 00:00:00 2001
From: D-X-Y <280835372@qq.com>
Date: Sun, 10 Nov 2019 00:15:31 +1100
Subject: [PATCH] clean headers

---
 .gitignore | 3 +
 README.md | 7 -
 exps/AA-NAS-Bench-main.py | 274 ------------------
 exps/AA-NAS-test-API.py | 38 +++
 lib/config_utils/__init__.py | 3 -
 lib/config_utils/attention_args.py | 3 -
 lib/config_utils/basic_args.py | 3 -
 lib/config_utils/cls_init_args.py | 3 -
 lib/config_utils/cls_kd_args.py | 3 -
 lib/config_utils/pruning_args.py | 3 -
 lib/config_utils/random_baseline.py | 3 -
 lib/config_utils/search_args.py | 3 -
 lib/config_utils/search_single_args.py | 3 -
 lib/config_utils/share_args.py | 3 -
 lib/datasets/__init__.py | 3 -
 lib/datasets/get_dataset_with_transform.py | 3 -
 lib/log_utils/__init__.py | 3 -
 lib/log_utils/meter.py | 3 -
 lib/log_utils/print_logger.py | 6 +-
 lib/models/CifarResNet.py | 3 -
 lib/models/__init__.py | 3 -
 lib/models/cell_infers/cells.py | 3 -
 lib/models/cell_infers/tiny_network.py | 3 -
 lib/models/cell_searchs/genotypes.py | 18 ++
 .../cell_searchs/search_model_darts_v1.py | 2 -
 .../cell_searchs/search_model_darts_v2.py | 2 -
 lib/models/cell_searchs/search_model_enas.py | 2 -
 .../cell_searchs/search_model_enas_utils.py | 2 -
 lib/models/cell_searchs/search_model_gdas.py | 2 -
 lib/models/cell_searchs/search_model_setn.py | 2 -
 lib/models/shape_infers/shared_utils.py | 2 -
 lib/nas_infer_model/DXYs/genotypes.py | 3 -
 lib/nas_infer_model/__init__.py | 3 -
 lib/procedures/__init__.py | 3 -
 lib/procedures/basic_main.py | 3 -
 lib/procedures/optimizers.py | 3 -
 lib/procedures/search_main.py | 3 -
 lib/procedures/search_main_v2.py | 3 -
 lib/procedures/simple_KD_main.py | 3 -
 lib/utils/__init__.py | 3 -
 lib/utils/flop_benchmark.py | 4 -
 41 files changed, 60 insertions(+), 385 deletions(-)
 delete mode 100644 exps/AA-NAS-Bench-main.py
 create mode 100644 exps/AA-NAS-test-API.py

diff --git a/.gitignore b/.gitignore
index 2525cce..a2a45a5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -107,3 +107,6 @@ scripts-nas/.nfs00*
 # logs and snapshots
 output
 logs
+
+# snapshot
+a.pth
diff --git a/README.md b/README.md
index 04a5ca1..23f739d 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,6 @@ This project contains the following neural architecture search algorithms, imple
 - Network Pruning via Transformable Architecture Search, NeurIPS 2019
 - One-Shot Neural Architecture Search via Self-Evaluated Template Network, ICCV 2019
 - Searching for A Robust Neural Architecture in Four GPU Hours, CVPR 2019
-- Auto-ReID: Searching for a Part-Aware ConvNet for Person Re-Identification, ICCV 2019
 - several typical classification models, e.g., ResNet and DenseNet (see BASELINE.md)
 
 
@@ -104,12 +103,6 @@ CUDA_VISIBLE_DEVICES=0 bash ./scripts-search/algos/DARTS-V2.sh cifar10 -1
 ```
 
-## [Auto-ReID: Searching for a Part-Aware ConvNet for Person Re-Identification](https://arxiv.org/abs/1903.09776)
-
-The part-aware module is defined at [here](https://github.com/D-X-Y/NAS-Projects/blob/master/lib/models/cell_searchs/operations.py#L85).
-
-For more questions, please contact Ruijie Quan (Ruijie.Quan@student.uts.edu.au).
-
 # Citation
 
 If you find that this project helps your research, please consider citing some of the following papers:
 
diff --git a/exps/AA-NAS-Bench-main.py b/exps/AA-NAS-Bench-main.py
deleted file mode 100644
index e021fb6..0000000
--- a/exps/AA-NAS-Bench-main.py
+++ /dev/null
@@ -1,274 +0,0 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
-import os, sys, time, torch, random, argparse
-from PIL import ImageFile
-ImageFile.LOAD_TRUNCATED_IMAGES = True
-from copy import deepcopy
-from pathlib import Path
-
-lib_dir = (Path(__file__).parent / '..' / 'lib').resolve()
-if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
-from config_utils import load_config
-from procedures import save_checkpoint, copy_checkpoint
-from procedures import get_machine_info
-from datasets import get_datasets
-from log_utils import Logger, AverageMeter, time_string, convert_secs2time
-from models import CellStructure, CellArchitectures, get_search_spaces
-from AA_functions import evaluate_for_seed
-
-
-def evaluate_all_datasets(arch, datasets, xpaths, splits, seed, arch_config, workers, logger):
-  machine_info, arch_config = get_machine_info(), deepcopy(arch_config)
-  all_infos = {'info': machine_info}
-  all_dataset_keys = []
-  # loop over all the datasets
-  for dataset, xpath, split in zip(datasets, xpaths, splits):
-    # train and valid data
-    train_data, valid_data, xshape, class_num = get_datasets(dataset, xpath, -1)
-    # load the configuration
-    if dataset == 'cifar10' or dataset == 'cifar100':
-      config_path = 'configs/nas-benchmark/CIFAR.config'
-      split_info  = load_config('configs/nas-benchmark/cifar-split.txt', None, None)
-    elif dataset.startswith('ImageNet16'):
-      config_path = 'configs/nas-benchmark/ImageNet-16.config'
-      split_info  = load_config('configs/nas-benchmark/{:}-split.txt'.format(dataset), None, None)
-    else:
-      raise ValueError('invalid dataset : {:}'.format(dataset))
-    config = load_config(config_path, \
-                         {'class_num': class_num,
-                          'xshape'   : xshape}, \
-                         logger)
-    # check whether to use the split validation set
-    if bool(split):
-      assert len(train_data) == len(split_info.train) + len(split_info.valid), 'invalid length : {:} vs {:} + {:}'.format(len(train_data), len(split_info.train), len(split_info.valid))
-      train_data_v2 = deepcopy(train_data)
-      train_data_v2.transform = valid_data.transform
-      valid_data = train_data_v2
-      # data loader
-      train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.train), num_workers=workers, pin_memory=True)
-      valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.valid), num_workers=workers, pin_memory=True)
-    else:
-      # data loader
-      train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, shuffle=True , num_workers=workers, pin_memory=True)
-      valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, shuffle=False, num_workers=workers, pin_memory=True)
-
-    dataset_key = '{:}'.format(dataset)
-    if bool(split): dataset_key = dataset_key + '-valid'
-    logger.log('Evaluate ||||||| {:10s} ||||||| Train-Num={:}, Valid-Num={:}, Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(dataset_key, len(train_data), len(valid_data), len(train_loader), len(valid_loader), config.batch_size))
-    logger.log('Evaluate ||||||| {:10s} ||||||| Config={:}'.format(dataset_key, config))
-    results = evaluate_for_seed(arch_config, config, arch, train_loader, valid_loader, seed, logger)
-    all_infos[dataset_key] = results
-    all_dataset_keys.append( dataset_key )
-  all_infos['all_dataset_keys'] = all_dataset_keys
-  return all_infos
-
-
-def main(save_dir, workers, datasets, xpaths, splits, srange, arch_index, seeds, cover_mode, meta_info, arch_config):
-  assert torch.cuda.is_available(), 'CUDA is not available.'
-  torch.backends.cudnn.enabled = True
-  #torch.backends.cudnn.benchmark = True
-  torch.backends.cudnn.deterministic = True
-  torch.set_num_threads( workers )
-
-  assert len(srange) == 2 and 0 <= srange[0] <= srange[1], 'invalid srange : {:}'.format(srange)
-
-  sub_dir = Path(save_dir) / '{:06d}-{:06d}-C{:}-N{:}'.format(srange[0], srange[1], arch_config['channel'], arch_config['num_cells'])
-  logger = Logger(str(sub_dir), 0, False)
-
-  all_archs = meta_info['archs']
-  assert srange[1] < meta_info['total'], 'invalid range : {:}-{:} vs. {:}'.format(srange[0], srange[1], meta_info['total'])
-  assert arch_index == -1 or srange[0] <= arch_index <= srange[1], 'invalid range : {:} vs. {:} vs. {:}'.format(srange[0], arch_index, srange[1])
-  if arch_index == -1:
-    to_evaluate_indexes = list(range(srange[0], srange[1]+1))
-  else:
-    to_evaluate_indexes = [arch_index]
-  logger.log('xargs : seeds      = {:}'.format(seeds))
-  logger.log('xargs : arch_index = {:}'.format(arch_index))
-  logger.log('xargs : cover_mode = {:}'.format(cover_mode))
-  logger.log('-'*100)
-
-  logger.log('Start evaluating range =: {:06d} vs. {:06d} vs. {:06d} / {:06d} with cover-mode={:}'.format(srange[0], arch_index, srange[1], meta_info['total'], cover_mode))
-  for i, (dataset, xpath, split) in enumerate(zip(datasets, xpaths, splits)):
-    logger.log('--->>> Evaluate {:}/{:} : dataset={:9s}, path={:}, split={:}'.format(i, len(datasets), dataset, xpath, split))
-  logger.log('--->>> architecture config : {:}'.format(arch_config))
-
-
-  start_time, epoch_time = time.time(), AverageMeter()
-  for i, index in enumerate(to_evaluate_indexes):
-    arch = all_archs[index]
-    logger.log('\n{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th architecture [seeds={:}] {:}'.format('-'*15, i, len(to_evaluate_indexes), index, meta_info['total'], seeds, '-'*15))
-    #logger.log('{:} {:} {:}'.format('-'*15, arch.tostr(), '-'*15))
-    logger.log('{:} {:} {:}'.format('-'*15, arch, '-'*15))
-
-    # test this arch on different datasets with different seeds
-    has_continue = False
-    for seed in seeds:
-      to_save_name = sub_dir / 'arch-{:06d}-seed-{:04d}.pth'.format(index, seed)
-      if to_save_name.exists():
-        if cover_mode:
-          logger.log('Find existing file : {:}, remove it before evaluation'.format(to_save_name))
-          os.remove(str(to_save_name))
-        else:
-          logger.log('Find existing file : {:}, skip this evaluation'.format(to_save_name))
-          has_continue = True
-          continue
-      results = evaluate_all_datasets(CellStructure.str2structure(arch), \
-                                      datasets, xpaths, splits, seed, \
-                                      arch_config, workers, logger)
-      torch.save(results, to_save_name)
-      logger.log('{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th seed={:} done, save into {:}'.format('-'*15, i, len(to_evaluate_indexes), index, meta_info['total'], seed, to_save_name))
-    # measure elapsed time
-    if not has_continue: epoch_time.update(time.time() - start_time)
-    start_time = time.time()
-    need_time = 'Time Left: {:}'.format( convert_secs2time(epoch_time.avg * (len(to_evaluate_indexes)-i-1), True) )
-    logger.log('This arch costs : {:}'.format( convert_secs2time(epoch_time.val, True) ))
-    logger.log('{:}'.format('*'*100))
-    logger.log('{:} {:74s} {:}'.format('*'*10, '{:06d}/{:06d} ({:06d}/{:06d})-th done, left {:}'.format(i, len(to_evaluate_indexes), index, meta_info['total'], need_time), '*'*10))
-    logger.log('{:}'.format('*'*100))
-
-  logger.close()
-
-
-def train_single_model(save_dir, workers, datasets, xpaths, splits, seeds, model_str, arch_config):
-  assert torch.cuda.is_available(), 'CUDA is not available.'
-  torch.backends.cudnn.enabled = True
-  torch.backends.cudnn.deterministic = True
-  #torch.backends.cudnn.benchmark = True
-  torch.set_num_threads( workers )
-
-  save_dir = Path(save_dir) / 'specifics' / '{:}-{:}-{:}'.format(model_str, arch_config['channel'], arch_config['num_cells'])
-  logger = Logger(str(save_dir), 0, False)
-  if model_str in CellArchitectures:
-    arch = CellArchitectures[model_str]
-    logger.log('The model string is found in pre-defined architecture dict : {:}'.format(model_str))
-  else:
-    try:
-      arch = CellStructure.str2structure(model_str)
-    except:
-      raise ValueError('Invalid model string : {:}. It can not be found or parsed.'.format(model_str))
-  assert arch.check_valid_op(get_search_spaces('cell', 'full')), '{:} has an invalid op.'.format(arch)
-  logger.log('Start train-evaluate {:}'.format(arch.tostr()))
-  logger.log('arch_config : {:}'.format(arch_config))
-
-  start_time, seed_time = time.time(), AverageMeter()
-  for _is, seed in enumerate(seeds):
-    logger.log('\nThe {:02d}/{:02d}-th seed is {:} ----------------------<.>----------------------'.format(_is, len(seeds), seed))
-    to_save_name = save_dir / 'seed-{:04d}.pth'.format(seed)
-    if to_save_name.exists():
-      logger.log('Find the existing file {:}, directly load!'.format(to_save_name))
-      checkpoint = torch.load(to_save_name)
-    else:
-      logger.log('Does not find the existing file {:}, train and evaluate!'.format(to_save_name))
-      checkpoint = evaluate_all_datasets(arch, datasets, xpaths, splits, seed, arch_config, workers, logger)
-      torch.save(checkpoint, to_save_name)
-    # log information
-    logger.log('{:}'.format(checkpoint['info']))
-    all_dataset_keys = checkpoint['all_dataset_keys']
-    for dataset_key in all_dataset_keys:
-      logger.log('\n{:} dataset : {:} {:}'.format('-'*15, dataset_key, '-'*15))
-      dataset_info = checkpoint[dataset_key]
-      #logger.log('Network ==>\n{:}'.format( dataset_info['net_string'] ))
-      logger.log('Flops = {:} MB, Params = {:} MB'.format(dataset_info['flop'], dataset_info['param']))
-      logger.log('config : {:}'.format(dataset_info['config']))
-      logger.log('Training State (finish) = {:}'.format(dataset_info['finish-train']))
-      last_epoch = dataset_info['total_epoch'] - 1
-      train_acc1es, train_acc5es = dataset_info['train_acc1es'], dataset_info['train_acc5es']
-      valid_acc1es, valid_acc5es = dataset_info['valid_acc1es'], dataset_info['valid_acc5es']
-      logger.log('Last Info : Train = Acc@1 {:.2f}% Acc@5 {:.2f}% Error@1 {:.2f}%, Test = Acc@1 {:.2f}% Acc@5 {:.2f}% Error@1 {:.2f}%'.format(train_acc1es[last_epoch], train_acc5es[last_epoch], 100-train_acc1es[last_epoch], valid_acc1es[last_epoch], valid_acc5es[last_epoch], 100-valid_acc1es[last_epoch]))
-    # measure elapsed time
-    seed_time.update(time.time() - start_time)
-    start_time = time.time()
-    need_time = 'Time Left: {:}'.format( convert_secs2time(seed_time.avg * (len(seeds)-_is-1), True) )
-    logger.log('\n<<<***>>> The {:02d}/{:02d}-th seed is {:} other procedures need {:}'.format(_is, len(seeds), seed, need_time))
-  logger.close()
-
-
-def generate_meta_info(save_dir, max_node, divide=40):
-  aa_nas_bench_ss = get_search_spaces('cell', 'aa-nas')
-  archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False)
-  print ('There are {:} archs vs {:}.'.format(len(archs), len(aa_nas_bench_ss) ** ((max_node-1)*max_node/2)))
-
-  random.seed( 88 ) # please do not change this line for reproducibility
-  random.shuffle( archs )
-  # to test fixed-random shuffle
-  #print ('arch [0] : {:}\n---->>>> {:}'.format( archs[0], archs[0].tostr() ))
-  #print ('arch [9] : {:}\n---->>>> {:}'.format( archs[9], archs[9].tostr() ))
-  assert archs[0 ].tostr() == '|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|', 'please check the 0-th architecture : {:}'.format(archs[0])
-  assert archs[9 ].tostr() == '|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|', 'please check the 9-th architecture : {:}'.format(archs[9])
-  assert archs[123].tostr() == '|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|', 'please check the 123-th architecture : {:}'.format(archs[123])
-  total_arch = len(archs)
-
-  num = 50000
-  indexes_5W = list(range(num))
-  random.seed( 1021 )
-  random.shuffle( indexes_5W )
-  train_split = sorted( list(set(indexes_5W[:num//2])) )
-  valid_split = sorted( list(set(indexes_5W[num//2:])) )
-  assert len(train_split) + len(valid_split) == num
-  assert train_split[0] == 0 and train_split[10] == 26 and train_split[111] == 203 and valid_split[0] == 1 and valid_split[10] == 18 and valid_split[111] == 242, '{:} {:} {:} - {:} {:} {:}'.format(train_split[0], train_split[10], train_split[111], valid_split[0], valid_split[10], valid_split[111])
-  splits = {num: {'train': train_split, 'valid': valid_split} }
-
-  info = {'archs'   : [x.tostr() for x in archs],
-          'total'   : total_arch,
-          'max_node': max_node,
-          'splits'  : splits}
-
-  save_dir = Path(save_dir)
-  save_dir.mkdir(parents=True, exist_ok=True)
-  save_name = save_dir / 'meta-node-{:}.pth'.format(max_node)
-  assert not save_name.exists(), '{:} already exists'.format(save_name)
-  torch.save(info, save_name)
-  print ('save the meta file into {:}'.format(save_name))
-
-  script_name = save_dir / 'meta-node-{:}.script.txt'.format(max_node)
-  with open(str(script_name), 'w') as cfile:
-    gaps = total_arch // divide
-    for start in range(0, total_arch, gaps):
-      xend = min(start+gaps, total_arch)
-      cfile.write('bash ./scripts-search/AA-NAS-train-archs.sh {:5d} {:5d} -1 \'777 888 999\'\n'.format(start, xend-1))
-  print ('save the training script into {:}'.format(script_name))
-
-
-
-if __name__ == '__main__':
-  #mode_choices = ['meta', 'new', 'cover'] + ['specific-{:}'.format(_) for _ in CellArchitectures.keys()]
-  parser = argparse.ArgumentParser(description='Algorithm-Agnostic NAS Benchmark', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-  parser.add_argument('--mode',       type=str, required=True,  help='The script mode.')
-  parser.add_argument('--save_dir',   type=str,                 help='Folder to save checkpoints and log.')
-  parser.add_argument('--max_node',   type=int,                 help='The maximum node in a cell.')
-  # used to train the models
-  parser.add_argument('--workers',    type=int, default=8,      help='number of data loading workers (default: 8)')
-  parser.add_argument('--srange',     type=int, nargs='+',      help='The range of models to be evaluated')
-  parser.add_argument('--arch_index', type=int, default=-1,     help='The architecture index to be evaluated (cover mode).')
-  parser.add_argument('--datasets',   type=str, nargs='+',      help='The applied datasets.')
-  parser.add_argument('--xpaths',     type=str, nargs='+',      help='The root paths for these datasets.')
-  parser.add_argument('--splits',     type=int, nargs='+',      help='The validation-split indicator for each dataset.')
-  parser.add_argument('--seeds',      type=int, nargs='+',      help='The seeds used for evaluation.')
-  parser.add_argument('--channel',    type=int,                 help='The number of channels.')
-  parser.add_argument('--num_cells',  type=int,                 help='The number of cells in one stage.')
-  args = parser.parse_args()
-
-  assert args.mode in ['meta', 'new', 'cover'] or args.mode.startswith('specific-'), 'invalid mode : {:}'.format(args.mode)
-
-  if args.mode == 'meta':
-    generate_meta_info(args.save_dir, args.max_node)
-  elif args.mode.startswith('specific'):
-    assert len(args.mode.split('-')) == 2, 'invalid mode : {:}'.format(args.mode)
-    model_str = args.mode.split('-')[1]
-    train_single_model(args.save_dir, args.workers, args.datasets, args.xpaths, args.splits, \
-                       tuple(args.seeds), model_str, {'channel': args.channel, 'num_cells': args.num_cells})
-  else:
-    meta_path = Path(args.save_dir) / 'meta-node-{:}.pth'.format(args.max_node)
-    assert meta_path.exists(), '{:} does not exist.'.format(meta_path)
-    meta_info = torch.load( meta_path )
-    # check whether the args are ok
-    assert len(args.srange) == 2 and args.srange[0] <= args.srange[1], 'invalid length of srange args: {:}'.format(args.srange)
-    assert len(args.seeds) > 0, 'invalid length of seeds args: {:}'.format(args.seeds)
-    assert len(args.datasets) == len(args.xpaths) == len(args.splits), 'invalid infos : {:} vs {:} vs {:}'.format(len(args.datasets), len(args.xpaths), len(args.splits))
-    assert args.workers > 0, 'invalid number of workers : {:}'.format(args.workers)
-
-    main(args.save_dir, args.workers, args.datasets, args.xpaths, args.splits, \
-         tuple(args.srange), args.arch_index, tuple(args.seeds), \
-         args.mode == 'cover', meta_info, \
-         {'channel': args.channel, 'num_cells': args.num_cells})
diff --git a/exps/AA-NAS-test-API.py b/exps/AA-NAS-test-API.py
new file mode 100644
index 0000000..e478a60
--- /dev/null
+++ b/exps/AA-NAS-test-API.py
@@ -0,0 +1,38 @@
+import os, sys, time, queue, torch
+from pathlib import Path
+
+lib_dir = (Path(__file__).parent / '..' / 'lib').resolve()
+if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
+
+from log_utils import time_string
+from models import CellStructure
+
+def get_unique_matrix(archs, consider_zero):
+  UniquStrs = [arch.to_unique_str(consider_zero) for arch in archs]
+  print ('{:} create unique-string done'.format(time_string()))
+  sm_matrix = torch.eye(len(archs)).bool()
+  for i, _ in enumerate(UniquStrs):
+    for j in range(i):
+      sm_matrix[i,j] = sm_matrix[j,i] = UniquStrs[i] == UniquStrs[j]
+  unique_ids, unique_num = [-1 for _ in archs], 0
+  for i in range(len(unique_ids)):
+    if unique_ids[i] > -1: continue
+    neighbours = sm_matrix[i].nonzero().view(-1).tolist()
+    for nghb in neighbours:
+      assert unique_ids[nghb] == -1, 'impossible'
+      unique_ids[nghb] = unique_num
+    unique_num += 1
+  return sm_matrix, unique_ids, unique_num
+
+def check_unique_arch():
+  print ('{:} start'.format(time_string()))
+  meta_info = torch.load('./output/AA-NAS-BENCH-4/meta-node-4.pth')
+  arch_strs = meta_info['archs']
+  archs = [CellStructure.str2structure(arch_str) for arch_str in arch_strs]
+  _, _, unique_num = get_unique_matrix(archs, False)
+  print ('{:} There are {:} unique architectures (not considering zero).'.format(time_string(), unique_num))
+  _, _, unique_num = get_unique_matrix(archs, True)
+  print ('{:} There are {:} unique architectures (considering zero).'.format(time_string(), unique_num))
+
+if __name__ == '__main__':
+  check_unique_arch()
diff --git a/lib/config_utils/__init__.py b/lib/config_utils/__init__.py
index dd91409..66f6355 100644
--- a/lib/config_utils/__init__.py
+++ b/lib/config_utils/__init__.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 from .configure_utils import load_config, dict2config, configure2str
 from .basic_args import obtain_basic_args
 from .attention_args import obtain_attention_args
diff --git a/lib/config_utils/attention_args.py b/lib/config_utils/attention_args.py
index a52c987..aec0915 100644
--- a/lib/config_utils/attention_args.py
+++ b/lib/config_utils/attention_args.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, random, argparse
 from .share_args import add_shared_args
 
diff --git a/lib/config_utils/basic_args.py b/lib/config_utils/basic_args.py
index dc6d78c..3146371 100644
--- a/lib/config_utils/basic_args.py
+++ b/lib/config_utils/basic_args.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, random, argparse
 from .share_args import add_shared_args
 
diff --git a/lib/config_utils/cls_init_args.py b/lib/config_utils/cls_init_args.py
index a29ecce..c1f9e43 100644
--- a/lib/config_utils/cls_init_args.py
+++ b/lib/config_utils/cls_init_args.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, random, argparse
 from .share_args import add_shared_args
 
diff --git a/lib/config_utils/cls_kd_args.py b/lib/config_utils/cls_kd_args.py
index 8f85b91..1f70584 100644
--- a/lib/config_utils/cls_kd_args.py
+++ b/lib/config_utils/cls_kd_args.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, random, argparse
 from .share_args import add_shared_args
 
diff --git a/lib/config_utils/pruning_args.py b/lib/config_utils/pruning_args.py
index 1d1fbff..7462a71 100644
--- a/lib/config_utils/pruning_args.py
+++ b/lib/config_utils/pruning_args.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, random, argparse
 from .share_args import add_shared_args
 
diff --git a/lib/config_utils/random_baseline.py b/lib/config_utils/random_baseline.py
index 5e6fca5..79b89c8 100644
--- a/lib/config_utils/random_baseline.py
+++ b/lib/config_utils/random_baseline.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, random, argparse
 from .share_args import add_shared_args
 
diff --git a/lib/config_utils/search_args.py b/lib/config_utils/search_args.py
index 5ccb957..ecb60a1 100644
--- a/lib/config_utils/search_args.py
+++ b/lib/config_utils/search_args.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, random, argparse
 from .share_args import add_shared_args
 
diff --git a/lib/config_utils/search_single_args.py b/lib/config_utils/search_single_args.py
index 41cdfa6..13e1ea6 100644
--- a/lib/config_utils/search_single_args.py
+++ b/lib/config_utils/search_single_args.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, random, argparse
 from .share_args import add_shared_args
 
diff --git a/lib/config_utils/share_args.py b/lib/config_utils/share_args.py
index 738b1c0..b582373 100644
--- a/lib/config_utils/share_args.py
+++ b/lib/config_utils/share_args.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, random, argparse
 
 def add_shared_args( parser ):
diff --git a/lib/datasets/__init__.py b/lib/datasets/__init__.py
index 6000628..20d3a4c 100644
--- a/lib/datasets/__init__.py
+++ b/lib/datasets/__init__.py
@@ -1,5 +1,2 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 from .get_dataset_with_transform import get_datasets
 from .SearchDatasetWrap import SearchDataset
diff --git a/lib/datasets/get_dataset_with_transform.py b/lib/datasets/get_dataset_with_transform.py
index 416bcde..f437fe3 100644
--- a/lib/datasets/get_dataset_with_transform.py
+++ b/lib/datasets/get_dataset_with_transform.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, torch
 import os.path as osp
 import numpy as np
diff --git a/lib/log_utils/__init__.py b/lib/log_utils/__init__.py
index 0c8858a..c3f2438 100644
--- a/lib/log_utils/__init__.py
+++ b/lib/log_utils/__init__.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 from .logger import Logger
 from .print_logger import PrintLogger
 from .meter import AverageMeter
diff --git a/lib/log_utils/meter.py b/lib/log_utils/meter.py
index 14575db..3138fec 100644
--- a/lib/log_utils/meter.py
+++ b/lib/log_utils/meter.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import time, sys
 import numpy as np
 
diff --git a/lib/log_utils/print_logger.py b/lib/log_utils/print_logger.py
index 65fed21..5dc5b14 100644
--- a/lib/log_utils/print_logger.py
+++ b/lib/log_utils/print_logger.py
@@ -1,8 +1,4 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
-import importlib, warnings
-import os, sys, time, numpy as np
+import os, sys, time
 
 
 class PrintLogger(object):
diff --git a/lib/models/CifarResNet.py b/lib/models/CifarResNet.py
index c99382b..36f7f57 100644
--- a/lib/models/CifarResNet.py
+++ b/lib/models/CifarResNet.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
diff --git a/lib/models/__init__.py b/lib/models/__init__.py
index fb761ff..7158fee 100644
--- a/lib/models/__init__.py
+++ b/lib/models/__init__.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import torch
 from os import path as osp
 
diff --git a/lib/models/cell_infers/cells.py b/lib/models/cell_infers/cells.py
index bdb6963..4cec78a 100644
--- a/lib/models/cell_infers/cells.py
+++ b/lib/models/cell_infers/cells.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import torch
 import torch.nn as nn
 from copy import deepcopy
diff --git a/lib/models/cell_infers/tiny_network.py b/lib/models/cell_infers/tiny_network.py
index 22619d4..eb5c38c 100644
--- a/lib/models/cell_infers/tiny_network.py
+++ b/lib/models/cell_infers/tiny_network.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import torch
 import torch.nn as nn
 from ..cell_operations import ResNetBasicblock
diff --git a/lib/models/cell_searchs/genotypes.py b/lib/models/cell_searchs/genotypes.py
index 1b4179d..a1c3fb6 100644
--- a/lib/models/cell_searchs/genotypes.py
+++ b/lib/models/cell_searchs/genotypes.py
@@ -60,6 +60,24 @@ class Structure:
       strings.append( string )
     return '+'.join(strings)
 
+  def to_unique_str(self, consider_zero=False):
+    # this is used to identify isomorphic cells, which requires prior knowledge of the operations
+    # two operations are special, i.e., none and skip_connect
+    nodes = {0: '0'}
+    for i_node, node_info in enumerate(self.nodes):
+      cur_node = []
+      for op, xin in node_info:
+        if consider_zero:
+          if op == 'none' or nodes[xin] == '#': x = '#' # zero
+          elif op == 'skip_connect': x = nodes[xin]
+          else: x = nodes[xin] + '@{:}'.format(op)
+        else:
+          if op == 'skip_connect': x = nodes[xin]
+          else: x = nodes[xin] + '@{:}'.format(op)
+        cur_node.append(x)
+      nodes[i_node+1] = '+'.join( sorted(cur_node) )
+    return nodes[ len(self.nodes) ]
+
   def check_valid_op(self, op_names):
     for node_info in self.nodes:
       for inode_edge in node_info:
diff --git a/lib/models/cell_searchs/search_model_darts_v1.py b/lib/models/cell_searchs/search_model_darts_v1.py
index 61ef8ea..2103ee6 100644
--- a/lib/models/cell_searchs/search_model_darts_v1.py
+++ b/lib/models/cell_searchs/search_model_darts_v1.py
@@ -1,5 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
 ########################################################
 # DARTS: Differentiable Architecture Search, ICLR 2019 #
 ########################################################
diff --git a/lib/models/cell_searchs/search_model_darts_v2.py b/lib/models/cell_searchs/search_model_darts_v2.py
index cb996ff..9928bb1 100644
--- a/lib/models/cell_searchs/search_model_darts_v2.py
+++ b/lib/models/cell_searchs/search_model_darts_v2.py
@@ -1,5 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
 ########################################################
 # DARTS: Differentiable Architecture Search, ICLR 2019 #
 ########################################################
diff --git a/lib/models/cell_searchs/search_model_enas.py b/lib/models/cell_searchs/search_model_enas.py
index 2422b52..b08bcb5 100644
--- a/lib/models/cell_searchs/search_model_enas.py
+++ b/lib/models/cell_searchs/search_model_enas.py
@@ -1,5 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
 ##########################################################################
 # Efficient Neural Architecture Search via Parameters Sharing, ICML 2018 #
 ##########################################################################
diff --git a/lib/models/cell_searchs/search_model_enas_utils.py b/lib/models/cell_searchs/search_model_enas_utils.py
index e03f57b..c87bcd0 100644
--- a/lib/models/cell_searchs/search_model_enas_utils.py
+++ b/lib/models/cell_searchs/search_model_enas_utils.py
@@ -1,5 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
 ##########################################################################
 # Efficient Neural Architecture Search via Parameters Sharing, ICML 2018 #
 ##########################################################################
diff --git a/lib/models/cell_searchs/search_model_gdas.py b/lib/models/cell_searchs/search_model_gdas.py
index 1db635f..6a4dd4e 100644
--- a/lib/models/cell_searchs/search_model_gdas.py
+++ b/lib/models/cell_searchs/search_model_gdas.py
@@ -1,5 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
 ###########################################################################
 # Searching for A Robust Neural Architecture in Four GPU Hours, CVPR 2019 #
 ###########################################################################
diff --git a/lib/models/cell_searchs/search_model_setn.py b/lib/models/cell_searchs/search_model_setn.py
index e968de1..316c88d 100644
--- a/lib/models/cell_searchs/search_model_setn.py
+++ b/lib/models/cell_searchs/search_model_setn.py
@@ -1,5 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
 ######################################################################################
 # One-Shot Neural Architecture Search via Self-Evaluated Template Network, ICCV 2019 #
 ######################################################################################
diff --git a/lib/models/shape_infers/shared_utils.py b/lib/models/shape_infers/shared_utils.py
index 33daaad..c29620c 100644
--- a/lib/models/shape_infers/shared_utils.py
+++ b/lib/models/shape_infers/shared_utils.py
@@ -1,5 +1,3 @@
-# Xuanyi Dong
-
 def parse_channel_info(xstring):
   blocks = xstring.split(' ')
   blocks = [x.split('-') for x in blocks]
diff --git a/lib/nas_infer_model/DXYs/genotypes.py b/lib/nas_infer_model/DXYs/genotypes.py
index 8c975ae..8e77b0a 100644
--- a/lib/nas_infer_model/DXYs/genotypes.py
+++ b/lib/nas_infer_model/DXYs/genotypes.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 from collections import namedtuple
 
 Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat connectN connects')
diff --git a/lib/nas_infer_model/__init__.py b/lib/nas_infer_model/__init__.py
index aeda5b5..5a77cce 100644
--- a/lib/nas_infer_model/__init__.py
+++ b/lib/nas_infer_model/__init__.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import torch
 
 def obtain_nas_infer_model(config):
diff --git a/lib/procedures/__init__.py b/lib/procedures/__init__.py
index 7950c66..b3aee8d 100644
--- a/lib/procedures/__init__.py
+++ b/lib/procedures/__init__.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 from .starts import prepare_seed, prepare_logger, get_machine_info, save_checkpoint, copy_checkpoint
 from .optimizers import get_optim_scheduler
 
diff --git a/lib/procedures/basic_main.py b/lib/procedures/basic_main.py
index ed8cea4..33a8ff4 100644
--- a/lib/procedures/basic_main.py
+++ b/lib/procedures/basic_main.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, torch
 from log_utils import AverageMeter, time_string
 from utils import obtain_accuracy
diff --git a/lib/procedures/optimizers.py b/lib/procedures/optimizers.py
index a3cb84a..4dd63fc 100644
--- a/lib/procedures/optimizers.py
+++ b/lib/procedures/optimizers.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import math, torch
 import torch.nn as nn
 from bisect import bisect_right
diff --git a/lib/procedures/search_main.py b/lib/procedures/search_main.py
index 48ed44f..d6808ee 100644
--- a/lib/procedures/search_main.py
+++ b/lib/procedures/search_main.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, torch
 from log_utils import AverageMeter, time_string
 from utils import obtain_accuracy
diff --git a/lib/procedures/search_main_v2.py b/lib/procedures/search_main_v2.py
index 46707ef..cd5d107 100644
--- a/lib/procedures/search_main_v2.py
+++ b/lib/procedures/search_main_v2.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, torch
 from log_utils import AverageMeter, time_string
 from utils import obtain_accuracy
diff --git a/lib/procedures/simple_KD_main.py b/lib/procedures/simple_KD_main.py
index 007b51c..20e2598 100644
--- a/lib/procedures/simple_KD_main.py
+++ b/lib/procedures/simple_KD_main.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 import os, sys, time, torch
 import torch.nn.functional as F
 # our modules
diff --git a/lib/utils/__init__.py b/lib/utils/__init__.py
index 6fc723d..ff4419f 100644
--- a/lib/utils/__init__.py
+++ b/lib/utils/__init__.py
@@ -1,6 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
 from .evaluation_utils import obtain_accuracy
 from .gpu_manager import GPUManager
 from .flop_benchmark import get_model_infos
diff --git a/lib/utils/flop_benchmark.py b/lib/utils/flop_benchmark.py
index 356b85f..749751e 100644
--- a/lib/utils/flop_benchmark.py
+++ b/lib/utils/flop_benchmark.py
@@ -1,7 +1,3 @@
-##################################################
-# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
-##################################################
-# modified from https://github.com/warmspringwinds/pytorch-segmentation-detection/blob/master/pytorch_segmentation_detection/utils/flops_benchmark.py
 import copy, torch
 import torch.nn as nn
 import numpy as np
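
Note on the new to_unique_str added in lib/models/cell_searchs/genotypes.py: it canonicalizes a cell by renaming every node to a string built from its sorted incoming edges, so cells that differ only in edge order (and, with consider_zero, only on dead none-paths) map to the same key. Below is a minimal standalone sketch of the same idea; to_unique_str_sketch and the two toy cells are illustrative stand-ins, not code from this repository.

  def to_unique_str_sketch(nodes, consider_zero=False):
    # nodes: one entry per node; each entry lists (op, input_node_index) edges
    names = {0: '0'}
    for i, node_info in enumerate(nodes):
      cur_node = []
      for op, xin in node_info:
        if consider_zero and (op == 'none' or names[xin] == '#'):
          x = '#'               # a 'none' edge makes the whole path dead
        elif op == 'skip_connect':
          x = names[xin]        # skip passes its input through unchanged
        else:
          x = names[xin] + '@{:}'.format(op)
        cur_node.append(x)
      names[i + 1] = '+'.join(sorted(cur_node))  # sorting removes edge-order dependence
    return names[len(nodes)]

  # two cells that differ only in the order of their edges canonicalize identically
  cell_a = [[('nor_conv_3x3', 0)], [('skip_connect', 0), ('nor_conv_1x1', 1)]]
  cell_b = [[('nor_conv_3x3', 0)], [('nor_conv_1x1', 1), ('skip_connect', 0)]]
  assert to_unique_str_sketch(cell_a) == to_unique_str_sketch(cell_b)

Sorting the per-node edge strings is what makes the representation order-invariant; this is why check_unique_arch reports two counts, since consider_zero=True additionally collapses architectures whose differences sit entirely on dead none-paths.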
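
Usage of the new test (a sketch, assuming a meta file was previously generated into ./output/AA-NAS-BENCH-4/ via the --mode meta entry point that this patch removes):

  python exps/AA-NAS-test-API.py

The script loads ./output/AA-NAS-BENCH-4/meta-node-4.pth, parses every architecture string with CellStructure.str2structure, and prints the number of unique architectures with and without considering zero.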