v2
autodl/__init__.py (new file, 1 line)
							| @@ -0,0 +1 @@ | ||||
|   | ||||
autodl/nas_201_api/__init__.py (new file, 11 lines)
							| @@ -0,0 +1,11 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.08 # | ||||
| ##################################################### | ||||
| from .api_utils import ArchResults, ResultsCount | ||||
| from .api_201 import NASBench201API | ||||
| from .api_301 import NASBench301API | ||||
|  | ||||
| # NAS_BENCH_201_API_VERSION="v1.1"  # [2020.02.25] | ||||
| # NAS_BENCH_201_API_VERSION="v1.2"  # [2020.03.09] | ||||
| # NAS_BENCH_201_API_VERSION="v1.3"  # [2020.03.16] | ||||
| NAS_BENCH_201_API_VERSION="v2.0"    # [2020.06.30] | ||||
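For orientation, here is a minimal usage sketch of the package this commit introduces. It assumes the NAS-Bench-201-v1_1-096897.pth benchmark file has already been downloaded into the directory that the TORCH_HOME environment variable points to (the file name and fallback path come from api_201.py below); the architecture count is the published size of the NAS-Bench-201 topology space.

    import os
    from autodl.nas_201_api import NASBench201API, NAS_BENCH_201_API_VERSION

    print(NAS_BENCH_201_API_VERSION)  # 'v2.0', as set above
    # Passing an explicit path; with file_path_or_dict=None the constructor
    # falls back to $TORCH_HOME/NAS-Bench-201-v1_1-096897.pth.
    api = NASBench201API(os.path.join(os.environ['TORCH_HOME'],
                                      'NAS-Bench-201-v1_1-096897.pth'))
    print(len(api))  # 15625 architectures in the topology search space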
autodl/nas_201_api/api_201.py (new file, 274 lines)
							| @@ -0,0 +1,274 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.08 # | ||||
| ############################################################################################ | ||||
| # NAS-Bench-201: Extending the Scope of Reproducible Neural Architecture Search, ICLR 2020 # | ||||
| ############################################################################################ | ||||
| # The history of benchmark files: | ||||
| # [2020.02.25] NAS-Bench-201-v1_0-e61699.pth : 6219 architectures are trained once, 1621 architectures are trained twice, 7785 architectures are trained three times. `LESS` only supports CIFAR10-VALID. | ||||
| # [2020.03.16] NAS-Bench-201-v1_1-096897.pth : 2225 architectures are trained once, 5439 architectures are trained twice, 7961 architectures are trained three times on all training sets. For the hyper-parameters with the total epochs of 12, each model is trained on CIFAR-10, CIFAR-100, ImageNet16-120 once, and is trained on CIFAR-10-VALID twice. | ||||
| # | ||||
| # I'm still actively enhancing this benchmark. Please feel free to contact me if you have any question w.r.t. NAS-Bench-201. | ||||
| # | ||||
| import os, copy, random, torch, numpy as np | ||||
| from pathlib import Path | ||||
| from typing import List, Text, Union, Dict, Optional | ||||
| from collections import OrderedDict, defaultdict | ||||
|  | ||||
| from .api_utils import ArchResults | ||||
| from .api_utils import NASBenchMetaAPI | ||||
| from .api_utils import remap_dataset_set_names | ||||
|  | ||||
|  | ||||
| ALL_BENCHMARK_FILES = ['NAS-Bench-201-v1_0-e61699.pth', 'NAS-Bench-201-v1_1-096897.pth'] | ||||
| ALL_ARCHIVE_DIRS = ['NAS-Bench-201-v1_1-archive'] | ||||
|  | ||||
|  | ||||
| def print_information(information, extra_info=None, show=False): | ||||
|   dataset_names = information.get_dataset_names() | ||||
|   strings = [information.arch_str, 'datasets : {:}, extra-info : {:}'.format(dataset_names, extra_info)] | ||||
|   def metric2str(loss, acc): | ||||
|     return 'loss = {:.3f}, top1 = {:.2f}%'.format(loss, acc) | ||||
|  | ||||
|   for ida, dataset in enumerate(dataset_names): | ||||
|     metric = information.get_compute_costs(dataset) | ||||
|     flop, param, latency = metric['flops'], metric['params'], metric['latency'] | ||||
|     str1 = '{:14s} FLOP={:6.2f} M, Params={:.3f} MB, latency={:} ms.'.format(dataset, flop, param, '{:.2f}'.format(latency*1000) if latency is not None and latency > 0 else None) | ||||
|     train_info = information.get_metrics(dataset, 'train') | ||||
|     if dataset == 'cifar10-valid': | ||||
|       valid_info = information.get_metrics(dataset, 'x-valid') | ||||
|       str2 = '{:14s} train : [{:}], valid : [{:}]'.format(dataset, metric2str(train_info['loss'], train_info['accuracy']), metric2str(valid_info['loss'], valid_info['accuracy'])) | ||||
|     elif dataset == 'cifar10': | ||||
|       test__info = information.get_metrics(dataset, 'ori-test') | ||||
|       str2 = '{:14s} train : [{:}], test  : [{:}]'.format(dataset, metric2str(train_info['loss'], train_info['accuracy']), metric2str(test__info['loss'], test__info['accuracy'])) | ||||
|     else: | ||||
|       valid_info = information.get_metrics(dataset, 'x-valid') | ||||
|       test__info = information.get_metrics(dataset, 'x-test') | ||||
|       str2 = '{:14s} train : [{:}], valid : [{:}], test : [{:}]'.format(dataset, metric2str(train_info['loss'], train_info['accuracy']), metric2str(valid_info['loss'], valid_info['accuracy']), metric2str(test__info['loss'], test__info['accuracy'])) | ||||
|     strings += [str1, str2] | ||||
|   if show: print('\n'.join(strings)) | ||||
|   return strings | ||||
|  | ||||
|  | ||||
| """ | ||||
| This is the class for the API of NAS-Bench-201. | ||||
| """ | ||||
| class NASBench201API(NASBenchMetaAPI): | ||||
|  | ||||
|   """ The initialization function that takes the dataset file path (or a dict loaded from that path) as input. """ | ||||
|   def __init__(self, file_path_or_dict: Optional[Union[Text, Dict]]=None, | ||||
|                verbose: bool=True): | ||||
|     self.filename = None | ||||
|     self.reset_time() | ||||
|     if file_path_or_dict is None: | ||||
|       file_path_or_dict = os.path.join(os.environ['TORCH_HOME'], ALL_BENCHMARK_FILES[-1]) | ||||
|       print ('Try to use the default NAS-Bench-201 path from {:}.'.format(file_path_or_dict)) | ||||
|     if isinstance(file_path_or_dict, str) or isinstance(file_path_or_dict, Path): | ||||
|       file_path_or_dict = str(file_path_or_dict) | ||||
|       if verbose: print('try to create the NAS-Bench-201 api from {:}'.format(file_path_or_dict)) | ||||
|       assert os.path.isfile(file_path_or_dict), 'invalid path : {:}'.format(file_path_or_dict) | ||||
|       self.filename = Path(file_path_or_dict).name | ||||
|       file_path_or_dict = torch.load(file_path_or_dict, map_location='cpu') | ||||
|     elif isinstance(file_path_or_dict, dict): | ||||
|       file_path_or_dict = copy.deepcopy(file_path_or_dict) | ||||
|     else: raise ValueError('invalid type : {:} not in [str, dict]'.format(type(file_path_or_dict))) | ||||
|     assert isinstance(file_path_or_dict, dict), 'It should be a dict instead of {:}'.format(type(file_path_or_dict)) | ||||
|     self.verbose = verbose # [TODO] a flag indicating whether to print more logs | ||||
|     keys = ('meta_archs', 'arch2infos', 'evaluated_indexes') | ||||
|     for key in keys: assert key in file_path_or_dict, 'Can not find key[{:}] in the dict'.format(key) | ||||
|     self.meta_archs = copy.deepcopy( file_path_or_dict['meta_archs'] ) | ||||
|     # This is a dict mapping each architecture to a dict, where the key is #epochs and the value is ArchResults | ||||
|     self.arch2infos_dict = OrderedDict() | ||||
|     self._avaliable_hps = set(['12', '200']) | ||||
|     for xkey in sorted(list(file_path_or_dict['arch2infos'].keys())): | ||||
|       all_info = file_path_or_dict['arch2infos'][xkey] | ||||
|       hp2archres = OrderedDict() | ||||
|       # self.arch2infos_less[xkey] = ArchResults.create_from_state_dict( all_info['less'] ) | ||||
|       # self.arch2infos_full[xkey] = ArchResults.create_from_state_dict( all_info['full'] ) | ||||
|       hp2archres['12'] = ArchResults.create_from_state_dict(all_info['less']) | ||||
|       hp2archres['200'] = ArchResults.create_from_state_dict(all_info['full']) | ||||
|       self.arch2infos_dict[xkey] = hp2archres | ||||
|     self.evaluated_indexes = sorted(list(file_path_or_dict['evaluated_indexes'])) | ||||
|     self.archstr2index = {} | ||||
|     for idx, arch in enumerate(self.meta_archs): | ||||
|       assert arch not in self.archstr2index, 'The {:}-th arch {:} is already in the dict ({:}).'.format(idx, arch, self.archstr2index[arch]) | ||||
|       self.archstr2index[ arch ] = idx | ||||
|  | ||||
|   def reload(self, archive_root: Text = None, index: int = None): | ||||
|     """Overwrite all information of the 'index'-th architecture in the search space. | ||||
|          It will load its data from 'archive_root'. | ||||
|     """ | ||||
|     if archive_root is None: | ||||
|       archive_root = os.path.join(os.environ['TORCH_HOME'], ALL_ARCHIVE_DIRS[-1]) | ||||
|     assert os.path.isdir(archive_root), 'invalid directory : {:}'.format(archive_root) | ||||
|     if index is None: | ||||
|       indexes = list(range(len(self))) | ||||
|     else: | ||||
|       indexes = [index] | ||||
|     for idx in indexes: | ||||
|       assert 0 <= idx < len(self.meta_archs), 'invalid index of {:}'.format(idx) | ||||
|       xfile_path = os.path.join(archive_root, '{:06d}-FULL.pth'.format(idx)) | ||||
|       assert os.path.isfile(xfile_path), 'invalid data path : {:}'.format(xfile_path) | ||||
|       xdata = torch.load(xfile_path, map_location='cpu') | ||||
|       assert isinstance(xdata, dict) and 'full' in xdata and 'less' in xdata, 'invalid format of data in {:}'.format(xfile_path) | ||||
|       hp2archres = OrderedDict() | ||||
|       hp2archres['12'] = ArchResults.create_from_state_dict(xdata['less']) | ||||
|       hp2archres['200'] = ArchResults.create_from_state_dict(xdata['full']) | ||||
|       self.arch2infos_dict[idx] = hp2archres | ||||
|  | ||||
|   def query_info_str_by_arch(self, arch, hp: Text='12'): | ||||
|     """ This function is used to query the information of a specific architecture | ||||
|         'arch' can be an architecture index or an architecture string | ||||
|         When hp=12, the hyper-parameters used to train a model are in 'configs/nas-benchmark/hyper-opts/12E.config' | ||||
|         When hp=200, the hyper-parameters used to train a model are in 'configs/nas-benchmark/hyper-opts/200E.config' | ||||
|         The difference between these two configurations is the number of training epochs. | ||||
|     """ | ||||
|     if self.verbose: | ||||
|       print('Call query_info_str_by_arch with arch={:} and hp={:}'.format(arch, hp)) | ||||
|     return self._query_info_str_by_arch(arch, hp, print_information) | ||||
|  | ||||
|   # obtain the metric for the `index`-th architecture | ||||
|   # `dataset` indicates the dataset: | ||||
|   #   'cifar10-valid'  : using the proposed train set of CIFAR-10 as the training set | ||||
|   #   'cifar10'        : using the proposed train+valid set of CIFAR-10 as the training set | ||||
|   #   'cifar100'       : using the proposed train set of CIFAR-100 as the training set | ||||
|   #   'ImageNet16-120' : using the proposed train set of ImageNet-16-120 as the training set | ||||
|   # `iepoch` indicates the index of training epochs from 0 to 11/199. | ||||
|   #   When iepoch=None, it will return the metric for the last training epoch | ||||
|   #   When iepoch=11, it will return the metric for the 11-th training epoch (starting from 0) | ||||
|   # `hp` indicates different hyper-parameters for training | ||||
|   #   When hp='12', it trains the network with 12 epochs and the LR decayed from 0.1 to 0 within 12 epochs | ||||
|   #   When hp='200', it trains the network with 200 epochs and the LR decayed from 0.1 to 0 within 200 epochs | ||||
|   # `is_random` | ||||
|   #   When is_random=True, the performance of a randomly selected trial will be returned | ||||
|   #   When is_random=False, the performance of all trials will be averaged. | ||||
|   def get_more_info(self, index, dataset, iepoch=None, hp='12', is_random=True): | ||||
|     if self.verbose: | ||||
|       print('Call the get_more_info function with index={:}, dataset={:}, iepoch={:}, hp={:}, and is_random={:}.'.format(index, dataset, iepoch, hp, is_random)) | ||||
|     index = self.query_index_by_arch(index)  # in case the input is an architecture string or an arch object rather than an index | ||||
|     if index not in self.arch2infos_dict: | ||||
|       raise ValueError('Did not find {:} from arch2infos_dict.'.format(index)) | ||||
|     archresult = self.arch2infos_dict[index][str(hp)] | ||||
|     # if randomly select one trial, select the seed at first | ||||
|     if isinstance(is_random, bool) and is_random: | ||||
|       seeds = archresult.get_dataset_seeds(dataset) | ||||
|       is_random = random.choice(seeds) | ||||
|     # collect the training information | ||||
|     train_info = archresult.get_metrics(dataset, 'train', iepoch=iepoch, is_random=is_random) | ||||
|     total = train_info['iepoch'] + 1 | ||||
|     xinfo = {'train-loss'    : train_info['loss'], | ||||
|              'train-accuracy': train_info['accuracy'], | ||||
|              'train-per-time': train_info['all_time'] / total if train_info['all_time'] is not None else None, | ||||
|              'train-all-time': train_info['all_time']} | ||||
|     # collect the evaluation information | ||||
|     if dataset == 'cifar10-valid': | ||||
|       valid_info = archresult.get_metrics(dataset, 'x-valid', iepoch=iepoch, is_random=is_random) | ||||
|       try: | ||||
|         test_info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random) | ||||
|       except: | ||||
|         test_info = None | ||||
|       valtest_info = None | ||||
|     else: | ||||
|       try: # collect results on the proposed test set | ||||
|         if dataset == 'cifar10': | ||||
|           test_info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random) | ||||
|         else: | ||||
|           test_info = archresult.get_metrics(dataset, 'x-test', iepoch=iepoch, is_random=is_random) | ||||
|       except: | ||||
|         test_info = None | ||||
|       try: # collect results on the proposed validation set | ||||
|         valid_info = archresult.get_metrics(dataset, 'x-valid', iepoch=iepoch, is_random=is_random) | ||||
|       except: | ||||
|         valid_info = None | ||||
|       try: | ||||
|         if dataset != 'cifar10': | ||||
|           valtest_info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random) | ||||
|         else: | ||||
|           valtest_info = None | ||||
|       except: | ||||
|         valtest_info = None | ||||
|     if valid_info is not None: | ||||
|       xinfo['valid-loss'] = valid_info['loss'] | ||||
|       xinfo['valid-accuracy'] = valid_info['accuracy'] | ||||
|       xinfo['valid-per-time'] = valid_info['all_time'] / total if valid_info['all_time'] is not None else None | ||||
|       xinfo['valid-all-time'] = valid_info['all_time'] | ||||
|     if test_info is not None: | ||||
|       xinfo['test-loss'] = test_info['loss'] | ||||
|       xinfo['test-accuracy'] = test_info['accuracy'] | ||||
|       xinfo['test-per-time'] = test_info['all_time'] / total if test_info['all_time'] is not None else None | ||||
|       xinfo['test-all-time'] = test_info['all_time'] | ||||
|     if valtest_info is not None: | ||||
|       xinfo['valtest-loss'] = valtest_info['loss'] | ||||
|       xinfo['valtest-accuracy'] = valtest_info['accuracy'] | ||||
|       xinfo['valtest-per-time'] = valtest_info['all_time'] / total if valtest_info['all_time'] is not None else None | ||||
|       xinfo['valtest-all-time'] = valtest_info['all_time'] | ||||
|     return xinfo | ||||
|  | ||||
|   def show(self, index: int = -1) -> None: | ||||
|     """This function will print the information of a specific (or all) architecture(s).""" | ||||
|     self._show(index, print_information) | ||||
|  | ||||
|   @staticmethod | ||||
|   def str2lists(arch_str: Text) -> List[tuple]: | ||||
|     """ | ||||
|     This function shows how to read the string-based architecture encoding. | ||||
|       It is the same as the `str2structure` func in `AutoDL-Projects/lib/models/cell_searchs/genotypes.py` | ||||
|  | ||||
|     :param | ||||
|       arch_str: the input is a string that indicates the architecture topology, such as | ||||
|                     |nor_conv_1x1~0|+|none~0|none~1|+|none~0|none~1|skip_connect~2| | ||||
|     :return: a list of tuples, where each tuple contains the (op, input_node_index) pairs of one node. | ||||
|  | ||||
|     :usage | ||||
|       arch = api.str2lists( '|nor_conv_1x1~0|+|none~0|none~1|+|none~0|none~1|skip_connect~2|' ) | ||||
|       print ('there are {:} nodes in this arch'.format(len(arch)+1)) # arch is a list | ||||
|       for i, node in enumerate(arch): | ||||
|         print('the {:}-th node is the sum of these {:} nodes with op: {:}'.format(i+1, len(node), node)) | ||||
|     """ | ||||
|     node_strs = arch_str.split('+') | ||||
|     genotypes = [] | ||||
|     for i, node_str in enumerate(node_strs): | ||||
|       inputs = list(filter(lambda x: x != '', node_str.split('|'))) | ||||
|       for xinput in inputs: assert len(xinput.split('~')) == 2, 'invalid input length : {:}'.format(xinput) | ||||
|       inputs = ( xi.split('~') for xi in inputs ) | ||||
|       input_infos = tuple( (op, int(IDX)) for (op, IDX) in inputs) | ||||
|       genotypes.append( input_infos ) | ||||
|     return genotypes | ||||
|  | ||||
|   @staticmethod | ||||
|   def str2matrix(arch_str: Text, | ||||
|                  search_space: List[Text] = ['none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3']) -> np.ndarray: | ||||
|     """ | ||||
|     This func shows how to convert the string-based architecture encoding to the encoding strategy in NAS-Bench-101. | ||||
|  | ||||
|     :param | ||||
|       arch_str: the input is a string that indicates the architecture topology, such as | ||||
|                     |nor_conv_1x1~0|+|none~0|none~1|+|none~0|none~1|skip_connect~2| | ||||
|       search_space: a list of operation string, the default list is the search space for NAS-Bench-201 | ||||
|         the default value should be consistent with this line https://github.com/D-X-Y/AutoDL-Projects/blob/master/lib/models/cell_operations.py#L24 | ||||
|     :return | ||||
|       the numpy matrix (2-D np.ndarray) representing the DAG of this architecture topology | ||||
|     :usage | ||||
|       matrix = api.str2matrix( '|nor_conv_1x1~0|+|none~0|none~1|+|none~0|none~1|skip_connect~2|' ) | ||||
|       This matrix is a 4-by-4 matrix representing a cell with 4 nodes (only the lower-left triangle is useful). | ||||
|          [ [0, 0, 0, 0],  # the first row represents the input (0-th) node | ||||
|            [2, 0, 0, 0],  # the second row represents the 1-st node, computed as 2-nd-op( 0-th node ) | ||||
|            [0, 0, 0, 0],  # the third row represents the 2-nd node, computed as 0-th-op( 0-th node ) + 0-th-op( 1-st node ) | ||||
|            [0, 0, 1, 0] ] # the fourth row represents the 3-rd node, computed as 0-th-op( 0-th node ) + 0-th-op( 1-st node ) + 1-st-op( 2-nd node ) | ||||
|       In the NAS-Bench-201 search space, the 0-th op is 'none', the 1-st op is 'skip_connect', | ||||
|          the 2-nd op is 'nor_conv_1x1', the 3-rd op is 'nor_conv_3x3', and the 4-th op is 'avg_pool_3x3'. | ||||
|     :(NOTE) | ||||
|       If a node has two input-edges from the same node, this function does not work: one edge will overwrite the other. | ||||
|     """ | ||||
|     node_strs = arch_str.split('+') | ||||
|     num_nodes = len(node_strs) + 1 | ||||
|     matrix = np.zeros((num_nodes, num_nodes)) | ||||
|     for i, node_str in enumerate(node_strs): | ||||
|       inputs = list(filter(lambda x: x != '', node_str.split('|'))) | ||||
|       for xinput in inputs: assert len(xinput.split('~')) == 2, 'invalid input length : {:}'.format(xinput) | ||||
|       for xi in inputs: | ||||
|         op, idx = xi.split('~') | ||||
|         if op not in search_space: raise ValueError('this op ({:}) is not in {:}'.format(op, search_space)) | ||||
|         op_idx, node_idx = search_space.index(op), int(idx) | ||||
|         matrix[i+1, node_idx] = op_idx | ||||
|     return matrix | ||||
|  | ||||
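A hedged sketch of the query path defined above, reusing the api object from the previous sketch; the architecture string is the example quoted in the query_index_by_arch docstring of api_utils.py, and the printed keys follow the xinfo dict built in get_more_info:

    arch = '|nor_conv_3x3~0|+|nor_conv_3x3~0|avg_pool_3x3~1|+|skip_connect~0|nor_conv_3x3~1|skip_connect~2|'
    index = api.query_index_by_arch(arch)  # -1 if the string is not in the space
    info = api.get_more_info(index, 'cifar10-valid', iepoch=None, hp='12', is_random=True)
    print(info['valid-accuracy'], info['train-all-time'])

    # The two static helpers need no benchmark file at all:
    nodes = NASBench201API.str2lists(arch)    # one tuple of (op, input_node_index) pairs per node
    matrix = NASBench201API.str2matrix(arch)  # 4x4 matrix of op indexes (lower-left triangle)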
autodl/nas_201_api/api_301.py (new file, 222 lines)
							| @@ -0,0 +1,222 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.06 # | ||||
| ############################################################################################ | ||||
| # NAS-Bench-301, coming soon. | ||||
| ############################################################################################ | ||||
| # The history of benchmark files: | ||||
| # [2020.06.30] NAS-Bench-301-v1_0 | ||||
| #  | ||||
| import os, copy, random, torch, numpy as np | ||||
| from pathlib import Path | ||||
| from typing import List, Text, Union, Dict, Optional | ||||
| from collections import OrderedDict, defaultdict | ||||
| from .api_utils import ArchResults | ||||
| from .api_utils import NASBenchMetaAPI | ||||
| from .api_utils import remap_dataset_set_names | ||||
|  | ||||
|  | ||||
| ALL_BENCHMARK_FILES = ['NAS-Bench-301-v1_0-363be7.pth'] | ||||
| ALL_ARCHIVE_DIRS = ['NAS-Bench-301-v1_0-archive'] | ||||
|  | ||||
|  | ||||
| def print_information(information, extra_info=None, show=False): | ||||
|   dataset_names = information.get_dataset_names() | ||||
|   strings = [information.arch_str, 'datasets : {:}, extra-info : {:}'.format(dataset_names, extra_info)] | ||||
|   def metric2str(loss, acc): | ||||
|     return 'loss = {:.3f} & top1 = {:.2f}%'.format(loss, acc) | ||||
|  | ||||
|   for ida, dataset in enumerate(dataset_names): | ||||
|     metric = information.get_compute_costs(dataset) | ||||
|     flop, param, latency = metric['flops'], metric['params'], metric['latency'] | ||||
|     str1 = '{:14s} FLOP={:6.2f} M, Params={:.3f} MB, latency={:} ms.'.format(dataset, flop, param, '{:.2f}'.format(latency*1000) if latency is not None and latency > 0 else None) | ||||
|     train_info = information.get_metrics(dataset, 'train') | ||||
|     if dataset == 'cifar10-valid': | ||||
|       valid_info = information.get_metrics(dataset, 'x-valid') | ||||
|       test__info = information.get_metrics(dataset, 'ori-test') | ||||
|       str2 = '{:14s} train : [{:}], valid : [{:}], test : [{:}]'.format( | ||||
|                 dataset, metric2str(train_info['loss'], train_info['accuracy']), | ||||
|                 metric2str(valid_info['loss'], valid_info['accuracy']), | ||||
|                 metric2str(test__info['loss'], test__info['accuracy'])) | ||||
|     elif dataset == 'cifar10': | ||||
|       test__info = information.get_metrics(dataset, 'ori-test') | ||||
|       str2 = '{:14s} train : [{:}], test  : [{:}]'.format(dataset, metric2str(train_info['loss'], train_info['accuracy']), metric2str(test__info['loss'], test__info['accuracy'])) | ||||
|     else: | ||||
|       valid_info = information.get_metrics(dataset, 'x-valid') | ||||
|       test__info = information.get_metrics(dataset, 'x-test') | ||||
|       str2 = '{:14s} train : [{:}], valid : [{:}], test : [{:}]'.format(dataset, metric2str(train_info['loss'], train_info['accuracy']), metric2str(valid_info['loss'], valid_info['accuracy']), metric2str(test__info['loss'], test__info['accuracy'])) | ||||
|     strings += [str1, str2] | ||||
|   if show: print('\n'.join(strings)) | ||||
|   return strings | ||||
|  | ||||
|  | ||||
| """ | ||||
| This is the class for the API of NAS-Bench-301. | ||||
| """ | ||||
| class NASBench301API(NASBenchMetaAPI): | ||||
|  | ||||
|   """ The initialization function that takes the dataset file path (or a dict loaded from that path) as input. """ | ||||
|   def __init__(self, file_path_or_dict: Optional[Union[Text, Dict]]=None, verbose: bool=True): | ||||
|     self.filename = None | ||||
|     self.reset_time() | ||||
|     if file_path_or_dict is None: | ||||
|       file_path_or_dict = os.path.join(os.environ['TORCH_HOME'], ALL_BENCHMARK_FILES[-1]) | ||||
|       print ('Try to use the default NAS-Bench-301 path from {:}.'.format(file_path_or_dict)) | ||||
|     if isinstance(file_path_or_dict, str) or isinstance(file_path_or_dict, Path): | ||||
|       file_path_or_dict = str(file_path_or_dict) | ||||
|       if verbose: print('try to create the NAS-Bench-301 api from {:}'.format(file_path_or_dict)) | ||||
|       assert os.path.isfile(file_path_or_dict), 'invalid path : {:}'.format(file_path_or_dict) | ||||
|       self.filename = Path(file_path_or_dict).name | ||||
|       file_path_or_dict = torch.load(file_path_or_dict, map_location='cpu') | ||||
|     elif isinstance(file_path_or_dict, dict): | ||||
|       file_path_or_dict = copy.deepcopy( file_path_or_dict ) | ||||
|     else: raise ValueError('invalid type : {:} not in [str, dict]'.format(type(file_path_or_dict))) | ||||
|     assert isinstance(file_path_or_dict, dict), 'It should be a dict instead of {:}'.format(type(file_path_or_dict)) | ||||
|     self.verbose = verbose # [TODO] a flag indicating whether to print more logs | ||||
|     keys = ('meta_archs', 'arch2infos', 'evaluated_indexes') | ||||
|     for key in keys: assert key in file_path_or_dict, 'Can not find key[{:}] in the dict'.format(key) | ||||
|     self.meta_archs = copy.deepcopy( file_path_or_dict['meta_archs'] ) | ||||
|     # This is a dict mapping each architecture to a dict, where the key is #epochs and the value is ArchResults | ||||
|     self.arch2infos_dict = OrderedDict() | ||||
|     self._avaliable_hps = set() | ||||
|     for xkey in sorted(list(file_path_or_dict['arch2infos'].keys())): | ||||
|       all_infos = file_path_or_dict['arch2infos'][xkey] | ||||
|       hp2archres = OrderedDict() | ||||
|       for hp_key, results in all_infos.items(): | ||||
|         hp2archres[hp_key] = ArchResults.create_from_state_dict(results) | ||||
|         self._avaliable_hps.add(hp_key)  # record the available hyper-parameters | ||||
|       self.arch2infos_dict[xkey] = hp2archres | ||||
|     self.evaluated_indexes = sorted(list(file_path_or_dict['evaluated_indexes'])) | ||||
|     self.archstr2index = {} | ||||
|     for idx, arch in enumerate(self.meta_archs): | ||||
|       assert arch not in self.archstr2index, 'The {:}-th arch {:} is already in the dict ({:}).'.format(idx, arch, self.archstr2index[arch]) | ||||
|       self.archstr2index[ arch ] = idx | ||||
|     if self.verbose: | ||||
|       print('Create NAS-Bench-301 done with {:}/{:} architectures available.'.format(len(self.evaluated_indexes), len(self.meta_archs))) | ||||
|  | ||||
|   def reload(self, archive_root: Text = None, index: int = None): | ||||
|     """Overwrite all information of the 'index'-th architecture in the search space, where the data will be loaded from 'archive_root'. | ||||
|        If index is None, overwrite all checkpoints. | ||||
|     """ | ||||
|     if self.verbose: | ||||
|       print('Call reload with archive_root={:} and index={:}'.format(archive_root, index)) | ||||
|     if archive_root is None: | ||||
|       archive_root = os.path.join(os.environ['TORCH_HOME'], ALL_ARCHIVE_DIRS[-1]) | ||||
|     assert os.path.isdir(archive_root), 'invalid directory : {:}'.format(archive_root) | ||||
|     if index is None: | ||||
|       indexes = list(range(len(self))) | ||||
|     else: | ||||
|       indexes = [index] | ||||
|     for idx in indexes: | ||||
|       assert 0 <= idx < len(self.meta_archs), 'invalid index of {:}'.format(idx) | ||||
|       xfile_path = os.path.join(archive_root, '{:06d}-FULL.pth'.format(idx)) | ||||
|       if not os.path.isfile(xfile_path): | ||||
|         xfile_path = os.path.join(archive_root, '{:d}-FULL.pth'.format(idx)) | ||||
|       assert os.path.isfile(xfile_path), 'invalid data path : {:}'.format(xfile_path) | ||||
|       xdata = torch.load(xfile_path, map_location='cpu') | ||||
|       assert isinstance(xdata, dict), 'invalid format of data in {:}'.format(xfile_path) | ||||
|  | ||||
|       hp2archres = OrderedDict() | ||||
|       for hp_key, results in xdata.items(): | ||||
|         hp2archres[hp_key] = ArchResults.create_from_state_dict(results) | ||||
|       self.arch2infos_dict[idx] = hp2archres | ||||
|  | ||||
|   def query_info_str_by_arch(self, arch, hp: Text='12'): | ||||
|     """ This function is used to query the information of a specific architecture | ||||
|         'arch' can be an architecture index or an architecture string | ||||
|         When hp=01, the hyper-parameters used to train a model are in 'configs/nas-benchmark/hyper-opts/01E.config' | ||||
|         When hp=12, the hyper-parameters used to train a model are in 'configs/nas-benchmark/hyper-opts/12E.config' | ||||
|         When hp=90, the hyper-parameters used to train a model are in 'configs/nas-benchmark/hyper-opts/90E.config' | ||||
|         The difference between these three configurations is the number of training epochs. | ||||
|     """ | ||||
|     if self.verbose: | ||||
|       print('Call query_info_str_by_arch with arch={:} and hp={:}'.format(arch, hp)) | ||||
|     return self._query_info_str_by_arch(arch, hp, print_information) | ||||
|  | ||||
|   def get_more_info(self, index, dataset: Text, iepoch=None, hp='12', is_random=True): | ||||
|     """This function will return the metric for the `index`-th architecture | ||||
|        `dataset` indicates the dataset: | ||||
|           'cifar10-valid'  : using the proposed train set of CIFAR-10 as the training set | ||||
|           'cifar10'        : using the proposed train+valid set of CIFAR-10 as the training set | ||||
|           'cifar100'       : using the proposed train set of CIFAR-100 as the training set | ||||
|           'ImageNet16-120' : using the proposed train set of ImageNet-16-120 as the training set | ||||
|         `iepoch` indicates the index of training epochs from 0 to 11/199. | ||||
|           When iepoch=None, it will return the metric for the last training epoch | ||||
|           When iepoch=11, it will return the metric for the 11-th training epoch (starting from 0) | ||||
|         `hp` indicates different hyper-parameters for training | ||||
|           When hp=01, it trains the network with 01 epochs and the LR decayed from 0.1 to 0 within 01 epochs | ||||
|           When hp=12, it trains the network with 12 epochs and the LR decayed from 0.1 to 0 within 12 epochs | ||||
|           When hp=90, it trains the network with 90 epochs and the LR decayed from 0.1 to 0 within 90 epochs | ||||
|         `is_random` | ||||
|           When is_random=True, the performance of a randomly selected trial will be returned | ||||
|           When is_random=False, the performance of all trials will be averaged. | ||||
|     """ | ||||
|     if self.verbose: | ||||
|       print('Call the get_more_info function with index={:}, dataset={:}, iepoch={:}, hp={:}, and is_random={:}.'.format(index, dataset, iepoch, hp, is_random)) | ||||
|     index = self.query_index_by_arch(index)  # in case the input is an architecture string or an arch object rather than an index | ||||
|     if index not in self.arch2infos_dict: | ||||
|       raise ValueError('Did not find {:} from arch2infos_dict.'.format(index)) | ||||
|     archresult = self.arch2infos_dict[index][str(hp)] | ||||
|     # if randomly select one trial, select the seed at first | ||||
|     if isinstance(is_random, bool) and is_random: | ||||
|       seeds = archresult.get_dataset_seeds(dataset) | ||||
|       is_random = random.choice(seeds) | ||||
|     # collect the training information | ||||
|     train_info = archresult.get_metrics(dataset, 'train', iepoch=iepoch, is_random=is_random) | ||||
|     total = train_info['iepoch'] + 1 | ||||
|     xinfo = {'train-loss'    : train_info['loss'], | ||||
|              'train-accuracy': train_info['accuracy'], | ||||
|              'train-per-time': train_info['all_time'] / total, | ||||
|              'train-all-time': train_info['all_time']} | ||||
|     # collect the evaluation information | ||||
|     if dataset == 'cifar10-valid': | ||||
|       valid_info = archresult.get_metrics(dataset, 'x-valid', iepoch=iepoch, is_random=is_random) | ||||
|       try: | ||||
|         test_info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random) | ||||
|       except: | ||||
|         test_info = None | ||||
|       valtest_info = None | ||||
|     else: | ||||
|       try: # collect results on the proposed test set | ||||
|         if dataset == 'cifar10': | ||||
|           test_info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random) | ||||
|         else: | ||||
|           test_info = archresult.get_metrics(dataset, 'x-test', iepoch=iepoch, is_random=is_random) | ||||
|       except: | ||||
|         test_info = None | ||||
|       try: # collect results on the proposed validation set | ||||
|         valid_info = archresult.get_metrics(dataset, 'x-valid', iepoch=iepoch, is_random=is_random) | ||||
|       except: | ||||
|         valid_info = None | ||||
|       try: | ||||
|         if dataset != 'cifar10': | ||||
|           valtest_info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random) | ||||
|         else: | ||||
|           valtest_info = None | ||||
|       except: | ||||
|         valtest_info = None | ||||
|     if valid_info is not None: | ||||
|       xinfo['valid-loss'] = valid_info['loss'] | ||||
|       xinfo['valid-accuracy'] = valid_info['accuracy'] | ||||
|       xinfo['valid-per-time'] = valid_info['all_time'] / total | ||||
|       xinfo['valid-all-time'] = valid_info['all_time'] | ||||
|     if test_info is not None: | ||||
|       xinfo['test-loss'] = test_info['loss'] | ||||
|       xinfo['test-accuracy'] = test_info['accuracy'] | ||||
|       xinfo['test-per-time'] = test_info['all_time'] / total | ||||
|       xinfo['test-all-time'] = test_info['all_time'] | ||||
|     if valtest_info is not None: | ||||
|       xinfo['valtest-loss'] = valtest_info['loss'] | ||||
|       xinfo['valtest-accuracy'] = valtest_info['accuracy'] | ||||
|       xinfo['valtest-per-time'] = valtest_info['all_time'] / total | ||||
|       xinfo['valtest-all-time'] = valtest_info['all_time'] | ||||
|     return xinfo | ||||
|  | ||||
|   def show(self, index: int = -1) -> None: | ||||
|     """ | ||||
|     This function will print the information of a specific (or all) architecture(s). | ||||
|  | ||||
|     :param index: if index < 0, loop over all architectures and print their information one by one; | ||||
|                   otherwise, print the information of the 'index'-th architecture. | ||||
|     :return: nothing | ||||
|     """ | ||||
|     self._show(index, print_information) | ||||
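A sketch of the simulated random-search loop that NASBenchMetaAPI.simulate_train_eval (defined in api_utils.py below) enables on top of this class; the benchmark file name comes from ALL_BENCHMARK_FILES above, and hp='12' assumes that hyper-parameter key is present in the loaded file:

    from autodl.nas_201_api import NASBench301API

    api301 = NASBench301API('NAS-Bench-301-v1_0-363be7.pth')
    api301.reset_time()
    best_acc, best_index = -1.0, -1
    for _ in range(100):  # a budget of 100 random samples
      index = api301.random()
      acc, latency, step_time, total_time = api301.simulate_train_eval(
          index, 'cifar10', hp='12')
      if acc > best_acc:
        best_acc, best_index = acc, index
    print(best_index, best_acc, total_time)  # total_time is simulated seconds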
autodl/nas_201_api/api_utils.py (new file, 750 lines)
							| @@ -0,0 +1,750 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.08 # | ||||
| ############################################################################################ | ||||
| # NAS-Bench-201: Extending the Scope of Reproducible Neural Architecture Search, ICLR 2020 # | ||||
| ############################################################################################ | ||||
| # In this Python file, we define NASBenchMetaAPI, the abstract class for benchmark APIs. | ||||
| # We also define the class ArchResults, which contains all information of a single architecture trained by one kind of hyper-parameters on three datasets. | ||||
| # We also define the class ResultsCount, which contains all information of a single trial for a single architecture. | ||||
| ############################################################################################ | ||||
| # History: | ||||
| # [2020.06.30] The first version. | ||||
| # | ||||
| import os, abc, copy, random, torch, numpy as np | ||||
| from pathlib import Path | ||||
| from typing import List, Text, Union, Dict, Optional | ||||
| from collections import OrderedDict, defaultdict | ||||
|  | ||||
|  | ||||
| def remap_dataset_set_names(dataset, metric_on_set, verbose=False): | ||||
|   """re-map the metric_on_set to internal keys""" | ||||
|   if verbose: | ||||
|     print('Call internal function _remap_dataset_set_names with dataset={:} and metric_on_set={:}'.format(dataset, metric_on_set)) | ||||
|   if dataset == 'cifar10' and metric_on_set == 'valid': | ||||
|     dataset, metric_on_set = 'cifar10-valid', 'x-valid' | ||||
|   elif dataset == 'cifar10' and metric_on_set == 'test': | ||||
|     dataset, metric_on_set = 'cifar10', 'ori-test' | ||||
|   elif dataset == 'cifar10' and metric_on_set == 'train': | ||||
|     dataset, metric_on_set = 'cifar10', 'train' | ||||
|   elif (dataset == 'cifar100' or dataset == 'ImageNet16-120') and metric_on_set == 'valid': | ||||
|     metric_on_set = 'x-valid' | ||||
|   elif (dataset == 'cifar100' or dataset == 'ImageNet16-120') and metric_on_set == 'test': | ||||
|     metric_on_set = 'x-test' | ||||
|   if verbose: | ||||
|     print('  return dataset={:} and metric_on_set={:}'.format(dataset, metric_on_set)) | ||||
|   return dataset, metric_on_set | ||||
|  | ||||
|  | ||||
| class NASBenchMetaAPI(metaclass=abc.ABCMeta): | ||||
|  | ||||
|   @abc.abstractmethod | ||||
|   def __init__(self, file_path_or_dict: Optional[Union[Text, Dict]]=None, verbose: bool=True): | ||||
|     """The initialization function that takes the dataset file path (or a dict loaded from that path) as input.""" | ||||
|  | ||||
|   def __getitem__(self, index: int): | ||||
|     return copy.deepcopy(self.meta_archs[index]) | ||||
|  | ||||
|   def arch(self, index: int): | ||||
|     """Return the topology structure of the `index`-th architecture.""" | ||||
|     if self.verbose: | ||||
|       print('Call the arch function with index={:}'.format(index)) | ||||
|     assert 0 <= index < len(self.meta_archs), 'invalid index : {:} vs. {:}.'.format(index, len(self.meta_archs)) | ||||
|     return copy.deepcopy(self.meta_archs[index]) | ||||
|  | ||||
|   def __len__(self): | ||||
|     return len(self.meta_archs) | ||||
|  | ||||
|   def __repr__(self): | ||||
|     return ('{name}({num}/{total} architectures, file={filename})'.format(name=self.__class__.__name__, num=len(self.evaluated_indexes), total=len(self.meta_archs), filename=self.filename)) | ||||
|  | ||||
|   @property | ||||
|   def avaliable_hps(self): | ||||
|     return list(copy.deepcopy(self._avaliable_hps)) | ||||
|  | ||||
|   @property | ||||
|   def used_time(self): | ||||
|     return self._used_time | ||||
|  | ||||
|   def reset_time(self): | ||||
|     self._used_time = 0 | ||||
|  | ||||
|   def simulate_train_eval(self, arch, dataset, iepoch=None, hp='12', account_time=True): | ||||
|     index = self.query_index_by_arch(arch) | ||||
|     all_names = ('cifar10', 'cifar100', 'ImageNet16-120') | ||||
|     assert dataset in all_names, 'Invalid dataset name : {:} vs {:}'.format(dataset, all_names) | ||||
|     if dataset == 'cifar10': | ||||
|       info = self.get_more_info(index, 'cifar10-valid', iepoch=iepoch, hp=hp, is_random=True) | ||||
|     else: | ||||
|       info = self.get_more_info(index, dataset, iepoch=iepoch, hp=hp, is_random=True) | ||||
|     valid_acc, time_cost = info['valid-accuracy'], info['train-all-time'] + info['valid-per-time'] | ||||
|     latency = self.get_latency(index, dataset) | ||||
|     if account_time: | ||||
|       self._used_time += time_cost | ||||
|     return valid_acc, latency, time_cost, self._used_time | ||||
|  | ||||
|   def random(self): | ||||
|     """Return a random index of all architectures.""" | ||||
|     return random.randint(0, len(self.meta_archs)-1) | ||||
|  | ||||
|   def query_index_by_arch(self, arch): | ||||
|     """ This function is used to query the index of an architecture in the search space. | ||||
|         In the topology search space, the input arch can be an architecture string such as '|nor_conv_3x3~0|+|nor_conv_3x3~0|avg_pool_3x3~1|+|skip_connect~0|nor_conv_3x3~1|skip_connect~2|'; | ||||
|           or an instance that has the 'tostr' function that can generate the architecture string; | ||||
|           or it is directly an architecture index, in this case, we will check whether it is valid or not. | ||||
|         This function will return the index. | ||||
|         If return -1, it means this architecture is not in the search space. | ||||
|         Otherwise, it will return an int in [0, the-number-of-candidates-in-the-search-space). | ||||
|     """ | ||||
|     if self.verbose: | ||||
|       print('Call query_index_by_arch with arch={:}'.format(arch)) | ||||
|     if isinstance(arch, int): | ||||
|       if 0 <= arch < len(self): | ||||
|         return arch | ||||
|       else: | ||||
|         raise ValueError('Invalid architecture index {:} vs [{:}, {:}].'.format(arch, 0, len(self))) | ||||
|     elif isinstance(arch, str): | ||||
|       if arch in self.archstr2index: arch_index = self.archstr2index[ arch ] | ||||
|       else                         : arch_index = -1 | ||||
|     elif hasattr(arch, 'tostr'): | ||||
|       if arch.tostr() in self.archstr2index: arch_index = self.archstr2index[ arch.tostr() ] | ||||
|       else                                 : arch_index = -1 | ||||
|     else: arch_index = -1 | ||||
|     return arch_index | ||||
|  | ||||
|   def query_by_arch(self, arch, hp): | ||||
|     # This is to make the current version be compatible with the old version. | ||||
|     return self.query_info_str_by_arch(arch, hp) | ||||
|  | ||||
|   @abc.abstractmethod | ||||
|   def reload(self, archive_root: Text = None, index: int = None): | ||||
|     """Overwrite all information of the 'index'-th architecture in the search space, where the data will be loaded from 'archive_root'. | ||||
|        If index is None, overwrite all checkpoints. | ||||
|     """ | ||||
|  | ||||
|   def clear_params(self, index: int, hp: Optional[Text]=None): | ||||
|     """Remove the architecture's weights to save memory. | ||||
|     :arg | ||||
|       index: the index of the target architecture | ||||
|       hp: a flag to control how to clear the parameters. | ||||
|         -- None: clear all the weights in '01'/'12'/'90', which indicates the number of training epochs. | ||||
|         -- '01' or '12' or '90': clear all the weights in arch2infos_dict[index][hp]. | ||||
|     """ | ||||
|     if self.verbose: | ||||
|       print('Call clear_params with index={:} and hp={:}'.format(index, hp)) | ||||
|     if hp is None: | ||||
|       for key, result in self.arch2infos_dict[index].items(): | ||||
|         result.clear_params() | ||||
|     else: | ||||
|       if str(hp) not in self.arch2infos_dict[index]: | ||||
|         raise ValueError('The {:}-th architecture only has hyper-parameters of {:} instead of {:}.'.format(index, list(self.arch2infos_dict[index].keys()), hp)) | ||||
|       self.arch2infos_dict[index][str(hp)].clear_params() | ||||
|  | ||||
|   @abc.abstractmethod | ||||
|   def query_info_str_by_arch(self, arch, hp: Text='12'): | ||||
|     """This function is used to query the information of a specific architecture.""" | ||||
|  | ||||
|   def _query_info_str_by_arch(self, arch, hp: Text='12', print_information=None): | ||||
|     arch_index = self.query_index_by_arch(arch) | ||||
|     if arch_index in self.arch2infos_dict: | ||||
|       if hp not in self.arch2infos_dict[arch_index]: | ||||
|         raise ValueError('The {:}-th architecture only has hyper-parameters of {:} instead of {:}.'.format(arch_index, list(self.arch2infos_dict[arch_index].keys()), hp)) | ||||
|       info = self.arch2infos_dict[arch_index][hp] | ||||
|       strings = print_information(info, 'arch-index={:}'.format(arch_index)) | ||||
|       return '\n'.join(strings) | ||||
|     else: | ||||
|       print('Found arch-index {:}, but this arch has not been evaluated.'.format(arch_index)) | ||||
|       return None | ||||
|  | ||||
|   def query_meta_info_by_index(self, arch_index, hp: Text = '12'): | ||||
|     """Return the ArchResults for the 'arch_index'-th architecture. This function is similar to query_by_index.""" | ||||
|     if self.verbose: | ||||
|       print('Call query_meta_info_by_index with arch_index={:}, hp={:}'.format(arch_index, hp)) | ||||
|     if arch_index in self.arch2infos_dict: | ||||
|       if hp not in self.arch2infos_dict[arch_index]: | ||||
|         raise ValueError('The {:}-th architecture only has hyper-parameters of {:} instead of {:}.'.format(arch_index, list(self.arch2infos_dict[arch_index].keys()), hp)) | ||||
|       info = self.arch2infos_dict[arch_index][hp] | ||||
|     else: | ||||
|       raise ValueError('arch_index [{:}] is not in arch2infos'.format(arch_index)) | ||||
|     return copy.deepcopy(info) | ||||
|  | ||||
|   def query_by_index(self, arch_index: int, dataname: Union[None, Text] = None, hp: Text = '12'): | ||||
|     """ This 'query_by_index' function is used to query information with the training of 01 epochs, 12 epochs, 90 epochs, or 200 epochs. | ||||
|         ------ | ||||
|         If hp=01, we train the model by 01 epochs (see config in configs/nas-benchmark/hyper-opts/01E.config) | ||||
|         If hp=12, we train the model by 12 epochs (see config in configs/nas-benchmark/hyper-opts/12E.config) | ||||
|         If hp=90, we train the model by 90 epochs (see config in configs/nas-benchmark/hyper-opts/90E.config) | ||||
|         If hp=200, we train the model by 200 epochs (see config in configs/nas-benchmark/hyper-opts/200E.config) | ||||
|         ------ | ||||
|         If dataname is None, return the ArchResults | ||||
|           else, return a dict with all trials on that dataset (the key is the seed) | ||||
|         Options are 'cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120'. | ||||
|         -- cifar10-valid : training the model on the CIFAR-10 training set. | ||||
|         -- cifar10 : training the model on the CIFAR-10 training + validation set. | ||||
|         -- cifar100 : training the model on the CIFAR-100 training set. | ||||
|         -- ImageNet16-120 : training the model on the ImageNet16-120 training set. | ||||
|     """ | ||||
|     if self.verbose: | ||||
|       print('Call query_by_index with arch_index={:}, dataname={:}, hp={:}'.format(arch_index, dataname, hp)) | ||||
|     info = self.query_meta_info_by_index(arch_index, hp) | ||||
|     if dataname is None: return info | ||||
|     else: | ||||
|       if dataname not in info.get_dataset_names(): | ||||
|         raise ValueError('invalid dataset-name : {:} vs. {:}'.format(dataname, info.get_dataset_names())) | ||||
|       return info.query(dataname) | ||||
|  | ||||
|   def find_best(self, dataset, metric_on_set, FLOP_max=None, Param_max=None, hp: Text = '12'): | ||||
|     """Find the architecture with the highest accuracy based on some constraints.""" | ||||
|     if self.verbose: | ||||
|       print('Call find_best with dataset={:}, metric_on_set={:}, hp={:} | with #FLOPs < {:} and #Params < {:}'.format(dataset, metric_on_set, hp, FLOP_max, Param_max)) | ||||
|     dataset, metric_on_set = remap_dataset_set_names(dataset, metric_on_set, self.verbose) | ||||
|     best_index, highest_accuracy = -1, None | ||||
|     for i, arch_index in enumerate(self.evaluated_indexes): | ||||
|       arch_info = self.arch2infos_dict[arch_index][hp] | ||||
|       info = arch_info.get_compute_costs(dataset)  # the information of costs | ||||
|       flop, param, latency = info['flops'], info['params'], info['latency'] | ||||
|       if FLOP_max  is not None and flop  > FLOP_max : continue | ||||
|       if Param_max is not None and param > Param_max: continue | ||||
|       xinfo = arch_info.get_metrics(dataset, metric_on_set)  # the information of loss and accuracy | ||||
|       loss, accuracy = xinfo['loss'], xinfo['accuracy'] | ||||
|       if best_index == -1: | ||||
|         best_index, highest_accuracy = arch_index, accuracy | ||||
|       elif highest_accuracy < accuracy: | ||||
|         best_index, highest_accuracy = arch_index, accuracy | ||||
|     if self.verbose: | ||||
|       print('  the best architecture : [{:}] {:} with accuracy={:.3f}%'.format(best_index, self.arch(best_index), highest_accuracy)) | ||||
|     return best_index, highest_accuracy | ||||
|  | ||||
|   def get_net_param(self, index, dataset, seed: Optional[int], hp: Text = '12'): | ||||
|     """ | ||||
|       This function is used to obtain the trained weights of the `index`-th architecture on `dataset` with the seed of `seed` | ||||
|       Args [seed]: | ||||
|         -- None : return a dict containing the trained weights of all trials, where each key is a seed and its corresponding value is the weights. | ||||
|         -- an integer : return the weights of a specific trial, whose seed is this integer. | ||||
|       Args [hp]: | ||||
|         -- 01 : train the model by 01 epochs | ||||
|         -- 12 : train the model by 12 epochs | ||||
|         -- 90 : train the model by 90 epochs | ||||
|         -- 200 : train the model by 200 epochs | ||||
|     """ | ||||
|     if self.verbose: | ||||
|       print('Call the get_net_param function with index={:}, dataset={:}, seed={:}, hp={:}'.format(index, dataset, seed, hp)) | ||||
|     info = self.query_meta_info_by_index(index, hp) | ||||
|     return info.get_net_param(dataset, seed) | ||||
|  | ||||
|   def get_net_config(self, index: int, dataset: Text): | ||||
|     """ | ||||
|       This function is used to obtain the configuration for the `index`-th architecture on `dataset`. | ||||
|       Args [dataset] (4 possible options): | ||||
|         -- cifar10-valid : training the model on the CIFAR-10 training set. | ||||
|         -- cifar10 : training the model on the CIFAR-10 training + validation set. | ||||
|         -- cifar100 : training the model on the CIFAR-100 training set. | ||||
|         -- ImageNet16-120 : training the model on the ImageNet16-120 training set. | ||||
|       This function will return a dict. | ||||
|       ========= Some examples for using this function: | ||||
|       config = api.get_net_config(128, 'cifar10') | ||||
|     """ | ||||
|     if self.verbose: | ||||
|       print('Call the get_net_config function with index={:}, dataset={:}.'.format(index, dataset)) | ||||
|     if index in self.arch2infos_dict: | ||||
|       info = self.arch2infos_dict[index] | ||||
|     else: | ||||
|       raise ValueError('The index={:} is not in arch2infos_dict.'.format(index)) | ||||
|     info = next(iter(info.values())) | ||||
|     results = info.query(dataset, None) | ||||
|     results = next(iter(results.values())) | ||||
|     return results.get_config(None) | ||||
|    | ||||
|   def get_cost_info(self, index: int, dataset: Text, hp: Text = '12') -> Dict[Text, float]: | ||||
|     """To obtain the cost metric for the `index`-th architecture on a dataset.""" | ||||
|     if self.verbose: | ||||
|       print('Call the get_cost_info function with index={:}, dataset={:}, and hp={:}.'.format(index, dataset, hp)) | ||||
|     info = self.query_meta_info_by_index(index, hp) | ||||
|     return info.get_compute_costs(dataset) | ||||
|  | ||||
|   def get_latency(self, index: int, dataset: Text, hp: Text = '12') -> float: | ||||
|     """ | ||||
|     To obtain the latency of the network (by default it will return the latency with the batch size of 256). | ||||
|     :param index: the index of the target architecture | ||||
|     :param dataset: the dataset name (cifar10-valid, cifar10, cifar100, ImageNet16-120) | ||||
|     :return: return a float value in seconds | ||||
|     """ | ||||
|     if self.verbose: | ||||
|       print('Call the get_latency function with index={:}, dataset={:}, and hp={:}.'.format(index, dataset, hp)) | ||||
|     cost_dict = self.get_cost_info(index, dataset, hp) | ||||
|     return cost_dict['latency'] | ||||
|  | ||||
|   @abc.abstractmethod | ||||
|   def show(self, index=-1): | ||||
|     """This function will print the information of a specific (or all) architecture(s).""" | ||||
|  | ||||
|   def _show(self, index=-1, print_information=None) -> None: | ||||
|     """ | ||||
|     This function will print the information of a specific (or all) architecture(s). | ||||
|  | ||||
|     :param index: if index < 0, loop over all architectures and print their information one by one; | ||||
|                   otherwise, print the information of the 'index'-th architecture. | ||||
|     :return: nothing | ||||
|     """ | ||||
|     if index < 0: # show all architectures | ||||
|       print(self) | ||||
|       for i, idx in enumerate(self.evaluated_indexes): | ||||
|         print('\n' + '-' * 10 + ' The ({:5d}/{:5d}) {:06d}-th architecture! '.format(i, len(self.evaluated_indexes), idx) + '-'*10) | ||||
|         print('arch : {:}'.format(self.meta_archs[idx])) | ||||
|         for key, result in self.arch2infos_dict[idx].items(): | ||||
|           strings = print_information(result) | ||||
|           print('>' * 40 + ' {:03d} epochs '.format(result.get_total_epoch()) + '>' * 40) | ||||
|           print('\n'.join(strings)) | ||||
|         print('<' * 40 + '------------' + '<' * 40) | ||||
|     else: | ||||
|       if 0 <= index < len(self.meta_archs): | ||||
|         if index not in self.evaluated_indexes: print('The {:}-th architecture has not been evaluated or not saved.'.format(index)) | ||||
|         else: | ||||
|           arch_info = self.arch2infos_dict[index] | ||||
|           for key, result in self.arch2infos_dict[index].items(): | ||||
|             strings = print_information(result) | ||||
|             print('>' * 40 + ' {:03d} epochs '.format(result.get_total_epoch()) + '>' * 40) | ||||
|             print('\n'.join(strings)) | ||||
|           print('<' * 40 + '------------' + '<' * 40) | ||||
|       else: | ||||
|         print('This index ({:}) is out of range (0~{:}).'.format(index, len(self.meta_archs))) | ||||
|  | ||||
|   def statistics(self, dataset: Text, hp: Union[Text, int]) -> Dict[int, int]: | ||||
|     """This function will count the number of total trials.""" | ||||
|     if self.verbose: | ||||
|       print('Call the statistics function with dataset={:} and hp={:}.'.format(dataset, hp)) | ||||
|     valid_datasets = ['cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120'] | ||||
|     if dataset not in valid_datasets: | ||||
|       raise ValueError('{:} not in {:}'.format(dataset, valid_datasets)) | ||||
|     nums, hp = defaultdict(lambda: 0), str(hp) | ||||
|     for index in range(len(self)): | ||||
|       archInfo = self.arch2infos_dict[index][hp] | ||||
|       dataset_seed = archInfo.dataset_seed | ||||
|       if dataset not in dataset_seed: | ||||
|         nums[0] += 1 | ||||
|       else: | ||||
|         nums[len(dataset_seed[dataset])] += 1 | ||||
|     return dict(nums) | ||||
|  | ||||
|  | ||||
| class ArchResults(object): | ||||
|  | ||||
|   def __init__(self, arch_index, arch_str): | ||||
|     self.arch_index   = int(arch_index) | ||||
|     self.arch_str     = copy.deepcopy(arch_str) | ||||
|     self.all_results  = dict() | ||||
|     self.dataset_seed = dict() | ||||
|     self.clear_net_done = False | ||||
|  | ||||
|   def get_compute_costs(self, dataset): | ||||
|     x_seeds = self.dataset_seed[dataset] | ||||
|     results = [self.all_results[ (dataset, seed) ] for seed in x_seeds] | ||||
|  | ||||
|     flops     = [result.flop for result in results] | ||||
|     params    = [result.params for result in results] | ||||
|     latencies = [result.get_latency() for result in results] | ||||
|     latencies = [x for x in latencies if x > 0] | ||||
|     mean_latency = np.mean(latencies) if len(latencies) > 0 else None | ||||
|     time_infos = defaultdict(list) | ||||
|     for result in results: | ||||
|       time_info = result.get_times() | ||||
|       for key, value in time_info.items(): time_infos[key].append( value ) | ||||
|       | ||||
|     info = {'flops'  : np.mean(flops), | ||||
|             'params' : np.mean(params), | ||||
|             'latency': mean_latency} | ||||
|     for key, value in time_infos.items(): | ||||
|       if len(value) > 0 and value[0] is not None: | ||||
|         info[key] = np.mean(value) | ||||
|       else: info[key] = None | ||||
|     return info | ||||
|  | ||||
|   def get_metrics(self, dataset, setname, iepoch=None, is_random=False): | ||||
|     """ | ||||
|       This `get_metrics` function is used to obtain the loss, accuracy, and related information on a specific dataset. | ||||
|       If not specified, each set refers to the proposed split in the NAS-Bench-201 paper. | ||||
|       If some args return None or raise an error, the corresponding information is not available. | ||||
|       ======================================== | ||||
|       Args [dataset] (4 possible options): | ||||
|         -- cifar10-valid : training the model on the CIFAR-10 training set. | ||||
|         -- cifar10 : training the model on the CIFAR-10 training + validation set. | ||||
|         -- cifar100 : training the model on the CIFAR-100 training set. | ||||
|         -- ImageNet16-120 : training the model on the ImageNet16-120 training set. | ||||
|       Args [setname] (each dataset has different setnames): | ||||
|         -- When dataset = cifar10-valid, you can use 'train', 'x-valid', 'ori-test' | ||||
|         ------ 'train' : the metric on the training set. | ||||
|         ------ 'x-valid' : the metric on the validation set. | ||||
|         ------ 'ori-test' : the metric on the test set. | ||||
|         -- When dataset = cifar10, you can use 'train', 'ori-test'. | ||||
|         ------ 'train' : the metric on the training + validation set. | ||||
|         ------ 'ori-test' : the metric on the test set. | ||||
|         -- When dataset = cifar100 or ImageNet16-120, you can use 'train', 'ori-test', 'x-valid', 'x-test' | ||||
|         ------ 'train' : the metric on the training set. | ||||
|         ------ 'x-valid' : the metric on the validation set. | ||||
|         ------ 'x-test' : the metric on the test set. | ||||
|         ------ 'ori-test' : the metric on the validation + test set. | ||||
|       Args [iepoch] (None, or an integer in [0, the-number-of-total-training-epochs)): | ||||
|         ------ None : return the metric after the last training epoch. | ||||
|         ------ an integer i : return the metric after the i-th training epoch. | ||||
|       Args [is_random]: | ||||
|         ------ True : return the metric of a randomly selected trial. | ||||
|         ------ False : return the averaged metric of all available trials. | ||||
|         ------ an integer indicating the 'seed' value : return the metric of a specific trial (whose random seed is 'is_random'). | ||||
|     """ | ||||
|     x_seeds = self.dataset_seed[dataset] | ||||
|     results = [self.all_results[ (dataset, seed) ] for seed in x_seeds] | ||||
|     infos   = defaultdict(list) | ||||
|     for result in results: | ||||
|       if setname == 'train': | ||||
|         info = result.get_train(iepoch) | ||||
|       else: | ||||
|         info = result.get_eval(setname, iepoch) | ||||
|       for key, value in info.items(): infos[key].append( value ) | ||||
|     return_info = dict() | ||||
|     if isinstance(is_random, bool) and is_random: # randomly select one | ||||
|       index = random.randint(0, len(results)-1) | ||||
|       for key, value in infos.items(): return_info[key] = value[index] | ||||
|     elif isinstance(is_random, bool) and not is_random: # average | ||||
|       for key, value in infos.items(): | ||||
|         if len(value) > 0 and value[0] is not None: | ||||
|           return_info[key] = np.mean(value) | ||||
|         else: return_info[key] = None | ||||
|     elif isinstance(is_random, int): # specify the seed | ||||
|       if is_random not in x_seeds: raise ValueError('can not find random seed ({:}) from {:}'.format(is_random, x_seeds)) | ||||
|       index = x_seeds.index(is_random) | ||||
|       for key, value in infos.items(): return_info[key] = value[index] | ||||
|     else: | ||||
|       raise ValueError('invalid value for is_random: {:}'.format(is_random)) | ||||
|     return return_info | ||||
|  | ||||
|   def show(self, is_print=False): | ||||
|     return print_information(self, None, is_print) | ||||
|  | ||||
|   def get_dataset_names(self): | ||||
|     return list(self.dataset_seed.keys()) | ||||
|  | ||||
|   def get_dataset_seeds(self, dataset): | ||||
|     return copy.deepcopy( self.dataset_seed[dataset] ) | ||||
|  | ||||
|   def get_net_param(self, dataset: Text, seed: Union[None, int] =None): | ||||
|     """ | ||||
|     This function will return the trained network's weights on the 'dataset'. | ||||
|     :arg | ||||
|       dataset: one of 'cifar10-valid', 'cifar10', 'cifar100', and 'ImageNet16-120'. | ||||
|       seed: an integer indicating the seed value, or None to return the weights of all trials. | ||||
|     """ | ||||
|     if seed is None: | ||||
|       x_seeds = self.dataset_seed[dataset] | ||||
|       return {seed: self.all_results[(dataset, seed)].get_net_param() for seed in x_seeds} | ||||
|     else: | ||||
|       xkey = (dataset, seed) | ||||
|       if xkey in self.all_results: | ||||
|         return self.all_results[xkey].get_net_param() | ||||
|       else: | ||||
|         raise ValueError('key={:} not in {:}'.format(xkey, list(self.all_results.keys()))) | ||||
|  | ||||
|   def reset_latency(self, dataset: Text, seed: Union[None, int], latency: float) -> None: | ||||
|     """This function is used to reset the latency in all corresponding ResultsCount(s).""" | ||||
|     if seed is None: | ||||
|       for seed in self.dataset_seed[dataset]: | ||||
|         self.all_results[(dataset, seed)].update_latency([latency]) | ||||
|     else: | ||||
|       self.all_results[(dataset, seed)].update_latency([latency]) | ||||
|  | ||||
|   def reset_pseudo_train_times(self, dataset: Text, seed: Union[None, int], estimated_per_epoch_time: float) -> None: | ||||
|     """This function is used to reset the train-times in all corresponding ResultsCount(s).""" | ||||
|     if seed is None: | ||||
|       for seed in self.dataset_seed[dataset]: | ||||
|         self.all_results[(dataset, seed)].reset_pseudo_train_times(estimated_per_epoch_time) | ||||
|     else: | ||||
|       self.all_results[(dataset, seed)].reset_pseudo_train_times(estimated_per_epoch_time) | ||||
|  | ||||
|   def reset_pseudo_eval_times(self, dataset: Text, seed: Union[None, int], eval_name: Text, estimated_per_epoch_time: float) -> None: | ||||
|     """This function is used to reset the eval-times in all corresponding ResultsCount(s).""" | ||||
|     if seed is None: | ||||
|       for seed in self.dataset_seed[dataset]: | ||||
|         self.all_results[(dataset, seed)].reset_pseudo_eval_times(eval_name, estimated_per_epoch_time) | ||||
|     else: | ||||
|       self.all_results[(dataset, seed)].reset_pseudo_eval_times(eval_name, estimated_per_epoch_time) | ||||
|  | ||||
|   def get_latency(self, dataset: Text) -> float: | ||||
|     """Get the latency of a model on the target dataset. [Timestamp: 2020.03.09]""" | ||||
|     latencies = [] | ||||
|     for seed in self.dataset_seed[dataset]: | ||||
|       latency = self.all_results[(dataset, seed)].get_latency() | ||||
|       if not isinstance(latency, float) or latency <= 0: | ||||
|         raise ValueError('invalid latency of {:} with seed={:} : {:}'.format(dataset, seed, latency)) | ||||
|       latencies.append(latency) | ||||
|     return sum(latencies) / len(latencies) | ||||
|  | ||||
|   def get_total_epoch(self, dataset=None): | ||||
|     """Return the total number of training epochs.""" | ||||
|     if dataset is None: | ||||
|       epochss = [] | ||||
|       for xdata, x_seeds in self.dataset_seed.items(): | ||||
|         epochss += [self.all_results[(xdata, seed)].get_total_epoch() for seed in x_seeds] | ||||
|     elif isinstance(dataset, str): | ||||
|       x_seeds = self.dataset_seed[dataset] | ||||
|       epochss = [self.all_results[(dataset, seed)].get_total_epoch() for seed in x_seeds] | ||||
|     else: | ||||
|       raise ValueError('invalid dataset={:}'.format(dataset)) | ||||
|     if len(set(epochss)) > 1: raise ValueError('Each trial must have the same number of training epochs : {:}'.format(epochss)) | ||||
|     return epochss[-1] | ||||
|  | ||||
|   def query(self, dataset, seed=None): | ||||
|     """Return the ResultsCount object (containing all information of a single trial) for 'dataset' and 'seed'""" | ||||
|     if seed is None: | ||||
|       x_seeds = self.dataset_seed[dataset] | ||||
|       return {seed: self.all_results[(dataset, seed)] for seed in x_seeds} | ||||
|     else: | ||||
|       return self.all_results[(dataset, seed)] | ||||
|  | ||||
|   def arch_idx_str(self): | ||||
|     return '{:06d}'.format(self.arch_index) | ||||
|  | ||||
|   def update(self, dataset_name, seed, result): | ||||
|     if dataset_name not in self.dataset_seed: | ||||
|       self.dataset_seed[dataset_name] = [] | ||||
|     assert seed not in self.dataset_seed[dataset_name], '{:}-th arch already has this seed ({:}) on {:}'.format(self.arch_index, seed, dataset_name) | ||||
|     self.dataset_seed[ dataset_name ].append( seed ) | ||||
|     self.dataset_seed[ dataset_name ] = sorted( self.dataset_seed[ dataset_name ] ) | ||||
|     assert (dataset_name, seed) not in self.all_results | ||||
|     self.all_results[ (dataset_name, seed) ] = result | ||||
|     self.clear_net_done = False | ||||
|  | ||||
|   def state_dict(self): | ||||
|     state_dict = dict() | ||||
|     for key, value in self.__dict__.items(): | ||||
|       if key == 'all_results': # contain the class of ResultsCount | ||||
|         xvalue = dict() | ||||
|         assert isinstance(value, dict), 'invalid type of value for {:} : {:}'.format(key, type(value)) | ||||
|         for _k, _v in value.items(): | ||||
|           assert isinstance(_v, ResultsCount), 'invalid type of value for {:}/{:} : {:}'.format(key, _k, type(_v)) | ||||
|           xvalue[_k] = _v.state_dict() | ||||
|       else: | ||||
|         xvalue = value | ||||
|       state_dict[key] = xvalue | ||||
|     return state_dict | ||||
|  | ||||
|   def load_state_dict(self, state_dict): | ||||
|     new_state_dict = dict() | ||||
|     for key, value in state_dict.items(): | ||||
|       if key == 'all_results': # to convert to the class of ResultsCount | ||||
|         xvalue = dict() | ||||
|         assert isinstance(value, dict), 'invalid type of value for {:} : {:}'.format(key, type(value)) | ||||
|         for _k, _v in value.items(): | ||||
|           xvalue[_k] = ResultsCount.create_from_state_dict(_v) | ||||
|       else: xvalue = value | ||||
|       new_state_dict[key] = xvalue | ||||
|     self.__dict__.update(new_state_dict) | ||||
|  | ||||
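|   # Rebuild an ArchResults from an in-memory state_dict or from a .pth file produced | ||||
|   # by `state_dict()` above, e.g. ArchResults.create_from_state_dict('arch.pth') | ||||
|   # (the file name is illustrative). | ||||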
|   @staticmethod | ||||
|   def create_from_state_dict(state_dict_or_file): | ||||
|     x = ArchResults(-1, -1) | ||||
|     if isinstance(state_dict_or_file, str): # a file path | ||||
|       state_dict = torch.load(state_dict_or_file, map_location='cpu') | ||||
|     elif isinstance(state_dict_or_file, dict): | ||||
|       state_dict = state_dict_or_file | ||||
|     else: | ||||
|       raise ValueError('invalid type of state_dict_or_file : {:}'.format(type(state_dict_or_file))) | ||||
|     x.load_state_dict(state_dict) | ||||
|     return x | ||||
|  | ||||
|   # This function is used to clear the weights saved in each 'result' | ||||
|   # This can help reduce the memory footprint. | ||||
|   def clear_params(self): | ||||
|     for key, result in self.all_results.items(): | ||||
|       del result.net_state_dict | ||||
|       result.net_state_dict = None | ||||
|     self.clear_net_done = True | ||||
|  | ||||
|   def debug_test(self): | ||||
|     """This function is used for me to debug and test, which will call most methods.""" | ||||
|     all_dataset = ['cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120'] | ||||
|     for dataset in all_dataset: | ||||
|       print('---->>>> {:}'.format(dataset)) | ||||
|       print('The latency on {:} is {:} s'.format(dataset, self.get_latency(dataset))) | ||||
|       for seed in self.dataset_seed[dataset]: | ||||
|         result = self.all_results[(dataset, seed)] | ||||
|         print('  ==>> result = {:}'.format(result)) | ||||
|         print('  ==>> cost = {:}'.format(result.get_times())) | ||||
|  | ||||
|   def __repr__(self): | ||||
|     return ('{name}(arch-index={index}, arch={arch}, {num} runs, clear={clear})'.format(name=self.__class__.__name__, index=self.arch_index, arch=self.arch_str, num=len(self.all_results), clear=self.clear_net_done)) | ||||
|  | ||||
|  | ||||
| """ | ||||
| This class (ResultsCount) is used to save the information of one trial for a single architecture. | ||||
| I did not write many comments for this class, because it is the lowest-level class in the NAS-Bench-201 API and will rarely be called directly. | ||||
| If you have any questions regarding this class, please open an issue or email me. | ||||
| """ | ||||
| class ResultsCount(object): | ||||
|  | ||||
|   def __init__(self, name, state_dict, train_accs, train_losses, params, flop, arch_config, seed, epochs, latency): | ||||
|     self.name           = name | ||||
|     self.net_state_dict = state_dict | ||||
|     self.train_acc1es = copy.deepcopy(train_accs) | ||||
|     self.train_acc5es = None | ||||
|     self.train_losses = copy.deepcopy(train_losses) | ||||
|     self.train_times  = None | ||||
|     self.arch_config  = copy.deepcopy(arch_config) | ||||
|     self.params     = params | ||||
|     self.flop       = flop | ||||
|     self.seed       = seed | ||||
|     self.epochs     = epochs | ||||
|     self.latency    = latency | ||||
|     # evaluation results | ||||
|     self.reset_eval() | ||||
|  | ||||
|   def update_train_info(self, train_acc1es, train_acc5es, train_losses, train_times) -> None: | ||||
|     self.train_acc1es = train_acc1es | ||||
|     self.train_acc5es = train_acc5es | ||||
|     self.train_losses = train_losses | ||||
|     self.train_times  = train_times | ||||
|  | ||||
|   def reset_pseudo_train_times(self, estimated_per_epoch_time: float) -> None: | ||||
|     """Assign the training times.""" | ||||
|     train_times = OrderedDict() | ||||
|     for i in range(self.epochs): | ||||
|       train_times[i] = estimated_per_epoch_time | ||||
|     self.train_times = train_times | ||||
|  | ||||
|   def reset_pseudo_eval_times(self, eval_name: Text, estimated_per_epoch_time: float) -> None: | ||||
|     """Assign the evaluation times.""" | ||||
|     if eval_name not in self.eval_names: raise ValueError('invalid eval name : {:}'.format(eval_name)) | ||||
|     for i in range(self.epochs): | ||||
|       self.eval_times['{:}@{:}'.format(eval_name,i)] = estimated_per_epoch_time | ||||
|  | ||||
|   def reset_eval(self): | ||||
|     self.eval_names  = [] | ||||
|     self.eval_acc1es = {} | ||||
|     self.eval_times  = {} | ||||
|     self.eval_losses = {} | ||||
|  | ||||
|   def update_latency(self, latency): | ||||
|     self.latency = copy.deepcopy( latency ) | ||||
|  | ||||
|   def get_latency(self) -> float: | ||||
|     """Return the latency value in seconds. -1 represents not avaliable ; otherwise it should be a float value""" | ||||
|     if self.latency is None: return -1.0 | ||||
|     else: return sum(self.latency) / len(self.latency) | ||||
|  | ||||
|   def update_eval(self, accs, losses, times):  # new version | ||||
|     data_names = set([x.split('@')[0] for x in accs.keys()]) | ||||
|     for data_name in data_names: | ||||
|       assert data_name not in self.eval_names, '{:} has already been added into eval-names'.format(data_name) | ||||
|       self.eval_names.append( data_name ) | ||||
|       for iepoch in range(self.epochs): | ||||
|         xkey = '{:}@{:}'.format(data_name, iepoch) | ||||
|         self.eval_acc1es[ xkey ] = accs[ xkey ] | ||||
|         self.eval_losses[ xkey ] = losses[ xkey ] | ||||
|         self.eval_times [ xkey ] = times[ xkey ] | ||||
|  | ||||
|   def update_OLD_eval(self, name, accs, losses): # old version | ||||
|     assert name not in self.eval_names, '{:} has already been added'.format(name) | ||||
|     self.eval_names.append( name ) | ||||
|     for iepoch in range(self.epochs): | ||||
|       if iepoch in accs: | ||||
|         self.eval_acc1es['{:}@{:}'.format(name,iepoch)] = accs[iepoch] | ||||
|         self.eval_losses['{:}@{:}'.format(name,iepoch)] = losses[iepoch] | ||||
|  | ||||
|   def __repr__(self): | ||||
|     num_eval = len(self.eval_names) | ||||
|     set_name = '[' + ', '.join(self.eval_names) + ']' | ||||
|     return ('{name}({xname}, arch={arch}, FLOP={flop:.2f}M, Param={param:.3f}MB, seed={seed}, {num_eval} eval-sets: {set_name})'.format(name=self.__class__.__name__, xname=self.name, arch=self.arch_config['arch_str'], flop=self.flop, param=self.params, seed=self.seed, num_eval=num_eval, set_name=set_name)) | ||||
|  | ||||
|   def get_total_epoch(self): | ||||
|     return copy.deepcopy(self.epochs) | ||||
|  | ||||
|   def get_times(self): | ||||
|     """Obtain the information regarding both training and evaluation time.""" | ||||
|     if self.train_times is not None and isinstance(self.train_times, dict): | ||||
|       train_times = list( self.train_times.values() ) | ||||
|       time_info = {'T-train@epoch': np.mean(train_times), 'T-train@total': np.sum(train_times)} | ||||
|     else: | ||||
|       time_info = {'T-train@epoch':                 None, 'T-train@total':               None } | ||||
|     for name in self.eval_names: | ||||
|       try: | ||||
|         xtimes = [self.eval_times['{:}@{:}'.format(name,i)] for i in range(self.epochs)] | ||||
|         time_info['T-{:}@epoch'.format(name)] = np.mean(xtimes) | ||||
|         time_info['T-{:}@total'.format(name)] = np.sum(xtimes) | ||||
|       except Exception: | ||||
|         time_info['T-{:}@epoch'.format(name)] = None | ||||
|         time_info['T-{:}@total'.format(name)] = None | ||||
|     return time_info | ||||
|  | ||||
|   def get_eval_set(self): | ||||
|     return self.eval_names | ||||
|  | ||||
|   # get the training information | ||||
|   def get_train(self, iepoch=None): | ||||
|     if iepoch is None: iepoch = self.epochs-1 | ||||
|     assert 0 <= iepoch < self.epochs, 'invalid iepoch={:} (total epochs: {:})'.format(iepoch, self.epochs) | ||||
|     if self.train_times is not None: | ||||
|       xtime = self.train_times[iepoch] | ||||
|       atime = sum([self.train_times[i] for i in range(iepoch+1)]) | ||||
|     else: xtime, atime = None, None | ||||
|     return {'iepoch'  : iepoch, | ||||
|             'loss'    : self.train_losses[iepoch], | ||||
|             'accuracy': self.train_acc1es[iepoch], | ||||
|             'cur_time': xtime, | ||||
|             'all_time': atime} | ||||
|  | ||||
|   def get_eval(self, name, iepoch=None): | ||||
|     """Get the evaluation information ; there could be multiple evaluation sets (identified by the 'name' argument).""" | ||||
|     if iepoch is None: iepoch = self.epochs-1 | ||||
|     assert 0 <= iepoch < self.epochs, 'invalid iepoch={:} (total epochs: {:})'.format(iepoch, self.epochs) | ||||
|     def _internal_query(xname): | ||||
|       if isinstance(self.eval_times,dict) and len(self.eval_times) > 0: | ||||
|         xtime = self.eval_times['{:}@{:}'.format(xname, iepoch)] | ||||
|         atime = sum([self.eval_times['{:}@{:}'.format(xname, i)] for i in range(iepoch+1)]) | ||||
|       else: | ||||
|         xtime, atime = None, None | ||||
|       return {'iepoch'  : iepoch, | ||||
|               'loss'    : self.eval_losses['{:}@{:}'.format(xname, iepoch)], | ||||
|               'accuracy': self.eval_acc1es['{:}@{:}'.format(xname, iepoch)], | ||||
|               'cur_time': xtime, | ||||
|               'all_time': atime} | ||||
|     if name == 'valid': | ||||
|       return _internal_query('x-valid') | ||||
|     else: | ||||
|       return _internal_query(name) | ||||
|  | ||||
|   def get_net_param(self, clone=False): | ||||
|     if clone: return copy.deepcopy(self.net_state_dict) | ||||
|     else: return self.net_state_dict | ||||
|  | ||||
|   def get_config(self, str2structure): | ||||
|     """This function is used to obtain the config dict for this architecture.""" | ||||
|     if str2structure is None: | ||||
|       # In this case, this is NAS-Bench-301 | ||||
|       if 'name' in self.arch_config and self.arch_config['name'] == 'infer.shape.tiny': | ||||
|         return {'name': 'infer.shape.tiny', 'channels': self.arch_config['channels'], | ||||
|                 'genotype': self.arch_config['genotype'], 'num_classes': self.arch_config['class_num']} | ||||
|       # In this case, this is NAS-Bench-201 | ||||
|       else: | ||||
|         return {'name': 'infer.tiny', 'C': self.arch_config['channel'], | ||||
|                 'N'   : self.arch_config['num_cells'], | ||||
|                 'arch_str': self.arch_config['arch_str'], 'num_classes': self.arch_config['class_num']} | ||||
|     else: | ||||
|       # In this case, this is NAS-Bench-301 | ||||
|       if 'name' in self.arch_config and self.arch_config['name'] == 'infer.shape.tiny': | ||||
|         return {'name': 'infer.shape.tiny', 'channels': self.arch_config['channels'], | ||||
|                 'genotype': str2structure(self.arch_config['genotype']), 'num_classes': self.arch_config['class_num']} | ||||
|       # In this case, this is NAS-Bench-201 | ||||
|       else: | ||||
|         return {'name': 'infer.tiny', 'C': self.arch_config['channel'], | ||||
|                 'N'   : self.arch_config['num_cells'], | ||||
|                 'genotype': str2structure(self.arch_config['arch_str']), 'num_classes': self.arch_config['class_num']} | ||||
|  | ||||
|   def state_dict(self): | ||||
|     _state_dict = {key: value for key, value in self.__dict__.items()} | ||||
|     return _state_dict | ||||
|  | ||||
|   def load_state_dict(self, state_dict): | ||||
|     self.__dict__.update(state_dict) | ||||
|  | ||||
|   @staticmethod | ||||
|   def create_from_state_dict(state_dict): | ||||
|     x = ResultsCount(None, None, None, None, None, None, None, None, None, None) | ||||
|     x.load_state_dict(state_dict) | ||||
|     return x | ||||
25  autodl/procedures/__init__.py  Normal file
| @@ -0,0 +1,25 @@ | ||||
| ################################################## | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # | ||||
| ################################################## | ||||
| from .starts     import prepare_seed, prepare_logger, get_machine_info, save_checkpoint, copy_checkpoint | ||||
| from .optimizers import get_optim_scheduler | ||||
| from .funcs_nasbench import evaluate_for_seed as bench_evaluate_for_seed | ||||
| from .funcs_nasbench import pure_evaluate as bench_pure_evaluate | ||||
| from .funcs_nasbench import get_nas_bench_loaders | ||||
|  | ||||
| def get_procedures(procedure): | ||||
|   from .basic_main     import basic_train, basic_valid | ||||
|   from .search_main    import search_train, search_valid | ||||
|   from .search_main_v2 import search_train_v2 | ||||
|   from .simple_KD_main import simple_KD_train, simple_KD_valid | ||||
|  | ||||
|   train_funcs = {'basic' : basic_train, \ | ||||
|                  'search': search_train,'Simple-KD': simple_KD_train, \ | ||||
|                  'search-v2': search_train_v2} | ||||
|   valid_funcs = {'basic' : basic_valid, \ | ||||
|                  'search': search_valid,'Simple-KD': simple_KD_valid, \ | ||||
|                  'search-v2': search_valid} | ||||
|    | ||||
|   train_func  = train_funcs[procedure] | ||||
|   valid_func  = valid_funcs[procedure] | ||||
|   return train_func, valid_func | ||||
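| # Usage sketch: | ||||
| #   train_func, valid_func = get_procedures('basic') | ||||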
75  autodl/procedures/basic_main.py  Normal file
| @@ -0,0 +1,75 @@ | ||||
| ################################################## | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # | ||||
| ################################################## | ||||
| import os, sys, time, torch | ||||
| from autodl.log_utils import AverageMeter, time_string | ||||
| from autodl.utils     import obtain_accuracy | ||||
|  | ||||
|  | ||||
| def basic_train(xloader, network, criterion, scheduler, optimizer, optim_config, extra_info, print_freq, logger): | ||||
|   loss, acc1, acc5 = procedure(xloader, network, criterion, scheduler, optimizer, 'train', optim_config, extra_info, print_freq, logger) | ||||
|   return loss, acc1, acc5 | ||||
|  | ||||
|  | ||||
| def basic_valid(xloader, network, criterion, optim_config, extra_info, print_freq, logger): | ||||
|   with torch.no_grad(): | ||||
|     loss, acc1, acc5 = procedure(xloader, network, criterion, None, None, 'valid', None, extra_info, print_freq, logger) | ||||
|   return loss, acc1, acc5 | ||||
|  | ||||
|  | ||||
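| # Shared train/valid loop: `scheduler` and `optimizer` are only used when mode == 'train'; | ||||
| # the auxiliary-head loss is added only when `config.auxiliary` is present and positive. | ||||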
| def procedure(xloader, network, criterion, scheduler, optimizer, mode, config, extra_info, print_freq, logger): | ||||
|   data_time, batch_time, losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter() | ||||
|   if mode == 'train': | ||||
|     network.train() | ||||
|   elif mode == 'valid': | ||||
|     network.eval() | ||||
|   else: raise ValueError("The mode is not right : {:}".format(mode)) | ||||
|    | ||||
|   #logger.log('[{:5s}] config ::  auxiliary={:}, message={:}'.format(mode, config.auxiliary if hasattr(config, 'auxiliary') else -1, network.module.get_message())) | ||||
|   logger.log('[{:5s}] config ::  auxiliary={:}'.format(mode, config.auxiliary if hasattr(config, 'auxiliary') else -1)) | ||||
|   end = time.time() | ||||
|   for i, (inputs, targets) in enumerate(xloader): | ||||
|     if mode == 'train': scheduler.update(None, 1.0 * i / len(xloader)) | ||||
|     # measure data loading time | ||||
|     data_time.update(time.time() - end) | ||||
|     # calculate prediction and loss | ||||
|     targets = targets.cuda(non_blocking=True) | ||||
|  | ||||
|     if mode == 'train': optimizer.zero_grad() | ||||
|  | ||||
|     features, logits = network(inputs) | ||||
|     if isinstance(logits, list): | ||||
|       assert len(logits) == 2, 'logits must have {:} items instead of {:}'.format(2, len(logits)) | ||||
|       logits, logits_aux = logits | ||||
|     else: | ||||
|       logits, logits_aux = logits, None | ||||
|     loss             = criterion(logits, targets) | ||||
|     if config is not None and hasattr(config, 'auxiliary') and config.auxiliary > 0: | ||||
|       assert logits_aux is not None, 'the auxiliary loss is enabled but the network returned no auxiliary logits' | ||||
|       loss_aux = criterion(logits_aux, targets) | ||||
|       loss += config.auxiliary * loss_aux | ||||
|      | ||||
|     if mode == 'train': | ||||
|       loss.backward() | ||||
|       optimizer.step() | ||||
|  | ||||
|     # record | ||||
|     prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5)) | ||||
|     losses.update(loss.item(),  inputs.size(0)) | ||||
|     top1.update  (prec1.item(), inputs.size(0)) | ||||
|     top5.update  (prec5.item(), inputs.size(0)) | ||||
|  | ||||
|     # measure elapsed time | ||||
|     batch_time.update(time.time() - end) | ||||
|     end = time.time() | ||||
|  | ||||
|     if i % print_freq == 0 or (i+1) == len(xloader): | ||||
|       Sstr = ' {:5s} '.format(mode.upper()) + time_string() + ' [{:}][{:03d}/{:03d}]'.format(extra_info, i, len(xloader)) | ||||
|       if scheduler is not None: | ||||
|         Sstr += ' {:}'.format(scheduler.get_min_info()) | ||||
|       Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time) | ||||
|       Lstr = 'Loss {loss.val:.3f} ({loss.avg:.3f})  Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=losses, top1=top1, top5=top5) | ||||
|       Istr = 'Size={:}'.format(list(inputs.size())) | ||||
|       logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Istr) | ||||
|  | ||||
|   logger.log(' **{mode:5s}** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Loss:{loss:.3f}'.format(mode=mode.upper(), top1=top1, top5=top5, error1=100-top1.avg, error5=100-top5.avg, loss=losses.avg)) | ||||
|   return losses.avg, top1.avg, top5.avg | ||||
203  autodl/procedures/funcs_nasbench.py  Normal file
| @@ -0,0 +1,203 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.08 # | ||||
| ##################################################### | ||||
| import os, time, copy, torch, pathlib | ||||
|  | ||||
| import datasets | ||||
| from config_utils import load_config | ||||
| from autodl.procedures   import prepare_seed, get_optim_scheduler | ||||
| from autodl.utils        import get_model_infos, obtain_accuracy | ||||
| from autodl.log_utils    import AverageMeter, time_string, convert_secs2time | ||||
| from models       import get_cell_based_tiny_net | ||||
|  | ||||
|  | ||||
| __all__ = ['evaluate_for_seed', 'pure_evaluate', 'get_nas_bench_loaders'] | ||||
|  | ||||
|  | ||||
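| # Evaluation-only pass: returns (avg-loss, top-1, top-5, per-batch latencies); when more | ||||
| # than two batches were timed, the first latency sample is dropped as a warm-up measurement. | ||||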
| def pure_evaluate(xloader, network, criterion=torch.nn.CrossEntropyLoss()): | ||||
|   data_time, batch_time, batch = AverageMeter(), AverageMeter(), None | ||||
|   losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter() | ||||
|   latencies, device = [], torch.cuda.current_device() | ||||
|   network.eval() | ||||
|   with torch.no_grad(): | ||||
|     end = time.time() | ||||
|     for i, (inputs, targets) in enumerate(xloader): | ||||
|       targets = targets.cuda(device=device, non_blocking=True) | ||||
|       inputs  = inputs.cuda(device=device, non_blocking=True) | ||||
|       data_time.update(time.time() - end) | ||||
|       # forward | ||||
|       features, logits = network(inputs) | ||||
|       loss             = criterion(logits, targets) | ||||
|       batch_time.update(time.time() - end) | ||||
|       if batch is None or batch == inputs.size(0): | ||||
|         batch = inputs.size(0) | ||||
|         latencies.append( batch_time.val - data_time.val ) | ||||
|       # record loss and accuracy | ||||
|       prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5)) | ||||
|       losses.update(loss.item(),  inputs.size(0)) | ||||
|       top1.update  (prec1.item(), inputs.size(0)) | ||||
|       top5.update  (prec5.item(), inputs.size(0)) | ||||
|       end = time.time() | ||||
|   if len(latencies) > 2: latencies = latencies[1:] | ||||
|   return losses.avg, top1.avg, top5.avg, latencies | ||||
|  | ||||
|  | ||||
|  | ||||
| def procedure(xloader, network, criterion, scheduler, optimizer, mode: str): | ||||
|   losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter() | ||||
|   if mode == 'train'  : network.train() | ||||
|   elif mode == 'valid': network.eval() | ||||
|   else: raise ValueError("The mode is not right : {:}".format(mode)) | ||||
|   device = torch.cuda.current_device() | ||||
|   data_time, batch_time, end = AverageMeter(), AverageMeter(), time.time() | ||||
|   for i, (inputs, targets) in enumerate(xloader): | ||||
|     if mode == 'train': scheduler.update(None, 1.0 * i / len(xloader)) | ||||
|  | ||||
|     targets = targets.cuda(device=device, non_blocking=True) | ||||
|     if mode == 'train': optimizer.zero_grad() | ||||
|     # forward | ||||
|     features, logits = network(inputs) | ||||
|     loss             = criterion(logits, targets) | ||||
|     # backward | ||||
|     if mode == 'train': | ||||
|       loss.backward() | ||||
|       optimizer.step() | ||||
|     # record loss and accuracy | ||||
|     prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5)) | ||||
|     losses.update(loss.item(),  inputs.size(0)) | ||||
|     top1.update  (prec1.item(), inputs.size(0)) | ||||
|     top5.update  (prec5.item(), inputs.size(0)) | ||||
|     # count time | ||||
|     batch_time.update(time.time() - end) | ||||
|     end = time.time() | ||||
|   return losses.avg, top1.avg, top5.avg, batch_time.sum | ||||
|  | ||||
|  | ||||
| def evaluate_for_seed(arch_config, opt_config, train_loader, valid_loaders, seed: int, logger): | ||||
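|   # Train one architecture with one random seed and return a dict holding per-epoch | ||||
|   # losses/accuracies, timings, learning rates, and the final network weights. | ||||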
|  | ||||
|   prepare_seed(seed) # random seed | ||||
|   net = get_cell_based_tiny_net(arch_config) | ||||
|   #net = TinyNetwork(arch_config['channel'], arch_config['num_cells'], arch, config.class_num) | ||||
|   flop, param  = get_model_infos(net, opt_config.xshape) | ||||
|   logger.log('Network : {:}'.format(net.get_message()), False) | ||||
|   logger.log('{:} Seed-------------------------- {:} --------------------------'.format(time_string(), seed)) | ||||
|   logger.log('FLOP = {:} M, Param = {:} MB'.format(flop, param)) | ||||
|   # train and valid | ||||
|   optimizer, scheduler, criterion = get_optim_scheduler(net.parameters(), opt_config) | ||||
|   default_device = torch.cuda.current_device() | ||||
|   network = torch.nn.DataParallel(net, device_ids=[default_device]).cuda(device=default_device) | ||||
|   criterion = criterion.cuda(device=default_device) | ||||
|   # start training | ||||
|   start_time, epoch_time, total_epoch = time.time(), AverageMeter(), opt_config.epochs + opt_config.warmup | ||||
|   train_losses, train_acc1es, train_acc5es, valid_losses, valid_acc1es, valid_acc5es = {}, {}, {}, {}, {}, {} | ||||
|   train_times , valid_times, lrs = {}, {}, {} | ||||
|   for epoch in range(total_epoch): | ||||
|     scheduler.update(epoch, 0.0) | ||||
|     lr = min(scheduler.get_lr()) | ||||
|     train_loss, train_acc1, train_acc5, train_tm = procedure(train_loader, network, criterion, scheduler, optimizer, 'train') | ||||
|     train_losses[epoch] = train_loss | ||||
|     train_acc1es[epoch] = train_acc1  | ||||
|     train_acc5es[epoch] = train_acc5 | ||||
|     train_times [epoch] = train_tm | ||||
|     lrs[epoch] = lr | ||||
|     with torch.no_grad(): | ||||
|       for key, xloader in valid_loaders.items(): | ||||
|         valid_loss, valid_acc1, valid_acc5, valid_tm = procedure(xloader , network, criterion,      None,      None, 'valid') | ||||
|         valid_losses['{:}@{:}'.format(key,epoch)] = valid_loss | ||||
|         valid_acc1es['{:}@{:}'.format(key,epoch)] = valid_acc1  | ||||
|         valid_acc5es['{:}@{:}'.format(key,epoch)] = valid_acc5 | ||||
|         valid_times ['{:}@{:}'.format(key,epoch)] = valid_tm | ||||
|  | ||||
|     # measure elapsed time | ||||
|     epoch_time.update(time.time() - start_time) | ||||
|     start_time = time.time() | ||||
|     need_time = 'Time Left: {:}'.format( convert_secs2time(epoch_time.avg * (total_epoch-epoch-1), True) ) | ||||
|     logger.log('{:} {:} epoch={:03d}/{:03d} :: Train [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%] Valid [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%], lr={:}'.format(time_string(), need_time, epoch, total_epoch, train_loss, train_acc1, train_acc5, valid_loss, valid_acc1, valid_acc5, lr)) | ||||
|   info_seed = {'flop' : flop, | ||||
|                'param': param, | ||||
|                'arch_config' : arch_config._asdict(), | ||||
|                'opt_config'  : opt_config._asdict(), | ||||
|                'total_epoch' : total_epoch , | ||||
|                'train_losses': train_losses, | ||||
|                'train_acc1es': train_acc1es, | ||||
|                'train_acc5es': train_acc5es, | ||||
|                'train_times' : train_times, | ||||
|                'valid_losses': valid_losses, | ||||
|                'valid_acc1es': valid_acc1es, | ||||
|                'valid_acc5es': valid_acc5es, | ||||
|                'valid_times' : valid_times, | ||||
|                'learning_rates': lrs, | ||||
|                'net_state_dict': net.state_dict(), | ||||
|                'net_string'  : '{:}'.format(net), | ||||
|                'finish-train': True | ||||
|               } | ||||
|   return info_seed | ||||
|  | ||||
|  | ||||
| def get_nas_bench_loaders(workers): | ||||
|  | ||||
|   torch.set_num_threads(workers) | ||||
|  | ||||
|   root_dir  = (pathlib.Path(__file__).parent / '..' / '..').resolve() | ||||
|   torch_dir = pathlib.Path(os.environ['TORCH_HOME']) | ||||
|   # cifar | ||||
|   cifar_config_path = root_dir / 'configs' / 'nas-benchmark' / 'CIFAR.config' | ||||
|   cifar_config = load_config(cifar_config_path, None, None) | ||||
|   get_datasets = datasets.get_datasets  # a function to return the dataset | ||||
|   break_line = '-' * 150 | ||||
|   print ('{:} Create data-loader for all datasets'.format(time_string())) | ||||
|   print (break_line) | ||||
|   TRAIN_CIFAR10, VALID_CIFAR10, xshape, class_num = get_datasets('cifar10', str(torch_dir/'cifar.python'), -1) | ||||
|   print ('original CIFAR-10 : {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR10), len(VALID_CIFAR10), xshape, class_num)) | ||||
|   cifar10_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'cifar-split.txt', None, None) | ||||
|   assert cifar10_splits.train[:10] == [0, 5, 7, 11, 13, 15, 16, 17, 20, 24] and cifar10_splits.valid[:10] == [1, 2, 3, 4, 6, 8, 9, 10, 12, 14] | ||||
|   temp_dataset = copy.deepcopy(TRAIN_CIFAR10) | ||||
|   temp_dataset.transform = VALID_CIFAR10.transform | ||||
|   # data loader | ||||
|   trainval_cifar10_loader = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, shuffle=True , num_workers=workers, pin_memory=True) | ||||
|   train_cifar10_loader    = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.train), num_workers=workers, pin_memory=True) | ||||
|   valid_cifar10_loader    = torch.utils.data.DataLoader(temp_dataset , batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.valid), num_workers=workers, pin_memory=True) | ||||
|   test__cifar10_loader    = torch.utils.data.DataLoader(VALID_CIFAR10, batch_size=cifar_config.batch_size, shuffle=False, num_workers=workers, pin_memory=True) | ||||
|   print ('CIFAR-10  : trval-loader has {:3d} batch with {:} per batch'.format(len(trainval_cifar10_loader), cifar_config.batch_size)) | ||||
|   print ('CIFAR-10  : train-loader has {:3d} batch with {:} per batch'.format(len(train_cifar10_loader), cifar_config.batch_size)) | ||||
|   print ('CIFAR-10  : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_cifar10_loader), cifar_config.batch_size)) | ||||
|   print ('CIFAR-10  : test--loader has {:3d} batch with {:} per batch'.format(len(test__cifar10_loader), cifar_config.batch_size)) | ||||
|   print (break_line) | ||||
|   # CIFAR-100 | ||||
|   TRAIN_CIFAR100, VALID_CIFAR100, xshape, class_num = get_datasets('cifar100', str(torch_dir/'cifar.python'), -1) | ||||
|   print ('original CIFAR-100: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR100), len(VALID_CIFAR100), xshape, class_num)) | ||||
|   cifar100_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'cifar100-test-split.txt', None, None) | ||||
|   assert cifar100_splits.xvalid[:10] == [1, 3, 4, 5, 8, 10, 13, 14, 15, 16] and cifar100_splits.xtest[:10] == [0, 2, 6, 7, 9, 11, 12, 17, 20, 24] | ||||
|   train_cifar100_loader = torch.utils.data.DataLoader(TRAIN_CIFAR100, batch_size=cifar_config.batch_size, shuffle=True, num_workers=workers, pin_memory=True) | ||||
|   valid_cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xvalid), num_workers=workers, pin_memory=True) | ||||
|   test__cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xtest) , num_workers=workers, pin_memory=True) | ||||
|   print ('CIFAR-100  : train-loader has {:3d} batch'.format(len(train_cifar100_loader))) | ||||
|   print ('CIFAR-100  : valid-loader has {:3d} batch'.format(len(valid_cifar100_loader))) | ||||
|   print ('CIFAR-100  : test--loader has {:3d} batch'.format(len(test__cifar100_loader))) | ||||
|   print (break_line) | ||||
|  | ||||
|   imagenet16_config_path = root_dir / 'configs' / 'nas-benchmark' / 'ImageNet-16.config' | ||||
|   imagenet16_config = load_config(imagenet16_config_path, None, None) | ||||
|   TRAIN_ImageNet16_120, VALID_ImageNet16_120, xshape, class_num = get_datasets('ImageNet16-120', str(torch_dir/'cifar.python'/'ImageNet16'), -1) | ||||
|   print ('original TRAIN_ImageNet16_120: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_ImageNet16_120), len(VALID_ImageNet16_120), xshape, class_num)) | ||||
|   imagenet_splits = load_config(root_dir / 'configs' / 'nas-benchmark' / 'imagenet-16-120-test-split.txt', None, None) | ||||
|   assert imagenet_splits.xvalid[:10] == [1, 2, 3, 6, 7, 8, 9, 12, 16, 18] and imagenet_splits.xtest[:10] == [0, 4, 5, 10, 11, 13, 14, 15, 17, 20] | ||||
|   train_imagenet_loader = torch.utils.data.DataLoader(TRAIN_ImageNet16_120, batch_size=imagenet16_config.batch_size, shuffle=True, num_workers=workers, pin_memory=True) | ||||
|   valid_imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xvalid), num_workers=workers, pin_memory=True) | ||||
|   test__imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xtest) , num_workers=workers, pin_memory=True) | ||||
|   print ('ImageNet-16-120  : train-loader has {:3d} batch with {:} per batch'.format(len(train_imagenet_loader), imagenet16_config.batch_size)) | ||||
|   print ('ImageNet-16-120  : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_imagenet_loader), imagenet16_config.batch_size)) | ||||
|   print ('ImageNet-16-120  : test--loader has {:3d} batch with {:} per batch'.format(len(test__imagenet_loader), imagenet16_config.batch_size)) | ||||
|  | ||||
|   # 'cifar10', 'cifar100', 'ImageNet16-120' | ||||
|   loaders = {'cifar10@trainval': trainval_cifar10_loader, | ||||
|              'cifar10@train'   : train_cifar10_loader, | ||||
|              'cifar10@valid'   : valid_cifar10_loader, | ||||
|              'cifar10@test'    : test__cifar10_loader, | ||||
|              'cifar100@train'  : train_cifar100_loader, | ||||
|              'cifar100@valid'  : valid_cifar100_loader, | ||||
|              'cifar100@test'   : test__cifar100_loader, | ||||
|              'ImageNet16-120@train': train_imagenet_loader, | ||||
|              'ImageNet16-120@valid': valid_imagenet_loader, | ||||
|              'ImageNet16-120@test' : test__imagenet_loader} | ||||
|   return loaders | ||||
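| # Usage sketch (the worker count is illustrative): | ||||
| #   loaders = get_nas_bench_loaders(workers=4) | ||||
| #   for inputs, targets in loaders['cifar10@train']: ...  # keys follow '<dataset>@<split>' | ||||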
204  autodl/procedures/optimizers.py  Normal file
| @@ -0,0 +1,204 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # | ||||
| ##################################################### | ||||
| import math, torch | ||||
| import torch.nn as nn | ||||
| from bisect import bisect_right | ||||
| from torch.optim import Optimizer | ||||
|  | ||||
|  | ||||
| class _LRScheduler(object): | ||||
|  | ||||
|   def __init__(self, optimizer, warmup_epochs, epochs): | ||||
|     if not isinstance(optimizer, Optimizer): | ||||
|       raise TypeError('{:} is not an Optimizer'.format(type(optimizer).__name__)) | ||||
|     self.optimizer = optimizer | ||||
|     for group in optimizer.param_groups: | ||||
|       group.setdefault('initial_lr', group['lr']) | ||||
|     self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups)) | ||||
|     self.max_epochs = epochs | ||||
|     self.warmup_epochs  = warmup_epochs | ||||
|     self.current_epoch  = 0 | ||||
|     self.current_iter   = 0 | ||||
|  | ||||
|   def extra_repr(self): | ||||
|     return '' | ||||
|  | ||||
|   def __repr__(self): | ||||
|     return ('{name}(warmup={warmup_epochs}, max-epoch={max_epochs}, current::epoch={current_epoch}, iter={current_iter:.2f}'.format(name=self.__class__.__name__, **self.__dict__) | ||||
|               + ', {:})'.format(self.extra_repr())) | ||||
|  | ||||
|   def state_dict(self): | ||||
|     return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} | ||||
|  | ||||
|   def load_state_dict(self, state_dict): | ||||
|     self.__dict__.update(state_dict) | ||||
|  | ||||
|   def get_lr(self): | ||||
|     raise NotImplementedError | ||||
|  | ||||
|   def get_min_info(self): | ||||
|     lrs = self.get_lr() | ||||
|     return '#LR=[{:.6f}~{:.6f}] epoch={:03d}, iter={:4.2f}#'.format(min(lrs), max(lrs), self.current_epoch, self.current_iter) | ||||
|  | ||||
|   def get_min_lr(self): | ||||
|     return min( self.get_lr() ) | ||||
|  | ||||
|   def update(self, cur_epoch, cur_iter): | ||||
|     if cur_epoch is not None: | ||||
|       assert isinstance(cur_epoch, int) and cur_epoch>=0, 'invalid cur-epoch : {:}'.format(cur_epoch) | ||||
|       self.current_epoch = cur_epoch | ||||
|     if cur_iter is not None: | ||||
|       assert isinstance(cur_iter, float) and cur_iter>=0, 'invalid cur-iter : {:}'.format(cur_iter) | ||||
|       self.current_iter  = cur_iter | ||||
|     for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): | ||||
|       param_group['lr'] = lr | ||||
|  | ||||
|  | ||||
|  | ||||
| class CosineAnnealingLR(_LRScheduler): | ||||
|  | ||||
|   def __init__(self, optimizer, warmup_epochs, epochs, T_max, eta_min): | ||||
|     self.T_max = T_max | ||||
|     self.eta_min = eta_min | ||||
|     super(CosineAnnealingLR, self).__init__(optimizer, warmup_epochs, epochs) | ||||
|  | ||||
|   def extra_repr(self): | ||||
|     return 'type={:}, T-max={:}, eta-min={:}'.format('cosine', self.T_max, self.eta_min) | ||||
|  | ||||
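|   # During warmup (current_epoch < warmup_epochs) the LR ramps linearly from 0 to base_lr; | ||||
|   # afterwards it follows the standard cosine decay from base_lr down to eta_min over T_max epochs. | ||||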
|   def get_lr(self): | ||||
|     lrs = [] | ||||
|     for base_lr in self.base_lrs: | ||||
|       if self.current_epoch >= self.warmup_epochs and self.current_epoch < self.max_epochs: | ||||
|         last_epoch = self.current_epoch - self.warmup_epochs | ||||
|         #if last_epoch < self.T_max: | ||||
|         #if last_epoch < self.max_epochs: | ||||
|         lr = self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * last_epoch / self.T_max)) / 2 | ||||
|         #else: | ||||
|         #  lr = self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * (self.T_max-1.0) / self.T_max)) / 2 | ||||
|       elif self.current_epoch >= self.max_epochs: | ||||
|         lr = self.eta_min | ||||
|       else: | ||||
|         lr = (self.current_epoch / self.warmup_epochs + self.current_iter / self.warmup_epochs) * base_lr | ||||
|       lrs.append( lr ) | ||||
|     return lrs | ||||
|  | ||||
|  | ||||
|  | ||||
| class MultiStepLR(_LRScheduler): | ||||
|  | ||||
|   def __init__(self, optimizer, warmup_epochs, epochs, milestones, gammas): | ||||
|     assert len(milestones) == len(gammas), 'invalid {:} vs {:}'.format(len(milestones), len(gammas)) | ||||
|     self.milestones = milestones | ||||
|     self.gammas     = gammas | ||||
|     super(MultiStepLR, self).__init__(optimizer, warmup_epochs, epochs) | ||||
|  | ||||
|   def extra_repr(self): | ||||
|     return 'type={:}, milestones={:}, gammas={:}, base-lrs={:}'.format('multistep', self.milestones, self.gammas, self.base_lrs) | ||||
|  | ||||
|   def get_lr(self): | ||||
|     lrs = [] | ||||
|     for base_lr in self.base_lrs: | ||||
|       if self.current_epoch >= self.warmup_epochs: | ||||
|         last_epoch = self.current_epoch - self.warmup_epochs | ||||
|         idx = bisect_right(self.milestones, last_epoch) | ||||
|         lr = base_lr | ||||
|         for x in self.gammas[:idx]: lr *= x | ||||
|       else: | ||||
|         lr = (self.current_epoch / self.warmup_epochs + self.current_iter / self.warmup_epochs) * base_lr | ||||
|       lrs.append( lr ) | ||||
|     return lrs | ||||
|  | ||||
|  | ||||
| class ExponentialLR(_LRScheduler): | ||||
|  | ||||
|   def __init__(self, optimizer, warmup_epochs, epochs, gamma): | ||||
|     self.gamma      = gamma | ||||
|     super(ExponentialLR, self).__init__(optimizer, warmup_epochs, epochs) | ||||
|  | ||||
|   def extra_repr(self): | ||||
|     return 'type={:}, gamma={:}, base-lrs={:}'.format('exponential', self.gamma, self.base_lrs) | ||||
|  | ||||
|   def get_lr(self): | ||||
|     lrs = [] | ||||
|     for base_lr in self.base_lrs: | ||||
|       if self.current_epoch >= self.warmup_epochs: | ||||
|         last_epoch = self.current_epoch - self.warmup_epochs | ||||
|         assert last_epoch >= 0, 'invalid last_epoch : {:}'.format(last_epoch) | ||||
|         lr = base_lr * (self.gamma ** last_epoch) | ||||
|       else: | ||||
|         lr = (self.current_epoch / self.warmup_epochs + self.current_iter / self.warmup_epochs) * base_lr | ||||
|       lrs.append( lr ) | ||||
|     return lrs | ||||
|  | ||||
|  | ||||
| class LinearLR(_LRScheduler): | ||||
|  | ||||
|   def __init__(self, optimizer, warmup_epochs, epochs, max_LR, min_LR): | ||||
|     self.max_LR = max_LR | ||||
|     self.min_LR = min_LR | ||||
|     super(LinearLR, self).__init__(optimizer, warmup_epochs, epochs) | ||||
|  | ||||
|   def extra_repr(self): | ||||
|     return 'type={:}, max_LR={:}, min_LR={:}, base-lrs={:}'.format('LinearLR', self.max_LR, self.min_LR, self.base_lrs) | ||||
|  | ||||
|   def get_lr(self): | ||||
|     lrs = [] | ||||
|     for base_lr in self.base_lrs: | ||||
|       if self.current_epoch >= self.warmup_epochs: | ||||
|         last_epoch = self.current_epoch - self.warmup_epochs | ||||
|         assert last_epoch >= 0, 'invalid last_epoch : {:}'.format(last_epoch) | ||||
|         ratio = (self.max_LR - self.min_LR) * last_epoch / self.max_epochs / self.max_LR | ||||
|         lr = base_lr * (1-ratio) | ||||
|       else: | ||||
|         lr = (self.current_epoch / self.warmup_epochs + self.current_iter / self.warmup_epochs) * base_lr | ||||
|       lrs.append( lr ) | ||||
|     return lrs | ||||
|  | ||||
|  | ||||
|  | ||||
| class CrossEntropyLabelSmooth(nn.Module): | ||||
|  | ||||
|   def __init__(self, num_classes, epsilon): | ||||
|     super(CrossEntropyLabelSmooth, self).__init__() | ||||
|     self.num_classes = num_classes | ||||
|     self.epsilon = epsilon | ||||
|     self.logsoftmax = nn.LogSoftmax(dim=1) | ||||
|  | ||||
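|   # Standard label smoothing: targets become (1 - epsilon) * one_hot + epsilon / num_classes, | ||||
|   # and the loss is the cross-entropy against these softened targets. | ||||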
|   def forward(self, inputs, targets): | ||||
|     log_probs = self.logsoftmax(inputs) | ||||
|     targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1) | ||||
|     targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes | ||||
|     loss = (-targets * log_probs).mean(0).sum() | ||||
|     return loss | ||||
|  | ||||
|  | ||||
|  | ||||
| def get_optim_scheduler(parameters, config): | ||||
|   assert hasattr(config, 'optim') and hasattr(config, 'scheduler') and hasattr(config, 'criterion'), 'config must have optim / scheduler / criterion keys instead of {:}'.format(config) | ||||
|   if config.optim == 'SGD': | ||||
|     optim = torch.optim.SGD(parameters, config.LR, momentum=config.momentum, weight_decay=config.decay, nesterov=config.nesterov) | ||||
|   elif config.optim == 'RMSprop': | ||||
|     optim = torch.optim.RMSprop(parameters, config.LR, momentum=config.momentum, weight_decay=config.decay) | ||||
|   else: | ||||
|     raise ValueError('invalid optim : {:}'.format(config.optim)) | ||||
|  | ||||
|   if config.scheduler == 'cos': | ||||
|     T_max = getattr(config, 'T_max', config.epochs) | ||||
|     scheduler = CosineAnnealingLR(optim, config.warmup, config.epochs, T_max, config.eta_min) | ||||
|   elif config.scheduler == 'multistep': | ||||
|     scheduler = MultiStepLR(optim, config.warmup, config.epochs, config.milestones, config.gammas) | ||||
|   elif config.scheduler == 'exponential': | ||||
|     scheduler = ExponentialLR(optim, config.warmup, config.epochs, config.gamma) | ||||
|   elif config.scheduler == 'linear': | ||||
|     scheduler = LinearLR(optim, config.warmup, config.epochs, config.LR, config.LR_min) | ||||
|   else: | ||||
|     raise ValueError('invalid scheduler : {:}'.format(config.scheduler)) | ||||
|  | ||||
|   if config.criterion == 'Softmax': | ||||
|     criterion = torch.nn.CrossEntropyLoss() | ||||
|   elif config.criterion == 'SmoothSoftmax': | ||||
|     criterion = CrossEntropyLabelSmooth(config.class_num, config.label_smooth) | ||||
|   else: | ||||
|     raise ValueError('invalid criterion : {:}'.format(config.criterion)) | ||||
|   return optim, scheduler, criterion | ||||
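| # Usage sketch (assuming `config` provides the optim/scheduler/criterion fields asserted above): | ||||
| #   optimizer, scheduler, criterion = get_optim_scheduler(net.parameters(), config) | ||||
| #   scheduler.update(epoch, 0.0)  # then read the current LR via scheduler.get_min_lr() | ||||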
126  autodl/procedures/search_main.py  Normal file
| @@ -0,0 +1,126 @@ | ||||
| ################################################## | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # | ||||
| ################################################## | ||||
| import os, sys, time, torch | ||||
| from autodl.log_utils import AverageMeter, time_string | ||||
| from autodl.utils     import obtain_accuracy | ||||
| from models    import change_key | ||||
|  | ||||
|  | ||||
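| # FLOP-budget penalty: when the current FLOPs fall below the tolerated band, | ||||
| # -log(E[flop]) pushes the expected FLOPs up; when they exceed the target, | ||||
| # +log(E[flop]) pushes them down; within the band no penalty is applied. | ||||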
| def get_flop_loss(expected_flop, flop_cur, flop_need, flop_tolerant): | ||||
|   expected_flop = torch.mean( expected_flop ) | ||||
|  | ||||
|   if flop_cur < flop_need - flop_tolerant:   # Too Small FLOP | ||||
|     loss = - torch.log( expected_flop ) | ||||
|   #elif flop_cur > flop_need + flop_tolerant: # Too Large FLOP | ||||
|   elif flop_cur > flop_need: # Too Large FLOP | ||||
|     loss = torch.log( expected_flop ) | ||||
|   else: # Required FLOP | ||||
|     loss = None | ||||
|   if loss is None: return 0, 0 | ||||
|   else           : return loss, loss.item() | ||||
|  | ||||
|  | ||||
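| # One search epoch with an alternating bi-level update: each step first updates the | ||||
| # network weights on a `base` batch, then updates the architecture parameters on an | ||||
| # `arch` batch with the classification loss plus the weighted FLOP penalty above. | ||||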
| def search_train(search_loader, network, criterion, scheduler, base_optimizer, arch_optimizer, optim_config, extra_info, print_freq, logger): | ||||
|   data_time, batch_time = AverageMeter(), AverageMeter() | ||||
|   base_losses, arch_losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter() | ||||
|   arch_cls_losses, arch_flop_losses = AverageMeter(), AverageMeter() | ||||
|   epoch_str, flop_need, flop_weight, flop_tolerant = extra_info['epoch-str'], extra_info['FLOP-exp'], extra_info['FLOP-weight'], extra_info['FLOP-tolerant'] | ||||
|  | ||||
|   network.train() | ||||
|   logger.log('[Search] : {:}, FLOP-Require={:.2f} MB, FLOP-WEIGHT={:.2f}'.format(epoch_str, flop_need, flop_weight)) | ||||
|   end = time.time() | ||||
|   network.apply( change_key('search_mode', 'search') ) | ||||
|   for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(search_loader): | ||||
|     scheduler.update(None, 1.0 * step / len(search_loader)) | ||||
|     # calculate prediction and loss | ||||
|     base_targets = base_targets.cuda(non_blocking=True) | ||||
|     arch_targets = arch_targets.cuda(non_blocking=True) | ||||
|     # measure data loading time | ||||
|     data_time.update(time.time() - end) | ||||
|      | ||||
|     # update the weights | ||||
|     base_optimizer.zero_grad() | ||||
|     logits, expected_flop = network(base_inputs) | ||||
|     #network.apply( change_key('search_mode', 'basic') ) | ||||
|     #features, logits = network(base_inputs) | ||||
|     base_loss = criterion(logits, base_targets) | ||||
|     base_loss.backward() | ||||
|     base_optimizer.step() | ||||
|     # record | ||||
|     prec1, prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5)) | ||||
|     base_losses.update(base_loss.item(), base_inputs.size(0)) | ||||
|     top1.update       (prec1.item(), base_inputs.size(0)) | ||||
|     top5.update       (prec5.item(), base_inputs.size(0)) | ||||
|  | ||||
|     # update the architecture | ||||
|     arch_optimizer.zero_grad() | ||||
|     logits, expected_flop = network(arch_inputs) | ||||
|     flop_cur  = network.module.get_flop('genotype', None, None) | ||||
|     flop_loss, flop_loss_scale = get_flop_loss(expected_flop, flop_cur, flop_need, flop_tolerant) | ||||
|     acls_loss = criterion(logits, arch_targets) | ||||
|     arch_loss = acls_loss + flop_loss * flop_weight | ||||
|     arch_loss.backward() | ||||
|     arch_optimizer.step() | ||||
|    | ||||
|     # record | ||||
|     arch_losses.update(arch_loss.item(), arch_inputs.size(0)) | ||||
|     arch_flop_losses.update(flop_loss_scale, arch_inputs.size(0)) | ||||
|     arch_cls_losses.update (acls_loss.item(), arch_inputs.size(0)) | ||||
|      | ||||
|     # measure elapsed time | ||||
|     batch_time.update(time.time() - end) | ||||
|     end = time.time() | ||||
|     if step % print_freq == 0 or (step+1) == len(search_loader): | ||||
|       Sstr = '**TRAIN** ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(search_loader)) | ||||
|       Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time) | ||||
|       Lstr = 'Base-Loss {loss.val:.3f} ({loss.avg:.3f})  Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=base_losses, top1=top1, top5=top5) | ||||
|       Vstr = 'Acls-loss {aloss.val:.3f} ({aloss.avg:.3f}) FLOP-Loss {floss.val:.3f} ({floss.avg:.3f}) Arch-Loss {loss.val:.3f} ({loss.avg:.3f})'.format(aloss=arch_cls_losses, floss=arch_flop_losses, loss=arch_losses) | ||||
|       logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Vstr) | ||||
|       #Istr = 'Bsz={:} Asz={:}'.format(list(base_inputs.size()), list(arch_inputs.size())) | ||||
|       #logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Vstr + ' ' + Istr) | ||||
|       #print(network.module.get_arch_info()) | ||||
|       #print(network.module.width_attentions[0]) | ||||
|       #print(network.module.width_attentions[1]) | ||||
|  | ||||
|   logger.log(' **TRAIN** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Base-Loss:{baseloss:.3f}, Arch-Loss={archloss:.3f}'.format(top1=top1, top5=top5, error1=100-top1.avg, error5=100-top5.avg, baseloss=base_losses.avg, archloss=arch_losses.avg)) | ||||
|   return base_losses.avg, arch_losses.avg, top1.avg, top5.avg | ||||
|  | ||||
|  | ||||
|  | ||||
| def search_valid(xloader, network, criterion, extra_info, print_freq, logger): | ||||
|   data_time, batch_time, losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter() | ||||
|  | ||||
|   network.eval() | ||||
|   network.apply( change_key('search_mode', 'search') ) | ||||
|   end = time.time() | ||||
|   #logger.log('Starting evaluating {:}'.format(epoch_info)) | ||||
|   with torch.no_grad(): | ||||
|     for i, (inputs, targets) in enumerate(xloader): | ||||
|       # measure data loading time | ||||
|       data_time.update(time.time() - end) | ||||
|       # calculate prediction and loss | ||||
|       targets = targets.cuda(non_blocking=True) | ||||
|  | ||||
|       logits, expected_flop = network(inputs) | ||||
|       loss             = criterion(logits, targets) | ||||
|       # record | ||||
|       prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5)) | ||||
|       losses.update(loss.item(),  inputs.size(0)) | ||||
|       top1.update  (prec1.item(), inputs.size(0)) | ||||
|       top5.update  (prec5.item(), inputs.size(0)) | ||||
|  | ||||
|       # measure elapsed time | ||||
|       batch_time.update(time.time() - end) | ||||
|       end = time.time() | ||||
|  | ||||
|       if i % print_freq == 0 or (i+1) == len(xloader): | ||||
|         Sstr = '**VALID** ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(extra_info, i, len(xloader)) | ||||
|         Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time) | ||||
|         Lstr = 'Loss {loss.val:.3f} ({loss.avg:.3f})  Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=losses, top1=top1, top5=top5) | ||||
|         Istr = 'Size={:}'.format(list(inputs.size())) | ||||
|         logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Istr) | ||||
|  | ||||
|   logger.log(' **VALID** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Loss:{loss:.3f}'.format(top1=top1, top5=top5, error1=100-top1.avg, error5=100-top5.avg, loss=losses.avg)) | ||||
|   | ||||
|   return losses.avg, top1.avg, top5.avg | ||||
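To make the piecewise FLOP penalty in get_flop_loss concrete, here is a small hedged check (the numbers are made up, and the import path assumes this repository layout): below the tolerated band the loss is -log(E[flop]), which pushes the expected FLOPs up; above the target it is +log(E[flop]), pushing them down; inside the band there is no penalty.

import torch
from autodl.procedures.search_main import get_flop_loss  # import path is an assumption

expected = torch.tensor([120.0, 130.0])  # per-sample expected FLOPs; mean = 125
print(get_flop_loss(expected, flop_cur=90.0,  flop_need=100.0, flop_tolerant=5.0))  # too small -> (-log(125), ...)
print(get_flop_loss(expected, flop_cur=110.0, flop_need=100.0, flop_tolerant=5.0))  # too large -> (+log(125), ...)
print(get_flop_loss(expected, flop_cur=100.0, flop_need=100.0, flop_tolerant=5.0))  # within band -> (0, 0)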
							
								
								
									
87  autodl/procedures/search_main_v2.py  Normal file
									
								
							| @@ -0,0 +1,87 @@ | ||||
| ################################################## | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # | ||||
| ################################################## | ||||
| import os, sys, time, torch | ||||
| from log_utils import AverageMeter, time_string | ||||
| from utils     import obtain_accuracy | ||||
| from models    import change_key | ||||
|  | ||||
|  | ||||
| def get_flop_loss(expected_flop, flop_cur, flop_need, flop_tolerant): | ||||
|   expected_flop = torch.mean( expected_flop ) | ||||
|  | ||||
|   if flop_cur < flop_need - flop_tolerant:   # Too Small FLOP | ||||
|     loss = - torch.log( expected_flop ) | ||||
|   #elif flop_cur > flop_need + flop_tolerant: # Too Large FLOP | ||||
|   elif flop_cur > flop_need: # Too Large FLOP | ||||
|     loss = torch.log( expected_flop ) | ||||
|   else: # Required FLOP | ||||
|     loss = None | ||||
|   if loss is None: return 0, 0 | ||||
|   else           : return loss, loss.item() | ||||
|  | ||||
|  | ||||
| def search_train_v2(search_loader, network, criterion, scheduler, base_optimizer, arch_optimizer, optim_config, extra_info, print_freq, logger): | ||||
|   data_time, batch_time = AverageMeter(), AverageMeter() | ||||
|   base_losses, arch_losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter() | ||||
|   arch_cls_losses, arch_flop_losses = AverageMeter(), AverageMeter() | ||||
|   epoch_str, flop_need, flop_weight, flop_tolerant = extra_info['epoch-str'], extra_info['FLOP-exp'], extra_info['FLOP-weight'], extra_info['FLOP-tolerant'] | ||||
|  | ||||
|   network.train() | ||||
|   logger.log('[Search] : {:}, FLOP-Require={:.2f} MB, FLOP-WEIGHT={:.2f}'.format(epoch_str, flop_need, flop_weight)) | ||||
|   end = time.time() | ||||
|   network.apply( change_key('search_mode', 'search') ) | ||||
|   for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(search_loader): | ||||
|     scheduler.update(None, 1.0 * step / len(search_loader)) | ||||
|     # calculate prediction and loss | ||||
|     base_targets = base_targets.cuda(non_blocking=True) | ||||
|     arch_targets = arch_targets.cuda(non_blocking=True) | ||||
|     # measure data loading time | ||||
|     data_time.update(time.time() - end) | ||||
|      | ||||
|     # update the weights | ||||
|     base_optimizer.zero_grad() | ||||
|     logits, expected_flop = network(base_inputs) | ||||
|     base_loss = criterion(logits, base_targets) | ||||
|     base_loss.backward() | ||||
|     base_optimizer.step() | ||||
|     # record | ||||
|     prec1, prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5)) | ||||
|     base_losses.update(base_loss.item(), base_inputs.size(0)) | ||||
|     top1.update       (prec1.item(), base_inputs.size(0)) | ||||
|     top5.update       (prec5.item(), base_inputs.size(0)) | ||||
|  | ||||
|     # update the architecture | ||||
|     arch_optimizer.zero_grad() | ||||
|     logits, expected_flop = network(arch_inputs) | ||||
|     flop_cur  = network.module.get_flop('genotype', None, None) | ||||
|     flop_loss, flop_loss_scale = get_flop_loss(expected_flop, flop_cur, flop_need, flop_tolerant) | ||||
|     acls_loss = criterion(logits, arch_targets) | ||||
|     arch_loss = acls_loss + flop_loss * flop_weight | ||||
|     arch_loss.backward() | ||||
|     arch_optimizer.step() | ||||
|    | ||||
|     # record | ||||
|     arch_losses.update(arch_loss.item(), arch_inputs.size(0)) | ||||
|     arch_flop_losses.update(flop_loss_scale, arch_inputs.size(0)) | ||||
|     arch_cls_losses.update (acls_loss.item(), arch_inputs.size(0)) | ||||
|      | ||||
|     # measure elapsed time | ||||
|     batch_time.update(time.time() - end) | ||||
|     end = time.time() | ||||
|     if step % print_freq == 0 or (step+1) == len(search_loader): | ||||
|       Sstr = '**TRAIN** ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(search_loader)) | ||||
|       Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time) | ||||
|       Lstr = 'Base-Loss {loss.val:.3f} ({loss.avg:.3f})  Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=base_losses, top1=top1, top5=top5) | ||||
|       Vstr = 'Acls-loss {aloss.val:.3f} ({aloss.avg:.3f}) FLOP-Loss {floss.val:.3f} ({floss.avg:.3f}) Arch-Loss {loss.val:.3f} ({loss.avg:.3f})'.format(aloss=arch_cls_losses, floss=arch_flop_losses, loss=arch_losses) | ||||
|       logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Vstr) | ||||
|       #num_bytes = torch.cuda.max_memory_allocated( next(network.parameters()).device ) * 1.0 | ||||
|       #logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Vstr + ' GPU={:.2f}MB'.format(num_bytes/1e6)) | ||||
|       #Istr = 'Bsz={:} Asz={:}'.format(list(base_inputs.size()), list(arch_inputs.size())) | ||||
|       #logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Vstr + ' ' + Istr) | ||||
|       #print(network.module.get_arch_info()) | ||||
|       #print(network.module.width_attentions[0]) | ||||
|       #print(network.module.width_attentions[1]) | ||||
|  | ||||
|   logger.log(' **TRAIN** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Base-Loss:{baseloss:.3f}, Arch-Loss={archloss:.3f}'.format(top1=top1, top5=top5, error1=100-top1.avg, error5=100-top5.avg, baseloss=base_losses.avg, archloss=arch_losses.avg)) | ||||
|   return base_losses.avg, arch_losses.avg, top1.avg, top5.avg | ||||
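search_train_v2 is nearly identical to search_train above, differing mainly in which diagnostics are commented out. Both assume two optimizers over disjoint parameter sets, alternating one weight step and one architecture step per search batch. Below is a self-contained sketch of that split; the module is a hypothetical stand-in for the real search network defined elsewhere in this repo.

import torch
import torch.nn as nn

class TinySearchCell(nn.Module):  # hypothetical stand-in for the real search network
    def __init__(self):
        super(TinySearchCell, self).__init__()
        self.conv = nn.Conv2d(3, 8, 3, padding=1)            # ordinary network weights
        self.arch_parameters = nn.Parameter(torch.zeros(4))  # architecture logits

net = TinySearchCell()
base_optimizer = torch.optim.SGD([net.conv.weight, net.conv.bias], lr=0.025, momentum=0.9)
arch_optimizer = torch.optim.Adam([net.arch_parameters], lr=3e-4, betas=(0.5, 0.999))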
							
								
								
									
94  autodl/procedures/simple_KD_main.py  Normal file
									
								
							| @@ -0,0 +1,94 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # | ||||
| ##################################################### | ||||
| import os, sys, time, torch | ||||
| import torch.nn.functional as F | ||||
| # our modules | ||||
| from log_utils import AverageMeter, time_string | ||||
| from utils     import obtain_accuracy | ||||
|  | ||||
|  | ||||
| def simple_KD_train(xloader, teacher, network, criterion, scheduler, optimizer, optim_config, extra_info, print_freq, logger): | ||||
|   loss, acc1, acc5 = procedure(xloader, teacher, network, criterion, scheduler, optimizer, 'train', optim_config, extra_info, print_freq, logger) | ||||
|   return loss, acc1, acc5 | ||||
|  | ||||
| def simple_KD_valid(xloader, teacher, network, criterion, optim_config, extra_info, print_freq, logger): | ||||
|   with torch.no_grad(): | ||||
|     loss, acc1, acc5 = procedure(xloader, teacher, network, criterion, None, None, 'valid', optim_config, extra_info, print_freq, logger) | ||||
|   return loss, acc1, acc5 | ||||
|  | ||||
|  | ||||
| def loss_KD_fn(criterion, student_logits, teacher_logits, studentFeatures, teacherFeatures, targets, alpha, temperature): | ||||
|   basic_loss = criterion(student_logits, targets) * (1. - alpha) | ||||
|   log_student= F.log_softmax(student_logits / temperature, dim=1) | ||||
|   sof_teacher= F.softmax    (teacher_logits / temperature, dim=1) | ||||
|   KD_loss    = F.kl_div(log_student, sof_teacher, reduction='batchmean') * (alpha * temperature * temperature) | ||||
|   return basic_loss + KD_loss | ||||
|  | ||||
|  | ||||
| def procedure(xloader, teacher, network, criterion, scheduler, optimizer, mode, config, extra_info, print_freq, logger): | ||||
|   data_time, batch_time, losses, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter() | ||||
|   Ttop1, Ttop5 = AverageMeter(), AverageMeter() | ||||
|   if mode == 'train': | ||||
|     network.train() | ||||
|   elif mode == 'valid': | ||||
|     network.eval() | ||||
|   else: raise ValueError("The mode is not right : {:}".format(mode)) | ||||
|   teacher.eval() | ||||
|    | ||||
|   logger.log('[{:5s}] config :: auxiliary={:}, KD :: [alpha={:.2f}, temperature={:.2f}]'.format(mode, config.auxiliary if hasattr(config, 'auxiliary') else -1, config.KD_alpha, config.KD_temperature)) | ||||
|   end = time.time() | ||||
|   for i, (inputs, targets) in enumerate(xloader): | ||||
|     if mode == 'train': scheduler.update(None, 1.0 * i / len(xloader)) | ||||
|     # measure data loading time | ||||
|     data_time.update(time.time() - end) | ||||
|     # calculate prediction and loss | ||||
|     targets = targets.cuda(non_blocking=True) | ||||
|  | ||||
|     if mode == 'train': optimizer.zero_grad() | ||||
|  | ||||
|     student_f, logits = network(inputs) | ||||
|     if isinstance(logits, list): | ||||
|       assert len(logits) == 2, 'logits must have {:} items instead of {:}'.format(2, len(logits)) | ||||
|       logits, logits_aux = logits | ||||
|     else: | ||||
|       logits, logits_aux = logits, None | ||||
|     with torch.no_grad(): | ||||
|       teacher_f, teacher_logits = teacher(inputs) | ||||
|  | ||||
|     loss             = loss_KD_fn(criterion, logits, teacher_logits, student_f, teacher_f, targets, config.KD_alpha, config.KD_temperature) | ||||
|     if config is not None and hasattr(config, 'auxiliary') and config.auxiliary > 0: | ||||
|       loss_aux = criterion(logits_aux, targets) | ||||
|       loss += config.auxiliary * loss_aux | ||||
|      | ||||
|     if mode == 'train': | ||||
|       loss.backward() | ||||
|       optimizer.step() | ||||
|  | ||||
|     # record | ||||
|     sprec1, sprec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5)) | ||||
|     losses.update(loss.item(),   inputs.size(0)) | ||||
|     top1.update  (sprec1.item(), inputs.size(0)) | ||||
|     top5.update  (sprec5.item(), inputs.size(0)) | ||||
|     # teacher | ||||
|     tprec1, tprec5 = obtain_accuracy(teacher_logits.data, targets.data, topk=(1, 5)) | ||||
|     Ttop1.update (tprec1.item(), inputs.size(0)) | ||||
|     Ttop5.update (tprec5.item(), inputs.size(0)) | ||||
|  | ||||
|     # measure elapsed time | ||||
|     batch_time.update(time.time() - end) | ||||
|     end = time.time() | ||||
|  | ||||
|     if i % print_freq == 0 or (i+1) == len(xloader): | ||||
|       Sstr = ' {:5s} '.format(mode.upper()) + time_string() + ' [{:}][{:03d}/{:03d}]'.format(extra_info, i, len(xloader)) | ||||
|       if scheduler is not None: | ||||
|         Sstr += ' {:}'.format(scheduler.get_min_info()) | ||||
|       Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time) | ||||
|       Lstr = 'Loss {loss.val:.3f} ({loss.avg:.3f})  Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=losses, top1=top1, top5=top5) | ||||
|       Lstr+= ' Teacher : acc@1={:.2f}, acc@5={:.2f}'.format(Ttop1.avg, Ttop5.avg) | ||||
|       Istr = 'Size={:}'.format(list(inputs.size())) | ||||
|       logger.log(Sstr + ' ' + Tstr + ' ' + Lstr + ' ' + Istr) | ||||
|  | ||||
|   logger.log(' **{:5s}** accuracy drop :: @1={:.2f}, @5={:.2f}'.format(mode.upper(), Ttop1.avg - top1.avg, Ttop5.avg - top5.avg)) | ||||
|   logger.log(' **{mode:5s}** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Loss:{loss:.3f}'.format(mode=mode.upper(), top1=top1, top5=top5, error1=100-top1.avg, error5=100-top5.avg, loss=losses.avg)) | ||||
|   return losses.avg, top1.avg, top5.avg | ||||
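As a hedged sanity check of loss_KD_fn above: the total loss is (1 - alpha) * CE(student, targets) plus alpha * T^2 * KL between the temperature-softened student and teacher distributions. The feature arguments are accepted but unused by the function, so None is passed here; the import path is an assumption.

import torch
from autodl.procedures.simple_KD_main import loss_KD_fn  # import path is an assumption

torch.manual_seed(0)
student_logits = torch.randn(4, 10)
teacher_logits = torch.randn(4, 10)
targets = torch.randint(0, 10, (4,))
criterion = torch.nn.CrossEntropyLoss()
loss = loss_KD_fn(criterion, student_logits, teacher_logits, None, None, targets, alpha=0.9, temperature=4.0)
print(loss.item())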
							
								
								
									
64  autodl/procedures/starts.py  Normal file
									
								
							| @@ -0,0 +1,64 @@ | ||||
| ################################################## | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 # | ||||
| ################################################## | ||||
| import os, sys, torch, random, PIL, copy, numpy as np | ||||
| from os import path as osp | ||||
| from shutil  import copyfile | ||||
|  | ||||
|  | ||||
| def prepare_seed(rand_seed): | ||||
|   random.seed(rand_seed) | ||||
|   np.random.seed(rand_seed) | ||||
|   torch.manual_seed(rand_seed) | ||||
|   torch.cuda.manual_seed(rand_seed) | ||||
|   torch.cuda.manual_seed_all(rand_seed) | ||||
|  | ||||
|  | ||||
| def prepare_logger(xargs): | ||||
|   args = copy.deepcopy( xargs ) | ||||
|   from autodl.log_utils import Logger | ||||
|   logger = Logger(args.save_dir, args.rand_seed) | ||||
|   logger.log('Main Function with logger : {:}'.format(logger)) | ||||
|   logger.log('Arguments : -------------------------------') | ||||
|   for name, value in args._get_kwargs(): | ||||
|     logger.log('{:16} : {:}'.format(name, value)) | ||||
|   logger.log("Python  Version  : {:}".format(sys.version.replace('\n', ' '))) | ||||
|   logger.log("Pillow  Version  : {:}".format(PIL.__version__)) | ||||
|   logger.log("PyTorch Version  : {:}".format(torch.__version__)) | ||||
|   logger.log("cuDNN   Version  : {:}".format(torch.backends.cudnn.version())) | ||||
|   logger.log("CUDA available   : {:}".format(torch.cuda.is_available())) | ||||
|   logger.log("CUDA GPU numbers : {:}".format(torch.cuda.device_count())) | ||||
|   logger.log("CUDA_VISIBLE_DEVICES : {:}".format(os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ else 'None')) | ||||
|   return logger | ||||
|  | ||||
|  | ||||
| def get_machine_info(): | ||||
|   info = "Python  Version  : {:}".format(sys.version.replace('\n', ' ')) | ||||
|   info+= "\nPillow  Version  : {:}".format(PIL.__version__) | ||||
|   info+= "\nPyTorch Version  : {:}".format(torch.__version__) | ||||
|   info+= "\ncuDNN   Version  : {:}".format(torch.backends.cudnn.version()) | ||||
|   info+= "\nCUDA available   : {:}".format(torch.cuda.is_available()) | ||||
|   info+= "\nCUDA GPU numbers : {:}".format(torch.cuda.device_count()) | ||||
|   if 'CUDA_VISIBLE_DEVICES' in os.environ: | ||||
|     info+= "\nCUDA_VISIBLE_DEVICES={:}".format(os.environ['CUDA_VISIBLE_DEVICES']) | ||||
|   else: | ||||
|     info+= "\nDoes not set CUDA_VISIBLE_DEVICES" | ||||
|   return info | ||||
|  | ||||
|  | ||||
| def save_checkpoint(state, filename, logger): | ||||
|   if osp.isfile(filename): | ||||
|     if hasattr(logger, 'log'): logger.log('Found {:} already exists; deleting it before saving'.format(filename)) | ||||
|     os.remove(filename) | ||||
|   torch.save(state, filename) | ||||
|   assert osp.isfile(filename), 'failed to save checkpoint to {:}: the file is not found after saving.'.format(filename) | ||||
|   if hasattr(logger, 'log'): logger.log('save checkpoint into {:}'.format(filename)) | ||||
|   return filename | ||||
|  | ||||
|  | ||||
| def copy_checkpoint(src, dst, logger): | ||||
|   if osp.isfile(dst): | ||||
|     if hasattr(logger, 'log'): logger.log('Found {:} already exists; deleting it before copying'.format(dst)) | ||||
|     os.remove(dst) | ||||
|   copyfile(src, dst) | ||||
|   if hasattr(logger, 'log'): logger.log('copy the file from {:} into {:}'.format(src, dst)) | ||||
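A hedged usage sketch for the helpers above; a bare object() stands in for the logger, which save_checkpoint and copy_checkpoint tolerate via their hasattr(logger, 'log') guards. The checkpoint path is arbitrary.

import torch
# assuming prepare_seed / save_checkpoint / copy_checkpoint above are in scope

prepare_seed(42)  # seeds python's random, numpy, and torch (CPU and CUDA)
state = {'epoch': 0, 'model': torch.nn.Linear(4, 2).state_dict()}
ckpt = save_checkpoint(state, '/tmp/demo-checkpoint.pth', logger=object())
copy_checkpoint(ckpt, '/tmp/demo-checkpoint-copy.pth', logger=object())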
							
								
								
									
5  autodl/utils/__init__.py  Normal file
									
								
							| @@ -0,0 +1,5 @@ | ||||
| from .evaluation_utils import obtain_accuracy | ||||
| from .gpu_manager      import GPUManager | ||||
| from .flop_benchmark   import get_model_infos, count_parameters_in_MB | ||||
| from .affine_utils     import normalize_points, denormalize_points | ||||
| from .affine_utils     import identity2affine, solve2theta, affine2image | ||||
							
								
								
									
125  autodl/utils/affine_utils.py  Normal file
									
								
							| @@ -0,0 +1,125 @@ | ||||
| # functions for affine transformation | ||||
| import math, torch | ||||
| import numpy as np | ||||
| import torch.nn.functional as F | ||||
|  | ||||
| def identity2affine(full=False): | ||||
|   if not full: | ||||
|     parameters = torch.zeros((2,3)) | ||||
|     parameters[0, 0] = parameters[1, 1] = 1 | ||||
|   else: | ||||
|     parameters = torch.zeros((3,3)) | ||||
|     parameters[0, 0] = parameters[1, 1] = parameters[2, 2] = 1 | ||||
|   return parameters | ||||
|  | ||||
| def normalize_L(x, L): | ||||
|   return -1. + 2. * x / (L-1) | ||||
|  | ||||
| def denormalize_L(x, L): | ||||
|   return (x + 1.0) / 2.0 * (L-1) | ||||
|  | ||||
| def crop2affine(crop_box, W, H): | ||||
|   assert len(crop_box) == 4, 'Invalid crop-box : {:}'.format(crop_box) | ||||
|   parameters = torch.zeros(3,3) | ||||
|   x1, y1 = normalize_L(crop_box[0], W), normalize_L(crop_box[1], H) | ||||
|   x2, y2 = normalize_L(crop_box[2], W), normalize_L(crop_box[3], H) | ||||
|   parameters[0,0] = (x2-x1)/2 | ||||
|   parameters[0,2] = (x2+x1)/2 | ||||
|  | ||||
|   parameters[1,1] = (y2-y1)/2 | ||||
|   parameters[1,2] = (y2+y1)/2 | ||||
|   parameters[2,2] = 1 | ||||
|   return parameters | ||||
|  | ||||
| def scale2affine(scalex, scaley): | ||||
|   parameters = torch.zeros(3,3) | ||||
|   parameters[0,0] = scalex | ||||
|   parameters[1,1] = scaley | ||||
|   parameters[2,2] = 1 | ||||
|   return parameters | ||||
|   | ||||
| def offset2affine(offx, offy): | ||||
|   parameters = torch.zeros(3,3) | ||||
|   parameters[0,0] = parameters[1,1] = parameters[2,2] = 1 | ||||
|   parameters[0,2] = offx | ||||
|   parameters[1,2] = offy | ||||
|   return parameters | ||||
|  | ||||
| def horizontalmirror2affine(): | ||||
|   parameters = torch.zeros(3,3) | ||||
|   parameters[0,0] = -1 | ||||
|   parameters[1,1] = parameters[2,2] = 1 | ||||
|   return parameters | ||||
|  | ||||
| # clockwise rotate image = counterclockwise rotate the rectangle | ||||
| # degree is between [0, 360] | ||||
| def rotate2affine(degree): | ||||
|   assert degree >= 0 and degree <= 360, 'Invalid degree : {:}'.format(degree) | ||||
|   degree = degree / 180 * math.pi | ||||
|   parameters = torch.zeros(3,3) | ||||
|   parameters[0,0] =  math.cos(-degree) | ||||
|   parameters[0,1] = -math.sin(-degree) | ||||
|   parameters[1,0] =  math.sin(-degree) | ||||
|   parameters[1,1] =  math.cos(-degree) | ||||
|   parameters[2,2] = 1 | ||||
|   return parameters | ||||
|  | ||||
| # shape is a tuple [H, W] | ||||
| def normalize_points(shape, points): | ||||
|   assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(shape) == 2, 'invalid shape : {:}'.format(shape)   | ||||
|   assert isinstance(points, torch.Tensor) and (points.shape[0] == 2), 'points are wrong : {:}'.format(points.shape) | ||||
|   (H, W), points = shape, points.clone() | ||||
|   points[0, :] = normalize_L(points[0,:], W) | ||||
|   points[1, :] = normalize_L(points[1,:], H) | ||||
|   return points | ||||
|  | ||||
| # shape is a tuple [H, W] | ||||
| def normalize_points_batch(shape, points): | ||||
|   assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(shape) == 2, 'invalid shape : {:}'.format(shape)   | ||||
|   assert isinstance(points, torch.Tensor) and (points.size(-1) == 2), 'points are wrong : {:}'.format(points.shape) | ||||
|   (H, W), points = shape, points.clone() | ||||
|   x = normalize_L(points[...,0], W) | ||||
|   y = normalize_L(points[...,1], H) | ||||
|   return torch.stack((x,y), dim=-1) | ||||
|  | ||||
| # shape is a tuple [H, W] | ||||
| def denormalize_points(shape, points): | ||||
|   assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(shape) == 2, 'invalid shape : {:}'.format(shape)   | ||||
|   assert isinstance(points, torch.Tensor) and (points.shape[0] == 2), 'points are wrong : {:}'.format(points.shape) | ||||
|   (H, W), points = shape, points.clone() | ||||
|   points[0, :] = denormalize_L(points[0,:], W) | ||||
|   points[1, :] = denormalize_L(points[1,:], H) | ||||
|   return points | ||||
|  | ||||
| # shape is a tuple [H, W] | ||||
| def denormalize_points_batch(shape, points): | ||||
|   assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(shape) == 2, 'invalid shape : {:}'.format(shape)   | ||||
|   assert isinstance(points, torch.Tensor) and (points.shape[-1] == 2), 'points are wrong : {:}'.format(points.shape) | ||||
|   (H, W), points = shape, points.clone() | ||||
|   x = denormalize_L(points[...,0], W) | ||||
|   y = denormalize_L(points[...,1], H) | ||||
|   return torch.stack((x,y), dim=-1) | ||||
|  | ||||
| # make target * theta = source | ||||
| def solve2theta(source, target): | ||||
|   source, target = source.clone(), target.clone() | ||||
|   oks = source[2, :] == 1 | ||||
|   assert torch.sum(oks).item() >= 3, 'only {:} valid points, but at least 3 are required'.format(torch.sum(oks).item()) | ||||
|   if target.size(0) == 2: target = torch.cat((target, oks.unsqueeze(0).float()), dim=0) | ||||
|   source, target = source[:, oks], target[:, oks] | ||||
|   source, target = source.transpose(1,0), target.transpose(1,0) | ||||
|   assert source.size(1) == target.size(1) == 3 | ||||
|   #X, residual, rank, s = np.linalg.lstsq(target.numpy(), source.numpy()) | ||||
|   #theta = torch.Tensor(X.T[:2, :]) | ||||
|   X_, qr = torch.gels(source, target)  # torch.gels is deprecated in newer PyTorch; see the note after this file | ||||
|   theta = X_[:3, :2].transpose(1, 0) | ||||
|   return theta | ||||
|  | ||||
| # shape = [H,W] | ||||
| def affine2image(image, theta, shape): | ||||
|   C, H, W = image.size() | ||||
|   theta = theta[:2, :].unsqueeze(0) | ||||
|   grid_size = torch.Size([1, C, shape[0], shape[1]]) | ||||
|   grid  = F.affine_grid(theta, grid_size) | ||||
|   affI  = F.grid_sample(image.unsqueeze(0), grid, mode='bilinear', padding_mode='border') | ||||
|   return affI.squeeze(0) | ||||
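One caveat on solve2theta above: torch.gels was deprecated and later removed from PyTorch in favor of torch.linalg.lstsq. A hedged equivalent for recent PyTorch versions is sketched below; exact agreement with the slicing above should be verified against your PyTorch version.

import torch

def solve2theta_modern(source, target):
    # least-squares solve of target @ X = source, mirroring torch.gels(source, target)
    solution = torch.linalg.lstsq(target, source).solution  # shape (3, 3) for 3-column inputs
    return solution[:, :2].transpose(1, 0)                  # theta of shape (2, 3)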
							
								
								
									
16  autodl/utils/evaluation_utils.py  Normal file
									
								
							| @@ -0,0 +1,16 @@ | ||||
| import torch | ||||
|  | ||||
| def obtain_accuracy(output, target, topk=(1,)): | ||||
|   """Computes the precision@k for the specified values of k""" | ||||
|   maxk = max(topk) | ||||
|   batch_size = target.size(0) | ||||
|  | ||||
|   _, pred = output.topk(maxk, 1, True, True) | ||||
|   pred = pred.t() | ||||
|   correct = pred.eq(target.view(1, -1).expand_as(pred)) | ||||
|  | ||||
|   res = [] | ||||
|   for k in topk: | ||||
|     correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) | ||||
|     res.append(correct_k.mul_(100.0 / batch_size)) | ||||
|   return res | ||||
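A quick sanity check of obtain_accuracy above: when every row's argmax matches its target, precision@1 is 100.

import torch
from autodl.utils import obtain_accuracy  # re-exported in autodl/utils/__init__.py above

output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # argmax per row = [1, 0]
target = torch.tensor([1, 0])
top1, = obtain_accuracy(output, target, topk=(1,))
print(top1.item())  # 100.0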
							
								
								
									
181  autodl/utils/flop_benchmark.py  Normal file
									
								
							| @@ -0,0 +1,181 @@ | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| import numpy as np | ||||
|  | ||||
|  | ||||
| def count_parameters_in_MB(model): | ||||
|   if isinstance(model, nn.Module): | ||||
|     return sum(np.prod(v.size()) for v in model.parameters())/1e6 | ||||
|   else: | ||||
|     return sum(np.prod(v.size()) for v in model)/1e6 | ||||
|  | ||||
|  | ||||
| def get_model_infos(model, shape): | ||||
|   #model = copy.deepcopy( model ) | ||||
|  | ||||
|   model = add_flops_counting_methods(model) | ||||
|   #model = model.cuda() | ||||
|   model.eval() | ||||
|  | ||||
|   #cache_inputs = torch.zeros(*shape).cuda() | ||||
|   #cache_inputs = torch.zeros(*shape) | ||||
|   cache_inputs = torch.rand(*shape) | ||||
|   if next(model.parameters()).is_cuda: cache_inputs = cache_inputs.cuda() | ||||
|   #print_log('In the calculating function : cache input size : {:}'.format(cache_inputs.size()), log) | ||||
|   with torch.no_grad(): | ||||
|     _____ = model(cache_inputs) | ||||
|   FLOPs = compute_average_flops_cost( model ) / 1e6 | ||||
|   Param = count_parameters_in_MB(model) | ||||
|  | ||||
|   if hasattr(model, 'auxiliary_param'): | ||||
|     aux_params = count_parameters_in_MB(model.auxiliary_param())  | ||||
|     print ('The auxiliary params of this model are : {:}'.format(aux_params)) | ||||
|     print ('We remove the auxiliary params from the total params ({:}) when counting'.format(Param)) | ||||
|     Param = Param - aux_params | ||||
|    | ||||
|   #print_log('FLOPs : {:} MB'.format(FLOPs), log) | ||||
|   torch.cuda.empty_cache() | ||||
|   model.apply( remove_hook_function ) | ||||
|   return FLOPs, Param | ||||
|  | ||||
|  | ||||
| # ---- Public functions | ||||
| def add_flops_counting_methods( model ): | ||||
|   model.__batch_counter__ = 0 | ||||
|   add_batch_counter_hook_function( model ) | ||||
|   model.apply( add_flops_counter_variable_or_reset ) | ||||
|   model.apply( add_flops_counter_hook_function ) | ||||
|   return model | ||||
|  | ||||
|  | ||||
|  | ||||
| def compute_average_flops_cost(model): | ||||
|   """ | ||||
|   A method that will be available after add_flops_counting_methods() is called on a desired net object. | ||||
|   Returns current mean flops consumption per image. | ||||
|   """ | ||||
|   batches_count = model.__batch_counter__ | ||||
|   flops_sum = 0 | ||||
|   #or isinstance(module, torch.nn.AvgPool2d) or isinstance(module, torch.nn.MaxPool2d) \ | ||||
|   for module in model.modules(): | ||||
|     if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear) \ | ||||
|       or isinstance(module, torch.nn.Conv1d) \ | ||||
|       or hasattr(module, 'calculate_flop_self'): | ||||
|       flops_sum += module.__flops__ | ||||
|   return flops_sum / batches_count | ||||
|  | ||||
|  | ||||
| # ---- Internal functions | ||||
| def pool_flops_counter_hook(pool_module, inputs, output): | ||||
|   batch_size = inputs[0].size(0) | ||||
|   kernel_size = pool_module.kernel_size | ||||
|   out_C, output_height, output_width = output.shape[1:] | ||||
|   assert out_C == inputs[0].size(1), '{:} vs. {:}'.format(out_C, inputs[0].size()) | ||||
|  | ||||
|   overall_flops = batch_size * out_C * output_height * output_width * kernel_size * kernel_size | ||||
|   pool_module.__flops__ += overall_flops | ||||
|  | ||||
|  | ||||
| def self_calculate_flops_counter_hook(self_module, inputs, output): | ||||
|   overall_flops = self_module.calculate_flop_self(inputs[0].shape, output.shape) | ||||
|   self_module.__flops__ += overall_flops | ||||
|  | ||||
|  | ||||
| def fc_flops_counter_hook(fc_module, inputs, output): | ||||
|   batch_size = inputs[0].size(0) | ||||
|   xin, xout = fc_module.in_features, fc_module.out_features | ||||
|   assert xin == inputs[0].size(1) and xout == output.size(1), 'IO=({:}, {:})'.format(xin, xout) | ||||
|   overall_flops = batch_size * xin * xout | ||||
|   if fc_module.bias is not None: | ||||
|     overall_flops += batch_size * xout | ||||
|   fc_module.__flops__ += overall_flops | ||||
|  | ||||
|  | ||||
| def conv1d_flops_counter_hook(conv_module, inputs, outputs): | ||||
|   batch_size   = inputs[0].size(0) | ||||
|   outL         = outputs.shape[-1] | ||||
|   [kernel]     = conv_module.kernel_size | ||||
|   in_channels  = conv_module.in_channels | ||||
|   out_channels = conv_module.out_channels | ||||
|   groups       = conv_module.groups | ||||
|   conv_per_position_flops = kernel * in_channels * out_channels / groups | ||||
|    | ||||
|   active_elements_count = batch_size * outL  | ||||
|   overall_flops = conv_per_position_flops * active_elements_count | ||||
|  | ||||
|   if conv_module.bias is not None: | ||||
|     overall_flops += out_channels * active_elements_count | ||||
|   conv_module.__flops__ += overall_flops | ||||
|  | ||||
|  | ||||
| def conv2d_flops_counter_hook(conv_module, inputs, output): | ||||
|   batch_size = inputs[0].size(0) | ||||
|   output_height, output_width = output.shape[2:] | ||||
|    | ||||
|   kernel_height, kernel_width = conv_module.kernel_size | ||||
|   in_channels  = conv_module.in_channels | ||||
|   out_channels = conv_module.out_channels | ||||
|   groups       = conv_module.groups | ||||
|   conv_per_position_flops = kernel_height * kernel_width * in_channels * out_channels / groups | ||||
|    | ||||
|   active_elements_count = batch_size * output_height * output_width | ||||
|   overall_flops = conv_per_position_flops * active_elements_count | ||||
|      | ||||
|   if conv_module.bias is not None: | ||||
|     overall_flops += out_channels * active_elements_count | ||||
|   conv_module.__flops__ += overall_flops | ||||
|  | ||||
|    | ||||
| def batch_counter_hook(module, inputs, output): | ||||
|   # Can have multiple inputs, getting the first one | ||||
|   inputs = inputs[0] | ||||
|   batch_size = inputs.shape[0] | ||||
|   module.__batch_counter__ += batch_size | ||||
|  | ||||
|  | ||||
| def add_batch_counter_hook_function(module): | ||||
|   if not hasattr(module, '__batch_counter_handle__'): | ||||
|     handle = module.register_forward_hook(batch_counter_hook) | ||||
|     module.__batch_counter_handle__ = handle | ||||
|  | ||||
|    | ||||
| def add_flops_counter_variable_or_reset(module): | ||||
|   if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.Linear) \ | ||||
|     or isinstance(module, torch.nn.Conv1d) \ | ||||
|     or isinstance(module, torch.nn.AvgPool2d) or isinstance(module, torch.nn.MaxPool2d) \ | ||||
|     or hasattr(module, 'calculate_flop_self'): | ||||
|     module.__flops__ = 0 | ||||
|  | ||||
|  | ||||
| def add_flops_counter_hook_function(module): | ||||
|   if isinstance(module, torch.nn.Conv2d): | ||||
|     if not hasattr(module, '__flops_handle__'): | ||||
|       handle = module.register_forward_hook(conv2d_flops_counter_hook) | ||||
|       module.__flops_handle__ = handle | ||||
|   elif isinstance(module, torch.nn.Conv1d): | ||||
|     if not hasattr(module, '__flops_handle__'): | ||||
|       handle = module.register_forward_hook(conv1d_flops_counter_hook) | ||||
|       module.__flops_handle__ = handle | ||||
|   elif isinstance(module, torch.nn.Linear): | ||||
|     if not hasattr(module, '__flops_handle__'): | ||||
|       handle = module.register_forward_hook(fc_flops_counter_hook) | ||||
|       module.__flops_handle__ = handle | ||||
|   elif isinstance(module, torch.nn.AvgPool2d) or isinstance(module, torch.nn.MaxPool2d): | ||||
|     if not hasattr(module, '__flops_handle__'): | ||||
|       handle = module.register_forward_hook(pool_flops_counter_hook) | ||||
|       module.__flops_handle__ = handle | ||||
|   elif hasattr(module, 'calculate_flop_self'): # self-defined module | ||||
|     if not hasattr(module, '__flops_handle__'): | ||||
|       handle = module.register_forward_hook(self_calculate_flops_counter_hook) | ||||
|       module.__flops_handle__ = handle | ||||
|  | ||||
|  | ||||
| def remove_hook_function(module): | ||||
|   hookers = ['__batch_counter_handle__', '__flops_handle__'] | ||||
|   for hooker in hookers: | ||||
|     if hasattr(module, hooker): | ||||
|       handle = getattr(module, hooker) | ||||
|       handle.remove() | ||||
|   keys = ['__flops__', '__batch_counter__'] + hookers | ||||
|   for ckey in keys: | ||||
|     if hasattr(module, ckey): delattr(module, ckey) | ||||
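A hedged usage sketch for get_model_infos above (re-exported by autodl/utils/__init__.py): it returns FLOPs in millions per image and the parameter count in MB. The toy model below is arbitrary.

import torch.nn as nn
from autodl.utils import get_model_infos  # re-exported in autodl/utils/__init__.py above

model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
    nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(16, 10))
flops, params = get_model_infos(model, shape=(1, 3, 32, 32))
print('{:.2f} MFLOPs, {:.3f} MB params'.format(flops, params))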
							
								
								
									
70  autodl/utils/gpu_manager.py  Normal file
									
								
							| @@ -0,0 +1,70 @@ | ||||
| import os | ||||
|  | ||||
| class GPUManager(): | ||||
|   queries = ('index', 'gpu_name', 'memory.free', 'memory.used', 'memory.total', 'power.draw', 'power.limit') | ||||
|  | ||||
|   def __init__(self): | ||||
|     all_gpus = self.query_gpu(False) | ||||
|  | ||||
|   def get_info(self, ctype): | ||||
|     cmd = 'nvidia-smi --query-gpu={} --format=csv,noheader'.format(ctype) | ||||
|     lines = os.popen(cmd).readlines() | ||||
|     lines = [line.strip('\n') for line in lines] | ||||
|     return lines | ||||
|  | ||||
|   def query_gpu(self, show=True): | ||||
|     num_gpus = len( self.get_info('index') ) | ||||
|     all_gpus = [ {} for i in range(num_gpus) ] | ||||
|     for query in self.queries: | ||||
|       infos = self.get_info(query) | ||||
|       for idx, info in enumerate(infos): | ||||
|         all_gpus[idx][query] = info | ||||
|  | ||||
|     if 'CUDA_VISIBLE_DEVICES' in os.environ: | ||||
|       CUDA_VISIBLE_DEVICES = os.environ['CUDA_VISIBLE_DEVICES'].split(',') | ||||
|       selected_gpus = [] | ||||
|       for idx, CUDA_VISIBLE_DEVICE in enumerate(CUDA_VISIBLE_DEVICES): | ||||
|         find = False | ||||
|         for gpu in all_gpus: | ||||
|           if gpu['index'] == CUDA_VISIBLE_DEVICE: | ||||
|             assert not find, 'Duplicate cuda device index : {}'.format(CUDA_VISIBLE_DEVICE) | ||||
|             find = True | ||||
|             selected_gpus.append( gpu.copy() ) | ||||
|             selected_gpus[-1]['index'] = '{}'.format(idx) | ||||
|         assert find, 'Could not find the device : {}'.format(CUDA_VISIBLE_DEVICE) | ||||
|       all_gpus = selected_gpus | ||||
|      | ||||
|     if show: | ||||
|       allstrings = '' | ||||
|       for gpu in all_gpus: | ||||
|         string = '| ' | ||||
|         for query in self.queries: | ||||
|           if query.find('memory') == 0: xinfo = '{:>9}'.format(gpu[query]) | ||||
|           else:                         xinfo = gpu[query] | ||||
|           string = string + query + ' : ' + xinfo + ' | ' | ||||
|         allstrings = allstrings + string + '\n' | ||||
|       return allstrings | ||||
|     else: | ||||
|       return all_gpus | ||||
|  | ||||
|   def select_by_memory(self, numbers=1): | ||||
|     all_gpus = self.query_gpu(False) | ||||
|     assert numbers <= len(all_gpus), 'Require {} gpus, but only {} are available'.format(numbers, len(all_gpus)) | ||||
|     alls = [] | ||||
|     for idx, gpu in enumerate(all_gpus): | ||||
|       free_memory = gpu['memory.free'] | ||||
|       free_memory = free_memory.split(' ')[0] | ||||
|       free_memory = int(free_memory) | ||||
|       index = gpu['index'] | ||||
|       alls.append((free_memory, index)) | ||||
|     alls.sort(reverse = True) | ||||
|     alls = [ int(alls[i][1]) for i in range(numbers) ] | ||||
|     return sorted(alls) | ||||
|  | ||||
| """ | ||||
| if __name__ == '__main__': | ||||
|   manager = GPUManager() | ||||
|   manager.query_gpu(True) | ||||
|   indexes = manager.select_by_memory(3) | ||||
|   print (indexes) | ||||
| """ | ||||
							
								
								
									
57  autodl/utils/nas_utils.py  Normal file
									
								
							| @@ -0,0 +1,57 @@ | ||||
| # This file is for experimental usage | ||||
| import torch, random | ||||
| import numpy as np | ||||
| from copy import deepcopy | ||||
| import torch.nn as nn | ||||
|  | ||||
| # from utils  import obtain_accuracy | ||||
| from models import CellStructure | ||||
| from log_utils import time_string | ||||
|  | ||||
|  | ||||
| def evaluate_one_shot(model, xloader, api, cal_mode, seed=111): | ||||
|   print ('This is an old version of the code for using NAS-Bench-API, and it should be modified to align with the new version. Please contact me for more details if you use this function.') | ||||
|   weights = deepcopy(model.state_dict()) | ||||
|   model.train(cal_mode) | ||||
|   with torch.no_grad(): | ||||
|     logits = nn.functional.log_softmax(model.arch_parameters, dim=-1) | ||||
|     archs = CellStructure.gen_all(model.op_names, model.max_nodes, False) | ||||
|     probs, accuracies, gt_accs_10_valid, gt_accs_10_test = [], [], [], [] | ||||
|     loader_iter = iter(xloader) | ||||
|     random.seed(seed) | ||||
|     random.shuffle(archs) | ||||
|     for idx, arch in enumerate(archs): | ||||
|       arch_index = api.query_index_by_arch( arch ) | ||||
|       metrics = api.get_more_info(arch_index, 'cifar10-valid', None, False, False) | ||||
|       gt_accs_10_valid.append( metrics['valid-accuracy'] ) | ||||
|       metrics = api.get_more_info(arch_index, 'cifar10', None, False, False) | ||||
|       gt_accs_10_test.append( metrics['test-accuracy'] ) | ||||
|       select_logits = [] | ||||
|       for i, node_info in enumerate(arch.nodes): | ||||
|         for op, xin in node_info: | ||||
|           node_str = '{:}<-{:}'.format(i+1, xin) | ||||
|           op_index = model.op_names.index(op) | ||||
|           select_logits.append( logits[model.edge2index[node_str], op_index] ) | ||||
|       cur_prob = sum(select_logits).item() | ||||
|       probs.append( cur_prob ) | ||||
|     cor_prob_valid = np.corrcoef(probs, gt_accs_10_valid)[0,1] | ||||
|     cor_prob_test  = np.corrcoef(probs, gt_accs_10_test )[0,1] | ||||
|     print ('{:} correlation for probabilities : {:.6f} on CIFAR-10 validation and {:.6f} on CIFAR-10 test'.format(time_string(), cor_prob_valid, cor_prob_test)) | ||||
|        | ||||
|     for idx, arch in enumerate(archs): | ||||
|       model.set_cal_mode('dynamic', arch) | ||||
|       try: | ||||
|         inputs, targets = next(loader_iter) | ||||
|       except StopIteration:  # restart the iterator once the loader is exhausted | ||||
|         loader_iter = iter(xloader) | ||||
|         inputs, targets = next(loader_iter) | ||||
|       _, logits = model(inputs.cuda()) | ||||
|       _, preds  = torch.max(logits, dim=-1) | ||||
|       correct = (preds == targets.cuda() ).float() | ||||
|       accuracies.append( correct.mean().item() ) | ||||
|       if idx != 0 and (idx % 500 == 0 or idx + 1 == len(archs)): | ||||
|         cor_accs_valid = np.corrcoef(accuracies, gt_accs_10_valid[:idx+1])[0,1] | ||||
|         cor_accs_test  = np.corrcoef(accuracies, gt_accs_10_test [:idx+1])[0,1] | ||||
|         print ('{:} {:05d}/{:05d} mode={:5s}, correlation : accs={:.5f} for CIFAR-10 valid, {:.5f} for CIFAR-10 test.'.format(time_string(), idx, len(archs), 'Train' if cal_mode else 'Eval', cor_accs_valid, cor_accs_test)) | ||||
|   model.load_state_dict(weights) | ||||
|   return archs, probs, accuracies | ||||
							
								
								
									
319  autodl/utils/weight_watcher.py  Normal file
									
								
							| @@ -0,0 +1,319 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.03 # | ||||
| ##################################################### | ||||
| # Reformulate the codes in https://github.com/CalculatedContent/WeightWatcher | ||||
| ##################################################### | ||||
| import numpy as np | ||||
| from typing import List | ||||
| import torch.nn as nn | ||||
| from collections import OrderedDict | ||||
| from sklearn.decomposition import TruncatedSVD | ||||
|  | ||||
|  | ||||
| def available_module_types(): | ||||
|   return (nn.Conv2d, nn.Linear) | ||||
|  | ||||
|  | ||||
| def get_conv2D_Wmats(tensor: np.ndarray) -> List[np.ndarray]: | ||||
|   """ | ||||
|   Extract W slices from a 4 index conv2D tensor of shape: (N,M,i,j) or (M,N,i,j). | ||||
|   Return ij (N x M) matrices | ||||
|   """ | ||||
|   mats = [] | ||||
|   N, M, imax, jmax = tensor.shape | ||||
|   assert N + M >= imax + jmax, 'invalid tensor shape detected: {}x{} (NxM), {}x{} (i,j)'.format(N, M, imax, jmax) | ||||
|   for i in range(imax): | ||||
|     for j in range(jmax): | ||||
|       w = tensor[:, :, i, j] | ||||
|       if N < M: w = w.T | ||||
|       mats.append(w) | ||||
|   return mats | ||||
|  | ||||
|  | ||||
| def glorot_norm_check(W, N, M, rf_size, lower=0.5, upper=1.5): | ||||
|   """Check if this layer needs Glorot Normalization Fix""" | ||||
|  | ||||
|   kappa = np.sqrt(2 / ((N + M) * rf_size)) | ||||
|   norm = np.linalg.norm(W) | ||||
|  | ||||
|   check1 = norm / np.sqrt(N * M) | ||||
|   check2 = norm / (kappa * np.sqrt(N * M)) | ||||
|  | ||||
|   if (rf_size > 1) and (check2 > lower) and (check2 < upper): | ||||
|     return check2, True | ||||
|   elif (check1 > lower) & (check1 < upper): | ||||
|     return check1, True | ||||
|   else: | ||||
|     if rf_size > 1: return check2, False | ||||
|     else: return check1, False | ||||
|  | ||||
| def glorot_norm_fix(w, n, m, rf_size): | ||||
|   """Apply Glorot Normalization Fix.""" | ||||
|   kappa = np.sqrt(2 / ((n + m) * rf_size)) | ||||
|   w = w / kappa | ||||
|   return w | ||||
|  | ||||
|  | ||||
| def analyze_weights(weights, min_size, max_size, alphas, lognorms, spectralnorms, softranks, normalize, glorot_fix): | ||||
|   results = OrderedDict() | ||||
|   count = len(weights) | ||||
|   if count == 0: return results | ||||
|  | ||||
|   for i, weight in enumerate(weights): | ||||
|     M, N = np.min(weight.shape), np.max(weight.shape) | ||||
|     Q = N / M | ||||
|     results[i] = cur_res = OrderedDict(N=N, M=M, Q=Q) | ||||
|     check, checkTF = glorot_norm_check(weight, N, M, count) | ||||
|     cur_res['check'] = check | ||||
|     cur_res['checkTF'] = checkTF | ||||
|     # assume receptive field size is count | ||||
|     if glorot_fix: | ||||
|       weight = glorot_norm_fix(weight, N, M, count) | ||||
|     else: | ||||
|       # probably never needed since we always fix for glorot | ||||
|       weight = weight * np.sqrt(count / 2.0) | ||||
|  | ||||
|     if spectralnorms:  # spectralnorm is the max eigenvalues | ||||
|       svd = TruncatedSVD(n_components=1, n_iter=7, random_state=10) | ||||
|       svd.fit(weight) | ||||
|       sv = svd.singular_values_ | ||||
|       sv_max = np.max(sv) | ||||
|       if normalize: | ||||
|         evals = sv * sv / N | ||||
|       else: | ||||
|         evals = sv * sv | ||||
|       lambda0 = evals[0] | ||||
|       cur_res["spectralnorm"] = lambda0 | ||||
|       cur_res["logspectralnorm"] = np.log10(lambda0) | ||||
|     else: | ||||
|       lambda0 = None | ||||
|  | ||||
|     if M < min_size: | ||||
|       summary = "Weight matrix {}/{} ({},{}): Skipping: too small (<{})".format(i + 1, count, M, N, min_size) | ||||
|       cur_res["summary"] = summary | ||||
|       continue | ||||
|     elif max_size > 0 and M > max_size: | ||||
|       summary = "Weight matrix {}/{} ({},{}): Skipping: too big (testing) (>{})".format(i + 1, count, M, N, max_size) | ||||
|       cur_res["summary"] = summary | ||||
|       continue | ||||
|     else: | ||||
|       summary = [] | ||||
|     if alphas: | ||||
|       import powerlaw | ||||
|       svd = TruncatedSVD(n_components=M - 1, n_iter=7, random_state=10) | ||||
|       svd.fit(weight.astype(float)) | ||||
|       sv = svd.singular_values_ | ||||
|       if normalize: evals = sv * sv / N | ||||
|       else: evals = sv * sv | ||||
|  | ||||
|       lambda_max = np.max(evals) | ||||
|       fit = powerlaw.Fit(evals, xmax=lambda_max, verbose=False) | ||||
|       alpha = fit.alpha | ||||
|       cur_res["alpha"] = alpha | ||||
|       D = fit.D | ||||
|       cur_res["D"] = D | ||||
|       cur_res["lambda_min"] = np.min(evals) | ||||
|       cur_res["lambda_max"] = lambda_max | ||||
|       alpha_weighted = alpha * np.log10(lambda_max) | ||||
|       cur_res["alpha_weighted"] = alpha_weighted | ||||
|       tolerance = lambda_max * M * np.finfo(np.max(sv)).eps | ||||
|       cur_res["rank_loss"] = np.count_nonzero(sv > tolerance, axis=-1) | ||||
|  | ||||
|       logpnorm = np.log10(np.sum([ev ** alpha for ev in evals])) | ||||
|       cur_res["logpnorm"] = logpnorm | ||||
|  | ||||
|       summary.append( | ||||
|         "Weight matrix {}/{} ({},{}): Alpha: {}, Alpha Weighted: {}, D: {}, pNorm {}".format(i + 1, count, M, N, alpha, | ||||
|                                                                                              alpha_weighted, D, | ||||
|                                                                                              logpnorm)) | ||||
|  | ||||
|     if lognorms: | ||||
|       norm = np.linalg.norm(weight)  # Frobenius Norm | ||||
|       cur_res["norm"] = norm | ||||
|       lognorm = np.log10(norm) | ||||
|       cur_res["lognorm"] = lognorm | ||||
|  | ||||
|       X = np.dot(weight.T, weight) | ||||
|       if normalize: X = X / N | ||||
|       normX = np.linalg.norm(X)  # Frobenius Norm | ||||
|       cur_res["normX"] = normX | ||||
|       lognormX = np.log10(normX) | ||||
|       cur_res["lognormX"] = lognormX | ||||
|  | ||||
|       summary.append( | ||||
|         "Weight matrix {}/{} ({},{}): LogNorm: {} ; LogNormX: {}".format(i + 1, count, M, N, lognorm, lognormX)) | ||||
|  | ||||
|       if softranks:  # note: relies on sv_max computed in the spectralnorms branch above | ||||
|         softrank = norm ** 2 / sv_max ** 2 | ||||
|         softranklog = np.log10(softrank) | ||||
|         softranklogratio = lognorm / np.log10(sv_max) | ||||
|         cur_res["softrank"] = softrank | ||||
|         cur_res["softranklog"] = softranklog | ||||
|         cur_res["softranklogratio"] = softranklogratio | ||||
|         summary.append("Weight matrix {}/{} ({},{}): Softrank: {}. Softrank log: {}. Softrank log ratio: {}".format(i + 1, count, M, N, softrank, softranklog, softranklogratio)) | ||||
|     cur_res["summary"] = "\n".join(summary) | ||||
|   return results | ||||
|  | ||||
|  | ||||
| def compute_details(results): | ||||
|   """ | ||||
|   Summarize the per-layer results and return an OrderedDict of network-level metric averages. | ||||
|   """ | ||||
|   final_summary = OrderedDict() | ||||
|  | ||||
|   metrics = { | ||||
|     # key in "results" : pretty print name | ||||
|     "check": "Check", | ||||
|     "checkTF": "CheckTF", | ||||
|     "norm": "Norm", | ||||
|     "lognorm": "LogNorm", | ||||
|     "normX": "Norm X", | ||||
|     "lognormX": "LogNorm X", | ||||
|     "alpha": "Alpha", | ||||
|     "alpha_weighted": "Alpha Weighted", | ||||
|     "spectralnorm": "Spectral Norm", | ||||
|     "logspectralnorm": "Log Spectral Norm", | ||||
|     "softrank": "Softrank", | ||||
|     "softranklog": "Softrank Log", | ||||
|     "softranklogratio": "Softrank Log Ratio", | ||||
|     "sigma_mp": "Marchenko-Pastur (MP) fit sigma", | ||||
|     "numofSpikes": "Number of spikes per MP fit", | ||||
|     "ratio_numofSpikes": "aka, percent_mass, Number of spikes / total number of evals", | ||||
|     "softrank_mp": "Softrank for MP fit", | ||||
|     "logpnorm": "alpha pNorm" | ||||
|   } | ||||
|  | ||||
|   metrics_stats = [] | ||||
|   for metric in metrics: | ||||
|     metrics_stats.append("{}_min".format(metric)) | ||||
|     metrics_stats.append("{}_max".format(metric)) | ||||
|     metrics_stats.append("{}_avg".format(metric)) | ||||
|  | ||||
|     metrics_stats.append("{}_compound_min".format(metric)) | ||||
|     metrics_stats.append("{}_compound_max".format(metric)) | ||||
|     metrics_stats.append("{}_compound_avg".format(metric)) | ||||
|  | ||||
|   columns = ["layer_id", "layer_type", "N", "M", "layer_count", "slice", | ||||
|              "slice_count", "level", "comment"] + [*metrics] + metrics_stats | ||||
|  | ||||
|   metrics_values = {} | ||||
|   metrics_values_compound = {} | ||||
|  | ||||
|   for metric in metrics: | ||||
|     metrics_values[metric] = [] | ||||
|     metrics_values_compound[metric] = [] | ||||
|  | ||||
|   layer_count = 0 | ||||
|   for layer_id, result in results.items(): | ||||
|     layer_count += 1 | ||||
|  | ||||
|     layer_type = np.nan | ||||
|     if "layer_type" in result: | ||||
|       layer_type = str(result["layer_type"]).replace("LAYER_TYPE.", "") | ||||
|  | ||||
|     compounds = {}  # temp var | ||||
|     for metric in metrics: | ||||
|       compounds[metric] = [] | ||||
|  | ||||
|     slice_count, Ntotal, Mtotal = 0, 0, 0 | ||||
|     for slice_id, summary in result.items(): | ||||
|       if not str(slice_id).isdigit(): | ||||
|         continue | ||||
|       slice_count += 1 | ||||
|  | ||||
|       N = np.nan | ||||
|       if "N" in summary: | ||||
|         N = summary["N"] | ||||
|         Ntotal += N | ||||
|  | ||||
|       M = np.nan | ||||
|       if "M" in summary: | ||||
|         M = summary["M"] | ||||
|         Mtotal += M | ||||
|  | ||||
|       data = {"layer_id": layer_id, "layer_type": layer_type, "N": N, "M": M, "slice": slice_id, "level": "SLICE", | ||||
|               "comment": "Slice level"} | ||||
|       for metric in metrics: | ||||
|         if metric in summary: | ||||
|           value = summary[metric] | ||||
|           if value is not None: | ||||
|             metrics_values[metric].append(value) | ||||
|             compounds[metric].append(value) | ||||
|             data[metric] = value | ||||
|  | ||||
|     data = {"layer_id": layer_id, "layer_type": layer_type, "N": Ntotal, "M": Mtotal, "slice_count": slice_count, | ||||
|             "level": "LAYER", "comment": "Layer level"} | ||||
|     # Compute the compound value over the slices | ||||
|     for metric, values in compounds.items(): | ||||
|       if len(values) == 0: | ||||
|         continue | ||||
|  | ||||
|       compound = np.mean(values) | ||||
|       metrics_values_compound[metric].append(compound) | ||||
|       data[metric] = compound | ||||
|  | ||||
|   data = {"layer_count": layer_count, "level": "NETWORK", "comment": "Network Level"} | ||||
|   for metric, metric_name in metrics.items(): | ||||
|     if metric not in metrics_values or len(metrics_values[metric]) == 0: | ||||
|       continue | ||||
|  | ||||
|     values = metrics_values[metric] | ||||
|     minimum = min(values) | ||||
|     maximum = max(values) | ||||
|     avg = np.mean(values) | ||||
|     final_summary[metric] = avg | ||||
|     # print("{}: min: {}, max: {}, avg: {}".format(metric_name, minimum, maximum, avg)) | ||||
|     data["{}_min".format(metric)] = minimum | ||||
|     data["{}_max".format(metric)] = maximum | ||||
|     data["{}_avg".format(metric)] = avg | ||||
|  | ||||
|     values = metrics_values_compound[metric] | ||||
|     minimum = min(values) | ||||
|     maximum = max(values) | ||||
|     avg = np.mean(values) | ||||
|     final_summary["{}_compound".format(metric)] = avg | ||||
|     # print("{} compound: min: {}, max: {}, avg: {}".format(metric_name, minimum, maximum, avg)) | ||||
|     data["{}_compound_min".format(metric)] = minimum | ||||
|     data["{}_compound_max".format(metric)] = maximum | ||||
|     data["{}_compound_avg".format(metric)] = avg | ||||
|  | ||||
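|   # The returned OrderedDict maps each observed metric to its average over | ||||
|   # all slices, plus a "<metric>_compound" entry averaged over the | ||||
|   # per-layer means, e.g. {"lognorm": ..., "lognorm_compound": ...}. | ||||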
|   return final_summary | ||||
|  | ||||
|  | ||||
| def analyze(model: nn.Module, min_size=50, max_size=0, | ||||
|             alphas: bool = False, lognorms: bool = True, spectralnorms: bool = False, | ||||
|             softranks: bool = False, normalize: bool = False, glorot_fix: bool = False): | ||||
|   """ | ||||
|   Analyze the weight matrices of a model. | ||||
|   :param model: A PyTorch model | ||||
|   :param min_size: The minimum weight matrix size to analyze. | ||||
|   :param max_size: The maximum weight matrix size to analyze (0 = no limit). | ||||
|   :param alphas: Compute the power laws (alpha) of the weight matrices. | ||||
|     Time consuming so disabled by default (use lognorm if you want speed) | ||||
|   :param lognorms: Compute the log norms of the weight matrices. | ||||
|   :param spectralnorms: Compute the spectral norm (max eigenvalue) of the weight matrices. | ||||
|   :param softranks: Compute the soft norm (i.e. StableRank) of the weight matrices. | ||||
|   :param normalize: Normalize or not. | ||||
|   :param glorot_fix: | ||||
|   :return: (a dict of all layers' results, a dict of the summarized info) | ||||
|   """ | ||||
|   names, modules = [], [] | ||||
|   for name, module in model.named_modules(): | ||||
|     if isinstance(module, available_module_types()): | ||||
|       names.append(name) | ||||
|       modules.append(module) | ||||
|   # print('There are {:} layers to be analyzed in this model.'.format(len(modules))) | ||||
|   all_results = OrderedDict() | ||||
|   for index, module in enumerate(modules): | ||||
|     if isinstance(module, nn.Linear): | ||||
|       weights = [module.weight.cpu().detach().numpy()] | ||||
|     else: | ||||
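|       # get_conv2D_Wmats (defined earlier in this module) is expected to | ||||
|       # unroll the 4-D conv kernel into a list of 2-D N x M weight | ||||
|       # matrices, one per spatial position of the kernel. | ||||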
|       weights = get_conv2D_Wmats(module.weight.cpu().detach().numpy()) | ||||
|     results = analyze_weights(weights, min_size, max_size, alphas, lognorms, spectralnorms, softranks, normalize, glorot_fix) | ||||
|     results['id'] = index | ||||
|     results['type'] = type(module) | ||||
|     all_results[index] = results | ||||
|   summary = compute_details(all_results) | ||||
|   return all_results, summary | ||||
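|  | ||||
|  | ||||
| # Usage sketch (illustrative only): any torch.nn.Module works here; the | ||||
| # tiny model and `min_size=2` below are assumptions chosen so the demo | ||||
| # matrices survive the size filter, not part of the original API. | ||||
| # `nn` is already imported at the top of this file. | ||||
| if __name__ == '__main__': | ||||
|   demo = nn.Sequential( | ||||
|     nn.Conv2d(3, 8, kernel_size=3), | ||||
|     nn.ReLU(), | ||||
|     nn.Flatten(), | ||||
|     nn.Linear(8 * 30 * 30, 10)) | ||||
|   details, summary = analyze(demo, min_size=2, lognorms=True) | ||||
|   for metric, value in summary.items(): | ||||
|     print('{:}: {:}'.format(metric, value)) | ||||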