update NAS-Bench
		| @@ -39,6 +39,13 @@ def get_cell_based_tiny_net(config): | ||||
|       genotype = CellStructure.str2structure(config.arch_str) | ||||
|     else: raise ValueError('Can not find genotype from this config : {:}'.format(config)) | ||||
|     return TinyNetwork(config.C, config.N, genotype, config.num_classes) | ||||
|   elif config.name == 'infer.shape.tiny': | ||||
|     from .shape_infers import DynamicShapeTinyNet | ||||
|     if isinstance(config.channels, str): | ||||
|       channels = tuple([int(x) for x in config.channels.split(':')]) | ||||
|     else: channels = config.channels | ||||
|     genotype = CellStructure.str2structure(config.genotype) | ||||
|     return DynamicShapeTinyNet(channels, genotype, config.num_classes) | ||||
|   elif config.name == 'infer.nasnet-cifar': | ||||
|     from .cell_infers import NASNetonCIFAR | ||||
|     raise NotImplementedError | ||||
|   | ||||
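To show where the new `infer.shape.tiny` branch fits, here is a hedged sketch of a config that would reach it. The field values (the colon-separated channel string, the architecture string, and the class count) are illustrative assumptions, not values taken from the repository.

    # Hypothetical config for the new 'infer.shape.tiny' branch; values are illustrative.
    from argparse import Namespace

    config = Namespace(
        name='infer.shape.tiny',
        channels='16:16:32:32:64',   # parsed into the tuple (16, 16, 32, 32, 64)
        genotype='|nor_conv_3x3~0|+|skip_connect~0|nor_conv_3x3~1|+|skip_connect~0|skip_connect~1|nor_conv_3x3~2|',
        num_classes=10,
    )
    # net = get_cell_based_tiny_net(config)   # would return a DynamicShapeTinyNet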
| @@ -1,7 +1,6 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # | ||||
| ##################################################### | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from ..cell_operations import ResNetBasicblock | ||||
| from .cells import InferCell | ||||
|   | ||||
| @@ -172,14 +172,19 @@ class FactorizedReduce(nn.Module): | ||||
|       for i in range(2): | ||||
|         self.convs.append( nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False) ) | ||||
|       self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0) | ||||
|     elif stride == 1: | ||||
|       self.conv = nn.Conv2d(C_in, C_out, 1, stride=stride, padding=0, bias=False) | ||||
|     else: | ||||
|       raise ValueError('Invalid stride : {:}'.format(stride)) | ||||
|     self.bn = nn.BatchNorm2d(C_out, affine=affine, track_running_stats=track_running_stats) | ||||
|  | ||||
|   def forward(self, x): | ||||
|     x = self.relu(x) | ||||
|     y = self.pad(x) | ||||
|     out = torch.cat([self.convs[0](x), self.convs[1](y[:,:,1:,1:])], dim=1) | ||||
|     if self.stride == 2: | ||||
|       x = self.relu(x) | ||||
|       y = self.pad(x) | ||||
|       out = torch.cat([self.convs[0](x), self.convs[1](y[:,:,1:,1:])], dim=1) | ||||
|     else: | ||||
|       out = self.conv(x) | ||||
|     out = self.bn(out) | ||||
|     return out | ||||
|  | ||||
|   | ||||
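A small usage sketch of the behaviour added here: with stride 2 the module keeps its two offset 1x1 convolutions and concatenates their outputs, while the new stride-1 branch is a single 1x1 projection, so the spatial size is preserved. The constructor signature is inferred from the snippet above and may differ slightly in the full file.

    import torch

    x = torch.randn(2, 16, 32, 32)
    # reduce  = FactorizedReduce(16, 32, stride=2, affine=True, track_running_stats=True)
    # project = FactorizedReduce(16, 32, stride=1, affine=True, track_running_stats=True)
    # reduce(x).shape   -> (2, 32, 16, 16)   spatial size halved
    # project(x).shape  -> (2, 32, 32, 32)   spatial size kept, channels projected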
| @@ -14,11 +14,11 @@ from .search_model_darts_nasnet import NASNetworkDARTS | ||||
|  | ||||
|  | ||||
| nas201_super_nets = {'DARTS-V1': TinyNetworkDarts, | ||||
|                   'DARTS-V2': TinyNetworkDarts, | ||||
|                   'GDAS'    : TinyNetworkGDAS, | ||||
|                   'SETN'    : TinyNetworkSETN, | ||||
|                   'ENAS'    : TinyNetworkENAS, | ||||
|                   'RANDOM'  : TinyNetworkRANDOM} | ||||
|                      "DARTS-V2": TinyNetworkDarts, | ||||
|                      "GDAS": TinyNetworkGDAS, | ||||
|                      "SETN": TinyNetworkSETN, | ||||
|                      "ENAS": TinyNetworkENAS, | ||||
|                      "RANDOM": TinyNetworkRANDOM} | ||||
|  | ||||
| nasnet_super_nets = {'GDAS' : NASNetworkGDAS, | ||||
|                      'DARTS': NASNetworkDARTS} | ||||
| nasnet_super_nets = {"GDAS": NASNetworkGDAS, | ||||
|                      "DARTS": NASNetworkDARTS} | ||||
|   | ||||
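These dictionaries are simple name-to-class registries for the tiny (NAS-Bench-201) and NASNet search spaces. A hedged lookup sketch follows; only the keys come from the diff, and the constructor call is left out because its arguments are not shown here.

    # Illustrative registry lookup; the space string and variable names are assumptions.
    algo, space = 'GDAS', 'nas-bench-201'
    registry = nas201_super_nets if space == 'nas-bench-201' else nasnet_super_nets
    model_cls = registry[algo]
    # model = model_cls(...)   # constructor arguments depend on the chosen search space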
| @@ -1,5 +1,5 @@ | ||||
| #################### | ||||
| # DARTS, ICLR 2019 #  | ||||
| # DARTS, ICLR 2019 # | ||||
| #################### | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| @@ -11,7 +11,8 @@ from .search_cells import NASNetSearchCell as SearchCell | ||||
| # The macro structure is based on NASNet | ||||
| class NASNetworkDARTS(nn.Module): | ||||
|  | ||||
|   def __init__(self, C: int, N: int, steps: int, multiplier: int, stem_multiplier: int, num_classes: int, search_space: List[Text], affine: bool, track_running_stats: bool): | ||||
|   def __init__(self, C: int, N: int, steps: int, multiplier: int, stem_multiplier: int, | ||||
|                num_classes: int, search_space: List[Text], affine: bool, track_running_stats: bool): | ||||
|     super(NASNetworkDARTS, self).__init__() | ||||
|     self._C        = C | ||||
|     self._layerN   = N | ||||
|   | ||||
| @@ -6,14 +6,15 @@ | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from copy import deepcopy | ||||
| from typing import List, Text, Dict | ||||
| from .search_cells     import NASNetSearchCell as SearchCell | ||||
| from .genotypes        import Structure | ||||
|  | ||||
|  | ||||
| # The macro structure is based on NASNet | ||||
| class NASNetworkSETN(nn.Module): | ||||
|  | ||||
|   def __init__(self, C, N, steps, multiplier, stem_multiplier, num_classes, search_space, affine, track_running_stats): | ||||
|   def __init__(self, C: int, N: int, steps: int, multiplier: int, stem_multiplier: int, | ||||
|                num_classes: int, search_space: List[Text], affine: bool, track_running_stats: bool): | ||||
|     super(NASNetworkSETN, self).__init__() | ||||
|     self._C        = C | ||||
|     self._layerN   = N | ||||
| @@ -45,6 +46,16 @@ class NASNetworkSETN(nn.Module): | ||||
|     self.classifier = nn.Linear(C_prev, num_classes) | ||||
|     self.arch_normal_parameters = nn.Parameter( 1e-3*torch.randn(num_edge, len(search_space)) ) | ||||
|     self.arch_reduce_parameters = nn.Parameter( 1e-3*torch.randn(num_edge, len(search_space)) ) | ||||
|     self.mode = 'urs' | ||||
|     self.dynamic_cell = None | ||||
|  | ||||
|   def set_cal_mode(self, mode, dynamic_cell=None): | ||||
|     assert mode in ['urs', 'joint', 'select', 'dynamic'] | ||||
|     self.mode = mode | ||||
|     if mode == 'dynamic': | ||||
|       self.dynamic_cell = deepcopy(dynamic_cell) | ||||
|     else: | ||||
|       self.dynamic_cell = None | ||||
|  | ||||
|   def get_weights(self): | ||||
|     xlist = list( self.stem.parameters() ) + list( self.cells.parameters() ) | ||||
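The new set_cal_mode hook switches how the forward pass selects operations and, in 'dynamic' mode, stores a deep copy of a candidate cell. A hedged usage sketch, assuming an already constructed NASNetworkSETN instance named model and a candidate cell structure named candidate; the one-line mode descriptions reflect the SETN algorithm and are not quoted from this diff.

    # Hypothetical usage of the mode switch added above.
    # model.set_cal_mode('urs')                  # uniform random sampling of operations
    # model.set_cal_mode('joint')                # mix operations by their softmax weights
    # model.set_cal_mode('dynamic', candidate)   # evaluate one fixed candidate cell
    # model.set_cal_mode('select')               # pick the highest-weight operation per edge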
| @@ -70,6 +81,24 @@ class NASNetworkSETN(nn.Module): | ||||
|   def extra_repr(self): | ||||
|     return ('{name}(C={_C}, N={_layerN}, steps={_steps}, multiplier={_multiplier}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)) | ||||
|  | ||||
|   def dync_genotype(self, use_random=False): | ||||
|     genotypes = [] | ||||
|     with torch.no_grad(): | ||||
|       alphas_cpu = nn.functional.softmax(self.arch_parameters, dim=-1) | ||||
|     for i in range(1, self.max_nodes): | ||||
|       xlist = [] | ||||
|       for j in range(i): | ||||
|         node_str = '{:}<-{:}'.format(i, j) | ||||
|         if use_random: | ||||
|           op_name  = random.choice(self.op_names) | ||||
|         else: | ||||
|           weights  = alphas_cpu[ self.edge2index[node_str] ] | ||||
|           op_index = torch.multinomial(weights, 1).item() | ||||
|           op_name  = self.op_names[ op_index ] | ||||
|         xlist.append((op_name, j)) | ||||
|       genotypes.append( tuple(xlist) ) | ||||
|     return Structure( genotypes ) | ||||
|  | ||||
|   def genotype(self): | ||||
|     def _parse(weights): | ||||
|       gene = [] | ||||
| @@ -94,9 +123,6 @@ class NASNetworkSETN(nn.Module): | ||||
|   def forward(self, inputs): | ||||
|     normal_hardwts = nn.functional.softmax(self.arch_normal_parameters, dim=-1) | ||||
|     reduce_hardwts = nn.functional.softmax(self.arch_reduce_parameters, dim=-1) | ||||
|     with torch.no_grad(): | ||||
|       normal_hardwts_cpu = normal_hardwts.detach().cpu() | ||||
|       reduce_hardwts_cpu = reduce_hardwts.detach().cpu() | ||||
|  | ||||
|     s0 = s1 = self.stem(inputs) | ||||
|     for i, cell in enumerate(self.cells): | ||||
|   | ||||
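The dync_genotype method added above samples one operation per incoming edge, either uniformly at random or from the softmax of the architecture weights. A self-contained sketch of that sampling rule; the operation names and edge count here are illustrative, not taken from the repository.

    import torch
    import torch.nn.functional as F

    op_names = ['none', 'skip_connect', 'nor_conv_3x3']   # illustrative operation set
    alphas = 1e-3 * torch.randn(6, len(op_names))          # 6 edges, one row of weights per edge

    with torch.no_grad():
        probs = F.softmax(alphas, dim=-1)
    sampled = [op_names[torch.multinomial(p, 1).item()] for p in probs]
    print(sampled)   # one operation name drawn per edge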
| @@ -1,8 +1,9 @@ | ||||
| import math, torch | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # | ||||
| ##################################################### | ||||
| import torch.nn as nn | ||||
| import torch.nn.functional as F | ||||
| from ..initialization import initialize_resnet | ||||
| from ..SharedUtils    import additive_func | ||||
|  | ||||
|  | ||||
| class ConvBNReLU(nn.Module): | ||||
|   | ||||
| @@ -1,8 +1,9 @@ | ||||
| import math, torch | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # | ||||
| ##################################################### | ||||
| import torch.nn as nn | ||||
| import torch.nn.functional as F | ||||
| from ..initialization import initialize_resnet | ||||
| from ..SharedUtils    import additive_func | ||||
|  | ||||
|  | ||||
| class ConvBNReLU(nn.Module): | ||||
|   | ||||
| @@ -1,8 +1,9 @@ | ||||
| import math | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # | ||||
| ##################################################### | ||||
| import torch.nn as nn | ||||
| import torch.nn.functional as F | ||||
| from ..initialization import initialize_resnet | ||||
| from ..SharedUtils    import additive_func | ||||
|  | ||||
|  | ||||
| class ConvBNReLU(nn.Module): | ||||
|   | ||||
| @@ -1,8 +1,9 @@ | ||||
| import math, torch | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # | ||||
| ##################################################### | ||||
| import torch.nn as nn | ||||
| import torch.nn.functional as F | ||||
| from ..initialization import initialize_resnet | ||||
| from ..SharedUtils    import additive_func | ||||
|  | ||||
|  | ||||
| class ConvBNReLU(nn.Module): | ||||
|   | ||||
| @@ -1,7 +1,10 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # | ||||
| ##################################################### | ||||
| # MobileNetV2: Inverted Residuals and Linear Bottlenecks, CVPR 2018 | ||||
| from torch import nn | ||||
| from ..initialization import initialize_resnet | ||||
| from ..SharedUtils    import additive_func, parse_channel_info | ||||
| from ..SharedUtils    import parse_channel_info | ||||
|  | ||||
|  | ||||
| class ConvBNReLU(nn.Module): | ||||
|   | ||||
							
								
								
									
lib/models/shape_infers/InferTinyCellNet.py (new file, 58 lines)
							| @@ -0,0 +1,58 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # | ||||
| ##################################################### | ||||
| from typing import List, Text, Any | ||||
| import torch.nn as nn | ||||
| from models.cell_operations import ResNetBasicblock | ||||
| from models.cell_infers.cells import InferCell | ||||
|  | ||||
|  | ||||
| class DynamicShapeTinyNet(nn.Module): | ||||
|  | ||||
|   def __init__(self, channels: List[int], genotype: Any, num_classes: int): | ||||
|     super(DynamicShapeTinyNet, self).__init__() | ||||
|     self._channels = channels | ||||
|     if len(channels) % 3 != 2: | ||||
|       raise ValueError('invalid number of layers : {:}'.format(len(channels))) | ||||
|     self._num_stage = N = len(channels) // 3 | ||||
|  | ||||
|     self.stem = nn.Sequential( | ||||
|                     nn.Conv2d(3, channels[0], kernel_size=3, padding=1, bias=False), | ||||
|                     nn.BatchNorm2d(channels[0])) | ||||
|  | ||||
|     # layer_channels   = [C    ] * N + [C*2 ] + [C*2  ] * N + [C*4 ] + [C*4  ] * N     | ||||
|     layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N | ||||
|  | ||||
|     c_prev = channels[0] | ||||
|     self.cells = nn.ModuleList() | ||||
|     for index, (c_curr, reduction) in enumerate(zip(channels, layer_reductions)): | ||||
|       if reduction : cell = ResNetBasicblock(c_prev, c_curr, 2, True) | ||||
|       else         : cell = InferCell(genotype, c_prev, c_curr, 1) | ||||
|       self.cells.append( cell ) | ||||
|       c_prev = cell.out_dim | ||||
|     self._num_layer = len(self.cells) | ||||
|  | ||||
|     self.lastact = nn.Sequential(nn.BatchNorm2d(c_prev), nn.ReLU(inplace=True)) | ||||
|     self.global_pooling = nn.AdaptiveAvgPool2d(1) | ||||
|     self.classifier = nn.Linear(c_prev, num_classes) | ||||
|  | ||||
|   def get_message(self) -> Text: | ||||
|     string = self.extra_repr() | ||||
|     for i, cell in enumerate(self.cells): | ||||
|       string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr()) | ||||
|     return string | ||||
|  | ||||
|   def extra_repr(self): | ||||
|     return ('{name}(C={_channels}, N={_num_stage}, L={_num_layer})'.format(name=self.__class__.__name__, **self.__dict__)) | ||||
|  | ||||
|   def forward(self, inputs): | ||||
|     feature = self.stem(inputs) | ||||
|     for i, cell in enumerate(self.cells): | ||||
|       feature = cell(feature) | ||||
|  | ||||
|     out = self.lastact(feature) | ||||
|     out = self.global_pooling( out ) | ||||
|     out = out.view(out.size(0), -1) | ||||
|     logits = self.classifier(out) | ||||
|  | ||||
|     return out, logits | ||||
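A hedged usage sketch for the new DynamicShapeTinyNet: the channel list must satisfy len(channels) % 3 == 2 (stages of normal cells separated by two reduction blocks), and the forward pass returns both the pooled features and the logits. The architecture string and import path below are assumptions for illustration.

    # Illustrative only; the genotype string and CellStructure import path are assumptions.
    import torch
    # from models.cell_searchs import CellStructure   # path assumed

    channels = (16, 16, 32, 32, 64)   # 5 = 3*1 + 2, so one normal cell per stage
    arch_str = '|nor_conv_3x3~0|+|skip_connect~0|nor_conv_3x3~1|+|skip_connect~0|skip_connect~1|nor_conv_3x3~2|'
    # genotype = CellStructure.str2structure(arch_str)
    # net = DynamicShapeTinyNet(channels, genotype, num_classes=10)
    # features, logits = net(torch.randn(2, 3, 32, 32))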
| @@ -1,5 +1,9 @@ | ||||
| ##################################################### | ||||
| # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 # | ||||
| ##################################################### | ||||
| from .InferCifarResNet_width import InferWidthCifarResNet | ||||
| from .InferImagenetResNet    import InferImagenetResNet | ||||
| from .InferImagenetResNet import InferImagenetResNet | ||||
| from .InferCifarResNet_depth import InferDepthCifarResNet | ||||
| from .InferCifarResNet       import InferCifarResNet | ||||
| from .InferMobileNetV2       import InferMobileNetV2 | ||||
| from .InferCifarResNet import InferCifarResNet | ||||
| from .InferMobileNetV2 import InferMobileNetV2 | ||||
| from .InferTinyCellNet import DynamicShapeTinyNet | ||||