update codes

D-X-Y 2019-02-01 04:03:35 +11:00
parent 3f9b54d99e
commit 65d9c1c57f
11 changed files with 103 additions and 55 deletions

View File

@@ -6,6 +6,7 @@ University of Technology Sydney
Requirements
- PyTorch 1.0
- Python 3.6
- opencv
```
conda install pytorch torchvision cuda100 -c pytorch
```
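Note that the conda command above does not install opencv; it is typically added separately, for example with `pip install opencv-python` or `conda install -c conda-forge opencv`.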
@@ -14,12 +15,13 @@ conda install pytorch torchvision cuda100 -c pytorch
Searching CNNs
```
bash ./scripts-cnn/search-acc-v2.sh 3 acc2
```
Train the searched CNN on CIFAR
```
bash ./scripts-cnn/train-imagenet.sh 0 GDAS_F1 52 14
bash ./scripts-cnn/train-imagenet.sh 0 GDAS_V1 50 14
bash ./scripts-cnn/train-cifar.sh 0 GDAS_F1 cifar10
bash ./scripts-cnn/train-cifar.sh 0 GDAS_V1 cifar100
```
Train the searched CNN on ImageNet

View File

@@ -1,13 +0,0 @@
{
"type" : ["str", "cosine"],
"batch_size": ["int", 128],
"epochs" : ["int", 600],
"momentum" : ["float", 0.9],
"decay" : ["float", 0.0005],
"LR" : ["float", 0.025],
"auxiliary" : ["bool", 1],
"auxiliary_weight" : ["float", 0.4],
"grad_clip" : ["float", 5],
"cutout" : ["int", 16],
"drop_path_prob" : ["float", 0.2]
}
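The deleted config above stores every option as a `[type, value]` pair. The training entry point below reads such files through `load_config(args.model_config)`; the loader itself is not part of this diff, so the following is only a minimal sketch of how that format could be parsed, with all names illustrative.
```
import json
from collections import namedtuple

def load_config(path):
    # Illustrative loader for the ["type", value] JSON format above;
    # the repository's real load_config may differ.
    with open(path) as f:
        raw = json.load(f)
    casts = {'str': str, 'int': int, 'float': float, 'bool': bool}
    data = {key: casts[type_tag](value) for key, (type_tag, value) in raw.items()}
    Config = namedtuple('Config', data.keys())
    return Config(**data)

# e.g. config.LR -> 0.025 and config.epochs -> 600 for the file above
```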

View File

@@ -54,15 +54,15 @@ def main():
if not os.path.isdir(args.save_path):
os.makedirs(args.save_path)
log = open(os.path.join(args.save_path, 'log-seed-{:}.txt'.format(args.manualSeed)), 'w')
print_log('save path : {}'.format(args.save_path), log)
print_log('Save Path : {:}'.format(args.save_path), log)
state = {k: v for k, v in args._get_kwargs()}
print_log(state, log)
print_log("Random Seed: {}".format(args.manualSeed), log)
print_log("Python version : {}".format(sys.version.replace('\n', ' ')), log)
print_log("Torch version : {}".format(torch.__version__), log)
print_log("CUDA version : {}".format(torch.version.cuda), log)
print_log("cuDNN version : {}".format(cudnn.version()), log)
print_log("Num of GPUs : {}".format(torch.cuda.device_count()), log)
print_log("Random Seed : {:}".format(args.manualSeed), log)
print_log("Python version : {:}".format(sys.version.replace('\n', ' ')), log)
print_log("Torch version : {:}".format(torch.__version__), log)
print_log("CUDA version : {:}".format(torch.version.cuda), log)
print_log("cuDNN version : {:}".format(cudnn.version()), log)
print_log("Num of GPUs : {:}".format(torch.cuda.device_count()), log)
args.dataset = args.dataset.lower()
config = load_config(args.model_config)

View File

@@ -21,7 +21,7 @@ def obtain_best(accuracies):
def main_procedure(config, dataset, data_path, args, genotype, init_channels, layers, log):
train_data, test_data, class_num = get_datasets(dataset, data_path, args.cutout)
train_data, test_data, class_num = get_datasets(dataset, data_path, config.cutout)
print_log('-------------------------------------- main-procedure', log)
print_log('config : {:}'.format(config), log)
@@ -39,9 +39,9 @@ def main_procedure(config, dataset, data_path, args, genotype, init_channels, la
print_log('genotype : {:}'.format(genotype), log)
print_log('args : {:}'.format(args), log)
print_log('Train-Dataset : {:}'.format(train_data), log)
print_log('Train-Trans : {:}'.format(train_transform), log)
print_log('Train-Trans : {:}'.format(train_data.transform), log)
print_log('Test--Dataset : {:}'.format(test_data ), log)
print_log('Test--Trans : {:}'.format(test_transform ), log)
print_log('Test--Trans : {:}'.format(test_data.transform ), log)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, shuffle=True,
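The switch from the local `train_transform`/`test_transform` to `train_data.transform`/`test_data.transform` works because torchvision datasets (and most custom datasets written in the same style) keep the transform they were constructed with as an attribute; a quick illustration, with the data path a placeholder:
```
import torchvision.datasets as dset
import torchvision.transforms as transforms

t = transforms.Compose([transforms.ToTensor()])
data = dset.CIFAR10('./data', train=True, transform=t, download=True)
assert data.transform is t   # the dataset exposes the transform it was built with
```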

View File

@@ -62,7 +62,7 @@ def main_procedure_imagenet(config, data_path, args, genotype, init_channels, la
total_param, aux_param = count_parameters_in_MB(basemodel), count_parameters_in_MB(basemodel.auxiliary_param())
print_log('Network =>\n{:}'.format(basemodel), log)
#print_FLOPs(basemodel, (1,3,224,224), [print_log, log])
print_FLOPs(basemodel, (1,3,224,224), [print_log, log])
print_log('Parameters : {:} - {:} = {:.3f} MB'.format(total_param, aux_param, total_param - aux_param), log)
print_log('config : {:}'.format(config), log)
print_log('genotype : {:}'.format(genotype), log)
@@ -75,7 +75,7 @@ def main_procedure_imagenet(config, data_path, args, genotype, init_channels, la
criterion_smooth = CrossEntropyLabelSmooth(class_num, config.label_smooth).cuda()
optimizer = torch.optim.SGD(model.parameters(), config.LR, momentum=config.momentum, weight_decay=config.decay, nestero=True)
optimizer = torch.optim.SGD(model.parameters(), config.LR, momentum=config.momentum, weight_decay=config.decay, nesterov=True)
if config.type == 'cosine':
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(config.epochs))
elif config.type == 'steplr':
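`CrossEntropyLabelSmooth` is used here but not defined in this diff; for context, a label-smoothed cross-entropy is commonly implemented along these lines (a sketch, not necessarily the repository's exact code):
```
import torch
import torch.nn as nn

class LabelSmoothCE(nn.Module):
    """Sketch of label-smoothed cross-entropy; the repo's
    CrossEntropyLabelSmooth may differ in detail."""
    def __init__(self, num_classes, epsilon):
        super().__init__()
        self.num_classes, self.epsilon = num_classes, epsilon

    def forward(self, logits, targets):
        log_probs = torch.log_softmax(logits, dim=1)
        # mix the one-hot targets with a uniform distribution
        one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        smoothed = (1 - self.epsilon) * one_hot + self.epsilon / self.num_classes
        return (-smoothed * log_probs).sum(dim=1).mean()
```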

View File

@@ -10,7 +10,7 @@ from .TieredImageNet import TieredImageNet
Dataset2Class = {'cifar10' : 10,
'cifar100': 100,
'tiered' : -1,
'imagnet-1k' : 1000,
'imagenet-1k' : 1000,
'imagenet-100': 100}
@@ -25,8 +25,8 @@ def get_datasets(name, root, cutout):
std = [x / 255 for x in [68.2, 65.4, 70.4]]
elif name == 'tiered':
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
elif name == 'imagnet-1k' or name == 'imagenet-100':
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
elif name == 'imagenet-1k' or name == 'imagenet-100':
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
else: raise TypeError("Unknow dataset : {:}".format(name))
@@ -42,7 +42,7 @@ def get_datasets(name, root, cutout):
if cutout > 0 : lists += [Cutout(cutout)]
train_transform = transforms.Compose(lists)
test_transform = transforms.Compose([transforms.CenterCrop(80), transforms.ToTensor(), transforms.Normalize(mean, std)])
elif name == 'imagnet-1k' or name == 'imagenet-100':
elif name == 'imagenet-1k' or name == 'imagenet-100':
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
@@ -57,15 +57,14 @@ def get_datasets(name, root, cutout):
])
test_transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])
else: raise TypeError("Unknow dataset : {:}".format(name))
train_data = TieredImageNet(root, 'train-val', train_transform)
test_data = None
if name == 'cifar10':
train_data = dset.CIFAR10(root, train=True, transform=train_transform, download=True)
test_data = dset.CIFAR10(root, train=True, transform=test_transform , download=True)
elif name == 'cifar100':
train_data = dset.CIFAR100(root, train=True, transform=train_transform, download=True)
test_data = dset.CIFAR100(root, train=True, transform=test_transform , download=True)
elif name == 'imagnet-1k' or name == 'imagenet-100':
elif name == 'imagenet-1k' or name == 'imagenet-100':
train_data = dset.ImageFolder(osp.join(root, 'train'), train_transform)
test_data = dset.ImageFolder(osp.join(root, 'val'), train_transform)
else: raise TypeError("Unknow dataset : {:}".format(name))
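`Cutout(cutout)`, appended to the train transforms above when `cutout > 0`, is the usual CIFAR augmentation that zeroes out a random square patch. The repository's implementation is not in this diff, so the following is only a typical version shown for reference:
```
import numpy as np
import torch

class Cutout(object):
    """Typical Cutout augmentation (DeVries & Taylor, 2017); for context only,
    the repository's own Cutout may differ."""
    def __init__(self, length):
        self.length = length

    def __call__(self, img):                      # img: (C, H, W) tensor after ToTensor
        h, w = img.size(1), img.size(2)
        mask = np.ones((h, w), np.float32)
        y, x = np.random.randint(h), np.random.randint(w)
        y1, y2 = np.clip(y - self.length // 2, 0, h), np.clip(y + self.length // 2, 0, h)
        x1, x2 = np.clip(x - self.length // 2, 0, w), np.clip(x + self.length // 2, 0, w)
        mask[y1:y2, x1:x2] = 0.0                  # zero out one square patch
        return img * torch.from_numpy(mask).expand_as(img)
```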

View File

@@ -4,7 +4,6 @@ from .utils import test_imagenet_data
from .utils import print_log
from .evaluation_utils import obtain_accuracy
from .draw_pts import draw_points
from .fb_transform import ApplyOffset
from .gpu_manager import GPUManager
from .save_meta import Save_Meta

View File

@@ -1,14 +0,0 @@
import torch
import random
import numpy as np
class ApplyOffset(object):
def __init__(self, offset):
assert isinstance(offset, int), 'The offset is not right : {}'.format(offset)
self.offset = offset
def __call__(self, x):
if isinstance(x, np.ndarray) and x.dtype == 'uint8':
x = x.astype(int)
if isinstance(x, np.ndarray) and x.size == 1:
x = int(x)
return x + self.offset
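For reference, the deleted `ApplyOffset` transform only shifted integer labels by a constant; a hypothetical use looked like:
```
import numpy as np

offset = ApplyOffset(1)                       # shift labels by +1
offset(np.array([0, 1, 2], dtype='uint8'))    # -> array([1, 2, 3])
```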

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env sh
if [ "$#" -ne 2 ] ;then
echo "Input illegal number of parameters " $#
echo "Need 2 parameters for the GPUs and the network"
exit 1
fi
if [ "$TORCH_HOME" = "" ]; then
echo "Must set TORCH_HOME envoriment variable for data dir saving"
exit 1
else
echo "TORCH_HOME : $TORCH_HOME"
fi
gpus=$1
arch=$2
cutout=0
dataset=cifar10
epoch=200
SAVED=./snapshots/NAS/ACC-V2-Search-${arch}-${dataset}-cut${cutout}-${epoch}-E600
CUDA_VISIBLE_DEVICES=${gpus} python ./exps-cnn/DARTS-Search.py \
--data_path $TORCH_HOME/cifar.python \
--arch ${arch} --dataset ${dataset} --batch_size 128 \
--save_path ${SAVED} \
--learning_rate_max 0.025 --learning_rate_min 0.001 --momentum 0.9 --weight_decay 0.0003 \
--epochs ${epoch} --cutout ${cutout} --validate --grad_clip 5 \
--init_channels 16 --layers 8 \
--model_config ./configs/nas-cifar-cos.config \
--print_freq 100 --workers 10

scripts-cnn/search.sh (new file, 45 additions)
View File

@@ -0,0 +1,45 @@
#!/usr/bin/env sh
if [ "$#" -ne 3 ] ;then
echo "Input illegal number of parameters " $#
echo "Need 3 parameters for the GPUs and the network and the dataset"
exit 1
fi
if [ "$TORCH_HOME" = "" ]; then
echo "Must set TORCH_HOME envoriment variable for data dir saving"
exit 1
else
echo "TORCH_HOME : $TORCH_HOME"
fi
gpus=$1
arch=$2
cutout=0
dataset=$3
epoch=50
SAVED=./snapshots/NAS/Search-${arch}-${dataset}-cut${cutout}-${epoch}
if [ "$dataset" == "cifar10" ] ;then
dataset_root=$TORCH_HOME/cifar.python
print_freq=100
elif [ "$dataset" == "cifar100" ] ;then
dataset_root=$TORCH_HOME/cifar.python
print_freq=100
elif [ "$dataset" == "tiered" ] ;then
dataset_root=$TORCH_HOME/tiered-imagenet
print_freq=500
else
echo "Invalid dataset name: ${dataset}"
exit 1
fi
CUDA_VISIBLE_DEVICES=${gpus} python ./exps-cnn/DARTS-Search.py \
--data_path ${dataset_root} \
--arch ${arch} \
--dataset ${dataset} --batch_size 64 \
--save_path ${SAVED} \
--learning_rate_max 0.025 --learning_rate_min 0.001 --momentum 0.9 --weight_decay 0.0003 \
--epochs ${epoch} --cutout ${cutout} --validate --grad_clip 5 \
--init_channels 16 --layers 8 \
--manualSeed 3858 \
--model_config ./configs/nas-cifar-cos-cut.config \
--print_freq ${print_freq} --workers 8
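A hypothetical invocation would be `bash ./scripts-cnn/search.sh 0 <arch> cifar10`, which searches on CIFAR-10 with GPU 0 and the fixed seed 3858 and saves results under `./snapshots/NAS/Search-<arch>-cifar10-cut0-50`; `<arch>` stands for whatever search-space name `DARTS-Search.py` accepts.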

View File

@@ -1,8 +1,8 @@
#!/usr/bin/env sh
# bash scripts-cnn/train-cifar.sh 0 GDAS cifar10
if [ "$#" -ne 3 ] ;then
# bash scripts-cnn/train-cifar.sh 0 GDAS cifar10 cut
if [ "$#" -ne 4 ] ;then
echo "Input illegal number of parameters " $#
echo "Need 3 parameters for the GPUs, the architecture, and the dataset-name"
echo "Need 4 parameters for the GPUs, the architecture, and the dataset-name, and the cutout"
exit 1
fi
if [ "$TORCH_HOME" = "" ]; then
@@ -15,13 +15,14 @@ fi
gpus=$1
arch=$2
dataset=$3
SAVED=./snapshots/NAS/${arch}-${dataset}-E600
cutout=$4
SAVED=./snapshots/NAS/${arch}-${dataset}-${cutout}-E600
CUDA_VISIBLE_DEVICES=${gpus} python ./exps-nas/train_base.py \
CUDA_VISIBLE_DEVICES=${gpus} python ./exps-cnn/train_base.py \
--data_path $TORCH_HOME/cifar.python \
--dataset ${dataset} --arch ${arch} \
--save_path ${SAVED} \
--grad_clip 5 \
--init_channels 36 --layers 20 \
--model_config ./configs/nas-cifar-cos.config \
--model_config ./configs/nas-cifar-cos-${cutout}.config \
--print_freq 100 --workers 8
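The new fourth argument both tags the snapshot directory and selects the config file: with the value from the header comment (`cut`), the script reads `./configs/nas-cifar-cos-cut.config`, the same config used by `scripts-cnn/search.sh` above.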