#####################################################
# Learning to Generate Model One Step Ahead         #
#####################################################
# python exps/GeMOSA/main.py --env_version v1 --workers 0
# python exps/GeMOSA/main.py --env_version v1 --lr 0.002 --hidden_dim 16 --meta_batch 256 --device cuda
# python exps/GeMOSA/main.py --env_version v2 --lr 0.002 --hidden_dim 16 --meta_batch 256 --device cuda
# python exps/GeMOSA/main.py --env_version v3 --lr 0.002 --hidden_dim 32 --time_dim 32 --meta_batch 256 --device cuda
# python exps/GeMOSA/main.py --env_version v4 --lr 0.002 --hidden_dim 32 --time_dim 32 --meta_batch 256 --device cuda
#####################################################
import sys, time, copy, torch, random, argparse
from tqdm import tqdm
from copy import deepcopy
from pathlib import Path

from torch.nn import functional as F

lib_dir = (Path(__file__).parent / ".." / "..").resolve()
print("LIB-DIR: {:}".format(lib_dir))
if str(lib_dir) not in sys.path:
    sys.path.insert(0, str(lib_dir))

from xautodl.procedures import (
    prepare_seed,
    prepare_logger,
    save_checkpoint,
    copy_checkpoint,
)
from xautodl.log_utils import time_string
from xautodl.log_utils import AverageMeter, convert_secs2time

from xautodl.utils import split_str2indexes

from xautodl.procedures.advanced_main import basic_train_fn, basic_eval_fn
from xautodl.procedures.metric_utils import (
    SaveMetric,
    MSEMetric,
    ComposeMetric,
    Top1AccMetric,
)
from xautodl.datasets.synthetic_core import get_synthetic_env
from xautodl.models.xcore import get_model

from meta_model import MetaModelV1


def online_evaluate(
    env,
    meta_model,
    base_model,
    criterion,
    metric,
    args,
    logger,
    save=False,
    easy_adapt=False,
):
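    """Evaluate the meta-model on a stream of (timestamp, (x, y)) pairs.

    For each future timestamp, the meta-model generates the base-model
    weights, the loss/metric are measured on the observed data, and the
    meta-model is then adapted to that data (embedding-only or gradient-based
    refinement). If `save` is True, the generated weight containers are
    returned per step.
    """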
    logger.log("Online evaluate: {:}".format(env))
    metric.reset()
    loss_meter = AverageMeter()
    w_containers = dict()
    for idx, (future_time, (future_x, future_y)) in enumerate(env):
        with torch.no_grad():
            meta_model.eval()
            base_model.eval()
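            # Generate a time embedding for the queried future timestamp and
            # decode it into a full weight container for the base model.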
            future_time_embed = meta_model.gen_time_embed(
                future_time.to(args.device).view(-1)
            )
            [future_container] = meta_model.gen_model(future_time_embed)
            if save:
                w_containers[idx] = future_container.no_grad_clone()
            future_x, future_y = future_x.to(args.device), future_y.to(args.device)
            future_y_hat = base_model.forward_with_container(future_x, future_container)
            future_loss = criterion(future_y_hat, future_y)
            loss_meter.update(future_loss.item())
            # accumulate the metric scores
            metric(future_y_hat, future_y)
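        # Adapt the meta-model to the newly observed pair: `easy_adapt` simply
        # records the generated embedding for this timestamp, while `adapt`
        # refines it further (args.refine_lr / args.refine_epochs); see
        # meta_model.py for the details.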
        if easy_adapt:
            meta_model.easy_adapt(future_time.item(), future_time_embed)
            refine, post_refine_loss = False, -1
        else:
            refine, post_refine_loss = meta_model.adapt(
                base_model,
                criterion,
                future_time.item(),
                future_x,
                future_y,
                args.refine_lr,
                args.refine_epochs,
                {"param": future_time_embed, "loss": future_loss.item()},
            )
        logger.log(
            "[ONLINE] [{:03d}/{:03d}] loss={:.4f}".format(
                idx, len(env), future_loss.item()
            )
            + ", post-loss={:.4f}".format(post_refine_loss if refine else -1)
        )
    meta_model.clear_fixed()
    meta_model.clear_learnt()
    return w_containers, loss_meter.avg, metric.get_info()["score"]


def meta_train_procedure(base_model, meta_model, criterion, xenv, args, logger):
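    """Pre-train the meta-model (hypernetwork) over all trainval timestamps.

    Each step samples a meta-batch of timestamps and minimizes the sum of the
    future loss (weights decoded from generated time embeddings), the present
    loss (weights decoded from the learned embeddings), and an L1 penalty
    between the two embedding sets; the best checkpoint is kept, with early
    stopping.
    """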
    base_model.train()
    meta_model.train()
    optimizer = torch.optim.Adam(
        meta_model.get_parameters(True, True, True),
        lr=args.lr,
        weight_decay=args.weight_decay,
        amsgrad=True,
    )
    logger.log("Pre-train the meta-model")
    logger.log("Using the optimizer: {:}".format(optimizer))

    meta_model.set_best_dir(logger.path(None) / "ckps-pretrain-v2")
    final_best_name = "final-pretrain-{:}.pth".format(args.rand_seed)
    if meta_model.has_best(final_best_name):
        meta_model.load_best(final_best_name)
        logger.log("Directly load the best model from {:}".format(final_best_name))
        return

    total_indexes = list(range(meta_model.meta_length))
    meta_model.set_best_name("pretrain-{:}.pth".format(args.rand_seed))
    last_success_epoch, early_stop_thresh = 0, args.pretrain_early_stop_thresh
    per_epoch_time, start_time = AverageMeter(), time.time()
    device = args.device
    for iepoch in range(args.epochs):
        left_time = "Time Left: {:}".format(
            convert_secs2time(per_epoch_time.avg * (args.epochs - iepoch), True)
        )
        optimizer.zero_grad()
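        # Generate time embeddings for every known timestamp, then sample a
        # meta-batch of timestamps and decode both the generated ("future")
        # and the directly learned ("present") embeddings into weight
        # containers for the base model.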
        generated_time_embeds = meta_model.gen_time_embed(meta_model.meta_timestamps)
        batch_indexes = random.choices(total_indexes, k=args.meta_batch)
        raw_time_steps = meta_model.meta_timestamps[batch_indexes]
        regularization_loss = F.l1_loss(
            generated_time_embeds, meta_model.super_meta_embed, reduction="mean"
        )
        # the future (generated) and present (learned) losses on the same batch
        total_future_losses, total_present_losses = [], []
        future_containers = meta_model.gen_model(generated_time_embeds[batch_indexes])
        present_containers = meta_model.gen_model(
            meta_model.super_meta_embed[batch_indexes]
        )
        for ibatch, time_step in enumerate(raw_time_steps.cpu().tolist()):
            _, (inputs, targets) = xenv(time_step)
            inputs, targets = inputs.to(device), targets.to(device)
            predictions = base_model.forward_with_container(
                inputs, future_containers[ibatch]
            )
            total_future_losses.append(criterion(predictions, targets))
            predictions = base_model.forward_with_container(
                inputs, present_containers[ibatch]
            )
            total_present_losses.append(criterion(predictions, targets))
        with torch.no_grad():
            meta_std = torch.stack(total_future_losses).std().item()
        loss_future = torch.stack(total_future_losses).mean()
        loss_present = torch.stack(total_present_losses).mean()
        total_loss = loss_future + loss_present + regularization_loss
        total_loss.backward()
        optimizer.step()
        # save_best keeps the highest score, so pass the negated loss (lower loss = better)
        success, best_score = meta_model.save_best(-total_loss.item())
        logger.log(
            "{:} [META {:04d}/{:}] loss : {:.4f} +- {:.4f} = {:.4f} + {:.4f} + {:.4f}".format(
                time_string(),
                iepoch,
                args.epochs,
                total_loss.item(),
                meta_std,
                loss_future.item(),
                loss_present.item(),
                regularization_loss.item(),
            )
            + ", batch={:}".format(len(total_future_losses))
            + ", success={:}, best={:.4f}".format(success, -best_score)
            + ", LS={:}/{:}".format(iepoch - last_success_epoch, early_stop_thresh)
            + ", {:}".format(left_time)
        )
        if success:
            last_success_epoch = iepoch
        if iepoch - last_success_epoch >= early_stop_thresh:
            logger.log("Early stop the pre-training at {:}".format(iepoch))
            break
        per_epoch_time.update(time.time() - start_time)
        start_time = time.time()
    meta_model.load_best()
    # save to the final model
    meta_model.set_best_name(final_best_name)
    success, _ = meta_model.save_best(best_score + 1e-6)
    assert success
    logger.log("Save the best model into {:}".format(final_best_name))


def main(args):
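    """Build the environments and models, pre-train the meta-model, then evaluate it online."""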
    prepare_seed(args.rand_seed)
    logger = prepare_logger(args)

    train_env = get_synthetic_env(mode="train", version=args.env_version)
    valid_env = get_synthetic_env(mode="valid", version=args.env_version)
    trainval_env = get_synthetic_env(mode="trainval", version=args.env_version)
    all_env = get_synthetic_env(mode=None, version=args.env_version)
    logger.log("The training environment: {:}".format(train_env))
    logger.log("The validation environment: {:}".format(valid_env))
    logger.log("The trainval environment: {:}".format(trainval_env))
    logger.log("The total environment: {:}".format(all_env))
model_kwargs = dict(
config=dict(model_type="norm_mlp"),
input_dim=all_env.meta_info["input_dim"],
output_dim=all_env.meta_info["output_dim"],
hidden_dims=[args.hidden_dim] * 2,
act_cls="relu",
norm_cls="layer_norm_1d",
)
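    # The base model is a small MLP (two hidden layers of width args.hidden_dim)
    # with ReLU activations and 1-D LayerNorm, sized by the environment meta info.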
base_model = get_model(**model_kwargs)
base_model = base_model.to(args.device)
if all_env.meta_info["task"] == "regression":
criterion = torch.nn.MSELoss()
metric = MSEMetric(True)
elif all_env.meta_info["task"] == "classification":
criterion = torch.nn.CrossEntropyLoss()
metric = Top1AccMetric(True)
else:
raise ValueError(
"This task ({:}) is not supported.".format(all_env.meta_info["task"])
)
shape_container = base_model.get_w_container().to_shape_container()
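    # The shape container records the shape of every base-model weight so that
    # the meta-model can generate weight containers with matching tensors.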
    # pre-train the hypernetwork
    timestamps = trainval_env.get_timestamp(None)
    meta_model = MetaModelV1(
shape_container,
args.layer_dim,
args.time_dim,
timestamps,
seq_length=args.seq_length,
        interval=trainval_env.time_interval,
    )
    meta_model = meta_model.to(args.device)

    logger.log("The base-model has {:} weights.".format(base_model.numel()))
    logger.log("The meta-model has {:} weights.".format(meta_model.numel()))
    logger.log("The base-model is\n{:}".format(base_model))
    logger.log("The meta-model is\n{:}".format(meta_model))

    meta_train_procedure(base_model, meta_model, criterion, trainval_env, args, logger)

    # try to evaluate once
    # online_evaluate(train_env, meta_model, base_model, criterion, args, logger)
    # online_evaluate(valid_env, meta_model, base_model, criterion, args, logger)
    """
    w_containers, loss_meter = online_evaluate(
        all_env, meta_model, base_model, criterion, args, logger, True
    )
    logger.log("In this environment, the total loss-meter is {:}".format(loss_meter))
    """
    _, loss_adapt_v1, metric_adapt_v1 = online_evaluate(
        valid_env, meta_model, base_model, criterion, metric, args, logger, False, False
    )
    _, loss_adapt_v2, metric_adapt_v2 = online_evaluate(
        valid_env, meta_model, base_model, criterion, metric, args, logger, False, True
    )
    logger.log(
        "[Refine-Adapt] loss = {:.6f}, metric = {:.6f}".format(
            loss_adapt_v1, metric_adapt_v1
        )
    )
    logger.log(
        "[Easy-Adapt] loss = {:.6f}, metric = {:.6f}".format(
            loss_adapt_v2, metric_adapt_v2
        )
    )

    save_checkpoint(
        {
            "test_loss_adapt_v1": loss_adapt_v1,
            "test_loss_adapt_v2": loss_adapt_v2,
            "test_metric_adapt_v1": metric_adapt_v1,
            "test_metric_adapt_v2": metric_adapt_v2,
        },
        logger.path(None) / "final-ckp-{:}.pth".format(args.rand_seed),
        logger,
    )
    logger.log("-" * 200 + "\n")
    logger.close()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Learning to Generate Model One Step Ahead (GeMOSA)."
    )
parser.add_argument(
"--save_dir",
type=str,
        default="./outputs/GeMOSA-synthetic/GeMOSA",
        help="The checkpoint directory.",
    )
parser.add_argument(
"--env_version",
type=str,
required=True,
help="The synthetic enviornment version.",
)
    parser.add_argument(
        "--hidden_dim",
        type=int,
        default=16,
        help="The hidden dimension.",
    )
    parser.add_argument(
        "--layer_dim",
        type=int,
        default=16,
        help="The layer chunk dimension.",
    )
    parser.add_argument(
        "--time_dim",
        type=int,
        default=16,
        help="The timestamp dimension.",
    )
    #####
    parser.add_argument(
        "--lr",
        type=float,
        default=0.002,
        help="The initial learning rate for the optimizer (Adam by default).",
    )
    parser.add_argument(
        "--weight_decay",
        type=float,
        default=0.00001,
        help="The weight decay for the optimizer (Adam by default).",
    )
    parser.add_argument(
        "--meta_batch",
        type=int,
        default=64,
        help="The batch size for the meta-model.",
    )
    parser.add_argument(
        "--sampler_enlarge",
        type=int,
        default=5,
        help="Enlarge the #iterations for an epoch.",
    )
parser.add_argument("--epochs", type=int, default=10000, help="The total #epochs.")
    parser.add_argument(
        "--refine_lr",
        type=float,
        default=0.001,
        help="The learning rate for the optimizer during refinement.",
    )
    parser.add_argument(
        "--refine_epochs", type=int, default=150, help="The final refine #epochs."
    )
    parser.add_argument(
        "--early_stop_thresh",
        type=int,
        default=20,
        help="The #epochs for early stop.",
    )
    parser.add_argument(
        "--pretrain_early_stop_thresh",
        type=int,
        default=300,
        help="The #epochs for early stop during pre-training.",
    )
    parser.add_argument(
        "--seq_length", type=int, default=10, help="The sequence length."
    )
    parser.add_argument(
        "--workers", type=int, default=4, help="The number of workers in parallel."
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="The device to run on: cpu or cuda.",
    )
# Random Seed
parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed")
args = parser.parse_args()
if args.rand_seed is None or args.rand_seed < 0:
args.rand_seed = random.randint(1, 100000)
    assert args.save_dir is not None, "The save dir argument cannot be None"
    args.save_dir = "{:}-bs{:}-d{:}_{:}_{:}-s{:}-lr{:}-wd{:}-e{:}-env{:}".format(
        args.save_dir,
        args.meta_batch,
        args.hidden_dim,
        args.layer_dim,
        args.time_dim,
        args.seq_length,
        args.lr,
        args.weight_decay,
        args.epochs,
        args.env_version,
    )
    main(args)