diff --git a/exps/GeMOSA/main.py b/exps/GeMOSA/main.py
index d7fe8a3..91113fa 100644
--- a/exps/GeMOSA/main.py
+++ b/exps/GeMOSA/main.py
@@ -33,7 +33,6 @@ from xautodl.datasets.synthetic_core import get_synthetic_env
 from xautodl.models.xcore import get_model
 from xautodl.xlayers import super_core, trunc_normal_
 
-from lfna_utils import lfna_setup, train_model, TimeData
 from meta_model import MetaModelV1
 
 
@@ -182,7 +181,8 @@ def meta_train_procedure(base_model, meta_model, criterion, xenv, args, logger):
 
 
 def main(args):
-    logger, model_kwargs = lfna_setup(args)
+    prepare_seed(args.rand_seed)
+    logger = prepare_logger(args)
     train_env = get_synthetic_env(mode="train", version=args.env_version)
     valid_env = get_synthetic_env(mode="valid", version=args.env_version)
     trainval_env = get_synthetic_env(mode="trainval", version=args.env_version)
@@ -191,6 +191,14 @@ def main(args):
     logger.log("The validation enviornment: {:}".format(valid_env))
     logger.log("The trainval enviornment: {:}".format(trainval_env))
     logger.log("The total enviornment: {:}".format(all_env))
+    model_kwargs = dict(
+        config=dict(model_type="norm_mlp"),
+        input_dim=all_env.meta_info["input_dim"],
+        output_dim=all_env.meta_info["output_dim"],
+        hidden_dims=[args.hidden_dim] * 2,
+        act_cls="relu",
+        norm_cls="layer_norm_1d",
+    )
     base_model = get_model(**model_kwargs)
     base_model = base_model.to(args.device)
 
diff --git a/exps/GeMOSA/lfna_utils.py b/exps/GeMOSA/side_utils.py
similarity index 81%
rename from exps/GeMOSA/lfna_utils.py
rename to exps/GeMOSA/side_utils.py
index 809ee70..a9fe522 100644
--- a/exps/GeMOSA/lfna_utils.py
+++ b/exps/GeMOSA/side_utils.py
@@ -8,20 +8,6 @@ from xautodl.procedures import prepare_seed, prepare_logger
 from xautodl.datasets.synthetic_core import get_synthetic_env
 
 
-def lfna_setup(args):
-    prepare_seed(args.rand_seed)
-    logger = prepare_logger(args)
-    model_kwargs = dict(
-        config=dict(model_type="norm_mlp"),
-        input_dim=1,
-        output_dim=1,
-        hidden_dims=[args.hidden_dim] * 2,
-        act_cls="relu",
-        norm_cls="layer_norm_1d",
-    )
-    return logger, model_kwargs
-
-
 def train_model(model, dataset, lr, epochs):
     criterion = torch.nn.MSELoss()
     optimizer = torch.optim.Adam(model.parameters(), lr=lr, amsgrad=True)
diff --git a/xautodl/datasets/math_base_funcs.py b/xautodl/datasets/math_base_funcs.py
index 8bc48de..fc02c39 100644
--- a/xautodl/datasets/math_base_funcs.py
+++ b/xautodl/datasets/math_base_funcs.py
@@ -40,6 +40,9 @@ class MathFunc(abc.ABC):
     def reset_xstr(self, xstr):
         self._xstr = str(xstr)
 
+    def output_shape(self, input_shape):
+        return input_shape
+
     @abc.abstractmethod
     def __call__(self, x):
         raise NotImplementedError
diff --git a/xautodl/datasets/math_dynamic_generator.py b/xautodl/datasets/math_dynamic_generator.py
index 6742799..60c2d27 100644
--- a/xautodl/datasets/math_dynamic_generator.py
+++ b/xautodl/datasets/math_dynamic_generator.py
@@ -47,6 +47,9 @@ class GaussianDGenerator(DynamicGenerator):
     def ndim(self):
         return self._ndim
 
+    def output_shape(self):
+        return (self._ndim,)
+
     def __call__(self, time, num):
         mean_list = [functor(time) for functor in self._mean_functors]
         cov_matrix = [
diff --git a/xautodl/datasets/synthetic_core.py b/xautodl/datasets/synthetic_core.py
index 5df6fe8..1e43672 100644
--- a/xautodl/datasets/synthetic_core.py
+++ b/xautodl/datasets/synthetic_core.py
@@ -33,7 +33,7 @@ def get_synthetic_env(total_timestamp=1600, num_per_task=1000, mode=None, versio
             }
         )
         dynamic_env = SyntheticDEnv(
-            data_generator, oracle_map, time_generator, num_per_task
+            data_generator, oracle_map, time_generator, num_per_task, noise=0.1
         )
         dynamic_env.set_regression()
     elif version.lower() == "v2":
@@ -53,7 +53,7 @@ def get_synthetic_env(total_timestamp=1600, num_per_task=1000, mode=None, versio
             }
         )
         dynamic_env = SyntheticDEnv(
-            data_generator, oracle_map, time_generator, num_per_task
+            data_generator, oracle_map, time_generator, num_per_task, noise=0.1
         )
         dynamic_env.set_regression()
     elif version.lower() == "v3":
@@ -73,7 +73,7 @@ def get_synthetic_env(total_timestamp=1600, num_per_task=1000, mode=None, versio
             }
        )
         dynamic_env = SyntheticDEnv(
-            data_generator, oracle_map, time_generator, num_per_task
+            data_generator, oracle_map, time_generator, num_per_task, noise=0.05
         )
         dynamic_env.set_regression()
     elif version.lower() == "v4":
diff --git a/xautodl/datasets/synthetic_env.py b/xautodl/datasets/synthetic_env.py
index 65e274e..c4839e3 100644
--- a/xautodl/datasets/synthetic_env.py
+++ b/xautodl/datasets/synthetic_env.py
@@ -1,3 +1,4 @@
+import numpy as np
 import torch
 import torch.utils.data as data
 
@@ -44,10 +45,16 @@ class SyntheticDEnv(data.Dataset):
 
     def set_regression(self):
         self._meta_info["task"] = "regression"
+        self._meta_info["input_dim"] = self._data_generator.ndim
+        self._meta_info["output_shape"] = self._oracle_map.output_shape(self._data_generator.output_shape())
+        self._meta_info["output_dim"] = int(np.prod(self._meta_info["output_shape"]))
 
     def set_classification(self, num_classes):
         self._meta_info["task"] = "classification"
+        self._meta_info["input_dim"] = self._data_generator.ndim
         self._meta_info["num_classes"] = int(num_classes)
+        self._meta_info["output_shape"] = self._oracle_map.output_shape(self._data_generator.output_shape())
+        self._meta_info["output_dim"] = int(np.prod(self._meta_info["output_shape"]))
 
     @property
     def oracle_map(self):
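
Usage note: with this patch, the model's input/output dimensions come from the environment's meta_info (populated by set_regression / set_classification) instead of the hard-coded input_dim=1 / output_dim=1 in the removed lfna_setup helper. Below is a minimal sketch of the new flow, assuming the xautodl package from this repo is importable; the "v1" environment version and the hidden width 16 are arbitrary example values (main.py uses args.env_version and args.hidden_dim).

from xautodl.datasets.synthetic_core import get_synthetic_env
from xautodl.models.xcore import get_model

# Build a synthetic environment; set_regression() now records
# input_dim / output_shape / output_dim in meta_info.
env = get_synthetic_env(mode="trainval", version="v1")

model_kwargs = dict(
    config=dict(model_type="norm_mlp"),
    input_dim=env.meta_info["input_dim"],    # data generator's ndim
    output_dim=env.meta_info["output_dim"],  # int(np.prod(output_shape))
    hidden_dims=[16] * 2,                    # example width only
    act_cls="relu",
    norm_cls="layer_norm_1d",
)
model = get_model(**model_kwargs)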