diff --git a/.latent-data/qlib b/.latent-data/qlib
index 253378a..968930e 160000
--- a/.latent-data/qlib
+++ b/.latent-data/qlib
@@ -1 +1 @@
-Subproject commit 253378a44e88a9fcff17d23b589e2d4832f587aa
+Subproject commit 968930e85f4958d16dfc2c5740c02f5c91745b97
diff --git a/README.md b/README.md
index 8ff1e00..8b8d6f4 100644
--- a/README.md
+++ b/README.md
@@ -105,11 +105,11 @@ to download this repo with submodules.
If you find that this project helps your research, please consider citing the related paper:
```
-@article{dong2020autohas,
+@inproceedings{dong2021autohas,
title={{AutoHAS}: Efficient Hyperparameter and Architecture Search},
author={Dong, Xuanyi and Tan, Mingxing and Yu, Adams Wei and Peng, Daiyi and Gabrys, Bogdan and Le, Quoc V},
- journal={arXiv preprint arXiv:2006.03656},
- year={2020}
+ booktitle = {International Conference on Learning Representations (ICLR) Workshop on Neural Architecture Search},
+ year={2021}
}
@article{dong2021nats,
title = {{NATS-Bench}: Benchmarking NAS Algorithms for Architecture Topology and Size},
diff --git a/docs/README_CN.md b/docs/README_CN.md
index a77a81a..d19b647 100644
--- a/docs/README_CN.md
+++ b/docs/README_CN.md
@@ -62,13 +62,13 @@
NATS-Bench |
NATS-Bench: Benchmarking NAS Algorithms for Architecture Topology and Size |
- NATS-Bench.md |
+ NATS-Bench.md |
... |
ENAS / REA / REINFORCE / BOHB |
Please check the original papers. |
- NAS-Bench-201.md NATS-Bench.md |
+ NAS-Bench-201.md NATS-Bench.md |
HPO |
@@ -98,6 +98,12 @@ Some methods use knowledge distillation (KD), which require pre-trained models.
如果您发现该项目对您的科研或工程有帮助,请考虑引用下列的某些文献:
```
+@inproceedings{dong2021autohas,
+ title={{AutoHAS}: Efficient Hyperparameter and Architecture Search},
+ author={Dong, Xuanyi and Tan, Mingxing and Yu, Adams Wei and Peng, Daiyi and Gabrys, Bogdan and Le, Quoc V},
+ booktitle = {International Conference on Learning Representations (ICLR) Workshop on Neural Architecture Search},
+ year={2021}
+}
@article{dong2021nats,
title = {{NATS-Bench}: Benchmarking NAS Algorithms for Architecture Topology and Size},
author = {Dong, Xuanyi and Liu, Lu and Musial, Katarzyna and Gabrys, Bogdan},
diff --git a/exps/trading/baselines.py b/exps/trading/baselines.py
index 18d6df1..27b6e24 100644
--- a/exps/trading/baselines.py
+++ b/exps/trading/baselines.py
@@ -67,14 +67,26 @@ def extend_transformer_settings(alg2configs, name):
return alg2configs
-def remove_PortAnaRecord(alg2configs):
+def refresh_record(alg2configs):
alg2configs = copy.deepcopy(alg2configs)
for key, config in alg2configs.items():
xlist = config["task"]["record"]
new_list = []
for x in xlist:
- if x["class"] != "PortAnaRecord":
+ # remove PortAnaRecord and SignalMseRecord
+ if x["class"] != "PortAnaRecord" and x["class"] != "SignalMseRecord":
new_list.append(x)
+ ## add MultiSegRecord
+ new_list.append(
+ {
+ "class": "MultiSegRecord",
+ "module_path": "qlib.contrib.workflow",
+ "generate_kwargs": {
+ "segments": {"train": "train", "valid": "valid", "test": "test"},
+ "save": True,
+ },
+ }
+ )
config["task"]["record"] = new_list
return alg2configs
@@ -117,7 +129,7 @@ def retrieve_configs():
)
)
alg2configs = extend_transformer_settings(alg2configs, "TSF")
- alg2configs = remove_PortAnaRecord(alg2configs)
+ alg2configs = refresh_record(alg2configs)
print(
"There are {:} algorithms : {:}".format(
len(alg2configs), list(alg2configs.keys())
diff --git a/lib/procedures/q_exps.py b/lib/procedures/q_exps.py
index f31e41a..c1e606a 100644
--- a/lib/procedures/q_exps.py
+++ b/lib/procedures/q_exps.py
@@ -99,7 +99,12 @@ def run_exp(
# Train model
try:
- model = R.load_object(model_obj_name)
+ if hasattr(model, "to"): # Recoverable model
+ device = model.device
+ model = R.load_object(model_obj_name)
+ model.to(device)
+ else:
+ model = R.load_object(model_obj_name)
logger.info("[Find existing object from {:}]".format(model_obj_name))
except OSError:
R.log_params(**flatten_dict(task_config))
@@ -112,16 +117,29 @@ def run_exp(
recorder_root_dir, "model-ckps"
)
model.fit(**model_fit_kwargs)
- R.save_objects(**{model_obj_name: model})
- except:
- raise ValueError("Something wrong.")
+        # move model to CPU for saving
+ if hasattr(model, "to"):
+ model.to("cpu")
+ R.save_objects(**{model_obj_name: model})
+            model.to(None)
+ else:
+ R.save_objects(**{model_obj_name: model})
+    except Exception as e:
+        # NOTE: do not drop into pdb here -- a set_trace() in the
+        # exception path hangs non-interactive/batch runs.
+        # Chain the cause so the original traceback is preserved.
+        raise ValueError("Something wrong: {:}".format(e)) from e
# Get the recorder
recorder = R.get_recorder()
# Generate records: prediction, backtest, and analysis
for record in task_config["record"]:
record = deepcopy(record)
- if record["class"] == "SignalRecord":
+ if record["class"] == "MultiSegRecord":
+ record["kwargs"] = dict(model=model, dataset=dataset, recorder=recorder)
+ sr = init_instance_by_config(record)
+ sr.generate(**record["generate_kwargs"])
+ elif record["class"] == "SignalRecord":
srconf = {"model": model, "dataset": dataset, "recorder": recorder}
record["kwargs"].update(srconf)
sr = init_instance_by_config(record)
diff --git a/lib/trade_models/quant_transformer.py b/lib/trade_models/quant_transformer.py
index 993453b..f56bd81 100644
--- a/lib/trade_models/quant_transformer.py
+++ b/lib/trade_models/quant_transformer.py
@@ -112,6 +112,12 @@ class QuantTransformer(Model):
def use_gpu(self):
return self.device != torch.device("cpu")
+    def to(self, device=None):
+        # None restores the model to its configured device;
+        # otherwise move to the explicitly requested device.
+        if device is None:
+            self.model.to(self.device)
+        else:
+            self.model.to(device)
+
def loss_fn(self, pred, label):
mask = ~torch.isnan(label)
if self.opt_config["loss"] == "mse":
diff --git a/scripts/trade/tsf-all.sh b/scripts/trade/tsf-all.sh
new file mode 100644
index 0000000..323dda5
--- /dev/null
+++ b/scripts/trade/tsf-all.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# bash scripts/trade/tsf-all.sh 0 csi300 0
+# bash scripts/trade/tsf-all.sh 0 csi300 0.1
+# bash scripts/trade/tsf-all.sh 1 all 0
+#
+set -e
+echo script name: $0
+echo $# arguments
+
+if [ "$#" -ne 3 ] ;then
+ echo "Input illegal number of parameters " $#
+ exit 1
+fi
+
+gpu=$1
+market=$2
+drop=$3
+
+channels="6 12 24 32 48 64"
+depths="1 2 3 4 5 6"
+
+for channel in ${channels}
+do
+ for depth in ${depths}
+ do
+ python exps/trading/baselines.py --alg TSF-${depth}x${channel}-d${drop} --gpu ${gpu} --market ${market}
+ done
+done