diff --git a/graph_dit/main.py b/graph_dit/main.py
index f3d89e5..5e16301 100644
--- a/graph_dit/main.py
+++ b/graph_dit/main.py
@@ -1,4 +1,5 @@
 # These imports are tricky because they use c++, do not move them
+from tqdm import tqdm
 import os, shutil
 import warnings
 
@@ -144,10 +145,25 @@ def main(cfg: DictConfig):
     else:
         trainer.test(model, datamodule=datamodule, ckpt_path=cfg.general.test_only)
 
+from accelerate import Accelerator
+from accelerate.utils import set_seed, ProjectConfiguration
+
 @hydra.main(
     version_base="1.1", config_path="../configs", config_name="config"
 )
 def test(cfg: DictConfig):
+    accelerator_config = ProjectConfiguration(
+        project_dir=os.path.join(cfg.general.log_dir, cfg.general.name),
+        automatic_checkpoint_naming=True,
+        total_limit=cfg.general.number_checkpoint_limit,
+    )
+    accelerator = Accelerator(
+        mixed_precision=cfg.mixed_precision,
+        project_config=accelerator_config,
+        gradient_accumulation_steps=cfg.train.gradient_accumulation_steps * cfg.train.n_epochs,
+    )
+    set_seed(cfg.train.seed, device_specific=True)
+
     datamodule = dataset.DataModule(cfg)
     datamodule.prepare_data()
     dataset_infos = dataset.DataInfos(datamodule=datamodule, cfg=cfg, dataset=datamodule.dataset)
@@ -177,32 +193,88 @@ def test(cfg: DictConfig):
         os.chdir(cfg.general.resume.split("checkpoints")[0])
     # os.environ["CUDA_VISIBLE_DEVICES"] = cfg.general.gpu_number
     model = Graph_DiT(cfg=cfg, **model_kwargs)
-    trainer = Trainer(
-        gradient_clip_val=cfg.train.clip_grad,
-        # accelerator="cpu",
-        accelerator="gpu"
-        if torch.cuda.is_available() and cfg.general.gpus > 0
-        else "cpu",
-        devices=[cfg.general.gpu_number]
-        if torch.cuda.is_available() and cfg.general.gpus > 0
-        else None,
-        max_epochs=cfg.train.n_epochs,
-        enable_checkpointing=False,
-        check_val_every_n_epoch=cfg.train.check_val_every_n_epoch,
-        val_check_interval=cfg.train.val_check_interval,
-        strategy="ddp" if cfg.general.gpus > 1 else "auto",
-        enable_progress_bar=cfg.general.enable_progress_bar,
-        callbacks=[],
-        reload_dataloaders_every_n_epochs=0,
-        logger=[],
-    )
+    graph_dit_model = model
 
-    if not cfg.general.test_only:
-        print("start testing fit method")
-        trainer.fit(model, datamodule=datamodule, ckpt_path=cfg.general.resume)
-        if cfg.general.save_model:
-            trainer.save_checkpoint(f"checkpoints/{cfg.general.name}/last.ckpt")
-        trainer.test(model, datamodule=datamodule)
+    inference_dtype = torch.float32
+    graph_dit_model.to(accelerator.device, dtype=inference_dtype)
+
+    # optional: explicitly enable gradients on the inner model
+    # graph_dit_model.model.requires_grad_(True)
+    import torch.nn.functional as F
+    optimizer = graph_dit_model.configure_optimizers()
+    # start training
+    for epoch in range(cfg.train.n_epochs):
+        graph_dit_model.train()  # set the model to training mode
+        for batch_data in datamodule.train_dataloader():  # iterate over batches from the training dataloader
+            data_x = F.one_hot(batch_data.x, num_classes=12).float()[:, graph_dit_model.active_index]  # node features
+            data_edge_attr = F.one_hot(batch_data.edge_attr, num_classes=2).float()  # edge features
+
+            # convert to dense format and pass to Graph_DiT
+            dense_data, node_mask = utils.to_dense(data_x, batch_data.edge_index, data_edge_attr, batch_data.batch, graph_dit_model.max_n_nodes)
+            dense_data = dense_data.mask(node_mask)
+
+            X, E = dense_data.X, dense_data.E  # node and edge features
+            y = batch_data.y  # labels
+
+            # forward pass and loss computation
+            pred = graph_dit_model(dense_data)  # feed into the Graph_DiT model
+            loss = graph_dit_model.train_loss(pred, X, E, y, node_mask)
+
+            # optimization step
+            optimizer.zero_grad()
+            loss.backward()
+            optimizer.step()
+
+    # start sampling
+    samples = []
+
+    for i in tqdm(
+        range(cfg.general.n_samples),
desc="Sampling", disable=not cfg.general.enable_progress_bar + ): + batch_size = cfg.train.batch_size + num_steps = cfg.model.diffusion_steps + y = torch.ones(batch_size, num_steps, 1, 1, device=accelerator.device, dtype=inference_dtype) + + # sample from the model + samples_batch = graph_dit_model.sample_batch( + batch_id=i, + batch_size=batch_size, + y=y, + keep_chain=1, + number_chain_steps=num_steps, + save_final=batch_size + ) + samples.append(samples_batch) + + + # trainer = Trainer( + # gradient_clip_val=cfg.train.clip_grad, + # # accelerator="cpu", + # accelerator="gpu" + # if torch.cuda.is_available() and cfg.general.gpus > 0 + # else "cpu", + # devices=[cfg.general.gpu_number] + # if torch.cuda.is_available() and cfg.general.gpus > 0 + # else None, + # max_epochs=cfg.train.n_epochs, + # enable_checkpointing=False, + # check_val_every_n_epoch=cfg.train.check_val_every_n_epoch, + # val_check_interval=cfg.train.val_check_interval, + # strategy="ddp" if cfg.general.gpus > 1 else "auto", + # enable_progress_bar=cfg.general.enable_progress_bar, + # callbacks=[], + # reload_dataloaders_every_n_epochs=0, + # logger=[], + # ) + + # if not cfg.general.test_only: + # print("start testing fit method") + # trainer.fit(model, datamodule=datamodule, ckpt_path=cfg.general.resume) + # if cfg.general.save_model: + # trainer.save_checkpoint(f"checkpoints/{cfg.general.name}/last.ckpt") + # trainer.test(model, datamodule=datamodule) if __name__ == "__main__": test()