use trainer but has bugs
parent d36e1d1077
commit be178bc5ee
@@ -2,20 +2,23 @@ general:
   name: 'graph_dit'
   wandb: 'disabled'
   gpus: 1
-  gpu_number: 2
+  gpu_number: 0
   resume: null
   test_only: null
   sample_every_val: 2500
-  samples_to_generate: 512
+  samples_to_generate: 1000
   samples_to_save: 3
   chains_to_save: 1
   log_every_steps: 50
   number_chain_steps: 8
-  final_model_samples_to_generate: 100
+  final_model_samples_to_generate: 1000
   final_model_samples_to_save: 20
   final_model_chains_to_save: 1
   enable_progress_bar: False
   save_model: True
+  log_dir: '/nfs/data3/hanzhang/nasbenchDiT'
+  number_checkpoint_limit: 3
+  type: 'Trainer'
 model:
   type: 'discrete'
   transition: 'marginal'
@@ -32,7 +35,7 @@ model:
   ensure_connected: True
 train:
   # n_epochs: 5000
-  n_epochs: 500
+  n_epochs: 10
   batch_size: 1200
   lr: 0.0002
   clip_grad: null
@@ -41,8 +44,11 @@ train:
   seed: 0
   val_check_interval: null
   check_val_every_n_epoch: 1
+  gradient_accumulation_steps: 1
 dataset:
   datadir: 'data/'
   task_name: 'nasbench-201'
   guidance_target: 'nasbench-201'
   pin_memory: False
+ppo:
+  clip_param: 1
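The new `general.type`, `train.gradient_accumulation_steps`, and `ppo.clip_param` keys are consumed by `training_step` and `test()` later in this commit. A minimal sketch of how the fields are read, assuming the project composes this YAML into the `cfg: DictConfig` that `test()` receives via Hydra/OmegaConf (the file path below is only illustrative):

# Sketch only: load the YAML above with OmegaConf and read the new keys.
# 'configs/config.yaml' is a placeholder path, not necessarily the repo's layout.
from omegaconf import OmegaConf

cfg = OmegaConf.load('configs/config.yaml')
assert cfg.general.type in ('Trainer', 'accelerator')   # selects the execution path in test()
clip = cfg.ppo.clip_param                                # bounds the PPO ratio in training_step
accum = cfg.train.gradient_accumulation_steps            # forwarded to accelerate.Accelerator
print(clip, accum, cfg.general.number_checkpoint_limit)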
@@ -54,7 +54,9 @@ class BasicGraphMetrics(object):
         covered_nodes = set()
         direct_valid_count = 0
         print(f"generated number: {len(generated)}")
+        print(f"generated: {generated}")
         for graph in generated:
+            print(f"graph: {graph}")
             node_types, edge_types = graph
             direct_valid_flag = True
             direct_valid_count += 1
@@ -815,8 +815,8 @@ class Dataset(InMemoryDataset):
         train_loader = dt.get_data(args.dataset, args.data_loc, args.trainval, args.batch_size, args.augtype, args.repeat, args)
         self.swap_scores = []
         import csv
-        # with open('/nfs/data3/hanzhang/nasbenchDiT/graph_dit/swap_results.csv', 'r') as f:
-        with open('/nfs/data3/hanzhang/nasbenchDiT/graph_dit/swap_results_cifar100.csv', 'r') as f:
+        with open('/nfs/data3/hanzhang/nasbenchDiT/graph_dit/swap_results.csv', 'r') as f:
+        # with open('/nfs/data3/hanzhang/nasbenchDiT/graph_dit/swap_results_cifar100.csv', 'r') as f:
             reader = csv.reader(f)
             header = next(reader)
             data = [row for row in reader]
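This hunk switches the dataset reader from the CIFAR-100 SWAP results file back to swap_results.csv; the rows read into `data` presumably feed `self.swap_scores` (the assignment is outside this hunk). A rough sketch of that parsing, assuming, as `graph_reward_fn` later does, that the first CSV column holds the SWAP score for the architecture at that row index:

# Sketch: turn the CSV rows into per-architecture scores. The column layout is an
# assumption taken from graph_reward_fn, which reads float(row[0]) as the score.
import csv

with open('/nfs/data3/hanzhang/nasbenchDiT/graph_dit/swap_results.csv', 'r') as f:
    reader = csv.reader(f)
    header = next(reader)                      # skip the header row
    swap_scores = [float(row[0]) for row in reader]
print(len(swap_scores), swap_scores[:5])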
@@ -23,6 +23,9 @@ class Graph_DiT(pl.LightningModule):
         self.test_only = cfg.general.test_only
         self.guidance_target = getattr(cfg.dataset, 'guidance_target', None)
+
+        from nas_201_api import NASBench201API as API
+        self.api = API('/nfs/data3/hanzhang/nasbenchDiT/graph_dit/NAS-Bench-201-v1_1-096897.pth')
 
         input_dims = dataset_infos.input_dims
         output_dims = dataset_infos.output_dims
         nodes_dist = dataset_infos.nodes_dist
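The `nas_201_api` handle added here is what `graph_reward_fn` later uses to map an architecture string to a benchmark index. A small usage sketch (the arch string below is just an example in NAS-Bench-201's standard format; the checkpoint path is the one from the diff):

# Sketch: look up a NAS-Bench-201 architecture by its canonical string.
from nas_201_api import NASBench201API as API

api = API('/nfs/data3/hanzhang/nasbenchDiT/graph_dit/NAS-Bench-201-v1_1-096897.pth')
arch = '|nor_conv_3x3~0|+|skip_connect~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|'
idx = api.query_index_by_arch(arch)   # row index used to index swap_scores
print(idx)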
@@ -79,6 +82,7 @@ class Graph_DiT(pl.LightningModule):
         self.node_dist = nodes_dist
         self.active_index = active_index
         self.dataset_info = dataset_infos
+        self.cur_epoch = 0
 
         self.train_loss = TrainLossDiscrete(self.cfg.model.lambda_train)
 
@@ -162,25 +166,81 @@ class Graph_DiT(pl.LightningModule):
         return pred
 
     def training_step(self, data, i):
-        data_x = F.one_hot(data.x, num_classes=12).float()[:, self.active_index]
-        data_edge_attr = F.one_hot(data.edge_attr, num_classes=2).float()
-
-        dense_data, node_mask = utils.to_dense(data_x, data.edge_index, data_edge_attr, data.batch, self.max_n_nodes)
-        dense_data = dense_data.mask(node_mask)
-        X, E = dense_data.X, dense_data.E
-        noisy_data = self.apply_noise(X, E, data.y, node_mask)
-        pred = self.forward(noisy_data)
-        loss = self.train_loss(masked_pred_X=pred.X, masked_pred_E=pred.E, pred_y=pred.y,
-                               true_X=X, true_E=E, true_y=data.y, node_mask=node_mask,
-                               log=i % self.log_every_steps == 0)
-        # print(f'training loss: {loss}, epoch: {self.current_epoch}, batch: {i}\n, pred type: {type(pred)}, pred.X shape: {type(pred.X)}, {pred.X.shape}, pred.E shape: {type(pred.E)}, {pred.E.shape}')
-        self.train_metrics(masked_pred_X=pred.X, masked_pred_E=pred.E, true_X=X, true_E=E,
-                           log=i % self.log_every_steps == 0)
-        self.log(f'loss', loss, batch_size=X.size(0), sync_dist=True)
-        print(f"training loss: {loss}")
-        with open("training-loss.csv", "a") as f:
-            f.write(f"{loss}, {i}\n")
-        return {'loss': loss}
+        if self.cfg.general.type != 'accelerator' and self.current_epoch > self.cfg.train.n_epochs / 5 * 4:
+            samples_left_to_generate = self.cfg.general.samples_to_generate
+            samples_left_to_save = self.cfg.general.samples_to_save
+            chains_left_to_save = self.cfg.general.chains_to_save
+
+            samples, all_ys, batch_id = [], [], 0
+
+            def graph_reward_fn(graphs, true_graphs=None, device=None, reward_model='swap'):
+                rewards = []
+                if reward_model == 'swap':
+                    import csv
+                    with open('/nfs/data3/hanzhang/nasbenchDiT/graph_dit/swap_results.csv', 'r') as f:
+                        reader = csv.reader(f)
+                        header = next(reader)
+                        data = [row for row in reader]
+                    swap_scores = [float(row[0]) for row in data]
+                    for graph in graphs:
+                        node_tensor = graph[0]
+                        node = node_tensor.cpu().numpy().tolist()
+
+                        def nodes_to_arch_str(nodes):
+                            num_to_op = ['input', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3', 'skip_connect', 'none', 'output']
+                            nodes_str = [num_to_op[node] for node in nodes]
+                            arch_str = '|' + nodes_str[1] + '~0|+' + \
+                                       '|' + nodes_str[2] + '~0|' + nodes_str[3] + '~1|+' + \
+                                       '|' + nodes_str[4] + '~0|' + nodes_str[5] + '~1|' + nodes_str[6] + '~2|'
+                            return arch_str
+
+                        arch_str = nodes_to_arch_str(node)
+                        reward = swap_scores[self.api.query_index_by_arch(arch_str)]
+                        rewards.append(reward)
+                return torch.tensor(rewards, dtype=torch.float32, requires_grad=True).unsqueeze(0).to(device)
+
+            old_log_probs = None
+
+            bs = 1 * self.cfg.train.batch_size
+            to_generate = min(samples_left_to_generate, bs)
+            to_save = min(samples_left_to_save, bs)
+            chains_save = min(chains_left_to_save, bs)
+            # batch_y = test_y_collection[batch_id : batch_id + to_generate]
+            batch_y = torch.ones(to_generate, self.ydim_output, device=self.device)
+
+            cur_sample, log_probs = self.sample_batch(batch_id, to_generate, batch_y, save_final=to_save,
+                                                      keep_chain=chains_save, number_chain_steps=self.number_chain_steps)
+            # samples = samples + cur_sample
+            samples.append(cur_sample)
+            reward = graph_reward_fn(cur_sample, device=self.device)
+            advantages = (reward - torch.mean(reward)) / (torch.std(reward) + 1e-6)
+            if old_log_probs is None:
+                old_log_probs = log_probs.clone()
+            ratio = torch.exp(log_probs - old_log_probs)
+            print(f"ratio: {ratio.shape}, advantages: {advantages.shape}")
+            unclipped_loss = -advantages * ratio
+            clipped_loss = -advantages * torch.clamp(ratio, 1.0 - self.cfg.ppo.clip_param, 1.0 + self.cfg.ppo.clip_param)
+            loss = torch.mean(torch.max(unclipped_loss, clipped_loss))
+            return {'loss': loss}
+        else:
+            data_x = F.one_hot(data.x, num_classes=12).float()[:, self.active_index]
+            data_edge_attr = F.one_hot(data.edge_attr, num_classes=2).float()
+
+            dense_data, node_mask = utils.to_dense(data_x, data.edge_index, data_edge_attr, data.batch, self.max_n_nodes)
+            dense_data = dense_data.mask(node_mask)
+            X, E = dense_data.X, dense_data.E
+            noisy_data = self.apply_noise(X, E, data.y, node_mask)
+            pred = self.forward(noisy_data)
+            loss = self.train_loss(masked_pred_X=pred.X, masked_pred_E=pred.E, pred_y=pred.y,
+                                   true_X=X, true_E=E, true_y=data.y, node_mask=node_mask,
+                                   log=i % self.log_every_steps == 0)
+            # print(f'training loss: {loss}, epoch: {self.current_epoch}, batch: {i}\n, pred type: {type(pred)}, pred.X shape: {type(pred.X)}, {pred.X.shape}, pred.E shape: {type(pred.E)}, {pred.E.shape}')
+            self.train_metrics(masked_pred_X=pred.X, masked_pred_E=pred.E, true_X=X, true_E=E,
+                               log=i % self.log_every_steps == 0)
+            self.log(f'loss', loss, batch_size=X.size(0), sync_dist=True)
+            print(f"training loss: {loss}")
+            with open("training-loss.csv", "a") as f:
+                f.write(f"{loss}, {i}\n")
+            return {'loss': loss}
 
 
     def configure_optimizers(self):
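The new branch above is a PPO-style clipped surrogate on sampled graphs: the reward is a SWAP score looked up by NAS-Bench-201 arch string, advantages are the rewards standardized within the batch, and the ratio is clipped by `cfg.ppo.clip_param`. Because `old_log_probs` is initialized from the log-probs just sampled, the ratio is 1 on the first step. A self-contained sketch of that loss on dummy tensors (the names here are illustrative, not the module's attributes):

# Sketch of the PPO clipped objective used above, on made-up data.
import torch

log_probs = torch.randn(1, 8, requires_grad=True)    # per-sample log-probs from the sampler
old_log_probs = log_probs.detach().clone()           # the commit clones without detaching
reward = torch.rand(1, 8)                             # e.g. SWAP scores of the sampled archs
clip_param = 1.0                                      # cfg.ppo.clip_param in the config above

advantages = (reward - reward.mean()) / (reward.std() + 1e-6)
ratio = torch.exp(log_probs - old_log_probs)
unclipped = -advantages * ratio
clipped = -advantages * torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param)
loss = torch.max(unclipped, clipped).mean()           # pessimistic (clipped) surrogate
loss.backward()
print(loss.item())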
@@ -196,14 +256,15 @@ class Graph_DiT(pl.LightningModule):
 
     def on_train_epoch_start(self) -> None:
         if self.current_epoch / self.trainer.max_epochs in [0.25, 0.5, 0.75, 1.0]:
-            print("Starting train epoch {}/{}...".format(self.current_epoch, self.trainer.max_epochs))
+        # if self.cur_epoch / self.cfg.train.n_epochs in [0.25, 0.5, 0.75, 1.0]:
+            print("Starting train epoch {}/{}...".format(self.cur_epoch, self.cfg.train.n_epochs))
         self.start_epoch_time = time.time()
         self.train_loss.reset()
         self.train_metrics.reset()
 
     def on_train_epoch_end(self) -> None:
 
-        if self.current_epoch / self.trainer.max_epochs in [0.25, 0.5, 0.75, 1.0]:
+        if self.current_epoch / self.cfg.train.n_epochs in [0.25, 0.5, 0.75, 1.0]:
            log = True
        else:
            log = False
@@ -240,6 +301,7 @@ class Graph_DiT(pl.LightningModule):
                    self.val_X_logp.compute(), self.val_E_logp.compute()]
 
         if self.current_epoch / self.trainer.max_epochs in [0.25, 0.5, 0.75, 1.0]:
+        # if self.cur_epoch / self.cfg.train.n_epochs in [0.25, 0.5, 0.75, 1.0]:
             print(f"Epoch {self.current_epoch}: Val NLL {metrics[0] :.2f} -- Val Atom type KL {metrics[1] :.2f} -- ",
                   f"Val Edge type KL: {metrics[2] :.2f}", 'Val loss: %.2f \t Best : %.2f\n' % (metrics[0], self.best_val_nll))
             with open("validation-metrics.csv", "a") as f:
@@ -336,7 +398,7 @@ class Graph_DiT(pl.LightningModule):
         print(f"Epoch {self.current_epoch}: Test NLL {metrics[0] :.2f} -- Test Atom type KL {metrics[1] :.2f} -- ",
               f"Test Edge type KL: {metrics[2] :.2f}")
 
-        ## final epcoh
+        ## final epoch
         samples_left_to_generate = self.cfg.general.final_model_samples_to_generate
         samples_left_to_save = self.cfg.general.final_model_samples_to_save
         chains_left_to_save = self.cfg.general.final_model_chains_to_save
@@ -359,9 +421,9 @@ class Graph_DiT(pl.LightningModule):
             # batch_y = test_y_collection[batch_id : batch_id + to_generate]
             batch_y = torch.ones(to_generate, self.ydim_output, device=self.device)
 
-            cur_sample = self.sample_batch(batch_id, to_generate, batch_y, save_final=to_save,
+            cur_sample, log_probs = self.sample_batch(batch_id, to_generate, batch_y, save_final=to_save,
                                            keep_chain=chains_save, number_chain_steps=self.number_chain_steps)
-            samples = samples + cur_sample
+            samples.append(cur_sample)
 
             all_ys.append(batch_y)
             batch_id += to_generate
@@ -601,6 +663,12 @@ class Graph_DiT(pl.LightningModule):
 
         assert (E == torch.transpose(E, 1, 2)).all()
 
+        if self.cfg.general.type != 'accelerator':
+            if self.trainer.training or self.trainer.validating:
+                total_log_probs = torch.zeros([self.cfg.general.samples_to_generate, 10], device=self.device)
+            elif self.trainer.testing:
+                total_log_probs = torch.zeros([self.cfg.general.final_model_samples_to_generate, 10], device=self.device)
+
         # Iteratively sample p(z_s | z_t) for t = 1, ..., T, with s = t - 1.
         for s_int in reversed(range(0, self.T)):
             s_array = s_int * torch.ones((batch_size, 1)).type_as(y)
@@ -609,21 +677,24 @@ class Graph_DiT(pl.LightningModule):
             t_norm = t_array / self.T
 
             # Sample z_s
-            sampled_s, discrete_sampled_s = self.sample_p_zs_given_zt(s_norm, t_norm, X, E, y, node_mask)
+            sampled_s, discrete_sampled_s, log_probs = self.sample_p_zs_given_zt(s_norm, t_norm, X, E, y, node_mask)
             X, E, y = sampled_s.X, sampled_s.E, sampled_s.y
+            total_log_probs += log_probs
 
         # Sample
         sampled_s = sampled_s.mask(node_mask, collapse=True)
         X, E, y = sampled_s.X, sampled_s.E, sampled_s.y
 
-        molecule_list = []
+        graph_list = []
         for i in range(batch_size):
             n = n_nodes[i]
-            atom_types = X[i, :n].cpu()
+            node_types = X[i, :n].cpu()
             edge_types = E[i, :n, :n].cpu()
-            molecule_list.append([atom_types, edge_types])
+            graph_list.append((node_types, edge_types))
 
-        return molecule_list
+        total_log_probs = torch.sum(total_log_probs, dim=-1)
+
+        return graph_list, total_log_probs
 
     def sample_p_zs_given_zt(self, s, t, X_t, E_t, y_t, node_mask):
         """Samples from zs ~ p(zs | zt). Only used during sampling.
@@ -675,6 +746,14 @@ class Graph_DiT(pl.LightningModule):
         # with condition = P_t(A_{t-1} |A_t, y)
         prob_X, prob_E, pred = get_prob(noisy_data)
 
+        log_prob_X = torch.log(torch.gather(prob_X, -1, X_t.long()).squeeze(-1))  # bs, n
+        log_prob_E = torch.log(torch.gather(prob_E, -1, E_t.long()).squeeze(-1))  # bs, n, n
+
+        # Sum the log_prob across dimensions for total log_prob
+        log_prob_X = log_prob_X.sum(dim=-1)
+        log_prob_E = log_prob_E.sum(dim=(1, 2))
+
+        log_probs = torch.cat([log_prob_X, log_prob_E], dim=-1)
         ### Guidance
         if self.guidance_target is not None and self.guide_scale is not None and self.guide_scale != 1:
             uncon_prob_X, uncon_prob_E, pred = get_prob(noisy_data, unconditioned=True)
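The added lines compute per-node and per-edge log-probabilities of the sampled states and reduce them for the PPO ratio (the commit concatenates the node and edge terms and sums them later; the sketch below simply adds them). A minimal sketch of the gather-and-sum pattern, assuming integer class indices with a trailing singleton dimension; the real `X_t`/`E_t` in the module may be one-hot and need an argmax first:

# Sketch: log-probability of sampled discrete node/edge states under the model's categoricals.
import torch

bs, n, kx, ke = 2, 4, 5, 2
prob_X = torch.softmax(torch.randn(bs, n, kx), dim=-1)        # node class probabilities
prob_E = torch.softmax(torch.randn(bs, n, n, ke), dim=-1)     # edge class probabilities
X_idx = torch.randint(kx, (bs, n, 1))                          # sampled node classes
E_idx = torch.randint(ke, (bs, n, n, 1))                       # sampled edge classes

log_prob_X = torch.log(torch.gather(prob_X, -1, X_idx).squeeze(-1)).sum(dim=-1)      # (bs,)
log_prob_E = torch.log(torch.gather(prob_E, -1, E_idx).squeeze(-1)).sum(dim=(1, 2))  # (bs,)
log_prob_graph = log_prob_X + log_prob_E
print(log_prob_graph.shape)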
@@ -810,4 +889,4 @@ class Graph_DiT(pl.LightningModule):
         out_one_hot = utils.PlaceHolder(X=X_s, E=E_s, y=y_t)
         out_discrete = utils.PlaceHolder(X=X_s, E=E_s, y=y_t)
 
-        return out_one_hot.mask(node_mask).type_as(y_t), out_discrete.mask(node_mask, collapse=True).type_as(y_t)
+        return out_one_hot.mask(node_mask).type_as(y_t), out_discrete.mask(node_mask, collapse=True).type_as(y_t), log_probs
@@ -177,32 +177,92 @@ def test(cfg: DictConfig):
         os.chdir(cfg.general.resume.split("checkpoints")[0])
     # os.environ["CUDA_VISIBLE_DEVICES"] = cfg.general.gpu_number
     model = Graph_DiT(cfg=cfg, **model_kwargs)
-    trainer = Trainer(
-        gradient_clip_val=cfg.train.clip_grad,
-        # accelerator="cpu",
-        accelerator="gpu"
-        if torch.cuda.is_available() and cfg.general.gpus > 0
-        else "cpu",
-        devices=[cfg.general.gpu_number]
-        if torch.cuda.is_available() and cfg.general.gpus > 0
-        else None,
-        max_epochs=cfg.train.n_epochs,
-        enable_checkpointing=False,
-        check_val_every_n_epoch=cfg.train.check_val_every_n_epoch,
-        val_check_interval=cfg.train.val_check_interval,
-        strategy="ddp" if cfg.general.gpus > 1 else "auto",
-        enable_progress_bar=cfg.general.enable_progress_bar,
-        callbacks=[],
-        reload_dataloaders_every_n_epochs=0,
-        logger=[],
-    )
 
-    if not cfg.general.test_only:
-        print("start testing fit method")
-        trainer.fit(model, datamodule=datamodule, ckpt_path=cfg.general.resume)
-        if cfg.general.save_model:
-            trainer.save_checkpoint(f"checkpoints/{cfg.general.name}/last.ckpt")
-        trainer.test(model, datamodule=datamodule)
+    if cfg.general.type == "accelerator":
+        graph_dit_model = model
+
+        from accelerate import Accelerator
+        from accelerate.utils import set_seed, ProjectConfiguration
+
+        accelerator_config = ProjectConfiguration(
+            project_dir=os.path.join(cfg.general.log_dir, cfg.general.name),
+            automatic_checkpoint_naming=True,
+            total_limit=cfg.general.number_checkpoint_limit,
+        )
+        accelerator = Accelerator(
+            mixed_precision='no',
+            project_config=accelerator_config,
+            # gradient_accumulation_steps=cfg.train.gradient_accumulation_steps * cfg.train.n_epochs,
+            gradient_accumulation_steps=cfg.train.gradient_accumulation_steps,
+        )
+
+        optimizer = graph_dit_model.configure_optimizers()
+
+        train_dataloader = datamodule.train_dataloader()
+        train_dataloader = accelerator.prepare(train_dataloader)
+        val_dataloader = datamodule.val_dataloader()
+        val_dataloader = accelerator.prepare(val_dataloader)
+        test_dataloader = datamodule.test_dataloader()
+        test_dataloader = accelerator.prepare(test_dataloader)
+
+        optimizer, graph_dit_model = accelerator.prepare(optimizer, graph_dit_model)
+
+        # train_epoch
+        from pytorch_lightning import seed_everything
+        seed_everything(cfg.train.seed)
+        for epoch in range(cfg.train.n_epochs):
+            print(f"Epoch {epoch}")
+            graph_dit_model.train()
+            graph_dit_model.cur_epoch = epoch
+            graph_dit_model.on_train_epoch_start()
+            for batch in train_dataloader:
+                optimizer.zero_grad()
+                loss = graph_dit_model.training_step(batch, epoch)['loss']
+                accelerator.backward(loss)
+                optimizer.step()
+            graph_dit_model.on_train_epoch_end()
+            for batch in val_dataloader:
+                if epoch % cfg.train.check_val_every_n_epoch == 0:
+                    graph_dit_model.eval()
+                    graph_dit_model.on_validation_epoch_start()
+                    graph_dit_model.validation_step(batch, epoch)
+                    graph_dit_model.on_validation_epoch_end()
+
+        # test_epoch
+
+        graph_dit_model.test()
+        graph_dit_model.on_test_epoch_start()
+        for batch in test_dataloader:
+            graph_dit_model.test_step(batch, epoch)
+        graph_dit_model.on_test_epoch_end()
+
+    elif cfg.general.type == "Trainer":
+        trainer = Trainer(
+            gradient_clip_val=cfg.train.clip_grad,
+            # accelerator="cpu",
+            accelerator="gpu"
+            if torch.cuda.is_available() and cfg.general.gpus > 0
+            else "cpu",
+            devices=[cfg.general.gpu_number]
+            if torch.cuda.is_available() and cfg.general.gpus > 0
+            else None,
+            max_epochs=cfg.train.n_epochs,
+            enable_checkpointing=False,
+            check_val_every_n_epoch=cfg.train.check_val_every_n_epoch,
+            val_check_interval=cfg.train.val_check_interval,
+            strategy="ddp" if cfg.general.gpus > 1 else "auto",
+            enable_progress_bar=cfg.general.enable_progress_bar,
+            callbacks=[],
+            reload_dataloaders_every_n_epochs=0,
+            logger=[],
+        )
+
+        if not cfg.general.test_only:
+            print("start testing fit method")
+            trainer.fit(model, datamodule=datamodule, ckpt_path=cfg.general.resume)
+            if cfg.general.save_model:
+                trainer.save_checkpoint(f"checkpoints/{cfg.general.name}/last.ckpt")
+            trainer.test(model, datamodule=datamodule)
 
 if __name__ == "__main__":
     test()
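When `general.type` is 'accelerator', the commit drives the LightningModule's hooks by hand with Hugging Face Accelerate instead of `Trainer.fit`. For reference, the core Accelerate pattern that loop follows looks like this (a generic sketch with placeholder model and data, not the project's objects):

# Generic Accelerate training-loop skeleton that the new code path is built on.
import torch
from accelerate import Accelerator

model = torch.nn.Linear(8, 1)                      # placeholder model
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-4)
data = [(torch.randn(4, 8), torch.randn(4, 1)) for _ in range(10)]
loader = torch.utils.data.DataLoader(data, batch_size=None)

accelerator = Accelerator(gradient_accumulation_steps=1)
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)

for epoch in range(2):
    model.train()
    for x, y in loader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)                 # replaces loss.backward()
        optimizer.step()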
File diff suppressed because one or more lines are too long