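"""coach.py

Training entry point: wraps a PLUSFlow LightningModule together with its
optimizer, LR scheduler, and run bookkeeping (Clerk / RunManager), and drives
training via pytorch_lightning.Trainer.
"""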
import os
import sys
from abc import ABCMeta

import numpy as np
import pytorch_lightning as pl
import torch

import usflow.data
import usflow.optim
from clerk import Clerk
from run_manager import RunManager
from usflow import options
from usflow.plusflow import PLUSFlow
class Coach(metaclass=ABCMeta):
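    """Orchestrates a single training run.

    Builds the PLUSFlow model, dataloaders, optimizer, and LR scheduler,
    restores saved state according to args.run_start, and hands everything
    to a pytorch_lightning Trainer.
    """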
def on_validation_epoch_end(self, trainer, pl_module: pl.LightningModule) -> None:
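        # Hook with a pl.Callback-style signature; PLUSFlow (which receives a
        # reference to this coach in run()) is presumably expected to forward
        # Lightning's validation-epoch-end event here so the coach can
        # checkpoint after every validation pass.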
print("validation epoch end")
# if self.args.name_decisive_loss is not None:
# if self.val_metrics[self.args.name_decisive_loss] < self.val_loss_min:
# self.val_loss_min = self.val_metrics[self.args.name_decisive_loss]
# self.save_state(tag="best")
print("save model")
self.save_state()
def __init__(self, args):
print("")
self.clerk = Clerk(args)
self.args = self.clerk.args
self.run_dir = self.clerk.run_dir
# self.set_seed(self.args.train_seed)
# Load Optimizer
self.coach_state_dict_name = self.args.coach_state_dict_name
pl.seed_everything(self.args.train_seed)
        # note: move the model to CUDA before the optimizer is initialized,
        # because .cuda() creates new parameter objects
self.model = PLUSFlow(args=self.args)
if self.args.wandb_log_model:
self.clerk.wandb_log_model(self.model)
# self.val_epoch_length = 200 #len(self.val_dataloader)
# self.val_steps = 0
# self.train_losses_acc = {}
# self.train_states_acc = {}
# self.val_metrics_acc = {}
# self.val_images_acc = {}
# self.train_metrics = {}
# self.val_metrics = {}
# self.train_time_start = time.time()
# self.val_time_start = time.time()
# if torch.cuda.device_count() > 1:
# print("Let's use", torch.cuda.device_count(), "GPUs!")
# # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
# self.model = torch.nn.DataParallel(self.model)
# self.model.to(self.device)
def run(self):
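        """Build dataloaders, optimizer, and LR scheduler, then fit the model."""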
self.args.dataloader_device = None # self.device
self.args.dataloader_pin_memory = False
if self.args.dataloader_num_workers > 0:
self.args.dataloader_device = "cpu"
self.args.dataloader_pin_memory = True
self.my_train_dataloader = usflow.data.create_train_dataloader(self.args)
self.my_val_dataloader = usflow.data.create_val_dataloader(self.args)
print("len train dataloader", len(self.my_train_dataloader))
if self.args.train_epoch_length is not None:
self.epoch_length = self.args.train_epoch_length
else:
self.epoch_length = len(self.my_train_dataloader)
num_params = 0
for param in self.model.parameters():
num_params += param.numel()
print("model has", num_params, "parameters")
# self.model = self.model.cuda()
self.optimizer = usflow.optim.load_optimizer(
self.args.optimization,
filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.args.lr,
momentum=self.args.momentum,
weight_decay=self.args.weight_decay,
)
self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
self.optimizer,
milestones=self.args.lr_scheduler_steps,
gamma=0.5,
last_epoch=-1,
)
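        # Probe one training batch to detect and freeze parameters that never
        # receive gradients; this keeps DDP from waiting on unused parameters.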
self.find_unused_parameters()
self.load_state(tag=self.args.model_load_tag)
self.model = self.model.cuda()
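        # Optimizer state restored from disk lives on the CPU; move every
        # tensor in it onto the GPU so it matches the model's device.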
for state in self.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
"""
for state in self.lr_scheduler.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
"""
self.model.optimizer = self.optimizer
self.model.lr_scheduler = self.lr_scheduler
self.model.clerk = self.clerk
self.model.coach = self
self.model.val_dataloader_length = len(self.my_val_dataloader)
if self.args.debug_mode:
if len(self.my_train_dataloader) * 0.01 > 1:
self.args.train_limit_batches = 0.01
if len(self.my_val_dataloader) * 0.01 > 1:
self.args.val_limit_batches = 0.01
pl_trainer = pl.Trainer(
checkpoint_callback=False,
max_epochs=self.args.train_num_epochs_max,
gpus=-1,
accelerator="ddp",
accumulate_grad_batches=self.args.train_accumulate_grad_batches,
precision=self.args.train_precision,
sync_batchnorm=True,
limit_train_batches=self.args.train_limit_batches,
limit_val_batches=self.args.val_limit_batches,
check_val_every_n_epoch=self.args.val_every_n_epoch,
# global_step=self.optimizer.global_step
# current_epoch=self.lr_scheduler.last_epoch,
)
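        # Resume the epoch counter from the scheduler so a continued run picks
        # up where the LR schedule left off (this pokes at Trainer internals,
        # which may differ across pytorch_lightning versions).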
pl_trainer.current_epoch = self.lr_scheduler.last_epoch
pl_trainer.fit(self.model, self.my_train_dataloader, self.my_val_dataloader)
def save_state(self, tag="latest"):
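        """Persist model, optimizer, scheduler, and coach state under `tag`."""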
self.clerk.save_model_state(self.model, tag)
self.clerk.save_optimizer_state(self.optimizer, tag)
self.clerk.save_lr_scheduler_state(self.lr_scheduler, tag)
self.clerk.save_coach_state(self, tag)
def load_state(self, tag="latest"):
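        """Restore run state according to args.run_start.

        "new": fresh coach state, optionally preloading selected model modules;
        "continue": resume model and optimizer state from the saved run;
        "branch": fork selected modules of model and optimizer into a new run.
        """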
if self.args.run_start == "new":
self.set_coach_state_default()
if self.args.model_load_modules is not None:
self.clerk.load_model_state(
self.model.architecture, tag, modules=self.args.model_load_modules
)
else:
if self.args.run_start == "continue":
self.clerk.load_optimizer_state(self.optimizer, tag)
self.clerk.load_model_state(self.model.architecture, tag)
elif self.args.run_start == "branch":
self.clerk.load_optimizer_state(
self.optimizer, tag, modules=self.args.model_load_modules
)
self.clerk.load_model_state(
self.model.architecture, tag, modules=self.args.model_load_modules
)
else:
print("error: unknown run_start passed", self.args.run_start)
self.clerk.load_lr_scheduler_state(self.lr_scheduler, tag)
self.clerk.load_coach_state(self, tag)
def get_coach_state_dict(self):
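        """Collect the coach's resumable state (epoch, best decisive loss)."""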
coach_state_dict = {}
coach_state_dict["epoch"] = self.epoch
if self.args.name_decisive_loss is not None:
coach_state_dict[self.args.name_decisive_loss] = self.val_loss_min
return coach_state_dict
def set_coach_state_dict(self, coach_state_dict):
if "epoch" in coach_state_dict:
self.epoch = coach_state_dict["epoch"]
if self.args.name_decisive_loss in coach_state_dict:
self.val_loss_min = coach_state_dict[self.args.name_decisive_loss]
def set_coach_state_default(self):
self.epoch = 0
self.val_loss_min = np.inf
def find_unused_parameters(self):
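        """Run one forward/backward pass on a single-sample batch and disable
        gradients for any parameter whose grad is still None afterwards, so
        distributed training does not wait on parameters the loss never uses.
        """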
self.optimizer.zero_grad()
for batch_idx, batch in enumerate(self.my_train_dataloader):
for i in range(len(batch)):
batch[i] = batch[i][:1].to(self.model.device)
losses, states = self.model.loss_train.calc_losses(batch, batch_idx)
losses["total"].backward()
break
for name, param in self.model.named_parameters():
if param.requires_grad:
                if param.grad is None:
print(name, "None")
param.requires_grad = False
else:
pass
"""
if torch.sum(param.grad.isinf()) > 0:
print(name, "inf")
if torch.sum(param.grad.isnan()) > 0:
print(name, "nan")
if torch.sum(param.grad == 0.) > 0:
print(name, "zero")
"""
self.optimizer.zero_grad()
def main():
print("test branch leo")
print("torch version", torch.__version__)
print("pytorch-lightning version: ", pl.__version__)
print("environ PATH: ", os.getenv("PATH", "not found"))
print("repro directory", os.path.dirname(os.path.realpath(__file__)))
# self.find_unused_parameters = True
repro_dir = None
# repro_dir = os.path.dirname(os.path.realpath(__file__))
if repro_dir is None:
repro_dir = "."
# os.chdir(repro_dir)
parser = options.setup_comon_options()
args = parser.parse_args()
args.repro_dir = repro_dir
# args.model_load_modules
# args = args
    args.debug_mode = "_pydev_bundle.pydev_log" in sys.modules
if args.debug_mode:
print("DEBUG MODE")
args.dataloader_num_workers = 0
run_manager = RunManager(args)
coach = Coach(run_manager.args)
coach.run()
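# Typical launch, assuming the usflow option parser exposes these args as CLI
# flags (the flag names below are illustrative, not confirmed by this file):
#   python coach.py --run_start new --train_seed 0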
if __name__ == "__main__":
main()