Commit
log everything to logfile
sebamenabar committed Jun 15, 2020
1 parent 4781d01 commit ecea9af
Showing 2 changed files with 45 additions and 35 deletions.
3 changes: 3 additions & 0 deletions src/base_pl_model.py
@@ -47,6 +47,9 @@ def write(self, message):
         sys.stderr.flush()
         self.log.flush()
 
+    def close(self):
+        pass
+
     def flush(self):
         pass
 
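The Logger and ErrLogger classes themselves are not part of this hunk; only the tail of write() and the new close() stub are visible, and the sys.stderr.flush() in the context lines suggests the hunk sits inside ErrLogger. A minimal sketch of the tee-style stream wrapper these methods imply might look like the following; the echo to the real terminal stream and the constructor signature are assumptions, not code from this repository:

import sys

class Logger:
    # Hypothetical sketch: tee every message to the real stdout and a log file.
    def __init__(self, log):
        self.log = log

    def write(self, message):
        sys.__stdout__.write(message)  # echo to the real terminal stream
        self.log.write(message)
        sys.__stdout__.flush()
        self.log.flush()

    def close(self):
        # No-op: the wrapper does not own the file handle; the caller's
        # `with open(...)` block is responsible for closing it.
        pass

    def flush(self):
        pass

An ErrLogger counterpart would presumably write to sys.__stderr__ instead, which is consistent with the sys.stderr.flush() visible above. The close() stub matters because some libraries call close() on whatever object sys.stdout points to.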
77 changes: 42 additions & 35 deletions src/main.py
@@ -25,38 +25,45 @@
 
 # Prints should be done after the init log
 model.init_log(vars(args))
-with open(osp.join(model.exp_dir, "logfile.log"), "a") as log:
-    sys.stdout = Logger(log)
-    sys.stderr = ErrLogger(log)
-
-    if torch.cuda.is_available():
-        print("GPUS:", os.environ["CUDA_VISIBLE_DEVICES"])
-        print(torch.cuda.get_device_name())
-    pp.pprint(vars(args))
-    pp.pprint(cfg)
-
-    loggers = model.make_lightning_loggers()
-    default_ckpt_callback_kwargs = {
-        "filepath": osp.join(model.exp_dir, "checkpoints/"),
-        "monitor": "val_uni_acc",
-        "verbose": True,
-        "save_top_k": 2,
-    }
-    ckpt_callback = pl.callbacks.model_checkpoint.ModelCheckpoint(
-        **default_ckpt_callback_kwargs,
-    )
-    trainer = pl.Trainer.from_argparse_args(
-        args,
-        logger=loggers,
-        checkpoint_callback=ckpt_callback,
-        max_epochs=cfg.train.epochs,
-        default_root_dir=model.exp_dir,
-        gradient_clip_val=cfg.train.gradient_clip_val,
-    )
-    if args.eval:
-        pass
-    elif args.test:
-        pass
-    else:
-        pass
-    trainer.fit(model)
+try:
+    _stdout = sys.stdout
+    _stderr = sys.stderr
+    with open(osp.join(model.exp_dir, "logfile.log"), "a") as log:
+        sys.stdout = Logger(log)
+        sys.stderr = ErrLogger(log)
+        pl._logger.addHandler(pl.python_logging.StreamHandler(sys.stdout))
+
+        if torch.cuda.is_available():
+            print("GPUS:", os.environ["CUDA_VISIBLE_DEVICES"])
+            print(torch.cuda.get_device_name())
+        print(pp.pformat(vars(args)))
+        print(pp.pformat(cfg))
+
+        loggers = model.make_lightning_loggers()
+        default_ckpt_callback_kwargs = {
+            "filepath": osp.join(model.exp_dir, "checkpoints/"),
+            "monitor": "val_uni_acc",
+            "verbose": True,
+            "save_top_k": 2,
+        }
+        ckpt_callback = pl.callbacks.model_checkpoint.ModelCheckpoint(
+            **default_ckpt_callback_kwargs,
+        )
+        trainer = pl.Trainer.from_argparse_args(
+            args,
+            logger=loggers,
+            checkpoint_callback=ckpt_callback,
+            max_epochs=cfg.train.epochs,
+            default_root_dir=model.exp_dir,
+            gradient_clip_val=cfg.train.gradient_clip_val,
+        )
+        if args.eval:
+            pass
+        elif args.test:
+            pass
+        else:
+            pass
+        trainer.fit(model)
+finally:
+    sys.stdout = _stdout
+    sys.stderr = _stderr
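One change worth calling out: pp.pprint(vars(args)) became print(pp.pformat(vars(args))). Assuming pp is a pprint.PrettyPrinter instance created at import time (its definition is not shown in this diff), pp.pprint() writes to the stream captured when the printer was constructed, i.e. the original sys.stdout, so its output would bypass the Logger redirect; pp.pformat() returns a string, and print() goes through whatever sys.stdout currently is. A small self-contained demonstration of the difference:

import io
import pprint
import sys

pp = pprint.PrettyPrinter()        # binds sys.stdout at construction time
buf = io.StringIO()
sys.stdout = buf                   # redirect, as main.py does with Logger
pp.pprint({"a": 1})                # goes to the ORIGINAL stdout, bypassing buf
print(pp.pformat({"a": 1}))        # goes through the current sys.stdout
sys.stdout = sys.__stdout__        # restore, mirroring the finally block
print("captured:", repr(buf.getvalue()))   # only the pformat line was captured

The new try/finally wrapper serves the same safety goal: the real streams are restored even if trainer.fit(model) raises.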
