import os
import sys
import math
import random
import numpy as np
from loguru import logger
import torch
from torch.backends import cudnn


def init_seeds(seed=0, cuda_deterministic=True):
    """Seed Python, NumPy and PyTorch RNGs so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Speed-reproducibility tradeoff: https://pytorch.org/docs/stable/notes/randomness.html
    if cuda_deterministic:  # slower, more reproducible
        cudnn.deterministic = True
        cudnn.benchmark = False
    else:  # faster, less reproducible
        cudnn.deterministic = False
        cudnn.benchmark = True
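
# Example usage (illustrative; `args.seed` is a placeholder, not part of this module):
#   call once at program startup, before building the model and data loaders, e.g.
#   init_seeds(args.seed, cuda_deterministic=True)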


class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)


class ProgressMeter(object):
    """Formats a batch counter plus a set of AverageMeters into one log line."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        logger.info("\t".join(entries))

    def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
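
# Example usage (illustrative; `train_loader` and the meter names/formats are
# placeholders, not part of this module):
#   batch_time = AverageMeter('Time', ':6.3f')
#   losses = AverageMeter('Loss', ':.4e')
#   progress = ProgressMeter(len(train_loader), [batch_time, losses],
#                            prefix="Epoch: [{}]".format(epoch))
#   for i, (images, target) in enumerate(train_loader):
#       ...
#       losses.update(loss.item(), images.size(0))
#       progress.display(i)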


def adjust_learning_rate(optimizer, epoch, args):
    """Step decay: multiply the LR by args.lr_drop_ratio at each epoch in args.lr_drop_epoch."""
    decay = args.lr_drop_ratio if epoch in args.lr_drop_epoch else 1.0
    lr = args.lr * decay
    global current_lr
    current_lr = lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    # Persist the dropped LR in args so later drops compound on it.
    args.lr = current_lr
    return current_lr


def adjust_learning_rate_cosine(optimizer, epoch, args):
    """Cosine learning rate annealing without restarts."""
    lr = args.lr * 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
    global current_lr
    current_lr = lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return current_lr
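
# Example usage (illustrative; `optimizer` is an existing torch.optim optimizer and
# `args.lr` / `args.epochs` are assumed to come from the training script's argument parser):
#   for epoch in range(args.epochs):
#       lr = adjust_learning_rate_cosine(optimizer, epoch, args)
#       logger.info("epoch {} lr {:.6f}".format(epoch, lr))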


def load_dict(resume_path, model):
    """Load model weights from a checkpoint file, exiting if the path does not exist."""
    if os.path.isfile(resume_path):
        checkpoint = torch.load(resume_path)
        model_dict = model.state_dict()
        model_dict.update(checkpoint['state_dict'])
        model.load_state_dict(model_dict)
        # Delete the checkpoint to release memory.
        del checkpoint
    else:
        sys.exit("=> No checkpoint found at '{}'".format(resume_path))
    return model
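
# Example usage (illustrative; `args.resume` is assumed to hold a checkpoint path):
#   if args.resume:
#       model = load_dict(args.resume, model)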