# all_calc_modelparam.py
'''
2024.6.19: calculate model flops, params, speed
'''
import torch
from torchinfo import summary
from allconfig import get_arguments, get_model
import time
from ptflops import get_model_complexity_info
from calflops import calculate_flops
if __name__ == "__main__":
    device = 'cuda'
    args = get_arguments()
    args.city = 'Vaihingen'
    args.method = 'UTELT'
    args.input_size_train = '256,256'
    args.num_classes = 5
    # args.city = 'uavid'
    # args.method = 'UTELT'
    # args.input_size_train = '1024,1024'
    # args.num_classes = 8
    args.device = device
    args.makedirs = False
    w, h = map(int, args.input_size_train.split(','))
    backbone = ['effiSAM', 'unetformer', 'FCNcls']
    for b in backbone:
        args.backbone = b
        model = get_model(args).to(device)
        print(b)
        ######### 1. measure train parameters, macs
        # summary(model, input_size=(1, 3, h, w))
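        # Dependency-free parameter count (sketch): should agree with the
        # "Total params" reported by torchinfo's summary above.
        # n_params = sum(p.numel() for p in model.parameters())
        # n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
        # print(f"Params: {n_params / 1e6:.2f} M (trainable: {n_trainable / 1e6:.2f} M)")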
        ######### 2. memory usage
        # input_tensor = torch.randn(1, 3, h, w).to(device)
        # torch.cuda.reset_peak_memory_stats(device)
        # model(input_tensor)
        # peak_memory = torch.cuda.max_memory_allocated(device) / 1024**2  # Convert to MB
        # print(f"Peak memory usage: {peak_memory} MB")
        ######### 3. model speed
        input_tensor = torch.randn(1, 3, h, w).to(device)
        model.eval()
        with torch.no_grad():
            # Warm-up so kernel launches / cuDNN autotuning are not timed
            for _ in range(10):
                _ = model(input_tensor)
            # Measure speed: synchronize so asynchronous CUDA kernels finish
            # before reading the CPU clock
            torch.cuda.synchronize(device)
            start_time = time.time()
            num_runs = 1000
            for _ in range(num_runs):
                _ = model(input_tensor)
            torch.cuda.synchronize(device)
            end_time = time.time()
        avg_inference_time = (end_time - start_time) / num_runs
        # Calculate FPS
        fps = 1 / avg_inference_time
        print(f"Average inference time: {avg_inference_time * 1000:.2f} ms")
        print(f"FPS: {fps:.2f}")
        ########## 4. calculate flops
        # Calculate FLOPs and parameters
        # macs, params = get_model_complexity_info(model, (3, h, w), as_strings=True, print_per_layer_stat=True)
        # print(f"MACS: {macs}, Parameters: {params}")
        '''
        input_shape = (1, 3, h, w)
        flops, macs, params = calculate_flops(model=model,
                                              input_shape=input_shape,
                                              output_as_string=True,
                                              output_precision=4)
        print("model: %s FLOPs:%s MACs:%s Params:%s \n" % (b, flops, macs, params))
        '''
        # Alexnet FLOPs:4.2892 GFLOPS MACs:2.1426 GMACs Params:61.1008 M
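        # Reproducibility sketch for the AlexNet reference above (assumes
        # torchvision is available); uses the same calculate_flops call as the
        # commented block:
        # from torchvision.models import alexnet
        # flops, macs, params = calculate_flops(model=alexnet(),
        #                                       input_shape=(1, 3, 224, 224),
        #                                       output_as_string=True,
        #                                       output_precision=4)
        # print("Alexnet FLOPs:%s MACs:%s Params:%s" % (flops, macs, params))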