-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmain.py
78 lines (61 loc) · 2.41 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import argparse
import sys
import torch
from PIL import Image
from torchvision import transforms
sys.path.append('/home/juandres/aml/CheXBias/src/')
from general_functions import *
# ANSI escape sequences used to colour terminal output
class bcolors:
    """Terminal colour codes for the result report."""
    OKGREEN = '\x1b[92m'   # bright green ('\x1b' == '\033')
    WARNING = '\x1b[93m'   # bright yellow
    ENDC = '\x1b[0m'       # reset all formatting
# Initialize parser
parser = argparse.ArgumentParser()
# Set argument
parser.add_argument('--mode',
type=str,
default='test',
help='Choose mode (demo or test)')
parser.add_argument('--img',
type=str,
default='/home/juandres/aml/CheXBias/data_new/processed/test/patient19396_study3_view1_frontal_Female_33_Frontal_PA.jpg',
help='Choose mode (demo or test)')
# Get argument
args = parser.parse_args()
# Set classes
args.classes = ['Enlarged Cardiomediastinum','Cardiomegaly','Lung Opacity','Lung Lesion','Edema','Consolidation','Pneumonia','Atelectasis','Pneumothorax','Pleural Effusion','Pleural Other','Fracture']
# Set device: prefer GPU when available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
# Initialize model with one output per class label
model = CustomResNet(num_classes=len(args.classes)).to(device)
# Load weights (choose any of the selected models).
# BUGFIX: map_location makes GPU-saved checkpoints loadable on CPU-only
# machines; without it torch.load raises when CUDA is unavailable.
model.load_state_dict(torch.load('/home/juandres/aml/CheXBias/models/Experiment_2/age/group_selection_3/best_model.pth',
                                 map_location=device))
# Load the input image: test mode always uses the bundled sample scan,
# any other mode loads the user-supplied --img path.
if args.mode != 'test':
    img = Image.open(args.img)
else:
    # 'L' mode for grayscale
    img = Image.open('/home/juandres/aml/CheXBias/data_new/processed/test/patient19396_study3_view1_frontal_Female_33_Frontal_PA.jpg').convert('L')
# Build the pre-processing pipeline step by step
resize_step = transforms.Resize(256)
crop_step = transforms.CenterCrop(224)
# Convert grayscale to specified number of channels
channel_step = transforms.Grayscale(num_output_channels=3)
tensor_step = transforms.ToTensor()
preprocess = transforms.Compose([resize_step, crop_step, channel_step, tensor_step])
# Pre-process the image into a (3, 224, 224) tensor
img = preprocess(img)
# Run the forward pass with gradient tracking disabled
with torch.inference_mode():
    # Switch to evaluation mode (affects dropout / batch-norm layers)
    model.eval()
    # Add a batch dimension and predict
    predictions = model(img.unsqueeze(0).to(device))
# Report one colour-coded line per pathology.
# NOTE(review): rounding assumes model outputs are already in [0, 1]
# (i.e. CustomResNet applies a sigmoid) — confirm against its definition.
print('\nCheXBias Results =D \n')
rounded = torch.round(predictions).tolist()[0]
for idx, score in enumerate(rounded):
    if score != 1:
        print(bcolors.OKGREEN + f' - {args.classes[idx]} not detected!' + bcolors.ENDC)
    else:
        print(bcolors.WARNING + f' - {args.classes[idx]} detected!' + bcolors.ENDC)
print('\nThanks for using! Verify results with professional')