forked from compbiolabucf/omicsGAT
cls_model.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from layers import GraphAttentionLayer


class omicsGAT(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        super(omicsGAT, self).__init__()
        self.dropout = dropout

        ## creating one attention layer per head
        self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)  ## registering each head as a submodule

        ## fully connected layers on top of the concatenated head outputs
        in_features = nhid * nheads
        self.dnn = nn.Sequential(
            nn.Linear(in_features, int(in_features/2)),
            nn.BatchNorm1d(int(in_features/2)),
            nn.ReLU(inplace=True),
            nn.Linear(int(in_features/2), 16),
            nn.BatchNorm1d(16),
            nn.ReLU(inplace=True))
        self.fc = nn.Linear(16, nclass)

    def forward(self, x, adj):
        x = F.dropout(x, self.dropout, training=self.training)
        ## concatenating all attention heads: each head contributes (#input x nhid), giving (#input x nhid*nheads)
        x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.dnn(x)
        x = F.log_softmax(self.fc(x), dim=1)

        return x
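
A minimal usage sketch, assuming GraphAttentionLayer in layers.py follows the standard pyGAT interface (node features of shape (N, nfeat) plus a dense N x N adjacency matrix in, features of shape (N, nhid) out). The sample count, feature count, class count, and hyperparameters below are illustrative, not taken from the repository:

import torch

from cls_model import omicsGAT

# Hypothetical dimensions: 32 samples, 100 omics features, 3 classes.
n_samples, n_features, n_classes = 32, 100, 3

model = omicsGAT(nfeat=n_features, nhid=8, nclass=n_classes,
                 dropout=0.4, alpha=0.2, nheads=8)

x = torch.rand(n_samples, n_features)      # sample-by-feature input matrix
adj = torch.ones(n_samples, n_samples)     # dense sample-similarity adjacency (all-ones placeholder)

model.eval()                               # put dropout and BatchNorm1d in inference mode
with torch.no_grad():
    log_probs = model(x, adj)              # (n_samples, n_classes) log-probabilities
predictions = log_probs.argmax(dim=1)      # predicted class index per sample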