tanh_scale.py
import torch
import torch.nn as nn
class HardTanh(nn.Module):
    """ReLU activation whose input is divided by a scale `alpha`, tracked
    during training as an exponential moving average of the input's std."""

    def __init__(self, writer=None):
        super().__init__()
        self.momentum = 0.9
        self.activation = nn.ReLU()
        self.writer = writer  # optional TensorBoard SummaryWriter
        # Learnable alternative kept for reference:
        # self.alpha = nn.Parameter(torch.tensor(0.1), requires_grad=True)
        self.register_buffer('alpha', torch.tensor(1.0))

    def forward(self, input):
        res = self.activation(input / self.alpha)
        if self.training:
            # EMA update of the running scale toward the batch std;
            # detach so the update never participates in autograd.
            self.alpha = (self.momentum * self.alpha
                          + (1 - self.momentum) * torch.std(input)).detach()
            if self.writer is not None:
                self.writer.add_scalar("Loss/Alpha", self.alpha.item())
                print(f"alpha: {self.alpha.item()}")
        # Hard-tanh variant kept for reference:
        # res = torch.clamp(np.exp(-1) * input, -1, 1)
        return res
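
A minimal usage sketch follows, not part of the original file: it drives the module for a few training-mode steps so the `alpha` buffer converges toward the input's standard deviation, then switches to eval mode where `alpha` stays frozen. The tensor shape, the scale factor, the step count, and the `__main__` guard are all illustrative choices.

if __name__ == "__main__":
    act = HardTanh()

    act.train()
    x = torch.randn(64, 128) * 3.0  # inputs with std ~3
    for _ in range(50):
        _ = act(x)  # each call nudges alpha toward std(x) via the EMA
    print("alpha after training steps:", act.alpha.item())

    act.eval()
    y = act(x)  # inference: alpha is fixed, output is relu(x / alpha)

With momentum 0.9, each step closes 10% of the gap between `alpha` and the batch std, so after 50 steps on the same input `alpha` sits within about 0.5% of `std(x)`.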