RLS_Neural_Network.py
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 4 00:51:38 2020
Author: Hunar Ahmad @ Brainxyz
"""
import numpy as np
class RlsNode:
    """
    Recursive least squares estimation (this is the update step of a Kalman filter).
    """
    def __init__(self, _m, _name):
        self.name = _name
        self.M = _m
        self.w = np.random.rand(1, _m)   # output weights, updated online
        self.P = np.eye(_m)              # inverse-correlation matrix

    def RlsPredict(self, v):
        return np.dot(self.w, v)

    def RlsLearn(self, v, e):
        pv = np.dot(self.P, v)
        vv = np.dot(pv.T, v)
        eMod = pv / vv               # gain vector
        ee = eMod * e
        self.w = self.w + ee         # weight update
        outer = np.outer(eMod, pv)
        self.P = self.P - outer      # inverse-correlation update
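# For reference, RlsLearn implements a recursive-least-squares-style update:
#   k = P v / (v^T P v)      -> gain vector ("eMod" above)
#   w = w + e * k^T          -> weight update
#   P = P - k (P v)^T        -> inverse-correlation update (P stays symmetric,
#                               so this equals the textbook P - k v^T P)
# Note the denominator is v^T P v rather than the usual 1 + v^T P v, so each
# incoming sample is fitted exactly at the step it is seen.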
class Net:
    """
    Neural network with a single hidden layer; the first-layer weights (iWh) are
    randomly initialized and left fixed, so only the output nodes are trained.
    """
    def __init__(self, _input_size, _neurons):
        self.input_size = _input_size
        self.neurons = _neurons
        self.iWh = np.random.rand(_neurons, _input_size) - 0.5
        self.nodes = []

    def CreateOutputNode(self, _name):
        nn = RlsNode(self.neurons, _name)
        self.nodes.append(nn)

    def sigmoid(self, x):
        return 1 / (1 + np.e ** -x)

    def FeedForwardL1(self, v):
        vout = np.dot(self.iWh, v)
        # tout = np.tanh(vout)
        tout = self.sigmoid(vout)
        return tout + 0.00000001  ## adding a small value to avoid division by zero in the upcoming computations!
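    ## Note: the hidden layer is a fixed random projection passed through a
    ## sigmoid; only the RLS output nodes below are trained (similar in spirit
    ## to an extreme learning machine / reservoir setup).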
    ### RLS layer (trainable weights using the RLS algorithm)
    def FeedForwardL2(self, tout):
        yhats = []
        for i in range(len(self.nodes)):
            p = self.nodes[i].RlsPredict(tout)
            yhats.append(p[0])
        return np.asarray(yhats)

    ### Error evaluation
    def Evaluate(self, ys, yhats):
        errs = ys - yhats
        return errs

    def Learn(self, acts, errs):
        for i in range(len(self.nodes)):
            self.nodes[i].RlsLearn(acts, errs[i])  # change to errs[0][i] if an indexing error occurs
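## A Net can drive several outputs: call CreateOutputNode once per target
## dimension, and each RlsNode learns its own weights independently from the
## shared hidden-layer activations.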
#### Example Usage ####
x = [[1, 1], [0, 0], [1, 0], [0, 1]]  ## input data
y = [1, 1, 0, 0]                      ## output data (targets)
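## (the targets implement XNOR: the output is 1 exactly when the two inputs match)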
## configuring the network
n_input = 2
n_neurons = 5
n_output = 1
net = Net(n_input, n_neurons)
for i in range(n_output):
    net.CreateOutputNode(i)
## training
N = len(x)  ## a single pass over the samples is enough (RLS is close to one-shot learning!)
for i in range(N):
    inputt = x[i][:]
    L1 = net.FeedForwardL1(inputt)
    yhats = net.FeedForwardL2(L1)
    errs = net.Evaluate(y[i], yhats)
    net.Learn(L1, errs)
## evaluate after learning
for i in range(N):
    inputt = x[i][:]
    L1 = net.FeedForwardL1(inputt)
    yhats = net.FeedForwardL2(L1)
    print("input", inputt, "predicted output:", yhats[0])