forked from nirshlezinger1/LearnedFactorGraphs
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathv_fSumProduct.m
91 lines (79 loc) · 3.06 KB
/
v_fSumProduct.m
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
function v_fXhat = v_fSumProduct(m_fPriors, s_nConst, m_fTransition)
% Apply sum product detection from computed priors
%
% Runs a forward-backward (BCJR-style) message passing over the channel
% state trellis implied by m_fTransition, then makes hard symbol
% decisions along a single tracked state path.
%
% Syntax
% -------------------------------------------------------
% v_fXhat = v_fSumProduct(m_fPriors, s_nConst, m_fTransition)
%
% INPUT:
% -------------------------------------------------------
% m_fPriors - evaluated likelihoods for each state at each time instance
%             (rows index time instances, columns index states)
% s_nConst - constellation size (positive integer)
% m_fTransition - state transition matrix (s_nStates x s_nStates);
%                 its row count also encodes the channel memory via
%                 s_nStates = s_nConst^s_nMemSize
%
%
% OUTPUT:
% -------------------------------------------------------
% v_fXhat - recovered symbols vector
% Recover the channel memory length from the transition matrix size:
% s_nMemSize = log_{s_nConst}(s_nStates). round() guards against
% floating-point error in the log ratio.
s_nMemSize = round(log(size(m_fTransition,1)) / log(s_nConst));
s_nDataSize = size(m_fPriors, 1);
s_nStates = s_nConst^s_nMemSize;
v_fShat = zeros(1, s_nDataSize);
% Generate state switch matrix - each state appears exactly Const times.
% m_fStateSwitch(ii,ll) is the state reached from state ii when the new
% input symbol is ll: the oldest symbol is shifted out (the floor(...)
% term) and the new symbol selects one of s_nConst blocks of states.
m_fStateSwitch = zeros(s_nStates,s_nConst);
for ii=1:s_nStates
Idx = floor((ii -1)/s_nConst) + 1;
for ll=1:s_nConst
m_fStateSwitch(ii,ll) = (s_nStates/s_nConst)*(ll-1) + Idx;
end
end
% Compute forward messages path
% Column kk+1 holds the forward message after observing instance kk.
m_fForward = zeros(s_nStates, 1+s_nDataSize);
% assume that the initial state is only zero (state 1)
m_fForward(1,1) = 1;
for kk=1:s_nDataSize
for ii=1:s_nStates
for ll=1:s_nConst
s_nNextState = m_fStateSwitch(ii,ll);
% Accumulate over all (state, symbol) transitions into
% s_nNextState; the prior is evaluated at the DESTINATION state.
m_fForward(s_nNextState, kk+1) = m_fForward(s_nNextState, kk+1) + ...
m_fForward(ii,kk)*m_fPriors(kk,s_nNextState)...
*m_fTransition(s_nNextState,ii);
end
end
% Normalize to avoid numerical underflow of the recursion
m_fForward(:, kk+1) = m_fForward(:, kk+1) / sum( m_fForward(:, kk+1));
end
% Compute backward messages path
m_fBackward = zeros(s_nStates, s_nDataSize+1);
% the final state does not pass a message - initialize uniformly
m_fBackward(:,end) = ones(s_nStates,1)/s_nConst;
for kk=s_nDataSize:-1:1
for ii=1:s_nStates
for ll=1:s_nConst
s_nNextState = m_fStateSwitch(ii,ll);
% Backward message into state ii; note the prior here is
% evaluated at the CURRENT state ii (asymmetric to the
% forward pass, which uses the destination state).
m_fBackward(ii, kk) = m_fBackward(ii, kk) + ...
m_fBackward(s_nNextState,kk+1)*m_fPriors(kk,ii)...
*m_fTransition(s_nNextState,ii);
end
end
% Normalize to avoid numerical underflow of the recursion
m_fBackward(:, kk) = m_fBackward(:, kk) / sum( m_fBackward(:, kk));
end
% Compute MAP
% NOTE(review): decisions are made along a single hard-tracked state
% path (Viterbi-like) rather than by full marginalization over states.
s_fCurState = 1; % Initial state
for kk=1:s_nDataSize
v_fProb = zeros(s_nConst,1);
% Loop over possible symbol values
for ll=1:s_nConst
% Sum forward-backward products
s_nNextState = m_fStateSwitch(s_fCurState,ll);
v_fProb(ll) = v_fProb(ll) + ...
m_fForward(s_fCurState,kk)*m_fBackward(s_nNextState,kk)*m_fTransition(s_nNextState,s_fCurState)*m_fPriors(kk,s_nNextState);
end
% Select symbol which maximizes APP
[~, v_fShat(kk)] = max(v_fProb);
% Advance the tracked state using the decided symbol
s_fCurState = m_fStateSwitch(s_fCurState,v_fShat(kk));
end
% pad first memory-1 symbols as the first symbol (zero)
% The detector's decisions are delayed by the channel memory, so shift
% them right by s_nMemSize-1 positions; the leading entries default to
% symbol index 1 (the assumed zero initial state).
v_fXhat = ones(1, s_nDataSize);
v_fXhat(s_nMemSize:end) = v_fShat(1:end-s_nMemSize+1);