From 3317084ea93883415dc7ab2145bdb1f9885b9570 Mon Sep 17 00:00:00 2001
From: Sascha Spors
Date: Mon, 9 Dec 2024 13:39:47 +0100
Subject: [PATCH] fixed typo in gradient of cost function

---
 random_signals_LTI_systems/linear_prediction.ipynb | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/random_signals_LTI_systems/linear_prediction.ipynb b/random_signals_LTI_systems/linear_prediction.ipynb
index ea61a89..df117a9 100644
--- a/random_signals_LTI_systems/linear_prediction.ipynb
+++ b/random_signals_LTI_systems/linear_prediction.ipynb
@@ -66,7 +66,7 @@
     "Above equation is referred to as [*cost function*](https://en.wikipedia.org/wiki/Loss_function) $J$ of the optimization problem. We aim at minimizing the cost function, hence minimizing the MSE between the signal $x[k]$ and its prediction $\\hat{x}[k]$. The solution of this [convex optimization](https://en.wikipedia.org/wiki/Convex_optimization) problem is referred to as [minimum mean squared error](https://en.wikipedia.org/wiki/Minimum_mean_square_error) (MMSE) solution. Minimizing the cost function is achieved by calculating its gradient with respect to the filter coefficients [[Haykin](../index.ipynb#Literature)] using results from [matrix calculus](https://en.wikipedia.org/wiki/Matrix_calculus)\n",
     "\n",
     "\\begin{align}\n",
-    "\\nabla_\\mathbf{h} J &= -2 E \\left\\{ x[k-1] (x[k] - \\mathbf{h}^T[k] \\mathbf{x}[k-1]) \\right\\} \\\\\n",
+    "\\nabla_\\mathbf{h} J &= -2 E \\left\\{ \\mathbf{x}[k-1] (x[k] - \\mathbf{h}^T[k] \\mathbf{x}[k-1]) \\right\\} \\\\\n",
     "&= - 2 \\mathbf{r}[k] + 2 \\mathbf{R}[k-1] \\mathbf{h}[k]\n",
     "\\end{align}\n",
     "\n",
@@ -10436,9 +10436,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.13"
+   "version": "3.12.6"
   }
  },
  "nbformat": 4,
- "nbformat_minor": 1
+ "nbformat_minor": 4
}
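
Side note on the corrected line, with a minimal numerical check. The fix makes the outer factor in the expectation the stacked past-sample vector x[k-1], so the gradient with respect to the coefficient vector h is itself a vector; setting the corrected gradient -2 r[k] + 2 R[k-1] h[k] to zero yields the MMSE predictor h = R^{-1} r, at which the gradient vanishes. The numpy sketch below illustrates this; the toy signal, predictor order N, and all variable names are illustrative assumptions, not taken from the patched notebook.

# Minimal sketch (illustrative, not from the patched notebook): verify that the
# gradient of the cost function vanishes at the MMSE predictor h = R^{-1} r.
import numpy as np

rng = np.random.default_rng(42)
N = 3       # assumed predictor order
K = 100000  # number of samples
# toy correlated signal: white noise filtered by a short FIR
x = np.convolve(rng.standard_normal(K), [1.0, 0.5, 0.25], mode="same")

# stack the past-sample vectors x[k-1] = [x[k-1], ..., x[k-N]]^T as columns
X = np.stack([x[N - 1 - n:K - 1 - n] for n in range(N)])   # shape (N, K-N)
d = x[N:]                                                   # current samples x[k]

R = X @ X.T / d.size        # sample estimate of the autocorrelation matrix R[k-1]
r = X @ d / d.size          # sample estimate of the cross-correlation vector r[k]
h = np.linalg.solve(R, r)   # MMSE solution of R h = r

grad = -2 * r + 2 * R @ h   # gradient of the cost function at the optimum
print(np.allclose(grad, 0)) # -> True (up to numerical precision)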