Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Rudimentary fluid.polynomialregressor object #246

Open
wants to merge 30 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
cac7645
base PolynomialRegressor object
lewardo Aug 9, 2023
430ac9c
method call structure
lewardo Aug 9, 2023
3883e5c
design matrix construction
lewardo Aug 9, 2023
c633e49
linear algebra algorithm implementation
lewardo Aug 9, 2023
96f715c
copy constructor and assignment operator optimisations
lewardo Aug 9, 2023
1719d8f
getting object to show up in max (with lewardo/flucoma-max@02d3b22)
lewardo Aug 15, 2023
c3fa23a
temporary commit
lewardo Aug 17, 2023
3997c5b
client update test
lewardo Aug 17, 2023
971dce8
json functions now configured
lewardo Aug 18, 2023
b6a3701
init method for setting initial value on load
lewardo Aug 18, 2023
b591674
removed superfluous mIn/mOut members
lewardo Aug 18, 2023
d880081
predictpoint message now working and regressing
lewardo Aug 18, 2023
eb940de
predict message now working with datasets
lewardo Aug 18, 2023
8792d20
multi-regressor interface
lewardo Aug 18, 2023
53aff44
rename mDims for consistency
lewardo Aug 18, 2023
cd15ed2
actually initialise algorithm from read
lewardo Aug 18, 2023
be581ff
fix parameter read/load updating
lewardo Aug 21, 2023
6cf06aa
automatic dimension setting from training mapping
lewardo Aug 21, 2023
ee63aae
bugfix saving in wrong dimensionality
lewardo Aug 21, 2023
7b92c53
rename algo methods for consistency
lewardo Aug 21, 2023
983560a
added tikhonov regularisation, currently only ridge normalisation
lewardo Aug 21, 2023
ab42687
slimmed json saving by removing redundant data
lewardo Aug 21, 2023
b615117
saving of tikhonov factor
lewardo Aug 21, 2023
fa0dbfb
added write regression state catch
lewardo Aug 21, 2023
5b0bc48
remove parameter update bug on first `fit`
lewardo Aug 22, 2023
cd16032
fix get<>() definition location
lewardo Aug 28, 2023
c0023d2
increase assignment clarity
lewardo Aug 28, 2023
dea9281
run clang-format
lewardo Aug 28, 2023
8702f6d
re-allocate memory on regression for first run
lewardo Aug 28, 2023
59d4932
now using the fluid memory allocator
lewardo Aug 28, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions FlucomaClients.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,7 @@ add_kr_in_client(Stats clients/rt/RunningStatsClient.hpp CLASS RunningStatsClien
add_client(TransientSlice clients/rt/TransientSliceClient.hpp CLASS RTTransientSliceClient )
add_client(Transients clients/rt/TransientClient.hpp CLASS RTTransientClient )


#lib manipulation client group
add_client(DataSet clients/nrt/DataSetClient.hpp CLASS NRTThreadedDataSetClient GROUP MANIPULATION)
add_client(DataSetQuery clients/nrt/DataSetQueryClient.hpp CLASS NRTThreadedDataSetQueryClient GROUP MANIPULATION)
Expand All @@ -158,3 +159,4 @@ add_client(UMAP clients/nrt/UMAPClient.hpp CLASS NRTThreadedUMAPClient GROUP MAN
add_client(MLPRegressor clients/nrt/MLPRegressorClient.hpp CLASS NRTThreadedMLPRegressorClient GROUP MANIPULATION)
add_client(MLPClassifier clients/nrt/MLPClassifierClient.hpp CLASS NRTThreadedMLPClassifierClient GROUP MANIPULATION)
add_client(Grid clients/nrt/GridClient.hpp CLASS NRTThreadedGridClient GROUP MANIPULATION)
add_client(PolynomialRegressor clients/nrt/PolynomialRegressorClient.hpp CLASS NRTThreadedPolynomialRegressorClient GROUP MANIPULATION)
185 changes: 185 additions & 0 deletions include/algorithms/public/PolynomialRegressor.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,185 @@
/*
Part of the Fluid Corpus Manipulation Project (http://www.flucoma.org/)
Copyright University of Huddersfield.
Licensed under the BSD-3 License.
See license.md file in the project root for full license information.
This project has received funding from the European Research Council (ERC)
under the European Union’s Horizon 2020 research and innovation programme
(grant agreement No 725899).
*/

#pragma once

#include "../util/AlgorithmUtils.hpp"
#include "../util/FluidEigenMappings.hpp"
#include "../../data/FluidIndex.hpp"
#include "../../data/FluidMemory.hpp"
#include "../../data/TensorTypes.hpp"
#include <Eigen/Core>
#include <Eigen/Dense>
#include <cassert>
#include <cmath>

namespace fluid {
namespace algorithm {

/// Per-dimension polynomial least-squares regressor with optional
/// Tikhonov (ridge) regularisation. Each of the `dims` columns of the
/// input/output matrices is fitted independently with a polynomial of
/// the configured degree, giving `degree + 1` coefficients per column
/// (constant term first).
class PolynomialRegressor
{
public:
  explicit PolynomialRegressor() = default;
  ~PolynomialRegressor() = default;

  /// Configure the regressor.
  /// @param degree   polynomial degree (fit uses degree + 1 coefficients)
  /// @param dims     number of independent input/output columns
  /// @param tikhonov ridge regularisation factor (0 = ordinary least squares)
  void init(index degree, index dims, double tikhonov = 0.0)
  {
    mInitialized = true;
    setDegree(degree);
    setDims(dims);
    setTikhonov(tikhonov);
  }

  index degree() const { return mInitialized ? asSigned(mDegree) : 0; }
  index dims() const { return mInitialized ? asSigned(mDims) : 0; }
  // NOTE(review): size() mirrors degree(); presumably a client-facing
  // convention — confirm before changing.
  index size() const { return mInitialized ? asSigned(mDegree) : 0; }

  /// Correctly spelled accessor for the Tikhonov factor.
  double tikhonov() const { return mInitialized ? mTikhonovFactor : 0.0; }

  /// Legacy misspelled accessor, kept so existing callers keep compiling.
  /// Prefer tikhonov().
  double tihkonov() const { return tikhonov(); }

  /// Invalidate the current fit; configuration and (stale) coefficients
  /// are retained.
  void clear() { mRegressed = false; }

  bool regressed() const { return mRegressed; }
  bool initialized() const { return mInitialized; }

  /// Setters invalidate the fit only when the value actually changes.
  void setDegree(index degree)
  {
    if (mDegree == degree) return;

    mDegree = degree;
    mRegressed = false;
  }

  void setDims(index dims)
  {
    if (mDims == dims) return;

    mDims = dims;
    mRegressed = false;
  }

  void setTikhonov(double tikhonov)
  {
    if (mTikhonovFactor == tikhonov) return;

    mTikhonovFactor = tikhonov;
    mRegressed = false;
  }

  /// Fit the polynomial coefficients column-by-column from paired
  /// input/output matrices (one regression per dimension).
  /// @param in    input values, one column per dimension
  /// @param out   target values, same shape as `in`
  /// @param alloc allocator for scratch Eigen storage
  void regress(InputRealMatrixView in, InputRealMatrixView out,
               Allocator& alloc = FluidDefaultAllocator())
  {
    using namespace _impl;
    using namespace Eigen;

    ScopedEigenMap<MatrixXd> input(in.rows(), in.cols(), alloc),
        output(out.rows(), out.cols(), alloc),
        transposeProduct(mDegree + 1, mDegree + 1, alloc);

    input = asEigen<Matrix>(in);
    output = asEigen<Matrix>(out);

    mCoefficients.resize(mDegree + 1, mDims);
    mTikhonovMatrix.resize(mDegree + 1, mDegree + 1);

    // ridge regularisation uses R = a.I as the Tikhonov matrix
    asEigen<Matrix>(mTikhonovMatrix) =
        mTikhonovFactor * MatrixXd::Identity(mDegree + 1, mDegree + 1);

    for (index i = 0; i < mDims; ++i)
    {
      generateDesignMatrix(input.col(i));

      // tikhonov/ridge regularisation, given Ax = y where x could be noisy
      // optimise the value _x = (A^T . A + R^T . R)^-1 . A^T . y
      // where R is a tikhonov filter matrix, in case of ridge regression of
      // the form a.I
      transposeProduct = asEigen<Matrix>(mDesignMatrix).transpose() *
                             asEigen<Matrix>(mDesignMatrix) +
                         asEigen<Matrix>(mTikhonovMatrix).transpose() *
                             asEigen<Matrix>(mTikhonovMatrix);
      // NOTE(review): transposeProduct is symmetric positive (semi-)definite,
      // so transposeProduct.ldlt().solve(...) would be cheaper and more
      // numerically robust than forming the explicit inverse — kept as-is to
      // preserve bit-identical results.
      asEigen<Matrix>(mCoefficients.col(i)) =
          transposeProduct.inverse() *
          asEigen<Matrix>(mDesignMatrix).transpose() * output.col(i);
    }

    mRegressed = true;
  }

  /// Copy the fitted coefficients out; no-op when uninitialised.
  void getCoefficients(RealMatrixView coefficients) const
  {
    if (mInitialized) coefficients <<= mCoefficients;
  }

  /// Restore coefficients (e.g. from a saved state); degree and dims are
  /// derived from the matrix shape and the regressor is marked as fitted.
  void setCoefficients(InputRealMatrixView coefficients)
  {
    if (!mInitialized) mInitialized = true;

    setDegree(coefficients.rows() - 1);
    setDims(coefficients.cols());

    mCoefficients <<= coefficients;
    mRegressed = true;
  }

  /// Evaluate the fitted polynomials: out.col(i) = V(in.col(i)) * coeffs(i),
  /// where V is the Vandermonde (design) matrix of the input column.
  void process(InputRealMatrixView in, RealMatrixView out,
               Allocator& alloc = FluidDefaultAllocator()) const
  {
    using namespace _impl;
    using namespace Eigen;

    ScopedEigenMap<VectorXd> coefficientsColumn(mCoefficients.rows(), alloc),
        inputColumn(in.rows(), alloc);

    for (index i = 0; i < mDims; ++i)
    {
      inputColumn = asEigen<Matrix>(in.col(i));
      coefficientsColumn = asEigen<Matrix>(mCoefficients.col(i));

      generateDesignMatrix(inputColumn);

      asEigen<Matrix>(out.col(i)) =
          asEigen<Matrix>(mDesignMatrix) * coefficientsColumn;
    }
  }

private:
  /// Build the Vandermonde design matrix for one input column:
  /// column j holds in^j, j = 0..mDegree. Cached in the mutable
  /// mDesignMatrix so const process() can reuse the buffer.
  void generateDesignMatrix(Eigen::Ref<Eigen::VectorXd> in,
                            Allocator& alloc = FluidDefaultAllocator()) const
  {
    using namespace _impl;
    using namespace Eigen;

    ScopedEigenMap<ArrayXd> designColumn(in.size(), alloc),
        inArray(in.size(), alloc);

    designColumn = VectorXd::Ones(in.size());
    inArray = in.array();

    mDesignMatrix.resize(in.size(), mDegree + 1);

    // successive element-wise multiplies: 1, x, x^2, ...
    for (index i = 0; i < mDegree + 1;
         ++i, designColumn = designColumn * inArray)
      asEigen<Matrix>(mDesignMatrix.col(i)) = designColumn;
  }

  index mDegree{2};
  index mDims{1};
  bool  mRegressed{false};
  bool  mInitialized{false};

  double mTikhonovFactor{0};

  RealMatrix mCoefficients;

  // scratch buffers mutated by const process()/generateDesignMatrix()
  mutable RealMatrix mDesignMatrix;
  mutable RealMatrix mTikhonovMatrix;
};

} // namespace algorithm
} // namespace fluid
Loading