MINDLE/LinearRegression/linearRegressionVectors.cpp

/* @authors: Yann & Sam' */
#include <iostream>
#include <random>
#include <eigen3/Eigen/Dense>
using namespace std;
using namespace Eigen;
#define FIRSTALPHA 0.01
#define NBEXAMPLES 1000
#define NBITERATIONS 1000
#define NBPARAMETERS 7
#define INTERVALWIDTH 9
#define INTERVALVALUESINF -10
#define INTERVALVALUESSUP 10
#define NBITERATIONS_NEWTON 10
void getRandomCoordinates(Matrix<long double, Dynamic, Dynamic>&, Matrix<long double, 1, NBEXAMPLES>&, Matrix<long double, 1, NBPARAMETERS>);
Matrix<long double, 1, NBPARAMETERS> getGradient(Matrix<long double, Dynamic, Dynamic> const &, Matrix<long double, 1, NBEXAMPLES> const &, Matrix<long double, 1, NBPARAMETERS> const &);
Matrix<long double, Dynamic, Dynamic> getHessian(Matrix<long double, Dynamic, Dynamic> const &);
int main(int argc, char const *argv[])
{
    int i;

    /* We're just filling the 'leadingCoefficient' Matrix with values */
    Matrix<long double, 1, NBPARAMETERS> leadingCoefficient;
    for(i = 0; i < NBPARAMETERS; i++)
    {
        leadingCoefficient(0, i) = i + 1;
    }

    Matrix<long double, Dynamic, Dynamic> x(NBEXAMPLES, NBPARAMETERS);
    Matrix<long double, 1, NBEXAMPLES> y;

    getRandomCoordinates(x, y, leadingCoefficient);
    std::string s;
    if(argc == 1)
    {
        s = "graddesc";
    }
    else if(argc == 2)
    {
        s = argv[1];
    }
    else
    {
        std::cerr << "Too many arguments: expected 0 or 1, got " << argc - 1 << std::endl;
        std::cerr << "Usage: ./bin/linearRegressionVectors.bin [graddesc|newton|normequ|compare]" << std::endl;

        return 1;
    }
if(s == "graddesc" || s == "compare")
{
Matrix<long double, 1, NBPARAMETERS> theta;
theta.setOnes();
long double alpha(FIRSTALPHA);
Matrix<long double, 1, NBPARAMETERS> gradOld = getGradient(x, y, theta);
Matrix<long double, 1, NBPARAMETERS> grad;
Matrix<long double, 1, NBPARAMETERS> gradDiff;
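
        /* The step size is re-estimated every iteration with a Barzilai-Borwein-style rule:
           with s = -alpha * gradOld (the last step taken) and d = grad - gradOld, the new
           alpha is (s . d) / (d . d), i.e. -alpha * (gradOld . d) / (d . d). The
           'grad != gradOld' guard avoids a 0/0 division when the gradient has not changed,
           and 'i' is incremented before 'break' so the printed count includes the final
           iteration */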
        for(i = 0; i < NBITERATIONS; i++)
        {
            theta -= alpha * gradOld;
            grad = getGradient(x, y, theta);

            if(grad != gradOld)
            {
                gradDiff = grad - gradOld;
                alpha = -alpha * gradOld.dot(gradDiff) / gradDiff.dot(gradDiff);
            }

            if(grad.isZero())
            {
                i++;
                break;
            }

            gradOld = grad;
        }
std::cout << "-------------------------------------------------------" << std::endl;
std::cout << " Gradient Descent" << std::endl;
std::cout << "-------------------------------------------------------" << std::endl;
std::cout << "Theta: " << theta << std::endl;
std::cout << "| Done in " << i << " iterations. |" << std::endl;
std::cout << "-------------------------------------------------------" << std::endl;
}
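
    /* Newton's method: theta <- theta - H^(-1) * gradJ(theta). The MSE cost is quadratic
       in theta, so its Hessian X^T * X / NBEXAMPLES is constant and a single Newton step
       already lands on the minimizer (up to rounding) */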
if(s == "newton" || s == "compare")
{
Matrix<long double, 1, NBPARAMETERS> theta;
theta.setOnes();
Matrix<long double, 1, NBPARAMETERS> grad;
Matrix<long double, Dynamic, Dynamic> hessian(NBPARAMETERS, NBPARAMETERS);
for(i = 0; i < NBITERATIONS_NEWTON; i++)
{
grad = getGradient(x, y, theta);
hessian = getHessian(x);
theta -= hessian.inverse() * grad.transpose();
if(grad.isZero())
{
i++;
break;
}
}

        std::cout << "-------------------------------------------------------" << std::endl;
        std::cout << "                    Newton's method" << std::endl;
        std::cout << "-------------------------------------------------------" << std::endl;
        std::cout << "Theta: " << theta << std::endl;
        std::cout << "| Done in " << i << " iterations. |" << std::endl;
        std::cout << "-------------------------------------------------------" << std::endl;
    }
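
    /* The normal equation gives the least squares solution in closed form:
       theta = (X^T * X)^(-1) * X^T * y. The explicit inverse is fine at this size;
       for larger or ill-conditioned problems, solving with a decomposition such as
       Eigen's ColPivHouseholderQR would be numerically safer */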
if(s == "normequ" || s == "compare")
{
Matrix<long double, 1, NBPARAMETERS> theta = (x.transpose() * x).inverse() * x.transpose() * y.transpose();
std::cout << "-------------------------------------------------------" << std::endl;
std::cout << " Normal equation" << std::endl;
std::cout << "-------------------------------------------------------" << std::endl;
std::cout << "Theta: " << theta << std::endl;
std::cout << "-------------------------------------------------------" << std::endl;
}
if(s == "help")
{
std::cout << "Usage : ./bin/linearRegressionVectors.bin [graddesc|newton|normequ|compare]" << std::endl;
}
if(s != "graddesc" && s!= "newton" && s != "compare" && s!= "normequ" && s!= "help")
{
std::cerr << "Unknown argument " << s << std::endl;
std::cerr << "Usage : ./bin/linearRegressionVectors.bin [graddesc|newton|normequ|compare]" << std::endl;
}
return 0;
}
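
/* Builds the synthetic training set: each row of 'x' is [1, u_1, ..., u_(NBPARAMETERS - 1)]
   with the u_j drawn uniformly from [INTERVALVALUESINF, INTERVALVALUESSUP] (the leading 1 is
   the intercept feature), and y_i = leadingCoefficient . x_i plus uniform noise drawn from
   [-INTERVALWIDTH, INTERVALWIDTH] */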
void getRandomCoordinates(Matrix<long double, Dynamic, Dynamic> &x, Matrix<long double, 1, NBEXAMPLES> &y, Matrix<long double, 1, NBPARAMETERS> leadingCoefficient)
{
    random_device rd;
    mt19937 mt(rd());

    /* We set a random engine for the inputs... */
    uniform_real_distribution<double> random_bias_inputs(INTERVALVALUESINF, INTERVALVALUESSUP);

    /* ... and another one for the outputs */
    uniform_real_distribution<double> random_bias_outputs(-INTERVALWIDTH, INTERVALWIDTH);

    for(int i(0); i < NBEXAMPLES; i++)
    {
        x(i, 0) = 1.0;

        for(int j(1); j < NBPARAMETERS; j++)
        {
            x(i, j) = random_bias_inputs(mt);
        }

        y(0, i) = leadingCoefficient.dot(x.row(i)) + random_bias_outputs(mt);
    }
}
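
/* Gradient of the MSE cost, accumulated example by example:
   gradJ(theta) = (1 / NBEXAMPLES) * sum((theta . x_i - y_i) * x_i) */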
Matrix<long double, 1, NBPARAMETERS> getGradient(Matrix<long double, Dynamic, Dynamic> const &x, Matrix<long double, 1, NBEXAMPLES> const &y, Matrix<long double, 1, NBPARAMETERS> const &theta)
{
    Matrix<long double, 1, NBPARAMETERS> result;
    result.setZero();

    for(int i(0); i < NBEXAMPLES; i++)
    {
        result += (theta.dot(x.row(i)) - y(0, i)) * (x.row(i));
    }

    result *= (1.0 / NBEXAMPLES);

    return result;
}
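
/* Hessian of the MSE cost: X^T * X / NBEXAMPLES. It does not depend on theta, which is
   why Newton's method converges here in essentially one step */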
Matrix<long double, Dynamic, Dynamic> getHessian(Matrix<long double, Dynamic, Dynamic> const &x)
{
    return (x.transpose() * x) / NBEXAMPLES;
}