/* @authors: Yann & Sam' */
#include <iostream>
#include <random>

#include "../Modules/Graphics/graphics.hpp"

using namespace std;
using namespace Eigen;

#define FIRSTALPHA 0.01
#define NBEXAMPLES 500
#define NBITERATIONS 1000
#define INTERVALWIDTH 20


void getRandomCoordinates(long double[], long double[], Matrix<long double, 1, 2>);
void getGradient(const long double[], const long double[], const Matrix<long double, 1, 2>, Matrix<long double, 1, 2> &);

int main(int argc, char const *argv[])
|
|
{
|
|
(void)argc;
|
|
(void)argv;
|
|
|
|
/* We fix our coefficients here */
|
|
Matrix<long double, 1, 2> LEADINGCOEFFICIENTS = {1.0, 2.0};
|
|
|
|
long double x[NBEXAMPLES];
|
|
long double y[NBEXAMPLES];
|
|
|
|
/* Coordinates generation */
|
|
getRandomCoordinates(x, y, LEADINGCOEFFICIENTS);
|
|
|
|
long double alpha(FIRSTALPHA);
|
|
Matrix<long double, 1, 2> theta;
|
|
theta.setZero();
|
|
|
|
Matrix<long double, 1, 2> gradOld;
|
|
|
|
getGradient(x, y, theta, gradOld);
|
|
|
|
Matrix<long double, 1, 2> grad;
|
|
Matrix<long double, 1, 2> deltaGrad;
|
|
|
|
/* Training ! */
|
|
int i;
|
|
for(i = 0; i < NBITERATIONS; i++)
|
|
{
|
|
theta -= alpha * gradOld;
|
|
|
|
getGradient(x, y, theta, grad);
|
|
|
|
if(grad != gradOld)
|
|
{
|
|
deltaGrad = grad - gradOld;
|
|
alpha = -alpha * gradOld.dot(deltaGrad) / deltaGrad.dot(deltaGrad);
|
|
}
|
|
|
|
if(grad.isZero())
|
|
{
|
|
i++;
|
|
break;
|
|
}
|
|
|
|
gradOld = grad;
|
|
}
|
|
|
|
cout << "Theta: " << theta << endl;
|
|
cout << "| Done in " << i << " iterations. |" << endl;
|
|
|
|
/* This function will display the input coordinates,
|
|
and the computed regression, and will be waiting while the window stay open */
|
|
sfmlDisplay(string("Affine Regression (C++)"), Matrix<long double, 1, NBEXAMPLES>(x), Matrix<long double, 1, NBEXAMPLES>(y), theta);
|
|
|
|
|
|
return 0;
|
|
}


void getRandomCoordinates(long double x[], long double y[], Matrix<long double, 1, 2> leadingCoefficients)
|
|
{
|
|
random_device rd;
|
|
mt19937 mt(rd());
|
|
uniform_real_distribution<double> random_bias(-INTERVALWIDTH, INTERVALWIDTH);
|
|
|
|
for(int i(0); i < NBEXAMPLES; i++)
|
|
{
|
|
x[i] = i;
|
|
y[i] = (i * leadingCoefficients(0, 1)) + leadingCoefficients(0, 0) + random_bias(mt);
|
|
}
|
|
}


void getGradient(const long double x[], const long double y[], const Matrix<long double, 1, 2> theta, Matrix<long double, 1, 2> &gradient)
|
|
{
|
|
gradient.setZero();
|
|
|
|
for(int i(0); i < NBEXAMPLES; i++)
|
|
{
|
|
gradient(0, 0) += (theta(0, 1) * x[i] + theta(0, 0)) - y[i];
|
|
gradient(0, 1) += ((theta(0, 1) * x[i] + theta(0, 0)) - y[i]) * x[i];
|
|
}
|
|
|
|
gradient /= NBEXAMPLES;
|
|
}