#!/usr/bin/env python3
|
|
|
|
|
|
"""
|
|
@authors: Yann & Sam'
|
|
"""
|
|
|
|
|
|
import random
|
|
|
|
import numpy as np
|
|
|
|
import tensorflow as tf
|
|
|
|
|
|
"""
|
|
The goal of this program is to make a linear regression for any number of
|
|
parameters in the input.
|
|
"""
|
|
|
|
|
|
###############################################################################
|
|
|
|
|
|
# This function generates 'nbExamples' tuples of coordinates. The first member
|
|
# being the values of the input and the second member being the output
|
|
# corresponding to the input, to which is added some noise of
|
|
# variance 'intervalWidth'.
|
|
# The output is linear in each parameter of the input.
|
|
# 'leadingCoefficients' contains all the slopes.
|
|
def getRandomCoordinatesVectors(leadingCoefficients, nbExamples,
                                intervalWidth, intervalValues):
    """Generate 'nbExamples' noisy examples of a linear model.

    Each example is a tuple ([x0, ..., xn], y): the inputs are drawn
    uniformly from 'intervalValues', and the output is the inner product
    of the inputs with 'leadingCoefficients' plus uniform noise drawn
    from [-|intervalWidth|, |intervalWidth|].

    Parameters:
        leadingCoefficients: slopes of the underlying linear model
            (one per input dimension).
        nbExamples: number of (input, output) tuples to generate.
        intervalWidth: half-width of the uniform noise added to each output.
        intervalValues: (low, high) range the input values are drawn from.

    Returns:
        [([x0, ..., xn], y0), ..., ([x0, ..., xn], ym)]
        (with 'n' the number of parameters, and 'm' the number of examples)
    """
    coordinates = []

    # Loop-invariant: hoisted out of the example loop.
    noiseBound = abs(intervalWidth)

    for _ in range(nbExamples):

        # Random input vector, one value per coefficient (comprehension
        # instead of the manual append loop).
        X = [random.uniform(*intervalValues)
             for _ in range(len(leadingCoefficients))]

        # Output corresponding to the input, plus uniform noise.
        # --> 'np.dot()': given two 1-D vectors, returns the inner
        #     product of the two vectors.
        y = np.dot(X, leadingCoefficients) + \
            random.uniform(-noiseBound, noiseBound)

        coordinates.append((X, y))

    return coordinates
|
|
|
|
|
|
# This function tries to replace the Theano "black-box" with a TensorFlow one.
def computeIterationAvg(alpha, nbParameters, nbExamples):
    """Build the TensorFlow (1.x) graph for linear regression by gradient descent.

    Parameters:
        alpha: learning rate of the gradient-descent optimizer.
        nbParameters: number of input features (length of theta).
        nbExamples: number of training examples fed per step.

    Returns:
        (exInputs, exOutputs, initialization, trainingAvg, thetaAvg):
        the two feed placeholders, the variable-initialization op, the
        one-step training op, and the theta variable itself.
    """

    # Placeholders for the whole training batch, fed via feed_dict.
    exInputs = tf.placeholder(tf.float64, [nbExamples, nbParameters])
    exOutputs = tf.placeholder(tf.float64, [nbExamples, 1])

    # Parameter column vector, initialized to zeros; the NumPy float64
    # zeros match the float64 placeholders above.
    thetaAvg = tf.Variable(np.zeros((nbParameters, 1)))

    # Hypothesis h = X . theta, shape (nbExamples, 1).
    hAvg = tf.matmul(exInputs, thetaAvg)
    diff = hAvg - exOutputs
    # Cost J(theta) = ||X.theta - y||^2 / (2 * m): mean squared error
    # in its classic halved form; shape (1, 1).
    JthetaAvg = tf.matmul(tf.transpose(diff), diff) / (2. * nbExamples)

    # Created after the Variable above so the init op covers it.
    initialization = tf.global_variables_initializer()

    # One gradient-descent step on J(theta).
    trainingAvg = tf.train.GradientDescentOptimizer(learning_rate=alpha
                                                    ).minimize(JthetaAvg)

    return exInputs, exOutputs, initialization, trainingAvg, thetaAvg
|
|
|
|
|
|
def main():
    """Generate noisy linear data, then fit theta by gradient descent."""

    # ############################## Parameters ###############################

    nbIterationsAvg = 1000

    alpha = 0.001

    # The leading coefficients of the line, here :
    # y = x0 + 2 * x1 + 3 * x2 + 4 * x3
    leadingCoefficients = [1, 2, 3, 4]

    nbExamples = 25
    intervalWidth = 3
    intervalValues = -10, 10

    # ###################### Computing iterations #############################

    coordinates = getRandomCoordinatesVectors(leadingCoefficients, nbExamples,
                                              intervalWidth, intervalValues)

    # Unzip [(X, y), ...] into parallel input/output containers.
    inputs, outputs = zip(*coordinates)

    inputs = list(inputs)
    # Column-vector shape (nbExamples, 1) to match the graph's placeholder.
    outputs = np.asarray(list(outputs)).reshape(nbExamples, 1)

    # Device select
    with tf.device("/cpu:0"):

        # The context manager closes the session on exit, so the explicit
        # sess.close() the original called inside the 'with' was redundant.
        with tf.Session() as sess:

            # Let's get some stuff from the function declared above
            exInputs, exOutputs, initialization, trainingAvg, thetaAvg = \
                computeIterationAvg(alpha,
                                    len(leadingCoefficients),
                                    nbExamples)

            sess.run(initialization)

            # Training !
            for _ in range(nbIterationsAvg):

                sess.run(trainingAvg, feed_dict={
                    exInputs: inputs, exOutputs: outputs
                })

            # Fetch the learned parameters and print them with plain
            # Python, instead of the deprecated tf.Print side-effect op.
            print("Theta:", sess.run(thetaAvg).ravel())
|
|
|
|
|
|
# Run the demo only when executed as a script (not when imported).
if __name__ == '__main__':

    main()
|