Merge branch 'master' of https://github.com/HorlogeSkynet/MachineLearning
commit 8d4285c463
LinearRegression
OddsAndEnds
@@ -32,7 +32,7 @@ def getRandomCoordinates(leadingCoeficient, nbPoints, intervalWidth):

 # Adds some points, represented by their coordinates, for next display
-def addCoordonates(coordinates):
+def addCoordinates(coordinates):

     # The list of tuples becomes here a list of two lists as: [[x0, ..., xn], [y0, ..., yn]]
     L = list(map(list, zip(*coordinates)))
@@ -104,7 +104,7 @@ setDisplay(leadingCoeficient, nbPoints, intervalWidth, "Getting close by linear

 coordinates = getRandomCoordinates(leadingCoeficient, nbPoints, intervalWidth)
-addCoordonates(coordinates)
+addCoordinates(coordinates)
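For context, the renamed helper relies on the zip(*...) idiom to turn a list of (x, y) tuples into two parallel lists that matplotlib can plot directly. A minimal self-contained sketch, with matplotlib assumed and the sample points made up:

    import matplotlib.pyplot as plt

    def addCoordinates(coordinates):
        # [(x0, y0), ..., (xn, yn)] -> [[x0, ..., xn], [y0, ..., yn]]
        L = list(map(list, zip(*coordinates)))
        plt.plot(L[0], L[1], 'ro')

    addCoordinates([(0, 0), (1, 2), (2, 4)])
    plt.show(block=True)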
# Linear Regression with gradient
@@ -2,7 +2,7 @@

 """
-authors: Yann & Sam'
+@authors: Yann & Sam'
 """
@@ -88,7 +88,7 @@ def graDescent(x_start, y_start, T, p):

         # Let's get the learning rate which gave us the best minimum reached during the descent
         min_t = bestLearningRate(T, X_Y, p)

-        # Let's add best minimum point reached to the "path" of the descent
+        # Let's add the best minimum point reached to the "path" of the descent
         P.append((x_p - min_t * g_x, y_p - min_t * g_y))

         if sqrt(g_x**2 + g_y**2) < 0.01:
@@ -98,14 +98,28 @@ def graDescent(x_start, y_start, T, p):

     return P


+def promptPath(path):
+
+    # The list of tuples becomes here a list of two lists as: [[x0, ..., xn], [y0, ..., yn]]
+    L = list(map(list, zip(*path)))
+
+    # New figure for the plots !
+    plt.figure("Gradient descent of Rosenbrock's function")
+
+    # Adding the path to the figure
+    plt.plot(L[0], L[1], 'b')
+
+    plt.show(block=True)


 # ########################################### Parameters ###########################################

 # Rosenbrock's parameter
 p = 10

 # Startup descent point
-x_start = -0.2
-y_start = 0.6
+x_start = 1.5
+y_start = 0.5

 # Which learning rates we'll test during the descent
 minLR = 0.0
@@ -123,12 +137,7 @@ T = np.arange(minLR, maxLR, incrementLR)

 # The effective descent path
 path = graDescent(x_start, y_start, T, p)

-# New figure for the plots !
-plt.figure("Gradient descent of Rosenbrock's function")
-
-# We add to the figure the path of the gradient descent
-plt.plot(path)
-
-plt.show(block=True)
+# Let's prompt this path
+promptPath(path)
# ############################################### End ##############################################
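To see this file's pieces end to end, here is a self-contained sketch of the descent. The repository's rosenbrock and bestLearningRate internals are not visible in this diff, so the function, its analytic gradient, and the per-step sweep over candidate learning rates below are assumptions based on the textbook Rosenbrock form:

    import numpy as np
    import matplotlib.pyplot as plt
    from math import sqrt

    def rosenbrock(x, y, p):
        # Textbook Rosenbrock function with steepness parameter p
        return (1 - x)**2 + p * (y - x**2)**2

    def gradRosenbrock(x, y, p):
        # Analytic gradient of the function above
        return (-2 * (1 - x) - 4 * p * x * (y - x**2), 2 * p * (y - x**2))

    def graDescent(x_start, y_start, T, p):
        P = [(x_start, y_start)]
        for _ in range(1000):
            x_p, y_p = P[-1]
            g_x, g_y = gradRosenbrock(x_p, y_p, p)
            # Assumed behavior of bestLearningRate: keep the candidate rate reaching the lowest value
            min_t = min(T, key=lambda t: rosenbrock(x_p - t * g_x, y_p - t * g_y, p))
            P.append((x_p - min_t * g_x, y_p - min_t * g_y))
            if sqrt(g_x**2 + g_y**2) < 0.01:
                break
        return P

    T = np.arange(0.001, 0.05, 0.001)
    path = graDescent(1.5, 0.5, T, 10)

    # Same transpose-and-plot display as promptPath above
    L = list(map(list, zip(*path)))
    plt.figure("Gradient descent of Rosenbrock's function")
    plt.plot(L[0], L[1], 'b')
    plt.show(block=True)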
@@ -2,7 +2,7 @@

 """
-authors: Yann
+@authors: Yann & Sam'
 """
@@ -20,9 +20,9 @@ def gradient(x, y, theta):

     return np.dot((np.dot(x, theta) - y), x)


-def getRandomCoordonatesVectors(nbParameters, leadingCoefficients, nbExamples, intervalWidth):
+def getRandomCoordinatesVectors(nbParameters, leadingCoefficients, nbExamples, intervalWidth):

-    coordonates = []
+    coordinates = []

     for i in range(nbExamples):

         X = [1]
@@ -32,18 +32,18 @@ def getRandomCoordonatesVectors(nbParameters, leadingCoefficients, nbExamples, intervalWidth):

         X.append(random.uniform(-abs(nbExamples), abs(nbExamples)))

         y = np.dot(X, leadingCoefficients) + random.uniform(-abs(intervalWidth), abs(intervalWidth))
-        coordonates.append((X, y))
+        coordinates.append((X, y))

-    return coordonates
+    return coordinates


-nbIterations = 10000
+nbIterations = 10
 nbParameters = 1
 leadingCoefficients = [1, 2]
 nbExamples = 25
 intervalWidth = 0

-coordonates = getRandomCoordonatesVectors(nbParameters, leadingCoefficients, nbExamples, intervalWidth)
+coordinates = getRandomCoordinatesVectors(nbParameters, leadingCoefficients, nbExamples, intervalWidth)

 alpha = 0.1
 theta = [0.1, 0.1]
@@ -54,15 +54,15 @@ flag = 'B&B'

 for i in range(nbIterations):

-    for x, y in coordonates:
+    for x, y in coordinates:

-        theta = theta - alpha * gradient(x, y, theta)
+        theta -= alpha * gradient(x, y, theta)

         if flag == 'try':

             if cost(x, y, thetaOld) <= cost(x, y, theta):

-                alpha = 0.5 * alpha
+                alpha *= 0.5

             thetaOld = theta
@@ -73,19 +73,24 @@ for i in range(nbIterations):

             Jtheta = cost(x, y, theta)
             JthetaNext = cost(x, y, theta - alpha * grad)

-            if -np.dot(grad, gradNext >= -0.01 * np.dot(grad, grad) and JthetaNext <= Jtheta - 0.0001 * alpha * np.dot(grad, grad)):
+            if -np.dot(grad, gradNext) >= -0.01 * np.dot(grad, grad) and JthetaNext <= Jtheta - 0.0001 * alpha * np.dot(grad, grad):

-                alpha = alpha * 0.5
+                alpha *= 0.5

         elif flag == 'B&B':

             grad = gradient(x, y, theta)
             deltaGrad = gradient(x, y, theta - alpha * grad) - grad

-            if np.dot(deltaGrad, deltaGrad != 0 and np.dot(deltaGrad, -alpha * grad) != 0):
+            if np.dot(deltaGrad, deltaGrad) != 0 and np.dot(-alpha * grad, deltaGrad) != 0:

+                print(deltaGrad, grad)
                 alpha = np.dot(deltaGrad, deltaGrad) / np.dot(-alpha * grad, deltaGrad)

+            else:
+
+                break

+    print(alpha)

 print(theta)
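For reference, the 'B&B' flag is a Barzilai-Borwein-style adaptive step size. Below is a minimal sketch of the textbook BB2 update, alpha = (s . dg) / (dg . dg), where s is the step just taken and dg the resulting change in gradient; the single-example least-squares setup is an assumption, not lifted from the repository:

    import numpy as np

    def gradient(x, y, theta):
        # Gradient of the squared error 0.5 * (x . theta - y)**2 for one example
        return np.dot((np.dot(x, theta) - y), x)

    x = np.array([1.0, 2.0])
    y = 5.0
    theta = np.array([0.1, 0.1])
    alpha = 0.1

    for _ in range(50):
        grad = gradient(x, y, theta)
        s = -alpha * grad                              # step actually taken
        deltaGrad = gradient(x, y, theta + s) - grad   # change in gradient
        theta = theta + s
        if np.dot(deltaGrad, deltaGrad) != 0:
            # Textbook BB2 step size
            alpha = np.dot(s, deltaGrad) / np.dot(deltaGrad, deltaGrad)
        else:
            break

    print(theta)  # converges to a theta with np.dot(x, theta) == y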
@@ -7,7 +7,7 @@ import os

 def mnist(nbTrainings=60000, nbTests=10000, oneHotEnconding=True):

-    # ... where the MNIST are theoretically downloaded
+    # Set position where the MNIST folder is theoretically downloaded
     data_dir = os.path.join('/media/datasets/', 'mnist/')

     # Loading of training images
@@ -51,8 +51,8 @@ def mnist(nbTrainings=60000, nbTests=10000, oneHotEnconding=True):

         return imageAsOneHot

-    # If one-hot encoding is set to 'True'
-    if oneHotEnconding:
+    # If one-hot encoding is set to 'True'...
+    if oneHotEnconding is True:

         # ... let's return the training and the test images encoded in one-hot binary style (10 is for the digits '0', ..., '9')
         trainingY = imagesToOneHot(trainingY, 10)
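imagesToOneHot's body is outside this hunk; a minimal numpy sketch of what such a one-hot encoder typically does (an assumption, not the repository's implementation):

    import numpy as np

    def imagesToOneHot(labels, nbClasses):
        # Label k becomes a length-nbClasses row with a 1 at index k and 0 elsewhere
        return np.eye(nbClasses)[np.asarray(labels, dtype=int)]

    print(imagesToOneHot([3, 0, 9], 10))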