#!/usr/bin/env python3
|
|
|
|
|
|
"""
|
|
@authors: Yann & Sam'
|
|
"""
|
|
|
|
|
|
import theano
|
|
from theano import tensor as T
|
|
|
|
|
|
"""
|
|
This program made us understand that 'theano.function' does only one update
|
|
per execution.
|
|
This program being only a way to understand the goal of the argument updates,
|
|
some other arguments of this function are useless here. However, in order
|
|
for the program to be syntactically correct those needed to be here.
|
|
These arguments are highlighted in the comments.
|
|
|
|
We had two hypotheses concerning the functioning of the argument updates :
|
|
-either it was making the update a finite number of times
|
|
-or it was making the update until it had no more impact on the function cost
|
|
|
|
So we developed this program which was making an update that even theoretically
|
|
would always modify cost.
|
|
If this program turned out to be an infinite loop, our second hypothesis would
|
|
be true.
|
|
If not, our first would be (and the number of updates done should appear).
|
|
|
|
The result showed us that 'theano.function' was only doing one update per
|
|
execution!
|
|
"""
|
|
|
|
|
|
def main():
    """Run one `theano.function` call to observe how many times `updates`
    is applied per execution (the experiment shows: exactly once).

    Takes no arguments; prints the number of updates performed.
    """

    X = T.scalar()  # Experimental input
    Y = T.scalar()  # Experimental output

    w = theano.shared(value=1.0)  # Variable to be updated

    # Theoretical model. It only needs to depend on `w`, so that every
    # update of `w` changes the model output (and therefore the cost).
    y = X + 1 / w

    # Error between the model and the reality; needs to depend on `y`.
    cost = y - Y

    # Idiomatic form: `updates` is a list of (shared_variable, new_expression)
    # pairs. The original nested-list form `updates=[[w, w + 1]]` happened to
    # work because theano accepts any iterable of pairs, but the tuple-pair
    # list is the documented convention and builds the same update graph.
    updates = [(w, w + 1)]

    # Function whose repeated-or-single application of `updates` is under
    # test. `inputs`, `outputs` and `allow_input_downcast` are only here so
    # that the call is syntactically valid; the experiment cares about
    # `updates` alone.
    train = theano.function(inputs=[X, Y], outputs=cost, updates=updates,
                            allow_input_downcast=True)

    # Run it once.
    train(0.0, 0.0)

    # `w` started at 1.0 and each update adds 1, so this prints the number
    # of updates actually performed (result: 1.0, validating hypothesis #1).
    print(w.get_value() - 1)
|
|
|
|
|
|
# Script entry point: run the experiment only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
|