After watching Tensorflow lessons i applied the first example own my own however with loop which range of 10 i get W=0 instead of 5.000001. Even with single backward propagation im getting NaN numpy value. I was confused, what am I missing?
Environment: Google Collab
import numpy as np #2.6.0
import tensorflow as tf #1.19.5
W = tf.Variable(0, dtype=tf.float32)
optimizer = tf.keras.optimizers.Adam(0,1)
def train_step():
with tf.GradientTape() as tape:
cost = W**2 - 10*W + 25
trainable_variable = [W]
grads = tape.gradient(cost, trainable_variable)
optimizer.apply_gradients(zip(grads, trainable_variable))
print(W) #<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.0>
train_step()
print(W) #<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=nan>
for i in range(10):
train_step()
print(W) #<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=nan>