When I run the train step, I get the errors below. It looks like something is wrong with generated_image.
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Input In [82], in <cell line: 7>()
1 ### you cannot edit this cell
2
3 # You always must run the last cell before this one. You will get an error if not.
5 generated_image = tf.Variable(generated_image)
----> 7 train_step_test(train_step, generated_image)
File /tf/W4A2/public_tests.py:86, in train_step_test(target, generated_image)
82 def train_step_test(target, generated_image):
83 generated_image = tf.Variable(generated_image)
---> 86 J1 = target(generated_image)
87 print(J1)
88 assert type(J1) == EagerTensor, f"Wrong type {type(J1)} != {EagerTensor}"
File /usr/local/lib/python3.8/dist-packages/tensorflow/python/util/traceback_utils.py:153, in filter_traceback.<locals>.error_handler(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153 raise e.with_traceback(filtered_tb) from None
154 finally:
155 del filtered_tb
File /tmp/__autograph_generated_filecuiry_cp.py:13, in outer_factory.<locals>.inner_factory.<locals>.tf__train_step(generated_image)
11 a_G = ag__.converted_call(ag__.ld(vgg_model_outputs), (ag__.ld(generated_image),), None, fscope)
12 J_style = ag__.converted_call(ag__.ld(compute_style_cost), (ag__.ld(a_S), ag__.ld(a_G)), None, fscope)
---> 13 J_content = ag__.converted_call(ag__.ld(compute_content_cost), (ag__.ld(a_C), ag__.ld(a_G)), None, fscope)
14 J = ag__.converted_call(ag__.ld(total_cost), (ag__.ld(J_content), ag__.ld(J_style)), dict(alpha=10, beta=40), fscope)
15 grad = ag__.converted_call(ag__.ld(tape).gradient, (ag__.ld(J), ag__.ld(generated_image)), None, fscope)
File /tmp/__autograph_generated_fileomdhzywl.py:14, in outer_factory.<locals>.inner_factory.<locals>.tf__compute_content_cost(content_output, generated_output)
12 a_G = ag__.ld(generated_output)[(- 1)]
13 (_, n_H, n_W, n_C) = ag__.converted_call(ag__.converted_call(ag__.ld(a_G).get_shape, (), None, fscope).as_list, (), None, fscope)
---> 14 a_C_unrolled = ag__.converted_call(ag__.ld(tf).reshape, (ag__.ld(content_output),), dict(shape=[(- 1), ag__.ld(n_C)]), fscope)
15 a_G_unrolled = ag__.converted_call(ag__.ld(tf).reshape, (ag__.ld(generated_output),), dict(shape=[(- 1), ag__.ld(n_C)]), fscope)
16 J_content = (ag__.converted_call(ag__.ld(tf).reduce_sum, (((ag__.ld(a_C_unrolled) - ag__.ld(a_G_unrolled)) ** 2),), None, fscope) / (((4 * ag__.ld(n_H)) * ag__.ld(n_W)) * ag__.ld(n_C)))
ValueError: in user code:
File "<ipython-input-49-b6fcbd5a7ea2>", line 23, in train_step *
J_content = compute_content_cost(a_C, a_G)
File "<ipython-input-12-7d786d95c734>", line 25, in compute_content_cost *
a_C_unrolled = tf.reshape(content_output, shape=[-1, n_C])
ValueError: Tried to convert 'tensor' to a tensor and failed. Error: Dimension 1 in both shapes must be equal, but are 50 and 25. Shapes are [1,50,50,512] and [1,25,25,512].
From merging shape 3 with other shapes. for '{{node Reshape_10/packed}} = Pack[N=6, T=DT_FLOAT, axis=0](Reshape_10/tensor/values_0, Reshape_10/tensor/values_1, Reshape_10/tensor/values_2, Reshape_10/tensor/values_3, Reshape_10/tensor/values_4, Reshape_10/tensor/values_5)' with input shapes: [1,400,400,64], [1,200,200,128], [1,100,100,256], [1,50,50,512], [1,25,25,512], [1,25,25,512].