Week 2 Exercise 3.2 alpaca_model

I am having trouble with the following error in alpaca_model:

I would be grateful if you could help.

ValueError                                Traceback (most recent call last)
<ipython-input-44-11ffc7a7acb3> in <module>
----> 1 model2 = alpaca_model(IMG_SIZE, data_augmentation)

<ipython-input-43-07a7b21e40ac> in alpaca_model(image_shape, data_augmentation)
     35 
     36     # set training to False to avoid keeping track of statistics in the batch norm layer
---> 37     x = base_model(x, training=False)
     38 
     39     # add the new Binary classification layers

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
    983 
    984         with ops.enable_auto_cast_variables(self._compute_dtype_object):
--> 985           outputs = call_fn(inputs, *args, **kwargs)
    986 
    987         if self._activity_regularizer:

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/functional.py in call(self, inputs, training, mask)
    384     """
    385     return self._run_internal_graph(
--> 386         inputs, training=training, mask=mask)
    387 
    388   def compute_output_shape(self, input_shape):

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/functional.py in _run_internal_graph(self, inputs, training, mask)
    506 
    507         args, kwargs = node.map_arguments(tensor_dict)
--> 508         outputs = node.layer(*args, **kwargs)
    509 
    510         # Update tensor_dict.

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
    983 
    984         with ops.enable_auto_cast_variables(self._compute_dtype_object):
--> 985           outputs = call_fn(inputs, *args, **kwargs)
    986 
    987         if self._activity_regularizer:

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/layers/convolutional.py in call(self, inputs)
   2848   def call(self, inputs):
   2849     return backend.spatial_2d_padding(
-> 2850         inputs, padding=self.padding, data_format=self.data_format)
   2851 
   2852   def get_config(self):

/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
    199     """Call target, and fall back on dispatchers if there is a TypeError."""
    200     try:
--> 201       return target(*args, **kwargs)
    202     except (TypeError, ValueError):
    203       # Note: convert_to_eager_tensor currently raises a ValueError, not a

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/backend.py in spatial_2d_padding(x, padding, data_format)
   3325   else:
   3326     pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
-> 3327   return array_ops.pad(x, pattern)
   3328 
   3329 

/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
    199     """Call target, and fall back on dispatchers if there is a TypeError."""
    200     try:
--> 201       return target(*args, **kwargs)
    202     except (TypeError, ValueError):
    203       # Note: convert_to_eager_tensor currently raises a ValueError, not a

/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/array_ops.py in pad(tensor, paddings, mode, name, constant_values)
   3341     # remove the "Pad" fallback here.
   3342     if not tensor_util.is_tensor(constant_values) and constant_values == 0:
-> 3343       result = gen_array_ops.pad(tensor, paddings, name=name)
   3344     else:
   3345       result = gen_array_ops.pad_v2(

/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/gen_array_ops.py in pad(input, paddings, name)
   6558     try:
   6559       return pad_eager_fallback(
-> 6560           input, paddings, name=name, ctx=_ctx)
   6561     except _core._SymbolicException:
   6562       pass  # Add nodes to the TensorFlow graph.

/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/gen_array_ops.py in pad_eager_fallback(input, paddings, name, ctx)
   6578 
   6579 def pad_eager_fallback(input, paddings, name, ctx):
-> 6580   _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
   6581   _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], ctx, _dtypes.int32)
   6582   _inputs_flat = [input, paddings]

/opt/conda/lib/python3.7/site-packages/tensorflow/python/eager/execute.py in args_to_matching_eager(l, ctx, default_dtype)
    261       ret.append(
    262           ops.convert_to_tensor(
--> 263               t, dtype, preferred_dtype=default_dtype, ctx=ctx))
    264       if dtype is None:
    265         dtype = ret[-1].dtype

/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
   1497 
   1498     if ret is None:
-> 1499       ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
   1500 
   1501     if ret is NotImplemented:

/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
    336                                          as_ref=False):
    337   _ = as_ref
--> 338   return constant(v, dtype=dtype, name=name)
    339 
    340 

/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
    262   """
    263   return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 264                         allow_broadcast=True)
    265 
    266 

/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
    273       with trace.Trace("tf.constant"):
    274         return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
--> 275     return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
    276 
    277   g = ops.get_default_graph()

/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/constant_op.py in _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
    298 def _constant_eager_impl(ctx, value, dtype, shape, verify_shape):
    299   """Implementation of eager constant."""
--> 300   t = convert_to_eager_tensor(value, ctx, dtype)
    301   if shape is None:
    302     return t

/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
     96       dtype = dtypes.as_dtype(dtype).as_datatype_enum
     97   ctx.ensure_initialized()
---> 98   return ops.EagerTensor(value, ctx.device_name, dtype)
     99 
    100 

ValueError: Attempt to convert a value (<function preprocess_input at 0x7fa630374a70>) with an unsupported type (<class 'function'>) to a Tensor.

The line of code that is throwing the error looks correct: it is the same as the code I have, and mine passes. So the error must be on one of the previous lines. Perhaps the x value that is fed into base_model is itself a function rather than a tensor. Carefully examine the previous lines with this idea in mind. You can even add a print statement like this right before the line that “throws”:

print(f"type(x) = {type(x)}")

In my code, that gives this result:

type(x) = <class 'tensorflow.python.framework.ops.Tensor'>

What do you see when you do that?


Yes, you’re totally right. It writes that x is a function, not a tensor.

I guess my mistake is in the line:
x = tf.keras.applications.mobilenet_v2.preprocess_input

Do I have to preprocess the data in a different way?

The point is that preprocess_input is a function, right? So you’ve set x to be that function itself. You need to set x to the output of that function when you feed it the previous value of x. That’s the point.
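
To make the distinction concrete, here is a minimal sketch (the tensor names and the 160x160x3 shape are illustrative, not the graded solution):

import tensorflow as tf

preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input

inputs = tf.keras.Input(shape=(160, 160, 3))  # symbolic batch of images
x = inputs

# Wrong: this rebinds x to the function object itself, so the next layer
# receives a <class 'function'> instead of a tensor.
# x = preprocess_input

# Right: call the function on the current tensor; x stays a tensor,
# rescaled to the [-1, 1] range MobileNetV2 was trained on.
x = preprocess_input(x)
print(type(x))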

When I set x that way, I get this error message:

ValueError                                Traceback (most recent call last)
<ipython-input-39-11ffc7a7acb3> in <module>
----> 1 model2 = alpaca_model(IMG_SIZE, data_augmentation)

<ipython-input-38-f937fbd80aad> in alpaca_model(image_shape, data_augmentation)
     27 
     28     # apply data augmentation to the inputs
---> 29     x = data_augmentation(inputs)
     30 
     31     # data preprocessing using the same weights the model was trained on

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
    924     if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
    925       return self._functional_construction_call(inputs, args, kwargs,
--> 926                                                 input_list)
    927 
    928     # Maintains info about the `Layer.call` stack.

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in _functional_construction_call(self, inputs, args, kwargs, input_list)
   1090       # TODO(reedwm): We should assert input compatibility after the inputs
   1091       # are casted, not before.
-> 1092       input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
   1093       graph = backend.get_graph()
   1094       # Use `self._name_scope()` to avoid auto-incrementing the name.

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/input_spec.py in assert_input_compatibility(input_spec, inputs, layer_name)
    178                          'expected ndim=' + str(spec.ndim) + ', found ndim=' +
    179                          str(ndim) + '. Full shape received: ' +
--> 180                          str(x.shape.as_list()))
    181     if spec.max_ndim is not None:
    182       ndim = x.shape.ndims

ValueError: Input 0 of layer sequential_3 is incompatible with the layer: expected ndim=4, found ndim=3. Full shape received: [None, 160, 160]

That’s two steps earlier in the process. I think that says there is something incorrect about your data_augmenter function.

I put this print statement right before the call to the augmenter:

print(f"inputs.shape {inputs.shape}")

And here’s what I see from that:

inputs.shape (None, 160, 160, 3)

I’m going to guess you see something different than that. Now the question is why? :nerd_face:
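
In case it helps, here is a minimal sketch of how the Input shape drives that ndim count (the 160x160 size is taken from your traceback; everything else is illustrative):

import tensorflow as tf

# shape is the per-example shape; Keras prepends the batch dimension.
with_depth = tf.keras.Input(shape=(160, 160, 3))  # (None, 160, 160, 3) -> ndim=4
no_depth = tf.keras.Input(shape=(160, 160))       # (None, 160, 160)    -> ndim=3

print(with_depth.shape, no_depth.shape)

# The augmentation Sequential expects 4-D image batches, which is why it
# complains "expected ndim=4, found ndim=3" when the channel axis is missing.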


Yeah, depth was missing :joy:

I wrote img_shape instead of IMG_SHAPE.

Now I get TypeError: unsupported operand type(s) for /=: 'Sequential' and 'float' at the preprocess line. :sob:

inputs.shape (None, 160, 160, 3)

TypeError                                 Traceback (most recent call last)
<ipython-input-...> in <module>
----> 1 model2 = alpaca_model(IMG_SIZE, data_augmentation)

<ipython-input-...> in alpaca_model(image_shape, data_augmentation)
     32 
     33     # data preprocessing using the same weights the model was trained on
---> 34     x = preprocess_input(x)
     35 
     36 

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/applications/mobilenet_v2.py in preprocess_input(x, data_format)
    500 @keras_export('keras.applications.mobilenet_v2.preprocess_input')
    501 def preprocess_input(x, data_format=None):
--> 502   return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
    503 
    504 

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/applications/imagenet_utils.py in preprocess_input(x, data_format, mode)
    117   else:
    118     return _preprocess_symbolic_input(
--> 119         x, data_format=data_format, mode=mode)
    120 
    121 

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/applications/imagenet_utils.py in _preprocess_symbolic_input(x, data_format, mode)
    261   """
    262   if mode == 'tf':
--> 263     x /= 127.5
    264     x -= 1.
    265     return x

It’s the same story again: your x is the wrong type, which means something is wrong with your augmenter step. Put the same print statement right before the preprocess step:

print(f"type(x) = {type(x)}")

I’ll bet it shows that x is a Sequential object. It should be a tensor. So how did that happen?
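
For comparison, here is a minimal sketch of the builder-vs-instance pattern (the RandomFlip/RandomRotation contents of data_augmenter are my guess at typical augmentation layers; the point is the difference between the function and the Sequential it returns):

import tensorflow as tf

def data_augmenter():
    # Builds and returns a Sequential of augmentation layers.
    return tf.keras.Sequential([
        tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
        tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
    ])

data_augmentation = data_augmenter()  # a Sequential instance, usable as a layer

inputs = tf.keras.Input(shape=(160, 160, 3))

# Wrong: x ends up holding a Sequential object, which is why the later
# preprocess step fails with "unsupported operand type(s) for /=".
# x = data_augmenter()

# Right: call the instance on the tensor so x is the augmented tensor.
x = data_augmentation(inputs)
print(type(x))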


Yeah. It was a Sequential object because I used data_augmenter instead of the data_augmentation function. I thought they were the same because we set data_augmentation = data_augmenter().

All done with the assignment. Thanks for the help!
