Course 5 Week 4 Exercise 8

(5, 4)
(6, 4)
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-36-a562b46d78e0> in <module>
      1 # UNIT TEST
----> 2 Transformer_test(Transformer, create_look_ahead_mask, create_padding_mask)

~/work/W4A1/public_tests.py in Transformer_test(target, create_look_ahead_mask, create_padding_mask)
    276         enc_padding_mask,
    277         look_ahead_mask,
--> 278         dec_padding_mask
    279     )
    280 

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
   1010         with autocast_variable.enable_auto_cast_variables(
   1011             self._compute_dtype_object):
-> 1012           outputs = call_fn(inputs, *args, **kwargs)
   1013 
   1014         if self._activity_regularizer:

<ipython-input-35-81f635689a69> in call(self, input_sentence, output_sentence, training, enc_padding_mask, look_ahead_mask, dec_padding_mask)
     56         # call self.decoder with the appropriate arguments to get the decoder output
     57         # dec_output.shape == (batch_size, tar_seq_len, fully_connected_dim)
---> 58         dec_output, attention_weights = self.decoder(enc_output,output_sentence, training, look_ahead_mask, dec_padding_mask)
     59 
     60         # pass decoder output through a linear layer and softmax (~2 lines)

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
   1010         with autocast_variable.enable_auto_cast_variables(
   1011             self._compute_dtype_object):
-> 1012           outputs = call_fn(inputs, *args, **kwargs)
   1013 
   1014         if self._activity_regularizer:

<ipython-input-27-0bd7195bbe90> in call(self, x, enc_output, training, look_ahead_mask, padding_mask)
     49         # START CODE HERE
     50         # create word embeddings
---> 51         x = self.embedding(x)  # (batch_size, target_seq_len, fully_connected_dim)
     52 
     53         # scale embeddings by multiplying by the square root of their dimension

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
   1010         with autocast_variable.enable_auto_cast_variables(
   1011             self._compute_dtype_object):
-> 1012           outputs = call_fn(inputs, *args, **kwargs)
   1013 
   1014         if self._activity_regularizer:

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/layers/embeddings.py in call(self, inputs)
    194       out = embedding_ops.embedding_lookup_v2(self.embeddings.variables, inputs)
    195     else:
--> 196       out = embedding_ops.embedding_lookup_v2(self.embeddings, inputs)
    197     if self._dtype_policy.compute_dtype != self._dtype_policy.variable_dtype:
    198       # Instead of casting the variable as in most layers, cast the output, as

/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
    199     """Call target, and fall back on dispatchers if there is a TypeError."""
    200     try:
--> 201       return target(*args, **kwargs)
    202     except (TypeError, ValueError):
    203       # Note: convert_to_eager_tensor currently raises a ValueError, not a

/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/embedding_ops.py in embedding_lookup_v2(params, ids, max_norm, name)
    392     ValueError: If `params` is empty.
    393   """
--> 394   return embedding_lookup(params, ids, "div", name, max_norm=max_norm)
    395 
    396 

/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
    199     """Call target, and fall back on dispatchers if there is a TypeError."""
    200     try:
--> 201       return target(*args, **kwargs)
    202     except (TypeError, ValueError):
    203       # Note: convert_to_eager_tensor currently raises a ValueError, not a

/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/embedding_ops.py in embedding_lookup(params, ids, partition_strategy, name, validate_indices, max_norm)
    326       name=name,
    327       max_norm=max_norm,
--> 328       transform_fn=None)
    329 
    330 

/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/embedding_ops.py in _embedding_lookup_and_transform(params, ids, partition_strategy, name, max_norm, transform_fn)
    136       with ops.colocate_with(params[0]):
    137         result = _clip(
--> 138             array_ops.gather(params[0], ids, name=name), ids, max_norm)
    139         if transform_fn:
    140           result = transform_fn(result)

/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
    199     """Call target, and fall back on dispatchers if there is a TypeError."""
    200     try:
--> 201       return target(*args, **kwargs)
    202     except (TypeError, ValueError):
    203       # Note: convert_to_eager_tensor currently raises a ValueError, not a

/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/array_ops.py in gather(***failed resolving arguments***)
   4811     # TODO(apassos) find a less bad way of detecting resource variables
   4812     # without introducing a circular dependency.
-> 4813     return params.sparse_read(indices, name=name)
   4814   except AttributeError:
   4815     return gen_array_ops.gather_v2(params, indices, axis, name=name)

/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/resource_variable_ops.py in sparse_read(self, indices, name)
    701       variable_accessed(self)
    702       value = gen_resource_variable_ops.resource_gather(
--> 703           self._handle, indices, dtype=self._dtype, name=name)
    704 
    705       if self._dtype == dtypes.variant:

/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/gen_resource_variable_ops.py in resource_gather(resource, indices, dtype, batch_dims, validate_indices, name)
    547       return _result
    548     except _core._NotOkStatusException as e:
--> 549       _ops.raise_from_not_ok_status(e, name)
    550     except _core._FallbackException:
    551       pass

/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
   6860   message = e.message + (" name: " + name if name is not None else "")
   6861   # pylint: disable=protected-access
-> 6862   six.raise_from(core._status_to_exception(e.code, message), None)
   6863   # pylint: enable=protected-access
   6864 

/opt/conda/lib/python3.7/site-packages/six.py in raise_from(value, from_value)

InvalidArgumentError: indices[0,0,2] = -1 is not in [0, 35) [Op:ResourceGather]

This is the error I get on the last exercise; all of my other exercises have passed. I am unable to understand my mistake.

2 Likes

In the Transformer() class, in the call to self.decoder(…), you have the order of the first two arguments reversed. The Decoder's call() signature is call(self, x, enc_output, training, look_ahead_mask, padding_mask) — the target sequence comes first — so the call should be self.decoder(output_sentence, enc_output, training, look_ahead_mask, dec_padding_mask). Passing enc_output first means the (float, possibly negative) encoder activations are used as token indices for the embedding lookup, which is why you see "indices[0,0,2] = -1 is not in [0, 35)".