C5_W4_A1 assignment - Q7 and Q8: call() missing 1 required positional argument: 'padding_mask'

I am getting this error for Q7 and Q8 in this assignment when running Decoder_test(Decoder, create_look_ahead_mask, create_padding_mask):

TypeError                                 Traceback (most recent call last)
<ipython-input-41-dd4b65c051b4> in <module>
      1 # UNIT TEST
----> 2 Decoder_test(Decoder, create_look_ahead_mask, create_padding_mask)

~/work/W4A1/public_tests.py in Decoder_test(target, create_look_ahead_mask, create_padding_mask)
    218                     target_vocab_size,
    219                     maximum_position_encoding)
--> 220     outd, att_weights = decoderk(x, encoderq_output, False, look_ahead_mask, None)
    221     assert tf.is_tensor(outd), "Wrong type for outd. It must be a dict"
    222     assert np.allclose(tf.shape(outd), tf.shape(encoderq_output)), f"Wrong shape. We expected { tf.shape(encoderq_output)}"

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
   1010         with autocast_variable.enable_auto_cast_variables(
   1011             self._compute_dtype_object):
-> 1012           outputs = call_fn(inputs, *args, **kwargs)
   1013 
   1014         if self._activity_regularizer:

<ipython-input-40-e75b29735fe6> in call(self, x, enc_output, training, look_ahead_mask, padding_mask)
     64             # pass x and the encoder output through a stack of decoder layers and save the attention weights
     65             # of block 1 and 2 (~1 line)
---> 66             x, block1, block2 = self.dec_layers[i](x, enc_output, look_ahead_mask, padding_mask)
     67 
     68             #update attention_weights dictionary with the attention weights of block 1 and block 2

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
   1010         with autocast_variable.enable_auto_cast_variables(
   1011             self._compute_dtype_object):
-> 1012           outputs = call_fn(inputs, *args, **kwargs)
   1013 
   1014         if self._activity_regularizer:

TypeError: call() missing 1 required positional argument: 'padding_mask'

So I am not sure what is wrong. For Q7 I have:

    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):

and later, the loop that calls self.dec_layers[i] (the code fragment has been removed from this post; see the note at the end).

and for Q8, in Transformer() I have:

    def call(self, input_sentence, output_sentence, training, enc_padding_mask, look_ahead_mask, dec_padding_mask):

...


and the error in the Transformer test:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-39-a562b46d78e0> in <module>
      1 # UNIT TEST
----> 2 Transformer_test(Transformer, create_look_ahead_mask, create_padding_mask)

~/work/W4A1/public_tests.py in Transformer_test(target, create_look_ahead_mask, create_padding_mask)
    276         enc_padding_mask,
    277         look_ahead_mask,
--> 278         dec_padding_mask
    279     )
    280 

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
   1010         with autocast_variable.enable_auto_cast_variables(
   1011             self._compute_dtype_object):
-> 1012           outputs = call_fn(inputs, *args, **kwargs)
   1013 
   1014         if self._activity_regularizer:

<ipython-input-38-d809badbd9b5> in call(self, input_sentence, output_sentence, training, enc_padding_mask, look_ahead_mask, dec_padding_mask)
     54         # call self.decoder with the appropriate arguments to get the decoder output
     55         # dec_output.shape == (batch_size, tar_seq_len, fully_connected_dim)
---> 56         dec_output, attention_weights = self.decoder(output_sentence, enc_output, training, look_ahead_mask, dec_padding_mask)
     57         #(tar, enc_output, training, look_ahead_mask, dec_padding_mask)
     58         # pass decoder output through a linear layer and softmax (~2 lines)

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
   1010         with autocast_variable.enable_auto_cast_variables(
   1011             self._compute_dtype_object):
-> 1012           outputs = call_fn(inputs, *args, **kwargs)
   1013 
   1014         if self._activity_regularizer:

<ipython-input-25-e75b29735fe6> in call(self, x, enc_output, training, look_ahead_mask, padding_mask)
     64             # pass x and the encoder output through a stack of decoder layers and save the attention weights
     65             # of block 1 and 2 (~1 line)
---> 66             x, block1, block2 = self.dec_layers[i](x, enc_output, look_ahead_mask, padding_mask)
     67 
     68             #update attention_weights dictionary with the attention weights of block 1 and block 2

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
   1010         with autocast_variable.enable_auto_cast_variables(
   1011             self._compute_dtype_object):
-> 1012           outputs = call_fn(inputs, *args, **kwargs)
   1013 
   1014         if self._activity_regularizer:

TypeError: call() missing 1 required positional argument: 'padding_mask'

When you call self.dec_layers[i](...), you need to pass five arguments, but your code only passes four: training is missing. The four arguments you do pass shift left to fill the x, enc_output, training, and look_ahead_mask slots, which is why Python reports padding_mask as the one that is missing.
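
A sketch of the corrected line inside the loop in Decoder.call, assuming DecoderLayer.call takes (x, enc_output, training, look_ahead_mask, padding_mask) as in this assignment:

    # Forward `training` along with both masks: five arguments in total,
    # one for each parameter of DecoderLayer.call after `self`.
    x, block1, block2 = self.dec_layers[i](x, enc_output, training,
                                           look_ahead_mask, padding_mask)

The same change fixes Q8: the second traceback shows Transformer_test failing only because Transformer.call delegates to this Decoder.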

I have removed the code fragments from your post.
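
For anyone hitting this later, here is a self-contained toy sketch (a hypothetical DecoderLayerStub, not the course code) showing why dropping one positional argument makes Keras report the last parameter as missing: the remaining arguments all shift left by one slot.

    import tensorflow as tf

    class DecoderLayerStub(tf.keras.layers.Layer):
        # Same parameter order as the assignment's DecoderLayer.call.
        def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
            return x  # the body is irrelevant here; only the signature matters

    layer = DecoderLayerStub()
    x = tf.ones((1, 3, 4))

    layer(x, x, False, None, None)  # OK: five arguments, one per parameter

    try:
        layer(x, x, None, None)     # `training` omitted: the masks shift left
    except TypeError as e:
        print(e)  # call() missing 1 required positional argument: 'padding_mask'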