All of the tests passed, but on the final block:
# UNIT TEST
Transformer_test(Transformer, create_look_ahead_mask, create_padding_mask)
I am getting an error:
encoder_layer_out = [[[-0.6149346 1.3686488 -1.2326037 0.47888958]
[ 1.6967123 -0.83263165 -0.5972275 -0.26685312]
[ 1.442119 -1.2864674 -0.43361935 0.2779677 ]
[ 1.1852356 -1.5380929 -0.11273345 0.46559072]
[-0.63308257 -1.127487 0.2565169 1.5040525 ]]]
encoder_layer_out = [[[ 0.15969229 0.6766539 -1.6765249 0.84017855]
[ 1.7143396 -0.77358884 -0.37014523 -0.5706055 ]
[ 1.6432989 -0.99935913 -0.538209 -0.10573078]
[ 1.5857749 -1.1264372 -0.46521047 0.0058728 ]
[ 0.08590751 -0.81194097 -0.875895 1.6019285 ]]]
encoder_layer_out = [[[-0.19128206 1.0474675 -1.5460579 0.6898725 ]
[ 1.4826384 -1.3150132 0.08716178 -0.2547871 ]
[ 1.167125 -1.1333787 -0.8392772 0.8055309 ]
[ 1.0555865 -1.2907726 -0.6544113 0.8895974 ]
[ 0.18560201 -1.0709081 -0.6588961 1.5442021 ]]]
encoder_layer_out = [[[-0.54294515 1.6962909 -0.86189187 -0.2914541 ]
[ 1.422019 -1.3428799 0.25317556 -0.33231476]
[ 1.5721345 -0.6067703 -1.0718544 0.10649005]
[ 1.4195764 -1.0513583 -0.8211491 0.45293123]
[-1.6108713 0.7860212 -0.0603601 0.88521045]]]
encoder_layer_out = [[[ 1.1815082 0.7954658 -1.0808312 -0.8961429 ]
[ 1.6696426 -0.86414003 -0.13710538 -0.66839707]
[ 1.7068046 -0.32389355 -0.8047862 -0.5781248 ]
[ 1.6430861 -0.66853213 -0.9222786 -0.05227539]
[-0.5066343 0.4531508 -1.3030202 1.3565036 ]]]
encoder_layer_out = [[[ 0.7741163 1.1806433 -1.1967849 -0.7579746 ]
[ 1.4665614 -1.2658106 -0.4370998 0.23634912]
[ 1.6335142 -0.86573553 -0.76295334 -0.00482541]
[ 1.0432165 -1.2622757 -0.6949443 0.9140035 ]
[-1.2348831 0.6385331 -0.673813 1.270163 ]]]
InvalidArgumentError Traceback (most recent call last)
in
1 # UNIT TEST
----> 2 Transformer_test(Transformer, create_look_ahead_mask, create_padding_mask)
~/work/W4A1/public_tests.py in Transformer_test(target, create_look_ahead_mask, create_padding_mask)
276 enc_padding_mask,
277 look_ahead_mask,
--> 278 dec_padding_mask
279 )
280
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in call(self, *args, **kwargs)
1010 with autocast_variable.enable_auto_cast_variables(
1011 self._compute_dtype_object):
-> 1012 outputs = call_fn(inputs, *args, **kwargs)
1013
1014 if self._activity_regularizer:
in call(self, input_sentence, output_sentence, training, enc_padding_mask, look_ahead_mask, dec_padding_mask)
56 # call self.decoder with the appropriate arguments to get the decoder output
57 # dec_output.shape == (batch_size, tar_seq_len, embedding_dim)
---> 58 dec_output, attention_weights = self.decoder(output_sentence, enc_output, training, look_ahead_mask, dec_padding_mask)
59 #call(self, x, enc_output, training,look_ahead_mask, padding_mask)
60 # pass decoder output through a linear layer and softmax (~2 lines)
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py in call(self, *args, **kwargs)
1010 with autocast_variable.enable_auto_cast_variables(
1011 self._compute_dtype_object):
-> 1012 outputs = call_fn(inputs, *args, **kwargs)
1013
1014 if self._activity_regularizer:
in call(self, x, enc_output, training, look_ahead_mask, padding_mask)
67 x, block1, block2 = self.dec_layers[i](x, enc_output, training,
68 look_ahead_mask, padding_mask)
---> 69 print(x[1,1])
70 #(self, x, enc_output, training, look_ahead_mask, padding_mask):
71 #update attention_weights dictionary with the attention weights of block 1 and block 2
/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
199 """Call target, and fall back on dispatchers if there is a TypeError."""
200 try:
--> 201 return target(*args, **kwargs)
202 except (TypeError, ValueError):
203 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/array_ops.py in _slice_helper(tensor, slice_spec, var)
1045 ellipsis_mask=ellipsis_mask,
1046 var=var,
-> 1047 name=name)
1048
1049
/opt/conda/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
199 """Call target, and fall back on dispatchers if there is a TypeError."""
200 try:
--> 201 return target(*args, **kwargs)
202 except (TypeError, ValueError):
203 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/array_ops.py in strided_slice(input_, begin, end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, var, name)
1217 ellipsis_mask=ellipsis_mask,
1218 new_axis_mask=new_axis_mask,
-> 1219 shrink_axis_mask=shrink_axis_mask)
1220
1221 parent_name = name
/opt/conda/lib/python3.7/site-packages/tensorflow/python/ops/gen_array_ops.py in strided_slice(input, begin, end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, name)
10445 return _result
10446 except _core._NotOkStatusException as e:
10447 _ops.raise_from_not_ok_status(e, name)
10448 except _core._FallbackException:
10449 pass
/opt/conda/lib/python3.7/site-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
6860 message = e.message + (" name: " + name if name is not None else "")
6861 # pylint: disable=protected-access
-> 6862 six.raise_from(core._status_to_exception(e.code, message), None)
6863 # pylint: enable=protected-access
6864
/opt/conda/lib/python3.7/site-packages/six.py in raise_from(value, from_value)
InvalidArgumentError: slice index 1 of dimension 0 out of bounds. [Op:StridedSlice] name: transformer/decoder_2/strided_slice/