This is my code, but after some time the loss becomes inf and then nan. Can you please tell me where the mistake is?
import numpy as np
import tensorflow as tf

def custom_loss(x_train):
    def loss(y_true, y_pred):
        # some constants
        a, b, c, d = 1, 1, 500, 70

        ## Getting x_batch from the labels of y_true and sorting both in increasing order of indices
        y_true_num = y_true.numpy()
        y_pred_num = y_pred.numpy()
        # print(y_true_num)
        sorted_indices = np.argsort(y_true_num[:, 1])
        y_true_num = y_true_num[sorted_indices]
        y_pred_num = y_pred_num[sorted_indices]
        matching_indices_mask = np.isin(x_train[:, 1], y_true_num[:, 1])
        x_batch = x_train[matching_indices_mask]
        sorted_indices = np.argsort(x_batch[:, 1])
        x_batch = x_batch[:, 0][sorted_indices]
        y_true_num = y_true_num[:, 0]
        y_pred_num = y_pred_num[:, 0]
        # print(y_true_num, x_batch)
        # print("length - ", len(y_true_num), len(x_batch))

        ## Finding the transconductance Gm of y_true and y_pred (finite differences w.r.t. x_batch)
        assert len(y_true_num) == len(x_batch)
        y_true_grad = np.array([])
        y_pred_grad = np.array([])
        for i in range(len(y_true_num) - 1):
            val = 0 if x_batch[i + 1] == x_batch[i] else (y_true_num[i + 1] - y_true_num[i]) / (x_batch[i + 1] - x_batch[i])
            val2 = 0 if x_batch[i + 1] == x_batch[i] else (y_pred_num[i + 1] - y_pred_num[i]) / (x_batch[i + 1] - x_batch[i])
            y_true_grad = np.append(y_true_grad, [val])
            y_pred_grad = np.append(y_pred_grad, [val2])

        ## Finally calculating the loss
        # print("grads - ", len(y_true_grad), len(y_pred_grad))
        y_true_grad = tf.convert_to_tensor(y_true_grad)
        y_pred_grad = tf.convert_to_tensor(y_pred_grad)
        normal_loss = tf.reduce_mean(tf.square(tf.boolean_mask((y_true - y_pred) / y_true, ~tf.math.is_nan((y_true - y_pred) / y_true))))
        gradient_loss = tf.reduce_mean(tf.square(tf.boolean_mask(y_true_grad - y_pred_grad, ~tf.math.is_nan(y_true_grad - y_pred_grad))))
        # normal_loss = a * tf.reduce_mean(tf.square((y_true - y_pred) / y_true))
        # gradient_loss = c * tf.reduce_mean(tf.square(y_true_grad - y_pred_grad))
        normal_loss = tf.cast(normal_loss, dtype=tf.float64)
        print(normal_loss, gradient_loss)
        # print("Data types - normal_loss:", normal_loss.dtype, "gradient_loss:", gradient_loss.dtype)
        total_loss = normal_loss + gradient_loss
        return total_loss
    return loss
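In case the setup matters, this is roughly how the loss is wired into training. The data and model below are only placeholders with assumed shapes (column 0 = value, column 1 = index used for matching), not my real ones, and they reuse the imports and custom_loss defined above. run_eagerly=True is needed because the loss calls .numpy() on the tensors:

# Placeholder data: column 0 is the value, column 1 is an integer index used for matching/sorting.
x_train = np.stack([np.linspace(0.0, 1.0, 100), np.arange(100)], axis=1)
y_train = np.stack([np.sin(x_train[:, 0]) + 2.0, np.arange(100)], axis=1)

# Placeholder model: two outputs so that y_pred has the same (value, index) layout as y_true.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation="relu", input_shape=(2,)),
    tf.keras.layers.Dense(2),
])

model.compile(
    optimizer=tf.keras.optimizers.Adam(1e-3),
    loss=custom_loss(x_train),
    run_eagerly=True,  # required: the loss converts y_true / y_pred to NumPy arrays
)
model.fit(x_train, y_train, batch_size=32, epochs=5)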