Help me visualization convolution neural network

I have build a screenshot classifier and working fine with 98% accuracy. Now I want to visualize convolution filters but I am getting following error in Jupyter Notebook:

—> 29 visualization_model = tf.keras.models.Model(inputs = model.input, outputs = successive_outputs)
ValueError: The layer sequential_2 has never been called and thus has no defined input.

Here is my code:

# Prepare a random input image from the training set.
yes_img_files = [os.path.join('data/yes/', f) for f in os.listdir( 'data/yes' )]
no_img_files = [os.path.join('data/no', f) for f in os.listdir( 'data/no' )]
img_path = random.choice(yes_img_files + no_img_files)


img = load_img(img_path, target_size=(256, 256))  # this is a PIL image
x   = img_to_array(img)                           # Numpy array with shape (150, 150, 3)
x   = x.reshape((1,) + x.shape)                   # Numpy array with shape (1, 150, 150, 3)

# Scale by 1/255
x /= 255.0


# Run inference to ensure the model has been called
prediction = model.predict(x)


# Interpret the prediction
if prediction[0] > 0.5:
    print(f"The image is classified as 'Discord' with probability: {prediction[0][0]}")
else:
    print(f"The image is classified as 'Other' with probability: {1 - prediction[0][0]}")


# Define a new Model that will take an image as input, and will output
# intermediate representations for all layers in the previous model
successive_outputs = [layer.output for layer in model.layers]
visualization_model = tf.keras.models.Model(inputs = model.input, outputs = successive_outputs)


# Run the image through the network, thus obtaining all
# intermediate representations for this image.
successive_feature_maps = visualization_model.predict(x)


# These are the names of the layers, so you can have them as part of our plot
layer_names = [layer.name for layer in model.layers]

# Display the representations
for layer_name, feature_map in zip(layer_names, successive_feature_maps):
  
  if len(feature_map.shape) == 4:
    
    #-------------------------------------------
    # Just do this for the conv / maxpool layers, not the fully-connected layers
    #-------------------------------------------
    n_features = feature_map.shape[-1]  # number of features in the feature map
    size       = feature_map.shape[ 1]  # feature map shape (1, size, size, n_features)
    
    # Tile the images in this matrix
    display_grid = np.zeros((size, size * n_features))
    
    #-------------------------------------------------
    # Postprocess the feature to be visually palatable
    #-------------------------------------------------
    for i in range(n_features):
      x  = feature_map[0, :, :, i]
      x -= x.mean()
      x /= x.std ()
      x *=  64
      x += 128
      x  = np.clip(x, 0, 255).astype('uint8')
      display_grid[:, i * size : (i + 1) * size] = x # Tile each filter into a horizontal grid

    #-----------------
    # Display the grid
    #-----------------
    scale = 20. / n_features
    plt.figure( figsize=(scale * n_features, scale) )
    plt.title ( layer_name )
    plt.grid  ( False )
    plt.imshow( display_grid, aspect='auto', cmap='viridis' ) 

Model is trained and compiled. It is also making inference successfully but still getting error when accessing model.inputs.