Cartoonized images displayed on a 320×480 ST7796 SPI LCD, using Python on Raspberry Pi 4B

The previous post introduced cartoonizing images using OpenCV in Python, running on Windows 11. This exercise runs on a Raspberry Pi 4B with 64-bit Raspberry Pi OS (Bookworm), modified to display on a Waveshare 3.5" 320×480 ST7796 SPI LCD.

For the connection and setup demo (including the ST7796 driver), read "Test Waveshare 3.5inch Capacitive Touch LCD on Raspberry Pi Zero 2 W".
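The scripts below import the st7796 Python driver set up in that post. Based purely on how the driver is used in the scripts that follow, the basic flow looks roughly like this (a minimal sketch; the constructor and method names are taken from the usage below, and your driver version may differ):

import st7796
from PIL import Image

disp = st7796.st7796()                       # initialize the SPI LCD
print("LCD:", disp.width, "x", disp.height)  # resolution reported by the driver
disp.clear()                                 # clear the screen

# show_image() takes a PIL Image; the scripts below pass 320x480 images
img = Image.new("RGB", (320, 480), "black")
disp.show_image(img)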

OpenCV (cv2) is needed. To install OpenCV in a Python virtual environment:

Switch to the directory where you want the Python virtual environment to be located.
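For example (an arbitrary path; use whatever directory you prefer):
$ cd ~/python_projects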

Create a Python virtual environment that includes the system site packages, where envPy_cv2 is the name of my virtual environment:
$ python -m venv --system-site-packages envPy_cv2

Activate the virtual environment:
$ source envPy_cv2/bin/activate

Install OpenCV:
$ pip install opencv-python

Exit the virtual environment when finished:
$ deactivate

In Thonny, configure the interpreter to use the Python executable in the new virtual environment.
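A quick way to verify that Thonny is actually using the virtual environment is to run a two-line script that imports cv2 and prints its version; if the import fails, the selected interpreter is not the one inside envPy_cv2:

import cv2
print(cv2.__version__)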

Exercise code:

pyCartoonize.py, the same as in the previous exercise. Tested on Raspberry Pi.
"""
Testing exercise to cartoonize image

reference:
https://blog.finxter.com/5-best-ways-to-cartoonize-an-image-using-opencv-in-python/

In the post, four methods are listed to cartoonize an image using OpenCV in Python.
Method 1: Bilateral Filtering and Edge Detection.
          Produces a smooth, clean cartoon effect.
          Good for high-resolution images but can be computationally intensive.
Method 2: Color Quantization and Edge Enhancement.
          Delivers a visually distinct cartoon with flat colors and crisp borders.
          Works best with strongly contrasting images.
          The setup is slightly complex due to k-means.
Method 3: Enhancing Edges with a Combination of Filters.
          Provides a high-contrast edge-focused cartoon.
          Suitable for stylized images with strong lines.
          Not ideal for subtle color transitions.
Bonus Method 5: Simplified Cartoon Effect.
          Offers a quick solution with a minimal code footprint.
          Good for rapid prototyping but lacks the finesse of the other methods.

Remark:
All testing images were generated using Microsoft Copilot; they are not real.
"""
import cv2
import numpy as np
import os
import time

"""
cartoonize part copied from:
https://blog.finxter.com/5-best-ways-to-cartoonize-an-image-using-opencv-in-python/
"""
# Method 1: Applying Bilateral Filtering and Edge Detection
def cartoonize_method1(image_file):
    
    # Read the image
    img = cv2.imread(image_file)

    # Apply bilateral filter
    cartoon_img = cv2.bilateralFilter(img, d=9, sigmaColor=75, sigmaSpace=75)

    # Convert to grayscale and apply median blur
    gray = cv2.cvtColor(cartoon_img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.medianBlur(gray, 7)

    # Detect edges and create a mask
    edges = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2)
    colored_edges = cv2.bitwise_and(cartoon_img, cartoon_img, mask=edges)
    
    return colored_edges

# Method 2: Color Quantization and Edge Enhancement
def color_quantization(img, k):
    # Transform the image
    data = np.float32(img).reshape((-1, 3))

    # Determine criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)

    # Implementing K-Means
    ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    result = center[label.flatten()]
    result = result.reshape(img.shape)
    return result

def cartoonize_method2(image_file):
    # Read the image
    img = cv2.imread(image_file)

    # Color quantization
    quantized_img = color_quantization(img, k=9)

    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Apply median blur, detect edges
    blurred = cv2.medianBlur(gray, 7)
    edges = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2)

    # Combine quantized image with edge mask
    cartoon_img = cv2.bitwise_and(quantized_img, quantized_img, mask=edges)
    
    return cartoon_img

# Method 3: Enhancing Edges with a Combination of Filters
def cartoonize_method3(image_file):
    # Read and scale down the image
    img = cv2.pyrDown(cv2.imread(image_file))
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Apply median blur
    blurred = cv2.medianBlur(gray, 5)
    # Use a Laplacian filter for edge detection
    edges = cv2.Laplacian(blurred, cv2.CV_8U, ksize=5)
    # Invert the color of edges
    inverted_edges = 255-edges 
    # Combine original image with edges
    cartoon_img = cv2.bitwise_or(img, img, mask=inverted_edges)

    return cartoon_img

# Bonus One-Liner Method 5: Simplified Cartoon Effect
def cartoonize_method5(image_file):

    # Reading, smoothing, and edge detection all in one line
    return cv2.bitwise_and(cv2.bilateralFilter(cv2.imread(image_file), d=9, sigmaColor=300, sigmaSpace=300), cv2.bilateralFilter(cv2.imread(image_file), d=9, sigmaColor=300, sigmaSpace=300), mask=cv2.Canny(cv2.imread(image_file), 100, 150))


# Read jpg images from the "images_B" sub-folder
images_folder = "./images_B"
files = os.listdir(images_folder)
jpg_files = [file for file in files if file.endswith('.jpg')]
sorted_jpg_files = sorted(jpg_files)

display_time = 3


while True:
    for filename in sorted_jpg_files:
        img_path = os.path.join(images_folder, filename)
        print(filename)
    
        org_img = cv2.imread(img_path)
        cv2.imshow('org_img', org_img)
    
        cartoonized_image = cartoonize_method1(img_path)
        cv2.imshow('pyCartoonize', cartoonized_image)
    
        end_time = time.time() + display_time
    
        while (time.time() < end_time):
            if cv2.getWindowProperty('pyCartoonize', cv2.WND_PROP_VISIBLE) < 1:
                break
            if cv2.getWindowProperty('org_img', cv2.WND_PROP_VISIBLE) < 1:
                break
            cv2.waitKey(1)
        if cv2.getWindowProperty('pyCartoonize', cv2.WND_PROP_VISIBLE) < 1:
            break
        if cv2.getWindowProperty('org_img', cv2.WND_PROP_VISIBLE) < 1:
            break
    if cv2.getWindowProperty('pyCartoonize', cv2.WND_PROP_VISIBLE) < 1:
        break
    if cv2.getWindowProperty('org_img', cv2.WND_PROP_VISIBLE) < 1:
        break

cv2.destroyAllWindows()


pyCartoonize_image_show_st7796.py, displays a single cartoonized image.
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#import chardet

"""
Python exercise run on Raspberry Pi 4:
Cartoonized image on Waveshare 3.5inch Capacitive Touch LCD with ST7796 SPI driver.
Display single cartoonized image

Connection and setup, read:
https://coxxect.blogspot.com/2025/01/test-waveshare-35inch-capacitive-touch.html

reference to cartoonize image using OpenCV in Python:
https://blog.finxter.com/5-best-ways-to-cartoonize-an-image-using-opencv-in-python/

In the post, four methods are listed to cartoonize an image using OpenCV in Python.
Method 1: Bilateral Filtering and Edge Detection.
          Produces a smooth, clean cartoon effect.
          Good for high-resolution images but can be computationally intensive.
Method 2: Color Quantization and Edge Enhancement.
          Delivers a visually distinct cartoon with flat colors and crisp borders.
          Works best with strongly contrasting images.
          The setup is slightly complex due to k-means.
Method 3: Enhancing Edges with a Combination of Filters.
          Provides a high-contrast edge-focused cartoon.
          Suitable for stylized images with strong lines.
          Not ideal for subtle color transitions.
Bonus Method 5: Simplified Cartoon Effect.
          Offers a quick solution with a minimal code footprint.
          Good for rapid prototyping but lacks the finesse of the other methods.

remark:
All test images were generated by Microsoft Copilot; they are not real.
"""

import st7796
from PIL import Image, ImageOps
import cv2
import numpy as np

"""
cartoonize part copied from:
https://blog.finxter.com/5-best-ways-to-cartoonize-an-image-using-opencv-in-python/
"""
# Method 1: Applying Bilateral Filtering and Edge Detection
def cartoonize_method1(image_file):
    
    # Read the image
    img = cv2.imread(image_file)

    # Apply bilateral filter
    cartoon_img = cv2.bilateralFilter(img, d=9, sigmaColor=75, sigmaSpace=75)

    # Convert to grayscale and apply median blur
    gray = cv2.cvtColor(cartoon_img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.medianBlur(gray, 7)

    # Detect edges and create a mask
    edges = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2)
    colored_edges = cv2.bitwise_and(cartoon_img, cartoon_img, mask=edges)
    
    return colored_edges

# Method 2: Color Quantization and Edge Enhancement
def color_quantization(img, k):
    # Transform the image
    data = np.float32(img).reshape((-1, 3))

    # Determine criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)

    # Implementing K-Means
    ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    result = center[label.flatten()]
    result = result.reshape(img.shape)
    return result

def cartoonize_method2(image_file):
    # Read the image
    img = cv2.imread(image_file)

    # Color quantization
    quantized_img = color_quantization(img, k=9)

    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Apply median blur, detect edges
    blurred = cv2.medianBlur(gray, 7)
    edges = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2)

    # Combine quantized image with edge mask
    cartoon_img = cv2.bitwise_and(quantized_img, quantized_img, mask=edges)
    
    return cartoon_img

# Method 3: Enhancing Edges with a Combination of Filters
def cartoonize_method3(image_file):
    # Read and scale down the image
    img = cv2.pyrDown(cv2.imread(image_file))
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Apply median blur
    blurred = cv2.medianBlur(gray, 5)
    # Use a Laplacian filter for edge detection
    edges = cv2.Laplacian(blurred, cv2.CV_8U, ksize=5)
    # Invert the color of edges
    inverted_edges = 255-edges 
    # Combine original image with edges
    cartoon_img = cv2.bitwise_or(img, img, mask=inverted_edges)

    return cartoon_img

# Bonus One-Liner Method 5: Simplified Cartoon Effect
def cartoonize_method5(image_file):

    # Reading, smoothing, and edge detection all in one line
    return cv2.bitwise_and(cv2.bilateralFilter(cv2.imread(image_file), d=9, sigmaColor=300, sigmaSpace=300), cv2.bilateralFilter(cv2.imread(image_file), d=9, sigmaColor=300, sigmaSpace=300), mask=cv2.Canny(cv2.imread(image_file), 100, 150))

# End of cartoonize part

# For testing using Copilot generated 1024x1024 jpg
# Convert to 320x320
def resize(img):
    new_width = 320
    new_height = 320
    img_resized = img.resize((new_width, new_height), Image.LANCZOS)
    
    # expand to 320x480, with border of 80 pixel, (480-320)/2 = 80.
    img_expanded = ImageOps.expand(img_resized, border=(0, 80), fill='black')
    # Flip left right
    img_transposed = img_expanded.transpose(Image.FLIP_LEFT_RIGHT)
    
    return img_transposed
    

if __name__=='__main__':
    
    disp = st7796.st7796()
    print("st7796 LCD:", disp.width, "x", disp.height)
    disp.clear()

    # Read image from the "images_B" sub-folder
    ImagePath = "./images_B/img_009.jpg"
    image = Image.open(ImagePath)
    print("Open image:", ImagePath)
    
    image_org = resize(image)
    
    ndarray_cartoonized = cartoonize_method1(ImagePath)
    print("type of ndarray_cartoonized:", type(ndarray_cartoonized))
    #convert from numpy.ndarray to PIL Image
    ndarray_cartoonized = ndarray_cartoonized.astype(np.uint8)
    ndarray_cartoonized = ndarray_cartoonized[:, :, [2, 1, 0]]  # convert BGR to RGB order
    image_cartoonized = Image.fromarray(ndarray_cartoonized)
    image_cartoonized_resized = resize(image_cartoonized)
    
    print("type for disp.show_image():", image_cartoonized_resized)
    disp.show_image(image_cartoonized_resized)


pyCartoonize_image_blend_st7796.py, blends between the original and cartoonized versions of a single image.
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#import chardet

"""
Python exercise run on Raspberry Pi 4:
Cartoonized image on Waveshare 3.5inch Capacitive Touch LCD with ST7796 SPI driver.
Blend between the original and cartoonized versions of a single image

Connection and setup, read:
https://coxxect.blogspot.com/2025/01/test-waveshare-35inch-capacitive-touch.html

remark:
All test images were generated by Microsoft Copilot; they are not real.
"""

import st7796
from PIL import Image, ImageOps
import cv2
import numpy as np
import time

"""
cartoonize part copied from:
https://blog.finxter.com/5-best-ways-to-cartoonize-an-image-using-opencv-in-python/
"""
# Method 1: Applying Bilateral Filtering and Edge Detection
def cartoonize_method1(image_file):
    
    # Read the image
    img = cv2.imread(image_file)

    # Apply bilateral filter
    cartoon_img = cv2.bilateralFilter(img, d=9, sigmaColor=75, sigmaSpace=75)

    # Convert to grayscale and apply median blur
    gray = cv2.cvtColor(cartoon_img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.medianBlur(gray, 7)

    # Detect edges and create a mask
    edges = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2)
    colored_edges = cv2.bitwise_and(cartoon_img, cartoon_img, mask=edges)
    
    return colored_edges

# Method 2: Color Quantization and Edge Enhancement
def color_quantization(img, k):
    # Transform the image
    data = np.float32(img).reshape((-1, 3))

    # Determine criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)

    # Implementing K-Means
    ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    result = center[label.flatten()]
    result = result.reshape(img.shape)
    return result

def cartoonize_method2(image_file):
    # Read the image
    img = cv2.imread(image_file)

    # Color quantization
    quantized_img = color_quantization(img, k=9)

    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Apply median blur, detect edges
    blurred = cv2.medianBlur(gray, 7)
    edges = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2)

    # Combine quantized image with edge mask
    cartoon_img = cv2.bitwise_and(quantized_img, quantized_img, mask=edges)
    
    return cartoon_img

# Method 3: Enhancing Edges with a Combination of Filters
def cartoonize_method3(image_file):
    # Read and scale down the image
    img = cv2.pyrDown(cv2.imread(image_file))
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Apply median blur
    blurred = cv2.medianBlur(gray, 5)
    # Use a Laplacian filter for edge detection
    edges = cv2.Laplacian(blurred, cv2.CV_8U, ksize=5)
    # Invert the color of edges
    inverted_edges = 255-edges 
    # Combine original image with edges
    cartoon_img = cv2.bitwise_or(img, img, mask=inverted_edges)

    return cartoon_img

# Bonus One-Liner Method 5: Simplified Cartoon Effect
def cartoonize_method5(image_file):

    # Reading, smoothing, and edge detection all in one line
    return cv2.bitwise_and(cv2.bilateralFilter(cv2.imread(image_file), d=9, sigmaColor=300, sigmaSpace=300), cv2.bilateralFilter(cv2.imread(image_file), d=9, sigmaColor=300, sigmaSpace=300), mask=cv2.Canny(cv2.imread(image_file), 100, 150))

# End of cartoonize part

# For testing using Copilot generated 1024x1024 jpg
# Convert to 320x320
def resize(img):
    new_width = 320
    new_height = 320
    img_resized = img.resize((new_width, new_height), Image.LANCZOS)
    
    # expand to 320x480, with border of 80 pixel, (480-320)/2 = 80.
    img_expanded = ImageOps.expand(img_resized, border=(0, 80), fill='black')
    # Flip left right
    img_transposed = img_expanded.transpose(Image.FLIP_LEFT_RIGHT)
    
    return img_transposed
    

if __name__=='__main__':
    
    disp = st7796.st7796()
    print("st7796 LCD:", disp.width, "x", disp.height)
    disp.clear()

    # Read image from the "images_B" sub-folder
    ImagePath = "./images_B/img_004.jpg"
    image = Image.open(ImagePath)
    print("Open image:", ImagePath)
    
    image_org_resized = resize(image)
    
    ndarray_cartoonized = cartoonize_method1(ImagePath)
    print("type of ndarray_cartoonized:", type(ndarray_cartoonized))
    
    #convert from numpy.ndarray to PIL Image
    ndarray_cartoonized = ndarray_cartoonized.astype(np.uint8)
    ndarray_cartoonized = ndarray_cartoonized[:, :, [2, 1, 0]]  # convert BGR to RGB order
    image_cartoonized = Image.fromarray(ndarray_cartoonized)
    image_cartoonized_resized = resize(image_cartoonized)
    
    print("type for disp.show_image():", image_cartoonized_resized)
    disp.show_image(image_cartoonized_resized)
    
    alpha_step = 20
    while True:
        for alpha in range(0, 101, alpha_step):
            blended_image = Image.blend(image_org_resized, image_cartoonized_resized, alpha/100)
            disp.show_image(blended_image) #show on LCD
        time.sleep(2)
        for alpha in range(100, -1, -alpha_step):
            blended_image = Image.blend(image_org_resized, image_cartoonized_resized, alpha/100)
            disp.show_image(blended_image) #show on LCD
        time.sleep(2)


pyCartoonize_slideshow_blend_st7796.py, loops through the jpg images in a sub-folder, switching between the original and cartoonized versions.
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#import chardet

"""
Cartoonized image on Waveshare 3.5inch Capacitive Touch LCD with ST7796 SPI driver.
Loop through the jpg images in a sub-folder, switching between the original and cartoonized versions.

Connection and setup, read:
https://coxxect.blogspot.com/2025/01/test-waveshare-35inch-capacitive-touch.html

remark:
All test images were generated by Microsoft Copilot; they are not real.
"""

import st7796
from PIL import Image, ImageOps
import cv2
import numpy as np
import time
import os

"""
cartoonize part copied from:
https://blog.finxter.com/5-best-ways-to-cartoonize-an-image-using-opencv-in-python/
"""
# Method 1: Applying Bilateral Filtering and Edge Detection
def cartoonize_method1(image_file):
    
    # Read the image
    img = cv2.imread(image_file)

    # Apply bilateral filter
    cartoon_img = cv2.bilateralFilter(img, d=9, sigmaColor=75, sigmaSpace=75)

    # Convert to grayscale and apply median blur
    gray = cv2.cvtColor(cartoon_img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.medianBlur(gray, 7)

    # Detect edges and create a mask
    edges = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2)
    colored_edges = cv2.bitwise_and(cartoon_img, cartoon_img, mask=edges)
    
    return colored_edges

# Method 2: Color Quantization and Edge Enhancement
def color_quantization(img, k):
    # Transform the image
    data = np.float32(img).reshape((-1, 3))

    # Determine criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)

    # Implementing K-Means
    ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    result = center[label.flatten()]
    result = result.reshape(img.shape)
    return result

def cartoonize_method2(image_file):
    # Read the image
    img = cv2.imread(image_file)

    # Color quantization
    quantized_img = color_quantization(img, k=9)

    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Apply median blur, detect edges
    blurred = cv2.medianBlur(gray, 7)
    edges = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2)

    # Combine quantized image with edge mask
    cartoon_img = cv2.bitwise_and(quantized_img, quantized_img, mask=edges)
    
    return cartoon_img

# Method 3: Enhancing Edges with a Combination of Filters
def cartoonize_method3(image_file):
    # Read and scale down the image
    img = cv2.pyrDown(cv2.imread(image_file))
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Apply median blur
    blurred = cv2.medianBlur(gray, 5)
    # Use a Laplacian filter for edge detection
    edges = cv2.Laplacian(blurred, cv2.CV_8U, ksize=5)
    # Invert the color of edges
    inverted_edges = 255-edges 
    # Combine original image with edges
    cartoon_img = cv2.bitwise_or(img, img, mask=inverted_edges)

    return cartoon_img

# Bonus One-Liner Method 5: Simplified Cartoon Effect
def cartoonize_method5(image_file):

    # Reading, smoothing, and edge detection all in one line
    return cv2.bitwise_and(cv2.bilateralFilter(cv2.imread(image_file), d=9, sigmaColor=300, sigmaSpace=300), cv2.bilateralFilter(cv2.imread(image_file), d=9, sigmaColor=300, sigmaSpace=300), mask=cv2.Canny(cv2.imread(image_file), 100, 150))

# End of cartoonize part

# For testing using Copilot generated 1024x1024 jpg
# Convert to 320x320
def resize(img):
    new_width = 320
    new_height = 320
    img_resized = img.resize((new_width, new_height), Image.LANCZOS)
    
    # expand to 320x480, with border of 80 pixel, (480-320)/2 = 80.
    img_expanded = ImageOps.expand(img_resized, border=(0, 80), fill='black')
    # Flip left right
    img_transposed = img_expanded.transpose(Image.FLIP_LEFT_RIGHT)
    
    return img_transposed
    

if __name__=='__main__':
    
    disp = st7796.st7796()
    print("st7796 LCD:", disp.width, "x", disp.height)
    disp.clear()
    
    images_folder = "./images_B"
    files = os.listdir(images_folder)
    jpg_files = [file for file in files if file.endswith('.jpg')]
    jpg_file_list = sorted(jpg_files)
    
    print("=== Start ===")
    print("# of file:", len(jpg_file_list))
    
    while True:
        for i in range(len(jpg_file_list)):  # iterate over all images in the folder
            ImagePath = os.path.join(images_folder, jpg_file_list[i])
            print("Open image:", ImagePath)
            
            image = Image.open(ImagePath)
            image_org_resized = resize(image)
            disp.show_image(image_org_resized)
            
            end_time = time.time() + 2  # display the original image for at least 2 seconds
            
            ndarray_cartoonized = cartoonize_method3(ImagePath)
            
            #convert from numpy.ndarray to PIL Image
            ndarray_cartoonized = ndarray_cartoonized.astype(np.uint8)
            ndarray_cartoonized = ndarray_cartoonized[:, :, [2, 1, 0]]  # convert BGR to RGB order
            image_cartoonized = Image.fromarray(ndarray_cartoonized)
            image_cartoonized_resized = resize(image_cartoonized)
            
            while (time.time() < end_time):
                pass
            
            alpha_step = 10
            for alpha in range(0, 101, alpha_step):
                blended_image = Image.blend(image_org_resized,
                                            image_cartoonized_resized,
                                            alpha/100)
                disp.show_image(blended_image) #show on LCD
            time.sleep(4)

