added App
Files changed:
- app.py (+412, -0)
- estimate_homography.py (+119, -0)
- requirements.txt (+7, -0)
app.py
ADDED
@@ -0,0 +1,412 @@
import streamlit as st
import cv2
import numpy as np
from PIL import Image
import time
import ast
from streamlit_drawable_canvas import st_canvas
import matplotlib.pyplot as plt
from estimate_homography import calculate_homography, fit_image_in_target_space

stitched_image_rgb, stitched_result = None, None

# Load an uploaded file as a grayscale OpenCV image.
def load_image(uploaded_file):
    img = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_GRAYSCALE)
    return img

# Estimate relative pose from feature matches, rectify both views, and compute
# a disparity map.
def compute_stereo_vision(img1, img2):
    # Feature detection and matching using ORB (a good alternative to SIFT for
    # uncalibrated cameras).
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    # Brute-force matcher; Hamming distance suits ORB's binary descriptors.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)

    # Sort matches by descriptor distance.
    matches = sorted(matches, key=lambda x: x.distance)

    # Matched point coordinates in both images.
    pts1 = np.array([kp1[m.queryIdx].pt for m in matches])
    pts2 = np.array([kp2[m.trainIdx].pt for m in matches])

    # Fundamental matrix with RANSAC to reject outlier matches.
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)

    # Estimate the camera pose (rotation and translation). Without calibration
    # data, assume an identity intrinsic matrix, so E reduces to F.
    K = np.eye(3)
    E = K.T @ F @ K  # Essential matrix
    _, R, T, _ = cv2.recoverPose(E, pts1, pts2)

    # Stereo rectification: each camera gets its own rectifying rotation
    # (R1/R2) and projection (P1/P2), which feed the undistort-rectify maps.
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K, None, K, None, img1.shape[::-1], R, T, alpha=0)
    left_map_x, left_map_y = cv2.initUndistortRectifyMap(K, None, R1, P1, img1.shape[::-1], cv2.CV_32F)
    right_map_x, right_map_y = cv2.initUndistortRectifyMap(K, None, R2, P2, img2.shape[::-1], cv2.CV_32F)

    # Apply the rectification transformations to the images.
    img1_rectified = cv2.remap(img1, left_map_x, left_map_y, interpolation=cv2.INTER_LINEAR)
    img2_rectified = cv2.remap(img2, right_map_x, right_map_y, interpolation=cv2.INTER_LINEAR)

    # Resize img2_rectified to match img1_rectified, if necessary.
    if img1_rectified.shape != img2_rectified.shape:
        img2_rectified = cv2.resize(img2_rectified, (img1_rectified.shape[1], img1_rectified.shape[0]))

    # Disparity map computation using block matching.
    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
    disparity = stereo.compute(img1_rectified, img2_rectified)

    return disparity, img1_rectified, img2_rectified


def run_point_est(world_pts, img_pts, img):
    if isinstance(img_pts, list):
        img_pts = np.array(img_pts)

    if isinstance(world_pts, list):
        world_pts = np.array(world_pts)

    # Plot the original image with the selected points marked.
    st.write("Original Image with Points")
    plt.figure()
    plt.imshow(img)
    plt.scatter(img_pts[:, 0], img_pts[:, 1], color='red')
    plt.axis("off")
    plt.title("Original image with image points marked in red")
    st.pyplot(plt)

    H = calculate_homography(img_pts, world_pts)  # img_pts = H * world_pts

    #### Cross-check: project the world points through H and compare ####
    t_one = np.ones((world_pts.shape[0], 1))
    t_out_pts = np.concatenate((world_pts, t_one), axis=1)
    x = np.matmul(H, t_out_pts.T)
    x = x / x[-1, :]

    st.write("Given Image Points:", img_pts)
    st.write("Calculated Image Points:", x.T)
    st.write("Homography Matrix (OpenCV):", cv2.findHomography(world_pts, img_pts)[0])
    st.write("Calculated Homography Matrix:", H)

    # Map the image corners into world coordinates to size the output canvas.
    h, w, _ = img.shape
    corners_img = np.array([[0, 0], [w, 0], [w, h], [0, h]])
    H_inv = np.linalg.inv(H)
    t_one = np.ones((corners_img.shape[0], 1))  # One homogeneous coordinate per corner
    t_out_pts = np.concatenate((corners_img, t_one), axis=1)
    world_crd_corners = np.matmul(H_inv, t_out_pts.T)
    world_crd_corners = world_crd_corners / world_crd_corners[-1, :]  # Normalize

    min_crd = np.amin(world_crd_corners.T, axis=0)
    max_crd = np.amax(world_crd_corners.T, axis=0)

    offset = min_crd.astype(np.int64)
    offset[2] = 0

    width_world = np.ceil(max_crd - min_crd)[0] + 1
    height_world = np.ceil(max_crd - min_crd)[1] + 1

    world_img = np.zeros((int(height_world), int(width_world), 3), dtype=np.uint8)
    mask = np.ones((int(height_world), int(width_world)))

    out = fit_image_in_target_space(img, world_img, mask, H, offset)

    st.write("Corrected Image")
    plt.figure()
    plt.imshow(out)
    plt.axis("off")
    plt.title("Corrected image with point-to-point correspondence")
    st.pyplot(plt)


# Stitch a list of images into a panorama with OpenCV's high-level Stitcher.
def stitch_images(images):
    stitcher = cv2.Stitcher_create() if cv2.__version__.startswith('4') else cv2.createStitcher()
    status, stitched_image = stitcher.stitch(images)
    if status == cv2.Stitcher_OK:
        return stitched_image, status
    else:
        return None, status

# Match SIFT features between the first two images and draw the best matches.
def match_features(images):
    if len(images) < 2:
        return None, "At least two images are required for feature matching."

    gray1 = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(images[1], cv2.COLOR_BGR2GRAY)

    sift = cv2.SIFT_create()
    keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
    keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)

    # L2 distance suits SIFT's floating-point descriptors.
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    matches = bf.match(descriptors1, descriptors2)
    matches = sorted(matches, key=lambda x: x.distance)

    matched_image = cv2.drawMatches(images[0], keypoints1, images[1], keypoints2, matches[:50], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    return matched_image, None

# Cartoonify an image: an edge mask from adaptive thresholding combined with
# flat colors from bilateral filtering.
def cartoonify_image(image):
    # Convert to grayscale and smooth to suppress noise in the edge map.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    gray_blur = cv2.medianBlur(gray, 7)

    edges = cv2.adaptiveThreshold(
        gray_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 10
    )

    # Bilateral filter smooths colors while preserving edges.
    color = cv2.bilateralFilter(image, 9, 250, 250)

    cartoon = cv2.bitwise_and(color, color, mask=edges)

    return cartoon

+
# Streamlit layout and UI
|
| 168 |
+
st.set_page_config(page_title="Image Stitching and Feature Matching", layout="wide")
|
| 169 |
+
st.title("Image Stitching and Feature Matching Application")
|
| 170 |
+
|
| 171 |
+
# State to store captured images
|
| 172 |
+
if "captured_images" not in st.session_state:
|
| 173 |
+
st.session_state["captured_images"] = []
|
| 174 |
+
|
| 175 |
+
if "stitched_image" not in st.session_state:
|
| 176 |
+
st.session_state["stitched_image"] = None
|
| 177 |
+
# Sidebar for displaying captured images
|
| 178 |
+
st.sidebar.header("Captured Images")
|
| 179 |
+
if st.session_state["captured_images"]:
|
| 180 |
+
placeholder = st.sidebar.empty()
|
| 181 |
+
with placeholder.container():
|
| 182 |
+
for i, img in enumerate(st.session_state["captured_images"]):
|
| 183 |
+
img_thumbnail = cv2.resize(img, (100, 100))
|
| 184 |
+
st.image(cv2.cvtColor(img_thumbnail, cv2.COLOR_BGR2RGB), caption=f"Image {i+1}", use_container_width =False)
|
| 185 |
+
if st.button(f"Delete Image {i+1}", key=f"delete_{i}"):
|
| 186 |
+
st.session_state["captured_images"].pop(i)
|
| 187 |
+
placeholder.empty() # Clear and refresh the sidebar
|
| 188 |
+
break
|
| 189 |
+
|
| 190 |
+
# Capture the image from camera input
|
| 191 |
+
st.header("Upload or Capture Images")
|
| 192 |
+
uploaded_files = st.file_uploader("Upload images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
|
| 193 |
+
captured_image = st.camera_input("Take a picture using your camera")
|
| 194 |
+
|
| 195 |
+
if st.button("Add Captured Image"):
|
| 196 |
+
if captured_image:
|
| 197 |
+
captured_image_array = cv2.cvtColor(np.array(Image.open(captured_image)), cv2.COLOR_RGB2BGR)
|
| 198 |
+
st.session_state["captured_images"].append(captured_image_array)
|
| 199 |
+
st.success(f"Captured image {len(st.session_state['captured_images'])} added!")
|
| 200 |
+
|
| 201 |
+
# Combine uploaded and captured images
|
| 202 |
+
images = [cv2.cvtColor(np.array(Image.open(file)), cv2.COLOR_RGB2BGR) for file in uploaded_files]
|
| 203 |
+
images.extend(st.session_state["captured_images"])
|
| 204 |
+
|
| 205 |
+
st.write(f"Total images: {len(images)}")
|
| 206 |
+
|
# Placeholder for dynamic status updates
loading_placeholder = st.empty()

# Show a brief loading message while processing
def show_loading_bar(placeholder):
    with placeholder:
        st.write("Processing images... Please wait.")
        time.sleep(2)

if st.button("Stitch Images"):
    if len(images) < 2:
        st.error("Please provide at least two images for stitching.")
    else:
        show_loading_bar(loading_placeholder)
        stitched_result, status = stitch_images(images)
        loading_placeholder.empty()
        if stitched_result is not None:
            stitched_image_rgb = cv2.cvtColor(stitched_result, cv2.COLOR_BGR2RGB)
            st.image(stitched_image_rgb, caption="Stitched Image", use_container_width=True)
            st.session_state["stitched_image"] = stitched_image_rgb  # Stored in RGB order
            st.success("Stitching completed successfully!")
        else:
            st.error(f"Stitching failed with status: {status}.")

if st.button("Show Matching Features"):
    if len(images) < 2:
        st.error("Please provide at least two images for feature matching.")
    else:
        show_loading_bar(loading_placeholder)
        matched_image, error = match_features(images)
        loading_placeholder.empty()
        if matched_image is not None:
            matched_image_rgb = cv2.cvtColor(matched_image, cv2.COLOR_BGR2RGB)
            st.image(matched_image_rgb, caption="Feature Matching Visualization", use_container_width=True)
            st.success("Feature matching completed successfully!")
        else:
            st.error(error)

if st.session_state["stitched_image"] is not None:
    st.header("Homography Transformation on Stitched Image")

    st.write("### Select Points on Stitched Image")
    stitched_image = st.session_state["stitched_image"]
    image = Image.fromarray(stitched_image)  # The stored stitched image is already RGB

    canvas_result = st_canvas(
        fill_color="rgba(255, 0, 0, 0.3)",
        stroke_width=3,
        background_image=image,
        update_streamlit=True,
        drawing_mode="point",
        height=image.height,
        width=image.width,
        key="canvas",
    )

    img_pts = []

    # Each drawn point is stored by the canvas as a small circle; recover its center.
    if canvas_result.json_data is not None:
        for obj in canvas_result.json_data["objects"]:
            if obj["type"] == "circle":
                x = obj["left"] + obj["width"] / 2
                y = obj["top"] + obj["height"] / 2
                img_pts.append([int(x), int(y)])

    if img_pts:
        st.write("### Selected Image Points")
        st.write(img_pts)

        st.write("### Enter Corresponding World Points")
        world_pts = st.text_area(
            "Enter world points as a list of tuples (e.g., [(0, 0), (300, 0), (0, 400), (300, 400)])",
            value="[(0, 0), (300, 0), (0, 400), (300, 400)]",
        )

        if st.button("Run Homography Transformation"):
            try:
                # literal_eval parses the tuple list without executing arbitrary code
                world_pts = ast.literal_eval(world_pts)
                if len(world_pts) != len(img_pts):
                    st.error("The number of world points must match the number of image points.")
                else:
                    run_point_est(world_pts, img_pts, stitched_image)
            except Exception as e:
                st.error(f"Error: {e}")


if st.session_state["stitched_image"] is not None:
    st.header("Cartoonify Your Stitched Image")
    if st.button("Cartoonify Stitched Image"):
        # The stored image is RGB; cartoonify_image expects BGR.
        cartoon = cartoonify_image(cv2.cvtColor(st.session_state["stitched_image"], cv2.COLOR_RGB2BGR))
        st.image(cv2.cvtColor(cartoon, cv2.COLOR_BGR2RGB), caption="Cartoonified Image", use_container_width=True)
        st.success("Cartoonification completed successfully!")

# Stereo vision: upload a left and a right view of the same scene.
st.subheader("Upload Left and Right Images")
left_image_file = st.file_uploader("Choose the Left Image", type=["jpg", "png", "jpeg"])
right_image_file = st.file_uploader("Choose the Right Image", type=["jpg", "png", "jpeg"])

# Proceed once both images are uploaded
if left_image_file and right_image_file:
    # Load the uploaded images as grayscale
    img1 = load_image(left_image_file)
    img2 = load_image(right_image_file)

    # Display the uploaded images
    st.image(img1, caption="Left Image", use_container_width=True)
    st.image(img2, caption="Right Image", use_container_width=True)

    # Compute the rectified views and the disparity map
    disparity, img1_rectified, img2_rectified = compute_stereo_vision(img1, img2)

    # Display the rectified images (disabled)
    # st.subheader("Rectified Left Image")
    # st.image(img1_rectified, caption="Rectified Left Image", use_container_width=True)

    # st.subheader("Rectified Right Image")
    # st.image(img2_rectified, caption="Rectified Right Image", use_container_width=True)

    # Show the disparity map
    fig, ax = plt.subplots()
    st.subheader("Disparity Map")
    plt.imshow(disparity, cmap='gray')
    plt.title("Disparity Map")
    plt.colorbar()
    st.pyplot(fig)

    # Optionally: display an anaglyph / combined view of the images (disabled)
    # anaglyph = cv2.merge([img1_rectified, np.zeros_like(img1_rectified), img2_rectified])
    # st.subheader("Anaglyph Stereo View")
    # st.image(anaglyph, caption="Anaglyph Stereo View", use_container_width=True)
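
For quick local testing, the stitching path above can also be exercised outside Streamlit. A minimal sketch, assuming two overlapping photos exist at the hypothetical paths left.jpg and right.jpg:

import cv2

# Load the two overlapping views (hypothetical file names).
imgs = [cv2.imread("left.jpg"), cv2.imread("right.jpg")]

# Same high-level Stitcher used by stitch_images() in app.py.
stitcher = cv2.Stitcher_create()
status, pano = stitcher.stitch(imgs)
if status == cv2.Stitcher_OK:
    cv2.imwrite("pano.jpg", pano)
else:
    print(f"Stitching failed with status {status}")
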
estimate_homography.py
ADDED
@@ -0,0 +1,119 @@
import cv2
import numpy as np

def calculate_homography(in_pts, out_pts):
    """
    Calculates the homography matrix H such that in_pts = H * out_pts.
    :param in_pts: Source points as a numpy array.
    :param out_pts: Destination points as a numpy array.
    :return: Homography matrix H.
    """
    if isinstance(in_pts, list):
        in_pts = np.array(in_pts)

    if isinstance(out_pts, list):
        out_pts = np.array(out_pts)

    # Solve the linear system in the least-squares sense and append h33 = 1.
    mat_A, mat_b = build_sys_equations(in_pts, out_pts)
    H = np.matmul(np.linalg.pinv(mat_A), mat_b)
    H = np.reshape(np.hstack((H, 1)), (3, 3))
    return H

def build_sys_equations(in_pts, out_pts):
    """
    Builds the system of equations for homography calculation.
    :param in_pts: Array of input points.
    :param out_pts: Array of output points.
    :return: Matrix A and vector b.
    """
    mat_A = np.zeros((np.size(in_pts), 8))
    mat_b = in_pts.ravel()

    # Each point correspondence contributes two rows: one for x, one for y.
    i = 0
    for x, y in out_pts:
        # x row
        mat_A[i][0:3] = [x, y, 1]
        mat_A[i][-2:] = [-x * mat_b[i], -y * mat_b[i]]

        # y row
        mat_A[i + 1][-5:] = [x, y, 1, -x * mat_b[i + 1], -y * mat_b[i + 1]]

        i += 2

    return mat_A, mat_b

def fit_image_in_target_space(img_src, img_dst, mask, H, offset=np.array([0, 0, 0])):
    """
    Warps img_src into img_dst using the homography matrix H.
    :param img_src: Source image.
    :param img_dst: Target image.
    :param mask: Mask for the destination region.
    :param H: Homography matrix.
    :param offset: Offset correction array [x_offset, y_offset, 0].
    :return: Transformed image.
    """
    pts = get_pixel_coord(mask)  # Get all pixel coordinates in the mask region.
    pts = pts + offset  # Apply offset correction.
    out_src = np.matmul(H, pts.T)  # Map destination pixels back into the source image.
    out_src = out_src / out_src[-1, :]  # Normalize homogeneous coordinates.

    out_src = out_src[0:2, :].T  # Extract x, y coordinates.
    pts = pts[:, 0:2].astype(np.int64)  # Target points in the destination image.

    img_dst = get_pixel_val(img_dst, img_src, pts, out_src, offset)
    return img_dst

def get_pixel_coord(mask):
    """
    Extracts x, y coordinates of nonzero pixels in a binary mask.
    :param mask: Binary mask.
    :return: Homogeneous coordinates of the mask pixels.
    """
    y, x = np.where(mask)
    pts = np.concatenate((x[:, np.newaxis], y[:, np.newaxis], np.ones((x.size, 1))), axis=1)
    return pts

def get_pixel_val(img_dst, img_src, pts, out_src, offset):
    """
    Fetches pixel values by inverse-distance-weighted interpolation over the
    four integer neighbors of each source location.
    :param img_dst: Destination image.
    :param img_src: Source image.
    :param pts: Points in the destination image.
    :param out_src: Corresponding points in the source image.
    :param offset: Offset correction.
    :return: Updated destination image.
    """
    h, w, _ = img_src.shape
    # Integer corners surrounding each source location, in (row, col) order.
    tl = np.floor(out_src[:, ::-1]).astype(np.int64)
    br = np.ceil(out_src[:, ::-1]).astype(np.int64)

    # Undo the offset so pts index directly into img_dst, then discard points
    # whose neighborhoods fall outside the source image.
    pts = pts - offset[:2]
    valid_mask = ~np.logical_or.reduce(
        (np.any(tl < 0, axis=1), np.any(br < 0, axis=1), tl[:, 0] >= h - 1, tl[:, 1] >= w - 1, br[:, 0] >= h - 1, br[:, 1] >= w - 1)
    )
    pts = pts[valid_mask]
    out_src = out_src[valid_mask]
    tl = tl[valid_mask]
    br = br[valid_mask]

    tr = np.concatenate((tl[:, 0:1], br[:, 1:2]), axis=1)
    bl = np.concatenate((br[:, 0:1], tl[:, 1:2]), axis=1)

    # Weight each corner by the inverse of its distance to the source location.
    weight = np.zeros((out_src.shape[0], 4))
    weight[:, 0] = np.linalg.norm(tl - out_src[:, ::-1], axis=1)
    weight[:, 1] = np.linalg.norm(tr - out_src[:, ::-1], axis=1)
    weight[:, 2] = np.linalg.norm(bl - out_src[:, ::-1], axis=1)
    weight[:, 3] = np.linalg.norm(br - out_src[:, ::-1], axis=1)

    weight[weight == 0] = 1
    weight = 1 / weight
    weight /= np.sum(weight, axis=1, keepdims=True)

    img_dst[pts[:, 1], pts[:, 0], :] = (
        img_src[tl[:, 0], tl[:, 1], :] * weight[:, 0:1]
        + img_src[tr[:, 0], tr[:, 1], :] * weight[:, 1:2]
        + img_src[bl[:, 0], bl[:, 1], :] * weight[:, 2:3]
        + img_src[br[:, 0], br[:, 1], :] * weight[:, 3:4]
    )
    return img_dst
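
A minimal self-check sketch for the solver above (run alongside this module; the point values are illustrative). With four exact correspondences, the least-squares solve should agree with OpenCV's estimate up to numerical precision:

import numpy as np
import cv2
from estimate_homography import calculate_homography

# Illustrative point sets: four world points and their (assumed) image positions.
world_pts = np.array([[0, 0], [300, 0], [300, 400], [0, 400]], dtype=np.float64)
img_pts = np.array([[12, 8], [310, 22], [295, 430], [5, 415]], dtype=np.float64)

H = calculate_homography(img_pts, world_pts)  # img_pts = H * world_pts
H_cv, _ = cv2.findHomography(world_pts, img_pts)

# Project the world points through H and compare with the given image points.
ones = np.ones((world_pts.shape[0], 1))
proj = H @ np.concatenate((world_pts, ones), axis=1).T
proj = (proj / proj[-1, :])[:2].T
print(np.allclose(proj, img_pts, atol=1e-6))                   # expect True
print(np.allclose(H / H[2, 2], H_cv / H_cv[2, 2], atol=1e-6))  # expect True
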
requirements.txt
ADDED
@@ -0,0 +1,7 @@
streamlit
opencv-contrib-python
opencv-python-headless
matplotlib
pillow
plotly
streamlit-drawable-canvas
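
With these dependencies installed (pip install -r requirements.txt), the app starts locally with streamlit run app.py.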