import gradio as gr
import cv2
import numpy as np
from PIL import Image
import os

# Try to load the Haar Cascade classifier for face detection
face_cascade = None
cascade_paths = [
    "haarcascade_frontalface_default.xml",
    "./haarcascade_frontalface_default.xml",
    os.path.join(os.path.dirname(__file__), "haarcascade_frontalface_default.xml"),
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
]

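# Probe the candidate locations in order and keep the first classifier that actually loads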
for path in cascade_paths:
    if os.path.exists(path):
        face_cascade = cv2.CascadeClassifier(path)
        if not face_cascade.empty():
            print(f"Successfully loaded Haar Cascade from: {path}")
            break
        else:
            print(f"Failed to load Haar Cascade from: {path}")
    else:
        print(f"File not found: {path}")

if face_cascade is None or face_cascade.empty():
    print("Warning: Could not load Haar Cascade classifier. Face detection will be disabled.")
    face_cascade = None

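# Apply the selected effect with the following priority:
#   1. a detected face near the clicked point,
#   2. otherwise a fixed-size region around the click,
#   3. otherwise every detected face in the image,
#   4. otherwise a region at the image center.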
def process_image(image, click_x, click_y, effect_type):
    if image is None:
        return None, "Please upload an image first."

    img_np = np.array(image)
    img_np_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
    processed_img_np_bgr = img_np_bgr.copy()

    status_message = ""
    applied_to_region = False

    # Prioritize clicked region if available
    if click_x is not None and click_y is not None:
        # Try to find a face near the click
        faces = []
        if face_cascade is not None:
            gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
            try:
                all_faces = face_cascade.detectMultiScale(gray, 1.1, 4)
                min_distance = float("inf")
                target_face = None
                for (fx, fy, fw, fh) in all_faces:
                    face_center_x = fx + fw // 2
                    face_center_y = fy + fh // 2
                    distance = np.sqrt((face_center_x - click_x)**2 + (face_center_y - click_y)**2)
                    if distance < min_distance and distance < 100: # Within 100 pixels of click
                        min_distance = distance
                        target_face = (fx, fy, fw, fh)
                if target_face:
                    faces.append(target_face)
            except Exception as e:
                print(f"Face detection error during click processing: {e}")

        if len(faces) > 0:
            # Apply effect to the detected face near click
            x, y, w, h = faces[0]
            roi = processed_img_np_bgr[y:y+h, x:x+w]
            status_message = f"Applied {effect_type} effect to detected face near click."
            applied_to_region = True
        else:
            # Apply effect to a general region around the click
            region_size = 100
            x1 = max(0, int(click_x - region_size // 2))
            y1 = max(0, int(click_y - region_size // 2))
            x2 = min(image.width, int(click_x + region_size // 2))
            y2 = min(image.height, int(click_y + region_size // 2))

            roi = processed_img_np_bgr[y1:y2, x1:x2]
            status_message = f"Applied {effect_type} effect to clicked region."
            applied_to_region = True

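        # Apply the chosen effect to whichever ROI was selected above (face or clicked region)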
        if applied_to_region:
            if effect_type == "blur":
                processed_roi = cv2.GaussianBlur(roi, (15, 15), 0)
            elif effect_type == "sharpen":
                kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
                processed_roi = cv2.filter2D(roi, -1, kernel)
            elif effect_type == "grayscale":
                processed_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
                processed_roi = cv2.cvtColor(processed_roi, cv2.COLOR_GRAY2BGR)
            elif effect_type == "pixelate":
                h_roi, w_roi = roi.shape[:2]
                temp = cv2.resize(roi, (max(1, w_roi // 10), max(1, h_roi // 10)), interpolation=cv2.INTER_LINEAR)
                processed_roi = cv2.resize(temp, (w_roi, h_roi), interpolation=cv2.INTER_NEAREST)
            else:
                processed_roi = roi
            
            if len(faces) > 0:
                processed_img_np_bgr[y:y+h, x:x+w] = processed_roi
            else:
                processed_img_np_bgr[y1:y2, x1:x2] = processed_roi

    if not applied_to_region: # Fallback if no click or no specific region applied
        # Apply effect to all detected faces if no click or no face near click
        faces = []
        if face_cascade is not None:
            gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
            try:
                faces = face_cascade.detectMultiScale(gray, 1.1, 4)
            except Exception as e:
                print(f"Face detection error: {e}")
                faces = []

        if len(faces) > 0:
            for (x, y, w, h) in faces:
                roi = processed_img_np_bgr[y:y+h, x:x+w]

                if effect_type == "blur":
                    processed_roi = cv2.GaussianBlur(roi, (35, 35), 0)
                elif effect_type == "sharpen":
                    kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
                    processed_roi = cv2.filter2D(roi, -1, kernel)
                elif effect_type == "grayscale":
                    processed_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
                    processed_roi = cv2.cvtColor(processed_roi, cv2.COLOR_GRAY2BGR)
                elif effect_type == "pixelate":
                    h_roi, w_roi = roi.shape[:2]
                    temp = cv2.resize(roi, (max(1, w_roi // 10), max(1, h_roi // 10)), interpolation=cv2.INTER_LINEAR)
                    processed_roi = cv2.resize(temp, (w_roi, h_roi), interpolation=cv2.INTER_NEAREST)
                else:
                    processed_roi = roi

                processed_img_np_bgr[y:y+h, x:x+w] = processed_roi
            status_message = f"Applied {effect_type} effect to {len(faces)} detected face(s)."
        else:
            # Apply effect to center region if no faces detected and no click
            h, w = img_np_bgr.shape[:2]
            center_x, center_y = w // 2, h // 2
            region_size = min(200, w//3, h//3)
            
            x1 = max(0, center_x - region_size // 2)
            y1 = max(0, center_y - region_size // 2)
            x2 = min(w, center_x + region_size // 2)
            y2 = min(h, center_y + region_size // 2)

            # Extract the center ROI, apply the selected effect, and write it back
            roi = processed_img_np_bgr[y1:y2, x1:x2]

            if effect_type == "blur":
                processed_roi = cv2.GaussianBlur(roi, (35, 35), 0)
            elif effect_type == "sharpen":
                kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
                processed_roi = cv2.filter2D(roi, -1, kernel)
            elif effect_type == "grayscale":
                processed_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
                processed_roi = cv2.cvtColor(processed_roi, cv2.COLOR_GRAY2BGR)
            elif effect_type == "pixelate":
                h_roi, w_roi = roi.shape[:2]
                temp = cv2.resize(roi, (max(1, w_roi // 10), max(1, h_roi // 10)), interpolation=cv2.INTER_LINEAR)
                processed_roi = cv2.resize(temp, (w_roi, h_roi), interpolation=cv2.INTER_NEAREST)
            else:
                processed_roi = roi

            processed_img_np_bgr[y1:y2, x1:x2] = processed_roi
            
            if face_cascade is None:
                status_message = f"Applied {effect_type} effect to center region (face detection unavailable)."
            else:
                status_message = f"No faces detected. Applied {effect_type} effect to center region."

    img_pil = Image.fromarray(cv2.cvtColor(processed_img_np_bgr, cv2.COLOR_BGR2RGB))
    return img_pil, status_message

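# Detect faces and draw bounding boxes on a copy of the uploaded image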
def detect_faces_only(image):
    if image is None:
        return None, "Please upload an image first."
    
    if face_cascade is None:
        return image, "Face detection is not available (Haar Cascade not loaded)."
    
    img_np = np.array(image)
    img_np_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
    
    gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
    
    try:
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    except Exception as e:
        return image, f"Face detection error: {str(e)}"
    
    # Draw rectangles around detected faces
    for (x, y, w, h) in faces:
        cv2.rectangle(img_np_bgr, (x, y), (x+w, y+h), (255, 0, 0), 2)
    
    img_pil = Image.fromarray(cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2RGB))
    return img_pil, f"Detected {len(faces)} face(s)."

# Custom CSS for better styling
css = """
.gradio-container {
    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
}
.main-header {
    text-align: center;
    color: #2c3e50;
    margin-bottom: 20px;
}
.instruction-text {
    background-color: #f8f9fa;
    padding: 15px;
    border-radius: 8px;
    border-left: 4px solid #007bff;
    margin-bottom: 20px;
}
"""

# Gradio interface
with gr.Blocks(css=css, title="AI Image Editor") as demo:
    gr.HTML("<h1 class='main-header'>🎨 AI Image Editor (CPU-friendly)</h1>")
    
    face_detection_status = "✅ Face detection enabled" if face_cascade is not None else "⚠️ Face detection disabled (Haar Cascade not found)"
    
    gr.HTML(f"""
    <div class='instruction-text'>
        <strong>Status:</strong> {face_detection_status}<br><br>
        <strong>Instructions:</strong>
        <ol>
            <li>Upload an image using the file uploader</li>
            <li>Click on the image to select a region (optional)</li>
            <li>Choose an effect from the dropdown menu</li>
            <li>Click "Apply Effect" to process the image</li>
            <li>If face detection is available, use "Detect Faces" to see detected faces</li>
        </ol>
        <em>Note: If you click on the image, the effect will be applied to the clicked region (prioritizing faces near the click). Otherwise, if face detection is available, effects will be applied to all detected faces. As a last resort, effects will be applied to the center region.</em>
    </div>
    """)
    
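    # Two-column layout: upload and controls on the left, processed output on the right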
    with gr.Row():
        with gr.Column(scale=1):
            input_image = gr.Image(
                type="pil", 
                label="📁 Upload Image", 
                interactive=True,
                height=400
            )
            
            with gr.Row():
                effect_dropdown = gr.Dropdown(
                    ["None", "blur", "sharpen", "grayscale", "pixelate"], 
                    label="🎭 Select Effect", 
                    value="blur"
                )
            
            with gr.Row():
                process_button = gr.Button("✨ Apply Effect", variant="primary", size="lg")
                if face_cascade is not None:
                    detect_button = gr.Button("👀 Detect Faces", variant="secondary", size="lg")
            
            status_text = gr.Textbox(
                label="📊 Status", 
                interactive=False,
                placeholder="Ready to process..."
            )
        
        with gr.Column(scale=1):
            output_image = gr.Image(
                type="pil", 
                label="🖼️ Processed Image",
                height=400
            )

    # Store click coordinates
    clicked_x = gr.State(None)
    clicked_y = gr.State(None)

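    # For image components, gr.SelectData.index holds the (x, y) pixel coordinates of the click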
    def get_coords(evt: gr.SelectData):
        if evt.index is not None and len(evt.index) == 2:
            return evt.index[0], evt.index[1]
        return None, None

    input_image.select(get_coords, None, [clicked_x, clicked_y])

    process_button.click(
        fn=process_image,
        inputs=[input_image, clicked_x, clicked_y, effect_dropdown],
        outputs=[output_image, status_text]
    )
    
    if face_cascade is not None:
        detect_button.click(
            fn=detect_faces_only,
            inputs=[input_image],
            outputs=[output_image, status_text]
        )

    gr.HTML("""
    <div style='text-align: center; margin-top: 20px; color: #6c757d;'>
        <p>Built with ❤️ for CPU-friendly image processing | Powered by OpenCV & Gradio</p>
    </div>
    """)

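# Launch the app locally when this file is run as a script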
if __name__ == "__main__":
    demo.launch()