initial commit
- .gitignore +1 -0
- app.py +44 -0
- model.pkl +3 -0
- requirements.txt +2 -0

.gitignore ADDED
@@ -0,0 +1 @@
+.venv/**

app.py ADDED
@@ -0,0 +1,44 @@
+import gradio as gr
+import fastai.vision.all as fv
+from PIL import Image, ImageDraw
+import skimage
+
+learn = fv.load_learner("model.pkl")
+
+def call(image, step_size:int=100, blocks:int=4):
+    # print(image)
+    original_image = Image.fromarray(image).resize((400,400))
+
+    image = Image.new(mode='RGB', size=(step_size*blocks, step_size*blocks)) #, color=255
+
+    draw = ImageDraw.Draw(image)
+    for (x,y) in [ (x,y) for x in range(0, blocks * step_size, step_size) for y in range(0, blocks * step_size, step_size)]:
+        cropped_image = original_image.crop((x, y, x+step_size, y+step_size))
+        image.paste(cropped_image, (x,y))
+        prediction = learn.predict(cropped_image)
+        print(prediction)
+        marker = f"{prediction[0][0].upper()} {prediction[2][prediction[1].item()].item()*100:.0f}"
+        position = (x+10, y+10)
+
+        bbox = draw.textbbox(position, marker, font=None)
+        draw.rectangle(bbox, fill="white")
+        draw.text(position, marker, font=None, fill="black")
+
+    draw = ImageDraw.Draw(image)
+    for x in range(0, blocks * step_size, step_size):
+        # vertical line
+        line = ((x, 0), (x, blocks * step_size))
+        draw.line(line, fill=128, width=3)
+
+        # horizontal line
+        line = ((0, x), (blocks * step_size, x))
+        draw.line(line, fill=128, width=3)
+
+    return image
+
+
+title = "Traffic Light Detector"
+description = "Experiment with traffic light detection to evaluate the value of captcha security controls"
+
+iface = gr.Interface(fn=call, inputs="image", outputs="image", title=title, description=description)
+iface.launch()
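
Note on the marker string in app.py above: fastai's Learner.predict returns a (decoded_label, label_index, probabilities) tuple, so the f-string takes the first letter of the predicted class plus that class's probability as a percentage. A minimal sketch of the same formatting, with made-up labels and probabilities (the real vocab lives inside model.pkl):

# Sketch only: shows how the marker string is derived from a fastai
# Learner.predict result. The label and probability values below are
# invented for illustration and are not taken from the model.
import torch

label = "traffic_light"             # prediction[0]: decoded class label
index = torch.tensor(1)             # prediction[1]: index of that label in the vocab
probs = torch.tensor([0.08, 0.92])  # prediction[2]: per-class probabilities

marker = f"{label[0].upper()} {probs[index.item()].item()*100:.0f}"
print(marker)  # -> T 92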

model.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e34e8f433c7ea43cb69797f534eb30f4c73f29571620742c3cad7abcd32e0100
+size 46956998

requirements.txt ADDED
@@ -0,0 +1,2 @@
+fastai
+scikit-image