vivek123eq2858 committed

Commit f609dda · Parent(s): 4e32e3f

Initial commit: Upload Gesture AI to Hugging Face

Files changed:
- app.py +54 -0
- requirements.txt +5 -0
    	
app.py ADDED

import gradio as gr
import cv2
import numpy as np
from tensorflow.keras.models import load_model
import mediapipe as mp

# Load the trained gesture classifier and define its label set.
model = load_model('gesture_model.h5')
actions = ['I', 'help', 'need', 'sleep', 'angry', 'urgent']
threshold = 0.8  # minimum confidence required to accept a prediction

mp_holistic = mp.solutions.holistic

def extract_keypoints(results):
    # Flatten pose and hand landmarks into one fixed-length feature vector
    # (33 pose + 21 left-hand + 21 right-hand landmarks, each with x, y, z).
    # Missing landmarks are zero-filled so the vector length stays constant.
    pose = np.array([[res.x, res.y, res.z] for res in results.pose_landmarks.landmark]).flatten() if results.pose_landmarks else np.zeros(33 * 3)
    lh = np.array([[res.x, res.y, res.z] for res in results.left_hand_landmarks.landmark]).flatten() if results.left_hand_landmarks else np.zeros(21 * 3)
    rh = np.array([[res.x, res.y, res.z] for res in results.right_hand_landmarks.landmark]).flatten() if results.right_hand_landmarks else np.zeros(21 * 3)
    return np.concatenate([pose, lh, rh])

def predict_gesture(video_path):
    cap = cv2.VideoCapture(video_path)
    sequence = []  # sliding window of the most recent 30 frames' keypoints
    sentence = []  # recognized actions, with consecutive duplicates dropped

    with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # MediaPipe expects RGB input; OpenCV decodes frames as BGR.
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            results = holistic.process(image)
            keypoints = extract_keypoints(results)
            sequence.append(keypoints)
            sequence = sequence[-30:]

            # Classify once a full 30-frame window is available.
            if len(sequence) == 30:
                res = model.predict(np.expand_dims(sequence, axis=0))[0]
                if res[np.argmax(res)] > threshold:
                    action = actions[np.argmax(res)]
                    # Only append when the action changes, to avoid repeats.
                    if not sentence or sentence[-1] != action:
                        sentence.append(action)

        cap.release()
        return ' '.join(sentence)

iface = gr.Interface(
    fn=predict_gesture,
    inputs=gr.Video(label="Upload your gesture video"),
    outputs="text",
    title="Gesture Recognition AI",
    description="Upload a short gesture video (e.g., showing 'I need help') and get the recognized sentence."
)

iface.launch()
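For a quick local check before pushing, predict_gesture can be exercised outside Gradio. One caveat: app.py calls iface.launch() at import time, so importing it starts the server; wrapping that last line in an if __name__ == "__main__": guard (a small optional change, not part of this commit) makes the function importable. The sketch below assumes that guard, plus a hypothetical clip sample_gesture.mp4 sitting next to gesture_model.h5. Note the model input shape implied by extract_keypoints: (33 + 21 + 21) landmarks × 3 coordinates = 225 features per frame, so a 30-frame window gives a batch of shape (1, 30, 225).

# test_local.py — a minimal sketch, not part of the commit.
# Assumes iface.launch() in app.py is guarded by `if __name__ == "__main__":`
# and that 'sample_gesture.mp4' (hypothetical filename) shows one of the
# six supported gestures.
from app import predict_gesture

result = predict_gesture("sample_gesture.mp4")
print("Recognized sentence:", result)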
    	
requirements.txt ADDED

tensorflow
opencv-python
mediapipe
gradio
numpy
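The requirements are unpinned, so a fresh build pulls the latest release of each package; tensorflow and mediapipe move quickly, and loading gesture_model.h5 can fail if the installed Keras/TensorFlow differs from the version that trained it. A minimal post-install sanity check (hypothetical filename check_env.py):

# check_env.py — a minimal sketch to print installed versions
# after `pip install -r requirements.txt`.
import tensorflow, cv2, mediapipe, gradio, numpy

for name, mod in [("tensorflow", tensorflow), ("opencv-python", cv2),
                  ("mediapipe", mediapipe), ("gradio", gradio),
                  ("numpy", numpy)]:
    print(f"{name}: {mod.__version__}")

If the printed versions differ from the training environment, pinning them in requirements.txt (e.g. tensorflow==<training version>) is the usual fix.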