Commit 61c6f9d3 authored by Thomas Tran

Update 2 files

- /eye/actual.py
- /src/components/Video.jsx
parent 93c629d5
Branches tst58
--- a/eye/actual.py
+++ b/eye/actual.py
+from flask import Flask, request, jsonify
+from flask_cors import CORS  # Import CORS
 import cv2
+import numpy as np
 from gaze_tracking import GazeTracking
 from ultralytics import YOLO
+import base64
+from io import BytesIO
-# Initialize the gaze tracking model
-gaze = GazeTracking()
-# Initialize the YOLO model
-model = YOLO('yolov8n.pt')  # 'n' = nano (small and fast)
-# Start the webcam
-webcam = cv2.VideoCapture(0)
-
-while True:
-    # Read a frame from the webcam
-    ret, frame = webcam.read()
-    if not ret:
-        break
-    # Initialize distraction and focus variables
-    distracted = False
-    unfocused = False
+app = Flask(__name__)
+
+# Enable CORS for React app running on localhost:5173
+# Allow all origins for the '/process_frame' route
+CORS(app, resources={r"/process_frame": {"origins": "*", "methods": ["POST"]}})
+
+# Initialize models
+gaze = GazeTracking()
+model = YOLO('yolov8n.pt')  # Initialize YOLO model
+
+@app.route('/process_frame', methods=['POST'])
+def process_frame():
+    try:
+        # Get the frame data from the POST request
+        data = request.get_json()
+        frame_data = data['frame']
+        # Decode the base64 frame
+        img_data = base64.b64decode(frame_data.split(',')[1])
+        np_arr = np.frombuffer(img_data, np.uint8)
+        frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
         # Process the frame with the gaze tracking model
         gaze.refresh(frame)
         gaze_frame = gaze.annotated_frame()
-    gaze_text = ""
-    if gaze.is_right():
-        gaze_text = "Looking right"
-    elif gaze.is_left():
-        gaze_text = "Looking left"
-    elif gaze.is_center():
-        gaze_text = "Looking center"
-    elif gaze.is_up():
-        gaze_text = "Looking up"
-    elif gaze.is_down():
-        gaze_text = "Looking down"
-    cv2.putText(gaze_frame, gaze_text, (60, 60), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2)
-    # Process the frame with the YOLO model
+        # Process the frame with the YOLO model (detecting objects like phones)
         phone_detected = False
         results = model.predict(source=frame, save=False, verbose=False, conf=0.5)
+        # Detect phones in the frame
         for r in results:
             for box in r.boxes:
                 cls_id = int(box.cls[0])  # Class ID (number)
@@ -54,7 +49,8 @@ while True:
                 cv2.rectangle(gaze_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                 cv2.putText(gaze_frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
-    # Determine distraction and focus states
+        # Check gaze and determine distraction status
+        distracted = False
         if gaze.is_down():
             if phone_detected:
                 distracted = True
@@ -63,17 +59,22 @@ while True:
         else:
             distracted = True
-    # Display distraction and focus states for debugging
+        # Add distraction status text to frame
         cv2.putText(gaze_frame, f"Distracted: {distracted}", (60, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
-    # Display the combined frame
-    cv2.imshow("Demo", gaze_frame)
-
-    # Exit if the ESC key is pressed
-    if cv2.waitKey(1) == 27:
-        break
-
-# Release resources
-webcam.release()
-cv2.destroyAllWindows()
+        # Convert the processed frame to base64 for sending back to the frontend
+        _, buffer = cv2.imencode('.jpg', gaze_frame)
+        frame_base64 = base64.b64encode(buffer).decode('utf-8')
+        # Return the processed frame with distraction status
+        return jsonify({
+            'status': 'ok',
+            'frame': f"data:image/jpeg;base64,{frame_base64}",
+            'distracted': distracted  # Include distraction status
+        })
+    except Exception as e:
+        return jsonify({'status': 'error', 'message': str(e)}), 500
+
+if __name__ == '__main__':
+    app.run(debug=True, host='0.0.0.0', port=5001)  # Set port to 5001
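
The @@ markers above collapse the unchanged middle of the detection loop, between reading box.cls and drawing the rectangle. For readers following along, here is a hypothetical sketch of what that elided body plausibly does, assuming the stock COCO label set bundled with yolov8n.pt (where the class of interest is named "cell phone") and the standard ultralytics model.names lookup; it is an illustration, not the committed code:

    # Hypothetical reconstruction of the elided loop body -- not the committed code.
    for r in results:
        for box in r.boxes:
            cls_id = int(box.cls[0])        # Class ID (number)
            label = model.names[cls_id]     # COCO label, e.g. "cell phone"
            if label == "cell phone":
                phone_detected = True
                x1, y1, x2, y2 = map(int, box.xyxy[0])  # Bounding-box corners
                cv2.rectangle(gaze_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(gaze_frame, label, (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

Any frame containing that label would flip phone_detected, which the gaze check in the hunk above combines with "looking down" to set the distracted flag.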
--- a/src/components/Video.jsx
+++ b/src/components/Video.jsx
-import { useRef } from "react";
+import { useRef, useState } from "react";
 
 function Video() {
   const videoRef = useRef(null);
   const streamRef = useRef(null);
+  const [distractionStatus, setDistractionStatus] = useState('');
+  const intervalRef = useRef(null); // Use a ref to persist interval ID across renders
 
   function stopCam() {
+    if (intervalRef.current) {
+      clearInterval(intervalRef.current); // Stop capturing frames
+      intervalRef.current = null;
+    }
     if (streamRef.current) {
-      streamRef.current.getTracks().forEach(track => {
-        track.stop();
-      });
-      streamRef.current = null //clean the ref
+      streamRef.current.getTracks().forEach(track => track.stop());
+      streamRef.current = null; // Clean the ref
     }
   }
@@ -21,12 +25,40 @@ function Video() {
         streamRef.current = stream;
         videoRef.current.srcObject = stream; // Connect the stream to the <video>
         videoRef.current.play(); // Start playing the video
+        intervalRef.current = setInterval(captureAndSendFrame, 1000); // Capture every second
       }
     })
     .catch((error) => {
       console.log("Error:", error);
     });
   }
+
+  function captureAndSendFrame() {
+    const canvas = document.createElement('canvas');
+    const video = videoRef.current;
+    canvas.width = video.videoWidth;
+    canvas.height = video.videoHeight;
+    canvas.getContext('2d').drawImage(video, 0, 0);
+    const frame = canvas.toDataURL('image/jpeg');
+
+    // Send the frame to the Flask server
+    fetch('http://localhost:5001/process_frame', {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+      },
+      body: JSON.stringify({ frame: frame }),
+    })
+      .then((response) => response.json())
+      .then((data) => {
+        console.log('Received data:', data);
+        setDistractionStatus(data.distracted ? "Distracted" : "Focused");
+      })
+      .catch((error) => {
+        console.error('Error:', error);
+      });
+  }
 
   return (
     <>
       <video
@@ -39,8 +71,9 @@ function Video() {
       ></video>
       <button onClick={startCam}>Start Camera</button>
       <button onClick={stopCam}>Stop Camera</button>
+      <p>Status: {distractionStatus}</p>
     </>
   );
 }
 
 export default Video;
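
For a quick end-to-end check of the endpoint without the React frontend, a minimal client sketch follows; it assumes the Flask server above is running on localhost:5001, a webcam is attached, and the requests package is installed:

    # Minimal smoke test for /process_frame -- an illustrative sketch, not part of the commit.
    import base64
    import cv2
    import requests

    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()
    cap.release()
    if ret:
        _, buffer = cv2.imencode('.jpg', frame)
        data_url = "data:image/jpeg;base64," + base64.b64encode(buffer).decode('utf-8')
        resp = requests.post("http://localhost:5001/process_frame",
                             json={"frame": data_url}, timeout=10)
        print(resp.json())  # Expect keys: status, frame, distracted

The response mirrors what Video.jsx consumes: a data-URL JPEG of the annotated frame plus the boolean distracted field.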