code try1

The document outlines a web application for real-time emotion detection using Flask, OpenCV, and DeepFace. It includes HTML for the user interface, CSS for styling, and Python scripts for video processing and emotion analysis. The application captures video, detects faces, analyzes emotions, and displays results on the web page.
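Flask's defaults determine where these files must live: render_template looks for templates in a templates/ folder, and url_for('static', ...) resolves under static/. A minimal layout that matches the snippets below (file placement assumed from the code, not stated in the original):

project/
    app.py
    emotion.py
    templates/
        index.html
    static/
        css/
            style.css

The scripts also assume the flask, opencv-python, and deepface packages are installed (e.g., pip install flask opencv-python deepface).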

css:

body {
    font-family: Arial, sans-serif;
    display: flex;
    flex-direction: column;
    align-items: center;
    background-color: #f0f0f0;
}

h1 {
    margin-top: 20px;
}

#videoContainer {
    margin-top: 20px;
    border: 2px solid #000;
    width: 640px; /* Adjust width as needed */
}

img {
    width: 100%; /* Make the video frame responsive */
}

html:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}">
    <title>Real-time Emotion Detection</title>
</head>
<body>
    <h1>Real-time Emotion Detection</h1>
    <div id="videoContainer">
        <img id="videoFeed" src="" alt="Video Feed" style="display:none;">
    </div>
    <button id="detectEmotionBtn">Detect Emotion</button>

    <script>
        document.getElementById("detectEmotionBtn").onclick = function() {
            startDetection();
        };

        function startDetection() {
            const videoFeed = document.getElementById("videoFeed");
            videoFeed.style.display = "block"; // Show the video feed
            videoFeed.src = "{{ url_for('video_feed') }}"; // Start the video feed
            console.log("Emotion detection started!");
        }
    </script>
</body>
</html>
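Note that the {{ url_for(...) }} placeholders are Jinja expressions, so this page must be served through Flask's render_template (as app.py does below); opening the file directly from disk leaves the stylesheet and video-feed URLs unresolved.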

app.py:

import cv2
from flask import Flask, render_template, Response
from deepface import DeepFace

app = Flask(__name__)

# Load the Haar cascade face classifier bundled with OpenCV
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                     'haarcascade_frontalface_default.xml')

# Initialize video capture from the default webcam
cap = cv2.VideoCapture(0)

def generate_frames():
    while True:
        success, frame = cap.read()
        if not success:
            break

        # Convert the frame to grayscale for face detection
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Detect faces in the frame
        faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1,
                                              minNeighbors=5, minSize=(30, 30))

        for (x, y, w, h) in faces:
            # Extract the face ROI (Region of Interest)
            face_roi = frame[y:y + h, x:x + w]

            try:
                # Perform emotion analysis on the face ROI.
                # enforce_detection=False: the ROI is already a cropped face,
                # so DeepFace should not raise when its own detector fails to
                # re-detect a face inside the small crop.
                result = DeepFace.analyze(face_roi, actions=['emotion'],
                                          enforce_detection=False)
                emotion = result[0]['dominant_emotion']

                # Draw a rectangle around the face and label it with the
                # predicted emotion
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.putText(frame, emotion, (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
            except Exception as e:
                print(f"Error in emotion detection: {e}")

        # Encode the frame as JPEG and yield it as one part of the
        # multipart stream
        ret, buffer = cv2.imencode('.jpg', frame)
        frame = buffer.tobytes()

        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/video_feed')
def video_feed():
    return Response(generate_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(debug=True, threaded=True)

# Note: the capture release should be handled appropriately (e.g., when the
# server shuts down)
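One way to honor that note, sketched here as an assumption rather than part of the original script, is to register the release with the standard-library atexit module so the camera is freed when the Flask process exits:

import atexit
atexit.register(cap.release)  # release the webcam when the interpreter shuts down

Run the server with python app.py and browse to http://127.0.0.1:5000/ (Flask's default address and port).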

emotion.py:

import cv2
from deepface import DeepFace
import time

# Load the Haar cascade face classifier bundled with OpenCV
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                     'haarcascade_frontalface_default.xml')

# Start capturing video from the default webcam
cap = cv2.VideoCapture(0)

# Time buffer over which emotion votes are accumulated (e.g., 5 seconds)
time_buffer = 5

# Minimum share of the votes the winning emotion must reach (e.g., 70%)
threshold = 0.7

# Vote counts per emotion within the current time buffer
emotion_votes = {}

# Start time of the current time buffer
start_time = time.time()

# Check that the video capture opened successfully
if not cap.isOpened():
    print("Error: Could not open video capture.")
    exit()

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        print("Error: Could not read frame.")
        break

    # Convert the frame to grayscale for face detection
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the frame
    faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1,
                                          minNeighbors=5, minSize=(30, 30))

    # detectMultiScale returns an array; test its length rather than its
    # truth value, which is ambiguous for non-empty NumPy arrays
    if len(faces) == 0:
        # Still show the frame and poll the keyboard so the window stays
        # responsive while no face is visible
        cv2.imshow('Real-time Emotion Detection', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        continue

    # Extract the face ROI (Region of Interest) of the first detected face
    x, y, w, h = faces[0]
    face_roi = frame[y:y + h, x:x + w]  # Use the original frame for emotion analysis

    try:
        # Perform emotion analysis on the face ROI.
        # enforce_detection=False: the ROI is already a cropped face
        result = DeepFace.analyze(face_roi, actions=['emotion'],
                                  enforce_detection=False)

        # Determine the dominant emotion (analyze returns a list of results,
        # matching the indexing used in app.py)
        emotion = result[0]['dominant_emotion']

        # Update the emotion votes
        if emotion in emotion_votes:
            emotion_votes[emotion] += 1
        else:
            emotion_votes[emotion] = 1

        # Draw a rectangle around the face and label it with the predicted emotion
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.putText(frame, emotion, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                    0.9, (0, 0, 255), 2)
    except Exception as e:
        print(f"Error in emotion detection: {e}")

    # Display the resulting frame
    cv2.imshow('Real-time Emotion Detection', frame)

    # Check whether the time buffer has elapsed (the emotion_votes guard
    # prevents max() from raising on an empty dict)
    if emotion_votes and time.time() - start_time >= time_buffer:
        # Determine the emotion with the most votes
        final_emotion = max(emotion_votes, key=emotion_votes.get)

        # Accept it only if its share of the votes meets the threshold
        if emotion_votes[final_emotion] / sum(emotion_votes.values()) >= threshold:
            print(f"Final emotion detected: {final_emotion}")
            # Perform Quranic work for the final emotion detected
        else:
            print("Insufficient confidence in emotion detection.")

        # Reset the emotion votes and the start time for the next buffer
        emotion_votes = {}
        start_time = time.time()

    # Press 'q' to exit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the capture and close all windows
cap.release()
cv2.destroyAllWindows()
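The buffered-voting scheme above is easy to verify in isolation. A minimal sketch of the same decision rule, testable without a camera (the function name resolve_emotion is ours, for illustration only):

def resolve_emotion(votes, threshold=0.7):
    """Return the majority emotion if its share of votes meets the threshold, else None."""
    if not votes:
        return None
    winner = max(votes, key=votes.get)
    if votes[winner] / sum(votes.values()) >= threshold:
        return winner
    return None

assert resolve_emotion({'happy': 8, 'neutral': 2}) == 'happy'  # 80% of votes >= 70%
assert resolve_emotion({'happy': 5, 'sad': 5}) is None         # 50% of votes < 70%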
