# Data loading and gaze-direction detection


import cv2
import dlib
import numpy as np

def get_gaze_ratios(eye_points, gray_img):
    """Estimate where the pupil sits inside one eye region.

    Args:
        eye_points: (N, 2) integer array of (x, y) landmark coordinates
            outlining the eye in *gray_img*.
        gray_img: single-channel (grayscale) uint8 image.

    Returns:
        (h_ratio, v_ratio): fraction of dark "pupil" pixels that fall in
        the left half and the top half of the eye crop, respectively.
        (0.5, 0.5) means centered / undecidable.
    """
    # Bounding box of the eye landmarks
    min_x, max_x = np.min(eye_points[:, 0]), np.max(eye_points[:, 0])
    min_y, max_y = np.min(eye_points[:, 1]), np.max(eye_points[:, 1])

    eye_crop = gray_img[min_y:max_y, min_x:max_x]
    h, w = eye_crop.shape[:2]
    # Guard BEFORE thresholding: a degenerate landmark box gives an empty
    # crop, and thresholding an empty image is meaningless (the original
    # cv2.threshold call raised on empty input before the check was reached).
    if h == 0 or w == 0:
        return 0.5, 0.5

    # Binary threshold: pixel > 65 -> 255, else 0 (pupil stays dark).
    # Exact numpy equivalent of cv2.threshold(..., 65, 255, THRESH_BINARY).
    thresh = np.where(eye_crop > 65, 255, 0).astype(np.uint8)

    # Split the crop along both axes: X (left/right) and Y (top/bottom)
    left_side = thresh[:, :w // 2]
    right_side = thresh[:, w // 2:]
    top_side = thresh[:h // 2, :]
    bottom_side = thresh[h // 2:, :]

    # Count dark pixels (the pupil) in each half
    left_blacks = np.sum(left_side == 0)
    right_blacks = np.sum(right_side == 0)
    top_blacks = np.sum(top_side == 0)
    bottom_blacks = np.sum(bottom_side == 0)

    h_total = left_blacks + right_blacks
    v_total = top_blacks + bottom_blacks

    # Horizontal and vertical ratios; fall back to "centered" when the
    # threshold found no dark pixels at all.
    h_ratio = left_blacks / h_total if h_total > 0 else 0.5
    v_ratio = top_blacks / v_total if v_total > 0 else 0.5

    return h_ratio, v_ratio

def detect_gaze_direction(img_path, dat_path="shape_predictor_68_face_landmarks.dat"):
    """Detect gaze direction for every face in an image and display it.

    Args:
        img_path: path to the input photo (any format cv2.imread accepts).
        dat_path: path to the dlib 68-point facial landmark model file.

    Raises:
        FileNotFoundError: if *img_path* cannot be read as an image.

    Side effects: opens an OpenCV window with the annotated image and
    blocks until a key is pressed.
    """
    face_scanner = dlib.get_frontal_face_detector()
    landmark_finder = dlib.shape_predictor(dat_path)

    img = cv2.imread(img_path)
    if img is None:
        # cv2.imread silently returns None for a missing/unreadable file;
        # fail loudly instead of crashing inside cv2.cvtColor.
        raise FileNotFoundError(f"Could not read image: {img_path}")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_scanner(gray)

    for face in faces:
        landmarks = landmark_finder(gray, face)
        # 68-point model convention: indices 36-41 = left eye, 42-47 = right eye
        left_eye = np.array([[landmarks.part(i).x, landmarks.part(i).y] for i in range(36, 42)])
        right_eye = np.array([[landmarks.part(i).x, landmarks.part(i).y] for i in range(42, 48)])

        h_ratio_left, v_ratio_left = get_gaze_ratios(left_eye, gray)
        h_ratio_right, v_ratio_right = get_gaze_ratios(right_eye, gray)

        # Average the ratios over both eyes to reduce per-eye noise
        gaze_h = (h_ratio_left + h_ratio_right) / 2
        gaze_v = (v_ratio_left + v_ratio_right) / 2

        # Classify the final direction; horizontal thresholds are checked
        # first, so a strong sideways gaze wins over up/down.
        if gaze_h < 0.4:
            direction = "Direction: RIGHT"
        elif gaze_h > 0.6:
            direction = "Direction: LEFT"
        elif gaze_v > 0.65:
            direction = "Direction: UP"
        elif gaze_v < 0.35:
            direction = "Direction: DOWN"
        else:
            direction = "Direction: CENTER"

        # Annotate: outline both eyes and print the detected direction
        cv2.polylines(img, [left_eye], True, (0, 255, 255), 1)
        cv2.polylines(img, [right_eye], True, (0, 255, 255), 1)
        cv2.putText(img, direction, (30, 50), cv2.FONT_HERSHEY_DUPLEX, 1.2, (0, 255, 0), 2)

    cv2.imshow("Gaze Tracker", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Run this script on 4 different photos (Up, Down, Left, Right) and take
# new screenshots for the report.
if __name__ == "__main__":
    # Guarded so importing this module does not trigger the GUI demo.
    detect_gaze_direction('my_photo.jpg')