import cv2
import numpy as np
import os
import datetime
import csv
import requests

# Load Haar Cascades for face and eye detection
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_eye.xml")
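# NOTE: eye_cascade is loaded but never referenced below; presumably reserved for future eye/blink checks.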

try:
    recognizer = cv2.face.LBPHFaceRecognizer_create()
except AttributeError:
    print("Error: OpenCV contrib package is missing. Install with: pip install opencv-contrib-python")
    exit()

# Configuration: API endpoint and file paths
API_ENDPOINT = "http://community.scrubbed.net:8080/api/checkin"
FACE_DIR = "registered_faces"
LOG_FILE = "attendance_log.csv"
MODEL_FILE = "face_recognizer.yml"
USER_ID_FILE = "last_user_id.txt"
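# NOTE: USER_ID_FILE is defined but never read or written below; presumably reserved for future use.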

selected_camera_index = 0

def select_camera():
    """Lists available cameras and allows the user to select one."""
    global selected_camera_index
    print("Detecting available cameras...")
    available_cameras = []
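    # Probe the first 10 indices; OpenCV may print backend warnings for indices with no device attached.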
    for i in range(10):
        cap = cv2.VideoCapture(i)
        if cap.isOpened():
            available_cameras.append(i)
            cap.release()
    
    if not available_cameras:
        print("No cameras found.")
        return

    print("Available cameras:")
    for index in available_cameras:
        print(f"  {index}: Camera {index}")

    while True:
        try:
            choice = input(f"Select a camera (current: {selected_camera_index}): ")
            if choice == "":
                break
            choice = int(choice)
            if choice in available_cameras:
                selected_camera_index = choice
                print(f"Camera {selected_camera_index} selected.")
                break
            else:
                print("Invalid camera index.")
        except ValueError:
            print("Please enter a number.")


def preprocess_face(face_img):
    """Enhanced preprocessing for better recognition accuracy in real-world conditions."""
    # Resize to a consistent 200x200 so the recognizer sees uniformly sized inputs
    face_img = cv2.resize(face_img, (200, 200))

    # Global histogram equalization for better contrast
    face_img = cv2.equalizeHist(face_img)

    # Apply a light 3x3 Gaussian blur to reduce noise (a 1x1 kernel would have no effect)
    face_img = cv2.GaussianBlur(face_img, (3, 3), 0)

    # CLAHE with a modest clip limit for local lighting adaptation
    # (together with equalizeHist above, contrast is enhanced twice)
    clahe = cv2.createCLAHE(clipLimit=1.5, tileGridSize=(8, 8))
    face_img = clahe.apply(face_img)

    # Stretch pixel values to the full 0-255 range
    face_img = cv2.normalize(face_img, None, 0, 255, cv2.NORM_MINMAX)
    
    return face_img


def collect_training_data(user_id):
    """Captures face images for training, ensuring variations (with glasses, different angles)."""
    user_folder = os.path.join(FACE_DIR, user_id)
    os.makedirs(user_folder, exist_ok=True)

    cap = cv2.VideoCapture(selected_camera_index)
    count = 0
    print(f"Starting training data collection for User {user_id}")
    print("Please move your head slowly in different directions and angles")
    print("Press 'q' to stop collection early")

    while count < 100:  # capture up to 100 samples for a more robust model
        ret, frame = cap.read()
        if not ret:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(80, 80))

        for (x, y, w, h) in faces:
            face_img = gray[y:y + h, x:x + w]
            
            # Only save if the detected face is large enough
            if w > 80 and h > 80:
                # Draw visual feedback on the preview frame
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                cv2.putText(frame, f"Capturing: {count}/100", (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

                # Save the cropped face with a zero-padded index
                img_path = os.path.join(user_folder, f"training_{count:03d}.jpg")
                cv2.imwrite(img_path, face_img)
                count += 1
                
                # Small delay to avoid duplicate frames
                cv2.waitKey(100)
                
                if count >= 100:
                    break

        cv2.imshow("Training Data Collection", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
    print(f"Training data collection complete. Captured {count} images for User {user_id}")
    return count


def train_face_recognizer():
    """Trains LBPH face recognizer with additional preprocessing for better accuracy."""
    faces, labels = [], []

    for user_id_str in os.listdir(FACE_DIR):
        user_folder = os.path.join(FACE_DIR, user_id_str)
        if os.path.isdir(user_folder):
            try:
                user_id = int(user_id_str)
            except ValueError:
                print(f"Skipping non-integer directory: {user_id_str}")
                continue

            # Check for multi-angle structure (subdirectories)
            angle_dirs = ['front', 'left', 'right', 'up', 'down']
            has_angle_structure = any(os.path.isdir(os.path.join(user_folder, angle)) for angle in angle_dirs)
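            # NOTE: these angle subfolders are not created by collect_training_data; they support an alternative, externally prepared layout.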
            
            if has_angle_structure:
                # Multi-angle structure - get images from subdirectories
                for angle in angle_dirs:
                    angle_folder = os.path.join(user_folder, angle)
                    if os.path.exists(angle_folder):
                        for filename in os.listdir(angle_folder):
                            if filename.endswith('.jpg'):
                                img_path = os.path.join(angle_folder, filename)
                                img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                                if img is None:
                                    continue
                                img = preprocess_face(img)
                                faces.append(img)
                                labels.append(user_id)
            else:
                # Old structure - get images directly from user folder
                for filename in os.listdir(user_folder):
                    if filename.endswith('.jpg'):
                        img_path = os.path.join(user_folder, filename)
                        img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
                        if img is None:
                            continue
                        img = preprocess_face(img)
                        faces.append(img)
                        labels.append(user_id)

    if not faces:
        print("No training data found.")
        return

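    # LBPH expects grayscale face images and an integer label array (hence np.array(labels))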
    recognizer.train(faces, np.array(labels))
    recognizer.save(MODEL_FILE)
    print(f"Training complete with {len(faces)} images from {len(set(labels))} users.")


def recognize_user():
    """Recognizes a user even if wearing glasses or has slight appearance changes."""
    if not os.path.exists(MODEL_FILE):
        print("Error: Model file not found. Train the model first.")
        return

    recognizer.read(MODEL_FILE)
    cap = cv2.VideoCapture(selected_camera_index)

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=6, minSize=(50, 50))

        for (x, y, w, h) in faces:
            face_img = gray[y:y + h, x:x + w]
            face_img = preprocess_face(face_img)
            user_id, confidence = recognizer.predict(face_img)
            # LBPH "confidence" is a distance: lower values mean a closer match
            if confidence < 80:
                text = f"User: {user_id} (dist: {round(confidence, 2)})"
                log_attendance(user_id)
                color = (0, 255, 0)
            else:
                text = "Unknown"
                color = (0, 0, 255)
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            cv2.putText(frame, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2, cv2.LINE_AA)

        cv2.imshow("Face Recognition", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


def log_attendance(user_id):
    """Logs attendance, allowing only one check-in per user per day."""
    now = datetime.datetime.now()
    timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
    today = now.strftime("%Y-%m-%d")

    if not os.path.exists(LOG_FILE):
        with open(LOG_FILE, "w", newline="") as file:
            writer = csv.writer(file)
            writer.writerow(["Timestamp", "User ID", "Status"])

    # Skip duplicate check-ins for the same user on the same day
    with open(LOG_FILE, "r", newline="") as file:
        for row in csv.reader(file):
            if len(row) >= 2 and row[0].startswith(today) and row[1] == str(user_id):
                return

    with open(LOG_FILE, "a", newline="") as file:
        writer = csv.writer(file)
        writer.writerow([timestamp, user_id, "Check-in"])

    send_post_request(user_id)


def send_post_request(user_id):
    """Sends attendance to API."""
    payload = {"user_id": user_id}
    headers = {"Content-Type": "application/json"}

    try:
        response = requests.post(API_ENDPOINT, json=payload, headers=headers, timeout=5)
        if response.status_code == 200:
            print(f"✅ Successfully sent attendance for User {user_id} to API. Response: {response.text}")
        else:
            print(f"⚠️ Failed. Status: {response.status_code}, Response: {response.text}")
    except requests.exceptions.RequestException as e:
        print(f"❌ Error sending attendance: {e}")

if __name__ == "__main__":
    while True:
        print("\n1: Register User\n2: Train Model\n3: Recognize Face\n4: Select Camera\n5: Exit")
        choice = input("Choose an option: ")
        if choice == "1":
            collect_training_data(input("Enter user ID: "))
        elif choice == "2":
            train_face_recognizer()
        elif choice == "3":
            recognize_user()
        elif choice == "4":
            select_camera()
        elif choice == "5":
            break
        else:
            print("Invalid choice. Try again.")