import base64
import csv
import datetime
import calendar
import json
import os
import secrets
import smtplib
import sys
import time
import urllib3
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.utils import formataddr
import pytz
import jwt
from functools import wraps

# Add the project root to the Python path
# This allows us to run the server directly while maintaining package imports
current_dir = os.path.dirname(os.path.abspath(__file__))
# Assuming 'franai' is the package directory, its parent is the project root.
project_root = os.path.dirname(current_dir)
if project_root not in sys.path:
    sys.path.append(project_root)

import cv2
import mysql.connector
import numpy as np
import requests
from flask import Flask, jsonify, make_response, request
from flask_cors import CORS

from franai.controllers.access_control import role_required
from franai.controllers.auth_controller import auth_bp
from franai.livestream import preprocess_face, train_face_recognizer
from franai.models.attendance_model import AttendanceLog
from franai.models.location_model import Location
from franai.models.registered_user_model import RegisteredUser
from franai.models.role_model import Role
from franai.models.staff_model import Staff
from franai.models.team_model import Team
from franai.models.token_model import RegistrationToken
from franai.services.config import Config
from services.spoof_detection_service import check_liveness

# Ensure the tables exist on startup
RegistrationToken.create_table_if_not_exists()
Role.create_tables_if_not_exists()
RegisteredUser.create_table_if_not_exists()
AttendanceLog.create_table_if_not_exists()
Location.create_table_if_not_exists()

def get_team_gid(pid):
    """Connects to the 'intra' database to get the team GID for a given PID."""
    team_gid = None
    # Use a separate connection for the 'intra' database
    try:
        conn = mysql.connector.connect(
            host=Config.DB_HOST,
            port=Config.DB_PORT,
            user=Config.DB_USER,
            password=Config.DB_PASSWORD,
            database='intra'  # Explicitly connect to the 'intra' database
        )
        if conn.is_connected():
            cursor = conn.cursor(dictionary=True)
            query = """
            SELECT tg.group_id 
            FROM tag_group tg
            JOIN user_group ug ON tg.group_id = ug.GID
            WHERE tg.user_id = %s 
            AND tg.workspace_default = 'Y'
            AND ug.workspace_status = 1
            LIMIT 1
            """
            cursor.execute(query, (pid,))
            result = cursor.fetchone()
            if result:
                team_gid = result['group_id']
    except mysql.connector.Error as e:
        print(f"Error connecting to 'intra' database or fetching team GID: {e}")
        return None # Return None on error
    finally:
        if 'conn' in locals() and conn.is_connected():
            if 'cursor' in locals():
                cursor.close()
            conn.close()
    return team_gid
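# Illustrative usage of get_team_gid (the PIDs and GIDs below are made up):
#   get_team_gid('10231')  -> 42    # PID has a default, active workspace group
#   get_team_gid('99999')  -> None  # no matching row, or the 'intra' DB is unreachable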

# InsightFace imports with error handling
try:
    import insightface
    import onnxruntime
    import pickle
    INSIGHTFACE_AVAILABLE = True
    print("✅ InsightFace libraries loaded successfully")
except ImportError as e:
    INSIGHTFACE_AVAILABLE = False
    print(f"⚠️ InsightFace not available: {e}")
    print("📝 System will continue with OpenCV only")

app = Flask(__name__)
# Register Blueprints
app.register_blueprint(auth_bp, url_prefix='/auth')

# --- SMTP Email Configuration ---
# IMPORTANT: For production, use environment variables.
app.config['SMTP_SERVER'] = os.environ.get('SMTP_SERVER', 'smtp.example.com')
app.config['SMTP_PORT'] = int(os.environ.get('SMTP_PORT', 587))
app.config['SMTP_USERNAME'] = os.environ.get('SMTP_USERNAME', 'user@example.com')
app.config['SMTP_PASSWORD'] = os.environ.get('SMTP_PASSWORD', 'password')
app.config['SENDER_EMAIL'] = os.environ.get('SENDER_EMAIL', 'noreply@example.com')
app.config['FRONTEND_URL'] = os.environ.get('FRONTEND_URL', 'http://localhost:5173')
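# Example environment setup for the SMTP settings above (all values are
# placeholders, not real credentials); any variable left unset falls back to
# the defaults declared above:
#
#   export SMTP_SERVER=smtp.office365.com
#   export SMTP_PORT=587
#   export SMTP_USERNAME=noreply@example.com
#   export SMTP_PASSWORD='app-specific-password'
#   export SENDER_EMAIL=noreply@example.com
#   export FRONTEND_URL=https://fran-dev.scrubbed.net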


@app.route('/api/teams', methods=['GET'])
def get_teams():
    """Get a list of all active teams."""
    teams = Team.get_all_teams()
    if teams is None:
        return jsonify({'error': 'An error occurred while fetching teams.'}), 500
    return jsonify(teams)


@app.route('/api/locations', methods=['GET'])
def get_locations():
    """Get a list of all locations."""
    locations = Location.get_all_locations()
    if locations is None:
        return jsonify({'error': 'An error occurred while fetching locations.'}), 500
    return jsonify(locations)


@app.route('/api/staff/search', methods=['GET'])
def search_staff():
    """Search for staff by name or get a default list, optionally filtered by team."""
    search_term = request.args.get('name', '')
    team_gid = request.args.get('team_gid', None)
    
    if not search_term:
        # If no search term, get a default list of users
        users = Staff.get_all_staff(team_gid=team_gid)
    else:
        # Otherwise, search by the provided name
        users = Staff.search_by_name(search_term, team_gid=team_gid)

    if users is None:
        # search_by_name can return None on error, get_all_staff returns []
        return jsonify({'error': 'An error occurred while fetching users.'}), 500
    
    return jsonify(users)

@app.route('/api/tokens/active', methods=['GET'])
@role_required('Super Admin')
def get_active_tokens(current_user_pid):
    """Get a list of all active (non-expired) registration tokens."""
    tokens = RegistrationToken.get_active_tokens()
    if tokens is None:
        return jsonify({'error': 'An error occurred while fetching active tokens.'}), 500
    
    # Convert datetime objects to ISO 8601 string format for JSON serialization
    for token in tokens:
        if isinstance(token.get('expires_at'), datetime.datetime):
            token['expires_at'] = token['expires_at'].isoformat()
            
    return jsonify(tokens)

@app.route('/api/tokens', methods=['GET'])
@role_required('Super Admin')
def get_all_tokens(current_user_pid):
    """Get a list of all registration tokens, including expired ones."""
    tokens = RegistrationToken.get_all_tokens()
    if tokens is None:
        return jsonify({'error': 'An error occurred while fetching all tokens.'}), 500
    
    # Convert datetime objects to ISO 8601 string format for JSON serialization
    for token in tokens:
        if isinstance(token.get('expires_at'), datetime.datetime):
            token['expires_at'] = token['expires_at'].isoformat()
            
    return jsonify(tokens)

@app.route('/admin/dashboard')
@role_required('Admin', 'Super Admin')
def admin_dashboard(current_user_pid):
    """A sample protected route that requires Admin or Super Admin role."""
    return jsonify(message=f"Welcome to the Admin Dashboard, user PID: {current_user_pid}!")

@app.route('/admin/generate-and-email-token', methods=['POST'])
@role_required('Super Admin') # Secure this endpoint
def generate_and_email_token(current_user_pid):
    """Generates a one-time code and emails a registration link to the staff member."""
    data = request.get_json()
    pid = data.get('pid')
    if not pid:
        return jsonify({'error': 'PID is required'}), 400

    # 1. Fetch employee info to get name and email
    employee_info = Staff.get_by_pid(pid)
    if not employee_info or not employee_info.get('company_email'):
        return jsonify({'error': f'Could not find a valid email for User ID {pid}.'}), 404
    
    employee_name = f"{employee_info.get('first_name', '')} {employee_info.get('last_name', '')}".strip()
    employee_email = employee_info.get('company_email')

    # 2. Get the team GID
    team_gid = get_team_gid(pid)

    # 3. Generate and store a secure token in the database
    code = RegistrationToken.create(pid, team_gid)
    if not code:
        return jsonify({'error': 'Failed to generate a secure token.'}), 500
    
    # 4. Construct the registration link
    registration_link = f"{app.config['FRONTEND_URL']}/self-register?code={code}"

    # 5. Send the email
    try:
        msg = MIMEMultipart()
        msg['From'] = formataddr(('Fran AI', app.config['SENDER_EMAIL']))
        msg['To'] = employee_email
        msg['Subject'] = "Your Secure Face Recognition Registration Link"

        body = f"""
        Hello {employee_name},

        Please use the following secure link to register for the Face Recognition Attendance System.

        Registration Link: {registration_link}

        Please note: This link is valid for 5 minutes and can only be used once.

        If you did not request this, please ignore this email.

        Thank you,
        Fran AI
        """
        bcc_email = 'christian.manuel@scrubbed.net'
        msg.attach(MIMEText(body, 'plain'))

        server = smtplib.SMTP(app.config['SMTP_SERVER'], app.config['SMTP_PORT'])
        server.starttls()
        server.login(app.config['SMTP_USERNAME'], app.config['SMTP_PASSWORD'])
        text = msg.as_string()
        recipients = [employee_email, bcc_email]
        server.sendmail(app.config['SENDER_EMAIL'], recipients, text)
        server.quit()

        print(f"Successfully sent registration email to {employee_email} for PID {pid}")
        return jsonify({'success': True, 'message': f'Registration link sent to {employee_name} ({employee_email}).'})

    except Exception as e:
        print(f"ERROR: Failed to send email: {e}")
        # Clean up the generated code if email fails
        RegistrationToken.delete(code) # Clean up from DB
        return jsonify({'error': 'Failed to send registration email. Please check server configuration.'}), 500

@app.route('/admin/send-token-email', methods=['POST'])
def send_token_email():
    """
    Generates a token and emails it to the specified user's official email.
    Requires a valid authentication token.
    """
    # 1. Check for a valid authentication token
    token = request.headers.get('x-access-token')
    if not token:
        return jsonify({'error': 'Token is missing'}), 401

    try:
        # Decode the token to verify authentication, but we won't use the PID from it.
        jwt.decode(token, Config.JWT_SECRET_KEY, algorithms=["HS256"])
    except jwt.ExpiredSignatureError:
        return jsonify({'error': 'Token has expired'}), 401
    except jwt.InvalidTokenError:
        return jsonify({'error': 'Invalid token'}), 401

    # 2. Get the PID from the request payload
    data = request.get_json()
    pid = data.get('pid')

    if not pid:
        return jsonify({'error': 'PID is required in the payload'}), 400

    employee_info = Staff.get_by_pid(pid)
    if not employee_info or not employee_info.get('company_email'):
        return jsonify({'error': f'Could not find a valid email for User ID {pid}.'}), 404
    
    employee_name = f"{employee_info.get('first_name', '')} {employee_info.get('last_name', '')}".strip()
    employee_email = employee_info.get('company_email')
    team_gid = get_team_gid(pid)
    code = RegistrationToken.create(pid, team_gid)
    if not code:
        return jsonify({'error': 'Failed to generate a secure token.'}), 500
    
    registration_link = f"{app.config['FRONTEND_URL']}/self-register?code={code}"

    token_data = RegistrationToken.find_by_code(code)
    if not token_data:
        # This is an unlikely but important consistency check
        RegistrationToken.delete(code) # Clean up orphaned token
        return jsonify({'success': False, 'error': 'Failed to retrieve token details after creation.'}), 500

    try:
        # Create the root message and set the headers
        msg = MIMEMultipart('related')
        msg['From'] = formataddr(('Fran AI', app.config['SENDER_EMAIL']))
        msg['To'] = employee_email
        msg['Subject'] = "Your Secure Face Recognition Registration Link"

        # Create the HTML part of the message
        html_body = f"""
        <!DOCTYPE html>
        <html lang="en">
        <head>
            <meta charset="UTF-8">
            <meta name="viewport" content="width=device-width, initial-scale=1.0">
            <title>Secure Registration Link</title>
        </head>
        <body style="font-family: 'Segoe UI', Arial, sans-serif; margin: 0; padding: 30px; background-color: #f5f7fa; color: #333;">
                
            <!-- Logo -->
            <div style="text-align: center; margin-bottom: 30px; margin-top: 30px;">
                <img src="cid:franlogo" alt="Fran Logo" style="height: 50px;">
            </div>
            
            <div style="max-width: 600px; margin: auto; background-color: #ffffff; border-radius: 10px; padding: 30px; box-shadow: 0 2px 8px rgba(0,0,0,0.05);">

                <!-- Message Content -->
                <p style="font-size: 16px; font-weight: 600;">Hello {employee_name},</p>

                <p style="font-size: 16px;">
                    Your account for the Face Recognition Attendance System is ready. To activate it, please complete a brief and secure registration by clicking the button below.
                </p>

                <!-- CTA Button -->
                <div style="text-align: center; margin: 30px 0;">
                    <a href="{registration_link}" 
                    style="background-color: #007fae; color: #ffffff; padding: 15px 25px; text-decoration: none; border-radius: 6px; font-size: 16px; font-weight: bold;">
                        Register Now
                    </a>
                </div>

                <p style="font-size: 16px;">If you did not request this, please disregard this email.</p>

                <hr style="border: none; border-top: 1px solid #eee; margin-top: 30px;">

                <p style="font-size: 14px; color: #888; text-align: left;">
                    Thank you,<br>The AI Team
                </p>
            </div>
        </body>
        </html>
        """
        msg.attach(MIMEText(html_body, 'html'))

        # Embed the logo
        try:
            logo_path = os.path.join(current_dir, 'assets', 'images', 'fran-logo.png')
            with open(logo_path, 'rb') as f:
                img = MIMEImage(f.read())
                img.add_header('Content-ID', '<franlogo>')
                msg.attach(img)
        except FileNotFoundError:
            print("WARNING: fran-logo.png not found. Email will be sent without the logo.")

        # Send the email
        bcc_email = 'christian.manuel@scrubbed.net'
        server = smtplib.SMTP(app.config['SMTP_SERVER'], app.config['SMTP_PORT'])
        server.starttls()
        server.login(app.config['SMTP_USERNAME'], app.config['SMTP_PASSWORD'])
        recipients = [employee_email, bcc_email]
        server.sendmail(app.config['SENDER_EMAIL'], recipients, msg.as_string())
        server.quit()
        
        # Mark the token as sent in the database
        RegistrationToken.mark_as_sent(code)
        
        return jsonify({'success': True, 'message': f'Registration link sent to {employee_name} ({employee_email}).'})
    except Exception as e:
        print(f"ERROR: Failed to send email: {e}")
        RegistrationToken.delete(code)
        return jsonify({'error': 'Failed to send registration email.'}), 500

@app.route('/admin/send-token-email-team', methods=['POST'])
@role_required('Super Admin')
def send_token_email_team(current_user_pid):
    """Sends registration links to all hired members of a specific team."""
    data = request.get_json()
    team_gid = data.get('team_gid')
    if not team_gid:
        return jsonify({'error': 'Team GID is required'}), 400

    staff_list = Staff.get_all_staff_by_team(team_gid)
    if not staff_list:
        return jsonify({'error': f'No hired staff found for Team GID {team_gid}.'}), 404

    success_count = 0
    failure_count = 0
    skipped_count = 0

    for staff_member in staff_list:
        pid = staff_member.get('pid')
        
        # Skip users who are already registered
        if RegisteredUser.is_registered(pid):
            print(f"Skipping already registered user PID: {pid}")
            skipped_count += 1
            continue

        employee_name = f"{staff_member.get('first_name', '')} {staff_member.get('last_name', '')}".strip()
        employee_email = staff_member.get('company_email')

        if not employee_email:
            print(f"Skipping user {pid} due to missing email.")
            failure_count += 1
            continue

        code = RegistrationToken.create(pid, team_gid)
        if not code:
            print(f"Failed to generate token for user {pid}.")
            failure_count += 1
            continue
        
        registration_link = f"{app.config['FRONTEND_URL']}/self-register?code={code}"
        token_data = RegistrationToken.find_by_code(code)

        try:
            # Create the root message and set the headers
            msg = MIMEMultipart('related')
            msg['From'] = formataddr(('Fran AI', app.config['SENDER_EMAIL']))
            msg['To'] = employee_email
            msg['Subject'] = "Your Secure Face Recognition Registration Link"

            # Create the HTML part of the message
            html_body = f"""
            <!DOCTYPE html>
            <html lang="en">
            <head>
                <meta charset="UTF-8">
                <meta name="viewport" content="width=device-width, initial-scale=1.0">
                <title>Secure Registration Link</title>
            </head>
            <body style="font-family: 'Segoe UI', Arial, sans-serif; margin: 0; padding: 30px; background-color: #f5f7fa; color: #333;">

                <!-- Logo -->
                <div style="text-align: center; margin-bottom: 30px; margin-top: 30px;">
                    <img src="cid:franlogo" alt="Fran Logo" style="height: 50px;">
                </div>

                <div style="max-width: 600px; margin: auto; background-color: #ffffff; border-radius: 10px; padding: 30px; box-shadow: 0 2px 8px rgba(0,0,0,0.05);">

                    <!-- Message Content -->
                    <p style="font-size: 16px; font-weight: 600;">Hello {employee_name},</p>

                    <p style="font-size: 16px;">
                        Your account for the Face Recognition Attendance System is ready. To activate it, please complete a brief and secure registration by clicking the button below.
                    </p>

                    <!-- CTA Button -->
                    <div style="text-align: center; margin: 30px 0;">
                        <a href="{registration_link}" 
                        style="background-color: #007fae; color: #ffffff; padding: 15px 25px; text-decoration: none; border-radius: 6px; font-size: 16px; font-weight: bold;">
                            Register Now
                        </a>
                    </div>

                    <p style="font-size: 16px;">If you did not request this, please disregard this email.</p>

                    <hr style="border: none; border-top: 1px solid #eee; margin-top: 30px;">

                    <p style="font-size: 14px; color: #888; text-align: left;">
                        Thank you,<br>The AI Team
                    </p>
                </div>
            </body>
            </html>
            """
            msg.attach(MIMEText(html_body, 'html'))

            # Embed the logo
            try:
                logo_path = os.path.join(current_dir, 'assets', 'images', 'fran-logo.png')
                with open(logo_path, 'rb') as f:
                    img = MIMEImage(f.read())
                    img.add_header('Content-ID', '<franlogo>')
                    msg.attach(img)
            except FileNotFoundError:
                print(f"WARNING: fran-logo.png not found for team email to {employee_email}. Sending without logo.")

            # Send the email
            server = smtplib.SMTP(app.config['SMTP_SERVER'], app.config['SMTP_PORT'])
            server.starttls()
            server.login(app.config['SMTP_USERNAME'], app.config['SMTP_PASSWORD'])
            server.sendmail(app.config['SENDER_EMAIL'], employee_email, msg.as_string())
            server.quit()
            
            RegistrationToken.mark_as_sent(code)
            success_count += 1
        except Exception as e:
            print(f"ERROR: Failed to send email to {employee_email} for PID {pid}: {e}")
            RegistrationToken.delete(code)
            failure_count += 1

    return jsonify({
        'success': True, 
        'message': f'Process complete. Sent {success_count} emails, skipped {skipped_count} registered users, and encountered {failure_count} failures.'
    })


@app.route('/admin/generate-token', methods=['POST'])
def generate_token():
    """
    Generates a one-time code and returns a registration link for the authenticated user.
    Bypasses role checks and uses the PID from the token itself for security.
    """
    token = request.headers.get('x-access-token')
    if not token:
        return jsonify({'error': 'Token is missing'}), 401

    try:
        decoded_token = jwt.decode(token, Config.JWT_SECRET_KEY, algorithms=["HS256"])
        current_user_pid = decoded_token.get('user')
    except jwt.ExpiredSignatureError:
        return jsonify({'error': 'Token has expired'}), 401
    except jwt.InvalidTokenError:
        return jsonify({'error': 'Invalid token'}), 401

    if not current_user_pid:
        return jsonify({'error': 'Could not identify user from token'}), 400

    # Get the team GID from the 'intra' database
    team_gid = get_team_gid(current_user_pid)

    # Generate and store a secure token in the database
    code = RegistrationToken.create(current_user_pid, team_gid)
    if not code:
        return jsonify({'error': 'Failed to generate a secure token.'}), 500
    
    # Construct the registration link
    registration_link = f"{app.config['FRONTEND_URL']}/self-register?code={code}"

    return jsonify({'success': True, 'link': registration_link})
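# Sketch of how a client might call /admin/generate-token (the token value and
# response bodies below are illustrative only):
#
#   POST /admin/generate-token
#   Headers: x-access-token: <JWT issued by the /auth blueprint>
#
#   200 -> {"success": true, "link": "https://.../self-register?code=AbC123..."}
#   401 -> {"error": "Token has expired"} or {"error": "Invalid token"}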


# CORS policy for Fran
CORS(app, 
     origins=['https://fran-dev.scrubbed.net', 'https://fran.scrubbed.net', 'http://localhost:8081', 'http://localhost:3000', 'http://localhost:5173', 'http://localhost:5174', 'https://api-frandev.scrubbed.net'],
     supports_credentials=True,
     methods=['GET', 'POST', 'PUT', 'DELETE', 'OPTIONS'],
     allow_headers=['Content-Type', 'x-access-token', 'Authorization'])
FACE_DIR = "registered_faces"
EMBEDDINGS_DIR = "insightface_embeddings"  # Directory for InsightFace embeddings
API_ENDPOINT = "https://api-hris.scrubbed.net/users/getUserInfo"
SETTINGS_FILE = "app_settings.json"

# Ensure the embeddings directory exists
os.makedirs(EMBEDDINGS_DIR, exist_ok=True)

# Global InsightFace model (initialized once)
insightface_model = None

# Default settings
DEFAULT_SETTINGS = {
    "recognition_library": "insightface",  # Default to InsightFace
    "insightface_threshold": 0.6,
    "opencv_threshold": 100
}
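# Example of what app_settings.json is expected to look like on disk
# (values shown are the defaults from DEFAULT_SETTINGS):
#
#   {
#     "recognition_library": "insightface",
#     "insightface_threshold": 0.6,
#     "opencv_threshold": 100
#   }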

def load_settings():
    """Load application settings from file"""
    try:
        if os.path.exists(SETTINGS_FILE):
            with open(SETTINGS_FILE, 'r') as f:
                settings = json.load(f)
                # Merge with defaults to ensure all keys exist
                for key, value in DEFAULT_SETTINGS.items():
                    if key not in settings:
                        settings[key] = value
                return settings
        else:
            return DEFAULT_SETTINGS.copy()
    except Exception as e:
        print(f"⚠️ Error loading settings: {e}")
        return DEFAULT_SETTINGS.copy()

def save_settings(settings):
    """Save application settings to file"""
    try:
        with open(SETTINGS_FILE, 'w') as f:
            json.dump(settings, f, indent=2)
        print(f"✅ Settings saved: {settings}")
        return True
    except Exception as e:
        print(f"❌ Error saving settings: {e}")
        return False

def initialize_insightface():
    """Initialize InsightFace model globally with optimized providers."""
    global insightface_model
    if not INSIGHTFACE_AVAILABLE:
        return False
    
    try:
        if insightface_model is None:
            # Prioritize GPU providers if available, otherwise fall back to CPU
            providers = ['CUDAExecutionProvider', 'TensorrtExecutionProvider', 'CPUExecutionProvider']
            
            insightface_model = insightface.app.FaceAnalysis(providers=providers)
            insightface_model.prepare(ctx_id=0, det_size=(640, 640))
            
            # Log the provider being used
            print(f"✅ InsightFace model initialized successfully using provider: {insightface_model.models['detection'].session.get_providers()}")
        return True
    except Exception as e:
        print(f"❌ Error initializing InsightFace model: {e}")
        return False

# Initialize the model on startup
initialize_insightface()

def generate_insightface_embeddings(user_id, user_folder, user_name, user_department):
    """Generate InsightFace embeddings for user images"""
    if not INSIGHTFACE_AVAILABLE:
        print("⚠️ InsightFace not available, skipping embedding generation")
        return False
    
    try:
        # Initialize model if not already done
        if not initialize_insightface():
            return False
        
        embeddings = []
        image_files = [f for f in os.listdir(user_folder) if f.endswith('.jpg')]
        
        print(f"🔄 Processing {len(image_files)} images for InsightFace embeddings...")
        
        for img_file in image_files:
            img_path = os.path.join(user_folder, img_file)
            img = cv2.imread(img_path)
            
            if img is not None:
                faces = insightface_model.get(img)
                if faces:
                    # Use first detected face
                    embedding = faces[0].embedding
                    embeddings.append(embedding)
                    print(f"✅ Generated embedding for {img_file}")
                else:
                    print(f"⚠️ No face detected in {img_file}")
            else:
                print(f"❌ Could not read image {img_file}")
        
        if embeddings:
            # Save embeddings to pickle file
            embedding_data = {
                'user_id': user_id,
                'name': user_name,
                'department': user_department,
                'embeddings': embeddings,
                'created_at': datetime.datetime.now().isoformat(),
                'image_count': len(embeddings)
            }
            
            embedding_file = os.path.join(EMBEDDINGS_DIR, f"insightface_embeddings_{user_id}.pkl")
            with open(embedding_file, 'wb') as f:
                pickle.dump(embedding_data, f)
            
            print(f"✅ Generated {len(embeddings)} InsightFace embeddings for user {user_id}")
            print(f"💾 Saved embeddings to {embedding_file}")
            return True
        else:
            print(f"⚠️ No faces detected in any images for user {user_id}")
            return False
            
    except Exception as e:
        print(f"❌ Error generating InsightFace embeddings: {e}")
        return False

def fetch_employee_info(user_id):
    """Fetch employee information from live API endpoint"""
    try:
        # Use GET request with user_id in URL path
        url = f"{API_ENDPOINT}/{user_id}"
        headers = {"Content-Type": "application/json"}
        
        response = requests.get(url, headers=headers, timeout=10, verify=False)
        
        if response.status_code == 200:
            # Parse the response to extract employee information
            response_data = response.json()
            
            # Extract employee details from API response
            employee_info = {
                'user_id': str(user_id),
                'name': response_data.get('name', f'User {user_id}'),
                'email': response_data.get('email', ''),
                'department': response_data.get('department', ''),
                'status': response_data.get('status', 'active'),
                'api_response': response_data  # Keep full response for debugging
            }
            
            print(f"✅ Fetched employee info for User {user_id}: {employee_info['name']} - {employee_info['department']}")
            return employee_info
            
        else:
            print(f"⚠️ API request failed for User {user_id}. Status: {response.status_code}, Response: {response.text}")
            return {
                'user_id': str(user_id),
                'name': f'User {user_id}',
                'email': '',
                'department': '',
                'status': 'unknown',
                'api_response': None
            }
            
    except requests.exceptions.RequestException as e:
        print(f"❌ Error fetching employee info for User {user_id}: {e}")
        return {
            'user_id': str(user_id),
            'name': f'User {user_id}',
            'email': '',
            'department': '',
            'status': 'unknown',
            'api_response': None
        }

def validate_registration_images(images):
    """Perform quality checks on the first registration image."""
    if not images:
        return False, "No images provided."

    try:
        # Decode the first image
        image_data = images[0]
        header, encoded = image_data.split(',', 1)
        image_bytes = base64.b64decode(encoded)
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

        if img is None:
            return False, "Invalid image format."

        # 1. Face Detection Check
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        if len(faces) == 0:
            return False, "No face detected. Please ensure your face is clearly visible."

        # 2. Brightness Check
        brightness = np.mean(gray)
        if brightness < 50:
            return False, "Image is too dark. Please move to a brighter area."
        if brightness > 200:
            return False, "Image is too bright. Please avoid direct light."

        return True, "Validation successful."

    except Exception as e:
        print(f"Error during image validation: {e}")
        return False, "An error occurred during image validation."

# In-memory store for codes issued by /admin/generate-one-time-code.
# Note: these codes are kept locally (not in the registration_tokens table),
# so they are only valid for the lifetime of this process.
one_time_codes = {}

@app.route('/admin/generate-one-time-code', methods=['POST'])
def generate_one_time_code():
    """Generates a secure, short-lived code for self-registration."""
    data = request.get_json()
    pid = data.get('pid')
    if not pid:
        return jsonify({'error': 'PID is required'}), 400

    # Generate a secure, URL-safe random code
    code = secrets.token_urlsafe(6)
    
    # Store the code with the PID and an expiration time (e.g., 5 minutes)
    one_time_codes[code] = {
        'pid': pid,
        'expires': time.time() + 300  # 5 minutes from now
    }
    
    print(f"Generated code {code} for PID {pid}")
    return jsonify({'success': True, 'code': code})

@app.route('/verify-one-time-code', methods=['POST'])
def verify_one_time_code():
    """Verifies a one-time code and returns the associated user info without deleting the token."""
    data = request.get_json()
    code = data.get('code')
    if not code:
        return jsonify({'error': 'Code is required'}), 400

    # Check if the code exists in the database and is not expired
    token_data = RegistrationToken.find_by_code(code)
    
    if token_data:
        pid = token_data['pid']
        
        # Code is valid, now fetch user info to return to the frontend
        employee_info = fetch_employee_info(pid)
        if not employee_info or employee_info.get('status') == 'unknown':
            return jsonify({'error': f'Could not retrieve information for User ID {pid}.'}), 404

        # Success: Return user info. The token remains valid for the registration step.
        return jsonify({
            'success': True,
            'user_id': employee_info.get('user_id'),
            'name': employee_info.get('name'),
            'email': employee_info.get('email'),
            'department': employee_info.get('department')
        })
    else:
        # If code is invalid or expired, just return the error.
        return jsonify({'error': 'Invalid or expired registration code.'}), 401
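# Illustrative request/response for /verify-one-time-code (the code and user
# details are made up; real values come from RegistrationToken and fetch_employee_info):
#
#   POST /verify-one-time-code   {"code": "AbC123Xy"}
#   200 -> {"success": true, "user_id": "10231", "name": "Jane Dela Cruz",
#           "email": "jane@example.com", "department": "Engineering"}
#   401 -> {"error": "Invalid or expired registration code."}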

@app.route('/verify-user/<pid>', methods=['GET'])
def verify_user(pid):
    """Verify if a user exists and is eligible for self-registration."""
    try:
        # Fetch employee info to confirm the user exists
        employee_info = fetch_employee_info(pid)
        if not employee_info or employee_info.get('status') == 'unknown':
            return jsonify({'error': f'Could not retrieve information for User ID {pid}.'}), 404

        # Check if the user is already registered
        user_data_file = "user_data.json"
        if os.path.exists(user_data_file):
            with open(user_data_file, 'r') as f:
                user_data = json.load(f)
                if pid in user_data:
                    return jsonify({'error': 'Staff is already registered.'}), 409 # 409 Conflict

        # If user exists and is not registered, return their info
        return jsonify({
            'success': True,
            'user_id': employee_info.get('user_id'),
            'name': employee_info.get('name'),
            'email': employee_info.get('email'),
            'department': employee_info.get('department')
        })

    except Exception as e:
        print(f"Error verifying user {pid}: {e}")
        return jsonify({'error': 'An internal error occurred.'}), 500

@app.route('/register', methods=['POST'])
def register():
    data = request.get_json()
    user_id = data.get('userId')
    images = data.get('images')
    self_register = data.get('self_register', False)

    if not user_id or not images:
        return jsonify({'error': 'User ID and images are required'}), 400

    user_id = str(user_id)

    # Server-side validation of the captured images before proceeding
    is_valid, message = validate_registration_images(images)
    if not is_valid:
        return jsonify({'error': message}), 400

    # Fetch employee info to get the name and other details
    employee_info = fetch_employee_info(user_id)
    if not employee_info or employee_info.get('status') == 'unknown':
        return jsonify({'error': f'Could not retrieve information for User ID {user_id}. Please check the ID and try again.'}), 404

    user_name = employee_info.get('name')
    user_email = employee_info.get('email')
    user_department = employee_info.get('department')

    user_folder = os.path.join(FACE_DIR, user_id)
    os.makedirs(user_folder, exist_ok=True)

    for i, image_data in enumerate(images):
        # Decode the base64 image
        header, encoded = image_data.split(',', 1)
        image_bytes = base64.b64decode(encoded)
        
        # Save the image
        with open(os.path.join(user_folder, f'{i}.jpg'), 'wb') as f:
            f.write(image_bytes)

    # Train the model after saving the images
    print("Starting training process...")
    print(f"Training with {len(images)} images for user {user_id}")
    train_face_recognizer()
    print("Training process finished.")
    
    # Verify training worked by checking model file
    if os.path.exists("face_recognizer.yml"):
        print("Model file updated successfully")
    else:
        print("WARNING: Model file not found after training")
    
    # Generate InsightFace embeddings if available
    if INSIGHTFACE_AVAILABLE:
        print("🔄 Generating InsightFace embeddings...")
        insightface_success = generate_insightface_embeddings(user_id, user_folder, user_name, user_department)
        if insightface_success:
            print("✅ InsightFace embedding generation completed")
        else:
            print("⚠️ InsightFace embedding generation failed")
    else:
        print("📝 InsightFace not available, skipping embedding generation")
    
    # Save user data to the database first
    db_success = RegisteredUser.add_or_update(user_id, self_register=self_register)
    if not db_success:
        return jsonify({'error': 'Failed to save registration metadata to the database.'}), 500

    # Save user data to JSON before deleting photos
    user_data_file = "user_data.json"
    user_data = {}
    if os.path.exists(user_data_file):
        with open(user_data_file, 'r') as f:
            user_data = json.load(f)
    
    # Add/update user data
    user_data[user_id] = {
        'name': user_name,
        'email': user_email,
        'department': user_department,
        'status': 'hired',
        'last_updated': datetime.datetime.now().isoformat()
    }
    
    with open(user_data_file, 'w') as f:
        json.dump(user_data, f, indent=2)
    
    # Keep photos temporarily for better training (delete after 5 minutes for security)
    print("Photos kept temporarily for training verification...")
    print("Photos will be automatically deleted after 5 minutes for security.")
    
    # Schedule photo deletion after 5 minutes
    import threading
    import shutil
    def delete_photos_later():
        import time
        time.sleep(300)  # 5 minutes
        if os.path.exists(user_folder):
            shutil.rmtree(user_folder)
            print(f"Photos for user {user_id} deleted after 5 minutes for security.")
    
    # Start deletion timer in background
    deletion_thread = threading.Thread(target=delete_photos_later)
    deletion_thread.daemon = True
    deletion_thread.start()

    return jsonify({'message': f'User {user_id} ({user_name}) registered successfully with {len(images)} images. Photos will be deleted automatically for security.'})
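# Illustrative /register payload (the user ID and image data are made up; images
# are base64 data URLs captured by the frontend camera flow):
#
#   POST /register
#   {
#     "userId": "10231",
#     "images": ["data:image/jpeg;base64,/9j/4AAQSk...", "..."],
#     "self_register": true
#   }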

@app.route('/recognize', methods=['POST'])
def recognize_face():
    """Main recognition endpoint that routes to the appropriate library based on settings"""
    data = request.get_json()
    image_data = data.get('image')
    
    if not image_data:
        return jsonify({'error': 'Image data is required'}), 400
    
    try:
        # Load settings to determine which library to use
        settings = load_settings()
        library = settings.get('recognition_library', 'insightface')
        
        print(f"🔍 Using recognition library: {library}")
        
        # Route to appropriate recognition method
        if library == 'insightface' and INSIGHTFACE_AVAILABLE:
            # Use InsightFace recognition
            return insightface_recognize_standalone()
        else:
            # Use OpenCV recognition (fallback or explicit choice)
            return opencv_recognize()
            
    except Exception as e:
        print(f"❌ Error in main recognition endpoint: {e}")
        return jsonify({'error': str(e)}), 500

def opencv_recognize():
    """OpenCV-based face recognition"""
    data = request.get_json()
    image_data = data.get('image')
    
    try:
        # Decode the base64 image
        header, encoded = image_data.split(',', 1)
        image_bytes = base64.b64decode(encoded)
        
        # Convert to numpy array
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        
        if img is None:
            return jsonify({'error': 'Invalid image data'}), 400
        
        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        
        # Load face cascade
        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
        
        # Detect faces with more sensitive parameters for better detection
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.05, minNeighbors=3, minSize=(20, 20))
        
        print(f"🔍 OpenCV Face detection: Found {len(faces)} faces in image")
        print(f"📐 Image size: {img.shape}")
        
        if len(faces) == 0:
            print("❌ No faces detected - returning 400 error")
            return jsonify({'error': 'No face detected'}), 400
        
        # Get the first detected face
        (x, y, w, h) = faces[0]
        face_img = gray[y:y + h, x:x + w]
        face_img = preprocess_face(face_img)
        
        # Load and use the recognizer
        try:
            recognizer = cv2.face.LBPHFaceRecognizer_create()
            model_file = "face_recognizer.yml"
            
            if not os.path.exists(model_file):
                return jsonify({'error': 'Model file not found. Train the model first.'}), 400
            
            recognizer.read(model_file)
            user_id, confidence = recognizer.predict(face_img)
            
            # Load settings to get OpenCV threshold
            settings = load_settings()
            confidence_threshold = settings.get('opencv_threshold', DEFAULT_SETTINGS['opencv_threshold'])  # LBPH: lower confidence means a closer match
            print(f"🔍 Using OpenCV threshold: {confidence_threshold}")
            
            print(f"Recognition attempt - User ID: {user_id}, Confidence: {confidence}, Threshold: {confidence_threshold}")
            print(f"Recognition result: {'SUCCESS' if confidence < confidence_threshold else 'FAILED'}")
            
            if confidence < confidence_threshold:
                # Fetch employee information from live API endpoint
                print(f"✅ RECOGNITION SUCCESS: User {user_id} recognized with confidence {confidence}")
                print(f"🔄 Fetching employee information for User {user_id} from live API...")
                employee_info = fetch_employee_info(user_id)
                print(f"📋 Employee info result: {employee_info}")
                
                # Log attendance with employee information
                # (datetime and csv are already imported at module level)
                
                now = datetime.datetime.now()
                timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
                today = now.strftime("%Y-%m-%d")
                
                log_file = "attendance_log.csv"
                
                # Ensure log file exists with headers
                if not os.path.exists(log_file) or os.stat(log_file).st_size == 0:
                    with open(log_file, "w", newline="") as file:
                        writer = csv.writer(file)
                        writer.writerow(["Timestamp", "User ID", "User Name", "Status"])
                
                attendance_status = "Check-in"  # Default
                message = f'User {employee_info["name"]} (ID: {user_id}) checked in successfully'
                
                all_records = []
                today_records = []
                
                with open(log_file, 'r', newline='') as file:
                    reader = csv.DictReader(file)
                    for row in reader:
                        all_records.append(row)
                        if row.get('User ID') == str(user_id) and row.get('Timestamp', '').startswith(today):
                            today_records.append(row)
                
                # Determine attendance status based on today's records
                check_in_exists = any(r.get('Status') == 'Check-in' for r in today_records)
                check_out_exists = any(r.get('Status') == 'Check-out' for r in today_records)
                
                if not check_in_exists:
                    # First recognition today - Check-in
                    attendance_status = "Check-in"
                    message = f'User {employee_info["name"]} (ID: {user_id}) checked in successfully'
                    all_records.append({'Timestamp': timestamp, 'User ID': str(user_id), 'User Name': employee_info["name"], 'Status': attendance_status})
                elif check_in_exists and not check_out_exists:
                    # Second recognition today - Check-out
                    attendance_status = "Check-out"
                    message = f'User {employee_info["name"]} (ID: {user_id}) checked out successfully'
                    all_records.append({'Timestamp': timestamp, 'User ID': str(user_id), 'User Name': employee_info["name"], 'Status': attendance_status})
                elif check_in_exists and check_out_exists:
                    # Third+ recognition today - Update latest checkout
                    attendance_status = "Check-out"
                    message = f'User {employee_info["name"]} (ID: {user_id}) checkout time updated'
                    
                    # Find and update the last checkout record for today
                    for i in reversed(range(len(all_records))):
                        record = all_records[i]
                        if record.get('User ID') == str(user_id) and record.get('Timestamp', '').startswith(today) and record.get('Status') == 'Check-out':
                            record['Timestamp'] = timestamp # Update timestamp
                            record['User Name'] = employee_info["name"] # Also update the name
                            break
                    else: # If no checkout record found (shouldn't happen with logic above, but as fallback)
                        all_records.append({'Timestamp': timestamp, 'User ID': str(user_id), 'User Name': employee_info["name"], 'Status': attendance_status})
                
                # Write all records back to the file
                with open(log_file, "w", newline="") as file:
                    writer = csv.DictWriter(file, fieldnames=["Timestamp", "User ID", "User Name", "Status"])
                    writer.writeheader()
                    writer.writerows(all_records)
                
                response_data = {
                    'success': True,
                    'user_id': int(user_id),
                    'user_name': employee_info["name"],
                    'user_email': employee_info["email"],
                    'user_department': employee_info["department"],
                    'user_status': employee_info["status"],
                    'confidence': round(confidence, 2),
                    'timestamp': timestamp,
                    'attendance_status': attendance_status,
                    'message': message
                }
                print(f"📤 Sending response to frontend: {response_data}")
                return jsonify(response_data)
            else:
                return jsonify({
                    'success': False,
                    'message': 'Face not recognized',
                    'confidence': round(confidence, 2),
                    'debug_info': f'Confidence {confidence} >= threshold {confidence_threshold}'
                })
                
        except Exception as e:
            return jsonify({'error': f'Recognition error: {str(e)}'}), 500
            
    except Exception as e:
        return jsonify({'error': f'Image processing error: {str(e)}'}), 500

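# Illustrative attendance_log.csv produced by opencv_recognize() (rows are made up):
#
#   Timestamp,User ID,User Name,Status
#   2025-01-06 08:55:12,10231,Jane Dela Cruz,Check-in
#   2025-01-06 18:02:47,10231,Jane Dela Cruz,Check-out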
@app.route('/admin/users', methods=['GET'])
@role_required('Admin', 'Super Admin', 'HR')
def get_all_users(current_user_pid):
    """Get all registered users from the database, with pagination and search."""
    try:
        # Get pagination and search parameters from request
        page = int(request.args.get('page', 1))
        limit = int(request.args.get('limit', 10))
        search_term = request.args.get('search', None)
        self_register = request.args.get('self_register', None)
        is_export = request.args.get('export', 'false').lower() == 'true'

        # Fetch paginated and filtered users
        users_data = RegisteredUser.get_all_with_hris_info(
            page=page,
            limit=limit,
            search_term=search_term,
            self_register=self_register,
            exporting=is_export
        )
        if users_data is None:
            return jsonify({'error': 'Could not fetch users from the database.'}), 500

        users_with_hris = users_data.get('users', [])
        total_users = users_data.get('total_records', 0)

        enriched_users = []
        for user_row in users_with_hris:
            pid = user_row['pid']
            
            # Get scores for the current user since their last modification date
            user_scores = {}
            if user_row.get('last_modified'):
                user_scores = AttendanceLog.get_user_recognition_scores_since(pid, user_row['last_modified'])

            enriched_users.append({
                'user_id': pid,
                'name': user_row.get('name', f'User {pid}'),
                'email': user_row.get('email', ''),
                'team_name': user_row.get('team_name', 'N/A'),
                'last_modified': user_row['last_modified'].isoformat() if user_row['last_modified'] else None,
                'registered_at': user_row['registered_at'].isoformat() if user_row['registered_at'] else None,
                'status': 'Active',
                'self_register': user_row.get('self_register', False),
                'min_score': user_scores.get('min_score'),
                'max_score': user_scores.get('max_score'),
                'avg_score': user_scores.get('avg_score'),
                'email_timestamp': user_row['email_timestamp'].isoformat() if user_row.get('email_timestamp') else None
            })
        
        if is_export:
            return jsonify({'users': enriched_users})

        return jsonify({
            'users': enriched_users,
            'total_records': total_users,
            'page': page,
            'limit': limit,
            'total_pages': (total_users + limit - 1) // limit if limit > 0 else 1
        })
    except Exception as e:
        print(f"❌ Error in get_all_users: {e}")
        return jsonify({'error': 'An internal error occurred while fetching users.'}), 500

@app.route('/admin/users/<user_id>/update-pkl', methods=['POST'])
@role_required('Admin', 'Super Admin')
def update_user_pkl(current_user_pid, user_id):
    """Updates a specific user's PKL file with the latest HRIS data."""
    print(f"🔄 Received request to update PKL for user_id: {user_id}")
    try:
        # 1. Check if the embedding file exists
        embedding_file = os.path.join(EMBEDDINGS_DIR, f"insightface_embeddings_{user_id}.pkl")
        if not os.path.exists(embedding_file):
            print(f"❌ PKL file not found for user {user_id}")
            return jsonify({'error': 'User embedding file not found.'}), 404

        # 2. Fetch the latest employee info from the database
        employee_info = Staff.get_by_pid(user_id)
        if not employee_info:
            print(f"❌ User {user_id} not found in HRIS database.")
            return jsonify({'error': 'User not found in HRIS database.'}), 404

        # 3. Read the existing PKL file
        with open(embedding_file, 'rb') as f:
            data = pickle.load(f)

        # 4. Get new data and check if an update is needed
        new_name = f"{employee_info.get('first_name', '')} {employee_info.get('last_name', '')}".strip()
        new_department = employee_info.get('team_name', 'N/A')
        
        current_name = data.get('name')
        current_department = data.get('department')

        if new_name == current_name and new_department == current_department:
            print(f"✅ No update needed for user {user_id}. Data is already current.")
            return jsonify({'success': True, 'message': 'User data is already up to date.'})

        # 5. Update the data and write it back
        print(f"🔄 Updating user {user_id}: Name '{current_name}' -> '{new_name}', Dept '{current_department}' -> '{new_department}'")
        data['name'] = new_name
        data['department'] = new_department
        
        with open(embedding_file, 'wb') as f:
            pickle.dump(data, f)

        print(f"✅ Successfully updated PKL file for user {user_id}")
        return jsonify({'success': True, 'message': f'Successfully updated PKL file for user {user_id}.'})

    except Exception as e:
        print(f"❌ Error updating PKL file for user {user_id}: {e}")
        return jsonify({'error': 'An internal server error occurred.'}), 500

@app.route('/admin/email-low-score-user/<user_id>', methods=['POST'])
@role_required('Super Admin')
def email_low_score_user(current_user_pid, user_id):
    """Emails a single user if their recognition score is low, using an HTML template."""
    try:
        employee_info = fetch_employee_info(user_id)
        if not employee_info or not employee_info.get('email'):
            return jsonify({'error': f'Could not find a valid email for User ID {user_id}.'}), 404

        employee_name = employee_info.get('name', '').strip()
        employee_email = employee_info.get('email')
        bcc_email = 'christian.manuel@scrubbed.net'
        
        # The re-registration link should point to the main page or a specific profile page
        reregister_link = f"{app.config['FRONTEND_URL']}"

        # Construct and send the HTML email
        msg = MIMEMultipart('related')
        msg['From'] = formataddr(('Fran AI', app.config['SENDER_EMAIL']))
        msg['To'] = employee_email
        msg['Subject'] = "Face Registration Update Required - Fran AI"

        html_body = f"""
        <table width="100%" cellpadding="0" cellspacing="0" role="presentation" style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';background-color:#edf2f7;margin:0;padding:0;width:100%">
        <tbody>
            <tr>
                <td align="center" style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol'">
                    <table width="100%" cellpadding="0" cellspacing="0" role="presentation" style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';margin:0;padding:0;width:100%">
                        <tbody>
                            <tr>
                                <td style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';padding:25px 0;text-align:center">
                                    <div style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';color:#3d4852;font-size:19px;font-weight:bold;text-decoration:none;display:inline-block">
                                        <div style="display: inline-flex; align-items: center; gap: 15px;">
                                            <img src="https://tm.scrubbed.net/images/fran-logo.png" alt="FRAN Logo" style="height: 50px; width: auto;">
                                        </div>
                                    </div>
                                </td>
                            </tr>
                            <tr>
                                <td width="100%" cellpadding="0" cellspacing="0" style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';background-color:#edf2f7;border-bottom:1px solid #edf2f7;border-top:1px solid #edf2f7;margin:0;padding:0;width:100%">
                                    <table align="center" width="570" cellpadding="0" cellspacing="0" role="presentation" style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';background-color:#ffffff;border-color:#e8e5ef;border-radius:2px;border-width:1px;margin:0 auto;padding:0;width:570px">
                                        <tbody><tr>
                                            <td style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';max-width:100vw;padding:32px">
                                                <h1 style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';color:#3d4852;font-size:18px;font-weight:bold;margin-top:0;text-align:left">
                                                    <span class="il">Hello {employee_name},</span>
                                                </h1>
                
                                                <p style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';font-size:16px;line-height:1.5em;margin-top:0;text-align:justify">Our system found that your face registration score is below the required threshold, which may affect your recognition accuracy.</p>

                                                <p style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';font-size:16px;line-height:1.5em;margin-top:0;text-align:justify">We kindly request that you re-register your face, either through self-registration or at the office, to ensure the system can properly recognize you.</p>

                                                <p style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';font-size:16px;line-height:1.5em;margin-top:0;text-align:left">For self-registration guidelines, please refer to: <a href="https://drive.google.com/file/d/1HSfLS5RcTi15q0zAUtSy6qr7B_dsbrZT/view?usp=sharing" style="color: #2563eb; text-decoration: underline;">Self-Registration Guidelines</a></p>

                                                <p style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';font-size:16px;line-height:1.5em;margin-top:0;text-align:justify">Thank you for your cooperation!</p>

                                                <p style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';font-size:16px;line-height:1.5em;margin-top:0;text-align:justify">Best regards, <br> Fran AI</p>

                                                <div style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol'">
                                                </div>
                                            </td>
                                        </tr>
                                        </tbody></table>
                                    </td>
                                </tr>
                                <tr>
                                    <td style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol'">
                                        <table align="center" width="570" cellpadding="0" cellspacing="0" role="presentation" style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';margin:0 auto;padding:0;text-align:center;width:570px">
                                            <tbody><tr>
                                                <td align="center" style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';max-width:100vw;padding:32px">
                                                    <p style="box-sizing:border-box;font-family:-apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji','Segoe UI Symbol';line-height:1.5em;margin-top:0;color:#b0adc5;font-size:12px;text-align:center">2025 <span class="il">Scrubbed.net</span></p>
                                                </td>
                                            </tr>
                                            </tbody></table>
                                        </td>
                                    </tr>
                                    </tbody>
                                    </table>
                                </td>
                            </tr>
                        </tbody>
                        </table>
                    </td>
                </tr>
            </tbody>
        </table>
        """
        msg.attach(MIMEText(html_body, 'html'))

        # Embed the logo
        try:
            logo_path = os.path.join(current_dir, 'assets', 'images', 'fran-logo.png')
            with open(logo_path, 'rb') as f:
                img = MIMEImage(f.read())
                img.add_header('Content-ID', '<franlogo>')
                msg.attach(img)
        except FileNotFoundError:
            print("WARNING: fran-logo.png not found. Email will be sent without the logo.")

        server = smtplib.SMTP(app.config['SMTP_SERVER'], app.config['SMTP_PORT'])
        server.starttls()
        server.login(app.config['SMTP_USERNAME'], app.config['SMTP_PASSWORD'])
        recipients = [employee_email, bcc_email]
        server.sendmail(app.config['SENDER_EMAIL'], recipients, msg.as_string())
        server.quit()

        # Also update the database to record that an email was sent
        RegisteredUser.update_email_timestamp(user_id)

        return jsonify({'success': True, 'message': f'Email sent to {employee_name} ({employee_email}).'})

    except Exception as e:
        print(f"ERROR: Failed to send low score email: {e}")
        return jsonify({'error': 'Failed to send email. Please check server configuration.'}), 500





@app.route('/admin/users/<user_id>', methods=['DELETE'])
def delete_user(user_id):
    """Delete a user from the system and retrain model"""
    try:
        # First, delete from the database.
        # We'll proceed with file cleanup even if this fails, to ensure robustness.
        try:
            pid_int = int(user_id)
            RegisteredUser.delete(pid_int)
        except (ValueError, TypeError):
            print(f"Warning: Could not convert user_id '{user_id}' to int for DB deletion. Skipping.")

        # Remove user from JSON data
        user_data_file = "user_data.json"
        user_deleted = False
        
        if os.path.exists(user_data_file):
            with open(user_data_file, 'r') as f:
                user_data = json.load(f)
            
            if user_id in user_data:
                del user_data[user_id]
                user_deleted = True
                
                with open(user_data_file, 'w') as f:
                    json.dump(user_data, f, indent=2)
        
        # Remove any remaining photo folders for this user
        user_folder = os.path.join(FACE_DIR, user_id)
        if os.path.exists(user_folder):
            import shutil
            shutil.rmtree(user_folder)
            user_deleted = True
        
        if user_deleted:
            # Retrain the model to remove the deleted user's data
            print(f"Retraining model after deleting user {user_id}...")
            train_face_recognizer()
            print("Model retrained successfully.")

            # Also delete the InsightFace embedding file
            embedding_file = os.path.join(EMBEDDINGS_DIR, f"insightface_embeddings_{user_id}.pkl")
            if os.path.exists(embedding_file):
                os.remove(embedding_file)
                print(f"Deleted InsightFace embedding file: {embedding_file}")
            
            return jsonify({'message': f'User {user_id} deleted successfully from system and model retrained.'})
        else:
            return jsonify({'error': 'User not found in system'}), 404
    except Exception as e:
        return jsonify({'error': f'Error deleting user: {str(e)}'}), 500

@app.route('/admin/users/<old_user_id>', methods=['PUT'])
def update_user_id(old_user_id):
    """Update user ID (rename folder)"""
    try:
        old_folder = os.path.join(FACE_DIR, old_user_id)
        
        if not os.path.exists(old_folder):
            return jsonify({'error': 'User not found'}), 404
        
        data = request.get_json()
        new_user_id = data.get('new_user_id')
        
        if not new_user_id:
            return jsonify({'error': 'New user ID is required'}), 400
        
        new_folder = os.path.join(FACE_DIR, new_user_id)
        
        if os.path.exists(new_folder):
            return jsonify({'error': 'New user ID already exists'}), 400
        
        os.rename(old_folder, new_folder)
        
        # Note: No retraining needed - photos are deleted for security reasons
        # The trained model retains the user's face data without storing raw images
        
        return jsonify({'message': f'User ID updated from {old_user_id} to {new_user_id}'})
    except Exception as e:
        return jsonify({'error': f'Error updating user ID: {str(e)}'}), 500
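
# Illustrative client calls for the two admin endpoints above. A minimal sketch,
# not part of the application flow: the base URL/port is taken from the app.run()
# call at the bottom of this file and the PIDs are placeholders.
def _example_admin_user_calls(base_url="http://localhost:5001"):
    """Sketch: delete one registered user, then rename another user's face folder."""
    # DELETE removes the user from user_data.json, deletes any face folder and
    # InsightFace embedding file, and retrains the model (see delete_user above).
    resp = requests.delete(f"{base_url}/admin/users/1234")
    print(resp.status_code, resp.json())

    # PUT renames the registered-faces folder; the new ID goes in the JSON body
    # (see update_user_id above).
    resp = requests.put(f"{base_url}/admin/users/1111", json={"new_user_id": "2222"})
    print(resp.status_code, resp.json())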

@app.route('/admin/users/<user_id>/images', methods=['DELETE'])
def delete_user_images(user_id):
    """Delete all images for a user but keep the folder"""
    try:
        user_folder = os.path.join(FACE_DIR, user_id)
        if not os.path.exists(user_folder):
            return jsonify({'error': 'User not found'}), 404
        
        # Delete all jpg files
        deleted_count = 0
        for filename in os.listdir(user_folder):
            if filename.endswith('.jpg'):
                os.remove(os.path.join(user_folder, filename))
                deleted_count += 1
        
        # Note: No retraining needed - photos are deleted for security reasons
        # The trained model retains the user's face data without storing raw images
        
        return jsonify({'message': f'Deleted {deleted_count} images for user {user_id}'})
    except Exception as e:
        return jsonify({'error': f'Error deleting images: {str(e)}'}), 500

@app.route('/admin/users/<user_id>/images', methods=['GET'])
def get_user_images(user_id):
    """Get all images for a specific user"""
    try:
        user_folder = os.path.join(FACE_DIR, user_id)
        
        # Check if user exists in user_data.json
        user_data_file = "user_data.json"
        user_exists = False
        if os.path.exists(user_data_file):
            with open(user_data_file, 'r') as f:
                user_data = json.load(f)
                user_exists = user_id in user_data
        
        if not user_exists:
            return jsonify({'error': 'User not found'}), 404
        
        # If folder doesn't exist, images were deleted for security
        if not os.path.exists(user_folder):
            return jsonify({
                'images': [],
                'message': 'Images deleted for security after training',
                'status': 'trained'
            })
        
        images = []
        for filename in os.listdir(user_folder):
            if filename.endswith('.jpg'):
                image_path = os.path.join(user_folder, filename)
                # Convert image to base64
                with open(image_path, 'rb') as f:
                    image_data = base64.b64encode(f.read()).decode('utf-8')
                    images.append({
                        'filename': filename,
                        'data': f'data:image/jpeg;base64,{image_data}'
                    })
        
        return jsonify({'images': images})
    except Exception as e:
        return jsonify({'error': f'Error fetching images: {str(e)}'}), 500

@app.route('/admin/settings', methods=['GET'])
def get_admin_settings():
    """Get current admin settings"""
    try:
        settings = load_settings()
        return jsonify({
            'success': True,
            'settings': settings,
            'available_libraries': {
                'opencv': {
                    'name': 'OpenCV LBPH',
                    'accuracy': '70-80%',
                    'description': 'Traditional computer vision approach'
                },
                'insightface': {
                    'name': 'InsightFace',
                    'accuracy': '98%+',
                    'description': 'State-of-the-art deep learning model',
                    'available': INSIGHTFACE_AVAILABLE
                }
            }
        })
    except Exception as e:
        return jsonify({'error': f'Error loading settings: {str(e)}'}), 500

@app.route('/admin/settings', methods=['POST'])
def update_admin_settings():
    """Update admin settings"""
    try:
        data = request.get_json()
        if not data:
            return jsonify({'error': 'No data provided'}), 400
        
        current_settings = load_settings()
        
        # Validate recognition library
        if 'recognition_library' in data:
            library = data['recognition_library']
            if library not in ['opencv', 'insightface']:
                return jsonify({'error': 'Invalid recognition library. Must be "opencv" or "insightface"'}), 400
            
            # Check if InsightFace is available when trying to use it
            if library == 'insightface' and not INSIGHTFACE_AVAILABLE:
                return jsonify({
                    'error': 'InsightFace is not available. Please install InsightFace libraries or use OpenCV.',
                    'available_libraries': ['opencv']
                }), 400
        
        # Validate thresholds
        if 'insightface_threshold' in data:
            threshold = data['insightface_threshold']
            if not isinstance(threshold, (int, float)) or not (0.0 <= threshold <= 1.0):
                return jsonify({'error': 'InsightFace threshold must be between 0.0 and 1.0'}), 400
        
        if 'opencv_threshold' in data:
            threshold = data['opencv_threshold']
            if not isinstance(threshold, (int, float)) or threshold < 0:
                return jsonify({'error': 'OpenCV threshold must be a positive number'}), 400
        
        # Update settings
        current_settings.update(data)
        
        # Save settings
        if save_settings(current_settings):
            return jsonify({
                'success': True,
                'message': 'Settings updated successfully',
                'settings': current_settings
            })
        else:
            return jsonify({'error': 'Failed to save settings'}), 500
            
    except Exception as e:
        return jsonify({'error': f'Error updating settings: {str(e)}'}), 500
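
# Example payload for the settings endpoint above. A minimal sketch: the keys mirror
# the validation in update_admin_settings(), and the concrete values are illustrative
# rather than recommended defaults.
def _example_update_settings(base_url="http://localhost:5001"):
    """Sketch: switch recognition to InsightFace and adjust both thresholds."""
    payload = {
        "recognition_library": "insightface",  # rejected with 400 if InsightFace is not installed
        "insightface_threshold": 0.65,         # cosine similarity, must be within 0.0-1.0
        "opencv_threshold": 90,                # LBPH confidence, must be non-negative
    }
    resp = requests.post(f"{base_url}/admin/settings", json=payload)
    print(resp.status_code, resp.json())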

@app.route('/attendance/stats', methods=['GET'])
def get_attendance_stats():
    """Get attendance statistics for dashboard"""
    try:
        today = datetime.datetime.now().strftime("%Y-%m-%d")
        week_ago = (datetime.datetime.now() - datetime.timedelta(days=7)).strftime("%Y-%m-%d")
        month_ago = (datetime.datetime.now() - datetime.timedelta(days=30)).strftime("%Y-%m-%d")
        
        # Initialize stats
        stats = {
            'today': 0,
            'thisWeek': 0,
            'thisMonth': 0,
            'recentActivity': []
        }
        
        # Read attendance log if it exists
        log_file = "attendance_log.csv"
        if os.path.exists(log_file):
            with open(log_file, 'r', newline='') as file:
                reader = csv.DictReader(file)
                for row in reader:
                    timestamp = row.get('Timestamp', '')
                    if timestamp:
                        try:
                            # Parse timestamp
                            dt = datetime.datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
                            date_str = dt.strftime("%Y-%m-%d")
                            
                            # Count today's check-ins
                            if date_str == today:
                                stats['today'] += 1
                            
                            # Count this week's check-ins
                            if date_str >= week_ago:
                                stats['thisWeek'] += 1
                            
                            # Count this month's check-ins
                            if date_str >= month_ago:
                                stats['thisMonth'] += 1
                            
                            # Add to recent activity (last 10)
                            if len(stats['recentActivity']) < 10:
                                stats['recentActivity'].append({
                                    'id': len(stats['recentActivity']) + 1,
                                    'user': f"User {row.get('User ID', 'Unknown')}",
                                    'name': f"{row.get('User Name', 'Unknown')}",
                                    'time': dt.strftime("%I:%M %p"),
                                    'status': row.get('Status', 'Check-in')
                                })
                        except ValueError:
                            continue
        
        return jsonify(stats)
        
    except Exception as e:
        return jsonify({'error': f'Error getting stats: {str(e)}'}), 500

@app.route('/api/attendance/recent', methods=['GET'])
def get_recent_attendance():
    """Get the 5 most recent attendance records."""
    try:
        recent_logs = AttendanceLog.get_recent_attendance_logs(limit=5)
        if recent_logs is None:
            return jsonify({'error': 'Failed to retrieve recent attendance logs.'}), 500
        
        # Format timestamps for frontend display
        formatted_logs = []
        for log in recent_logs:
            formatted_logs.append({
                'name': log['name'] or f"User PID: {log['pid']}",
                'activity': f"{log['status']} at {log['recognition_timestamp'].strftime('%b %d, %Y %I:%M %p')}"
            })
        return jsonify(formatted_logs)
    except Exception as e:
        print(f"Error in get_recent_attendance: {e}")
        return jsonify({'error': 'An internal server error occurred.'}), 500

@app.route('/test/model', methods=['GET'])
def test_model():
    """Test if the face recognition model is working and check alignment"""
    try:
        model_file = "face_recognizer.yml"
        if not os.path.exists(model_file):
            return jsonify({'error': 'Model file not found'}), 400
        
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        recognizer.read(model_file)
        
        # Check registered faces in folders
        face_dir = "registered_faces"
        folder_users = []
        if os.path.exists(face_dir):
            folder_users = [d for d in os.listdir(face_dir) if os.path.isdir(os.path.join(face_dir, d))]
        
        # Check users in JSON data
        json_users = []
        user_data_file = "user_data.json"
        if os.path.exists(user_data_file):
            with open(user_data_file, 'r') as f:
                user_data = json.load(f)
                json_users = list(user_data.keys())
        
        return jsonify({
            'model_exists': True,
            'model_file_size': os.path.getsize(model_file),
            'folder_users': folder_users,
            'json_users': json_users,
            'alignment_issue': len(folder_users) != len(json_users),
            'confidence_threshold': 150,
            'status': 'Model is ready for recognition'
        })
    except Exception as e:
        return jsonify({'error': f'Model test failed: {str(e)}'}), 500

@app.route('/attendance/data', methods=['GET'])
def get_attendance_data():
    """Get paginated and filtered attendance data from the database."""
    try:
        # Get query parameters for filtering and pagination
        start_date = request.args.get('start_date', None)
        end_date = request.args.get('end_date', None)
        page = int(request.args.get('page', 1))
        limit = int(request.args.get('limit', 10))
        is_export = request.args.get('export', 'false').lower() == 'true'
        search_term = request.args.get('searchTerm', None)

        attendance_data = []
        logs = AttendanceLog.get_all_attendance_with_details(start_date, end_date, page, limit, exporting=is_export, search_term=search_term)

        if logs is None:
            return jsonify({'error': 'Failed to retrieve attendance data.'}), 500
        
        for log in logs:
            attendance_data.append({
                'id': log['id'],
                'name': log['name'] or f"User PID: {log['pid']}",
                'timestamp': log['recognition_timestamp'].strftime("%b. %d, %Y %I:%M %p"),
                'status': log['status'],
                'location': log['location_name'] or ''
            })

        # If exporting, return the full (unpaginated) list
        if is_export:
            return jsonify({'attendance': attendance_data})

        # Otherwise, return the paginated response
        total_records = AttendanceLog.get_total_logs_count(start_date, end_date, search_term=search_term)
        return jsonify({
            'attendance': attendance_data,
            'total_records': total_records,
            'page': page,
            'limit': limit,
            'total_pages': (total_records + limit - 1) // limit
        })
        
    except Exception as e:
        print(f"Error in get_attendance_data: {e}")
        return jsonify({'error': 'An internal server error occurred.'}), 500

@app.route('/attendance/export-summary', methods=['GET'])
def export_daily_summary():
    """Exports the full daily attendance summary with frontend-matching format."""
    try:
        start_date = request.args.get('start_date', None)
        end_date = request.args.get('end_date', None)
        search_term = request.args.get('searchTerm', None)

        logs = AttendanceLog.get_daily_attendance(start_date=start_date, end_date=end_date, exporting=True, search_term=search_term)
        if logs is None:
            return jsonify({'error': 'Failed to retrieve daily summary for export.'}), 500

        # Helper to calculate duration, mirroring frontend logic
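        # e.g. a 09:05 check-in and a 17:35 check-out give a 30,600-second difference:
        # 30600 // 3600 = 8 hours and (30600 % 3600) // 60 = 30 minutes -> "8h 30m".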
        def calculate_duration(checkin, checkout):
            if not checkin or not checkout: return 'N/A'
            diff = checkout - checkin
            if diff.total_seconds() < 0: return 'N/A'
            hours = int(diff.total_seconds() // 3600)
            minutes = int((diff.total_seconds() % 3600) // 60)
            return f"{hours}h {minutes}m"

        # Format data to match the frontend table exactly
        export_data = []
        for log in logs:
            check_in_dt = log.get('check_in')
            check_out_dt = log.get('check_out')

            export_data.append({
                'Name': log.get('name', ''),
                'Location': log.get('location_name', ''),
                'Check In': check_in_dt.strftime("%b. %d, %Y %I:%M %p") if check_in_dt else '',
                'Check Out': check_out_dt.strftime("%b. %d, %Y %I:%M %p") if check_out_dt else '',
                'Duration': calculate_duration(check_in_dt, check_out_dt)
            })

        return jsonify({'attendance': export_data})

    except Exception as e:
        import traceback
        traceback.print_exc()
        print(f"Error in export_daily_summary: {e}")
        return jsonify({'error': 'An internal server error occurred during export.'}), 500

@app.route('/attendance/summary', methods=['GET'])
def get_attendance_summary():
    """Get summary statistics for the attendance log based on a date range."""
    try:
        start_date = request.args.get('start_date', None)
        end_date = request.args.get('end_date', None)
        active_view = request.args.get('view', 'summary') # Default to summary
        search_term = request.args.get('searchTerm', None)

        summary = AttendanceLog.get_attendance_summary(start_date, end_date, view=active_view, search_term=search_term)
        if summary is None:
            return jsonify({'error': 'Failed to retrieve attendance summary.'}), 500
        
        # Get the correct total records based on the view
        if active_view == 'summary':
            total_records = AttendanceLog.get_total_daily_count(start_date, end_date, search_term=search_term)
        else: # 'logs' view
            total_records = AttendanceLog.get_total_logs_count(start_date, end_date, search_term=search_term)
        
        summary['total_records'] = total_records
        
        return jsonify(summary)

    except Exception as e:
        print(f"Error in get_attendance_summary: {e}")
        return jsonify({'error': 'An internal server error occurred.'}), 500

@app.route('/api/attendance/summary/quick-view', methods=['GET'])
@role_required('Super Admin', 'HR')
def get_quick_view_data(current_user_pid):
    """
    Get quick view summary for the dashboard.
    Adjusts the date range to cover the full month of the given end_date.
    """
    try:
        # Get original end_date from request
        end_date_str = request.args.get('end_date', None)

        # If no end_date is provided, we can't proceed
        if not end_date_str:
            return jsonify({'error': 'end_date is required'}), 400

        # Determine the month and year from the end_date
        target_date = datetime.datetime.strptime(end_date_str, '%Y-%m-%d')
        year = target_date.year
        month = target_date.month

        # Calculate the first day of the month
        first_day = datetime.date(year, month, 1)
        
        # Calculate the last day of the month
        _, last_day_of_month = calendar.monthrange(year, month)
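        # e.g. calendar.monthrange(2024, 2) returns (3, 29): February 2024 starts on a
        # Thursday (weekday 3) and has 29 days, so last_day becomes 2024-02-29.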
        last_day = datetime.date(year, month, last_day_of_month)

        # Format dates back to strings
        final_start_date = first_day.strftime('%Y-%m-%d')
        final_end_date = last_day.strftime('%Y-%m-%d')

        summary = AttendanceLog.get_quick_view_summary(final_start_date, final_end_date)
        if summary is None:
            return jsonify({'error': 'Failed to retrieve quick view summary.'}), 500
        
        return jsonify(summary)

    except Exception as e:
        print(f"Error in get_quick_view_data: {e}")
        return jsonify({'error': 'An internal server error occurred.'}), 500

from dateutil.relativedelta import relativedelta

@app.route('/api/attendance/summary/weekly', methods=['GET'])
@role_required('Super Admin', 'HR')
def get_weekly_data(current_user_pid):
    """Get week-by-week summary for the dashboard, with optional comparison to the previous month."""
    try:
        start_date = request.args.get('start_date', None)
        end_date = request.args.get('end_date', None)
        team_gid = request.args.get('team_gid', None)

        if not start_date or not end_date:
            return jsonify({'error': 'start_date and end_date are required'}), 400

        # Get current period data
        summary = AttendanceLog.get_weekly_summary(start_date, end_date, team_gid=team_gid)
        if summary is None:
            return jsonify({'error': 'Failed to retrieve weekly summary.'}), 500

        return jsonify(summary)

    except Exception as e:
        print(f"Error in get_weekly_data: {e}")
        return jsonify({'error': 'An internal server error occurred.'}), 500

@app.route('/api/teams/attendance', methods=['GET'])
@role_required('Super Admin', 'HR')
def get_team_attendance_data(current_user_pid):
    """Get team-based attendance data for a specific week."""
    try:
        start_date = request.args.get('start_date', None)
        end_date = request.args.get('end_date', None)
        team_gid = request.args.get('team_gid', None)
        prev_start_date = request.args.get('prev_start_date', None)
        prev_end_date = request.args.get('prev_end_date', None)
        
        team_summary = AttendanceLog.get_team_attendance_summary(
            start_date=start_date, 
            end_date=end_date, 
            team_gid=team_gid,
            prev_start_date=prev_start_date,
            prev_end_date=prev_end_date
        )
        if team_summary is None:
            return jsonify({'error': 'Failed to retrieve team attendance summary.'}), 500
        
        return jsonify(team_summary)

    except Exception as e:
        print(f"Error in get_team_attendance_data: {e}")
        return jsonify({'error': 'An internal server error occurred.'}), 500

@app.route('/attendance/daily-summary', methods=['GET'])
def get_daily_summary():
    """Get paginated and filtered daily attendance summary."""
    try:
        start_date = request.args.get('start_date', None)
        end_date = request.args.get('end_date', None)
        page = int(request.args.get('page', 1))
        limit = int(request.args.get('limit', 15))
        search_term = request.args.get('searchTerm', None)

        logs = AttendanceLog.get_daily_attendance(start_date, end_date, page, limit, search_term=search_term)
        if logs is None:
            return jsonify({'error': 'Failed to retrieve daily summary.'}), 500

        total_records = AttendanceLog.get_total_daily_count(start_date, end_date, search_term=search_term)

        # Format timestamps for frontend display
        formatted_logs = []
        for log in logs:
            if log['check_in']:
                log['check_in'] = log['check_in'].strftime("%b. %d, %Y %I:%M %p")
            if log['check_out']:
                log['check_out'] = log['check_out'].strftime("%b. %d, %Y %I:%M %p")
            formatted_logs.append(log)

        return jsonify({
            'attendance': formatted_logs,
            'total_records': total_records,
            'page': page,
            'limit': limit,
            'total_pages': (total_records + limit - 1) // limit
        })

    except Exception as e:
        print(f"Error in get_daily_summary: {e}")
        return jsonify({'error': 'An internal server error occurred.'}), 500


@app.route('/attendance/status/<user_id>', methods=['GET'])
def get_attendance_status(user_id):
    """Check if a user has a 'Check-in' record for the current day from the database."""
    try:
        # The model method now handles all the logic
        attendance_info = AttendanceLog.get_status_for_user(user_id)
        return jsonify(attendance_info)

    except Exception as e:
        print(f"Error in get_attendance_status endpoint: {e}")
        return jsonify({'error': 'An internal server error occurred.'}), 500

@app.route('/debug/recognize', methods=['POST'])
def debug_recognition():
    """Debug endpoint to test recognition with detailed output"""
    data = request.get_json()
    image_data = data.get('image')
    
    if not image_data:
        return jsonify({'error': 'Image data is required'}), 400
    
    try:
        # Decode the base64 image
        header, encoded = image_data.split(',', 1)
        image_bytes = base64.b64decode(encoded)
        
        # Convert to numpy array
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        
        if img is None:
            return jsonify({'error': 'Invalid image data'}), 400
        
        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        
        # Load face cascade
        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
        
        # Detect faces with different parameters
        faces_1 = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4, minSize=(30, 30))
        faces_2 = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=6, minSize=(50, 50))
        
        debug_info = {
            'image_size': f"{img.shape[1]}x{img.shape[0]}",
            'faces_detected_sensitive': len(faces_1),
            'faces_detected_normal': len(faces_2),
            'face_locations_sensitive': faces_1.tolist() if len(faces_1) > 0 else [],
            'face_locations_normal': faces_2.tolist() if len(faces_2) > 0 else []
        }
        
        if len(faces_1) == 0:
            return jsonify({
                'success': False,
                'message': 'No face detected with sensitive parameters',
                'debug_info': debug_info
            }), 400
        
        # Get the first detected face
        (x, y, w, h) = faces_1[0]
        face_img = gray[y:y + h, x:x + w]
        face_img = preprocess_face(face_img)
        
        # Load and use the recognizer
        try:
            recognizer = cv2.face.LBPHFaceRecognizer_create()
            model_file = "face_recognizer.yml"
            
            if not os.path.exists(model_file):
                return jsonify({'error': 'Model file not found. Train the model first.'}), 400
            
            recognizer.read(model_file)
            user_id, confidence = recognizer.predict(face_img)
            
            # Test different thresholds
            thresholds = [80, 85, 90, 95, 100]
            threshold_results = {}
            
            for threshold in thresholds:
                threshold_results[f'threshold_{threshold}'] = {
                    'recognized': confidence < threshold,
                    'confidence': round(confidence, 2)
                }
            
            debug_info.update({
                'recognition_results': {
                    'user_id': int(user_id),
                    'confidence': round(confidence, 2),
                    'threshold_tests': threshold_results
                }
            })
            
            return jsonify({
                'success': True,
                'user_id': int(user_id),
                'confidence': round(confidence, 2),
                'debug_info': debug_info,
                'message': f'Debug recognition completed'
            })
                
        except Exception as e:
            return jsonify({'error': f'Recognition error: {str(e)}'}), 500
            
    except Exception as e:
        return jsonify({'error': f'Image processing error: {str(e)}'}), 500

# InsightFace API Endpoints
@app.route('/insightface/embeddings', methods=['GET'])
def get_insightface_embeddings():
    """Get all InsightFace embeddings for standalone app"""
    if not INSIGHTFACE_AVAILABLE:
        return jsonify({
            'success': False,
            'error': 'InsightFace not available'
        }), 503
    
    try:
        all_embeddings = []
        all_names = []
        all_ids = []
        
        # Load all embedding files from the dedicated directory
        embedding_files = [f for f in os.listdir(EMBEDDINGS_DIR) if f.startswith('insightface_embeddings_') and f.endswith('.pkl')]
        
        print(f"🔄 Loading {len(embedding_files)} InsightFace embedding files from {EMBEDDINGS_DIR}...")
        
        for embedding_file in embedding_files:
            try:
                file_path = os.path.join(EMBEDDINGS_DIR, embedding_file)
                with open(file_path, 'rb') as f:
                    data = pickle.load(f)
                    user_id = data['user_id']
                    
                    # Get the display name from user_data.json, falling back to a generic label
                    user_name = f'User {user_id}'
                    if os.path.exists('user_data.json'):
                        with open('user_data.json', 'r') as udf:  # separate handle; avoids shadowing the pickle file handle
                            user_data = json.load(udf)
                            user_name = user_data.get(user_id, {}).get('name', user_name)
                    
                    # Add embeddings for this user
                    for embedding in data['embeddings']:
                        all_embeddings.append(embedding.tolist())
                        all_names.append(user_name)
                        all_ids.append(user_id)
                        
                print(f"✅ Loaded embeddings for {user_name} (ID: {user_id})")
                        
            except Exception as e:
                print(f"⚠️ Error loading {embedding_file}: {e}")
                continue
        
        print(f"✅ Total embeddings loaded: {len(all_embeddings)}")
        
        return jsonify({
            'success': True,
            'embeddings': all_embeddings,
            'names': all_names,
            'ids': all_ids,
            'count': len(all_embeddings)
        })
        
    except Exception as e:
        print(f"❌ Error in get_insightface_embeddings: {e}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500
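
# The loader above (and the matchers in the recognition routes below) assume one
# pickle file per user inside EMBEDDINGS_DIR, named insightface_embeddings_<pid>.pkl
# and holding a dict with at least 'user_id' and 'embeddings' (plus optional
# 'name'/'department'). The helper below is only an illustrative sketch of that
# layout; the real files are produced by the registration flow, not by this helper.
def _example_write_embedding_file(user_id, embeddings, name=None, department=None):
    """Sketch: persist a user's InsightFace embeddings in the format expected above."""
    payload = {
        # user_data.json keys users by string PID, so the ID is stored as a string
        # here as well (an assumption for this sketch).
        'user_id': str(user_id),
        'embeddings': [np.asarray(e, dtype=np.float32) for e in embeddings],
    }
    if name:
        payload['name'] = name
    if department:
        payload['department'] = department
    file_path = os.path.join(EMBEDDINGS_DIR, f"insightface_embeddings_{user_id}.pkl")
    # pickle comes from the guarded InsightFace import block at the top of this file.
    with open(file_path, 'wb') as f:
        pickle.dump(payload, f)
    return file_path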

from skimage.metrics import structural_similarity as ssim

def check_sharpness(image):
    """Calculates the sharpness of an image."""
    try:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        return cv2.Laplacian(gray, cv2.CV_64F).var()
    except Exception:
        return 0

def check_skin_texture(image):
    """Calculates the Cr channel variance, an indicator of skin texture."""
    try:
        ycbcr = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
        return ycbcr[:, :, 1].var()
    except Exception:
        return 0

def check_micro_motion(images):
    """Calculates the average motion between frames."""
    try:
        if len(images) < 2: return 0
        prev_gray = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)
        total_flow = 0
        for i in range(1, len(images)):
            current_gray = cv2.cvtColor(images[i], cv2.COLOR_BGR2GRAY)
            flow = cv2.calcOpticalFlowFarneback(prev_gray, current_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
            magnitude, _ = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            total_flow += np.mean(magnitude)
            prev_gray = current_gray
        return total_flow / (len(images) - 1)
    except Exception:
        return 0
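
# A minimal standalone sketch of the matching metric the recognition routes below
# compute inline: dot(a, b) / (||a|| * ||b||). It is not called anywhere in this
# module; it only spells out the cosine-similarity formula for reference.
def _cosine_similarity(a, b):
    """Cosine similarity between two embedding vectors (higher means more alike)."""
    a = np.asarray(a, dtype=np.float64)
    b = np.asarray(b, dtype=np.float64)
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0:
        return 0.0
    return float(np.dot(a, b) / denom)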

@app.route('/insightface/recognize', methods=['POST'])
def insightface_recognize():
    """Recognize multiple faces using InsightFace with individual liveness checks."""
    start_time = time.time()
    try:
        data = request.get_json()
        image_data_list = data.get('images')
        location_id = data.get('location_id')
        
        if not image_data_list or len(image_data_list) < 3:
            return jsonify({'error': 'A list of at least 3 images is required'}), 400

        # --- Anti-Spoofing Check ---
        # The service handles decoding and liveness detection in one call.
        is_live, score, error_message = check_liveness(image_data_list)
        if not is_live:
            return jsonify({'success': False, 'users': [], 'error': error_message}), 400
        
        # --- Image Decoding ---
        t0 = time.time()
        decoded_images = []
        for image_data in image_data_list:
            header, encoded = image_data.split(',', 1)
            image_bytes = base64.b64decode(encoded)
            nparr = np.frombuffer(image_bytes, np.uint8)
            img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
            if img is None:
                return jsonify({'success': False, 'error': 'Invalid image data in list'}), 400
            decoded_images.append(img)
        t1 = time.time()
        print(f"Image Decoding: {(t1 - t0) * 1000:.2f} ms")

        middle_frame = decoded_images[1]
        
        # --- Face Detection & Embedding ---
        t2 = time.time()
        faces = insightface_model.get(middle_frame)
        t3 = time.time()
        print(f"Face Detection & Embedding ({len(faces)} faces): {(t3 - t2) * 1000:.2f} ms")
        
        if not faces:
            return jsonify({'success': False, 'users': [], 'error': 'No faces detected'}), 400
        
        # --- Liveness, Matching & DB Logging ---
        t4 = time.time()
        recognized_users = []
        matched_users = []
        
        # --- Pre-load all known embeddings for efficiency ---
        all_known_embeddings = []
        embedding_files = [f for f in os.listdir(EMBEDDINGS_DIR) if f.startswith('insightface_embeddings_') and f.endswith('.pkl')]
        for embedding_file in embedding_files:
            try:
                file_path = os.path.join(EMBEDDINGS_DIR, embedding_file)
                with open(file_path, 'rb') as f:
                    data = pickle.load(f)
                    for known_embedding in data['embeddings']:
                        all_known_embeddings.append({
                            'user_id': data['user_id'],
                            'name': data.get('name', f'User {data["user_id"]}'),
                            'department': data.get('department', 'N/A'),
                            'embedding': known_embedding
                        })
            except Exception as e:
                print(f" Error loading {embedding_file}: {e}")
                continue

        # --- Process each detected face ---
        for face in faces:
            bbox = face.bbox.astype(int)
            
            # Crop the face for liveness checks
            padding = 20
            x1, y1, x2, y2 = max(0, bbox[0] - padding), max(0, bbox[1] - padding), min(middle_frame.shape[1], bbox[2] + padding), min(middle_frame.shape[0], bbox[3] + padding)
            face_crop = middle_frame[y1:y2, x1:x2]

            # --- LIVENESS DETECTION for each face ---
            sharpness_score = check_sharpness(face_crop)
            texture_score = check_skin_texture(face_crop)
            motion_score = check_micro_motion(decoded_images)
            face_area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])

            # --- Get thresholds from environment variables ---
            MIN_SHARPNESS_THRESHOLD = float(os.environ.get("MIN_SHARPNESS_THRESHOLD", 20))
            MAX_SHARPNESS_THRESHOLD = float(os.environ.get("MAX_SHARPNESS_THRESHOLD", 3000))
            MIN_TEXTURE_THRESHOLD = float(os.environ.get("MIN_TEXTURE_THRESHOLD", 8))
            MAX_TEXTURE_THRESHOLD = float(os.environ.get("MAX_TEXTURE_THRESHOLD", 500))
            MIN_MOTION_THRESHOLD = float(os.environ.get("MIN_MOTION_THRESHOLD", 0.08))
            MAX_MOTION_THRESHOLD = float(os.environ.get("MAX_MOTION_THRESHOLD", 0.8))
            MIN_FACE_AREA = float(os.environ.get("MIN_FACE_AREA", 8000))
            MAX_FACE_AREA = float(os.environ.get("MAX_FACE_AREA", 120000))

            # Detailed logging for debugging
            print(f"--- Liveness Check for face at {bbox} ---")
            print(f"  - Sharpness: {sharpness_score:.2f} (Threshold: {MIN_SHARPNESS_THRESHOLD} - {MAX_SHARPNESS_THRESHOLD})")
            print(f"  - Texture:   {texture_score:.2f} (Threshold: {MIN_TEXTURE_THRESHOLD} - {MAX_TEXTURE_THRESHOLD})")
            print(f"  - Motion:    {motion_score:.4f} (Threshold: {MIN_MOTION_THRESHOLD} - {MAX_MOTION_THRESHOLD})")
            print(f"  - Face Area: {face_area} (Threshold: {MIN_FACE_AREA} - {MAX_FACE_AREA})")
            
            # --- Anti-Spoofing Heuristics ---
            is_live = True
            rejection_reason = ""

            # Rule 1: Basic Sanity Checks
            if not (MIN_SHARPNESS_THRESHOLD < sharpness_score < MAX_SHARPNESS_THRESHOLD):
                is_live = False
                rejection_reason = "Sharpness out of range"
            elif not (MIN_TEXTURE_THRESHOLD < texture_score < MAX_TEXTURE_THRESHOLD):
                is_live = False
                rejection_reason = "Texture out of range"
            elif not (MIN_MOTION_THRESHOLD < motion_score < MAX_MOTION_THRESHOLD):
                is_live = False
                rejection_reason = "Motion out of range"
            elif not (MIN_FACE_AREA < face_area < MAX_FACE_AREA):
                is_live = False
                rejection_reason = "Face Area out of range"

            # Rule 2: High Texture Spoof Heuristic (based on logs)
            # This detects the signature of a phone screen, which often has an unnaturally
            # high texture score from the pixel grid when the face area is large.
            # if face_area > 23000 and texture_score > 200:
            #     is_live = False
            #     rejection_reason = "Spoof Detected: Phone screen pattern (large area, high texture)"

            # Rule 3: High Sharpness Spoof Heuristic
            # This detects a high-resolution screen displaying a very clear photo, which results
            # in an unnaturally high sharpness score that is unlikely in a live feed.
            # if face_area > 23000 and sharpness_score > 600:
            #     is_live = False
            #     rejection_reason = "Spoof Detected: Unnaturally high sharpness"

            # Rule 4: The "Phone Screen" Heuristic
            # This detects the signature of a phone screen: low sharpness but unnaturally high texture from the pixel grid.
            # if face_area > 23000 and sharpness_score < 200 and texture_score > 50:
            #     is_live = False
            #     rejection_reason = "Spoof Detected: Phone screen pattern (low sharpness, high texture)"

            if not is_live:
                print(f"  - ❌ RESULT: Liveness check FAILED. Reason: {rejection_reason}")
                continue # Skip to the next face
            
            print("  - ✅ RESULT: Liveness check PASSED.")

            # --- RECOGNITION for each face ---
            face_embedding = face.embedding
            best_match_id = None
            best_match_name = None
            best_match_department = None
            best_similarity = 0.0
            settings = load_settings()
            threshold = float(settings.get('insightface_threshold', 0.6))

            for known in all_known_embeddings:
                # Calculate cosine similarity correctly
                similarity = float(np.dot(face_embedding, known['embedding']) / (np.linalg.norm(face_embedding) * np.linalg.norm(known['embedding'])))
                if similarity > best_similarity and similarity > threshold:
                    best_similarity = similarity
                    best_match_id = known['user_id']
                    best_match_name = known['name']
                    best_match_department = known['department']

            if best_match_id:
                matched_users.append({
                    'user_id': best_match_id,
                    'employee_info': {'name': best_match_name, 'department': best_match_department},
                    'similarity': best_similarity
                })

        # --- Post-processing: Log attendance only for successfully matched users ---
        if matched_users:
            for user in matched_users:
                best_match_id = user['user_id']
                employee_info = user['employee_info']
                
                # Log attendance and get status from the database
                attendance_status, message, db_timestamp = AttendanceLog.add_record(best_match_id, location_id, similarity=round(float(user['similarity']), 4))
                if attendance_status:
                    print(f"✅ SUCCESSFUL MATCH & LOG: User ID: {best_match_id}, Name: {employee_info['name']}, Similarity: {user['similarity']:.4f}, Status: {attendance_status}")
                    recognized_users.append({
                        'user_id': best_match_id,
                        'user_name': employee_info["name"],
                        'user_department': employee_info["department"],
                        'confidence': round(float(user['similarity']), 3),
                        'message': message,
                        'attendance_status': attendance_status,
                        'timestamp': db_timestamp.strftime("%b. %d, %Y %I:%M %p"),
                        'location_id': location_id
                    })
                else:
                    # Handle case where DB logging fails
                    print(f"❌ FAILED DB LOG: User ID: {best_match_id}, Name: {employee_info['name']}")
        
        t5 = time.time()
        print(f"Liveness, Matching & DB Logging: {(t5 - t4) * 1000:.2f} ms")
        
        end_time = time.time()
        print(f"Total Request Time: {(end_time - start_time) * 1000:.2f} ms")

        if recognized_users:
            return jsonify({
                'success': True,
                'users': recognized_users
            })
        else:
            return jsonify({'success': False, 'users': [], 'error': 'No known faces found'}), 400
            
    except Exception as e:
        print(f" FATAL ERROR in /insightface/recognize: {e}")
        return jsonify({'success': False, 'error': 'An unexpected server error occurred.'}), 500
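
# Illustrative client call for the /insightface/recognize endpoint above. A minimal
# sketch only: the base URL/port is taken from the app.run() call at the bottom of
# this file, and the frame paths and location_id are placeholders.
def _example_recognize_call(frame_paths, location_id=1, base_url="http://localhost:5001"):
    """Sketch: send three captured frames for liveness checking and recognition."""
    images = []
    for path in frame_paths:
        with open(path, 'rb') as f:
            encoded = base64.b64encode(f.read()).decode('utf-8')
        # The route strips everything before the first ',' as a data-URL header,
        # so each frame is sent in data-URL form.
        images.append(f"data:image/jpeg;base64,{encoded}")
    resp = requests.post(f"{base_url}/insightface/recognize",
                         json={'images': images, 'location_id': location_id})
    print(resp.status_code, resp.json())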

def insightface_recognize_standalone():
    """Recognize face using InsightFace"""
    if not INSIGHTFACE_AVAILABLE:
        return jsonify({
            'success': False,
            'error': 'InsightFace not available'
        }), 503
    
    try:
        data = request.get_json()
        image_data = data.get('image')
        
        if not image_data:
            return jsonify({'error': 'No image data provided'}), 400
        
        # Decode image
        header, encoded = image_data.split(',', 1)
        image_bytes = base64.b64decode(encoded)
        nparr = np.frombuffer(image_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        
        if img is None:
            return jsonify({
                'success': False,
                'error': 'Invalid image data'
            }), 400
        
        # Initialize InsightFace if not already done
        if not initialize_insightface():
            return jsonify({
                'success': False,
                'error': 'Failed to initialize InsightFace model'
            }), 500
        
        # Get face embedding with more sensitive detection
        print(f"🔍 Processing image: {img.shape}")
        faces = insightface_model.get(img, max_num=1)
        print(f"🔍 Faces detected: {len(faces) if faces else 0}")
        
        if not faces:
            # Try OpenCV face detection as fallback
            print("🔄 InsightFace failed, trying OpenCV face detection...")
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
            opencv_faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3, minSize=(30, 30))
            print(f"🔍 OpenCV faces detected: {len(opencv_faces)}")
            
            if len(opencv_faces) == 0:
                return jsonify({
                    'success': False,
                    'error': 'No face detected',
                    'message': 'Please ensure your face is clearly visible in the camera',
                    'fallback': 'Try adjusting lighting or camera angle',
                    'debug_info': {
                        'image_shape': img.shape,
                        'image_channels': img.shape[2] if len(img.shape) > 2 else 1,
                        'insightface_detection': 'failed',
                        'opencv_detection': 'failed'
                    }
                }), 400
            else:
                return jsonify({
                    'success': False,
                    'error': 'Face detected but InsightFace processing failed',
                    'message': 'Face found but unable to process with InsightFace',
                    'fallback': 'Try using OpenCV recognition instead',
                    'debug_info': {
                        'image_shape': img.shape,
                        'opencv_faces_found': len(opencv_faces),
                        'insightface_detection': 'failed',
                        'opencv_detection': 'success'
                    }
                }), 400
        
        face_embedding = faces[0].embedding
        
        # Compare with known embeddings
        best_match = None
        best_similarity = 0.0
        
        # Load settings to get threshold
        settings = load_settings()
        threshold = float(settings.get('insightface_threshold', 0.6))  # Cosine similarity threshold (higher is better)
        print(f"🔍 Using InsightFace threshold: {threshold}")
        
        # Load all embeddings from the correct directory
        embedding_files = [f for f in os.listdir(EMBEDDINGS_DIR) if f.startswith('insightface_embeddings_') and f.endswith('.pkl')]
        
        for embedding_file in embedding_files:
            try:
                # Construct the full path to the file
                file_path = os.path.join(EMBEDDINGS_DIR, embedding_file)
                with open(file_path, 'rb') as f:
                    data = pickle.load(f)
                    user_id = data['user_id']
                    
                    for known_embedding in data['embeddings']:
                        # Calculate cosine similarity (higher is better)
                        similarity = float(np.dot(face_embedding, known_embedding) / (np.linalg.norm(face_embedding) * np.linalg.norm(known_embedding)))
                        print(f"🔍 User {user_id} similarity: {similarity:.3f} (threshold: {threshold})")
                        
                        if similarity > best_similarity and similarity > threshold:
                            best_similarity = float(similarity)  # Convert numpy.float32 to Python float
                            best_match = user_id
                            print(f"✅ New best match: User {user_id} with similarity {similarity:.3f}")
                            
            except Exception as e:
                print(f"⚠️ Error loading {embedding_file}: {e}")
                continue
        
        if best_match:
            confidence = float(best_similarity)  # Convert numpy.float32 to Python float
            
            # Fetch employee information from live API endpoint
            print(f"✅ RECOGNITION SUCCESS: User {best_match} recognized with similarity {confidence}")
            print(f"🔄 Fetching employee information for User {best_match} from live API...")
            employee_info = fetch_employee_info(best_match)
            print(f"📋 Employee info result: {employee_info}")

            # --- ATTENDANCE LOGIC ---
            now = datetime.datetime.now()
            timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
            today = now.strftime("%Y-%m-%d")
            log_file = "attendance_log.csv"
            
            # Ensure log file exists with headers
            if not os.path.exists(log_file) or os.stat(log_file).st_size == 0:
                with open(log_file, "w", newline="") as file:
                    writer = csv.writer(file)
                    writer.writerow(["Timestamp", "User ID", "User Name", "Status"])
            
            all_records = []
            today_records = []
            
            with open(log_file, 'r', newline='') as file:
                reader = csv.DictReader(file)
                for row in reader:
                    all_records.append(row)
                    if row.get('User ID') == str(best_match) and row.get('Timestamp', '').startswith(today):
                        today_records.append(row)
            
            # Determine attendance status
            check_in_exists = any(r.get('Status') == 'Check-in' for r in today_records)
            
            if not check_in_exists:
                attendance_status = "Check-in"
                message = f'User {employee_info["name"]} checked in successfully'
            else:
                attendance_status = "Check-out"
                message = f'User {employee_info["name"]} checked out successfully'

            # Append the new record to the log
            with open(log_file, 'a', newline='') as file:
                writer = csv.writer(file)
                writer.writerow([timestamp, str(best_match), employee_info["name"], attendance_status])

            return jsonify({
                'success': True,
                'user_id': best_match,
                'user_name': employee_info["name"],
                'user_email': employee_info["email"],
                'user_department': employee_info["department"],
                'user_status': employee_info["status"],
                'confidence': round(confidence, 3),
                'timestamp': timestamp,
                'attendance_status': attendance_status,
                'message': message,
                'method': 'insightface'
            })
        else:
            # Enhanced error response with fallback information
            return jsonify({
                'success': False,
                'error': 'No match found',
                'threshold': float(threshold),
                'message': 'Face detected but no matching user found in database',
                'fallback': 'Try registering the user or check if embeddings exist'
            }), 400
            
    except Exception as e:
        print(f"❌ Error in insightface_recognize: {e}")
        # Fallback to OpenCV recognition if InsightFace fails
        try:
            print("🔄 Attempting fallback to OpenCV recognition...")
            # Call the regular OpenCV recognition endpoint
            from flask import current_app
            with current_app.test_request_context('/recognize', method='POST', json={'image': data.get('image')}):
                opencv_result = recognize_face()
                if hasattr(opencv_result, 'get_json'):
                    opencv_data = opencv_result.get_json()
                    if opencv_data.get('success'):
                        opencv_data['method'] = 'opencv_fallback'
                        opencv_data['fallback_reason'] = f'InsightFace failed: {str(e)}'
                        return jsonify(opencv_data)
        except Exception as fallback_error:
            print(f"❌ Fallback to OpenCV also failed: {fallback_error}")
        
        return jsonify({
            'success': False,
            'error': str(e),
            'fallback_attempted': True,
            'message': 'Both InsightFace and OpenCV recognition failed'
        }), 500

@app.route('/admin/sync-database', methods=['POST'])
@role_required('Admin', 'Super Admin')
def sync_database(current_user_pid):
    """
    Synchronizes the user_data.json file with the registered_users database table.
    This is a manual utility to ensure consistency.
    """
    try:
        user_data_file = "user_data.json"
        if not os.path.exists(user_data_file):
            return jsonify({'error': 'user_data.json not found.'}), 404

        with open(user_data_file, 'r') as f:
            user_data = json.load(f)

        synced_count = 0
        failed_count = 0
        
        for pid_str in user_data.keys():
            try:
                pid = int(pid_str)
                # The add_or_update method handles both inserts and updates.
                success = RegisteredUser.add_or_update(pid)
                if success:
                    synced_count += 1
                else:
                    failed_count += 1
            except (ValueError, TypeError):
                print(f"Skipping invalid PID from user_data.json: {pid_str}")
                failed_count += 1
                continue
        
        message = f"Database synchronization complete. Synced: {synced_count}, Failed: {failed_count}."
        print(f"✅ {message}")
        return jsonify({'success': True, 'message': message, 'synced': synced_count, 'failed': failed_count})

    except Exception as e:
        print(f"❌ Error during database synchronization: {e}")
        return jsonify({'error': 'An internal error occurred during synchronization.'}), 500

if __name__ == '__main__':
    # from franai.services.scheduler import start_scheduler
    # start_scheduler()  
    app.run(debug=True, port=5001, use_reloader=True)