import os
import logging

from apscheduler.schedulers.background import BackgroundScheduler
from django.core.management import call_command

from horilla import settings

from .gdrive import *

# from horilla.settings import DBBACKUP_STORAGE_OPTIONS
from .models import *
from .pgdump import *
from .db_utils import dump_database
from .zip import *

# Module-wide scheduler shared by every backup job defined in this module;
# started lazily by start_gdrive_backup_job() when the first job is added.
scheduler = BackgroundScheduler()

# Set up logging
logger = logging.getLogger(__name__)

# def backup_database():
#     folder_path = DBBACKUP_STORAGE_OPTIONS['location']
#     local_backup = LocalBackup.objects.first()
#     if folder_path and local_backup:
#         DBBACKUP_STORAGE_OPTIONS['location'] = local_backup.backup_path
#         folder_path = DBBACKUP_STORAGE_OPTIONS['location']
#         if local_backup.backup_db:
#             call_command('dbbackup')
#         if local_backup.backup_media:
#             call_command("mediabackup")
#         files = sorted(os.listdir(folder_path), key=lambda x: os.path.getctime(os.path.join(folder_path, x)))

#         # Remove all files except the last two
#         if len(files) > 2:
#             for file_name in files[:-2]:
#                 file_path = os.path.join(folder_path, file_name)
#                 if os.path.isfile(file_path):
#                     try:
#                         os.remove(file_path)
#                     except:
#                         pass


# def start_backup_job():
#     """
#     Start the backup job based on the LocalBackup configuration.
#     """
#     # Check if any LocalBackup object exists
#     if LocalBackup.objects.exists():
#         local_backup = LocalBackup.objects.first()

#             # Remove existing job if it exists
#         try:
#             scheduler.remove_job('backup_job')
#         except:
#             pass

#         # Add new job based on LocalBackup configuration
#         if local_backup.interval:
#             scheduler.add_job(backup_database, 'interval', seconds=local_backup.seconds, id='backup_job')
#         else:
#             scheduler.add_job(backup_database, trigger='cron', hour=local_backup.hour, minute=local_backup.minute, id='backup_job')
#         # Start the scheduler if it's not already running
#         if not scheduler.running:
#             scheduler.start()
#     else:
#         stop_backup_job()


# def stop_backup_job():
#     """
#     Stop the backup job if it exists.
#     """
#     try:
#         scheduler.remove_job('backup_job')
#     except:
#         pass


# def restart_backup_job():
#     """
#     Restart the backup job by stopping it and starting it again.
#     """
#     stop_backup_job()
#     start_backup_job()


def google_drive_backup():
    """Back up the database and/or media files to Google Drive.

    Uses the first ``GoogleDriveBackup`` configuration row; a no-op when
    none exists or when the configured ``auth_method`` is unrecognized.
    Temporary artifacts (``backupdb.dump`` / ``media.zip``) are written to
    the current working directory, uploaded via ``upload_file()``, and
    removed afterwards — including after a failed attempt.
    """
    if not GoogleDriveBackup.objects.exists():
        return

    google_drive = GoogleDriveBackup.objects.first()
    gdrive_folder_id = google_drive.gdrive_folder_id

    logger.info("Starting Google Drive backup process")

    # Keyword arguments forwarded to upload_file(), depending on the
    # configured authentication method.
    if google_drive.auth_method == "service_account":
        auth_params = {
            "service_account_file": google_drive.service_account_file.path
        }
    elif google_drive.auth_method == "oauth":
        auth_params = {
            "client_id": google_drive.oauth_client_id,
            "client_secret": google_drive.oauth_client_secret,
            "refresh_token": google_drive.oauth_refresh_token,
        }
    else:
        logger.error("Unknown authentication method: %s", google_drive.auth_method)
        return

    if google_drive.backup_db:
        dump_file = "backupdb.dump"
        try:
            logger.info("Starting database backup")
            if dump_database(dump_file):
                logger.info(
                    "Database dump successful, uploading to Google Drive using %s",
                    google_drive.auth_method,
                )
                upload_file(
                    dump_file, gdrive_folder_id, google_drive.auth_method, **auth_params
                )
                os.remove(dump_file)
                logger.info("Database backup uploaded and local file cleaned up")
            else:
                logger.error("Database backup failed")
        except Exception:
            # logger.exception records the traceback, which the original
            # logger.error(f"... {e}") call discarded.
            logger.exception("Error during database backup")
            _cleanup_backup_file(dump_file, "database dump")

    if google_drive.backup_media:
        zip_file = "media.zip"
        try:
            logger.info("Starting media backup")
            zip_folder(settings.MEDIA_ROOT, zip_file)
            logger.info(
                "Media files zipped, uploading to Google Drive using %s",
                google_drive.auth_method,
            )
            upload_file(
                zip_file, gdrive_folder_id, google_drive.auth_method, **auth_params
            )
            os.remove(zip_file)
            logger.info("Media backup uploaded and local file cleaned up")
        except Exception:
            logger.exception("Error during media backup")
            _cleanup_backup_file(zip_file, "media backup")

    logger.info("Google Drive backup process completed")


def _cleanup_backup_file(path, label):
    """Remove a leftover local backup artifact after a failed attempt.

    Shared by the database and media branches above to avoid duplicating
    the exists/remove/log sequence.
    """
    if os.path.exists(path):
        os.remove(path)
        logger.info("Cleaned up failed %s file", label)


def start_gdrive_backup_job():
    """(Re)schedule the Google Drive backup job from the stored configuration.

    Reads the first ``GoogleDriveBackup`` row: when ``interval`` is truthy
    the job runs every ``seconds`` seconds, otherwise it runs on a cron
    trigger at ``hour``:``minute``. When no configuration row exists, any
    scheduled job is removed instead. Starts the shared scheduler if it is
    not already running.
    """
    if not GoogleDriveBackup.objects.exists():
        stop_gdrive_backup_job()
        return

    gdrive_backup = GoogleDriveBackup.objects.first()

    # Remove any existing job first; otherwise add_job() with the same id
    # raises ConflictingIdError. BUG FIX: the original removed "backup_job",
    # but this job is scheduled under id "gdrive_backup_job", so the stale
    # job was never actually removed on reconfiguration.
    try:
        scheduler.remove_job("gdrive_backup_job")
    except Exception:
        pass  # job was not scheduled yet — nothing to remove

    # Add the new job based on the GoogleDriveBackup configuration.
    if gdrive_backup.interval:
        scheduler.add_job(
            google_drive_backup,
            "interval",
            seconds=gdrive_backup.seconds,
            id="gdrive_backup_job",
        )
    else:
        scheduler.add_job(
            google_drive_backup,
            trigger="cron",
            hour=gdrive_backup.hour,
            minute=gdrive_backup.minute,
            id="gdrive_backup_job",
        )

    # Start the scheduler if it's not already running.
    if not scheduler.running:
        scheduler.start()

def stop_gdrive_backup_job():
    """Remove the scheduled Google Drive backup job, if one exists."""
    try:
        scheduler.remove_job("gdrive_backup_job")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed. A JobLookupError here simply means the job
        # was never scheduled — safe to ignore.
        pass


# def restart_gdrive_backup_job():
#     """
#     Restart the backup job by stopping it and starting it again.
#     """
#     stop_gdrive_backup_job()
#     start_gdrive_backup_job()
