import json
import os
import subprocess
import sys
import threading
from concurrent.futures import ThreadPoolExecutor

import requests

# Configuration: size of the thread pool used for simultaneous downloads.
NUM_WORKERS = 5  # Adjust this variable to control the number of simultaneous downloads

def download_json(bucket_name, download_directory):
    """Fetch the bucket's file-listing JSON and save it to disk.

    Args:
        bucket_name: Bucket whose listing is requested from the server.
        download_directory: Directory the listing file is written into.

    Returns:
        Path of the saved ``<bucket_name>_file_list.json`` file.

    Exits the process on any network or filesystem failure — the rest of
    the script cannot do anything useful without the listing.
    """
    try:
        url = f"https://livetimelapse.com.au/ai/chatgpt/internal/b2/servers/www.php?bucket_name={bucket_name}"
        # A timeout keeps the script from hanging forever on a dead server
        # (requests has no default timeout). stream=True was dropped: the
        # response.json() call below consumes the whole body anyway.
        response = requests.get(url, timeout=30)
        response.raise_for_status()  # Raise an exception for HTTP errors

        # Save the listing next to the files it describes.
        json_path = os.path.join(download_directory, f"{bucket_name}_file_list.json")
        with open(json_path, "w", encoding="utf-8") as json_file:
            json.dump(response.json(), json_file, indent=4)

        print(f"JSON file downloaded and saved to: {json_path}")
        return json_path
    except Exception as e:
        print(f"Error downloading JSON file: {e}")
        sys.exit(1)

def load_json_file(json_path):
    """Parse the JSON file at *json_path* and return the decoded object.

    Exits the whole process on any read/parse failure — this script treats
    a missing or corrupt listing as fatal.
    """
    try:
        # Explicit encoding so parsing does not depend on the locale;
        # download_json writes the file as UTF-8-compatible JSON.
        with open(json_path, "r", encoding="utf-8") as json_file:
            return json.load(json_file)
    except Exception as e:
        print(f"Error loading JSON file: {e}")
        sys.exit(1)

def download_file(file_info, base_dir, stats, _stats_lock=threading.Lock()):
    """Download one file described by *file_info* into *base_dir*.

    Args:
        file_info: Mapping with a "path" key (relative destination path)
            and a "cdn" key (URL to download from).
        base_dir: Root directory the relative path is joined onto.
        stats: Shared dict with "downloaded"/"skipped" counters.
        _stats_lock: Internal shared lock guarding *stats*. This function
            runs concurrently in a ThreadPoolExecutor and ``+= 1`` on a
            dict entry is not atomic, so unguarded increments can be lost.

    Errors are printed and swallowed so one bad file does not abort the
    whole batch.
    """
    try:
        file_path = file_info["path"]
        cdn_url = file_info["cdn"]  # Use CDN URL instead of Friendly URL

        local_path = os.path.join(base_dir, file_path)
        parent_dir = os.path.dirname(local_path)
        if parent_dir:  # "path" may be a bare filename with no directory part
            os.makedirs(parent_dir, exist_ok=True)

        # Skip files already mirrored by a previous run.
        if os.path.exists(local_path):
            print(f"File already exists, skipping: {local_path}")
            with _stats_lock:
                stats["skipped"] += 1
            return

        print(f"Downloading: {cdn_url}")

        response = requests.get(cdn_url, stream=True, timeout=60)
        response.raise_for_status()

        # Write to a temporary name first so an interrupted download never
        # leaves a truncated file that the exists-check above would then
        # skip forever on the next run.
        tmp_path = local_path + ".part"
        try:
            with open(tmp_path, "wb") as file:
                for chunk in response.iter_content(chunk_size=8192):
                    file.write(chunk)
            os.replace(tmp_path, local_path)  # atomic on the same filesystem
        except Exception:
            # Remove the partial file before handing off to the outer handler.
            if os.path.exists(tmp_path):
                os.remove(tmp_path)
            raise

        print(f"Saved to: {local_path}")
        with _stats_lock:
            stats["downloaded"] += 1
    except Exception as e:
        # .get() so a missing "path" key cannot raise inside the handler.
        print(f"Error downloading {file_info.get('path', '<unknown>')}: {e}")

def compress_with_7zip(directory, zip_name):
    """Compress everything under *directory* into *zip_name* using 7-Zip.

    Invokes the external ``7z`` binary. Failures are printed rather than
    raised so the caller's download summary still completes.
    """
    zip_path = os.path.abspath(zip_name)
    dir_path = os.path.abspath(directory)
    # Argument-list form with shell=False avoids quoting/injection issues
    # when paths contain spaces or shell metacharacters; 7z expands the
    # "*" wildcard itself. os.path.join keeps the separator portable
    # (the original hard-coded a Windows backslash).
    command = ["7z", "a", zip_path, os.path.join(dir_path, "*")]

    print(f"Compressing files into: {zip_path}")
    try:
        result = subprocess.run(
            command, check=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True,
        )
        # Output the result of the compression
        print(result.stdout)
        print(f"Compression completed: {zip_path}")
    except FileNotFoundError:
        print("Error during compression: 7z executable not found on PATH")
    except subprocess.CalledProcessError as e:
        print(f"Error during compression: {e.stderr}")

def main(bucket_name, download_directory):
    """Fetch the bucket's file list, mirror every file, then zip the result.

    Args:
        bucket_name: Remote bucket whose listing is fetched.
        download_directory: Local root holding the JSON listing, the
            mirrored files and the final archive.
    """
    # Ensure the download directory exists
    os.makedirs(download_directory, exist_ok=True)

    # Steps 1-2: fetch the file listing and load it back into memory.
    json_path = download_json(bucket_name, download_directory)
    file_list = load_json_file(json_path)

    # Step 3: files are mirrored under <download_directory>/<bucket_name>.
    bucket_dir = os.path.join(download_directory, bucket_name)
    os.makedirs(bucket_dir, exist_ok=True)

    # Step 4: shared counters, mutated by the worker threads.
    stats = {"downloaded": 0, "skipped": 0}

    # Step 5: fan the downloads out over the thread pool.
    print(f"Downloading files to: {bucket_dir}")
    with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
        futures = [
            executor.submit(download_file, file_info, bucket_dir, stats)
            for file_info in file_list
        ]
        # download_file handles its own expected errors; .result() here
        # surfaces anything unexpected instead of silently discarding it.
        # Leaving the with-block already waits for completion, so no extra
        # shutdown(wait=True) call is needed.
        for future in futures:
            future.result()

    # Step 6: Print stats
    print("\nDownload Summary:")
    print(f"Total files downloaded: {stats['downloaded']}")
    print(f"Total files skipped (already exist): {stats['skipped']}")

    # Step 7: Compress files using 7-Zip
    zip_name = f"{bucket_dir}.zip"
    compress_with_7zip(bucket_dir, zip_name)

# CLI entry point: requires exactly two positional arguments.
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: python3 script.py <bucket_name> <download_directory>")
        sys.exit(1)

    bucket_name = sys.argv[1]
    download_directory = sys.argv[2]

    main(bucket_name, download_directory)
