import os
from io import BytesIO

import math
import requests
import cloudscraper
from PIL import Image
from bs4 import BeautifulSoup
from decord import VideoReader

__author__ = "Jochem van Dolder"
__license__ = "MIT"

# --- Runtime configuration, read from environment variables at import time ---
# NOTE(review): os.getenv returns None when a variable is unset, so every
# int(...) call below raises TypeError for a missing variable — confirm the
# deployment always sets all of them, or this module fails to import.

# Server / app identifiers — presumably consumed by other modules; not read
# in this chunk.
SERVER_URL = os.getenv("SERVER_URL")

DEFAULT_APP = os.getenv("DEFAULT_APP")

# Color palette values — presumably hex color strings; not read in this chunk.
BG_COL = os.getenv("BG_COL")
D_GREY = os.getenv("D_GREY")
N_GREY = os.getenv("N_GREY")
L_GREY = os.getenv("L_GREY")
EMB_BG = os.getenv("EMB_BG")

DEFAULT_CHAR = os.getenv("DEFAULT_CHAR")
DEFAULT_URL = os.getenv("DEFAULT_URL")
DEFAULT_COLOR = os.getenv("DEFAULT_COLOR")

FONT_FAM = os.getenv("FONT_FAM")

# Minimum acceptable frame height after video downscaling
# (compared against in get_frames_from_file).
EMB_IMG_H = int(os.getenv("EMB_IMG_H"))

# Frame-timing bounds (milliseconds) used when sampling video frames.
MIN_FRAME_DURATION_MS = int(os.getenv("MIN_FRAME_DURATION_MS"))
MAX_FRAME_DURATION_MS = int(os.getenv("MAX_FRAME_DURATION_MS"))
# Fallback per-frame duration for animated images lacking "duration" info
# (used in parse_pil).
DEFAULT_FRAME_DURATION_MS = int(os.getenv("DEFAULT_FRAME_DURATION_MS"))
# RAM budget (megabytes) for decoded RGBA video frames in get_frames_from_file.
MAX_RAM_MB = int(os.getenv("MAX_RAM_MB"))

# Recursion cap when following <img> tags in get_frames_from_url.
MAX_URL_RECURSION = int(os.getenv("MAX_URL_RECURSION"))

# UI layout values — presumably pixel sizes; not read in this chunk.
ENTRY_HEIGHT = int(os.getenv("ENTRY_HEIGHT"))
ENTRY_COUNT = int(os.getenv("ENTRY_COUNT"))
HISTORY_WIDTH = int(os.getenv("HISTORY_WIDTH"))

NUM_COLORS = int(os.getenv("NUM_COLORS"))
# HTTP fetch timeout — presumably milliseconds given the name; TODO confirm.
SCRAPER_TIMEOUT_MS = int(os.getenv("SCRAPER_TIMEOUT_MS"))

### --- GLOBAL CLOUDSCRAPER INSTANCE --- ###

# One shared cloudscraper session, reused across all get_frames_from_url calls.
scraper = cloudscraper.create_scraper()

def parse_pil(img:Image.Image):
    """Expand a PIL image into a ``(frames, durations)`` pair.

    A static image becomes a single RGBA frame with duration 0.  An animated
    image (``is_animated``) becomes one RGBA frame per animation frame, each
    paired with its duration in milliseconds (falling back to
    DEFAULT_FRAME_DURATION_MS when the frame carries no "duration" info).
    """
    # Static images have no is_animated attribute at all, hence getattr.
    if not getattr(img, "is_animated", False):
        return [img.convert("RGBA")], [0]
    frames, durations = [], []
    for frame_idx in range(img.n_frames):
        img.seek(frame_idx)
        frames.append(img.convert("RGBA"))
        durations.append(int(img.info.get("duration", DEFAULT_FRAME_DURATION_MS)))
    return frames, durations

def get_frames_from_url(url, depth=0):
    """Resolve *url* to image frames via parse_pil.

    Tries, in order:
      1. a plain ``requests`` GET, parsed directly as an image;
      2. a ``cloudscraper`` GET (gets past some bot protection), parsed as
         an image;
      3. treating the response as an HTML page and recursing into each of
         its ``<img src="http...">`` tags, up to MAX_URL_RECURSION deep.

    Returns a ``(frames, durations)`` pair, or None when nothing resolved
    (a message is printed in that case).
    """
    if depth > MAX_URL_RECURSION:
        return
    # requests/cloudscraper timeouts are in seconds; without one a slow
    # host would block forever.
    timeout = SCRAPER_TIMEOUT_MS / 1000
    try:
        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
        response = requests.get(url, headers=headers, timeout=timeout)
        return parse_pil(Image.open(BytesIO(response.content)))
    except Exception:
        pass  # best-effort: fall through to the next strategy
    try:
        response = scraper.get(url, timeout=timeout)
        return parse_pil(Image.open(BytesIO(response.content)))
    except Exception:
        pass
    try:
        response = scraper.get(url, timeout=timeout)
        if response.status_code != 200:
            response = requests.get(url, timeout=timeout)
        if response.status_code != 200:
            return
        soup = BeautifulSoup(response.text, "html.parser")
        for img in soup.find_all("img"):
            # .get() instead of ['src']: <img> tags without src must be
            # skipped, not blow up the whole scan with a KeyError.
            src = img.get("src")
            if not src or not src.startswith("http"):
                continue
            ret = get_frames_from_url(src, depth + 1)
            if ret:
                return ret
    except Exception:
        pass
    print(f"{url} did not resolve to an image")

def get_frames_from_file(path):
    """Load an image or video file into a ``(frames, durations)`` pair.

    Image files are delegated straight to parse_pil.  Video files are
    decoded with decord: frames are dropped so that the effective per-frame
    duration stays at or above MIN_FRAME_DURATION_MS, and if keeping the
    decoded RGBA frames would still exceed the MAX_RAM_MB budget, the frames
    are additionally downscaled.

    Returns None (after printing a message where applicable) for unsupported
    extensions, for videos that would need to shrink below EMB_IMG_H pixels
    tall, and for clips too short to yield a single sampled frame.
    """
    ext = os.path.splitext(path)[1].lower()
    image_exts = {".png", ".jpg", ".jpeg", ".bmp", ".tiff", ".webp", ".gif"}
    video_exts = {".mp4", ".mov", ".avi", ".mkv", ".webm"}
    if not ((ext in image_exts) or (ext in video_exts)): return
    if ext not in video_exts: return parse_pil(Image.open(path))
    durations = []
    max_ram_byte = MAX_RAM_MB*10**6
    dur_acc = 0
    eff_min_dur = MIN_FRAME_DURATION_MS
    vr = VideoReader(path)
    fps = vr.get_avg_fps()
    # Guard BEFORE any division by fps — the original checked this only
    # after computing vid_length, so a zero-fps container crashed with
    # ZeroDivisionError.
    if fps <= 0: fps = 30
    height, width = vr[0].shape[:2]
    frame_count = len(vr)
    vid_length = frame_count/fps  # seconds
    # If decoding every frame as RGBA (4 bytes/px) would exceed the RAM
    # budget, raise the minimum per-frame duration, i.e. drop frames.
    if frame_count*width*height*4 > max_ram_byte:
        eff_min_dur = int((1000*vid_length*width*height*4)/max_ram_byte)
    # If even maximal frame-dropping is not enough, downscale the frames too.
    if eff_min_dur > MAX_FRAME_DURATION_MS:
        eff_min_dur = MAX_FRAME_DURATION_MS
        scale = math.sqrt((max_ram_byte*eff_min_dur)/(1000*vid_length*width*height*4))
        width = int(width*scale)
        height = int(height*scale)
        # Refuse rather than emit frames too small to display.
        if height < EMB_IMG_H: return print("Video is too big\nplease make it smaller")
    frame_duration = int(1000 / fps)
    indices = []
    for i in range(frame_count):
        dur_acc += frame_duration
        if dur_acc < eff_min_dur: continue
        indices.append(i)
        # Keep this frame for a whole multiple of eff_min_dur and carry the
        # remainder of the accumulated time into the next frame.
        durations.append(dur_acc//eff_min_dur*eff_min_dur)
        dur_acc = dur_acc%eff_min_dur
    # A clip shorter than one effective frame interval yields no indices;
    # bail out instead of asking decord for an empty batch.
    if not indices: return
    # Re-open at the (possibly reduced) target size and decode in one batch.
    vr = VideoReader(path, width=width, height=height)
    batch = vr.get_batch(indices).asnumpy()
    frames = [Image.fromarray(frame).convert("RGBA") for frame in batch]
    return frames, durations
