#!/usr/bin/env python3
# -*- coding: utf-8 -*-


import traceback
import datetime
import json
import time
import os
import shutil
import time
from PIL import Image

from pyvirtualdisplay import Display

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.remote.webelement import WebElement
from selenium.common.exceptions import NoSuchElementException


def datuak_jaso(url2):
    """Open an article page in its own Chrome instance and scrape it.

    Parameters:
        url2: absolute URL of a berria.eus article page.

    Returns:
        dict with keys "titulua" (title, "" if missing), "azpititulua"
        (subtitle, "" if missing), "argazkia" (main image src or None)
        and "pub_data" (ISO publish timestamp string or None), or
        None when the page could not be processed at all.
    """
    # Initialize before the try so the finally block is safe even when
    # webdriver.Chrome() itself raises (previously: NameError in finally).
    driver2 = None
    try:
        driver2 = webdriver.Chrome('../../chromedriver/chromedriver')
        driver2.get(url2)
        pub_data = meta(driver2.page_source)

        title = ""
        try:
            titleclass = driver2.find_element_by_class_name(
                'c-mainarticle__title')
        except NoSuchElementException:
            titleclass = None
        if titleclass:
            title = titleclass.text

        subtitle = ""
        try:
            subtclass = driver2.find_element_by_class_name(
                'c-mainarticle__subtitle')
        except NoSuchElementException:
            subtclass = None
        if subtclass:
            subtitle = subtclass.text

        img_src = None
        try:
            img1class = driver2.find_element_by_class_name(
                'c-mainarticle__img')
            if img1class:
                img_src = img1class.get_attribute("src")
        except NoSuchElementException:
            # No main image on this article; img_src stays None.
            pass

        return {
            "titulua": title,
            "azpititulua": subtitle,
            "argazkia": img_src,
            "pub_data": pub_data
        }

    except Exception as e:
        print("eeeeeee", e)
        traceback.print_exc()
        # Explicit: callers must handle a None result.
        return None
    finally:
        if driver2:
            driver2.close()


def meta(html_content) -> "str | None":
    """Extract the article's published time from its HTML meta tags.

    Looks for <meta property="article:published_time" content="..."/>,
    e.g. content="2024-01-13T21:53:00+01:00".

    Parameters:
        html_content: raw HTML of the article page.

    Returns:
        The published-time string exactly as found in the tag (callers
        serialize it to JSON, so the string — not a datetime — is
        returned), or None when the tag is missing or its content does
        not parse as an ISO timestamp.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    meta_tag = soup.find('meta', {'property': 'article:published_time'})

    if not meta_tag:
        print("No se encontró la etiqueta meta con property='article:published_time'.")
        return None

    published_time = meta_tag.get('content')
    try:
        # Parse only to validate the format; the original string is
        # what gets returned and stored.
        datetime.datetime.strptime(published_time, '%Y-%m-%dT%H:%M:%S%z')
    except (TypeError, ValueError):
        # Tag present but content missing/malformed — treat as absent
        # instead of crashing the caller with an unrelated exception.
        return None
    return published_time


def save_screenshot(driver: webdriver.Chrome, path: str = '/tmp/screenshot.png',
                    width: int = 400, height: int = 800) -> None:
    """Save a scrollbar-free full-page screenshot, scaled and cropped.

    The browser window is temporarily resized to the full document
    height, the <body> element is screenshotted (avoids the scrollbar),
    then the image is scaled to *width* px wide (height rounded to an
    even number) and cropped to the top *width* x *height* band.

    Parameters:
        driver: an open Chrome webdriver positioned on the target page.
        path:   destination PNG path.
        width:  output width in pixels (default 400, as before).
        height: output height in pixels (default 800, as before).
    """
    tmp_path = "/tmp/screen1.png"
    # Ref: https://stackoverflow.com/a/52572919/
    original_size = driver.get_window_size()
    required_width = driver.execute_script(
        'return document.body.parentNode.scrollWidth')
    required_height = driver.execute_script(
        'return document.body.parentNode.scrollHeight')
    # Grow the window so the whole document fits without scrolling.
    driver.set_window_size(required_width, required_height)
    # driver.save_screenshot(path)  # has scrollbar
    driver.find_element_by_tag_name(
        'body').screenshot(tmp_path)  # avoids scrollbar
    driver.set_window_size(original_size['width'], original_size['height'])

    image = Image.open(tmp_path)
    try:
        print(f"Original size : {image.size}")
        # Scale to the target width, keeping aspect ratio; the */2 ... *2
        # dance rounds the new height to an even number.
        resize1 = image.resize(
            (width, round(image.size[1] * width / 2 / image.size[0]) * 2))
        print(f"Original size1 : {resize1.size}")
        resize2 = resize1.crop((0, 0, width, height))
        print(f"Original size1 : {resize2.size}")
        print("image path: ", path)
        resize2.save(path)
    finally:
        # Close the PIL handle and remove the temp file even if the
        # resize/crop/save above fails (previously leaked on error).
        image.close()
        os.unlink(tmp_path)


# Main script: screenshot the berria.eus front page, scrape up to 10
# headline articles, and merge the result into the aggregate JSON files.
#
# Initialize before the try so the finally block never hits a NameError
# when Display() or webdriver.Chrome() fails to start (previously both
# names could be unbound in finally).
driver = None
display = None

try:
    # Run relative to the script's own directory so ../dat paths resolve.
    non = os.path.abspath(os.path.dirname(os.sys.argv[0]))
    os.chdir(non)

    # Headless X display for the browser.
    display = Display(visible=0, size=(1920, 1080))
    display.start()

    driver = webdriver.Chrome('../../chromedriver/chromedriver')

    driver.get("https://www.berria.eus")

    time.sleep(2)

    # Best-effort dismissal of the push-notification and cookie banners:
    # their absence must not abort the whole scrape.
    try:
        driver.find_element_by_id('onesignal-slidedown-cancel-button').click()
    except NoSuchElementException:
        pass

    try:
        driver.find_element_by_id('CybotCookiebotDialogBodyButtonDecline').click()
    except NoSuchElementException:
        pass

    time.sleep(2)

    altura_total = driver.execute_script(
        "return Math.max( document.body.scrollHeight, document.body.offsetHeight, document.documentElement.clientHeight, document.documentElement.scrollHeight, document.documentElement.offsetHeight);")

    # Resize the browser window to cover the full page height.
    driver.set_window_size(driver.execute_script(
        "return window.innerWidth;"), altura_total)

    time.sleep(3)

    save_screenshot(driver, "/tmp/berria.png")
    shutil.move("/tmp/berria.png", "../dat/berria_osoa.png")

    titles = driver.find_elements_by_class_name('c-item__title')
    emaitza = {
        "hedabidea": "berria",
        "logo": "berria.png",
        "azala": "dat/berria_osoa.png",
        "albisteak": []
    }

    for title in titles:
        at = title.find_element_by_tag_name("a")
        url_t = at.get_attribute("href")
        tt = datuak_jaso(url_t)
        # datuak_jaso returns None on failure; guard before indexing
        # (previously a failed article crashed the whole run).
        if tt and tt['titulua'] and tt['argazkia']:
            emaitza["albisteak"].append(tt)

        if len(emaitza["albisteak"]) >= 10:
            break

    driver.close()

    with open('../dat/berria.json', 'w', encoding='utf-8') as f:
        json.dump(emaitza, f, ensure_ascii=False, indent=4)

    # Merge this outlet's entry into the aggregate file, replacing any
    # previous "berria" entry. Start fresh if the file doesn't exist yet.
    try:
        with open('../dat/albisteak.json', 'r', encoding='utf-8') as f:
            albisteak = json.load(f)
    except FileNotFoundError:
        albisteak = []

    l2 = [x for x in albisteak if x['hedabidea'] != emaitza['hedabidea']]
    l2.append(emaitza)
    with open('../dat/albisteak.json', 'w', encoding='utf-8') as f:
        json.dump(l2, f, ensure_ascii=False, indent=4)

    # Collect every headline across all outlets for the RSS feed.
    rss_tit = [albistea['titulua']
               for hedabidea in l2
               for albistea in hedabidea.get('albisteak', [])]
    with open('../dat/rss.json', 'w', encoding='utf-8') as f:
        json.dump(rss_tit, f, ensure_ascii=False, indent=4)

finally:
    if driver:
        driver.quit()

    if display:
        display.stop()