import os
import sys

sys.path.append(os.path.dirname(sys.path[0]))  # add the parent directory to the import path
from utils.templates import Templates
# from ..utils.templates import Templates
# from AmazonSpider.pyspark_job.utils.templates import Templates
from pyspark.sql.types import StringType
# window functions for group-wise ranking (used in handle_data)
from pyspark.sql.window import Window
from pyspark.sql import functions as F


class DeAsinImage(Templates):
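    """Deduplicate tmp_asin_image for the DE site.

    Merges the 'de1' and 'de' partitions of tmp_asin_image and keeps one
    row per asin, preferring rows from 'de1' (see handle_data).
    """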

    def __init__(self):
        super().__init__()
        self.site_name = "de"
        self.db_save = "tmp_asin_image_copy"
        self.spark = self.create_spark_object(app_name=f"{self.db_save}: {self.site_name}")
        # placeholder DataFrames, populated in read_data / handle_data
        self.df_save = self.spark.sql("select 1+1;")
        self.df1 = self.spark.sql("select 1+1;")
        self.df2 = self.spark.sql("select 1+1;")
        self.df = self.spark.sql("select 1+1;")
        self.partitions_by = ['site_name']
        self.reset_partitions(partitions_num=1)

    def read_data(self):
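        """Read the 'de1' and 'de' partitions of tmp_asin_image.

        Each source is tagged with a flag column (1 for 'de1', 2 for 'de')
        so handle_data can decide which row wins for a duplicated asin.
        """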
        sql1 = f"""
        select
            *,
            1 as flag
        from
            tmp_asin_image
        where
            site_name = 'de1';
        """

        sql2 = f"""
        select
            *,
            2 as flag
        from
            tmp_asin_image
        where
            site_name = 'de';
        """
        self.df1 = self.spark.sql(sqlQuery=sql1).cache()
        self.df2 = self.spark.sql(sqlQuery=sql2).cache()

    def handle_data(self):
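        """Union both sources and keep a single row per asin.

        dense_rank over flag (ascending) ranks 'de1' rows first, so
        filtering rk = 1 prefers 'de1' data; the helper columns are
        dropped before saving.
        """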
        self.df = self.df1.unionAll(self.df2)
        df_window = Window.partitionBy("asin").orderBy(self.df.flag.asc())
        self.df = self.df.withColumn("rk", F.dense_rank().over(df_window))
        self.df_save = self.df.filter("rk = 1")
        self.df_save = self.df_save.drop("flag", "rk")

if __name__ == "__main__":
    handle_obj = DeAsinImage()
    handle_obj.run()