import os
import sys

sys.path.append(os.path.dirname(sys.path[0]))
from utils.common_util import CommonUtil
from utils.spark_util import SparkUtil
from utils.templates_rerun import TemplatesRerun
from pyspark.sql import functions as F


class RerunDemo(TemplatesRerun):

    def __init__(self, site_name, date_type, date_info):
        super().__init__()
        self.site_name = site_name
        self.date_type = date_type
        self.date_info = date_info
        app_name = f"{self.__class__.__name__}:{self.site_name}:{self.date_type}:{self.date_info}"
        self.spark = SparkUtil.get_spark_session(app_name)
        # Name of the table to rerun
        self.db_save = "dim_st_detail"
        # Number of output partitions
        self.partitions_num = 3
        # Partition columns
        self.partitions_by = ['site_name', 'date_type', 'date_info']
        # Partition values used to build the WHERE condition
        self.partitions_dict = {
            'site_name': self.site_name,
            'date_type': self.date_type,
            'date_info': self.date_info
        }
        # Field to recompute
        self.rerun_field = 'st_appear_history_counts'
        # Rerun range
        self.rerun_range = '2024-07'
        # Module being rerun (ABA search terms)
        self.page = 'ABA搜索词'
        # Owner of the rerun task
        self.task_owner = 'chenyuanjie'
        # Reason for the rerun ("data anomaly")
        self.rerun_reason = '数据异常'
        # Details ("word-frequency data is wrong; rerun to verify")
        self.rerun_reason_description = '词频数据有误,重跑验证'
        # DataFrame handles: df_history_data is expected to be populated with the
        # existing partition data by the parent TemplatesRerun class; df_save is a
        # placeholder here and is overwritten in rerun_handle_data().
        self.df_history_data = self.df_history_data
        self.df_save = self.spark.sql("select 1+1")

    def rerun_handle_data(self):
        # Recompute st_appear_history_counts (the number of weeks in which each
        # search term appears during the target month) and join it onto the
        # historical data.
        sql = f"""
        select 
            year_week 
        from dim_date_20_to_30 
        where week_day=1
          and year_month = '{self.date_info}'
        """
        df_week = self.spark.sql(sql)
        year_week_tuple = tuple(df_week.select('year_week').rdd.flatMap(lambda x: x).collect())
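        # Note: the tuple is interpolated directly into the IN clause below; this
        # assumes the month resolves to more than one week, otherwise the
        # single-element tuple would render with a trailing comma and break the SQL.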

        sql = f"""
        select * 
        from ods_brand_analytics 
        where site_name = '{self.site_name}' 
          and date_type = 'week' 
          and date_info in {year_week_tuple} 
          and rank <= 1500000
        """
        df_st_detail_days = self.spark.sql(sql).cache()
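        # Keep at most one record per search term per week, then count how many
        # distinct weeks each search term appeared in.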
        df_st_detail_days = df_st_detail_days.drop_duplicates(['search_term', 'date_info'])
        df_st_detail_days = df_st_detail_days.groupby(['search_term']).agg(
            F.count('date_info').alias('st_appear_history_counts')
        )
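        # Attach the recomputed counts to the historical data via a left join; the
        # parent TemplatesRerun flow is assumed to handle replacing the old column
        # and writing df_save back to dim_st_detail.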
        self.df_save = self.df_history_data.join(
            df_st_detail_days, 'search_term', 'left'
        )


if __name__ == '__main__':
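    # Example invocation (hypothetical script name and argument values):
    #   spark-submit rerun_demo.py us month 2024-07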
    site_name = CommonUtil.get_sys_arg(1, None)
    date_type = CommonUtil.get_sys_arg(2, None)
    date_info = CommonUtil.get_sys_arg(3, None)
    obj = RerunDemo(site_name=site_name, date_type=date_type, date_info=date_info)
    obj.rerun()