rerun_demo.py
import os
import sys


# Make the parent directory importable so the utils package resolves
sys.path.append(os.path.dirname(sys.path[0]))
from utils.common_util import CommonUtil
from utils.spark_util import SparkUtil
from utils.templates_rerun import TemplatesRerun


class RerunDemo(TemplatesRerun):

    def __init__(self, site_name, date_type, date_info):
        super().__init__()
        self.site_name = site_name
        self.date_type = date_type
        self.date_info = date_info
        app_name = f"{self.__class__.__name__}:{self.site_name}:{self.date_type}:{self.date_info}"
        self.spark = SparkUtil.get_spark_session(app_name)
        # Target table to rerun
        self.db_save = "dwt_aba_st_analytics"
        # Number of partitions for repartitioning
        self.partitions_num = 10
        # Partition fields
        self.partitions_by = ['site_name', 'date_type', 'date_info']
        # Partition values used to build the WHERE condition
        self.partitions_dict = {
            'site_name': self.site_name,
            'date_type': self.date_type,
            'date_info': self.date_info
        }
        # Field added/modified by this rerun
        self.rerun_field = 'is_hidden_cate'
        # Rerun date range
        self.rerun_range = '2022-01,2024-12'
        # Module being rerun ("ABA search terms")
        self.page = 'ABA搜索词'
        # Owner of the rerun task
        self.task_owner = 'chenyuanjie'
        # Reason for the rerun ("new field added")
        self.rerun_reason = '字段新增'
        # Details: add the is_hidden_cate field to flag whether the search term
        # belongs to a hidden category (1 = yes, 0 = no)
        self.rerun_reason_description = '新增字段is_hidden_cate,标记该搜索词是否属于隐藏分类,1是 0否'
        # DataFrame objects: df_history_data is expected to be provided by the
        # TemplatesRerun base class (the existing partition data); df_save is a
        # placeholder here and is rebuilt in rerun_handle_data()
        self.df_history_data = self.df_history_data
        self.df_save = self.spark.sql("select 1+1;")

    def rerun_handle_data(self):
        # Build df_save: flag history rows whose top-level BSR category is one of
        # the hidden categories
        sql = f"""
        select 
            category_id as st_bsr_cate_1_id_new, 
            1 as is_hidden_cate 
        from dim_bsr_category_tree
        where site_name = '{self.site_name}'
          and en_name in ("Apps & Games", "Audible Books & Originals", "Books", "CDs & Vinyl", "Digital Music", "Kindle Store", "Movies & TV", "Software")
          and category_parent_id = 0;
        """
        df_is_hidden_cate = self.spark.sql(sqlQuery=sql).repartition(self.partitions_num).cache()
        # Left-join the hidden-category flag onto the history data; rows with no
        # match default to is_hidden_cate = 0 (not hidden)
        self.df_save = self.df_history_data.join(
            df_is_hidden_cate, 'st_bsr_cate_1_id_new', 'left'
        ).na.fill({
            "is_hidden_cate": 0
        })


if __name__ == '__main__':
    site_name = CommonUtil.get_sys_arg(1, None)
    date_type = CommonUtil.get_sys_arg(2, None)
    date_info = CommonUtil.get_sys_arg(3, None)
    obj = RerunDemo(site_name=site_name, date_type=date_type, date_info=date_info)
    obj.rerun()
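
# A minimal invocation sketch. The argument values below are illustrative
# assumptions, not taken from this repo's scheduling config; TemplatesRerun.rerun()
# is assumed to load the existing partition into df_history_data, call
# rerun_handle_data(), and write df_save back to dwt_aba_st_analytics:
#
#   spark-submit rerun_demo.py us month 2024-10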