# dwd_asin_title_number.py
import os
import sys

sys.path.append(os.path.dirname(sys.path[0]))

from utils.common_util import CommonUtil
from utils.hdfs_utils import HdfsUtils
from utils.spark_util import SparkUtil
from pyspark.sql import functions as F
from pyspark.sql.types import IntegerType
from yswg_utils.common_udf import udf_title_number_parse_reg
from utils.db_util import DBUtil

"""
根据不同的历史asin解析asin标题 获取标题中的信息
"""


class DwdAsinTitleNumber(object):

    def __init__(self, site_name, date_type, date_info):
        self.site_name = site_name
        self.date_type = date_type
        self.date_info = date_info
        app_name = f"{self.__class__.__name__}:{site_name}:{date_type}:{date_info}"
        self.spark = SparkUtil.get_spark_session(app_name)
        self.hive_tb = "dwd_asin_title_number"

        self.udf_title_number_parse_reg = udf_title_number_parse_reg()

    def run(self):
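        # Step 1: read ASIN titles from dim_asin_detail for the target site / date partition.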
        sql = f"""
            select 
                asin_title as title, 
                asin
            from dim_asin_detail
            where site_name = '{self.site_name}'
            and date_type = '{CommonUtil.get_rel_date_type('dim_asin_detail', self.date_type)}'
            and date_info = '{self.date_info}'
        """
        print(sql)
        df_asin_detail = self.spark.sql(sql)

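        # Step 2: read manually corrected package quantities from the PostgreSQL edit log,
        # keeping only the most recent correction per ASIN.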
        sql = f"""
            WITH ranked_edit_logs AS (
                SELECT 
                    edit_key_id, 
                    val_after, 
                    ROW_NUMBER() OVER (PARTITION BY edit_key_id ORDER BY create_time DESC) AS rn 
                FROM sys_edit_log 
                WHERE module = '流量选品' 
                AND filed = 'package_quantity' 
                AND site_name='{self.site_name}'
            )
            SELECT 
                edit_key_id AS asin, 
                cast(val_after as int) AS user_package_num
            FROM ranked_edit_logs 
            WHERE rn = 1
        """
        print(sql)
        pg_con_info = DBUtil.get_connection_info("postgresql", "us")
        if pg_con_info is not None:
            df_user_package_num = SparkUtil.read_jdbc_query(
                session=self.spark,
                url=pg_con_info['url'],
                pwd=pg_con_info['pwd'],
                username=pg_con_info['username'],
                query=sql
            )
            df_user_package_num = F.broadcast(df_user_package_num)
        else:
            # Fail fast: without this DataFrame the join below would raise a NameError.
            raise Exception("Could not obtain PostgreSQL connection info; unable to load user package quantities.")

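        # Step 3: explode the UDF output so each parsed (label, value, match) struct becomes its own row,
        # then attach the user-corrected package quantity per ASIN via a left join.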
        df_asin_detail = df_asin_detail.withColumn(
            "split_detail",
            F.explode(self.udf_title_number_parse_reg(F.col("title")))
        )
        df_all = df_asin_detail.join(
            df_user_package_num, on='asin', how='left'
        )

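        # Step 4: assemble the output schema, preferring the user-corrected package quantity over the parsed one.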
        df_all = df_all.select(
            F.col("asin"),
            F.col("title"),
            F.col("split_detail").getField("label").alias("label"),
            # Note: cast the parsed value to a numeric type
            F.coalesce(
                F.col("user_package_num"),
                F.col("split_detail").getField("value").cast(IntegerType())
            ).alias("value"),
            F.col("split_detail").getField("match").alias("match"),
            F.current_date().alias("update_time"),
            F.lit(self.site_name).alias("site_name"),
            F.lit(self.date_type).alias("date_type"),
            F.lit(self.date_info).alias("date_info")
        )

        # Adjust the number of partitions (and thus output files) before writing
        df_save = df_all.repartition(5)
        partition_dict = {
            "site_name": self.site_name,
            "date_type": self.date_type,
            "date_info": self.date_info,
        }
        partition_by = list(partition_dict.keys())
        hdfs_path = CommonUtil.build_hdfs_path(self.hive_tb, partition_dict=partition_dict)
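        # Delete the existing HDFS partition directory so the append below effectively overwrites this partition.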
        print(f"清除hdfs目录中.....{hdfs_path}")
        HdfsUtils.delete_hdfs_file(hdfs_path)

        print(f"当前存储的表名为:{self.hive_tb},分区为{partition_by}", )
        df_save.write.saveAsTable(name=self.hive_tb, format='hive', mode='append', partitionBy=partition_by)
        print("success")


if __name__ == '__main__':
    site_name = CommonUtil.get_sys_arg(1, None)
    date_type = CommonUtil.get_sys_arg(2, None)
    date_info = CommonUtil.get_sys_arg(3, None)
    obj = DwdAsinTitleNumber(site_name, date_type, date_info)
    obj.run()
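
# Example invocation (argument values are illustrative, not taken from the source):
#   python dwd_asin_title_number.py us month 2023-10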