# dim_asin_title_info_old.py
import os
import re
import sys

os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
sys.path.append(os.path.dirname(sys.path[0]))  # add the parent directory to the import path
from utils.templates import Templates
from pyspark.sql.types import StringType
# window functions, used below to keep only the latest title per asin
from pyspark.sql.window import Window
from pyspark.sql import functions as F


class DimAsinTitleInfo(Templates):

    def __init__(self, site_name='us', date_type="month", date_info='2022-1'):
        super().__init__()
        self.site_name = site_name
        self.date_type = date_type
        self.date_info = date_info
        self.db_save_vertical = 'dim_asin_title_info_vertical'
        self.db_save_wide = 'dim_asin_title_info_wide'
        self.spark = self.create_spark_object(app_name=f"{self.db_save_vertical}: {self.site_name}, {self.date_type}, {self.date_info}")
        # placeholder DataFrames; populated in read_data() and handle_data()
        self.df_theme = self.spark.sql("select 1+1;")
        self.df_asin_title = self.spark.sql("select 1+1;")
        self.df_save_vertical = self.spark.sql("select 1+1;")  # vertical (long) table
        self.df_save_wide = self.spark.sql("select 1+1;")  # wide table
        # register the theme-matching UDF
        self.u_theme_pattern = F.udf(self.udf_theme_pattern, StringType())
        # other state
        self.pattern = str()  # regex pattern, built in handle_theme()
        # partition parameters
        self.partitions_by = ['site_name']
        self.partitions_num = 100

    @staticmethod
    def udf_theme_pattern(title, pattern):
        """Return a comma-separated, de-duplicated string of themes found in the title."""
        results_list = re.findall(pattern, title)  # title is already lower-cased, so re.IGNORECASE is unnecessary
        if results_list:
            unique_themes = set()  # a set de-duplicates automatically
            for item in results_list:
                # with a single capture group, findall returns plain strings;
                # strip the padding spaces around the matched theme
                theme = item.strip()
                unique_themes.add(theme)
            return ','.join(unique_themes)
        else:
            return None
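    # Illustrative example (hypothetical pattern, not built from real ods_theme
    # rows): re.findall('(?=( star wars | wars ))', ' star wars ') returns
    # [' star wars ', ' wars '] -- the zero-width lookahead matches at every
    # position, so overlapping themes are all captured.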

    def read_data(self):
        sql = f"select id, theme_type_en, theme_en, theme_en_lower, theme_ch from ods_theme where site_name='{self.site_name}'"
        print("sql:", sql)
        self.df_theme = self.spark.sql(sql).cache()
        self.df_theme.show(10, truncate=False)
        # sql = f"-- select asin, title as asin_volume, date_info from ods_asin_detail where site_name='{self.site_name}' and date_type='week'"  # and date_info>='2023-15'
        sql = f"select asin, title as asin_title, date_info, site_name from ods_asin_detail where site_name='{self.site_name}' and date_type='week' and date_info>='2023-25'"  # and date_info>='2023-15'
        print("sql:", sql)
        self.df_asin_title = self.spark.sql(sqlQuery=sql).cache()
        self.df_asin_title.show(10, truncate=False)

    def handle_data(self):
        self.handle_filter_dirty_data()
        self.handle_theme()

    def handle_filter_dirty_data(self):
        """
        Filter dirty data and keep only the latest title per asin.
        """
        # lower-case the title
        self.df_asin_title = self.df_asin_title.withColumn("asin_title_lower", F.lower(self.df_asin_title["asin_title"]))
        # drop null titles
        self.df_asin_title = self.df_asin_title.filter("asin_title_lower is not null")
        # drop literal 'none'/'null'/'nan' strings
        self.df_asin_title = self.df_asin_title.filter("asin_title_lower not in ('none', 'null', 'nan')")
        # keep the title from the latest date_info
        window = Window.partitionBy('asin').orderBy(F.desc('date_info'))  # partition by asin, newest date_info first
        self.df_asin_title = self.df_asin_title.withColumn('row_number', F.row_number().over(window))  # number the rows within each partition
        self.df_asin_title = self.df_asin_title.filter(self.df_asin_title.row_number == 1).drop('row_number')  # keep only the first (latest) row per asin, then drop the helper column
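        # Illustrative example (hypothetical rows): if asin 'B0EXAMPLE01' was
        # crawled in weeks '2023-25' and '2023-30', the '2023-30' row gets
        # row_number 1, so only its title survives the filter above.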

    def handle_theme(self):
        pdf_theme = self.df_theme.toPandas()
        pattern_list = list(set(pdf_theme.theme_en_lower))
        # drop empty strings, escape regex metacharacters, and pad with spaces for whole-word matching
        pattern_list = [re.escape(f' {pattern} ') for pattern in pattern_list if pattern]
        pattern_list.sort(key=len, reverse=True)  # longest first, so longer themes win the alternation
        pattern_str = '|'.join(pattern_list)  # alternation: match any theme
        self.pattern = '(?=(' + pattern_str + '))'  # zero-width lookahead, so overlapping themes all match
        print("self.pattern:", self.pattern)
        df_asin_title = self.df_asin_title.cache()  # kept aside to join the original titles back in for the wide table
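        # Illustrative shape (hypothetical themes, not real ods_theme rows):
        # for themes 'star wars' and 'wars', self.pattern is roughly
        # '(?=( star wars | wars ))' -- longest alternative first, each theme
        # space-padded (and regex-escaped, which on recent Pythons also
        # escapes the spaces).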

        self.df_asin_title = self.df_asin_title.withColumn("asin_title_lower", F.concat(F.lit(" "), "asin_title_lower", F.lit(" ")))  # 标题两头加空字符串用来匹配整个词

        self.df_asin_title = self.df_asin_title.withColumn("theme_en_lower", self.u_theme_pattern('asin_title_lower', F.lit(self.pattern)))
        # 将列拆分为数组多列
        self.df_asin_title = self.df_asin_title.withColumn("theme_en_lower", F.split(self.df_asin_title["theme_en_lower"], ","))
        # 将数组合并到多行
        self.df_asin_title = self.df_asin_title.withColumn("theme_en_lower", F.explode(self.df_asin_title["theme_en_lower"]))
        # self.df_asin_title.show(100, truncate=False)
        # self.df_asin_title.filter("asin='0060574437'").show(100, truncate=False)
        # self.df_asin_title.filter('theme_en_lower is null').show(20, truncate=False)   # 没有记录
        self.df_asin_title = self.df_asin_title.join(
            self.df_theme, on=['theme_en_lower'], how='left'  # 改成inner, 这样避免正则匹配结果不准
        )
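        # Illustrative flow (hypothetical values): a matched string
        # 'wars,star wars' is split into ['wars', 'star wars'] and exploded
        # into two rows, each joining back to ods_theme on theme_en_lower.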

        self.df_save_vertical = self.df_asin_title
        self.df_save_vertical.show(30, truncate=False)
        self.df_asin_title = self.df_asin_title.drop_duplicates(['asin', 'theme_type_en', 'theme_ch'])
        self.df_asin_title = self.df_asin_title.withColumn("theme_type_en_counts", F.concat("theme_type_en", F.lit("_counts")))
        self.df_asin_title = self.df_asin_title.withColumn("theme_type_en_ids", F.concat("theme_type_en", F.lit("_ids")))
        # self.df_asin_title.filter('theme_type_en_counts is null').show(20, truncate=False)   # 没有记录
        self.df_asin_title = self.df_asin_title.filter('theme_type_en_counts is not null')
        pivot_df1 = self.df_asin_title.groupBy("asin").pivot("theme_type_en_counts").agg(F.expr("IFNULL(count(*), 0) AS value"))
        pivot_df1 = pivot_df1.na.fill(0)
        pivot_df2 = self.df_asin_title.groupBy("asin").pivot("theme_type_en_ids").agg(F.concat_ws(",", F.collect_list("id")))
        pivot_df1.show(30, truncate=False)
        self.df_save_wide = df_asin_title.join(  # df_asin_title: the cached copy with one row per asin
            pivot_df1, on='asin', how='left'
        ).join(
            pivot_df2, on='asin', how='left'
        )
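        # Illustrative wide-row shape (hypothetical theme_type_en values
        # 'style' and 'color'): asin, asin_title, ..., style_counts,
        # color_counts, style_ids, color_ids -- one counts/ids column pair
        # per distinct theme type produced by the pivots.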
        print(self.df_save_wide.columns)

    def save_data(self):
        self.reset_partitions(partitions_num=50)
        self.save_data_common(
            df_save=self.df_save_vertical,
            db_save=self.db_save_vertical,
            partitions_num=self.partitions_num,
            partitions_by=self.partitions_by
        )
        self.reset_partitions(partitions_num=100)
        self.save_data_common(
            df_save=self.df_save_wide,
            db_save=self.db_save_wide,
            partitions_num=self.partitions_num,
            partitions_by=self.partitions_by
        )


if __name__ == '__main__':
    site_name = sys.argv[1]  # arg 1: site
    date_type = sys.argv[2]  # arg 2: date type: day/week/4_week/month/quarter
    date_info = sys.argv[3]  # arg 3: year-month-day / year-week / year-month / year-quarter, e.g. 2022-1
    handle_obj = DimAsinTitleInfo(site_name=site_name, date_type=date_type, date_info=date_info)
    handle_obj.run()
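
# Example invocation (assumed spark-submit deployment; cluster options omitted):
#   spark-submit dim_asin_title_info_old.py us month 2023-06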