import ast
import os
import sys

os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
sys.path.append(os.path.dirname(sys.path[0]))  # add the parent directory so that utils can be imported

from utils.templates import Templates
from pyspark.sql.types import StringType
# window functions for per-group ordering
from pyspark.sql.window import Window
from pyspark.sql import functions as F


class DimAsinTitleInfo(Templates):

    def __init__(self, site_name='us'):
        super().__init__()
        self.site_name = site_name
        self.db_save_vertical = 'dim_asin_title_info_vertical'
        self.db_save_wide = 'dim_asin_title_info_wide'
        # self.date_type / self.date_info are provided by the Templates base class
        self.spark = self.create_spark_object(
            app_name=f"{self.db_save_vertical}: {self.site_name}, {self.date_type}, {self.date_info}")
        # placeholder DataFrames, filled in by read_data() / handle_data()
        self.df_theme = self.spark.sql("select 1+1")
        self.df_asin_title = self.spark.sql("select 1+1")
        self.df_save_vertical = self.spark.sql("select 1+1")  # vertical (long) table
        self.df_save_wide = self.spark.sql("select 1+1")  # wide table
        # register the UDF
        self.u_theme_pattern = F.udf(self.udf_theme_pattern, StringType())
        # other variables
        self.theme_list_str = str()  # serialized theme list used by the matching UDF
        # partition parameters
        self.partitions_by = ['site_name']
        self.partitions_num = 100

    @staticmethod
    def udf_theme_pattern(title, theme_list_str):
        # theme_list_str is the repr of a list of space-padded themes;
        # ast.literal_eval is a safer equivalent of the original eval()
        found_themes = [theme.strip() for theme in ast.literal_eval(theme_list_str) if theme in title]
        if found_themes:
            return ','.join(sorted(set(found_themes)))  # sorted for a deterministic result
        return None

    def read_data(self):
        sql = f"select id as theme_id, theme_type_en, theme_en, theme_en_lower, theme_ch from ods_theme where site_name='{self.site_name}'"
        print("sql:", sql)
        self.df_theme = self.spark.sql(sql).cache()
        self.df_theme.show(10, truncate=False)
        sql = f"select asin, title as asin_title, date_info, site_name from ods_asin_detail where site_name='{self.site_name}' and date_type='week' "  # and date_info>='2023-25' limit 10000
        print("sql:", sql)
        self.df_asin_title = self.spark.sql(sqlQuery=sql).cache()
        self.df_asin_title.show(10, truncate=False)

    def handle_data(self):
        self.handle_filter_dirty_data()
        self.handle_theme()

    def handle_filter_dirty_data(self):
        """Filter out dirty data and keep only the latest title per ASIN."""
        # lower-case the title
        self.df_asin_title = self.df_asin_title.withColumn("asin_title_lower", F.lower(self.df_asin_title["asin_title"]))
        # drop null titles
        self.df_asin_title = self.df_asin_title.filter("asin_title_lower is not null")
        # drop the literal strings 'none', 'null' and 'nan'
        self.df_asin_title = self.df_asin_title.filter("asin_title_lower not in ('none', 'null', 'nan')")
        # keep the title belonging to the latest date_info:
        # partition by asin and order by date_info descending
        window = Window.partitionBy('asin').orderBy(F.desc('date_info'))
        # number the rows within each partition
        self.df_asin_title = self.df_asin_title.withColumn('row_number', F.row_number().over(window))
        # keep only the first (i.e. latest) row per ASIN and drop the helper column
        self.df_asin_title = self.df_asin_title.filter(self.df_asin_title.row_number == 1).drop('row_number')

    def handle_theme(self):
        pdf_theme = self.df_theme.toPandas()
        theme_list = list(set(pdf_theme.theme_en_lower))
        # pad every theme with spaces so that only whole words are matched
        self.theme_list_str = str([f" {theme} " for theme in theme_list])
        print("self.theme_list_str:", self.theme_list_str)
        df_asin_title = self.df_asin_title.cache()  # kept aside to join back when building the wide table
        # pad the title with spaces as well, so themes at the start/end of the title still match
        self.df_asin_title = self.df_asin_title.withColumn(
            "asin_title_lower", F.concat(F.lit(" "), "asin_title_lower", F.lit(" ")))
        self.df_asin_title = self.df_asin_title.withColumn(
            "theme_en_lower", self.u_theme_pattern('asin_title_lower', F.lit(self.theme_list_str)))
        # split the comma-joined matches into an array
        self.df_asin_title = self.df_asin_title.withColumn(
            "theme_en_lower", F.split(self.df_asin_title["theme_en_lower"], ","))
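        # Illustrative example (hypothetical values): after the UDF a row looks like
        # ('B0ASIN', 'mario,luigi'); the split above turns the string into
        # ['mario', 'luigi'], and the explode below fans it out into two rows,
        # ('B0ASIN', 'mario') and ('B0ASIN', 'luigi'), one per matched theme.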
        # explode the array into one row per matched theme
        self.df_asin_title = self.df_asin_title.withColumn(
            "theme_en_lower", F.explode(self.df_asin_title["theme_en_lower"]))
        self.df_asin_title = self.df_asin_title.join(
            self.df_theme,
            on=['theme_en_lower'],
            how='left'  # TODO: switch to an inner join so unmatched rows do not pollute the result
        )
        # 1. vertical (long) table
        self.df_save_vertical = self.df_asin_title.cache()
        print(self.df_save_vertical.columns)
        self.df_save_vertical.show(30, truncate=False)
        # 2. wide table
        self.df_asin_title = self.df_asin_title.drop_duplicates(['asin', 'theme_type_en', 'theme_ch'])
        self.df_asin_title = self.df_asin_title.withColumn(
            "theme_type_en_counts", F.concat("theme_type_en", F.lit("_counts")))
        self.df_asin_title = self.df_asin_title.withColumn(
            "theme_type_en_ids", F.concat("theme_type_en", F.lit("_ids")))
        # a null check showed no such rows, but filter defensively anyway
        self.df_asin_title = self.df_asin_title.filter('theme_type_en_counts is not null')
        # the count itself is never null; the nulls come from pivot combinations
        # that never occurred, which na.fill(0) below replaces with 0
        pivot_df1 = self.df_asin_title.groupBy("asin").pivot("theme_type_en_counts").agg(F.count(F.lit(1)))
        pivot_df1 = pivot_df1.na.fill(0)
        pivot_df2 = self.df_asin_title.groupBy("asin").pivot("theme_type_en_ids").agg(
            F.concat_ws(",", F.collect_list("theme_id")))
        pivot_df1.show(30, truncate=False)
        pivot_df2.show(30, truncate=False)
        # join the pivots back onto the cached, pre-matching titles
        self.df_save_wide = df_asin_title.join(
            pivot_df1, on='asin', how='left'
        ).join(
            pivot_df2, on='asin', how='left'
        )
        print(self.df_save_wide.columns)

    def save_data(self):
        self.reset_partitions(partitions_num=100)
        self.save_data_common(
            df_save=self.df_save_vertical,
            db_save=self.db_save_vertical,
            partitions_num=self.partitions_num,
            partitions_by=self.partitions_by
        )
        self.reset_partitions(partitions_num=100)
        self.save_data_common(
            df_save=self.df_save_wide,
            db_save=self.db_save_wide,
            partitions_num=self.partitions_num,
            partitions_by=self.partitions_by
        )


if __name__ == '__main__':
    site_name = sys.argv[1]  # argument 1: site name
    handle_obj = DimAsinTitleInfo(site_name=site_name)
    handle_obj.run()
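# A minimal local check of the whole-word matching (illustrative values only;
# no Spark session is needed because udf_theme_pattern is a plain staticmethod):
#
#     themes = str([" mario ", " mario party "])
#     DimAsinTitleInfo.udf_theme_pattern(" super mario party set ", themes)
#     # -> 'mario,mario party'  (both padded themes occur as whole words)
#     DimAsinTitleInfo.udf_theme_pattern(" mariokart wheel ", themes)
#     # -> None                 (here 'mario' appears only as a substring)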