# tmp_self_asin_detail.py
import os
import re
import sys

os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
sys.path.append(os.path.dirname(sys.path[0]))  # add the parent directory so sibling packages (utils, yswg_utils) can be imported
from utils.templates import Templates
# from ..utils.templates import Templates
from pyspark.sql.types import StringType, BooleanType, StructType, StructField, DoubleType, FloatType

# Window functions for partitioned ordering
from pyspark.sql.window import Window
from pyspark.sql import functions as F
from yswg_utils.common_udf import parse_weight_str


class TmpSelfAsinDetail(Templates):
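    """Deduplicate ods_self_asin_detail so each ASIN keeps only its latest crawl record, then export the result to a local CSV."""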

    def __init__(self, site_name='us'):
        super().__init__()
        self.site_name = site_name
        self.db_save = 'dim_asin_stable_info'
        self.spark = self.create_spark_object(app_name=f"{self.db_save}: {self.site_name}")
        self.df_asin_detail = self.spark.sql("select 1+1;")  # placeholder DataFrame, replaced in read_data()

    def read_data(self):
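        """Load the raw ASIN detail records from the ods_self_asin_detail table into a cached DataFrame."""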
        sql = "select * from ods_self_asin_detail where site_name='us';"  # limit 1000000
        self.df_asin_detail = self.spark.sql(sql).cache()
        self.df_asin_detail.show()

    def handle_data(self):
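        """Keep only the most recent record per asin, ranked by date_info."""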
        window = Window.partitionBy(['asin']).orderBy(F.desc("date_info"))  # partition by asin and order by date_info descending

        self.df_asin_detail = self.df_asin_detail.withColumn('row_number', F.row_number().over(window))  # number the rows in each partition with the window function
        self.df_asin_detail = self.df_asin_detail.filter(self.df_asin_detail.row_number == 1).drop('row_number')  # keep only the latest record (row_number == 1) per asin, then drop the helper column

    def save_data(self):
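        """Collect the deduplicated result to the driver and write it to a local CSV file."""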
        df_save = self.df_asin_detail.toPandas()
        print(df_save.shape)
        df_save.to_csv("/root/tmp_self_asin_detail.csv", index=False)
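        # Note: toPandas() pulls the whole result onto the driver. For larger outputs,
        # a Spark-native write would avoid that; a minimal sketch (the output path below
        # is only an example, not part of the original script):
        #   self.df_asin_detail.coalesce(1).write.mode("overwrite") \
        #       .option("header", True).csv("/root/tmp_self_asin_detail_csv")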


if __name__ == '__main__':
    site_name = 'us'
    handle_obj = TmpSelfAsinDetail(site_name=site_name)
    handle_obj.run()