# dwt_st_asin_info.py
import os
import sys

sys.path.append(os.path.dirname(sys.path[0]))  # add the parent directory so utils.templates can be imported
from utils.templates import Templates


class DwtStAsinInfo(Templates):
    def __init__(self, site_name="us", date_type="week", date_info="2022-1"):
        super().__init__()
        self.site_name = site_name
        self.date_type = date_type
        self.date_info = date_info
        self.db_save = "dwt_st_asin_info"
        self.spark = self.create_spark_object(app_name=f"{self.db_save} {self.site_name}, {self.date_type}, {self.date_info}")
        self.df_date = self.get_year_week_tuple()
        # placeholder DataFrames, overwritten in read_data()/handle_data()
        self.df_save = self.spark.sql("select 1+1;")
        self.df_st_asin_info = self.spark.sql("select 1+1;")
        self.df_st_counts = self.spark.sql("select 1+1;")
        self.df_st_info = self.spark.sql("select 1+1;")
        self.partitions_by = ['site_name', 'date_type', 'date_info']
        # coarser date granularities cover more data, so write more output partitions
        if self.date_type in ["week"]:
            self.reset_partitions(100)
        elif self.date_type in ["month", "4_week"]:
            self.reset_partitions(350)
        elif self.date_type in ["quarter"]:
            self.reset_partitions(600)

    def read_data(self):
        print("1.1 读取dwd_st_asin_info表")
        sql = f"select * from dwd_st_asin_info " \
              f"where site_name='{self.site_name}' and date_type='{self.date_type}' and date_info = '{self.date_info}';"
        print("sql:", sql)
        self.df_st_asin_info = self.spark.sql(sql).cache()
        self.df_st_asin_info.show(10, truncate=False)
        print("1.2 读取dwd_st_counts表")
        sql = f"select search_term, st_adv_counts, st_ao_val from dwd_st_counts " \
              f"where site_name='{self.site_name}' and date_type='{self.date_type}' and date_info = '{self.date_info}';"
        print("sql:", sql)
        self.df_st_counts = self.spark.sql(sql).cache()
        self.df_st_counts.show(10, truncate=False)
        print("1.3 读取dim_st_detail表")
        sql = f"select search_term, st_quantity_being_sold from dim_st_detail " \
              f"where site_name='{self.site_name}' and date_type='{self.date_type}' and date_info = '{self.date_info}';"
        print("sql:", sql)
        self.df_st_info = self.spark.sql(sql).cache()
        self.df_st_info.show(10, truncate=False)

    def handle_data(self):
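        # enrich each (search_term, asin) row with term-level metrics;
        # left joins keep rows even for terms missing from dwd_st_counts / dim_st_detail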
        self.df_save = self.df_st_asin_info.join(
            self.df_st_counts, on=['search_term'], how='left'
        ).join(
            self.df_st_info, on=['search_term'], how='left'
        )
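
    # NOTE (assumption): run(), inherited from Templates, presumably calls
    # read_data() and handle_data() and then writes self.df_save into
    # self.db_save partitioned by self.partitions_by; the exact contract
    # lives in utils.templates.Templates.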


if __name__ == '__main__':
    site_name = sys.argv[1]  # arg 1: site, e.g. us
    date_type = sys.argv[2]  # arg 2: date type: week/4_week/month/quarter
    date_info = sys.argv[3]  # arg 3: year-week/year-month/year-quarter, e.g. 2022-1
    handle_obj = DwtStAsinInfo(site_name=site_name, date_type=date_type, date_info=date_info)
    handle_obj.run()
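
# Example invocation (illustrative; actual spark-submit flags depend on the cluster):
#   spark-submit dwt_st_asin_info.py us week 2022-1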