import os
import sys

sys.path.append(os.path.dirname(sys.path[0]))  # make the parent directory importable (for the utils package)
from utils.hdfs_utils import HdfsUtils
from utils.spark_util import SparkUtil
from utils.templates import Templates
from pyspark.sql import functions as F


class DwdAllSynAsin(Templates):

    def __init__(self, site_name='us'):
        super().__init__()
        self.site_name = site_name
        self.db_save = 'dwd_all_syn_asin'
        self.spark = self.create_spark_object(
            app_name=f"{self.db_save}: {self.site_name}")
        self.reset_partitions(partitions_num=40)
        self.partitions_by = ['site_name']
        # Placeholder DataFrames; the real ones are built in read_data() and handle_data().
        self.df_asin = self.spark.sql("select 1+1")
        self.df_save = self.spark.sql("select 1+1")

    def read_data(self):
        # For each ASIN, collect the distinct year-months ('yyyy-MM') in which it
        # appears in ods_asin_detail, returned as a sorted array per ASIN.
        sql1 = f"""
        select
            asin,
            sort_array(collect_set(created_at)) as created_at_list
        from
            (select
                asin,
                date_format(created_at, 'yyyy-MM') as created_at
            from
                ods_asin_detail
            where
                site_name = '{self.site_name}'
                and date_type in ('week', 'month', 'month_week')) t
        group by asin
        """
        print(sql1)
        self.df_asin = self.spark.sql(sqlQuery=sql1).cache()

    def handle_data(self):
        # Delete the existing output partition so the new write starts clean.
        hdfs_path = f"/home/{SparkUtil.DEF_USE_DB}/dwd/{self.db_save}/site_name={self.site_name}"
        print(f"Deleting HDFS directory: {hdfs_path}")
        HdfsUtils.delete_hdfs_file(hdfs_path)
        # Serialize the month array into a string column and tag each row with the site.
        self.df_save = self.df_asin.withColumn("time_list", F.col("created_at_list").cast("string"))
        self.df_save = self.df_save.drop("created_at_list")
        self.df_save = self.df_save.withColumn("site_name", F.lit(self.site_name))


if __name__ == '__main__':
    site_name = sys.argv[1]  # expected: a site code, e.g. 'uk' or 'de'
    if site_name in ['uk', 'de']:
        handle_obj = DwdAllSynAsin(site_name=site_name)
        handle_obj.run()
    else:
        print("Data for this site is not computed yet!")
        sys.exit()
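
# A minimal usage sketch. Assumptions (not shown in this file): the script is
# saved as dwd_all_syn_asin.py, Templates.run() chains read_data() ->
# handle_data() -> a save step that writes df_save partitioned by the columns
# in partitions_by, and the job is launched via spark-submit; the exact submit
# flags are deployment-specific and omitted.
#
#   spark-submit dwd_all_syn_asin.py uk
#
# Only 'uk' and 'de' are handled here; any other argument exits without work.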