# dim_st_detail.py
"""
author: 方星钧(ffman)
description: 清洗6大站点对应的 “ods_brand_analytics” 的表: 字段重命名+热搜词、新出词、上升词
table_read_name: ods_brand_analytics
table_save_name: dim_st_detail
table_save_level: dim
version: 1.0
created_date: 2022-11-21
updated_date: 2022-11-22
"""
import os
import sys
import time
os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
sys.path.append(os.path.dirname(sys.path[0]))  # parent directory
from pyspark.storagelevel import StorageLevel
from utils.templates import Templates
# from ..utils.templates import Templates
from pyspark.sql.types import StringType, IntegerType
# Window functions for grouped ranking
from pyspark.sql.window import Window
from pyspark.sql import functions as F
class DwtStDetail(Templates):
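    """Build dim_st_detail from ods_brand_analytics: rename fields and flag
    hot-search, newly-appearing and rising search terms per site/date period."""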
def __init__(self, site_name='us', date_type="month", date_info='2022-1'):
super().__init__()
self.site_name = site_name
# self.date_type = date_type
self.date_type = date_type.replace("month_week", "month")
self.date_info = date_info
self.db_save = f'dim_st_detail'
self.spark = self.create_spark_object(app_name=f"{self.db_save}: {self.site_name}, {self.date_type}, {self.date_info}")
# if self.date_type == '4_week':
# self.date_info = '2022-12-17'
self.get_date_info_tuple()
self.get_year_week_tuple()
self.get_year_month_days_dict()
self.date_info_last = self.get_date_info_last()
        self.date_info_last_year = self.date_info.replace(f"{self.year}", f"{int(self.year)-1}")  # same period last year
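        # Placeholder DataFrames; each is overwritten in the read_data_* methods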
self.df_st_detail = self.spark.sql(f"select 1+1;")
self.df_st_detail_last = self.spark.sql(f"select 1+1;")
self.df_st_detail_last_year = self.spark.sql(f"select 1+1;")
        # self.df_st_detail_last_year = self.date_info.replace(f"{self.year}", f"{int(self.year)-1}")  # same period last year
self.df_st_detail_days = self.spark.sql(f"select 1+1;")
self.df_st_quantity = self.spark.sql(f"select 1+1;")
self.df_st_rate = self.spark.sql(f"select 1+1;")
self.df_asin_history = self.spark.sql(f"select 1+1;")
        self.df_st_asin = self.spark.sql(f"select 1+1;")
        self.df_st_asin_zr = self.spark.sql(f"select 1+1;")
self.df_save = self.spark.sql(f"select 1+1;")
self.partitions_num = 3
self.reset_partitions(partitions_num=self.partitions_num)
self.partitions_by = ['site_name', 'date_type', 'date_info']
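        # Register UDFs; spark.udf.register exposes them to Spark SQL and returns a callable for the DataFrame API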
self.u_handle_columns_asin123 = self.spark.udf.register("u_handle_columns_asin123", self.udf_handle_columns_asin123, StringType())
self.u_is_ascending = self.spark.udf.register("u_is_ascending", self.udf_is_ascending, IntegerType())
self.u_is_search = self.spark.udf.register("u_is_search", self.udf_is_search, IntegerType())
self.u_get_asin_top = self.spark.udf.register("u_get_asin_top", self.udf_get_asin_top, StringType())
def get_date_info_last(self):
        # Get the date_info of the previous period
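        # e.g. date_type='month', date_info='2022-01' -> '2021-12'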
df = self.df_date.toPandas()
date_info_last = ''
if self.date_type == 'day':
date_id = tuple(df.loc[df.date == self.date_info].id)[0]
df_loc = df.loc[df.id == (date_id - 1)]
date_info_last = tuple(df_loc.date)[0]
if self.date_type in ['week', 'week_old']:
date_id = tuple(df.loc[(df[f'year_week'] == self.date_info) & (
df.week_day == 1)].id)[0]
df_loc = df.loc[df.id == (date_id - 7)]
date_info_last = tuple(df_loc[f'year_week'])[0]
if self.date_type == '4_week':
date_id = tuple(df.loc[(df[f'year_week'] == self.date_info) & (
df.week_day == 1)].id)[0]
df_loc = df.loc[df.id == (date_id - 28)]
date_info_last = tuple(df_loc.year_week)[0]
if self.date_type in ['month', 'month_old']:
if int(self.month) == 1:
date_info_last = f"{int(self.year) - 1}-12"
else:
month = f"0{int(self.month) - 1}" if int(self.month) <= 10 else f"{int(self.month) - 1}"
date_info_last = f"{self.year}-{month}"
if self.date_type == 'last30day':
date_id = tuple(df.loc[df.date == self.date_info].id)[0]
df_loc = df.loc[df.id == (date_id - 30)]
date_info_last = tuple(df_loc.date)[0]
print("date_info_last:", date_info_last)
return date_info_last
@staticmethod
def udf_handle_columns_asin123(asin):
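        """Keep only well-formed 10-character ASINs; anything else becomes null."""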
if len(str(asin)) == 10:
return asin
else:
return None
@staticmethod
def udf_is_ascending(x):
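        """Return 1 when the rank improved by at least 50% vs the previous period (a rising term), else 0."""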
if x >= 0.5:
return 1
else:
return 0
@staticmethod
def udf_is_search(x):
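        """Return 1 when the term appeared in at least 80% of the historical periods (a hot-search term), else 0."""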
if x >= 0.8:
return 1
else:
return 0
@staticmethod
def udf_get_asin_top(asin1, value1, asin2, value2, asin3, value3, flag):
"""通过分享转化比大小顺序找到对应的asin顺序,从而找到bs分类id"""
if max(value1, value2, value3) == value1:
asin_top1 = asin1
if max(value2, value3) == value2:
asin_top2 = asin2
asin_top3 = asin3
else:
asin_top2 = asin3
asin_top3 = asin2
elif max(value1, value2, value3) == value2:
asin_top1 = asin2
if max(value1, value3) == value1:
asin_top2 = asin1
asin_top3 = asin3
else:
asin_top2 = asin3
asin_top3 = asin1
else:
asin_top1 = asin3
if max(value1, value2) == value1:
asin_top2 = asin1
asin_top3 = asin2
else:
asin_top2 = asin2
asin_top3 = asin1
if flag == 1:
return asin_top1
elif flag == 2:
return asin_top2
else:
return asin_top3
def read_data_st_detail(self):
print("1.1 读取ods_brand_analytics表")
# 当前date_info
params = f" and rank<=1500000" if self.date_type == 'week' else ""
sql = f"select * from ods_brand_analytics where site_name='{self.site_name}' and date_type='{date_type}' and date_info='{self.date_info}' {params}"
print("sql:", sql)
self.df_st_detail = self.spark.sql(sql).cache()
self.df_st_detail.show(10, truncate=False)
        # date_info of the previous period
try:
sql = f"select search_term, rank as rank_last from ods_brand_analytics where site_name='{self.site_name}' and date_type='{self.date_type}' and date_info='{self.date_info_last}' {params}"
print("sql:", sql)
self.df_st_detail_last = self.spark.sql(sql).cache()
if self.df_st_detail_last.count() == 0:
print("date_info_last对应分区没有数据:", self.date_info_last)
os.system(
rf"/mnt/run_shell/spark_shell/ods/spark_ods_brand_analytics.sh {self.site_name} {self.date_type} {self.date_info_last}")
                # Recreate the spark object to refresh the metadata mapping
                print("Recreating the spark object to refresh the metadata mapping; waiting 10s (so far this has not proven feasible)")
self.spark = self.create_spark_object(
app_name=f"{self.db_save}: {self.site_name}, {self.date_type}, {self.date_info}")
time.sleep(10)
self.df_st_detail_last = self.spark.sql(sql).cache()
self.df_st_detail_last.show(10, truncate=False)
quit()
except Exception as e:
print("error:", e)
        # year_week_tuple of the current period
sql = f"select * from ods_brand_analytics where site_name='{self.site_name}' and date_type='week' and date_info in {self.year_week_tuple} and rank<=1500000"
print("sql:", sql)
self.df_st_detail_days = self.spark.sql(sql).cache()
self.df_st_detail_days = self.df_st_detail_days.drop_duplicates(['search_term', 'date_info'])
self.df_st_detail_days = self.df_st_detail_days.groupby(['search_term']).agg(
F.count('date_info').alias('st_appear_history_counts'))
# self.df_st_detail_days.filter('search_term="nan"').show(10, truncate=False)
# st_history_counts = 28 if self.date_type == '4_week' else 1 if self.date_type == 'day' else len(self.year_week_tuple)
        st_history_counts = 1 if self.date_type in ['week', 'week_old'] else len(self.year_week_tuple)
# self.df_st_detail_days = self.df_st_detail_days.withColumn('st_history_counts', F.lit(st_history_counts))
self.df_st_detail = self.df_st_detail.withColumn('st_history_counts', F.lit(st_history_counts))
self.df_st_detail_days.show(10, truncate=False)
        # date_info_last_year: same period last year
sql = f"select search_term, 0 as st_is_new_market_segment from ods_brand_analytics where site_name='{self.site_name}' and date_type='{self.date_type}' and date_info='{self.date_info_last_year}' {params};"
print("sql:", sql)
self.df_st_detail_last_year = self.spark.sql(sql).cache()
self.df_st_detail_last_year.show(10, truncate=False)
def read_data_st_quantity(self):
print("1.2 读取ods_st_quantity_being_sold表")
# if self.site_name in ['us'] and (
# (int(self.month) >= 12 and int(self.year) >= 2022) or (int(self.year) >= 2023)):
# sql = f"select search_term, quantity_being_sold as st_quantity_being_sold from ods_st_quantity_being_sold " \
# f"where site_name='{self.site_name}' and date_type='day' and date_info in {self.date_info_tuple};"
# else:
# sql = f"select search_term, quantity_being_sold as st_quantity_being_sold from ods_brand_analytics " \
# f"where site_name='{self.site_name}' and date_type='week' and date_info >= '2022-01';"
if self.date_type == 'month' and ((self.site_name == 'us' and self.date_info >= '2023-10') or (self.site_name in ['uk', 'de'] and self.date_info >= '2024-05')):
sql = f"select search_term, quantity_being_sold as st_quantity_being_sold from ods_st_quantity_being_sold " \
f"where site_name='{self.site_name}' and date_type='{self.date_type}' and date_info='{self.date_info}';"
# elif self.site_name == 'us' and date_type == 'month_week' and date_info >= '2023-10':
else:
sql = f"select search_term, quantity_being_sold as st_quantity_being_sold from ods_st_quantity_being_sold " \
f"where site_name='{self.site_name}' and date_type='week' and date_info in {self.year_week_tuple};"
print("sql:", sql)
self.df_st_quantity = self.spark.sql(sqlQuery=sql).cache()
self.df_st_quantity.show(10, truncate=False)
def read_data_st_search_num(self):
print("1.3 读取ods_rank_search_rate_repeat表")
if (int(self.year) <= 2022 and int(self.month) <= 8) or int(self.year) <= 2021:
params = f"date_info='2022-08'"
else:
params = f"date_info='{self.year}-{self.month}'"
sql = f"select rank, search_num as st_search_num, rate as st_search_rate, search_sum as st_search_sum " \
f"from ods_rank_search_rate_repeat where site_name='{self.site_name}' and date_type='month' and {params};"
print("sql:", sql)
self.df_st_rate = self.spark.sql(sql).cache()
self.df_st_rate.show(10, truncate=False)
if self.df_st_rate.count() == 0:
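            # Fallback: this month's partition is empty, so keep the most recent rate per rank across all partitions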
sql = f"select rank, search_num as st_search_num, rate as st_search_rate, search_sum as st_search_sum, date_info " \
f"from ods_rank_search_rate_repeat where site_name='{self.site_name}';"
print("sql:", sql)
self.df_st_rate = self.spark.sql(sql).cache()
print("self.df_st_rate开窗前:", self.df_st_rate.count())
window = Window.partitionBy(["rank"]).orderBy(
self.df_st_rate.date_info.desc()
)
self.df_st_rate = self.df_st_rate.withColumn("date_info_rank", F.row_number().over(window=window)). \
filter("date_info_rank=1")
print("self.df_st_rate开窗后:", self.df_st_rate.count())
self.df_st_rate = self.df_st_rate.drop("date_info_rank", "date_info")
self.df_st_rate.show(10, truncate=False)
def read_data_asin_history(self):
print("1.4 读取dim_cal_asin_history_detail表")
sql = f"select asin, bsr_cate_1_id, bsr_cate_current_id, node_id as bsr_cate_current_id_new, category_first_id as bsr_cate_1_id_new " \
f"from dim_cal_asin_history_detail where site_name='{self.site_name}';"
print("sql:", sql)
self.df_asin_history = self.spark.sql(sql).cache()
self.df_asin_history.show(10, truncate=False)
def read_data_st_asin(self):
print("1.5 读取dim_st_asin_info表")
if self.date_type == 'month' and ((self.site_name == 'us' and self.date_info >= '2023-10') or (self.site_name in ['uk', 'de'] and self.date_info >= '2024-05')):
sql = f"select search_term, asin, page_row, date_info, updated_time as st_updated_time " \
f"from dim_st_asin_info where site_name='{self.site_name}' and date_type='{self.date_type}' and date_info='{self.date_info}';"
else:
sql = f"select search_term, asin, page_row, date_info, updated_time as st_updated_time " \
f"from dim_st_asin_info where site_name='{self.site_name}' and date_type='week' and date_info in {self.year_week_tuple};"
print("sql:", sql)
self.df_st_asin = self.spark.sql(sql).cache()
self.df_st_asin_zr = self.df_st_asin.filter('data_type="zr"').drop("st_updated_time").cache()
self.df_st_asin.show(10, truncate=False)
def read_data(self):
self.read_data_st_detail()
self.read_data_st_quantity()
self.read_data_st_search_num()
self.read_data_asin_history()
self.read_data_st_asin()
def handle_data(self):
self.handle_columns_null()
self.handle_st_click_and_conversion()
self.handle_st_rate_and_quantity()
self.handle_st_first()
self.handle_st_ascending()
self.handle_st_search()
self.handle_st_top3_asin()
self.handle_st_new_market_segment()
self.handle_columns_renamed()
self.df_save = self.df_st_detail
self.df_save = self.df_save.drop_duplicates(['search_term'])
self.handle_st_bs_category()
print("date_type:", date_type)
self.df_save = self.df_save.withColumn('date_type', F.lit(date_type))
print("self.df_save.columns:", self.df_save.columns)
self.df_save.show(10, truncate=False)
# quit()
    def handle_columns_null(self):
        # Clean dirty click-share/conversion-share and asin1-3 fields: negative numeric values become 0, empty strings become null
        for col in ['click_share1', 'click_share2', 'click_share3',
                    'conversion_share1', 'conversion_share2', 'conversion_share3']:
            self.df_st_detail = self.df_st_detail.withColumn(
                col, F.when(F.col(col) >= 0, F.col(col)).otherwise(0.0))
        for col in ['brand1', 'brand2', 'brand3', 'category1', 'category2', 'category3']:
            self.df_st_detail = self.df_st_detail.withColumn(
                col, F.when(F.col(col) == '', None).otherwise(F.col(col)))
        for col in ['asin1', 'asin2', 'asin3']:
            self.df_st_detail = self.df_st_detail.withColumn(col, self.u_handle_columns_asin123(col))
def handle_st_click_and_conversion(self):
print("关键词的点击率和转化率求和")
self.df_st_detail = self.df_st_detail.withColumn(
"st_click_share_sum",
self.df_st_detail.click_share1 + self.df_st_detail.click_share2 + self.df_st_detail.click_share3
).withColumn(
"st_conversion_share_sum",
self.df_st_detail.conversion_share1 + self.df_st_detail.conversion_share2 + self.df_st_detail.conversion_share3
)
        # Add "st_competition_level", derived from the two share sums (stored label values kept in Chinese: 低竞争 = low, 高竞争 = high, 其它竞争 = other competition)
self.df_st_detail = self.df_st_detail.withColumn("st_competition_level",
F.when((self.df_st_detail.st_click_share_sum <= 0.2) & (self.df_st_detail.st_conversion_share_sum <= 0.2), "低竞争")
.when((self.df_st_detail.st_click_share_sum >= 0.8) & (self.df_st_detail.st_conversion_share_sum >= 0.8), "高竞争")
.otherwise("其它竞争"))
def handle_st_rate_and_quantity(self):
print("关键词的在售商品数,搜索量,转化率,销量(月度)")
# st_quantity_being_sold
self.df_st_quantity = self.df_st_quantity.filter("st_quantity_being_sold > 0").groupby(['search_term']).agg(
{"st_quantity_being_sold": "mean"}
)
self.df_st_quantity = self.df_st_quantity.withColumnRenamed(
"avg(st_quantity_being_sold)", "st_quantity_being_sold"
)
        # Search term's search volume, conversion rate and sales (monthly)
self.df_st_detail = self.df_st_detail.join(
self.df_st_rate, on=['rank'], how='left'
).join(
self.df_st_quantity, on=['search_term'], how='left'
)
def handle_st_first(self):
print("新出词(当前天/周/4周/月/季度,同比前1天/周/4周/月/季度,第1次出现)")
self.df_st_detail_last = self.df_st_detail_last.withColumn("st_is_first_text", F.lit(0))
self.df_st_detail = self.df_st_detail.join(
self.df_st_detail_last.select("search_term", "st_is_first_text"), on='search_term', how='left'
)
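        # Terms with no match in the previous period are first-time terms; fillna marks them with 1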
self.df_st_detail = self.df_st_detail.fillna(
{"st_is_first_text": 1}
)
# self.df_st_detail.show(10, truncate=False)
def handle_st_ascending(self):
print("上升词(相邻2天/周/月/季度,上升超过50%的排名)")
self.df_st_detail = self.df_st_detail.join(
self.df_st_detail_last.select("search_term", "rank_last"), on='search_term', how='left'
)
self.df_st_detail = self.df_st_detail.na.fill({'rank_last': 0})
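        # The rate is positive when the rank number decreased, i.e. the term moved up the ranking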
self.df_st_detail = self.df_st_detail.withColumn(
"st_is_ascending_text_rate",
(self.df_st_detail.rank_last - self.df_st_detail.rank) / self.df_st_detail.rank_last
)
self.df_st_detail = self.df_st_detail.na.fill({'st_is_ascending_text_rate': -1})
self.df_st_detail = self.df_st_detail.withColumn(
"st_is_ascending_text", self.u_is_ascending(self.df_st_detail.st_is_ascending_text_rate))
self.df_st_detail = self.df_st_detail.drop("rank_last")
def handle_st_search(self):
print("热搜词(历史出现占比>=80%)")
self.df_st_detail = self.df_st_detail.join(
self.df_st_detail_days, on='search_term', how='left'
)
self.df_st_detail = self.df_st_detail.fillna({'st_appear_history_counts': 0})
self.df_st_detail = self.df_st_detail.withColumn(
"st_is_search_text_rate",
self.df_st_detail[f"st_appear_history_counts"] / self.df_st_detail[f"st_history_counts"])
self.df_st_detail = self.df_st_detail.withColumn(
"st_is_search_text", self.u_is_search(self.df_st_detail.st_is_search_text_rate))
def handle_st_top3_asin(self):
print("关键词的top3asin--匹配关键词的bsr一级分类id")
self.df_st_detail = self.df_st_detail.withColumn(
"st_top_asin1",
self.u_get_asin_top(
"asin1", "conversion_share1",
"asin2", "conversion_share2",
"asin3", "conversion_share3",
F.lit(1)
)
)
self.df_st_detail = self.df_st_detail.withColumn(
"st_top_asin2",
self.u_get_asin_top(
"asin1", "conversion_share1",
"asin2", "conversion_share2",
"asin3", "conversion_share3",
F.lit(2)
)
)
self.df_st_detail = self.df_st_detail.withColumn(
"st_top_asin3",
self.u_get_asin_top(
"asin1", "conversion_share1",
"asin2", "conversion_share2",
"asin3", "conversion_share3",
F.lit(3)
)
)
df1 = self.df_st_detail.select("search_term", "st_top_asin1").\
withColumnRenamed("st_top_asin1", "asin").withColumn("type", F.lit(1))
df2 = self.df_st_detail.select("search_term", "st_top_asin2"). \
withColumnRenamed("st_top_asin2", "asin").withColumn("type", F.lit(2))
df3 = self.df_st_detail.select("search_term", "st_top_asin3"). \
withColumnRenamed("st_top_asin3", "asin").withColumn("type", F.lit(3))
df = df1.unionByName(df2, allowMissingColumns=True).unionByName(df3, allowMissingColumns=True)
df = df.join(self.df_asin_history, on='asin', how="left")
# df.show(10, truncate=False)
# df.filter("asin='B00E4WOQU0'").show()
window = Window.partitionBy(["search_term"]).orderBy(
df.type.asc_nulls_last()
)
df = df.withColumn("type_rank", F.row_number().over(window=window)). \
filter("type_rank=1")
df = df.drop("type_rank", "type", "asin")
# df.show(10, truncate=False)
# df.filter("asin='B00E4WOQU0'").show()
self.df_st_detail = self.df_st_detail.join(df, on="search_term", how="left")
self.df_st_detail = self.df_st_detail.withColumnRenamed("bsr_cate_1_id", "st_bsr_cate_1_id")
self.df_st_detail = self.df_st_detail.withColumnRenamed("bsr_cate_current_id", "st_bsr_cate_current_id")
self.df_st_detail = self.df_st_detail.withColumnRenamed("bsr_cate_1_id_new", "st_bsr_cate_1_id_new")
self.df_st_detail = self.df_st_detail.withColumnRenamed("bsr_cate_current_id_new", "st_bsr_cate_current_id_new")
def handle_st_new_market_segment(self):
print("判断关键词是否属于新细分市场")
self.df_st_detail = self.df_st_detail.join(
self.df_st_detail_last_year, on=['search_term'], how='left'
)
self.df_st_detail = self.df_st_detail.fillna(
{"st_is_new_market_segment": 1}
)
def handle_columns_renamed(self):
self.df_st_detail = self.df_st_detail.drop('id', 'created_time', 'updated_time', 'product_title1', 'product_title2', 'product_title3', 'quantity_being_sold')
self.df_st_detail = self.df_st_detail.withColumnRenamed('rank', 'st_rank')
self.df_st_detail = self.df_st_detail.withColumnRenamed('asin1', 'st_asin1')
self.df_st_detail = self.df_st_detail.withColumnRenamed('click_share1', 'st_click_share1')
self.df_st_detail = self.df_st_detail.withColumnRenamed('conversion_share1', 'st_conversion_share1')
self.df_st_detail = self.df_st_detail.withColumnRenamed('asin2', 'st_asin2')
self.df_st_detail = self.df_st_detail.withColumnRenamed('click_share2', 'st_click_share2')
self.df_st_detail = self.df_st_detail.withColumnRenamed('conversion_share2', 'st_conversion_share2')
self.df_st_detail = self.df_st_detail.withColumnRenamed('asin3', 'st_asin3')
self.df_st_detail = self.df_st_detail.withColumnRenamed('click_share3', 'st_click_share3')
self.df_st_detail = self.df_st_detail.withColumnRenamed('conversion_share3', 'st_conversion_share3')
self.df_st_detail = self.df_st_detail.withColumnRenamed('brand1', 'st_brand1')
self.df_st_detail = self.df_st_detail.withColumnRenamed('brand2', 'st_brand2')
self.df_st_detail = self.df_st_detail.withColumnRenamed('brand3', 'st_brand3')
self.df_st_detail = self.df_st_detail.withColumnRenamed('category1', 'st_category1')
self.df_st_detail = self.df_st_detail.withColumnRenamed('category2', 'st_category2')
self.df_st_detail = self.df_st_detail.withColumnRenamed('category3', 'st_category3')
# self.df_save.show(10, truncate=False)
def handle_st_bs_category(self):
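        """Backfill null bsr category ids from zr-type search-term/asin matches,
        preferring the latest date_info and the smallest page_row."""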
window = Window.partitionBy(['search_term']).orderBy(
self.df_st_asin.date_info.desc_nulls_last(),
)
self.df_st_asin = self.df_st_asin.withColumn('row_rank', F.row_number().over(window=window))
self.df_st_asin = self.df_st_asin.filter('row_rank=1').select("search_term", "st_updated_time")
        # Backfill categories for search terms whose category is null
self.df_st_asin_zr = self.df_st_asin_zr.join(
self.df_asin_history.select("asin", "bsr_cate_1_id_new", "bsr_cate_current_id_new"), on='asin', how='left'
)
        # Debug: check whether asins without a category are causing missing data
# self.df_st_asin_zr.filter("search_term in ('southern fried homicide', 'tanked tv show', "
# "'umizoomi prime video', 'dirty jnever gonna not dance again pink', 'rebolution')").show(200, truncate=False)
self.df_st_asin_zr = self.df_st_asin_zr.filter('bsr_cate_1_id_new is not null')
window = Window.partitionBy(['search_term']).orderBy(
self.df_st_asin_zr.date_info.desc_nulls_last(),
self.df_st_asin_zr.page_row.asc_nulls_last(),
)
# """double daddy lifetime movie
# good day nappy roots
# bleeding love leona lewis
# fast car vavo remix
# ei nelly
# the mother jennifer lopez movie
#
# southern fried homicide
# tanked tv show
# umizoomi prime video
# dirty j
# rebolution"""
self.df_st_asin_zr = self.df_st_asin_zr.withColumn('row_rank', F.row_number().over(window=window))
# self.df_st_asin_zr.filter("search_term in ('southern fried homicide', 'tanked tv show', "
# "'umizoomi prime video', 'dirty jnever gonna not dance again pink', 'rebolution')").show(200, truncate=False)
self.df_st_asin_zr = self.df_st_asin_zr.filter('row_rank=1')
self.df_st_asin_zr = self.df_st_asin_zr.drop('row_rank', 'asin', 'page_row', 'date_info')
self.df_st_asin_zr = self.df_st_asin_zr.withColumnRenamed("bsr_cate_1_id_new", "bsr_cate_1_id_new_up")
self.df_st_asin_zr = self.df_st_asin_zr.withColumnRenamed("bsr_cate_current_id_new", "bsr_cate_current_id_new_up")
print("------")
self.df_st_asin_zr.show(20, truncate=False)
self.df_save = self.df_save.join(
self.df_st_asin_zr, on='search_term', how='left'
).join(
self.df_st_asin, on='search_term', how='left'
)
print(self.df_save.columns)
self.df_save = self.df_save.withColumn("st_bsr_cate_1_id_new", F.when(F.col("st_bsr_cate_1_id_new").isNull(), F.col("bsr_cate_1_id_new_up")).otherwise(F.col("st_bsr_cate_1_id_new")))
self.df_save = self.df_save.withColumn("st_bsr_cate_current_id_new", F.when(F.col("st_bsr_cate_current_id_new").isNull(), F.col("bsr_cate_current_id_new_up")).otherwise(F.col("st_bsr_cate_current_id_new")))
self.df_save = self.df_save.drop('bsr_cate_1_id_new_up', 'bsr_cate_current_id_new_up')
if __name__ == '__main__':
    site_name = sys.argv[1]  # arg 1: site
    date_type = sys.argv[2]  # arg 2: date type: day/week/4_week/month/quarter
    date_info = sys.argv[3]  # arg 3: year-month-day / year-week / year-month / year-quarter, e.g. 2022-1
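    # Illustrative invocation (arguments assumed from the comments above): python dim_st_detail.py us month 2022-11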
handle_obj = DwtStDetail(site_name=site_name, date_type=date_type, date_info=date_info)
handle_obj.run()