import copy
import json
import os
import random
import re
import ast
import sys
import threading
import time
import logging
import traceback
import zlib
import pandas as pd
import numpy as np
import redis
from datetime import datetime
sys.path.append("/opt/module/spark-3.2.0-bin-hadoop3.2/demo/py_demo/")
sys.path.append(os.path.dirname(sys.path[0])) # 上级目录
from sqlalchemy import create_engine
from utils.templates import Templates
# from ..utils.templates import Templates
from utils.templates_mysql import TemplatesMysql
# from ..utils.templates_mysql import TemplatesMysql
from pyspark.sql.types import IntegerType
from pyspark.sql import functions as F
from pyspark.sql.types import *
from psycopg2.errors import NumericValueOutOfRange
from sqlalchemy.exc import OperationalError, DataError, PendingRollbackError
from utils.mysql_db import sql_connect, sql_update_many, sql_delete, get_country_engine
from pyspark.sql import SparkSession
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s %(message)s', level=logging.INFO)
# from ..utils.DolphinschedulerHelper import DolphinschedulerHelper
from utils.DolphinschedulerHelper import DolphinschedulerHelper
from utils.db_util import DbTypes, DBUtil
from queue import Queue
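# Kafka -> PostgreSQL consumer for Amazon ASIN detail pages: messages are read
# from a per-site topic, converted to pandas, split into detail / variation /
# image frames, and appended to the corresponding tables. run_kafka is inherited
# from the Templates base class; its exact wiring is assumed here.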
class SpiderAsinDetail(Templates):
    def __init__(self, site_name='us', date_type="day", date_info='2022-10-01', consumer_type='latest', topic_name="us_asin_detail", batch_size_history=100000, processing_time=900):
        super(SpiderAsinDetail, self).__init__()
        self.site_name = site_name
        self.date_type = date_type
        self.date_info = date_info
        self.consumer_type = consumer_type  # consume the real-time stream ('latest') or the historical backlog ('history')
        self.topic_name = topic_name
        self.batch_size_history = batch_size_history
        self.processing_time = processing_time
        self.db_save = 'spider_asin_detail'
        # self.spark = self.create_spark_object(app_name=f"{self.db_save}: {self.site_name}, {self.date_type}, {self.date_info}, {self.consumer_type}")
        self.app_name = self.get_app_name()
        self.spark = self.create_spark_object(app_name=f"{self.app_name}")
        # derive the Kafka topic name from site_name and date_type
        self.get_topic_name()
        # derive the date variables (year_week_tuple)
        self.get_year_week_tuple()
        # connect to the databases
        # self.engine_mysql = DBUtil.get_db_engine(db_type=DbTypes.mysql.name, site_name=self.site_name)
        self.engine_pg14 = DBUtil.get_db_engine(db_type=DbTypes.postgresql_14.name, site_name=self.site_name)
        # resolve the target table names
        self.db_detail_name = str()
        self.db_variation_name = str()
        self.db_image_name = str()
        self.get_db_name()
        self.columns_detail_list = self.get_db_detail_columns()
        # self.get_db_columns()
        # build the Kafka message schema
        self.init_schema()
        # self.topic_name = topic_name  # topic name
        # self.schema = self.init_schema()
        self.pdf_type_list = ["asin_vartion_list", "img_list", "asin_detail"]
        self.chunk_size = 1000
        # queues of partition-table names
        self.part_name_queue_image = Queue()
        self.part_name_queue_variation = Queue()
# self.beginning_offsets = 2993_0000 if self.site_name == 'uk' else 0
# self.beginning_offsets = 3777_0000 if self.site_name == 'de' else 0
# self.beginning_offsets = 219_9000 if self.site_name == 'us' else 0
def init_schema(self):
self.schema = StructType([
StructField("asin", StringType(), True),
StructField("week", StringType(), True),
StructField("month", StringType(), True),
StructField("asin_vartion_list", StringType(), True),
StructField("img_list", StringType(), True),
StructField("title", StringType(), True),
StructField("img_url", StringType(), True),
StructField("rating", StringType(), True),
StructField("total_comments", StringType(), True),
StructField("price", FloatType(), True),
StructField("rank", StringType(), True),
StructField("category", StringType(), True),
StructField("launch_time", StringType(), True),
StructField("volume", StringType(), True),
StructField("weight", StringType(), True),
StructField("page_inventory", IntegerType(), True),
StructField("buy_box_seller_type", IntegerType(), True),
StructField("title_len", IntegerType(), True),
StructField("img_num", IntegerType(), True),
StructField("img_type", StringType(), True),
StructField("activity_type", StringType(), True),
StructField("one_two_val", StringType(), True),
StructField("three_four_val", StringType(), True),
StructField("eight_val", StringType(), True),
StructField("qa_num", IntegerType(), True),
StructField("five_star", IntegerType(), True),
StructField("four_star", IntegerType(), True),
StructField("three_star", IntegerType(), True),
StructField("two_star", IntegerType(), True),
StructField("one_star", IntegerType(), True),
StructField("low_star", IntegerType(), True),
StructField("together_asin", StringType(), True),
StructField("brand", StringType(), True),
StructField("ac_name", StringType(), True),
StructField("material", StringType(), True),
StructField("node_id", StringType(), True),
StructField("data_type", IntegerType(), True),
StructField("sp_num", StringType(), True),
StructField("describe", StringType(), True),
StructField("date_info", StringType(), True),
StructField("weight_str", StringType(), True),
StructField("package_quantity", StringType(), True),
StructField("pattern_name", StringType(), True),
StructField("seller_id", StringType(), True),
StructField("variat_num", IntegerType(), True),
StructField("site_name", StringType(), True),
StructField("best_sellers_rank", StringType(), True),
StructField("best_sellers_herf", StringType(), True),
StructField("account_url", StringType(), True),
StructField("account_name", StringType(), True),
StructField("parentAsin", StringType(), True),
StructField("asinUpdateTime", StringType(), True),
StructField("spider_int", StringType(), True),
StructField("follow_sellers", StringType(), True),
StructField("buy_sales", StringType(), True),
StructField("product_description", StringType(), True),
StructField("category_state", IntegerType(), True),
StructField("five_six_val", IntegerType(), True),
StructField("image_view", IntegerType(), True),
StructField("review_label_json", StringType(), True),
StructField("product_json", StringType(), True),
StructField("review_ai_text", StringType(), True),
StructField("product_detail_json", StringType(), True),
StructField("lob_asin_json", StringType(), True),
StructField("sp_initial_seen_asins_json", StringType(), True),
StructField("compare_similar_asin_json", StringType(), True),
StructField("sp_4stars_initial_seen_asins_json", StringType(), True),
StructField("customer_reviews_json", StringType(), True),
StructField("sp_delivery_initial_seen_asins_json", StringType(), True),
StructField("together_asin_json", StringType(), True),
StructField("min_match_asin_json", StringType(), True),
StructField("seller_json", StringType(), True),
StructField("variat_num", IntegerType(), True),
StructField("current_asin", StringType(), True),
])
    def get_topic_name(self):
        if self.site_name in ["us", "uk", "de"] and self.date_type == "month":
            self.topic_name = f"{self.site_name}_asin_detail_{self.date_type}_{self.date_info.replace('-', '_')}"
        else:
            self.topic_name = f"{self.site_name}_asin_detail"
def get_db_name(self):
self.db_detail_name = f"{self.site_name}_asin_detail_{self.date_info.split('-')[0]}_{self.date_info.split('-')[1]}"
self.db_detail_name = self.db_detail_name.replace("_detail", "_detail_month") if self.date_type=='month' else self.db_detail_name
self.db_variation_name = f"{self.site_name}_variat"
self.db_image_name = f"{self.site_name}_asin_image"
logging.info(f"db_detail_name:{self.db_detail_name}, db_variation_name:{self.db_variation_name}, db_image_name:{self.db_image_name}")
    def get_db_detail_columns(self):
        # LIMIT 0 returns no rows but still yields the table's column names
        sql = f"select * from {self.db_detail_name} limit 0;"
        df = pd.read_sql(sql, con=self.engine_pg14)
        columns_list = list(set(df.columns))
        # id and updated_time are maintained by the database, not the spider
        columns_list.remove("id")
        columns_list.remove("updated_time")
        return columns_list
    def field_length_dispose(self, pdf):
        pdf.price = pdf.price.apply(lambda x: round(x, 2) if x is not None else None)  # round to 2 decimal places
        pdf.ac_name = pdf.ac_name.apply(lambda x: str(x)[:100] if x is not None else None)  # truncate to column length
        pdf.brand = pdf.brand.apply(lambda x: str(x)[:100] if x is not None else None)  # truncate to column length
        pdf.title = pdf.title.apply(lambda x: str(x)[:400] if x is not None else None)  # truncate to column length
        pdf.category = pdf.category.apply(lambda x: str(x)[:400] if x is not None else None)  # truncate to column length
        # pdf.img_url = pdf.img_url.apply(lambda x: str(x)[:400] if x is not None else None)
        pdf.img_url = pdf.img_url.apply(lambda x: str(x)[:390] if x is not None else None)  # truncate to column length
        pdf.material = pdf.material.apply(lambda x: str(x)[:150] if x is not None else None)  # truncate to column length
        pdf.volume = pdf.volume.apply(lambda x: str(x)[:50] if x is not None else None)  # truncate to column length
        if self.date_type in ["month", "week"]:
            pdf.package_quantity = pdf.package_quantity.apply(lambda x: str(x)[:50] if x is not None else None)  # truncate to column length
            pdf.pattern_name = pdf.pattern_name.apply(lambda x: str(x)[:50] if x is not None else None)  # truncate to column length
            pdf.weight_str = pdf.weight_str.apply(lambda x: str(x)[:250] if x is not None else None)  # truncate to column length
        return pdf
    def start_process_instance(self):
        if self.site_name == 'us':
            # the last week of the month is handled by the monthly workflow
            # year, week = self.year_week_tuple[-1].split("-")
            # sql = f"select count(*) as st_count from {self.site_name}_brand_analytics_{year} where week={week} ;"
            # print("sql:", sql)
            # year, month = self.date_info.split("-")
            # sql = f"select count(*) from {self.site_name}_brand_analytics_month_{year} where year={year} and month={month} ;"
            # df = pd.read_sql(sql, con=self.engine_mysql)
            # if list(df.st_count)[0] >= 100_0000:
            #     process_df_name = f"{self.site_name}-月流程-ABA+反查(旧版)+流量选品(旧版)-api"
            # else:
            #     self.date_type = "month_week"
            #     process_df_name = f"{self.site_name}-30day+反查(旧版)+流量选品(旧版)-api"
            process_df_name = f"ALL站点-图片+变体表清洗"  # run the image + variation cleaning workflow first
        else:
            # process_df_name = f"{self.site_name}-ABA+反查(旧版)+流量选品(旧版)-api"
            process_df_name = f"ALL站点-图片+变体表清洗"  # run the image + variation cleaning workflow first
        print(f"process_df_name:{process_df_name}")
        DolphinschedulerHelper.start_process_instance(
            project_name="big_data_selection",
            process_df_name=process_df_name,
            startParams={
                "site_name": self.site_name,
                "date_type": self.date_type,
                "date_info": self.date_info
            },
            warning_Type="ALL"
        )
    @staticmethod
    # map an ASIN to a number so rows can be routed to a specific partition table
    def asin_to_number(asin):
        """
        Convert a 10-character ASIN string to a unique number.
        This function assumes that ASIN consists of uppercase letters and digits.
        """
        def char_to_number(char):
            if char.isdigit():
                return int(char)
            else:
                return ord(char) - 55  # 'A' -> 10, 'B' -> 11, ..., 'Z' -> 35

        if len(asin) != 10:
            raise ValueError(f"ASIN must be 10 characters long --{asin}--")
        base = 36
        asin_number = 0
        for i, char in enumerate(reversed(asin)):
            asin_number += char_to_number(char) * (base ** i)
        # take the base-36 value modulo 1 billion, giving a result in 0..999,999,999
        return asin_number % 1000000000
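    # Worked examples (sketch): treating the ASIN as a base-36 number,
    # asin_to_number("0000000001") == 1 and asin_to_number("000000000A") == 10;
    # save_data_image/save_data_variation store this value as mapped_asin.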
    @staticmethod
    # split a list evenly into n sublists
    def divide_list_into_equal_parts(lst, n):
        """
        Divide a list into n roughly equal parts.
        :param lst: List to be divided.
        :param n: Number of parts to divide into.
        :return: List of n lists.
        """
        # distribute the remainder across the first parts so no element is dropped
        part_size, remainder = divmod(len(lst), n)
        parts = []
        start = 0
        for i in range(n):
            end = start + part_size + (1 if i < remainder else 0)
            parts.append(lst[start:end])
            start = end
        return parts
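    # Example: divide_list_into_equal_parts([1, 2, 3, 4, 5], 2) -> [[1, 2, 3], [4, 5]]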
    @staticmethod
    # fan one DataFrame row out into multiple rows
    def handle_data_df_explode(pdf, pdf_type='asin_vartion_list', columns=[]):
        # parse the JSON column for the given table type
        pdf[pdf_type] = pdf[pdf_type].apply(json.loads)
        # explode the per-row lists into one entry per element
        exploded_list = pdf[pdf_type].explode()
        # flatten into one large list, dropping NaN placeholders and non-list entries
        df_type_list = [i for i in exploded_list.tolist() if not isinstance(i, float)]
        df_type_list = [i for i in df_type_list if isinstance(i, list)]
        if df_type_list:
            df = pd.DataFrame(df_type_list, columns=columns)
            return df
        else:
            return None
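    # Example (sketch, hypothetical values): a row whose img_list column holds
    # '[["B0XXXXXXXX", "https://...jpg", 1, 1]]' becomes one output row per inner
    # list, labeled with the given columns, e.g. ['asin', 'img_url', 'img_order_by', 'data_type'].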
    def save_data_image(self, pdf):
        logging.info("processing images")
        # compute the partition key for the target table
        pdf['mapped_asin'] = pdf['asin'].apply(self.asin_to_number)
        while True:
            try:
                pdf.to_sql(name=f"{self.site_name}_asin_image", con=self.engine_pg14, if_exists='append', index=False, chunksize=100_0000)
                logging.info(f"saved to {self.site_name}_asin_image, image rows: {pdf.shape} ok {pdf.head(2)}")
                break
            except Exception as e:
                print(e, traceback.format_exc())
                time.sleep(random.randint(3, 10))
                self.engine_pg14 = DBUtil.get_db_engine(db_type=DbTypes.postgresql_14.name, site_name=self.site_name)
                continue
    def save_data_variation(self, pdf):
        pdf.drop_duplicates(subset=["asin", "parent_asin"], inplace=True)
        # enforce column length limits (strip un-encodable characters first)
        pdf['color'] = pdf['color'].apply(lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None)
        pdf['size'] = pdf['size'].apply(lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None)
        pdf['style'] = pdf['style'].apply(lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None)
        pdf['column_2'] = pdf['column_2'].apply(
            lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None)
        logging.info(f"variation rows: {pdf.shape}")
        # compute the partition key for the target table
        pdf['mapped_asin'] = pdf['parent_asin'].apply(self.asin_to_number)
        while True:
            try:
                pdf.to_sql(name=f"{self.site_name}_asin_variation", con=self.engine_pg14, if_exists='append', index=False, chunksize=100_0000)
                logging.info(f"saved to {self.site_name}_asin_variation, variation rows: {pdf.shape} ok {pdf.head(2)}")
                break
            except Exception as e:
                print(e, traceback.format_exc())
                time.sleep(random.randint(3, 10))
                self.engine_pg14 = DBUtil.get_db_engine(db_type=DbTypes.postgresql_14.name, site_name=self.site_name)
                continue
    def save_data_asin_detail(self, pdf):
        print(f"{self.db_detail_name}: {pdf.columns}")
        # pdf.rename(columns={"asinUpdateTime": "created_time"}, inplace=True)
        pdf = pdf.loc[:, self.columns_detail_list]
        pdf = self.field_length_dispose(pdf)
        while True:
            try:
                # delete in batches
                # asin_tuple_all = tuple(pdf.asin)
                # for i in range(0, len(asin_tuple_all)+1, self.chunk_size):
                #     asin_tuple = asin_tuple_all[i: i+self.chunk_size]
                #     if asin_tuple:
                #         asin_tuple = asin_tuple if len(asin_tuple) > 1 else f"('{asin_tuple[0]}')"
                #         with self.engine_pg14.begin() as conn:
                #             sql_del = f"delete from {self.db_detail_name} where asin in {asin_tuple};"
                #             print("sql_del:", sql_del[:100])
                #             conn.execute(sql_del)
                # store
                pdf.to_sql(name=self.db_detail_name, con=self.engine_pg14, if_exists='append', index=False, chunksize=100_0000)
                break
            except Exception as e:
                logging.info(f"error: {e}")
                time.sleep(random.randint(5, 20))
                self.engine_pg14 = DBUtil.get_db_engine(db_type=DbTypes.postgresql_14.name, site_name=self.site_name)
                continue
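    # All three save_* methods share the same retry pattern: on any write failure
    # they sleep for a random backoff and rebuild the PostgreSQL engine before
    # retrying, so a dropped connection does not lose the batch.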
    def save_data_common(self, pdf, pdf_type):
        logging.info(f"processing {pdf_type}")
        start_time = time.time()
        pdf.rename(columns={"asinUpdateTime": "created_time"}, inplace=True)
        if pdf_type == 'asin_vartion_list':
            columns_list = ['asin', 'color', 'parent_asin', 'size', 'state', 'style', 'column_2']  # , 'created_time'
            pdf = self.handle_data_df_explode(pdf, pdf_type=pdf_type, columns=columns_list)
            if pdf is not None:
                pdf['asin'] = pdf['asin'].apply(lambda x: str(x).replace('/', ''))
                self.save_data_variation(pdf=pdf)
        elif pdf_type == "img_list":
            columns_list = ['asin', 'img_url', 'img_order_by', 'data_type']  # , 'created_time'
            pdf = self.handle_data_df_explode(pdf, pdf_type=pdf_type, columns=columns_list)
            if pdf is not None:
                pdf['asin'] = pdf['asin'].apply(lambda x: str(x).replace('/', ''))
                self.save_data_image(pdf=pdf)
        elif pdf_type == "asin_detail":
            # if self.site_name != 'us' and self.date_type != 'month':
            self.save_data_asin_detail(pdf=pdf)
        logging.info(f"{pdf_type}: took {time.time() - start_time}s")
    def save_data(self, pdf):
        threads = []
        for pdf_type in self.pdf_type_list:
            # give each thread its own copy: save_data_common mutates the frame in place
            thread = threading.Thread(target=self.save_data_common, args=(pdf.copy(), pdf_type))
            threads.append(thread)
            thread.start()
        for thread in threads:
            thread.join()
        logging.info("all save threads finished")
    def handle_kafka_df(self, kafka_df):
        kafka_df.show(20)
        # convert the PySpark DataFrame into a pandas DataFrame
        pdf = kafka_df.toPandas()
        # pdf['asin'] = pdf['asin'].str.replace('/', '', regex=True)
        pdf['asin'] = pdf['asin'].apply(lambda x: str(x).replace('/', ''))  # scrub dirty ASINs
        # print(111111111111, pdf.loc[pdf.asin.str.contains("/")], pdf.loc[pdf.asin.str.contains("/")].shape)
        # de-duplicate
        pdf = pdf.drop_duplicates(['asin'])
        # drop rows that do not belong to the current period
        pdf = pdf.loc[(~pdf.date_info.isna()) & (pdf.date_info == self.date_info)]
        if pdf.shape[0]:
            logging.info(f"{pdf.keys()}")
            logging.info(f"----------------------------")
            if self.date_type == "day":
                logging.info("processing daily data")
                img_columns = ['asin', 'img_url', 'img_order_by', 'data_type', 'created_time']
                img_df = self.handle_data_df_explode(pdf, pdf_type='img_list', columns=img_columns)
                if img_df is not None and img_df.shape[0]:
                    self.save_data_image(pdf=img_df)
                vartion_columns = ['asin', 'color', 'parent_asin', 'size', 'state', 'style', 'column_2', 'created_time']
                vartion_df = self.handle_data_df_explode(pdf, pdf_type='asin_vartion_list', columns=vartion_columns)
                if vartion_df is not None and vartion_df.shape[0]:
                    self.save_data_variation(pdf=vartion_df)
                # detail_col is assumed to be provided by the Templates base class
                df = pdf[self.detail_col]
                df['site'] = df['site'].fillna(self.site_name)
                df.drop_duplicates(['asin', 'site'], inplace=True)
                now_date = time.strftime("%Y-%m-%d", time.gmtime(time.time()))
                # target table for the detail rows
                detail_table_data_info = f"{self.site_name}_self_asin_detail"
                for name, group in df.groupby(['site']):
                    asins = list(group["asin"])
                    if asins:
                        if len(asins) == 1:
                            sql_del = f"delete from `{detail_table_data_info}` where `asin`= '{asins[0]}' and `site`='{name[0]}' and created_at>='{now_date}';"
                        else:
                            sql_del = f"delete from `{detail_table_data_info}` where `asin` in {tuple(asins)} and `site`='{name[0]}' and created_at>='{now_date}';"
                        logging.info(f"{name}, {sql_del}")
                        sql_delete(sql_del)
                        logging.info(f"cleared rows for {asins[0:10]} from {detail_table_data_info}")
                df.to_sql(name=f'{detail_table_data_info}', con=self.engine_pg14, if_exists='append', index=False)
                logging.info(f"saved to {detail_table_data_info} ok {df.head(10)}")
            else:
                self.save_data(pdf=pdf)
        else:
            logging.info(f"{pdf.shape}")
def handle_kafka_history(self, kafka_df):
# kafka_df = kafka_df.withColumn("asin", F.regexp_replace("asin", "/", ""))
# kafka_df = kafka_df.withColumn("asin", F.translate("asin", "/", ""))
self.handle_kafka_df(kafka_df)
def handle_kafka_stream(self, kafka_df, epoch_id):
# kafka_df = kafka_df.withColumn("asin", F.regexp_replace("asin", "/", ""))
# kafka_df = kafka_df.withColumn("asin", F.translate("asin", "/", ""))
self.handle_kafka_df(kafka_df)
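    # handle_kafka_stream matches Spark Structured Streaming's foreachBatch(df, epoch_id)
    # callback signature; run_kafka (inherited from Templates) is assumed to wire up
    # the history and streaming paths to these two handlers respectively.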
if __name__ == '__main__':
    site_name = sys.argv[1]  # arg 1: site, e.g. us/uk/de
    # batch_size_history = 15000 if site_name == 'us' else 10000
    batch_size_history = 50000
    date_type = sys.argv[2]  # arg 2: period type: week/4_week/month/quarter/day
    date_info = sys.argv[3]  # arg 3: year-week/year-month/year-quarter/year-month-day, e.g. 2022-1
    consumer_type = sys.argv[4]  # arg 4: 'latest' for real-time, 'history' for backlog
    # us day date_info 2023-11-07
    handle_obj = SpiderAsinDetail(site_name=site_name, date_type=date_type, date_info=date_info, consumer_type=consumer_type, batch_size_history=batch_size_history)
    handle_obj.run_kafka()
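# Example invocation (hypothetical script name):
#   spark-submit spider_asin_detail.py us month 2023-11 latest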