import copy import json import os import random import re import ast import sys import threading import time import logging import traceback import zlib import pandas as pd import numpy as np import redis from datetime import datetime sys.path.append("/opt/module/spark-3.2.0-bin-hadoop3.2/demo/py_demo/") sys.path.append(os.path.dirname(sys.path[0])) # 上级目录 from sqlalchemy import create_engine from utils.templates import Templates # from ..utils.templates import Templates from utils.templates_mysql import TemplatesMysql # from ..utils.templates_mysql import TemplatesMysql from pyspark.sql.types import IntegerType from pyspark.sql import functions as F from pyspark.sql.types import * from psycopg2.errors import NumericValueOutOfRange from sqlalchemy.exc import OperationalError, DataError, PendingRollbackError from utils.mysql_db import sql_connect, sql_update_many, sql_delete, get_country_engine from pyspark.sql import SparkSession logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s %(message)s', level=logging.INFO) # from ..utils.DolphinschedulerHelper import DolphinschedulerHelper from utils.DolphinschedulerHelper import DolphinschedulerHelper from utils.db_util import DbTypes, DBUtil from queue import Queue class SpiderAsinDetail(Templates): def __init__(self, site_name='us', date_type="day", date_info='2022-10-01', consumer_type='lastest', topic_name="us_asin_detail", batch_size_history=100000, processing_time=900): super(SpiderAsinDetail, self).__init__() self.site_name = site_name self.date_type = date_type self.date_info = date_info self.consumer_type = consumer_type # 消费实时还是消费历史 self.topic_name = topic_name self.batch_size_history = batch_size_history self.processing_time = processing_time self.db_save = f'spider_asin_detail' self.spark = self.create_spark_object(app_name=f"{self.db_save}: {self.site_name}, {self.date_type}, {self.date_info}, {self.consumer_type}") # 通过date_type 获取 topic self.get_topic_name() # 获取日期变量 self.get_year_week_tuple() # 连接数据库 self.engine_mysql = DBUtil.get_db_engine(db_type=DbTypes.mysql.name, site_name=self.site_name) self.engine_pg14 = DBUtil.get_db_engine(db_type=DbTypes.postgresql_14.name, site_name=self.site_name) # 获取数据库表名 self.db_detail_name = str() self.db_variation_name = str() self.db_image_name = str() self.get_db_name() self.columns_detail_list = self.get_db_detail_columns() # self.get_db_columns() # 通过date_type 获取 schema self.init_schema() # self.topic_name = topic_name # 主题名字 # self.schema = self.init_schema() self.pdf_type_list = ["asin_vartion_list", "img_list", "asin_detail"] self.chunk_size = 1000 # 创建分区表队列 self.part_name_queue_image = Queue() self.part_name_queue_variation = Queue() self.beginning_offsets = 338_0000 if self.site_name == 'us' else 0 def init_schema(self): self.schema = StructType([ StructField("asin", StringType(), True), StructField("week", StringType(), True), StructField("month", StringType(), True), StructField("asin_vartion_list", StringType(), True), StructField("img_list", StringType(), True), StructField("title", StringType(), True), StructField("img_url", StringType(), True), StructField("rating", StringType(), True), StructField("total_comments", StringType(), True), StructField("price", FloatType(), True), StructField("rank", StringType(), True), StructField("category", StringType(), True), StructField("launch_time", StringType(), True), StructField("volume", StringType(), True), StructField("weight", StringType(), True), StructField("page_inventory", IntegerType(), True), StructField("buy_box_seller_type", IntegerType(), True), StructField("title_len", IntegerType(), True), StructField("img_num", IntegerType(), True), StructField("img_type", StringType(), True), StructField("activity_type", StringType(), True), StructField("one_two_val", StringType(), True), StructField("three_four_val", StringType(), True), StructField("eight_val", StringType(), True), StructField("qa_num", IntegerType(), True), StructField("five_star", IntegerType(), True), StructField("four_star", IntegerType(), True), StructField("three_star", IntegerType(), True), StructField("two_star", IntegerType(), True), StructField("one_star", IntegerType(), True), StructField("low_star", IntegerType(), True), StructField("together_asin", StringType(), True), StructField("brand", StringType(), True), StructField("ac_name", StringType(), True), StructField("material", StringType(), True), StructField("node_id", StringType(), True), StructField("data_type", IntegerType(), True), StructField("sp_num", StringType(), True), StructField("describe", StringType(), True), StructField("date_info", StringType(), True), StructField("weight_str", StringType(), True), StructField("package_quantity", StringType(), True), StructField("pattern_name", StringType(), True), StructField("seller_id", StringType(), True), StructField("variat_num", IntegerType(), True), StructField("site_name", StringType(), True), StructField("best_sellers_rank", StringType(), True), StructField("best_sellers_herf", StringType(), True), StructField("account_url", StringType(), True), StructField("account_name", StringType(), True), StructField("parentAsin", StringType(), True), StructField("asinUpdateTime", StringType(), True), StructField("spider_int", StringType(), True), StructField("follow_sellers", StringType(), True), StructField("buy_sales", StringType(), True), StructField("product_description", StringType(), True), StructField("category_state", IntegerType(), True), StructField("five_six_val", IntegerType(), True), StructField("image_view", IntegerType(), True), StructField("review_label_json", StringType(), True), StructField("product_json", StringType(), True), StructField("review_ai_text", StringType(), True), StructField("product_detail_json", StringType(), True), StructField("lob_asin_json", StringType(), True), StructField("sp_initial_seen_asins_json", StringType(), True), StructField("sp_4stars_initial_seen_asins_json", StringType(), True), StructField("sp_delivery_initial_seen_asins_json", StringType(), True), StructField("compare_similar_asin_json", StringType(), True), StructField("customer_reviews_json", StringType(), True), StructField("together_asin_json", StringType(), True), StructField("min_match_asin_json", StringType(), True), StructField("seller_json", StringType(), True), ]) def get_topic_name(self): # 需要注意表名问题22 if self.date_type == "month" and self.site_name == 'us': # 月表主题 self.topic_name = f"{self.site_name}_asin_detail_month_{self.date_info.replace('-', '_')}" elif self.date_type == "week": # 周表主题 self.topic_name = f"{self.site_name}_asin_detail" elif self.date_type == "day": # 天表主题 self.topic_name = f"{self.site_name}_self_asin_detail" else: print("date_type传参有问题,中断程序") quit() def get_db_name(self): self.db_detail_name = f"{self.site_name}_asin_detail_{self.date_info.split('-')[0]}_{self.date_info.split('-')[1]}" self.db_detail_name = self.db_detail_name.replace("_detail", "_detail_month") if self.date_type=='month' else self.db_detail_name self.db_variation_name = f"{self.site_name}_variat" self.db_image_name = f"{self.site_name}_asin_image" logging.info(f"db_detail_name:{self.db_detail_name}, db_variation_name:{self.db_variation_name}, db_image_name:{self.db_image_name}") def get_db_detail_columns(self): sql = f"select * from {self.db_detail_name} limit 0;" df = pd.read_sql(sql, con=self.engine_pg14) columns_list = list(set(df.columns)) columns_list.remove("id") columns_list.remove("updated_time") return columns_list def field_length_dispose(self, pdf): pdf.price = pdf.price.apply(lambda x: round(x, 2) if x is not None else None) # 截取字符 pdf.ac_name = pdf.ac_name.apply(lambda x: str(x)[:100] if x is not None else None) # 截取字符 pdf.brand = pdf.brand.apply(lambda x: str(x)[:100] if x is not None else None) # 截取字符 pdf.title = pdf.title.apply(lambda x: str(x)[:400] if x is not None else None) # 截取字符 pdf.category = pdf.category.apply(lambda x: str(x)[:400] if x is not None else None) # 截取字符 # pdf.img_url = pdf.img_url.apply(lambda x: str(x)[:400] if x is not None else None) # 截取字符 pdf.img_url = pdf.img_url.apply(lambda x: str(x)[:390] if x is not None else None) # 截取字符 pdf.material = pdf.material.apply(lambda x: str(x)[:150] if x is not None else None) # 截取字符 pdf.volume = pdf.volume.apply(lambda x: str(x)[:50] if x is not None else None) # 截取字符 if self.date_type in ["month", "week"]: pdf.package_quantity = pdf.package_quantity.apply(lambda x: str(x)[:50] if x is not None else None) # 截取字符 pdf.pattern_name = pdf.pattern_name.apply(lambda x: str(x)[:50] if x is not None else None) # 截取字符 pdf.weight_str = pdf.weight_str.apply(lambda x: str(x)[:250] if x is not None else None) # 截取字符 return pdf def start_process_instance(self): if site_name == 'us': # 最后一周走月流程 # year, week = self.year_week_tuple[-1].split("-") # sql = f"select count(*) as st_count from {self.site_name}_brand_analytics_{year} where week={week} ;" # print("sql:", sql) # year, month = self.date_info.split("-") # sql = f"select count(*) from {self.site_name}_brand_analytics_month_{year} where year={year} and month={month} ;" # df = pd.read_sql(sql, con=self.engine_mysql) # if list(df.st_count)[0] >= 100_0000: # process_df_name = f"{site_name}-月流程-ABA+反查(旧版)+流量选品(旧版)-api" # else: # self.date_type = "month_week" # process_df_name = f"{site_name}-30day+反查(旧版)+流量选品(旧版)-api" process_df_name = f"ALL站点-图片+变体表清洗" # 先走变体清洗 else: # process_df_name = f"{site_name}-ABA+反查(旧版)+流量选品(旧版)-api" process_df_name = f"ALL站点-图片+变体表清洗" # 先走变体清洗 print(f"process_df_name:{process_df_name}") DolphinschedulerHelper.start_process_instance( project_name="big_data_selection", process_df_name=process_df_name, startParams={ "site_name": self.site_name, "date_type": self.date_type, "date_info": self.date_info }, warning_Type="ALL" ) @staticmethod # 将asin转换成数值--从而可以划分指定分区表 def asin_to_number(asin): """ Convert a 10-character ASIN string to a unique number. This function assumes that ASIN consists of uppercase letters and digits. """ def char_to_number(char): if char.isdigit(): return int(char) else: return ord(char) - 55 # 'A' -> 10, 'B' -> 11, ..., 'Z' -> 35 if len(asin) != 10: raise ValueError("ASIN must be 10 characters long") base = 36 asin_number = 0 for i, char in enumerate(reversed(asin)): asin_number += char_to_number(char) * (base ** i) # The final number is taken modulo 1 billion to fit the range 1-10 billion return asin_number % 1000000000 @staticmethod # 列表均匀拆分成多个列表 def divide_list_into_equal_parts(lst, n): """ Divide a list into n equal parts. :param lst: List to be divided. :param n: Number of parts to divide into. :return: List of n lists. """ # Calculate the size of each part part_size = len(lst) // n return [lst[i * part_size:(i + 1) * part_size] for i in range(n)] @staticmethod # 将df对象的一行裂变成多行 def handle_data_df_explode(pdf, pdf_type='asin_vartion_list', columns=[]): # 根据不同表类型解析df对象 pdf[pdf_type] = pdf[pdf_type].apply(json.loads) # 对对应数据进行处理,将df_type内列表展开 exploded_list = pdf[pdf_type].explode() # 展开后转换为一个大列表 df_type_list = [i for i in exploded_list.tolist() if not isinstance(i, float)] df_type_list = [i for i in df_type_list if isinstance(i, list)] if df_type_list: df = pd.DataFrame(df_type_list, columns=columns) return df else: return None def save_data_image_common(self, df, thread_id): while True: # 从队列里面获取一个part_name if not self.part_name_queue_image.empty(): part_name = self.part_name_queue_image.get(timeout=3) # 设置超时以避免线程永久阻塞 df_del = df.loc[df.part_name == part_name] df_del.drop(columns=['part_name', 'site'], inplace=True) asin_tuple = list(set(df_del.asin)) logging.info(f"thread_id: {thread_id}, 图片表删除: {df_del.shape}, len: {len(asin_tuple)} {part_name}") # chunk_size = 500 # split_list = [asin_tuple[i:i + chunk_size] for i in range(0, len(asin_tuple), chunk_size)] while True: try: # with self.engine_pg14.begin() as conn: # for asin_list in split_list: # if len(asin_list) == 1: # sql_del = f"delete from {part_name} where asin in ('{asin_list[0]}');" # else: # sql_del = f"delete from {part_name} where asin in {tuple(asin_list)}" # logging.info(f"删除 {part_name} 表中数据, sql: {sql_del[0:100]}") # conn.execute(sql_del) df_del.to_sql(name=f"{part_name}", con=self.engine_pg14, if_exists='append', index=False) logging.info(f"入库{part_name} 成功 {df_del.head(10)}") break except Exception as e: print(e, traceback.format_exc()) time.sleep(random.randint(3, 10)) self.engine_pg14 = DBUtil.get_db_engine(db_type=DbTypes.postgresql_14.name, site_name=self.site_name) continue else: break def save_data_image_common_old(self, df, part_name): # self.engine_pg14 = DBUtil.get_db_engine(db_type=DbTypes.postgresql_14.name, site_name=self.site_name) df_del = df.loc[df.part_name == part_name] df_del.drop(columns=['part_name', 'site'], inplace=True) # mapped_asin_tuple = list(set(df_del.mapped_asin)) asin_tuple = list(set(df_del.asin)) logging.info(f"图片表删除: {df_del.shape}, len: {len(asin_tuple)} {part_name}") # sql_del # chunk_size = 500 if self.site_name != 'us' else 100 chunk_size = 100 split_list = [asin_tuple[i:i + chunk_size] for i in range(0, len(asin_tuple), chunk_size)] while True: try: for i in split_list: with self.engine_pg14.begin() as conn: if len(i) == 1: sql_del = f"delete from {part_name} where asin in ('{i[0]}');" else: sql_del = f"delete from {part_name} where asin in {tuple(i)}" logging.info(f"清理 {part_name} 表中数据, sql: {sql_del[0:100]}") conn.execute(sql_del) # try: logging.info(f"{df_del.keys()}") # df_del.drop(columns=['site'], inplace=True) # df_del.drop(columns=['part_name', 'site'], inplace=True) # df_del.to_csv(f"/root/{self.site_name}_asin_mapped_image_{time.time()}.csv") df_del.to_sql(name=f"{part_name}", con=self.engine_pg14, if_exists='append', index=False) logging.info(f"入库{part_name} 成功 {df.head(10)}") break except Exception as e: # logging.info(f"img入库字段超过长度 {e}") # df_del.to_csv(f"/root/{self.site_name}_asin_image_{time.time()}.csv") print(e, traceback.format_exc()) time.sleep(random.randint(3, 10)) self.engine_pg14 = DBUtil.get_db_engine(db_type=DbTypes.postgresql_14.name, site_name=self.site_name) continue def save_data_image(self, pdf): logging.info("img处理") # 获取对应表字段 if "site" not in pdf.keys(): pdf["site"] = self.site_name logging.info("site is not null") pdf["site"] = pdf['site'].fillna(self.site_name) pdf['mapped_asin'] = pdf['asin'].apply(self.asin_to_number) # if self.site_name == "us": # pdf['part_name'] = pdf.mapped_asin.apply(lambda x: f"{self.site_name}_asin_image_part{int(x / 1000_0000) + 1}_{int(x % 1000_0000 / 200_0000) + 1}") # else: # # pdf['part_name'] = pdf.mapped_asin.apply(lambda x: f"{self.site_name}_asin_image_part{int(x / 500_0000) + 1}") # pdf['part_name'] = pdf.mapped_asin.apply(lambda x: f"{self.site_name}_asin_image_part{int(x / 1000_0000) + 1}") pdf['part_name'] = pdf.mapped_asin.apply(lambda x: f"{self.site_name}_asin_image_part{int(x / 1000_0000) + 1}") # int(522 % 100 / 20) + 1 # Example usage part_name_list = list(set(pdf.part_name)) print(f"当前批次图片要处理{len(part_name_list)}张图片表") # 将列表元素放入队列 for item in part_name_list: self.part_name_queue_image.put(item) # 多线程调用图片处理公共方法 threads = [] for thread_id in range(10): thread = threading.Thread(target=self.save_data_image_common, args=(pdf, thread_id)) threads.append(thread) thread.start() for thread in threads: thread.join() logging.info(f"多线程图片处理完成") def save_data_image_old(self, pdf): logging.info("img处理") # 获取对应表字段 if "site" not in pdf.keys(): pdf["site"] = self.site_name logging.info("site is not null") pdf["site"] = pdf['site'].fillna(self.site_name) pdf['mapped_asin'] = pdf['asin'].apply(self.asin_to_number) # if self.site_name == "us": # pdf['part_name'] = pdf.mapped_asin.apply(lambda x: f"{self.site_name}_asin_image_part{int(x / 1000_0000) + 1}_{int(x % 1000_0000 / 200_0000) + 1}") # else: # pdf['part_name'] = pdf.mapped_asin.apply(lambda x: f"{self.site_name}_asin_image_part{int(x / 1000_0000) + 1}") pdf['part_name'] = pdf.mapped_asin.apply(lambda x: f"{self.site_name}_asin_image_part{int(x / 1000_0000) + 1}") # int(522 % 100 / 20) + 1 # Example usage part_name_list = list(set(pdf.part_name)) n = 15 if self.site_name == 'us' else 15 part_name_list = self.divide_list_into_equal_parts(part_name_list, n) for p_list in part_name_list: index = part_name_list.index(p_list) threads = [] for part_name in p_list: thread = threading.Thread(target=self.save_data_image_common, args=(pdf, part_name)) threads.append(thread) thread.start() for thread in threads: thread.join() logging.info(f"第{index+1}批次图片处理完成") def save_data_variation_common(self, pdf, thread_id): while True: # 从队列里面获取一个part_name if not self.part_name_queue_variation.empty(): part_name = self.part_name_queue_variation.get(timeout=3) # 设置超时以避免线程永久阻塞 df_part = pdf.loc[pdf.part_name == part_name] df_part.drop(columns=['part_name'], inplace=True) asin_tuple = list(set(df_part.parent_asin)) logging.info(f"thread_id: {thread_id}, 变体表删除: {df_part.shape}, len: {len(asin_tuple)} {part_name}") # chunk_size = 500 # split_list = [asin_tuple[i:i + chunk_size] for i in range(0, len(asin_tuple), chunk_size)] while True: try: # with self.engine_pg14.begin() as conn: # for asin_list in split_list: # if len(asin_list) == 1: # sql_del = f"delete from {part_name} where parent_asin in ('{asin_list[0]}');" # else: # sql_del = f"delete from {part_name} where parent_asin in {tuple(asin_list)}" # logging.info(f"删除 {part_name} 表中数据, sql: {sql_del[0:100]}") # conn.execute(sql_del) df_part.to_sql(name=f"{part_name}", con=self.engine_pg14, if_exists='append', index=False) logging.info(f"入库{part_name} 成功 {df_part.head(10)}") break except Exception as e: print(e, traceback.format_exc()) time.sleep(random.randint(3, 10)) self.engine_pg14 = DBUtil.get_db_engine(db_type=DbTypes.postgresql_14.name, site_name=self.site_name) continue else: break def save_data_variation(self, pdf): pdf.drop_duplicates(subset=["asin", "parent_asin"], inplace=True) # 处理字段长度问题 pdf['color'] = pdf['color'].apply(lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None) pdf['size'] = pdf['size'].apply(lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None) pdf['style'] = pdf['style'].apply(lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None) pdf['column_2'] = pdf['column_2'].apply( lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None) logging.info(f"变体数量为:{pdf.shape}") # 处理分区名称问题 pdf['mapped_asin'] = pdf['parent_asin'].apply(self.asin_to_number) # pdf['part_name'] = pdf.mapped_asin.apply(lambda x: f"{self.site_name}_variat_part{int(x / 10000_0000) + 1}_{int(x % 10000_0000 / 500_0000) + 1}") pdf['part_name'] = pdf.mapped_asin.apply(lambda x: f"{self.site_name}_asin_variation_part{int(x / 1000_0000) + 1}") part_name_list = list(set(pdf.part_name)) print(f"当前批次变体要处理{len(part_name_list)}张变体表") # 将列表元素放入队列 for item in part_name_list: self.part_name_queue_variation.put(item) # 多线程调用变体处理公共方法 threads = [] for thread_id in range(10): thread = threading.Thread(target=self.save_data_variation_common, args=(pdf, thread_id)) threads.append(thread) thread.start() for thread in threads: thread.join() logging.info(f"多线程变体处理完成") def save_data_variation_old(self, pdf): pdf.drop_duplicates(subset=["asin", "parent_asin"], inplace=True) asins = list(set(pdf["parent_asin"])) logging.info(f"{pdf}") if asins: # chunk_size = 1000 split_list = [asins[i:i + self.chunk_size] for i in range(0, len(asins), self.chunk_size)] for chunk in split_list: if len(chunk) == 1: sql_del = f"delete from `{self.db_variation_name}` where parent_asin in ('{tuple(chunk)[0]}');" else: sql_del = f"delete from `{self.db_variation_name}` where parent_asin in {tuple(chunk)};" logging.info(f"sql: {sql_del[0:100]}") # for i in range(5): while True: try: with self.engine_mysql.begin() as conn: conn.execute(sql_del) logging.info(f"清理 {self.db_variation_name} 表中数据 {chunk[0:10]} 清理 {self.db_variation_name} 表中数据") break except Exception as e: print(e, traceback.format_exc(e)) time.sleep(random.randint(3, 10)) self.engine_mysql = DBUtil.get_db_engine(db_type=DbTypes.mysql.name, site_name=self.site_name) continue # row_id = sql_delete(sql_del) # if row_id == -1: # logging.info(f"删除失败 {self.db_variation_name} 表中数据 {chunk}") # time.sleep(6) # sql_connect(self.site_name) # continue # else: # logging.info(f"清理 {self.db_variation_name} 表中数据 {chunk[0:10]} 清理 {self.db_variation_name} 表中数据") # break pdf['color'] = pdf['color'].apply(lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None) pdf['size'] = pdf['size'].apply(lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None) pdf['style'] = pdf['style'].apply(lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None) pdf['column_2'] = pdf['column_2'].apply(lambda x: x.encode('utf-8', 'ignore').decode('utf-8')[:180] if x else None) logging.info(f"变体数量为:{pdf.shape}") for i in range(3): try: pdf.to_sql(name=f'{self.db_variation_name}', con=self.engine_mysql, if_exists='append', index=False) logging.info(f"入库 {self.db_variation_name} 成功 {pdf.head(10)}") break except PendingRollbackError as e: logging.info(f"链接错误 重试{e}") continue def save_data_asin_detail(self, pdf): print(f"{self.db_detail_name}: {pdf.columns}") pdf.rename(columns={"asinUpdateTime": "created_time"}, inplace=True) pdf = pdf.loc[:, self.columns_detail_list] pdf = self.field_length_dispose(pdf) while True: try: # 分批次删除 asin_tuple_all = tuple(pdf.asin) for i in range(0, len(asin_tuple_all)+1, self.chunk_size): asin_tuple = asin_tuple_all[i: i+self.chunk_size] if asin_tuple: asin_tuple = asin_tuple if len(asin_tuple) > 1 else f"('{asin_tuple[0]}')" with self.engine_pg14.begin() as conn: sql_del = f"delete from {self.db_detail_name} where asin in {asin_tuple};" print("sql_del:", sql_del[:100]) conn.execute(sql_del) # 存储 pdf.to_sql(name=self.db_detail_name, con=self.engine_pg14, if_exists='append', index=False) break except Exception as e: logging.info(f"error: {e}") time.sleep(random.randint(5, 20)) self.engine_pg14 = DBUtil.get_db_engine(db_type=DbTypes.postgresql_14.name, site_name=self.site_name) continue def save_data_common(self, pdf, pdf_type): logging.info(f"{pdf_type} 处理") start_time = time.time() if pdf_type == 'asin_vartion_list': columns_list = ['asin', 'color', 'parent_asin', 'size', 'state', 'style', 'column_2'] pdf = self.handle_data_df_explode(pdf, pdf_type=pdf_type, columns=columns_list) pdf.asin = pdf.asin.apply(lambda x: str(x)[:10]) self.save_data_variation(pdf=pdf) elif pdf_type == "img_list": columns_list = ['asin', 'img_url', 'img_order_by', 'data_type'] pdf = self.handle_data_df_explode(pdf, pdf_type=pdf_type, columns=columns_list) pdf.asin = pdf.asin.apply(lambda x: str(x)[:10]) self.save_data_image(pdf=pdf) elif pdf_type == "asin_detail": if self.site_name != 'us' and self.date_type != 'month': self.save_data_asin_detail(pdf=pdf) logging.info(f"{pdf_type}: 耗时 -- {time.time() - start_time}") def save_data(self, pdf): threads = [] for pdf_type in self.pdf_type_list: thread = threading.Thread(target=self.save_data_common, args=(pdf, pdf_type)) threads.append(thread) thread.start() for thread in threads: thread.join() logging.info("所有线程处理完成") def handle_kafka_df(self, kafka_df): kafka_df.show(20) # kafka_df.filter("asin in ('B0D1K8WQ9P', 'B0C3H46VM8')").show(20) # pyspark的kafka_df对象转换成pandas的df对象 pdf = kafka_df.toPandas() # 去重 pdf = pdf.drop_duplicates(['asin']) # 过滤--不符合当前周期的数据 pdf = pdf.loc[(~pdf.date_info.isna()) & (pdf.date_info == self.date_info)] if pdf.shape[0]: logging.info(f"{pdf.keys()}") logging.info(f"----------------------------") if self.date_type == "day": logging.info(f"天数据处理") img_columns = ['asin', 'img_url', 'img_order_by', 'data_type'] img_df = self.handle_data_df(pdf, df_type='asin_vartion_list', columns=img_columns) if img_df.shape[0]: self.save_data_image(df=img_df) vartion_columns = ['asin', 'color', 'parent_asin', 'size', 'state', 'style', 'column_2'] vartion_df = self.handle_data_df(pdf, df_type='asin_vartion_list', columns=vartion_columns) if vartion_df.shape[0]: self.save_data_variation(df=vartion_df) df = pdf[self.detail_col] df['site'] = df['site'].fillna(self.site_name) df.drop_duplicates(['asin', 'site'], inplace=True) now_date = time.strftime("%Y-%m-%d", time.gmtime(time.time())) detail_table_data_info = f"{self.site_name}_self_asin_detail" for name, group in df.groupby(['site']): asins = list(group["asin"]) # 详情入库表名 if asins: if len(asins) == 1: sql_del = f"delete from `{detail_table_data_info}` where `asin`= '{asins[0]}' and `site`='{name[0]}' and created_at>='{now_date}';" else: sql_del = f"delete from `{detail_table_data_info}` where `asin` in {tuple(asins)} and `site`='{name[0]}' and created_at>='{now_date}';" logging.info(f"{name}, {sql_del}") sql_delete(sql_del) logging.info(f"清理 {detail_table_data_info} 表中 {asins[0:10]} 数据") df.to_sql(name=f'{detail_table_data_info}', con=self.engine_pg14, if_exists='append', index=False) logging.info(f"入库 {detail_table_data_info} 成功 {df.head(10)}") else: self.save_data(pdf=pdf) else: logging.info(f"{pdf.shape}") def handle_kafka_history(self, kafka_df): self.handle_kafka_df(kafka_df) def handle_kafka_stream(self, kafka_df, epoch_id): self.handle_kafka_df(kafka_df) if __name__ == '__main__': site_name = sys.argv[1] # 参数1:站点 batch_size_history = 15000 if site_name == 'us' else 10000 date_type = sys.argv[2] # 参数2:类型:week/4_week/month/quarter/day date_info = sys.argv[3] # 参数3:年-周/年-月/年-季/年-月-日, 比如: 2022-1 consumer_type = sys.argv[4] # 参数4:实时 latest 历史 history # us day date_info 2023-11-07 handle_obj = SpiderAsinDetail(site_name=site_name, date_type=date_type, date_info=date_info, consumer_type=consumer_type, batch_size_history=batch_size_history) handle_obj.run_kafka()