import ast
import os
import sys

import pandas as pd

os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
sys.path.append(os.path.dirname(sys.path[0]))  # add parent directory so utils can be imported
from utils.templates import Templates
# from ..utils.templates import Templates
from pyspark.sql import functions as F
from pyspark.sql.types import ArrayType, FloatType


class PicturesDimFeaturesSlice(Templates):

    def __init__(self, site_name='us'):
        super(PicturesDimFeaturesSlice, self).__init__()
        self.site_name = site_name
        self.db_save = 'pictures_dim_features_slice'
        self.spark = self.create_spark_object(
            app_name=f"{self.db_save}: {self.site_name}")
        # Placeholder DataFrames; populated in read_data()
        self.df_asin_features = self.spark.sql("select 1+1;")
        self.df_save = self.spark.sql("select 1+1;")
        # self.partitions_by = ['site_name', 'block']
        self.partitions_by = ['site_name']
        self.partitions_num = 1000

    def read_data(self):
        # sql = "select id, asin, img_vector as embedding from ods_asin_extract_features;"
        sql = "select id, asin, features as embedding from pictures_ods_features;"
        print("sql:", sql)
        self.df_save = self.spark.sql(sql).cache()
        self.df_save.show(10)
        # The array-typed column does not need to be built at this step
        # partitions_num = self.df_asin_features.rdd.getNumPartitions()
        # print("partition count:", partitions_num)  # 642
        # # self.partitions_num = 1000
        # self.df_save = self.df_save.repartition(self.partitions_num)
        # print("partition count after repartition:", self.partitions_num)  # 642

    def handle_data(self):
        # Define a UDF that parses the string-serialized vector into a list of floats
        # str_to_list_udf = F.udf(lambda s: ast.literal_eval(s), ArrayType(FloatType()))
        # # Apply the UDF to the embedding column
        # self.df_save = self.df_save.withColumn("embedding", str_to_list_udf(self.df_save["embedding"]))
        self.df_save = self.df_save.withColumn('site_name', F.lit(self.site_name))


if __name__ == '__main__':
    site_name = sys.argv[1]  # arg 1: site
    handle_obj = PicturesDimFeaturesSlice(site_name=site_name)
    handle_obj.run()
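
# Usage sketch (assumes the job is launched with spark-submit and that the
# inherited Templates.run() drives read_data/handle_data and the save step,
# per this repo's conventions):
#   spark-submit pictures_dim_features_slice.py us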