search_02.py
import os
import re
import sys
import time

import faiss
import numpy as np
import pandas as pd

sys.path.append(os.path.dirname(sys.path[0]))  # add the parent directory to the import path
from pyspark.storagelevel import StorageLevel
from pyspark.sql import SparkSession
from pyspark.sql.functions import pandas_udf, split, udf, PandasUDFType
from pyspark import SparkFiles
from pyspark.sql.types import ArrayType, FloatType
from utils.templates import Templates
# from ..utils.templates import Templates

# # Define a pandas UDF that loads the Faiss index on each partition and runs the query there
# @pandas_udf('int', PandasUDFType.SCALAR)
# def find_nearest_neighbors(series):
#     # Load the index from local disk
#     index = faiss.read_index("/home/ffman/tmp/my_index.faiss")
#     # Search for the 5 nearest neighbours of each embedding
#     _, I = index.search(np.array(series.tolist()).astype('float32'), 5)
#     return pd.Series(I[:, 0])  # return the index of the nearest neighbour
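# Note: for the UDF above to work on executors, the index file must be readable from every
# worker. One possible approach (a sketch, assuming the file only exists on the driver) is to
# ship it with Spark's file-distribution mechanism and resolve the local copy via SparkFiles:
#     spark.sparkContext.addFile("/home/ffman/tmp/my_index.faiss")
#     index = faiss.read_index(SparkFiles.get("my_index.faiss"))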

def str_to_floats(s):
    # strip the surrounding "[" and "]" characters
    s = s.replace("[", "").replace("]", "")
    # split on commas and convert each element to a float
    return [float(x) for x in s.split(",")]


str_to_floats_udf = udf(str_to_floats, ArrayType(FloatType()))
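# Illustrative usage (assuming an "embedding" column stored as a string such as "[0.1, 0.2, ...]"):
#     df = df.withColumn("embedding", str_to_floats_udf(df["embedding"]))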


class Search(Templates):

    def __init__(self, site_name='us'):
        super(Search, self).__init__()
        self.site_name = site_name
        self.db_save = 'image_search'
        self.spark = self.create_spark_object(app_name=f"{self.db_save}: {self.site_name}")
        self.df_save = self.spark.sql("select 1+1;")  # placeholder DataFrame

    def read_data(self):
        # read the pre-computed embeddings from parquet (for inspection only; overwritten below)
        self.df_save = self.spark.read.parquet("hdfs://hadoop5:8020/home/ffman/faiss/embeddings/folder")
        self.df_save.show(20)
        print("self.df_save:", self.df_save.count(), self.df_save.schema)
        sql = "select id, img_vector as embedding from ods_asin_extract_features;"
        self.df_save = self.spark.sql(sql).cache()
        self.df_save = self.df_save.withColumn("embedding", str_to_floats_udf(self.df_save["embedding"]))  # convert the string column to an array of floats
        self.df_save.show(20)
        print("self.df_save:", self.df_save.count(), self.df_save.schema)

        # Pull the embeddings back to the driver. Note that collect() loads every row into
        # driver memory, so this may run out of memory if the dataset is too large.
        data = self.df_save.collect()
        embeddings = np.array([row['embedding'] for row in data]).astype('float32')  # Faiss requires float32

        # Build the Faiss index (exact L2 search over 512-dimensional vectors)
        print("Building the Faiss index")
        index = faiss.IndexFlatL2(512)
        index.add(embeddings)
        faiss.write_index(index, "/home/ffman/tmp/my_index.faiss")
        print("Index built and written to local disk")
        # Build the index on the driver and save it to disk
        # embeddings = np.random.rand(1000, 512).astype('float32')  # placeholder for your embeddings
        # index = faiss.IndexFlatL2(embeddings.shape[1])
        # index.add(embeddings)
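        # If the index should actually end up on HDFS (as the original log message implied),
        # one option is to copy the local file over afterwards -- a sketch, assuming the hdfs
        # CLI is available on the driver; the target path below is hypothetical:
        # import subprocess
        # subprocess.run(["hdfs", "dfs", "-put", "-f",
        #                 "/home/ffman/tmp/my_index.faiss",
        #                 "hdfs://hadoop5:8020/home/ffman/faiss/my_index.faiss"], check=True)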


        while True:
            # query is a random vector used here as a stand-in for a real query embedding
            query = np.random.rand(512).astype('float32')
            print("query:", query.shape)
            # find the 5 nearest neighbours
            D, I = index.search(query.reshape(1, -1), 5)

            # D holds the squared L2 distances, I the row positions of the neighbours in `embeddings`
            print("Distances: ", D)
            print("Indices: ", I)
            time.sleep(1)

    def handle_data(self):
        # Run the nearest-neighbour query on each partition (disabled; see the pandas UDF above)
        # df = self.df_save.withColumn('nearest_neighbor', find_nearest_neighbors(self.df_save['embedding']))
        # df.show(20)
        quit()


if __name__ == '__main__':
    site_name = sys.argv[1]  # argument 1: site name
    handle_obj = Search(site_name=site_name)
    handle_obj.run()