import json

import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel

import chromadb

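# Note: this snippet assumes the `torch`, `transformers`, and `chromadb` packages
# are installed (e.g. `pip install torch transformers chromadb`).
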

def retrieve_relevant_context(user_query):
    # Load the example prompt / SQL query data
    with open('prompt_sql_query.json', 'r') as f:
        sql_query_data = json.load(f)

    user_prompts = [item['prompt'] for item in sql_query_data]
    best_sql_queries = [item['sql_query'] for item in sql_query_data]
    relevant_tables = [item['redshift_tables'] for item in sql_query_data]

    # Load a pre-trained embedding model
    model_name = "sentence-transformers/all-MiniLM-L6-v2"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name)

    def encode(texts):
        """Encode a list of texts into L2-normalized embeddings."""
        inputs = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
        with torch.no_grad():
            # Use the [CLS] token representation as the sentence embedding
            embeddings = model(**inputs).last_hidden_state[:, 0]
        embeddings = F.normalize(embeddings, p=2, dim=1)
        return embeddings.numpy().tolist()

    # Initialize ChromaDB client
    client = chromadb.PersistentClient(path="./chromadb_store")
    collection = client.get_or_create_collection(name="sql_queries")

    # Store embeddings in ChromaDB if not already stored
    if collection.count() == 0:
        for idx, prompt in enumerate(user_prompts):
            embedding = encode([prompt])[0]
            collection.add(embeddings=[embedding], documents=[prompt], ids=[str(idx)])

    # Query ChromaDB
    query_embedding = encode([user_query])[0]
    results = collection.query(query_embeddings=[query_embedding], n_results=3)

    retrieved_prompts = results['documents'][0]
    retrieved_indices = results['ids'][0]

    # Map the retrieved IDs back to their SQL queries and table lists
    retrieved_sql_queries = [best_sql_queries[int(idx)] for idx in retrieved_indices]
    retrieved_tables = [relevant_tables[int(idx)] for idx in retrieved_indices]

    retrieved_prompt_sql_pairs = "Sample prompt - sql query pairs:\n"
    for idx, prompt in enumerate(retrieved_prompts):
        retrieved_prompt_sql_pairs += (
            f"{idx + 1}. Prompt: {prompt}\n SQL Query: {retrieved_sql_queries[idx]}\n\n"
        )

    print(retrieved_prompt_sql_pairs)

    # Collect the distinct tables referenced by the retrieved examples
    query_tables = list(set([item for sublist in retrieved_tables for item in sublist]))
    with open('redshift_tables.json', 'r') as f:
        schema_data = json.load(f)

    retrieved_tables_info = ""
    for index, table in enumerate(query_tables, start=1):
        retrieved_tables_info += f"{index}. {schema_data[0].get(table, 'Table schema not found')}\n\n"

    print(retrieved_tables_info)
    return retrieved_prompt_sql_pairs, retrieved_tables_info


# Example usage
# retrieve_relevant_context("What is the total IM device sold for DMI in first quarter of 2024?")
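
# The structure below is an assumption inferred from how the code above reads the
# two JSON files; the actual files may differ, and the field values are illustrative only.
#
# prompt_sql_query.json:
# [
#   {
#     "prompt": "<example natural-language question>",
#     "sql_query": "<SQL query that answers the question>",
#     "redshift_tables": ["<table_name_1>", "<table_name_2>"]
#   },
#   ...
# ]
#
# redshift_tables.json:
# [
#   {
#     "<table_name_1>": "<schema / DDL text for table_name_1>",
#     "<table_name_2>": "<schema / DDL text for table_name_2>"
#   }
# ]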