Spaces: FauziIsyrinApridal (Running)

FauziIsyrinApridal committed · 886eee7
1 Parent(s): aa95ea3

Fix the app to fetch the list of data files and compare the latest data timestamp with the vector store timestamp; if the data is newer, rebuild the vector store (test).

Files changed:
- app.py +84 -74
- evaluate.py +527 -0
- rag_evaluation_20250627_133749.log +0 -0
- requirements.txt +2 -1
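In short, app.py now asks Supabase for both timestamps and rebuilds the vector store only when the data is newer. A minimal sketch of that check, assuming the helper names introduced in app.py below (the wrapper name needs_rebuild is illustrative only; the real functions also handle parse errors and missing files):

    from datetime import datetime

    def needs_rebuild(data_bucket: str, supabase) -> bool:
        # Newest "updated_at"/"created_at" among data files, as epoch seconds.
        data_time = get_latest_data_timestamp_from_files(data_bucket, supabase)
        # Newest vector store file timestamp, as an ISO-8601 string (or None if absent).
        store_iso = get_supabase_vector_store_timestamp()
        if store_iso is None:
            return True  # no vector store yet -> build it
        store_time = datetime.fromisoformat(store_iso.replace("Z", "+00:00")).timestamp()
        return data_time > store_time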
app.py
CHANGED
Old version (lines removed or changed are prefixed with "-"; some removed lines are cut off in the diff view and are left as shown):

@@ -1,12 +1,12 @@
 import streamlit as st
 import os
-import tempfile
-import zipfile
 from dotenv import load_dotenv
 from langsmith import traceable

 from app.chat import initialize_session_state, display_chat_history
-from app.data_loader import get_data, load_docs
 from app.document_processor import process_documents, save_vector_store_to_supabase, load_vector_store_from_supabase
 from app.prompts import sahabat_prompt
 from app.db import supabase

@@ -17,91 +17,103 @@ from langchain_community.document_transformers import LongContextReorder

 load_dotenv()

-#
 BUCKET_NAME = "pnp-bot-storage-archive"
-VECTOR_STORE_PREFIX = "vector_store"
-
- (blank lines removed)
-
-        llm,
-        retriever=vector_store.as_retriever(search_kwargs={"k": 6}),
-        combine_docs_chain_kwargs={"prompt": sahabat_prompt},
-        return_source_documents=True,
-        memory=memory
-    )
-
-    return chain
-
-def reorder_embedding(docs):
-    reordering = LongContextReorder()
-    return reordering.transform_documents(docs)
-
-def get_latest_data_timestamp(folder):
-    latest_time = 0
-    for root, _, files in os.walk(folder):
-        for file in files:
-            path = os.path.join(root, file)
-            file_time = os.path.getmtime(path)
-            latest_time = max(latest_time, file_time)
     return latest_time

-
-
     try:
         response = supabase.storage.from_(BUCKET_NAME).list()
         timestamps = []
-
         for file in response:
-            if file[
-                file[
             ):
-                timestamps.append(file[
-
-        # Return the latest timestamp if both files exist
         if len(timestamps) >= 2:
             return max(timestamps)
         return None
-
     except Exception as e:
         print(f"Error getting Supabase timestamp: {e}")
         return None

-
-
     supabase_timestamp = get_supabase_vector_store_timestamp()
     if supabase_timestamp is None:
         return True
-
-
-
-    supabase_time = datetime.fromisoformat(supabase_timestamp.replace('Z', '+00:00')).timestamp()
-    data_time = get_latest_data_timestamp(DATA_DIR)
-
     return data_time > supabase_time

 @traceable(name="Main Chatbot RAG App")
 def main():
     initialize_session_state()

-
-
-    if len(st.session_state['history']) == 0:
         if vector_store_is_outdated():
             with st.spinner("Memuat dan memproses dokumen..."):
-                get_data()
                 docs = load_docs()
                 if len(docs) > 0:
                     reordered_docs = reorder_embedding(docs)

@@ -110,30 +122,28 @@ def main():
                     with st.spinner("Mengunggah vector store ke Supabase..."):
                         success = save_vector_store_to_supabase(vector_store, supabase, BUCKET_NAME, VECTOR_STORE_PREFIX)
                         if success:
-                            st.success("Vector store berhasil diunggah ke Supabase!")
                         else:
-                            st.error("Gagal mengunggah vector store ke Supabase")
                 else:
-                    st.warning("Folder 'data/' kosong. Chatbot tetap bisa digunakan,
                     vector_store = None
         else:
             with st.spinner("Memuat vector store dari Supabase..."):
                 vector_store = load_vector_store_from_supabase(supabase, BUCKET_NAME, VECTOR_STORE_PREFIX)
                 if vector_store:
-                    st.success("Vector store berhasil dimuat dari Supabase!")
                 else:
-                    st.error("Gagal memuat vector store dari Supabase")
     else:
-        vector_store = st.session_state.get(
-        if vector_store is None:
-            vector_store = load_vector_store_from_supabase(supabase, BUCKET_NAME, VECTOR_STORE_PREFIX)

-    st.session_state[

-    if st.session_state[
-        chain = create_conversational_chain(st.session_state[
         display_chat_history(chain)


 if __name__ == "__main__":
-    main()
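The substantive change: the removed get_latest_data_timestamp(folder) compared against local file mtimes (epoch floats from os.path.getmtime), while the new helpers below compare the "updated_at"/"created_at" metadata of objects in the Supabase bucket, converted from ISO-8601 strings to epoch seconds. A short illustration of the two timestamp forms (the path and the ISO value are made-up examples):

    import os
    from datetime import datetime

    local_mtime = os.path.getmtime("data/contoh.pdf")  # e.g. 1751002669.0, epoch seconds (hypothetical local file)
    bucket_time = datetime.fromisoformat("2025-06-27T13:37:49+00:00").timestamp()  # ISO string -> epoch seconds

Both end up as epoch seconds, so the newer-than check in vector_store_is_outdated() remains a plain float comparison.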
New version (added lines are prefixed with "+"):

@@ -1,12 +1,12 @@
 import streamlit as st
 import os
 from dotenv import load_dotenv
 from langsmith import traceable
+from datetime import datetime
+from typing import List, Dict, Optional

 from app.chat import initialize_session_state, display_chat_history
+from app.data_loader import get_data, list_all_files, load_docs
 from app.document_processor import process_documents, save_vector_store_to_supabase, load_vector_store_from_supabase
 from app.prompts import sahabat_prompt
 from app.db import supabase

@@ -17,91 +17,103 @@ from langchain_community.document_transformers import LongContextReorder

 load_dotenv()

+# ---------------------------------------------------------
+# CONFIG
+# ---------------------------------------------------------
 BUCKET_NAME = "pnp-bot-storage-archive"
+VECTOR_STORE_PREFIX = "vector_store"
+
+# ---------------------------------------------------------
+# UTILITY
+# ---------------------------------------------------------
+def get_latest_data_timestamp_from_files(bucket_name: str, supabase) -> float:
+    """Get the latest timestamp from files in a Supabase storage bucket."""
+    files = list_all_files(bucket_name, supabase=supabase)
+    latest_time = 0.0
+    for file in files:
+        iso_time = file.get("updated_at") or file.get("created_at")
+        if iso_time:
+            try:
+                timestamp = datetime.fromisoformat(iso_time.replace('Z', '+00:00')).timestamp()
+                latest_time = max(latest_time, timestamp)
+            except Exception as e:
+                print(f"Gagal parsing waktu dari {file.get('name')}: {e}")
     return latest_time

+
+def get_supabase_vector_store_timestamp() -> Optional[str]:
+    """Get the latest timestamp of vector store files in the Supabase storage."""
     try:
         response = supabase.storage.from_(BUCKET_NAME).list()
         timestamps = []
         for file in response:
+            if file["name"].startswith(VECTOR_STORE_PREFIX) and (
+                file["name"].endswith(".faiss") or file["name"].endswith(".pkl")
             ):
+                timestamps.append(file["updated_at"])
         if len(timestamps) >= 2:
             return max(timestamps)
         return None
     except Exception as e:
         print(f"Error getting Supabase timestamp: {e}")
         return None

+
+def vector_store_is_outdated() -> bool:
+    """Check if vector store needs to be updated based on files in Supabase storage."""
     supabase_timestamp = get_supabase_vector_store_timestamp()
     if supabase_timestamp is None:
         return True
+    supabase_time = datetime.fromisoformat(supabase_timestamp.replace("Z", "+00:00")).timestamp()
+    data_time = get_latest_data_timestamp_from_files("pnp-bot-storage", supabase)
+
     return data_time > supabase_time

+
+def reorder_embedding(docs):
+    """Reorder documents for long context retrieval."""
+    reordering = LongContextReorder()
+    return reordering.transform_documents(docs)
+
+
+# ---------------------------------------------------------
+# RAG CHAIN
+# ---------------------------------------------------------
+@traceable(name="Create RAG Conversational Chain")
+def create_conversational_chain(vector_store):
+    """Create a Conversational Retrieval Chain for RAG."""
+    llm = Replicate(
+        model="fauziisyrinapridal/sahabat-ai-v1:afb9fa89fe786362f619fd4fef34bd1f7a4a4da23073d8a6fbf54dcbe458f216",
+        model_kwargs={"temperature": 0.1, "top_p": 0.9, "max_new_tokens": 6000}
+    )
+    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True, output_key="answer")
+    chain = ConversationalRetrievalChain.from_llm(
+        llm,
+        retriever=vector_store.as_retriever(search_kwargs={"k": 6}),
+        combine_docs_chain_kwargs={"prompt": sahabat_prompt},
+        return_source_documents=True,
+        memory=memory,
+    )
+    return chain
+
+
+def get_rag_chain(vector_store):
+    """Return a Conversational Retrieval Chain for external use."""
+    return create_conversational_chain(vector_store)
+
+
+# ---------------------------------------------------------
+# MAIN FUNCTION
+# ---------------------------------------------------------
 @traceable(name="Main Chatbot RAG App")
 def main():
     initialize_session_state()
+    vector_store = None

+    if len(st.session_state["history"]) == 0:
         if vector_store_is_outdated():
             with st.spinner("Memuat dan memproses dokumen..."):
+                get_data()
                 docs = load_docs()
                 if len(docs) > 0:
                     reordered_docs = reorder_embedding(docs)

@@ -110,30 +122,28 @@ def main():
                     with st.spinner("Mengunggah vector store ke Supabase..."):
                         success = save_vector_store_to_supabase(vector_store, supabase, BUCKET_NAME, VECTOR_STORE_PREFIX)
                         if success:
+                            st.success("Vector store berhasil diunggah ke Supabase!")
                         else:
+                            st.error("Gagal mengunggah vector store ke Supabase.")
                 else:
+                    st.warning("Folder 'data/' kosong. Chatbot tetap bisa digunakan, tetapi tanpa konteks dokumen.")
                     vector_store = None
         else:
             with st.spinner("Memuat vector store dari Supabase..."):
                 vector_store = load_vector_store_from_supabase(supabase, BUCKET_NAME, VECTOR_STORE_PREFIX)
                 if vector_store:
+                    st.success("Vector store berhasil dimuat dari Supabase!")
                 else:
+                    st.error("Gagal memuat vector store dari Supabase.")
     else:
+        vector_store = st.session_state.get("vector_store") or load_vector_store_from_supabase(supabase, BUCKET_NAME, VECTOR_STORE_PREFIX)

+    st.session_state["vector_store"] = vector_store

+    if st.session_state["vector_store"] is not None:
+        chain = create_conversational_chain(st.session_state["vector_store"])
         display_chat_history(chain)


 if __name__ == "__main__":
+    main()
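One detail worth noting in get_supabase_vector_store_timestamp(): it returns max(timestamps) over the raw "updated_at" strings. For uniformly formatted ISO-8601 UTC strings, lexicographic order matches chronological order, so this picks the most recently updated vector store file. A small check of that property (illustrative values):

    timestamps = ["2025-06-20T08:00:00.000Z", "2025-06-27T13:37:49.000Z"]
    assert max(timestamps) == "2025-06-27T13:37:49.000Z"  # string max == latest time for same-format ISO strings

The caller then converts the winning string to epoch seconds with datetime.fromisoformat(ts.replace("Z", "+00:00")).timestamp(), since fromisoformat() in many Python versions does not accept a trailing "Z" directly.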
evaluate.py
ADDED
@@ -0,0 +1,527 @@
import os
import time
import random
import logging
from datetime import datetime
from typing import List, Dict, Any
import streamlit as st
from dotenv import load_dotenv
from langsmith import Client, traceable
from typing_extensions import Annotated, TypedDict
from langchain_openai import ChatOpenAI
from langchain_community.llms import Replicate
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
import backoff
from ratelimit import limits, sleep_and_retry
import json

# Import dari aplikasi utama Anda
from app.document_processor import load_vector_store_from_supabase
from app.prompts import sahabat_prompt
from app.db import supabase

# Setup logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(f'rag_evaluation_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

load_dotenv()

# Konfigurasi
BUCKET_NAME = "pnp-bot-storage-archive"
VECTOR_STORE_PREFIX = "vector_store"

# Rate limiting settings
MAX_CALLS_PER_MINUTE = 50
MAX_CALLS_PER_HOUR = 1000

# Dataset evaluasi untuk Politeknik Negeri Padang
evaluation_dataset = [
    {
        'question': '''Bagaimana sistem pendidikan yang diterapkan di Politeknik Negeri Padang?''',
        'ground_truth': '''Sistem pendidikan yang diterapkan di Politeknik adalah dengan menggabungkan pendidikan teoritis, praktek (terapan) di Laboratorium dan praktek industry. Pelaksanaan praktik di industri dilakukan oleh mahasiswa selama satu semester untuk menambah wawasan, pengalaman dan pengembangan ilmu guna membentuk tenaga ahli yang terampil dan profesional.'''
    },
    {
        'question': '''Apa saja mata kuliah yang terdapat dalam kurikulum pendidikan Politeknik Negeri Padang?''',
        'ground_truth': '''Kurikulum Pendidikan telah disusun berbasis kompetensi dengan kelompok mata kuliah sebagai berikut : - Mata Kuliah Pengembangan Kepribadian (MPK) - Mata Kuliah Keimuan dan Keterampilan (MKK) - Mata Kuliah Berkarya (MKB) - Mata Kuliah Berkehidupan Bermasyarakat (MBB)'''
    },
    {
        'question': '''Bagaimana Politeknik Negeri Padang mendukung misi tridharma perguruan tinggi?''',
        'ground_truth': '''Politeknik Negeri Padang dalam menjalankan misi tridharma perguruan tinggi didukung oleh tenaga pendidik dan tenaga kependidikan yang profesional pada bidangnya. Jumlah dan kualifikasi staf tersebut berdasarkan keadaan Desember 2017 sebagai berikut : - Tenaga Pendidik : S1 = 14 orang, S2 = 256 orang, S3 = 21 orang (Yang sedang menempuh S3 = 7 orang, Yang sedang menempuh S2 = 5 orang) - Tenaga Kependidikan : SD = 5 orang, SMP = 4 orang, SLTA = 71 orang, D3 = 25 orang, S1 = 54 orang, S2 = 15 orang.'''
    },
    {
        'question': '''Bagaimana Politeknik Negeri Padang menyediakan akses internet bagi mahasiswa?''',
        'ground_truth': '''Politeknik Negeri Padang telah memiliki Anjungan Internet Mandiri (AIM) yang dapat diakses oleh mahasiswa secara gratis, yang tersedia pada titik-titik strategis. Juga tersedia kawasan hot spot area di sekitar kampus sehingga mahasiswa dapat memanfaatkan internet dengan bebas menggunakan laptop/PC.'''
    },
    {
        'question': '''Apa saja contoh kerjasama Politeknik Negeri Padang dengan industri?''',
        'ground_truth': '''PT. Siemens Indonesia, PT. Toyota Aichi Takaoua Japan, PT. PLN, PT. INTI, Futaba Rashi Siisha Kusho Japan, PT. Sintom, PT. Krakatau Steel, Komatssu Shinge Koumuten, PT. PAL Indonesia, PT. Hexindo, Taishurin Co. Ltd Fukuoaka Japan, PT. Texmaco Perkasa, PT. LEN Industri, PT. Toyota Astra Motor, PT. Indah Kiat, PT. Trakindo Utama, BTN.'''
    },
    {
        'question': '''Bagaimana Politeknik Negeri Padang membantu mahasiswa dalam bidang prestasi dan ekonomi?''',
        'ground_truth': '''Tersedia bantuan untuk sekitar 800 mahasiswa setiap tahunnya. Beasiswa yang diterima antara lain: - Beasiswa Peningkatan Prestasi Akademik (PPA), - Beasiswa Kerja Mahasiswa (BKM), - Beasiswa Bantuan Belajar Mahasiswa (BBM), - Beasiswa TPSDP, - Beasiswa Kredit Bantuan Belajar Mahasiswa (KBBM), - Beasiswa Depertemen Hankam (ABRI), - Beasiswa PT. Toyota Astra, - Beasiswa ORBIT (ICMI), - Beasiswa Supersemar.'''
    },
    {
        'question': '''Bagaimana status akreditasi program studi di Politeknik Negeri Padang?''',
        'ground_truth': '''Program studi di Politeknik Negeri Padang memiliki status akreditasi yang bervariasi seperti Baik, Baik Sekali, hingga Unggul. Contohnya, Teknik Mesin (D3) terakreditasi Baik Sekali hingga 2029, Teknik Manufaktur (D4) terakreditasi Unggul hingga 2028, dan Teknik Sipil (D3) terakreditasi A hingga 2026. Setiap program memiliki SK dan sertifikat akreditasi resmi.'''
    },
    {
        'question': '''Bagaimana proses penerimaan mahasiswa baru di Politeknik Negeri Padang?''',
        'ground_truth': '''Penerimaan mahasiswa baru di Politeknik Negeri Padang dilakukan melalui berbagai jalur seleksi seperti SNBT, SNMPN, dan kelas kerjasama. Tersedia brosur dan informasi detail melalui situs http://penerimaan.pnp.ac.id. Program studi Teknik Alat Berat misalnya memiliki kelas kerjasama dengan PT Trakindo Utama. Jadwal seleksi dan pengumuman dapat diakses secara daring.'''
    },
    {
        'question': '''Apa bentuk kerjasama yang dilakukan Politeknik Negeri Padang?''',
        'ground_truth': '''Politeknik Negeri Padang menjalin kerjasama dengan industri, pemerintah, BUMN, dan asosiasi profesi baik dalam negeri maupun luar negeri. Bentuk kerjasama mencakup rekrutmen, prakerin (praktik kerja industri), kunjungan industri, bimbingan karir, serta pembuatan MoU. Tujuannya untuk menjaga mutu lulusan dan penyaluran SDM.'''
    },
    {
        'question': '''Siapa saja pimpinan di Politeknik Negeri Padang saat ini?''',
        'ground_truth': '''Direktur Politeknik Negeri Padang adalah Dr. Ir. Surfa Yondri, S.T., S.ST., M.Kom. Wakil Direktur Bidang Akademik adalah Ir. Revalin Herdianto, ST., M.Sc., Ph.D. Pimpinan lainnya antara lain Nasrullah, ST., M.T., dan Sarmiadi, S.E., M.M. yang memiliki pengalaman panjang dalam jabatan struktural di kampus.'''
    },
    {
        'question': '''Bagaimana sejarah singkat berdirinya Politeknik Negeri Padang?''',
        'ground_truth': '''Politeknik Negeri Padang didirikan pada tahun 1987 sebagai salah satu dari 17 politeknik pertama di Indonesia. Awalnya bernama Politeknik Engineering Universitas Andalas. Pada 1997 menjadi Politeknik Universitas Andalas lalu berubah menjadi Politeknik Negeri Padang. Saat ini memiliki 32 program studi dari jenjang D3 hingga Magister Terapan.'''
    }
]

# Schema untuk evaluasi
class CorrectnessGrade(TypedDict):
    explanation: Annotated[str, ..., "Penjelasan alasan penilaian"]
    correct: Annotated[bool, ..., "True jika jawaban benar, False jika salah"]

class RelevanceGrade(TypedDict):
    explanation: Annotated[str, ..., "Penjelasan alasan penilaian"]
    relevant: Annotated[bool, ..., "True jika jawaban relevan dengan pertanyaan"]

class GroundedGrade(TypedDict):
    explanation: Annotated[str, ..., "Penjelasan alasan penilaian"]
    grounded: Annotated[bool, ..., "True jika jawaban berdasarkan dokumen yang diambil"]

class RetrievalRelevanceGrade(TypedDict):
    explanation: Annotated[str, ..., "Penjelasan alasan penilaian"]
    relevant: Annotated[bool, ..., "True jika dokumen yang diambil relevan dengan pertanyaan"]

# Prompt untuk evaluasi dalam Bahasa Indonesia
correctness_instructions = """Anda adalah seorang guru yang menilai kuis.

Anda akan diberikan PERTANYAAN, JAWABAN BENAR (ground truth), dan JAWABAN SISWA.

Berikut kriteria penilaian yang harus diikuti:
(1) Nilai jawaban siswa HANYA berdasarkan akurasi faktual relatif terhadap jawaban benar.
(2) Pastikan jawaban siswa tidak mengandung pernyataan yang bertentangan.
(3) Tidak apa-apa jika jawaban siswa berisi informasi lebih banyak dari jawaban benar, selama akurat secara faktual.

Kebenaran:
Nilai kebenaran True berarti jawaban siswa memenuhi semua kriteria.
Nilai kebenaran False berarti jawaban siswa tidak memenuhi semua kriteria.

Jelaskan penalaran Anda secara bertahap untuk memastikan penalaran dan kesimpulan benar.
Hindari menyebutkan jawaban benar di awal."""

relevance_instructions = """Anda adalah seorang guru yang menilai kuis.

Anda akan diberikan PERTANYAAN dan JAWABAN SISWA.

Berikut kriteria penilaian yang harus diikuti:
(1) Pastikan JAWABAN SISWA ringkas dan relevan dengan PERTANYAAN
(2) Pastikan JAWABAN SISWA membantu menjawab PERTANYAAN

Relevansi:
Nilai relevansi True berarti jawaban siswa memenuhi semua kriteria.
Nilai relevansi False berarti jawaban siswa tidak memenuhi semua kriteria.

Jelaskan penalaran Anda secara bertahap untuk memastikan penalaran dan kesimpulan benar.
Hindari menyebutkan jawaban benar di awal."""

grounded_instructions = """Anda adalah seorang guru yang menilai kuis.

Anda akan diberikan FAKTA dan JAWABAN SISWA.

Berikut kriteria penilaian yang harus diikuti:
(1) Pastikan JAWABAN SISWA berdasarkan FAKTA yang diberikan.
(2) Pastikan JAWABAN SISWA tidak mengandung informasi "halusinasi" di luar cakupan FAKTA.

Berdasarkan Fakta:
Nilai True berarti jawaban siswa memenuhi semua kriteria.
Nilai False berarti jawaban siswa tidak memenuhi semua kriteria.

Jelaskan penalaran Anda secara bertahap untuk memastikan penalaran dan kesimpulan benar.
Hindari menyebutkan jawaban benar di awal."""

retrieval_relevance_instructions = """Anda adalah seorang guru yang menilai kuis.

Anda akan diberikan PERTANYAAN dan sekumpulan FAKTA yang disediakan siswa.

Berikut kriteria penilaian yang harus diikuti:
(1) Tujuan Anda adalah mengidentifikasi FAKTA yang sama sekali tidak terkait dengan PERTANYAAN
(2) Jika fakta mengandung kata kunci APAPUN atau makna semantik terkait pertanyaan, anggap relevan
(3) Tidak apa-apa jika fakta memiliki BEBERAPA informasi yang tidak terkait dengan pertanyaan selama (2) terpenuhi

Relevansi:
Nilai relevansi True berarti FAKTA mengandung kata kunci APAPUN atau makna semantik terkait PERTANYAAN.
Nilai relevansi False berarti FAKTA sama sekali tidak terkait dengan PERTANYAAN.

Jelaskan penalaran Anda secara bertahap untuk memastikan penalaran dan kesimpulan benar.
Hindari menyebutkan jawaban benar di awal."""

# Inisialisasi evaluator LLM dengan retry dan rate limiting
class SafeLLMEvaluator:
    def __init__(self, model_name="gpt-4o", temperature=0):
        self.model_name = model_name
        self.temperature = temperature
        self._init_llms()

    def _init_llms(self):
        """Initialize LLM evaluators with structured output"""
        try:
            self.grader_llm = ChatOpenAI(
                model=self.model_name,
                temperature=self.temperature
            ).with_structured_output(CorrectnessGrade, method="json_schema", strict=True)

            self.relevance_llm = ChatOpenAI(
                model=self.model_name,
                temperature=self.temperature
            ).with_structured_output(RelevanceGrade, method="json_schema", strict=True)

            self.grounded_llm = ChatOpenAI(
                model=self.model_name,
                temperature=self.temperature
            ).with_structured_output(GroundedGrade, method="json_schema", strict=True)

            self.retrieval_relevance_llm = ChatOpenAI(
                model=self.model_name,
                temperature=self.temperature
            ).with_structured_output(RetrievalRelevanceGrade, method="json_schema", strict=True)

            logger.info(f"LLM evaluators initialized with model: {self.model_name}")

        except Exception as e:
            logger.error(f"Failed to initialize LLM evaluators: {e}")
            raise

# Global evaluator instance
evaluator = SafeLLMEvaluator()

# Rate limiting and retry decorators
@sleep_and_retry
@limits(calls=MAX_CALLS_PER_MINUTE, period=60)
@backoff.on_exception(
    backoff.expo,
    (Exception,),
    max_tries=3,
    max_time=30,
    jitter=backoff.random_jitter
)
def safe_api_call(llm, messages):
    """Safely make API calls with rate limiting and retry"""
    try:
        response = llm.invoke(messages)
        logger.debug(f"API call successful")
        return response
    except Exception as e:
        logger.warning(f"API call failed: {e}")
        raise

@traceable(name="Create RAG Chain for Evaluation")
def create_rag_chain(vector_store):
    """Membuat RAG chain untuk evaluasi dengan optimasi"""
    try:
        llm = Replicate(
            model="fauziisyrinapridal/sahabat-ai-v1:afb9fa89fe786362f619fd4fef34bd1f7a4a4da23073d8a6fbf54dcbe458f216",
            model_kwargs={"temperature": 0.1, "top_p": 0.9, "max_new_tokens": 4000}  # Reduced tokens
        )

        memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=True,
            output_key='answer'
        )

        # Reduced retrieval count to minimize API calls
        chain = ConversationalRetrievalChain.from_llm(
            llm,
            retriever=vector_store.as_retriever(search_kwargs={"k": 4}),  # Reduced from 6 to 4
            combine_docs_chain_kwargs={"prompt": sahabat_prompt},
            return_source_documents=True,
            memory=memory
        )

        logger.info("RAG chain created successfully")
        return chain

    except Exception as e:
        logger.error(f"Failed to create RAG chain: {e}")
        raise

@traceable(name="RAG Bot Answer")
@backoff.on_exception(backoff.expo, Exception, max_tries=3)
def rag_bot_answer(question: str, vector_store) -> dict:
    """Fungsi untuk mendapatkan jawaban dari RAG bot dengan error handling"""
    try:
        chain = create_rag_chain(vector_store)
        result = chain({"question": question})

        logger.info(f"RAG answer generated for question: {question[:50]}...")
        return {
            "answer": result['answer'],
            "documents": result.get('source_documents', [])
        }
    except Exception as e:
        logger.error(f"Error in rag_bot_answer: {e}")
        return {
            "answer": "Terjadi kesalahan dalam memproses pertanyaan.",
            "documents": []
        }

# Enhanced evaluator functions with rate limiting
def correctness_evaluator(question: str, answer: str, ground_truth: str) -> tuple[bool, str]:
    """Evaluator untuk kebenaran jawaban dengan error handling"""
    try:
        answers = f"""PERTANYAAN: {question}
JAWABAN BENAR: {ground_truth}
JAWABAN SISWA: {answer}"""

        messages = [
            {"role": "system", "content": correctness_instructions},
            {"role": "user", "content": answers}
        ]

        grade = safe_api_call(evaluator.grader_llm, messages)
        logger.debug(f"Correctness evaluation completed")
        return grade["correct"], grade["explanation"]

    except Exception as e:
        logger.error(f"Correctness evaluation failed: {e}")
        return False, f"Error in evaluation: {str(e)}"

def relevance_evaluator(question: str, answer: str) -> tuple[bool, str]:
    """Evaluator untuk relevansi jawaban dengan error handling"""
    try:
        content = f"PERTANYAAN: {question}\nJAWABAN SISWA: {answer}"
        messages = [
            {"role": "system", "content": relevance_instructions},
            {"role": "user", "content": content}
        ]

        grade = safe_api_call(evaluator.relevance_llm, messages)
        logger.debug(f"Relevance evaluation completed")
        return grade["relevant"], grade["explanation"]

    except Exception as e:
        logger.error(f"Relevance evaluation failed: {e}")
        return False, f"Error in evaluation: {str(e)}"

def groundedness_evaluator(answer: str, documents) -> tuple[bool, str]:
    """Evaluator untuk groundedness jawaban dengan error handling"""
    try:
        if not documents:
            return False, "No documents provided for grounding evaluation"

        doc_string = "\n\n".join([doc.page_content for doc in documents])
        content = f"FAKTA: {doc_string}\nJAWABAN SISWA: {answer}"
        messages = [
            {"role": "system", "content": grounded_instructions},
            {"role": "user", "content": content}
        ]

        grade = safe_api_call(evaluator.grounded_llm, messages)
        logger.debug(f"Groundedness evaluation completed")
        return grade["grounded"], grade["explanation"]

    except Exception as e:
        logger.error(f"Groundedness evaluation failed: {e}")
        return False, f"Error in evaluation: {str(e)}"

def retrieval_relevance_evaluator(question: str, documents) -> tuple[bool, str]:
    """Evaluator untuk relevansi retrieval dengan error handling"""
    try:
        if not documents:
            return False, "No documents provided for retrieval evaluation"

        doc_string = "\n\n".join([doc.page_content for doc in documents])
        content = f"FAKTA: {doc_string}\nPERTANYAAN: {question}"
        messages = [
            {"role": "system", "content": retrieval_relevance_instructions},
            {"role": "user", "content": content}
        ]

        grade = safe_api_call(evaluator.retrieval_relevance_llm, messages)
        logger.debug(f"Retrieval relevance evaluation completed")
        return grade["relevant"], grade["explanation"]

    except Exception as e:
        logger.error(f"Retrieval relevance evaluation failed: {e}")
        return False, f"Error in evaluation: {str(e)}"

def controlled_delay(min_delay=2, max_delay=5):
    """Add controlled delay to avoid rate limits"""
    delay = random.uniform(min_delay, max_delay)
    logger.debug(f"Waiting {delay:.2f} seconds...")
    time.sleep(delay)

@traceable(name="Run RAG Evaluation Enhanced")
def run_enhanced_evaluation(batch_size: int = None, start_index: int = 0):
    """Menjalankan evaluasi RAG dengan optimasi dan monitoring"""
    logger.info("Memulai evaluasi RAG Enhanced untuk Politeknik Negeri Padang...")

    # Load vector store
    logger.info("Memuat vector store dari Supabase...")
    try:
        vector_store = load_vector_store_from_supabase(supabase, BUCKET_NAME, VECTOR_STORE_PREFIX)
        if not vector_store:
            logger.error("Gagal memuat vector store!")
            return None
        logger.info("Vector store berhasil dimuat!")
    except Exception as e:
        logger.error(f"Error loading vector store: {e}")
        return None

    # Determine evaluation scope
    if batch_size:
        end_index = min(start_index + batch_size, len(evaluation_dataset))
        dataset_subset = evaluation_dataset[start_index:end_index]
        logger.info(f"Evaluating batch {start_index}-{end_index-1} ({len(dataset_subset)} questions)")
    else:
        dataset_subset = evaluation_dataset
        logger.info(f"Evaluating all {len(dataset_subset)} questions")

    # Hasil evaluasi
    results = []
    total_questions = len(dataset_subset)
    start_time = time.time()

    # Progress tracking
    success_count = 0
    error_count = 0

    for i, item in enumerate(dataset_subset, 1):
        question_start_time = time.time()
        logger.info(f"\nEvaluasi pertanyaan {i}/{total_questions}")

        question = item['question']
        ground_truth = item['ground_truth']

        try:
            # Dapatkan jawaban dari RAG
            logger.info(f"Getting RAG answer...")
            rag_result = rag_bot_answer(question, vector_store)
            answer = rag_result['answer']
            documents = rag_result['documents']

            logger.info(f"Pertanyaan: {question[:100]}...")
            logger.info(f"Jawaban: {answer[:100]}...")
            logger.info(f"Dokumen ditemukan: {len(documents)}")

            # Add delay before evaluations
            controlled_delay(1, 3)

            # Evaluasi dengan error handling
            logger.info("Running evaluations...")

            correctness_score, correctness_explanation = correctness_evaluator(question, answer, ground_truth)
            controlled_delay(1, 2)

            relevance_score, relevance_explanation = relevance_evaluator(question, answer)
            controlled_delay(1, 2)

            groundedness_score, groundedness_explanation = groundedness_evaluator(answer, documents)
            controlled_delay(1, 2)

            retrieval_relevance_score, retrieval_explanation = retrieval_relevance_evaluator(question, documents)

            result = {
                'question_index': start_index + i,
                'question': question,
                'answer': answer,
                'ground_truth': ground_truth,
                'documents_count': len(documents),
                'correctness': correctness_score,
                'correctness_explanation': correctness_explanation,
                'relevance': relevance_score,
                'relevance_explanation': relevance_explanation,
                'groundedness': groundedness_score,
                'groundedness_explanation': groundedness_explanation,
                'retrieval_relevance': retrieval_relevance_score,
                'retrieval_explanation': retrieval_explanation,
                'processing_time': time.time() - question_start_time
            }

            results.append(result)
            success_count += 1

            logger.info(f"Skor - Benar: {correctness_score}, Relevan: {relevance_score}, "
                        f"Berdasarkan Dokumen: {groundedness_score}, Retrieval Relevan: {retrieval_relevance_score}")
            logger.info(f"Waktu pemrosesan: {result['processing_time']:.2f} detik")

        except Exception as e:
            error_count += 1
            logger.error(f"Error processing question {i}: {e}")

            # Create error result
            error_result = {
                'question_index': start_index + i,
                'question': question,
                'answer': "ERROR",
                'ground_truth': ground_truth,
                'documents_count': 0,
                'correctness': False,
                'correctness_explanation': f"Error: {str(e)}",
                'relevance': False,
                'relevance_explanation': f"Error: {str(e)}",
                'groundedness': False,
                'groundedness_explanation': f"Error: {str(e)}",
                'retrieval_relevance': False,
                'retrieval_explanation': f"Error: {str(e)}",
                'processing_time': time.time() - question_start_time
            }
            results.append(error_result)

        # Progress update
        elapsed_time = time.time() - start_time
        avg_time_per_question = elapsed_time / i
        estimated_total_time = avg_time_per_question * total_questions
        remaining_time = estimated_total_time - elapsed_time

        logger.info(f"Progress: {i}/{total_questions} ({i/total_questions*100:.1f}%)")
        logger.info(f"Waktu berlalu: {elapsed_time:.1f}s, Estimasi sisa: {remaining_time:.1f}s")

        # Add delay between questions
        if i < total_questions:
            controlled_delay(2, 4)

    # Hitung statistik keseluruhan
    total_time = time.time() - start_time
    successful_results = [r for r in results if r['answer'] != "ERROR"]

    if successful_results:
        total_correctness = sum(r['correctness'] for r in successful_results)
        total_relevance = sum(r['relevance'] for r in successful_results)
        total_groundedness = sum(r['groundedness'] for r in successful_results)
        total_retrieval_relevance = sum(r['retrieval_relevance'] for r in successful_results)
        successful_count = len(successful_results)
    else:
        total_correctness = total_relevance = total_groundedness = total_retrieval_relevance = 0
        successful_count = 0

    # Print results
    logger.info(f"\nHASIL EVALUASI ENHANCED:")
    logger.info(f"{'='*60}")
    logger.info(f"Total Pertanyaan: {total_questions}")
    logger.info(f"Berhasil Diproses: {success_count}")
    logger.info(f"Error: {error_count}")
    logger.info(f"Total Waktu: {total_time:.1f} detik ({total_time/60:.1f} menit)")
    logger.info(f"Rata-rata per Pertanyaan: {total_time/total_questions:.1f} detik")

    if successful_count > 0:
        logger.info(f"\nSKOR EVALUASI (dari {successful_count} pertanyaan berhasil):")
        logger.info(f"Kebenaran (Correctness): {total_correctness}/{successful_count} ({total_correctness/successful_count*100:.1f}%)")
        logger.info(f"Relevansi (Relevance): {total_relevance}/{successful_count} ({total_relevance/successful_count*100:.1f}%)")
        logger.info(f"Berdasarkan Dokumen (Groundedness): {total_groundedness}/{successful_count} ({total_groundedness/successful_count*100:.1f}%)")
        logger.info(f"Retrieval Relevan: {total_retrieval_relevance}/{successful_count} ({total_retrieval_relevance/successful_count*100:.1f}%)")
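As the listing shows, run_enhanced_evaluation() accepts an optional batch_size and start_index, so the dataset can be evaluated in slices to stay under the rate limits. A hedged usage sketch, assuming OPENAI_API_KEY, REPLICATE_API_TOKEN, and the Supabase credentials used by app.db are set in the environment (importing the module also instantiates SafeLLMEvaluator at import time):

    from evaluate import run_enhanced_evaluation

    # Evaluate only the first three questions; call with no arguments to run the full dataset.
    run_enhanced_evaluation(batch_size=3, start_index=0)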
rag_evaluation_20250627_133749.log
ADDED
File without changes
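The filename follows the pattern set up by logging.FileHandler in evaluate.py, which stamps each run with the start time; for example:

    from datetime import datetime
    datetime(2025, 6, 27, 13, 37, 49).strftime("%Y%m%d_%H%M%S")  # -> '20250627_133749'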
requirements.txt
CHANGED
@@ -116,4 +116,5 @@ SpeechRecognition
 chardet
 streamlit_mic_recorder
 gtts
-playwright
+playwright
+dotenv