deploying the model with streamlit
- requirements.txt +2 -1
- task6_model_deployment/assets/logo.png +0 -0
- task6_model_deployment/scripts/__pycache__/query_engine.cpython-311.pyc +0 -0
- task6_model_deployment/scripts/{03_query_engine.py → query_engine.py} +1 -0
- task6_model_deployment/scripts/{01-vector_database_creation.py → vector_database_creation.py} +0 -0
- task6_model_deployment/scripts/{02_vector_database_loading.py → vector_database_loading.py} +0 -0
requirements.txt
CHANGED
@@ -4,4 +4,5 @@ python-dotenv
 llama-index
 llama-index-vector-stores-pinecone
 llama-index-embeddings-huggingface
-llama-index-llms-groq
+llama-index-llms-groq
+streamlit
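Only the streamlit dependency is added in this commit; the Streamlit entry point itself is not part of the diff. As a rough illustration of how the deployed app might wrap the query engine, the sketch below is a hypothetical app.py: the import of a module-level query_engine object and the logo usage are assumptions, not code from this repository.

# app.py -- hypothetical sketch, not part of this commit
import streamlit as st

from query_engine import query_engine  # assumed: query_engine.py exposes a LlamaIndex query engine

st.image("task6_model_deployment/assets/logo.png", width=120)  # logo asset added in this commit
st.title("Document Q&A")

question = st.text_input("Ask a question about the indexed documents:")
if st.button("Submit") and question:
    with st.spinner("Querying..."):
        response = query_engine.query(question)  # standard LlamaIndex query engine call
    st.write(str(response))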
task6_model_deployment/assets/logo.png
ADDED
task6_model_deployment/scripts/__pycache__/query_engine.cpython-311.pyc
ADDED
Binary file (4.36 kB)
task6_model_deployment/scripts/{03_query_engine.py → query_engine.py}
RENAMED
@@ -8,6 +8,7 @@ from llama_index.core.response.pprint_utils import pprint_source_node
 from llama_index.core import Settings
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from llama_index.llms.groq import Groq
+
 # Load environment variables from the .env file
 load_dotenv()
 
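The hunk above only touches the import block, so the rest of query_engine.py is not visible here. For context, a typical way to wire the imported pieces together in LlamaIndex is sketched below; the embedding and Groq model names are placeholders, not values taken from this repository.

# Illustrative wiring of the imports shown above; model names are placeholders.
import os
from dotenv import load_dotenv
from llama_index.core import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.groq import Groq

load_dotenv()  # loads GROQ_API_KEY (and any other secrets) from .env

# Register a local HuggingFace embedding model and a Groq-hosted LLM as global defaults.
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
Settings.llm = Groq(model="llama3-70b-8192", api_key=os.getenv("GROQ_API_KEY"))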
task6_model_deployment/scripts/{01-vector_database_creation.py → vector_database_creation.py}
RENAMED
File without changes
task6_model_deployment/scripts/{02_vector_database_loading.py → vector_database_loading.py}
RENAMED
File without changes