Update app.py
app.py
@@ -5,13 +5,12 @@ import nest_asyncio
 nest_asyncio.apply()  # allow asyncio in Streamlit :contentReference[oaicite:3]{index=3}
 
 # ─── LlamaIndex & Parser Imports ────────────────────────────────
-from llama_index import StorageContext, load_index_from_storage
+from llama_index.core import StorageContext, load_index_from_storage
+from llama_index.llms.openai import OpenAI
 from llama_parse import LlamaParse
-from llama_index import VectorStoreIndex
+from llama_index.core import VectorStoreIndex
 from llama_index.embeddings.openai import OpenAIEmbedding
-from llama_index.core.workflow
-from llama_index.core.workflow.step_decorator import step
-from llama_index.core.workflow.events import Event, StartEvent, StopEvent, Context
+from llama_index.core.workflow import Event, StartEvent, StopEvent, Workflow, step, Context
 from llama_index.core.memory import ChatMemoryBuffer
 
 # ─── Constants ───────────────────────────────────────────────────
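After this change, every core LlamaIndex symbol comes from the llama_index.core namespace (with the LLM and embedding classes in the split-out llama_index.llms.openai and llama_index.embeddings.openai packages), and the workflow primitives are imported in a single line instead of from three submodules. Below is a minimal sketch of how the migrated imports fit together; the QueryFlow class, the ./storage persist directory, the question field, and the model name are illustrative assumptions, not taken from app.py.

from llama_index.core import StorageContext, load_index_from_storage
from llama_index.core.workflow import StartEvent, StopEvent, Workflow, step
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI

class QueryFlow(Workflow):
    @step
    async def answer(self, ev: StartEvent) -> StopEvent:
        # Reload a previously persisted index (assumes ./storage already exists).
        storage = StorageContext.from_defaults(persist_dir="./storage")
        index = load_index_from_storage(storage, embed_model=OpenAIEmbedding())
        # Answer the question passed to run() with an OpenAI-backed query engine.
        engine = index.as_query_engine(llm=OpenAI(model="gpt-4o-mini"))
        return StopEvent(result=str(engine.query(ev.question)))

# e.g. answer = await QueryFlow(timeout=120).run(question="What does the report cover?")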