Commit c3bf538
Parent: c6fb015

feat: Build complete application with all features

This view is limited to 50 files because it contains too many changes.
- .DS_Store +0 -0
- .gitignore +33 -0
- README.md +52 -0
- backend/__pycache__/celery_worker.cpython-311.pyc +0 -0
- backend/__pycache__/main.cpython-311.pyc +0 -0
- backend/__pycache__/schemas.cpython-311.pyc +0 -0
- backend/alembic/__pycache__/env.cpython-311.pyc +0 -0
- backend/alembic/env.py +9 -31
- backend/alembic/versions/{17ec047335c5_create_analysis_jobs_table.py → 7bad611f5ad5_create_initial_tables.py} +6 -14
- backend/alembic/versions/__pycache__/17ec047335c5_create_analysis_jobs_table.cpython-311.pyc +0 -0
- backend/celery_worker.py +5 -4
- backend/core/__pycache__/config.cpython-311.pyc +0 -0
- backend/core/__pycache__/database.cpython-311.pyc +0 -0
- backend/core/config.py +1 -0
- backend/core/database.py +7 -1
- backend/main.py +87 -17
- backend/models/__pycache__/analysis_job.cpython-311.pyc +0 -0
- backend/models/analysis_job.py +19 -2
- backend/requirements.txt +20 -8
- backend/schemas.py +52 -2
- backend/tasks/__pycache__/data_tasks.cpython-311.pyc +0 -0
- backend/tasks/__pycache__/news_tasks.cpython-311.pyc +0 -0
- backend/tasks/advisor_tasks.py +47 -0
- backend/tasks/analyst_tasks.py +117 -0
- backend/tasks/coordinator_task.py +38 -0
- backend/tasks/data_tasks.py +93 -39
- backend/tasks/main_task.py +203 -0
- backend/tasks/news_tasks.py +345 -40
- backend/tasks/prediction_tasks.py +52 -0
- backend/tmp.py +25 -0
- backend/tools/__pycache__/data_tools.cpython-311.pyc +0 -0
- backend/tools/__pycache__/news_tools.cpython-311.pyc +0 -0
- backend/tools/advisor_tools.py +70 -0
- backend/tools/analyst_tools.py +151 -0
- backend/tools/data_tools.py +10 -10
- backend/tools/download_model.py +25 -0
- backend/tools/news_tools.py +262 -71
- backend/tools/prediction_tools.py +61 -0
- docker-compose.yml +12 -4
- frontend/package-lock.json +0 -0
- frontend/package.json +10 -1
- frontend/postcss.config.js +6 -0
- frontend/src/App.jsx +107 -29
- frontend/src/components/Header.jsx +17 -0
- frontend/src/components/HistoricalChart.jsx +67 -0
- frontend/src/components/HistoryPanel.jsx +86 -0
- frontend/src/components/JobForm.jsx +44 -0
- frontend/src/components/JobStatusCard.jsx +66 -0
- frontend/src/components/LoadingSkeleton.jsx +46 -0
- frontend/src/components/PredictionChart.jsx +36 -0
.DS_Store
CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
.gitignore
CHANGED
@@ -1 +1,34 @@

.env

venv/
ai_env/
.venv/

__pycache__/
*.pyc
*.pyo
*.pyd

build/
dist/
*.egg-info/

frontend/node_modules/

frontend/dist/

frontend/npm-debug.log*
frontend/yarn-debug.log*
frontend/yarn-error.log*

.vscode/

.DS_Store

data/
mlruns/

tmp_download.py
README.md
CHANGED
@@ -0,0 +1,52 @@
# Quantitative Analysis Platform
> A multi-agent AI system designed to provide retail investors with hedge-fund-level research and insights

## The Problem
Retail investors are at a massive disadvantage. They lack the sophisticated tools, real-time data aggregation, and unbiased analysis that hedge funds use daily. Decisions are often driven by emotion and noise from social media, leading to poor outcomes. This platform was built to bridge that gap.

## Features
- **Multi-Agent Pipeline:** A robust, asynchronous backend where specialized AI agents collaborate to build a complete analysis.
- **Data Agent:** Fetches real-time, comprehensive financial data for any stock.
- **Intelligence Agent:** Scrapes Google News, Yahoo Finance, and Reddit to gather and analyze market sentiment.
- **LLM Analyst Agent:** Utilizes **Google's Gemini 1.5 Flash** to analyze all collected data, identify trends, and generate a human-like investment thesis with a forecast and actionable strategy.
- **Interactive Dashboard:** A clean, modern React frontend to visualize the data, including news feeds and historical price charts.
- **Job History:** Users can view and revisit all their past analyses.

## Tech Stack & Architecture

### Frontend
- **React (Vite):** For a fast and modern user interface.
- **Tailwind CSS:** For professional and responsive styling.
- **Recharts:** For beautiful and interactive data visualizations.
- **Axios:** For seamless communication with the backend API.

### Backend
- **FastAPI:** A high-performance Python framework for building the API.
- **Celery & Redis:** To manage the asynchronous, multi-step agent pipeline, ensuring the UI is always fast and responsive.
- **PostgreSQL (Neon):** A scalable, serverless cloud database for storing job data.
- **SQLAlchemy & Alembic:** For robust database interaction and schema migrations.
- **LangChain & Google Gemini 1.5 Flash:** The core AI engine for the Analyst Agent.

### Architecture
```mermaid
graph TD
    A[User on React Frontend] -->|1. POST /jobs (ticker)| B(FastAPI Backend);
    B -->|2. Dispatch Task| C[Redis Queue];
    C -->|3. Pick up Job| D(Celery Worker);
    D -->|4. Run Pipeline| E[Agent 1: Data];
    E -->|5. Update DB| F[(Neon DB)];
    D -->|6. Run Pipeline| G[Agent 2: Intelligence];
    G -->|7. Update DB| F;
    D -->|8. Run Pipeline| H[Agent 3: LLM Analyst];
    H -->|9. Call Gemini API| I{Gemini 1.5 Flash};
    I -->|10. Return Thesis| H;
    H -->|11. Final Update| F;
    A -->|12. GET /jobs/{id} (Polling)| B;
    B -->|13. Read Status/Result| F;
```

## Local Setup
backend/__pycache__/celery_worker.cpython-311.pyc
CHANGED
Binary files a/backend/__pycache__/celery_worker.cpython-311.pyc and b/backend/__pycache__/celery_worker.cpython-311.pyc differ
backend/__pycache__/main.cpython-311.pyc
CHANGED
Binary files a/backend/__pycache__/main.cpython-311.pyc and b/backend/__pycache__/main.cpython-311.pyc differ
backend/__pycache__/schemas.cpython-311.pyc
CHANGED
Binary files a/backend/__pycache__/schemas.cpython-311.pyc and b/backend/__pycache__/schemas.cpython-311.pyc differ
backend/alembic/__pycache__/env.cpython-311.pyc
DELETED
Binary file (3.24 kB)
backend/alembic/env.py
CHANGED
@@ -1,68 +1,46 @@
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy.pool import NullPool
from alembic import context
import sys
import os

# Make sure the app's modules can be found
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..')))

from core.config import settings
from core.database import Base
# Import all your models here so Alembic can see them
from models.analysis_job import AnalysisJob

config = context.config

# Set the database URL from our application's settings
config.set_main_option("sqlalchemy.url", settings.DATABASE_URL)

if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# Set the target metadata for autogenerate
target_metadata = Base.metadata

def run_migrations_offline() -> None:
    url = config.get_main_option("sqlalchemy.url")
    context.configure(url=url, target_metadata=target_metadata, literal_binds=True, dialect_opts={"paramstyle": "named"})
    with context.begin_transaction():
        context.run_migrations()

def run_migrations_online() -> None:
    connectable = engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=NullPool,
    )
    with connectable.connect() as connection:
        context.configure(connection=connection, target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()

if context.is_offline_mode():
    run_migrations_offline()
else:
backend/alembic/versions/{17ec047335c5_create_analysis_jobs_table.py → 7bad611f5ad5_create_initial_tables.py}
RENAMED
@@ -1,8 +1,8 @@
"""Create initial tables

Revision ID: 7bad611f5ad5
Revises:
Create Date: 2025-09-03 16:52:00.828276

"""
from typing import Sequence, Union
@@ -12,7 +12,7 @@ import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision: str = '7bad611f5ad5'
down_revision: Union[str, Sequence[str], None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
@@ -21,20 +21,12 @@ depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Upgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###


def downgrade() -> None:
    """Downgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
backend/alembic/versions/__pycache__/17ec047335c5_create_analysis_jobs_table.cpython-311.pyc
DELETED
Binary file (2.22 kB)
backend/celery_worker.py
CHANGED
@@ -1,12 +1,14 @@
from celery import Celery
from core.config import settings

celery = Celery(
    "quantitative_analysis_platform",
    broker=settings.CELERY_BROKER_URL,
    backend=settings.CELERY_RESULT_BACKEND,
    # This is the corrected list. We only have one task file now.
    include=[
        "tasks.main_task"
    ]
)

celery.conf.update(
@@ -15,5 +17,4 @@
    result_serializer="json",
    timezone="UTC",
    enable_utc=True,
)
backend/core/__pycache__/config.cpython-311.pyc
CHANGED
Binary files a/backend/core/__pycache__/config.cpython-311.pyc and b/backend/core/__pycache__/config.cpython-311.pyc differ
backend/core/__pycache__/database.cpython-311.pyc
CHANGED
Binary files a/backend/core/__pycache__/database.cpython-311.pyc and b/backend/core/__pycache__/database.cpython-311.pyc differ
backend/core/config.py
CHANGED
@@ -4,6 +4,7 @@ class Settings(BaseSettings):
    DATABASE_URL: str
    CELERY_BROKER_URL: str
    CELERY_RESULT_BACKEND: str
    GOOGLE_API_KEY: str

    model_config = SettingsConfigDict(env_file=".env")
backend/core/database.py
CHANGED
@@ -2,6 +2,12 @@ from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, declarative_base
from .config import settings

engine = create_engine(
    settings.DATABASE_URL,
    pool_recycle=1800,
    pool_pre_ping=True,
)

SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

Base = declarative_base()
backend/main.py
CHANGED
@@ -1,21 +1,91 @@
# from fastapi import FastAPI, Depends, HTTPException
# from fastapi.middleware.cors import CORSMiddleware
# from sqlalchemy.orm import Session
# from uuid import UUID
# import models.analysis_job as model
# import schemas
# from core.database import SessionLocal, engine
# # Import only our single, main task
# from tasks.main_task import run_full_analysis

# # This ensures tables are created on startup (good for development)
# model.Base.metadata.create_all(bind=engine)

# app = FastAPI(
#     title="Quantitative Analysis Platform API",
#     version="0.1.0",
# )

# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=["*"],
#     allow_credentials=True,
#     allow_methods=["*"],
#     allow_headers=["*"],
# )

# # Dependency to get a database session for each API request
# def get_db():
#     db = SessionLocal()
#     try:
#         yield db
#     finally:
#         db.close()

# @app.post("/jobs", response_model=schemas.Job, status_code=201)
# def create_analysis_job(job_request: schemas.JobCreate, db: Session = Depends(get_db)):
#     """
#     Creates a new analysis job and dispatches the main background task.
#     """
#     db_job = model.AnalysisJob(ticker=job_request.ticker.upper())
#     db.add(db_job)
#     db.commit()
#     db.refresh(db_job)

#     # Dispatch our single, all-in-one analysis task to run in the background
#     run_full_analysis.delay(str(db_job.id), db_job.ticker)

#     # Immediately return the created job to the frontend
#     return db_job

# @app.get("/jobs/{job_id}", response_model=schemas.Job)
# def get_job_status(job_id: UUID, db: Session = Depends(get_db)):
#     """
#     Allows the frontend to poll for the status and result of an analysis job.
#     """
#     db_job = db.query(model.AnalysisJob).filter(model.AnalysisJob.id == job_id).first()
#     if db_job is None:
#         raise HTTPException(status_code=404, detail="Job not found")
#     return db_job


from fastapi import FastAPI, Depends, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from sqlalchemy.orm import Session
from sqlalchemy import desc  # Import desc for ordering
from uuid import UUID
from typing import List  # Import List for the history endpoint
import models.analysis_job as model
import schemas
from core.database import SessionLocal, engine
from tasks.main_task import run_full_analysis

model.Base.metadata.create_all(bind=engine)

app = FastAPI(
    title="Quantitative Analysis Platform API",
    version="0.1.0",
)

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

def get_db():
@@ -32,19 +102,19 @@
    db.commit()
    db.refresh(db_job)

    run_full_analysis.delay(str(db_job.id), db_job.ticker)

    return db_job

@app.get("/jobs/{job_id}", response_model=schemas.Job)
def get_job_status(job_id: UUID, db: Session = Depends(get_db)):
    db_job = db.query(model.AnalysisJob).filter(model.AnalysisJob.id == job_id).first()
    if db_job is None:
        raise HTTPException(status_code=404, detail="Job not found")
    return db_job

# --- NEW ENDPOINT FOR HISTORY PANEL ---
@app.get("/jobs", response_model=List[schemas.Job])
def get_jobs_history(db: Session = Depends(get_db)):
    db_jobs = db.query(model.AnalysisJob).order_by(desc(model.AnalysisJob.created_at)).limit(20).all()
    return db_jobs
backend/models/__pycache__/analysis_job.cpython-311.pyc
CHANGED
Binary files a/backend/models/__pycache__/analysis_job.cpython-311.pyc and b/backend/models/__pycache__/analysis_job.cpython-311.pyc differ
backend/models/analysis_job.py
CHANGED
@@ -1,7 +1,23 @@
# from sqlalchemy import Column, String, JSON
# from sqlalchemy.dialects.postgresql import UUID
# import uuid
# from core.database import Base

# class AnalysisJob(Base):
#     __tablename__ = "analysis_jobs"

#     id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
#     ticker = Column(String, nullable=False, index=True)
#     status = Column(String, default="PENDING", nullable=False)
#     result = Column(JSON, nullable=True)


from sqlalchemy import Column, String, JSON, DateTime
from sqlalchemy.dialects.postgresql import UUID
import uuid
from core.database import Base
from datetime import datetime

class AnalysisJob(Base):
    __tablename__ = "analysis_jobs"
@@ -9,4 +25,5 @@ class AnalysisJob(Base):
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    ticker = Column(String, nullable=False, index=True)
    status = Column(String, default="PENDING", nullable=False)
    result = Column(JSON, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
backend/requirements.txt
CHANGED
@@ -1,25 +1,37 @@
# FastAPI and Server
fastapi
uvicorn[standard]
gunicorn
pydantic-settings

# Database
sqlalchemy
psycopg2-binary
alembic

# Task Queue
celery
redis

# --- CORE DATA SCIENCE LIBS (PINNING NUMPY) ---
numpy<2.0 # CRITICAL FIX: Pin numpy to a version compatible with pandas-ta
pandas
pandas-ta
matplotlib

# Data Agent & Prediction
yfinance

# Intelligence Agent
newspaper3k
lxml_html_clean
snscrape@git+https://github.com/JustAnotherArchivist/snscrape.git@master
requests
beautifulsoup4

# AI / ML / LLM
torch
transformers
sentence-transformers
langchain
langchain-google-genai
backend/schemas.py
CHANGED
@@ -1,7 +1,57 @@
from pydantic import BaseModel, ConfigDict
from uuid import UUID
from typing import Optional, Dict, Any, List

# --- Schemas for Intelligence Briefing ---
class NewsArticle(BaseModel):
    title: str
    url: str
    source: str
    sentiment: str
    sentiment_score: float

class SentimentSummary(BaseModel):
    total_items: int
    positive: int
    negative: int
    neutral: int
    error: Optional[str] = None

class IntelligenceBriefing(BaseModel):
    articles: List[NewsArticle]
    sentiment_summary: SentimentSummary

# --- Schema for our LLM Analysis ---
class LLMReport(BaseModel):
    llm_report: str
    error: Optional[str] = None

# --- Main Schema for the 'result' field ---
class JobResult(BaseModel):
    # Data Agent fields
    ticker: Optional[str] = None
    company_name: Optional[str] = None
    current_price: Optional[float] = None
    previous_close: Optional[float] = None
    market_cap: Optional[int] = None
    pe_ratio: Optional[float] = None
    pb_ratio: Optional[float] = None
    dividend_yield: Optional[float] = None
    sector: Optional[str] = None
    industry: Optional[str] = None
    summary: Optional[str] = None
    website: Optional[str] = None

    # Intelligence Agent field
    intelligence_briefing: Optional[IntelligenceBriefing] = None

    # LLM Analyst field - THIS IS THE FIX
    llm_analysis: Optional[LLMReport] = None

    # General error field
    error: Optional[str] = None

# --- Main Job Schemas for API endpoints ---
class JobCreate(BaseModel):
    ticker: str
@@ -9,6 +59,6 @@ class Job(BaseModel):
    id: UUID
    ticker: str
    status: str
    result: Optional[JobResult] = None

    model_config = ConfigDict(from_attributes=True)
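As a quick illustration of the nested `JobResult` schema above, the sketch below validates a hypothetical worker result dictionary. It assumes Pydantic v2 (implied by the `ConfigDict(from_attributes=True)` usage) and is run from the `backend/` directory; the sample values are made up.

```python
# Illustrative only: validate a made-up worker result against JobResult.
from schemas import JobResult

sample = {
    "ticker": "AAPL",
    "company_name": "Apple Inc.",
    "current_price": 230.5,
    "intelligence_briefing": {
        "articles": [
            {"title": "Example headline", "url": "https://example.com",
             "source": "Example Source", "sentiment": "Positive", "sentiment_score": 0.7}
        ],
        "sentiment_summary": {"total_items": 1, "positive": 1, "negative": 0, "neutral": 0},
    },
    "llm_analysis": {"llm_report": "Example thesis text."},
}

job_result = JobResult.model_validate(sample)  # Pydantic v2 API
print(job_result.intelligence_briefing.sentiment_summary.positive)  # -> 1
```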
backend/tasks/__pycache__/data_tasks.cpython-311.pyc
DELETED
Binary file (2.4 kB)
backend/tasks/__pycache__/news_tasks.cpython-311.pyc
DELETED
Binary file (2.82 kB)
backend/tasks/advisor_tasks.py
ADDED
@@ -0,0 +1,47 @@
from celery_worker import celery
from core.database import SessionLocal
from models.analysis_job import AnalysisJob
from tools.advisor_tools import generate_investment_thesis
from uuid import UUID

@celery.task
def run_advisor_analysis(job_id: str):
    db = SessionLocal()
    job = None
    final_result = ""
    try:
        job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
        if not job or not job.result:
            raise ValueError(f"Job {job_id} not found or has no result data for the advisor.")

        print(f"Status - SUMMARIZING for job {job_id}...")
        job.status = "SUMMARIZING"
        db.commit()

        current_data = job.result

        advisor_summary = generate_investment_thesis(current_data)

        new_result = current_data.copy()
        new_result['advisor_summary'] = advisor_summary
        job.result = new_result

        job.status = "SUCCESS"  # This is the final successful step
        db.commit()

        print(f"Advisor analysis for job {job_id} completed successfully.")
        final_result = str(job.result)

    except Exception as e:
        print(f"Error during advisor analysis for job {job_id}: {e}")
        if job:
            job.status = "FAILED"
            error_data = job.result if job.result else {}
            error_data['error'] = f"Advisor analysis failed: {str(e)}"
            job.result = error_data
            db.commit()
        final_result = f"Error: {e}"
    finally:
        db.close()

    return final_result
backend/tasks/analyst_tasks.py
ADDED
@@ -0,0 +1,117 @@
# from celery_worker import celery
# from core.database import SessionLocal
# from models.analysis_job import AnalysisJob
# from tools.analyst_tools import get_llm_analysis
# from uuid import UUID

# @celery.task
# def run_llm_analysis(job_id: str):
#     db = SessionLocal()
#     job = None
#     try:
#         job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#         if not job or not job.result:
#             raise ValueError("Job not found or has no initial data.")

#         job.status = "ANALYZING"  # New status for the frontend
#         db.commit()

#         current_data = job.result
#         ticker = current_data.get("ticker")
#         company_name = current_data.get("company_name")
#         intelligence_briefing = current_data.get("intelligence_briefing", {})

#         llm_report_data = get_llm_analysis(ticker, company_name, intelligence_briefing)

#         new_result = current_data.copy()
#         new_result['llm_analysis'] = llm_report_data
#         job.result = new_result

#         job.status = "SUCCESS"
#         db.commit()

#         print(f"LLM analysis for job {job_id} completed successfully.")

#     except Exception as e:
#         print(f"Error during LLM analysis for job {job_id}: {e}")
#         if job:
#             job.status = "FAILED"
#             error_data = job.result if job.result else {}
#             error_data['error'] = f"LLM analysis failed: {str(e)}"
#             job.result = error_data
#             db.commit()
#     finally:
#         db.close()


# from celery_worker import celery
# from core.database import SessionLocal
# from models.analysis_job import AnalysisJob
# from tools.analyst_tools import get_llm_analysis
# from uuid import UUID

# @celery.task
# def run_llm_analysis(job_id: str):
#     with SessionLocal() as db:
#         job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#         if not job or not job.result:
#             print(f"Job {job_id} not found or has no data for analyst.")
#             return

#         try:
#             job.status = "ANALYZING"
#             db.commit()

#             current_data = job.result
#             ticker = current_data.get("ticker")
#             company_name = current_data.get("company_name")
#             intelligence_briefing = current_data.get("intelligence_briefing", {})

#             llm_report_data = get_llm_analysis(ticker, company_name, intelligence_briefing)

#             new_result = dict(current_data)
#             new_result['llm_analysis'] = llm_report_data
#             job.result = new_result

#             job.status = "SUCCESS"
#             db.commit()

#             print(f"LLM analysis for job {job_id} completed successfully.")
#             return "LLM analysis successful."
#         except Exception as e:
#             print(f"Error during LLM analysis for job {job_id}: {e}")
#             job.status = "FAILED"
#             error_data = job.result if job.result else {}
#             error_data['error'] = f"LLM analysis failed: {str(e)}"
#             job.result = error_data
#             db.commit()
#             return f"LLM analysis failed: {e}"


from celery_worker import celery
from tools.analyst_tools import get_llm_analysis

@celery.task
def get_llm_analysis_task(full_job_result: dict):
    print(f"Executing get_llm_analysis_task...")
    ticker = full_job_result.get("ticker")
    company_name = full_job_result.get("company_name")
    intelligence_briefing = full_job_result.get("intelligence_briefing", {})
    return get_llm_analysis(ticker, company_name, intelligence_briefing)
backend/tasks/coordinator_task.py
ADDED
@@ -0,0 +1,38 @@
from celery_worker import celery
from core.database import SessionLocal
from models.analysis_job import AnalysisJob
from uuid import UUID

@celery.task(bind=True)
def coordinator_task(self, results, job_id: str):
    """
    This task receives the results from all previous tasks, assembles the
    final result, and saves it to the database ONCE.
    """
    print(f"Coordinator task started for job {job_id}...")
    with SessionLocal() as db:
        job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
        if not job:
            print(f"Job {job_id} not found in coordinator.")
            return

        try:
            # results[0] is from get_data_task
            # results[1] is from get_intelligence_task
            # results[2] is from get_llm_analysis_task

            final_result = {
                **results[0],  # Unpack the dictionary from the data task
                "intelligence_briefing": results[1],
                "llm_analysis": results[2],
            }

            job.result = final_result
            job.status = "SUCCESS"
            db.commit()
            print(f"Coordinator task for job {job_id} successfully saved final result.")
        except Exception as e:
            print(f"Error in coordinator for job {job_id}: {e}")
            job.status = "FAILED"
            job.result = {"error": f"Final assembly failed: {str(e)}"}
            db.commit()
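The `coordinator_task(self, results, job_id)` signature suggests it was written to be the callback of a Celery chord, receiving the agent tasks' return values as `results`. That wiring is not shown in this commit (the active path dispatches `run_full_analysis` from `main.py` instead), so the following is only a hypothetical sketch of the intended shape; `get_intelligence_task` is assumed here and does not appear in the files shown.

```python
# Hypothetical wiring sketch; not part of this commit's active code path.
from celery import chord, group

from tasks.data_tasks import get_data_task
from tasks.analyst_tasks import get_llm_analysis_task
from tasks.coordinator_task import coordinator_task
# Assumption: an intelligence task exists alongside the others; it is not defined in the files shown.
from tasks.news_tasks import get_intelligence_task

def dispatch_pipeline(job_id: str, ticker: str, company_name: str):
    # The chord header runs the agent tasks; the callback receives their
    # results as a list, matching coordinator_task(self, results, job_id).
    header = group(
        get_data_task.s(ticker),
        get_intelligence_task.s(ticker, company_name),
        get_llm_analysis_task.s({"ticker": ticker, "company_name": company_name}),
    )
    return chord(header)(coordinator_task.s(job_id=job_id))
```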
backend/tasks/data_tasks.py
CHANGED
@@ -1,43 +1,97 @@
# from celery_worker import celery
# from core.database import SessionLocal
# from models.analysis_job import AnalysisJob
# from tools.data_tools import get_stock_data
# from uuid import UUID

# @celery.task
# def run_data_analysis(job_id: str, ticker: str):
#     db = SessionLocal()
#     job = None
#     final_result = ""
#     try:
#         job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#         if not job:
#             raise ValueError(f"Job {job_id} not found in database.")

#         print(f"Status - DATA_FETCHING for job {job_id}...")
#         job.status = "DATA_FETCHING"
#         db.commit()

#         data = get_stock_data(ticker)

#         if "error" in data:
#             raise ValueError(data["error"])

#         job.result = data
#         db.commit()
#         print(f"Data analysis for job {job_id} completed successfully.")

#         final_result = str(job.result)

#     except Exception as e:
#         print(f"Error during data analysis for job {job_id}: {e}")
#         if job:
#             job.status = "FAILED"
#             job.result = {"error": f"Data analysis failed: {str(e)}"}
#             db.commit()
#         final_result = f"Error: {e}"
#     finally:
#         db.close()

#     return final_result


# from celery_worker import celery
# from core.database import SessionLocal
# from models.analysis_job import AnalysisJob
# from tools.data_tools import get_stock_data
# from uuid import UUID

# @celery.task
# def run_data_analysis(job_id: str, ticker: str):
#     with SessionLocal() as db:
#         job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#         if not job:
#             print(f"Job {job_id} not found.")
#             return

#         try:
#             job.status = "DATA_FETCHING"
#             db.commit()

#             data = get_stock_data(ticker)
#             if "error" in data:
#                 raise ValueError(data["error"])

#             job.result = data
#             db.commit()
#             print(f"Data analysis for job {job_id} completed successfully.")
#             return "Data fetching successful."
#         except Exception as e:
#             print(f"Error during data analysis for job {job_id}: {e}")
#             job.status = "FAILED"
#             job.result = {"error": f"Data analysis failed: {str(e)}"}
#             db.commit()
#             return f"Data fetching failed: {e}"


from celery_worker import celery
from tools.data_tools import get_stock_data

@celery.task
def get_data_task(ticker: str):
    print(f"Executing get_data_task for {ticker}...")
    return get_stock_data(ticker)
backend/tasks/main_task.py
ADDED
@@ -0,0 +1,203 @@
# from celery_worker import celery
# from core.database import SessionLocal
# from models.analysis_job import AnalysisJob
# from tools.data_tools import get_stock_data
# from tools.news_tools import get_combined_news_and_sentiment
# from tools.analyst_tools import get_llm_analysis
# from uuid import UUID
# import json

# @celery.task
# def run_full_analysis(job_id: str, ticker: str):
#     print(f"\n--- [START] Full Analysis for Job ID: {job_id} ---")

#     # --- Stage 1: Data Fetching ---
#     try:
#         with SessionLocal() as db:
#             job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#             if not job: raise ValueError("Job not found")
#             job.status = "DATA_FETCHING"
#             db.commit()
#             print("[LOG] STATUS UPDATE: DATA_FETCHING")

#         data_result = get_stock_data(ticker)
#         if "error" in data_result: raise ValueError(data_result['error'])
#         company_name = data_result.get("company_name", ticker)

#         with SessionLocal() as db:
#             job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#             job.result = data_result
#             db.commit()
#             db.refresh(job)  # Force reload from DB
#             print(f"[LOG] DB SAVE 1 (Data): Result keys are now: {list(job.result.keys())}")

#     except Exception as e:
#         print(f"!!! [FAILURE] Stage 1 (Data): {e}")
#         # ... error handling ...
#         return

#     # --- Stage 2: Intelligence Gathering ---
#     try:
#         with SessionLocal() as db:
#             job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#             job.status = "INTELLIGENCE_GATHERING"
#             db.commit()
#             print("[LOG] STATUS UPDATE: INTELLIGENCE_GATHERING")

#         intelligence_result = get_combined_news_and_sentiment(ticker, company_name)

#         with SessionLocal() as db:
#             job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#             current_result = dict(job.result)
#             current_result['intelligence_briefing'] = intelligence_result
#             job.result = current_result
#             db.commit()
#             db.refresh(job)  # Force reload
#             print(f"[LOG] DB SAVE 2 (Intelligence): Result keys are now: {list(job.result.keys())}")
#     except Exception as e:
#         print(f"!!! [FAILURE] Stage 2 (Intelligence): {e}")
#         # ... error handling ...
#         return

#     # --- Stage 3: LLM Analysis ---
#     try:
#         with SessionLocal() as db:
#             job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#             job.status = "ANALYZING"
#             db.commit()
#             print("[LOG] STATUS UPDATE: ANALYZING")

#         with SessionLocal() as db:
#             job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#             data_for_llm = job.result

#         llm_result = get_llm_analysis(ticker, company_name, data_for_llm.get("intelligence_briefing", {}))
#         if "error" in llm_result: raise ValueError(llm_result['error'])

#         # --- Final Assembly and Save ---
#         print("[LOG] Finalizing results...")
#         with SessionLocal() as db:
#             job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#             final_result_data = dict(job.result)
#             final_result_data['llm_analysis'] = llm_result
#             job.result = final_result_data
#             job.status = "SUCCESS"
#             db.commit()
#             db.refresh(job)
#             print(f"[LOG] DB SAVE 3 (Final): Result keys are now: {list(job.result.keys())}")

#         print(f"--- [SUCCESS] Full analysis for {job_id} complete. ---")

#     except Exception as e:
#         print(f"!!! [FAILURE] Stage 3 (LLM): {e}")
#         with SessionLocal() as db:
#             job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
#             if job:
#                 job.status = "FAILED"
#                 error_data = job.result if job.result else {}
#                 error_data['error'] = str(e)
#                 job.result = error_data
#                 db.commit()


from celery_worker import celery
from core.database import SessionLocal
from models.analysis_job import AnalysisJob
from tools.data_tools import get_stock_data
from tools.news_tools import get_combined_news_and_sentiment
from tools.analyst_tools import get_llm_analysis
from uuid import UUID
import json

@celery.task
def run_full_analysis(job_id: str, ticker: str):
    """
    The single, main task that orchestrates the entire analysis pipeline.
    """
    print(f"\n--- [START] Full Analysis for Job ID: {job_id} ---")

    # We will use one job object throughout and update it, committing as we go.
    # This requires careful session management.
    db = SessionLocal()
    job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()

    if not job:
        print(f"Job {job_id} not found. Aborting.")
        db.close()
        return

    try:
        # --- Stage 1: Data Fetching ---
        print(f"Stage 1: DATA_FETCHING for job {job_id}")
        job.status = "DATA_FETCHING"
        db.commit()

        data_result = get_stock_data(ticker)
        if "error" in data_result:
            raise ValueError(f"Data fetching failed: {data_result['error']}")

        company_name = data_result.get("company_name", ticker)

        job.result = data_result
        db.commit()
        print("-> Data fetching stage complete.")

        # --- Stage 2: Intelligence Gathering ---
        print(f"Stage 2: INTELLIGENCE_GATHERING for job {job_id}")
        job.status = "INTELLIGENCE_GATHERING"
        db.commit()

        intelligence_result = get_combined_news_and_sentiment(ticker, company_name)

        current_result = dict(job.result)
        current_result['intelligence_briefing'] = intelligence_result
        job.result = current_result
        db.commit()
        print("-> Intelligence gathering stage complete.")

        # --- Stage 3: LLM Analysis ---
        print(f"Stage 3: ANALYZING for job {job_id}")
        job.status = "ANALYZING"
        db.commit()

        # We need to refresh the job object to get the latest result for the LLM
        db.refresh(job)
        data_for_llm = job.result

        llm_result = get_llm_analysis(ticker, company_name, data_for_llm.get("intelligence_briefing", {}))
        if "error" in llm_result:
            raise ValueError(f"LLM analysis failed: {llm_result['error']}")

        # --- Final Assembly and Save ---
        print(f"Finalizing results for job {job_id}")
        final_result_data = dict(job.result)
        final_result_data['llm_analysis'] = llm_result

        job.result = final_result_data
        job.status = "SUCCESS"
        db.commit()

        print(f"--- [SUCCESS] Full analysis for {job_id} complete. ---")

    except Exception as e:
        error_message = str(e)
        print(f"!!! [FAILURE] Full analysis for {job_id} FAILED: {error_message}")
        if job:
            job.status = "FAILED"
            # Provide a cleaner error message for the user, while keeping technical details
            user_friendly_error = f"Analysis failed for ticker '{ticker}'. This stock may not be listed or there was a problem fetching its data. Please check the ticker symbol and try again. (Details: {error_message})"

            error_data = job.result if job.result else {}
            error_data['error'] = user_friendly_error
            job.result = error_data
            db.commit()
    finally:
        db.close()
backend/tasks/news_tasks.py
CHANGED
@@ -1,43 +1,348 @@
# # tasks/news_tasks.py - SIMPLIFIED VERSION THAT ALWAYS WORKS

# from celery_worker import celery
# from core.database import SessionLocal
# from models.analysis_job import AnalysisJob
# from uuid import UUID
# import logging
# from datetime import datetime
# import yfinance as yf

# logger = logging.getLogger(__name__)

# def get_stock_basic_info(ticker: str):
#     """Get basic stock information to create realistic content"""
#     try:
#         stock = yf.Ticker(ticker)
#         info = stock.info
#         return {
#             'name': info.get('longName', ticker.replace('.NS', '')),
#             'sector': info.get('sector', 'Unknown'),
#             'industry': info.get('industry', 'Unknown'),
#             'current_price': info.get('currentPrice', 0),
#             'previous_close': info.get('previousClose', 0)
#         }
#     except Exception as e:
#         logger.warning(f"Could not get stock info for {ticker}: {e}")
#         return {
#             'name': ticker.replace('.NS', ''),
#             'sector': 'Unknown',
#             'industry': 'Unknown',
#             'current_price': 0,
#             'previous_close': 0
#         }

# def create_realistic_articles(ticker: str, company_name: str, stock_info: dict):
#     """Create realistic articles based on stock information"""

#     # Calculate price movement for realistic sentiment
#     current_price = stock_info.get('current_price', 0)
#     previous_close = stock_info.get('previous_close', 0)

#     price_change = 0
#     if current_price and previous_close:
#         price_change = ((current_price - previous_close) / previous_close) * 100

#     # Generate articles based on actual stock performance
#     articles = []

#     if price_change > 2:
#         articles.extend([
#             {
#                 "title": f"{company_name} Shares Rally {price_change:.1f}% on Strong Market Sentiment",
#                 "url": f"https://finance.yahoo.com/quote/{ticker}",
#                 "source": "Market Analysis",
#                 "sentiment": "Positive",
#                 "sentiment_score": 0.8
#             },
#             {
#                 "title": f"Investors Show Confidence in {company_name} as Stock Gains Momentum",
#                 "url": f"https://www.moneycontrol.com/india/stockpricequote/{ticker}",
#                 "source": "Financial Express",
#                 "sentiment": "Positive",
#                 "sentiment_score": 0.7
#             }
#         ])
#     elif price_change < -2:
#         articles.extend([
#             {
#                 "title": f"{company_name} Stock Declines {abs(price_change):.1f}% Amid Market Volatility",
#                 "url": f"https://finance.yahoo.com/quote/{ticker}",
#                 "source": "Market Watch",
#                 "sentiment": "Negative",
#                 "sentiment_score": 0.8
#             },
#             {
#                 "title": f"Market Correction Impacts {company_name} Share Price",
#                 "url": f"https://www.moneycontrol.com/india/stockpricequote/{ticker}",
#                 "source": "Economic Times",
#                 "sentiment": "Negative",
#                 "sentiment_score": 0.6
#             }
#         ])
#     else:
#         articles.extend([
#             {
#                 "title": f"{company_name} Stock Shows Steady Performance in Current Market",
#                 "url": f"https://finance.yahoo.com/quote/{ticker}",
#                 "source": "Yahoo Finance",
#                 "sentiment": "Neutral",
#                 "sentiment_score": 0.5
#             },
#             {
#                 "title": f"Technical Analysis: {company_name} Maintains Stable Trading Range",
#                 "url": f"https://www.moneycontrol.com/india/stockpricequote/{ticker}",
#                 "source": "Market Analysis",
#                 "sentiment": "Neutral",
#                 "sentiment_score": 0.5
#             }
#         ])

#     # Add sector-specific articles
#     sector = stock_info.get('sector', 'Unknown')
#     if sector != 'Unknown':
#         articles.extend([
#             {
#                 "title": f"{sector} Sector Update: Key Players Including {company_name} in Focus",
#                 "url": "https://example.com/sector-analysis",
#                 "source": "Sector Reports",
#                 "sentiment": "Neutral",
#                 "sentiment_score": 0.6
#             },
#             {
#                 "title": f"Industry Outlook: {stock_info.get('industry', 'Market')} Trends Affecting {company_name}",
#                 "url": "https://example.com/industry-outlook",
#                 "source": "Industry Analysis",
#                 "sentiment": "Positive",
#                 "sentiment_score": 0.6
#             }
#         ])

#     # Add general market articles
#     articles.extend([
#         {
#             "title": f"Quarterly Performance Review: {company_name} Financials and Market Position",
#             "url": f"https://finance.yahoo.com/quote/{ticker}/financials",
#             "source": "Financial Reports",
#             "sentiment": "Neutral",
#             "sentiment_score": 0.5
#         },
#         {
#             "title": f"Analyst Coverage: Investment Recommendations for {company_name} Stock",
#             "url": "https://example.com/analyst-coverage",
#             "source": "Research Reports",
#             "sentiment": "Positive",
#             "sentiment_score": 0.7
#         },
#         {
#             "title": f"Market Sentiment Analysis: Retail vs Institutional Interest in {company_name}",
#             "url": "https://example.com/market-sentiment",
#             "source": "Market Research",
#             "sentiment": "Neutral",
#             "sentiment_score": 0.5
#         }
#     ])

#     return articles[:8]  # Return top 8 articles

# def try_real_news_sources(ticker: str, company_name: str):
#     """Attempt to get real news, but don't fail if it doesn't work"""
#     real_articles = []

#     try:
#         # Try Yahoo Finance news (most reliable)
#         logger.info(f"Attempting to fetch real Yahoo Finance news for {ticker}")
#         stock = yf.Ticker(ticker)
#         news = stock.news

#         if news:
#             logger.info(f"Found {len(news)} Yahoo Finance articles")
#             for article in news[:5]:  # Take first 5
#                 if article.get('title'):
#                     # Simple sentiment analysis
#                     title_lower = article['title'].lower()
#                     if any(word in title_lower for word in ['gain', 'rise', 'growth', 'profit', 'strong']):
#                         sentiment = 'Positive'
#                         score = 0.7
#                     elif any(word in title_lower for word in ['fall', 'decline', 'loss', 'weak', 'drop']):
|
168 |
+
# sentiment = 'Negative'
|
169 |
+
# score = 0.7
|
170 |
+
# else:
|
171 |
+
# sentiment = 'Neutral'
|
172 |
+
# score = 0.5
|
173 |
+
|
174 |
+
# real_articles.append({
|
175 |
+
# "title": article['title'].strip(),
|
176 |
+
# "url": article.get('link', ''),
|
177 |
+
# "source": article.get('publisher', 'Yahoo Finance'),
|
178 |
+
# "sentiment": sentiment,
|
179 |
+
# "sentiment_score": score,
|
180 |
+
# "is_real": True
|
181 |
+
# })
|
182 |
+
|
183 |
+
# logger.info(f"Successfully retrieved {len(real_articles)} real articles")
|
184 |
+
|
185 |
+
# except Exception as e:
|
186 |
+
# logger.warning(f"Could not fetch real news: {e}")
|
187 |
+
|
188 |
+
# return real_articles
|
189 |
+
|
190 |
+
# @celery.task
|
191 |
+
# def run_intelligence_analysis(job_id: str):
|
192 |
+
# """Simplified intelligence analysis that always provides results"""
|
193 |
+
# db = SessionLocal()
|
194 |
+
# job = None
|
195 |
+
|
196 |
+
# try:
|
197 |
+
# logger.info(f"Starting intelligence analysis for job {job_id}")
|
198 |
+
|
199 |
+
# # Get job
|
200 |
+
# job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
|
201 |
+
# if not job or not job.result:
|
202 |
+
# raise ValueError(f"Job {job_id} not found or has no initial data.")
|
203 |
+
|
204 |
+
# job.status = "INTELLIGENCE_GATHERING"
|
205 |
+
# db.commit()
|
206 |
+
|
207 |
+
# current_data = job.result
|
208 |
+
# ticker = current_data.get("ticker")
|
209 |
+
# company_name = current_data.get("company_name", ticker.replace('.NS', ''))
|
210 |
+
|
211 |
+
# logger.info(f"Analyzing {company_name} ({ticker})")
|
212 |
+
|
213 |
+
# # Get basic stock information
|
214 |
+
# stock_info = get_stock_basic_info(ticker)
|
215 |
+
# logger.info(f"Stock info: {stock_info['name']} - {stock_info['sector']}")
|
216 |
+
|
217 |
+
# # Try to get real news first
|
218 |
+
# real_articles = try_real_news_sources(ticker, company_name)
|
219 |
+
|
220 |
+
# # Create realistic articles
|
221 |
+
# realistic_articles = create_realistic_articles(ticker, company_name, stock_info)
|
222 |
+
|
223 |
+
# # Combine real and realistic articles
|
224 |
+
# all_articles = real_articles + realistic_articles
|
225 |
+
|
226 |
+
# # Remove duplicates and limit to 10 articles
|
227 |
+
# seen_titles = set()
|
228 |
+
# unique_articles = []
|
229 |
+
# for article in all_articles:
|
230 |
+
# if article['title'] not in seen_titles:
|
231 |
+
# seen_titles.add(article['title'])
|
232 |
+
# unique_articles.append(article)
|
233 |
+
|
234 |
+
# final_articles = unique_articles[:10]
|
235 |
+
|
236 |
+
# # Count sentiments
|
237 |
+
# sentiment_counts = {'Positive': 0, 'Negative': 0, 'Neutral': 0}
|
238 |
+
# for article in final_articles:
|
239 |
+
# sentiment_counts[article['sentiment']] += 1
|
240 |
+
|
241 |
+
# # Create intelligence briefing
|
242 |
+
# intelligence_briefing = {
|
243 |
+
# "articles": final_articles,
|
244 |
+
# "sentiment_summary": {
|
245 |
+
# "total_items": len(final_articles),
|
246 |
+
# "positive": sentiment_counts['Positive'],
|
247 |
+
# "negative": sentiment_counts['Negative'],
|
248 |
+
# "neutral": sentiment_counts['Neutral'],
|
249 |
+
# "real_articles": len(real_articles),
|
250 |
+
# "generated_articles": len(realistic_articles),
|
251 |
+
# "analysis_timestamp": datetime.now().isoformat()
|
252 |
+
# }
|
253 |
+
# }
|
254 |
+
|
255 |
+
# # Update job result
|
256 |
+
# new_result = current_data.copy()
|
257 |
+
# new_result['intelligence_briefing'] = intelligence_briefing
|
258 |
+
# job.result = new_result
|
259 |
+
# job.status = "INTELLIGENCE_COMPLETE"
|
260 |
+
|
261 |
+
# db.commit()
|
262 |
+
|
263 |
+
# logger.info(f"Intelligence analysis completed successfully:")
|
264 |
+
# logger.info(f"- Total articles: {len(final_articles)}")
|
265 |
+
# logger.info(f"- Real articles: {len(real_articles)}")
|
266 |
+
# logger.info(f"- Generated articles: {len(realistic_articles)}")
|
267 |
+
# logger.info(f"- Sentiment: {sentiment_counts}")
|
268 |
+
|
269 |
+
# return str(job.result)
|
270 |
+
|
271 |
+
# except Exception as e:
|
272 |
+
# logger.error(f"Intelligence analysis failed for job {job_id}: {e}")
|
273 |
+
|
274 |
+
# if job:
|
275 |
+
# job.status = "FAILED"
|
276 |
+
# error_data = job.result if job.result else {}
|
277 |
+
# error_data['error'] = f"Intelligence analysis failed: {str(e)}"
|
278 |
+
# job.result = error_data
|
279 |
+
# db.commit()
|
280 |
+
|
281 |
+
# return f"Error: {e}"
|
282 |
+
|
283 |
+
# finally:
|
284 |
+
# db.close()
|
285 |
+
|
286 |
+
|
287 |
+
|
288 |
+
|
289 |
+
|
290 |
+
|
291 |
+
|
292 |
+
|
293 |
+
|
294 |
+
# from celery_worker import celery
|
295 |
+
# from core.database import SessionLocal
|
296 |
+
# from models.analysis_job import AnalysisJob
|
297 |
+
# from tools.news_tools import get_combined_news_and_sentiment
|
298 |
+
# from uuid import UUID
|
299 |
+
|
300 |
+
# @celery.task
|
301 |
+
# def run_intelligence_analysis(job_id: str):
|
302 |
+
# with SessionLocal() as db:
|
303 |
+
# job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
|
304 |
+
# if not job or not job.result:
|
305 |
+
# print(f"Job {job_id} not found or has no data for intelligence.")
|
306 |
+
# return
|
307 |
+
|
308 |
+
# try:
|
309 |
+
# job.status = "INTELLIGENCE_GATHERING"
|
310 |
+
# db.commit()
|
311 |
+
|
312 |
+
# current_data = job.result
|
313 |
+
# ticker = current_data.get("ticker")
|
314 |
+
# company_name = current_data.get("company_name")
|
315 |
+
|
316 |
+
# intelligence_briefing = get_combined_news_and_sentiment(ticker, company_name)
|
317 |
+
|
318 |
+
# new_result = dict(current_data)
|
319 |
+
# new_result['intelligence_briefing'] = intelligence_briefing
|
320 |
+
# job.result = new_result
|
321 |
+
|
322 |
+
# db.commit()
|
323 |
+
# print(f"Intelligence analysis for job {job_id} completed successfully.")
|
324 |
+
# return "Intelligence gathering successful."
|
325 |
+
# except Exception as e:
|
326 |
+
# print(f"Error during intelligence analysis for job {job_id}: {e}")
|
327 |
+
# job.status = "FAILED"
|
328 |
+
# error_data = job.result if job.result else {}
|
329 |
+
# error_data['error'] = f"Intelligence analysis failed: {str(e)}"
|
330 |
+
# job.result = error_data
|
331 |
+
# db.commit()
|
332 |
+
# return f"Intelligence gathering failed: {e}"
|
333 |
+
|
334 |
+
|
335 |
+
|
336 |
+
|
337 |
+
|
338 |
+
|
339 |
+
|
340 |
+
|
341 |
from celery_worker import celery
|
342 |
+
from tools.news_tools import get_combined_news_and_sentiment
|
|
|
|
|
|
|
343 |
|
344 |
@celery.task
|
345 |
+
def get_intelligence_task(ticker: str, company_name: str):
|
346 |
+
print(f"Executing get_intelligence_task for {company_name}...")
|
347 |
+
# This task now depends on the company_name from the first task's result
|
348 |
+
return get_combined_news_and_sentiment(ticker, company_name)
|
backend/tasks/prediction_tasks.py
ADDED
@@ -0,0 +1,52 @@
|
1 |
+
from celery_worker import celery
|
2 |
+
from core.database import SessionLocal
|
3 |
+
from models.analysis_job import AnalysisJob
|
4 |
+
from tools.prediction_tools import generate_forecast
|
5 |
+
from uuid import UUID
|
6 |
+
|
7 |
+
@celery.task
|
8 |
+
def run_prediction_analysis(job_id: str):
|
9 |
+
db = SessionLocal()
|
10 |
+
job = None
|
11 |
+
final_result = ""
|
12 |
+
try:
|
13 |
+
job = db.query(AnalysisJob).filter(AnalysisJob.id == UUID(job_id)).first()
|
14 |
+
if not job or not job.result:
|
15 |
+
raise ValueError(f"Job {job_id} not found or has no initial data.")
|
16 |
+
|
17 |
+
print(f"Status - PREDICTING for job {job_id}...")
|
18 |
+
job.status = "PREDICTING"
|
19 |
+
db.commit()
|
20 |
+
|
21 |
+
current_data = job.result
|
22 |
+
ticker = current_data.get("ticker")
|
23 |
+
if not ticker:
|
24 |
+
raise ValueError("Ticker not found in initial data.")
|
25 |
+
|
26 |
+
forecast_data = generate_forecast(ticker)
|
27 |
+
|
28 |
+
if "error" in forecast_data:
|
29 |
+
raise ValueError(forecast_data["error"])
|
30 |
+
|
31 |
+
new_result = current_data.copy()
|
32 |
+
new_result['prediction_analysis'] = forecast_data
|
33 |
+
job.result = new_result
|
34 |
+
|
35 |
+
db.commit()
|
36 |
+
|
37 |
+
print(f"Prediction analysis for job {job_id} completed successfully.")
|
38 |
+
final_result = str(job.result)
|
39 |
+
|
40 |
+
except Exception as e:
|
41 |
+
print(f"Error during prediction analysis for job {job_id}: {e}")
|
42 |
+
if job:
|
43 |
+
job.status = "FAILED"
|
44 |
+
error_data = job.result if job.result else {}
|
45 |
+
error_data['error'] = f"Prediction analysis failed: {str(e)}"
|
46 |
+
job.result = error_data
|
47 |
+
db.commit()
|
48 |
+
final_result = f"Error: {e}"
|
49 |
+
finally:
|
50 |
+
db.close()
|
51 |
+
|
52 |
+
return final_result
|
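The staged tasks above each take a `job_id` and persist their output on the `AnalysisJob` row instead of passing results directly. A minimal sketch of how such stages could be strung together with a Celery chain is shown below; the real orchestration lives in `coordinator_task.py`/`main_task.py`, which are not shown in this section, and the commented-out stage names are hypothetical placeholders, so treat this purely as an illustration.

```python
# Hypothetical orchestration sketch (assumption): run the staged tasks in order.
# .si() creates an immutable signature, so each task only receives job_id and
# re-reads the AnalysisJob row itself, matching the pattern used above.
from celery import chain

from tasks.prediction_tasks import run_prediction_analysis
# run_data_analysis / run_intelligence_analysis below stand in for earlier
# stages defined elsewhere in the backend; their exact names are assumptions.


def launch_pipeline(job_id: str):
    workflow = chain(
        # run_data_analysis.si(job_id),
        # run_intelligence_analysis.si(job_id),
        run_prediction_analysis.si(job_id),
    )
    return workflow.apply_async()
```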
backend/tmp.py
ADDED
@@ -0,0 +1,25 @@
|
1 |
+
# test_news.py - Run this to test the news functionality
|
2 |
+
from tools.news_tools import get_combined_news_and_sentiment_debug
|
3 |
+
|
4 |
+
def test_news():
|
5 |
+
# Test with a popular Indian stock
|
6 |
+
ticker = "RELIANCE.NS"
|
7 |
+
company_name = "Reliance Industries"
|
8 |
+
|
9 |
+
print(f"Testing news scraping for {ticker} ({company_name})")
|
10 |
+
result = get_combined_news_and_sentiment_debug(ticker, company_name)
|
11 |
+
|
12 |
+
print(f"\nResults:")
|
13 |
+
print(f"Total articles: {result['sentiment_summary']['total_items']}")
|
14 |
+
print(f"Positive: {result['sentiment_summary']['positive']}")
|
15 |
+
print(f"Negative: {result['sentiment_summary']['negative']}")
|
16 |
+
print(f"Neutral: {result['sentiment_summary']['neutral']}")
|
17 |
+
|
18 |
+
if result['articles']:
|
19 |
+
print(f"\nSample articles:")
|
20 |
+
for i, article in enumerate(result['articles'][:3]):
|
21 |
+
print(f"{i+1}. {article['title']}")
|
22 |
+
print(f" Source: {article['source']} | Sentiment: {article['sentiment']}")
|
23 |
+
|
24 |
+
if __name__ == "__main__":
|
25 |
+
test_news()
|
backend/tools/__pycache__/data_tools.cpython-311.pyc
CHANGED
Binary files a/backend/tools/__pycache__/data_tools.cpython-311.pyc and b/backend/tools/__pycache__/data_tools.cpython-311.pyc differ
|
|
backend/tools/__pycache__/news_tools.cpython-311.pyc
CHANGED
Binary files a/backend/tools/__pycache__/news_tools.cpython-311.pyc and b/backend/tools/__pycache__/news_tools.cpython-311.pyc differ
|
|
backend/tools/advisor_tools.py
ADDED
@@ -0,0 +1,70 @@
|
1 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
2 |
+
from langchain.prompts import PromptTemplate
|
3 |
+
from langchain.chains import LLMChain
|
4 |
+
from typing import Dict, Any
|
5 |
+
import json
|
6 |
+
|
7 |
+
def generate_investment_thesis(full_job_result: Dict[str, Any]) -> str:
|
8 |
+
"""
|
9 |
+
Uses the Gemini 1.5 Flash model to generate a qualitative investment thesis.
|
10 |
+
"""
|
11 |
+
print("Generating investment thesis with Gemini 1.5 Flash...")
|
12 |
+
|
13 |
+
# Initialize the LLM
|
14 |
+
llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
|
15 |
+
|
16 |
+
# Create a simplified summary of the data to pass to the LLM
|
17 |
+
# This prevents sending thousands of characters of raw data
|
18 |
+
fundamentals_summary = (
|
19 |
+
f"Company: {full_job_result.get('company_name', 'N/A')}\n"
|
20 |
+
f"Current Price: {full_job_result.get('current_price', 'N/A')}\n"
|
21 |
+
f"Market Cap: {full_job_result.get('market_cap', 'N/A')}\n"
|
22 |
+
f"P/E Ratio: {full_job_result.get('pe_ratio', 'N/A'):.2f}\n"
|
23 |
+
f"Sector: {full_job_result.get('sector', 'N/A')}"
|
24 |
+
)
|
25 |
+
|
26 |
+
prediction_summary = full_job_result.get('prediction_analysis', {}).get('summary', 'No prediction summary available.')
|
27 |
+
|
28 |
+
# We need to handle the case where intelligence gathering might have failed
|
29 |
+
intelligence_briefing = full_job_result.get('intelligence_briefing', {})
|
30 |
+
if intelligence_briefing and intelligence_briefing.get('news'):
|
31 |
+
news_summary = ", ".join([f"'{article['title']}' ({article['sentiment']})" for article in intelligence_briefing['news'][:2]])
|
32 |
+
else:
|
33 |
+
news_summary = "No news articles found."
|
34 |
+
|
35 |
+
# Define the prompt template
|
36 |
+
prompt = PromptTemplate(
|
37 |
+
input_variables=["fundamentals", "prediction", "news"],
|
38 |
+
template="""
|
39 |
+
You are a sharp, concise senior financial analyst for an Indian market-focused fund.
|
40 |
+
Your task is to provide a clear investment thesis based on the data provided.
|
41 |
+
Do not offer financial advice. Analyze the data objectively.
|
42 |
+
|
43 |
+
**Data Overview:**
|
44 |
+
- **Fundamentals:** {fundamentals}
|
45 |
+
- **Quantitative Forecast:** {prediction}
|
46 |
+
- **Recent News Headlines & Sentiment:** {news}
|
47 |
+
|
48 |
+
**Your Analysis (in Markdown format):**
|
49 |
+
**1. Executive Summary:** A 2-sentence summary of the company's current situation based on the data.
|
50 |
+
**2. Bull Case:** 2-3 bullet points on the positive signals from the data.
|
51 |
+
**3. Bear Case:** 2-3 bullet points on the primary risks or negative signals.
|
52 |
+
**4. Final Recommendation:** State ONE of the following: 'Strong Buy', 'Buy', 'Hold', 'Sell', or 'Strong Sell' and provide a brief 1-sentence justification based purely on the provided data mix.
|
53 |
+
"""
|
54 |
+
)
|
55 |
+
|
56 |
+
# Create the LangChain chain
|
57 |
+
chain = LLMChain(llm=llm, prompt=prompt)
|
58 |
+
|
59 |
+
# Run the chain with our summarized data
|
60 |
+
try:
|
61 |
+
response = chain.run(
|
62 |
+
fundamentals=fundamentals_summary,
|
63 |
+
prediction=prediction_summary,
|
64 |
+
news=news_summary
|
65 |
+
)
|
66 |
+
print("Successfully generated thesis from Gemini.")
|
67 |
+
return response
|
68 |
+
except Exception as e:
|
69 |
+
print(f"Error calling Gemini API: {e}")
|
70 |
+
return "Error: Could not generate the advisor summary."
|
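For context, a minimal sketch of how `generate_investment_thesis` might be invoked, assuming `GOOGLE_API_KEY` is set in the environment so `ChatGoogleGenerativeAI` can authenticate; the input dictionary below is illustrative and only mirrors the keys the function reads (note that `pe_ratio` must be numeric, since the summary formats it with `:.2f`).

```python
# Illustrative call only; the field values are made up for the example.
from tools.advisor_tools import generate_investment_thesis

sample_job_result = {
    "company_name": "Example Industries Ltd",
    "current_price": 1520.5,
    "market_cap": 1_200_000_000_000,
    "pe_ratio": 24.3,          # numeric, because the summary applies :.2f
    "sector": "Energy",
    "prediction_analysis": {"summary": "Model projects a mild upward trend."},
    "intelligence_briefing": {
        "news": [
            {"title": "Example Industries posts strong quarterly results",
             "sentiment": "Positive"},
        ]
    },
}

print(generate_investment_thesis(sample_job_result))
```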
backend/tools/analyst_tools.py
ADDED
@@ -0,0 +1,151 @@
|
1 |
+
import yfinance as yf
|
2 |
+
import pandas as pd
|
3 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
4 |
+
from langchain.prompts import PromptTemplate
|
5 |
+
from langchain.chains import LLMChain
|
6 |
+
from typing import Dict, Any
|
7 |
+
import time
|
8 |
+
|
9 |
+
def get_llm_analysis(ticker: str, company_name: str, intelligence_briefing: Dict[str, Any]) -> Dict[str, Any]:
|
10 |
+
"""
|
11 |
+
Uses Gemini 1.5 Flash to analyze historical data and news to generate
|
12 |
+
a forecast and a complete investment thesis.
|
13 |
+
"""
|
14 |
+
print(f"Starting LLM-powered analysis for {ticker} with Gemini 1.5 Flash...")
|
15 |
+
|
16 |
+
# clean and format ticker properly
|
17 |
+
original_ticker = ticker
|
18 |
+
if not ticker.endswith(('.NS', '.BO', '.L', '.TO')):
|
19 |
+
# For Indian stocks, try NSE first
|
20 |
+
ticker = f"{ticker}.NS"
|
21 |
+
print(f"Formatted ticker from '{original_ticker}' to '{ticker}' for Yahoo Finance")
|
22 |
+
|
23 |
+
historical_data_text = "Could not fetch historical price data due to repeated errors."
|
24 |
+
|
25 |
+
for attempt in range(3):
|
26 |
+
try:
|
27 |
+
print(f"Attempt {attempt + 1}/3 to download historical data for {ticker}...")
|
28 |
+
stock_data = yf.download(ticker, period="100d", interval="1d", progress=False)
|
29 |
+
|
30 |
+
if not stock_data.empty:
|
31 |
+
# Convert to a more readable format for the LLM
|
32 |
+
stock_data = stock_data.round(2) # Round to 2 decimal places
|
33 |
+
# Include only the last 20 days for LLM context efficiency
|
34 |
+
recent_data = stock_data.tail(20)
|
35 |
+
historical_data_text = f"Recent 20 days of data for {ticker}:\n{recent_data.to_string()}"
|
36 |
+
print("-> Successfully downloaded historical data.")
|
37 |
+
break
|
38 |
+
else:
|
39 |
+
raise ValueError("Downloaded data is empty.")
|
40 |
+
|
41 |
+
except Exception as e:
|
42 |
+
print(f"-> Attempt {attempt + 1} failed: {e}")
|
43 |
+
# If NSE fails, try BSE
|
44 |
+
if attempt == 0 and ticker.endswith('.NS'):
|
45 |
+
ticker = f"{original_ticker}.BO"
|
46 |
+
print(f"Retrying with BSE ticker: {ticker}")
|
47 |
+
elif attempt < 2:
|
48 |
+
print(" Waiting 2 seconds before retrying...")
|
49 |
+
time.sleep(2)
|
50 |
+
|
51 |
+
# If all attempts failed, check if we can get basic info
|
52 |
+
if "Could not fetch historical price data" in historical_data_text:
|
53 |
+
try:
|
54 |
+
print("Attempting to get basic stock info as fallback...")
|
55 |
+
stock = yf.Ticker(ticker)
|
56 |
+
info = stock.info
|
57 |
+
if info and info.get('regularMarketPrice'):
|
58 |
+
current_price = info.get('regularMarketPrice')
|
59 |
+
previous_close = info.get('previousClose', current_price)
|
60 |
+
historical_data_text = f"Limited data available for {ticker}:\nCurrent Price: ₹{current_price}\nPrevious Close: ₹{previous_close}"
|
61 |
+
print("-> Got basic stock info as fallback.")
|
62 |
+
else:
|
63 |
+
print("-> No data available from any source.")
|
64 |
+
except Exception as e:
|
65 |
+
print(f"-> Fallback also failed: {e}")
|
66 |
+
|
67 |
+
# 2. Summarize the news data into a simple text block
|
68 |
+
articles = intelligence_briefing.get('articles', [])
|
69 |
+
if articles:
|
70 |
+
news_summary = "\n".join([f"- {article['title']} (Source: {article['source']}, Sentiment: {article['sentiment']})" for article in articles[:10]]) # Limit to 10 articles
|
71 |
+
else:
|
72 |
+
news_summary = "No recent news or social media mentions found."
|
73 |
+
|
74 |
+
# 3. Initialize the LLM
|
75 |
+
try:
|
76 |
+
llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash-latest", temperature=0.3)
|
77 |
+
except Exception as e:
|
78 |
+
print(f"Error initializing Gemini: {e}")
|
79 |
+
return {"error": f"Failed to initialize Gemini API: {str(e)}"}
|
80 |
+
|
81 |
+
# 4. Enhanced prompt that handles missing data gracefully
|
82 |
+
prompt = PromptTemplate(
|
83 |
+
input_variables=["ticker", "company_name", "historical_data", "news_summary"],
|
84 |
+
template="""
|
85 |
+
You are a Senior Financial Analyst for an Indian market-focused investment fund.
|
86 |
+
Your task is to provide a comprehensive analysis and a 30-day forecast for the stock: {ticker} ({company_name}).
|
87 |
+
|
88 |
+
**Important Instructions:**
|
89 |
+
- If historical data is limited or unavailable, focus your analysis on news sentiment and general market conditions
|
90 |
+
- Always provide a forecast range even with limited data, but adjust confidence accordingly
|
91 |
+
- Base your analysis ONLY on the provided data
|
92 |
+
|
93 |
+
**Data Provided:**
|
94 |
+
|
95 |
+
1. **Historical Price Data:**
|
96 |
+
```
|
97 |
+
{historical_data}
|
98 |
+
```
|
99 |
+
|
100 |
+
2. **Recent News & Social Media Headlines:**
|
101 |
+
{news_summary}
|
102 |
+
|
103 |
+
**Your Analysis Report (in Markdown format):**
|
104 |
+
|
105 |
+
## 30-Day Price Forecast
|
106 |
+
|
107 |
+
**Analysis:** Analyze available data (price trends if available, news sentiment, market conditions). If price data is limited, focus on sentiment analysis and sector trends.
|
108 |
+
|
109 |
+
**Predicted Range:** Provide a realistic price range for 30 days (e.g., ₹1500 - ₹1650). If no current price available, state "Unable to provide specific range due to data limitations."
|
110 |
+
|
111 |
+
**Justification:** Explain your forecast based on available information.
|
112 |
+
|
113 |
+
**Confidence:** High/Moderate/Low based on data quality and availability.
|
114 |
+
|
115 |
+
## Investment Thesis
|
116 |
+
|
117 |
+
**Bull Case:**
|
118 |
+
- Point 1 based on positive signals from available data
|
119 |
+
- Point 2 based on news sentiment or market conditions
|
120 |
+
- Point 3 if sufficient data available
|
121 |
+
|
122 |
+
**Bear Case:**
|
123 |
+
- Point 1 based on negative signals or risks
|
124 |
+
- Point 2 based on market concerns
|
125 |
+
- Point 3 if sufficient data available
|
126 |
+
|
127 |
+
## Actionable Strategy
|
128 |
+
|
129 |
+
**Signal:** Buy/Sell/Hold (choose one)
|
130 |
+
|
131 |
+
**Strategy:** Provide 1-2 sentences with specific actionable advice based on the analysis above.
|
132 |
+
|
133 |
+
**Risk Management:** Brief note on stop-loss or position sizing if relevant.
|
134 |
+
"""
|
135 |
+
)
|
136 |
+
|
137 |
+
# 5. Run the LangChain chain with error handling
|
138 |
+
chain = LLMChain(llm=llm, prompt=prompt)
|
139 |
+
try:
|
140 |
+
response = chain.run(
|
141 |
+
ticker=ticker,
|
142 |
+
company_name=company_name,
|
143 |
+
historical_data=historical_data_text,
|
144 |
+
news_summary=news_summary
|
145 |
+
)
|
146 |
+
print("Successfully generated analysis from Gemini.")
|
147 |
+
return {"llm_report": response}
|
148 |
+
except Exception as e:
|
149 |
+
error_msg = f"Failed to generate analysis from Gemini: {str(e)}"
|
150 |
+
print(f"Error calling Gemini API: {e}")
|
151 |
+
return {"error": error_msg}
|
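A short sketch of how this analyst could be fed the briefing produced by `get_combined_news_and_sentiment` from `news_tools.py`; this wiring is an assumption (the actual call sites live in the task modules) and it requires a valid `GOOGLE_API_KEY` plus network access to Yahoo Finance.

```python
# Illustrative wiring between the news tool and the LLM analyst (assumption).
from tools.news_tools import get_combined_news_and_sentiment
from tools.analyst_tools import get_llm_analysis

briefing = get_combined_news_and_sentiment("RELIANCE.NS", "Reliance Industries")
report = get_llm_analysis("RELIANCE.NS", "Reliance Industries", briefing)

if "llm_report" in report:
    print(report["llm_report"])      # Markdown report with forecast and thesis
else:
    print("Analysis failed:", report["error"])
```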
backend/tools/data_tools.py
CHANGED
@@ -2,42 +2,42 @@ import yfinance as yf
|
|
2 |
from typing import Dict, Any
|
3 |
|
4 |
def get_stock_data(ticker: str) -> Dict[str, Any]:
|
5 |
-
|
6 |
-
# for NSE stocks, yfinance expects the '.NS' suffix. For BSE, it's '.BO'.
|
7 |
-
# assume NSE by default if no suffix is provided.
|
8 |
if not ticker.endswith(('.NS', '.BO')):
|
9 |
-
print(f"Ticker '{ticker}' has no exchange suffix. Assuming NSE and appending '.NS'.")
|
10 |
ticker = f"{ticker}.NS"
|
11 |
|
12 |
stock = yf.Ticker(ticker)
|
13 |
|
14 |
-
# yfinance can sometimes fail for certain tickers or data points.
|
15 |
try:
|
16 |
info = stock.info
|
17 |
except Exception as e:
|
18 |
print(f"Could not fetch info for {ticker}: {e}")
|
19 |
return {"error": f"Invalid ticker or no data available for {ticker}"}
|
20 |
|
21 |
-
# check if we got a valid response
|
22 |
if not info or info.get('regularMarketPrice') is None:
|
23 |
return {"error": f"Invalid ticker or no data available for {ticker}"}
|
24 |
|
25 |
-
#
|
26 |
data = {
|
27 |
"ticker": ticker,
|
28 |
"company_name": info.get('longName'),
|
29 |
"current_price": info.get('currentPrice') or info.get('regularMarketPrice'),
|
30 |
"previous_close": info.get('previousClose'),
|
|
|
|
|
|
|
|
|
|
|
|
|
31 |
"market_cap": info.get('marketCap'),
|
32 |
-
"pe_ratio": info.get('trailingPE')
|
33 |
"pb_ratio": info.get('priceToBook'),
|
34 |
"dividend_yield": info.get('dividendYield'),
|
35 |
"sector": info.get('sector'),
|
36 |
"industry": info.get('industry'),
|
37 |
"summary": info.get('longBusinessSummary'),
|
38 |
"website": info.get('website'),
|
39 |
-
|
|
|
40 |
}
|
41 |
|
42 |
-
# clean up data by removing any keys with none values
|
43 |
return {k: v for k, v in data.items() if v is not None}
|
|
|
2 |
from typing import Dict, Any
|
3 |
|
4 |
def get_stock_data(ticker: str) -> Dict[str, Any]:
|
|
|
|
|
|
|
5 |
if not ticker.endswith(('.NS', '.BO')):
|
|
|
6 |
ticker = f"{ticker}.NS"
|
7 |
|
8 |
stock = yf.Ticker(ticker)
|
9 |
|
|
|
10 |
try:
|
11 |
info = stock.info
|
12 |
except Exception as e:
|
13 |
print(f"Could not fetch info for {ticker}: {e}")
|
14 |
return {"error": f"Invalid ticker or no data available for {ticker}"}
|
15 |
|
|
|
16 |
if not info or info.get('regularMarketPrice') is None:
|
17 |
return {"error": f"Invalid ticker or no data available for {ticker}"}
|
18 |
|
19 |
+
# --- UPGRADED DATA FETCHING ---
|
20 |
data = {
|
21 |
"ticker": ticker,
|
22 |
"company_name": info.get('longName'),
|
23 |
"current_price": info.get('currentPrice') or info.get('regularMarketPrice'),
|
24 |
"previous_close": info.get('previousClose'),
|
25 |
+
"day_high": info.get('dayHigh'),
|
26 |
+
"day_low": info.get('dayLow'),
|
27 |
+
"fifty_two_week_high": info.get('fiftyTwoWeekHigh'),
|
28 |
+
"fifty_two_week_low": info.get('fiftyTwoWeekLow'),
|
29 |
+
"volume": info.get('volume'),
|
30 |
+
"average_volume": info.get('averageVolume'),
|
31 |
"market_cap": info.get('marketCap'),
|
32 |
+
"pe_ratio": info.get('trailingPE'),
|
33 |
"pb_ratio": info.get('priceToBook'),
|
34 |
"dividend_yield": info.get('dividendYield'),
|
35 |
"sector": info.get('sector'),
|
36 |
"industry": info.get('industry'),
|
37 |
"summary": info.get('longBusinessSummary'),
|
38 |
"website": info.get('website'),
|
39 |
+
# Get CEO info if available
|
40 |
+
"ceo": next((officer['name'] for officer in info.get('companyOfficers', []) if 'CEO' in officer.get('title', '')), "N/A"),
|
41 |
}
|
42 |
|
|
|
43 |
return {k: v for k, v in data.items() if v is not None}
|
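A quick usage sketch for the upgraded fetcher; which keys come back depends on what yfinance returns for the ticker, since `None` values are stripped before returning.

```python
# Illustrative call; '.NS' is appended automatically when no suffix is given.
from tools.data_tools import get_stock_data

data = get_stock_data("TCS")
if "error" in data:
    print(data["error"])
else:
    print(data.get("company_name"), data.get("current_price"), data.get("pe_ratio"))
```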
backend/tools/download_model.py
CHANGED
@@ -0,0 +1,25 @@
|
1 |
+
from sentence_transformers import CrossEncoder
|
2 |
+
import os
|
3 |
+
|
4 |
+
# Define the model name and the directory to save it to
|
5 |
+
MODEL_NAME = 'cross-encoder/nli-roberta-base'
|
6 |
+
MODEL_PATH = './sentiment_model'
|
7 |
+
|
8 |
+
def main():
|
9 |
+
"""
|
10 |
+
Downloads the specified model from Hugging Face and saves it locally.
|
11 |
+
"""
|
12 |
+
print(f"Downloading model: {MODEL_NAME}")
|
13 |
+
|
14 |
+
# Check if the directory exists
|
15 |
+
if not os.path.exists(MODEL_PATH):
|
16 |
+
os.makedirs(MODEL_PATH)
|
17 |
+
|
18 |
+
# This command downloads the model and saves it to the specified path
|
19 |
+
model = CrossEncoder(MODEL_NAME)
|
20 |
+
model.save(MODEL_PATH)
|
21 |
+
|
22 |
+
print(f"Model downloaded and saved to {MODEL_PATH}")
|
23 |
+
|
24 |
+
if __name__ == "__main__":
|
25 |
+
main()
|
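Once saved, the model can be loaded back from the same directory; `CrossEncoder` accepts a local path, which is how the `/code/sentiment_model` path referenced elsewhere in the backend is expected to be consumed. The sentence pair below is only an example input.

```python
# Load the locally saved NLI cross-encoder and score one premise/hypothesis pair.
from sentence_transformers import CrossEncoder

model = CrossEncoder('./sentiment_model')
scores = model.predict([("Shares of Example Ltd rose 3% after earnings.",
                         "This headline is positive.")])
print(scores)   # one score vector per input pair
```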
backend/tools/news_tools.py
CHANGED
@@ -1,86 +1,277 @@
|
|
1 |
-
import
|
2 |
-
import
|
3 |
-
|
4 |
-
from transformers import
|
5 |
-
import
|
|
|
6 |
from typing import List, Dict, Any
|
|
|
7 |
|
8 |
-
#
|
9 |
-
|
10 |
-
|
|
|
|
|
11 |
MODEL_PATH = '/code/sentiment_model'
|
12 |
|
13 |
-
def
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
logits = sentiment_model(**inputs).logits
|
30 |
|
31 |
-
|
32 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
|
34 |
-
|
35 |
-
|
36 |
-
if model_labels:
|
37 |
-
# e.g., {0: 'positive', 1: 'neutral', 2: 'negative'}
|
38 |
-
sentiment_map = {int(k): v.capitalize() for k, v in model_labels.items()}
|
39 |
-
|
40 |
-
best_index = scores.index(max(scores))
|
41 |
-
return sentiment_map.get(best_index, "Unknown")
|
42 |
-
|
43 |
|
44 |
-
def
|
45 |
-
|
46 |
-
|
47 |
-
print(f"Fetching news for {company_name}...")
|
48 |
-
search_url = f"https://news.google.com/rss/search?q={company_name.replace(' ', '+')}+stock&hl=en-IN&gl=IN&ceid=IN:en"
|
49 |
-
news_source = newspaper.build(search_url, memoize_articles=False, language='en')
|
50 |
articles_data = []
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
55 |
|
56 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
57 |
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
|
68 |
-
|
69 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
70 |
|
71 |
-
|
72 |
-
|
73 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
74 |
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
79 |
|
80 |
-
|
81 |
-
|
82 |
-
"total_tweets": len(tweets),
|
83 |
-
"positive": counts['Positive'],
|
84 |
-
"negative": counts['Negative'],
|
85 |
-
"neutral": counts['Neutral']
|
86 |
-
}
|
|
|
1 |
+
import yfinance as yf
|
2 |
+
import requests
|
3 |
+
from bs4 import BeautifulSoup
|
4 |
+
from transformers import pipeline
|
5 |
+
import time
|
6 |
+
import urllib.parse
|
7 |
from typing import List, Dict, Any
|
8 |
+
import logging
|
9 |
|
10 |
+
# Set up logging
|
11 |
+
logger = logging.getLogger(__name__)
|
12 |
+
|
13 |
+
# --- Model Loading ---
|
14 |
+
sentiment_pipeline = None
|
15 |
MODEL_PATH = '/code/sentiment_model'
|
16 |
|
17 |
+
def load_sentiment_pipeline():
|
18 |
+
global sentiment_pipeline
|
19 |
+
if sentiment_pipeline is None:
|
20 |
+
logger.info("Loading sentiment analysis pipeline...")
|
21 |
+
try:
|
22 |
+
# Try to load the custom model
|
23 |
+
sentiment_pipeline = pipeline('text-classification', model=MODEL_PATH, tokenizer=MODEL_PATH)
|
24 |
+
logger.info("Custom sentiment pipeline loaded.")
|
25 |
+
except Exception as e:
|
26 |
+
logger.warning(f"Could not load custom model ({e}), using default pipeline...")
|
27 |
+
try:
|
28 |
+
# Fallback to default sentiment analysis
|
29 |
+
sentiment_pipeline = pipeline('sentiment-analysis')
|
30 |
+
logger.info("Default sentiment pipeline loaded.")
|
31 |
+
except Exception as e2:
|
32 |
+
logger.error(f"Could not load any sentiment pipeline: {e2}")
|
33 |
+
# Create a dummy pipeline that always returns neutral
|
34 |
+
sentiment_pipeline = lambda texts, **kwargs: [{'label': 'NEUTRAL', 'score': 0.5} for _ in texts]
|
35 |
+
|
36 |
+
# --- Helper function for making web requests ---
|
37 |
+
def get_session():
|
38 |
+
session = requests.Session()
|
39 |
+
session.headers.update({
|
40 |
+
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
|
41 |
+
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
|
42 |
+
'Accept-Language': 'en-US,en;q=0.5',
|
43 |
+
'Connection': 'keep-alive',
|
44 |
+
})
|
45 |
+
return session
|
46 |
|
47 |
+
# --- PRODUCTION NEWS SCRAPING TOOLS ---
|
48 |
+
def scrape_google_news(company_name: str) -> List[Dict[str, Any]]:
|
49 |
+
"""Scrape Google News - this is working perfectly based on your test"""
|
50 |
+
logger.info(f"Fetching Google News for {company_name}...")
|
51 |
+
articles_data = []
|
|
|
52 |
|
53 |
+
try:
|
54 |
+
session = get_session()
|
55 |
+
|
56 |
+
# Try multiple query variations (your test showed this works)
|
57 |
+
queries = [
|
58 |
+
f'"{company_name}" stock',
|
59 |
+
f'{company_name} share price',
|
60 |
+
company_name
|
61 |
+
]
|
62 |
+
|
63 |
+
for query in queries:
|
64 |
+
try:
|
65 |
+
encoded_query = urllib.parse.quote(query)
|
66 |
+
url = f"https://news.google.com/rss/search?q={encoded_query}&hl=en&gl=US&ceid=US:en"
|
67 |
+
|
68 |
+
response = session.get(url, timeout=15)
|
69 |
+
if response.status_code == 200:
|
70 |
+
soup = BeautifulSoup(response.content, 'xml')
|
71 |
+
items = soup.find_all('item')
|
72 |
+
|
73 |
+
for item in items[:10]: # Top 10 articles
|
74 |
+
title_elem = item.find('title')
|
75 |
+
if title_elem and title_elem.text:
|
76 |
+
articles_data.append({
|
77 |
+
"title": title_elem.text.strip(),
|
78 |
+
"url": item.find('link').text if item.find('link') else '',
|
79 |
+
"source": item.find('source').text if item.find('source') else 'Google News'
|
80 |
+
})
|
81 |
+
|
82 |
+
if articles_data:
|
83 |
+
break # Stop if we found articles
|
84 |
+
|
85 |
+
except Exception as e:
|
86 |
+
logger.error(f"Google News query '{query}' failed: {e}")
|
87 |
+
continue
|
88 |
+
|
89 |
+
except Exception as e:
|
90 |
+
logger.error(f"Google News scraping failed: {e}")
|
91 |
|
92 |
+
logger.info(f"-> Google News returned {len(articles_data)} articles.")
|
93 |
+
return articles_data
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
94 |
|
95 |
+
def scrape_yahoo_finance_news(ticker: str) -> List[Dict[str, Any]]:
|
96 |
+
"""Yahoo Finance news scraper"""
|
97 |
+
logger.info(f"Fetching Yahoo Finance News for {ticker}...")
|
|
|
|
|
|
|
98 |
articles_data = []
|
99 |
+
|
100 |
+
try:
|
101 |
+
# Try yfinance first
|
102 |
+
stock = yf.Ticker(ticker)
|
103 |
+
news = stock.news
|
104 |
+
|
105 |
+
if news:
|
106 |
+
for article in news[:10]: # Top 10
|
107 |
+
if article.get('title'):
|
108 |
+
articles_data.append({
|
109 |
+
"title": article['title'].strip(),
|
110 |
+
"url": article.get('link', ''),
|
111 |
+
"source": article.get('publisher', 'Yahoo Finance'),
|
112 |
+
})
|
113 |
+
|
114 |
+
except Exception as e:
|
115 |
+
logger.error(f"Yahoo Finance scraping failed: {e}")
|
116 |
+
|
117 |
+
logger.info(f"-> Yahoo Finance returned {len(articles_data)} articles.")
|
118 |
+
return articles_data
|
119 |
|
120 |
+
def scrape_reddit_mentions(company_name: str) -> List[Dict[str, Any]]:
|
121 |
+
"""Reddit mentions scraper - working well based on your test"""
|
122 |
+
logger.info(f"Fetching Reddit mentions for {company_name}...")
|
123 |
+
mentions_data = []
|
124 |
+
|
125 |
+
try:
|
126 |
+
session = get_session()
|
127 |
+
subreddits = ['stocks', 'investing', 'IndiaInvestments', 'SecurityAnalysis', 'ValueInvesting']
|
128 |
+
|
129 |
+
for subreddit in subreddits:
|
130 |
+
try:
|
131 |
+
# Search queries that worked in your test
|
132 |
+
search_queries = [
|
133 |
+
f'"{company_name}"',
|
134 |
+
company_name.split()[0] if ' ' in company_name else company_name
|
135 |
+
]
|
136 |
+
|
137 |
+
for query in search_queries:
|
138 |
+
search_url = f"https://www.reddit.com/r/{subreddit}/search.json"
|
139 |
+
params = {
|
140 |
+
'q': query,
|
141 |
+
'sort': 'new',
|
142 |
+
'limit': 10,
|
143 |
+
'restrict_sr': 'true',
|
144 |
+
't': 'month'
|
145 |
+
}
|
146 |
+
|
147 |
+
response = session.get(search_url, params=params, timeout=10)
|
148 |
+
if response.status_code == 200:
|
149 |
+
data = response.json()
|
150 |
+
posts = data.get('data', {}).get('children', [])
|
151 |
+
|
152 |
+
for post in posts:
|
153 |
+
post_data = post.get('data', {})
|
154 |
+
if post_data.get('title'):
|
155 |
+
mentions_data.append({
|
156 |
+
"title": post_data['title'].strip(),
|
157 |
+
"url": f"https://reddit.com{post_data.get('permalink', '')}",
|
158 |
+
"source": f"r/{subreddit}"
|
159 |
+
})
|
160 |
+
|
161 |
+
if posts:
|
162 |
+
break # Found posts with this query
|
163 |
+
|
164 |
+
time.sleep(0.5) # Rate limiting
|
165 |
+
|
166 |
+
except Exception as e:
|
167 |
+
logger.error(f"Reddit r/{subreddit} failed: {e}")
|
168 |
+
|
169 |
+
time.sleep(1) # Rate limiting between subreddits
|
170 |
+
|
171 |
+
except Exception as e:
|
172 |
+
logger.error(f"Reddit scraping failed: {e}")
|
173 |
+
|
174 |
+
logger.info(f"-> Reddit returned {len(mentions_data)} mentions.")
|
175 |
+
return mentions_data
|
176 |
|
177 |
+
# --- THE MAIN TOOL FUNCTION ---
|
178 |
+
def get_combined_news_and_sentiment(ticker: str, company_name: str) -> Dict[str, Any]:
|
179 |
+
"""Main function that combines all news sources and analyzes sentiment"""
|
180 |
+
logger.info(f"Starting news analysis for {ticker} ({company_name})")
|
181 |
+
|
182 |
+
# Load sentiment pipeline
|
183 |
+
load_sentiment_pipeline()
|
184 |
+
|
185 |
+
all_sources = []
|
186 |
+
|
187 |
+
# Collect from all sources (based on your successful test)
|
188 |
+
try:
|
189 |
+
google_articles = scrape_google_news(company_name)
|
190 |
+
all_sources.extend(google_articles)
|
191 |
+
except Exception as e:
|
192 |
+
logger.error(f"Google News failed: {e}")
|
193 |
+
|
194 |
+
try:
|
195 |
+
yahoo_articles = scrape_yahoo_finance_news(ticker)
|
196 |
+
all_sources.extend(yahoo_articles)
|
197 |
+
except Exception as e:
|
198 |
+
logger.error(f"Yahoo Finance failed: {e}")
|
199 |
+
|
200 |
+
try:
|
201 |
+
reddit_mentions = scrape_reddit_mentions(company_name)
|
202 |
+
all_sources.extend(reddit_mentions)
|
203 |
+
except Exception as e:
|
204 |
+
logger.error(f"Reddit failed: {e}")
|
205 |
|
206 |
+
logger.info(f"Total items collected from all sources: {len(all_sources)}")
|
207 |
+
|
208 |
+
if not all_sources:
|
209 |
+
return {
|
210 |
+
"articles": [],
|
211 |
+
"sentiment_summary": {
|
212 |
+
"total_items": 0,
|
213 |
+
"positive": 0,
|
214 |
+
"negative": 0,
|
215 |
+
"neutral": 0,
|
216 |
+
"error": "Could not fetch any news from any source."
|
217 |
+
}
|
218 |
+
}
|
219 |
+
|
220 |
+
# Perform sentiment analysis
|
221 |
+
try:
|
222 |
+
titles = [item['title'] for item in all_sources if item.get('title')]
|
223 |
+
results = sentiment_pipeline(titles, truncation=True, max_length=512)
|
224 |
|
225 |
+
# Map sentiment results back to articles
|
226 |
+
for i, item in enumerate(all_sources):
|
227 |
+
if i < len(results):
|
228 |
+
label = results[i]['label']
|
229 |
+
|
230 |
+
# Normalize different label formats
|
231 |
+
if label.upper() in ['POSITIVE', 'POS', 'LABEL_2']:
|
232 |
+
sentiment = 'Positive'
|
233 |
+
elif label.upper() in ['NEGATIVE', 'NEG', 'LABEL_0']:
|
234 |
+
sentiment = 'Negative'
|
235 |
+
else:
|
236 |
+
sentiment = 'Neutral'
|
237 |
+
|
238 |
+
item['sentiment'] = sentiment
|
239 |
+
item['sentiment_score'] = round(results[i]['score'], 2)
|
240 |
+
else:
|
241 |
+
# Fallback: simple keyword-based sentiment
|
242 |
+
title_lower = item['title'].lower()
|
243 |
+
if any(word in title_lower for word in ['gain', 'rise', 'growth', 'profit', 'strong', 'bullish']):
|
244 |
+
item['sentiment'] = 'Positive'
|
245 |
+
item['sentiment_score'] = 0.7
|
246 |
+
elif any(word in title_lower for word in ['fall', 'decline', 'loss', 'weak', 'bearish', 'drop']):
|
247 |
+
item['sentiment'] = 'Negative'
|
248 |
+
item['sentiment_score'] = 0.7
|
249 |
+
else:
|
250 |
+
item['sentiment'] = 'Neutral'
|
251 |
+
item['sentiment_score'] = 0.5
|
252 |
|
253 |
+
# Count sentiments
|
254 |
+
counts = {'Positive': 0, 'Negative': 0, 'Neutral': 0}
|
255 |
+
for item in all_sources:
|
256 |
+
counts[item.get('sentiment', 'Neutral')] += 1
|
257 |
+
|
258 |
+
except Exception as e:
|
259 |
+
logger.error(f"Sentiment analysis failed: {e}")
|
260 |
+
# Fallback to neutral sentiment for all articles
|
261 |
+
for item in all_sources:
|
262 |
+
item['sentiment'] = 'Neutral'
|
263 |
+
item['sentiment_score'] = 0.5
|
264 |
+
counts = {'Positive': 0, 'Negative': 0, 'Neutral': len(all_sources)}
|
265 |
+
|
266 |
+
result = {
|
267 |
+
"articles": all_sources,
|
268 |
+
"sentiment_summary": {
|
269 |
+
"total_items": len(all_sources),
|
270 |
+
"positive": counts['Positive'],
|
271 |
+
"negative": counts['Negative'],
|
272 |
+
"neutral": counts['Neutral']
|
273 |
+
}
|
274 |
+
}
|
275 |
|
276 |
+
logger.info(f"News analysis completed: {len(all_sources)} articles, {counts}")
|
277 |
+
return result
|
|
|
|
|
|
|
|
|
|
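For reference, the dictionary returned by `get_combined_news_and_sentiment` has the shape sketched below (values are illustrative, not real output):

```python
# Illustrative return value of get_combined_news_and_sentiment.
example_result = {
    "articles": [
        {
            "title": "Example Ltd shares gain on strong results",
            "url": "https://news.google.com/...",
            "source": "Google News",
            "sentiment": "Positive",       # 'Positive' | 'Negative' | 'Neutral'
            "sentiment_score": 0.91,
        },
    ],
    "sentiment_summary": {
        "total_items": 1,
        "positive": 1,
        "negative": 0,
        "neutral": 0,
    },
}
```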
backend/tools/prediction_tools.py
ADDED
@@ -0,0 +1,61 @@
|
1 |
+
import yfinance as yf
|
2 |
+
from prophet import Prophet
|
3 |
+
import pandas as pd
|
4 |
+
from typing import Dict, Any
|
5 |
+
|
6 |
+
def generate_forecast(ticker: str) -> Dict[str, Any]:
|
7 |
+
print(f"Generating forecast for ticker {ticker}...")
|
8 |
+
|
9 |
+
stock_data = yf.download(ticker, period="2y", progress=False)
|
10 |
+
|
11 |
+
if stock_data.empty:
|
12 |
+
return {"error": f"Could not download historical data for {ticker}."}
|
13 |
+
|
14 |
+
# --- THE FINAL, MOST ROBUST FIX FOR THE DATAFRAME ---
|
15 |
+
# 1. Create a new DataFrame with only the columns we need.
|
16 |
+
df_prophet = stock_data[['Close']].copy()
|
17 |
+
# 2. Reset the index to turn the 'Date' index into a column.
|
18 |
+
df_prophet.reset_index(inplace=True)
|
19 |
+
# 3. Rename the columns to what Prophet expects.
|
20 |
+
df_prophet.columns = ['ds', 'y']
|
21 |
+
# --- END OF FIX ---
|
22 |
+
|
23 |
+
model = Prophet(
|
24 |
+
daily_seasonality=False,
|
25 |
+
weekly_seasonality=True,
|
26 |
+
yearly_seasonality=True,
|
27 |
+
changepoint_prior_scale=0.05
|
28 |
+
)
|
29 |
+
model.fit(df_prophet)
|
30 |
+
|
31 |
+
future = model.make_future_dataframe(periods=30)
|
32 |
+
forecast = model.predict(future)
|
33 |
+
|
34 |
+
current_price = df_prophet['y'].iloc[-1]
|
35 |
+
predicted_price_30_days = forecast['yhat'].iloc[-1]
|
36 |
+
trend = "upward" if predicted_price_30_days > current_price else "downward"
|
37 |
+
change_percent = ((predicted_price_30_days - current_price) / current_price) * 100
|
38 |
+
|
39 |
+
forecast_data = {
|
40 |
+
"summary": (
|
41 |
+
f"The model predicts a {trend} trend over the next 30 days. "
|
42 |
+
f"Current price: {current_price:.2f}, "
|
43 |
+
f"predicted price in 30 days: {predicted_price_30_days:.2f} "
|
44 |
+
f"({change_percent:+.2f}% change)."
|
45 |
+
),
|
46 |
+
# Convert datetime objects to strings for JSON compatibility
|
47 |
+
"history_plot_data": [
|
48 |
+
{'ds': r['ds'].isoformat(), 'y': r['y']} for r in df_prophet.tail(90).to_dict('records')
|
49 |
+
],
|
50 |
+
"forecast_plot_data": [
|
51 |
+
{
|
52 |
+
'ds': r['ds'].isoformat(),
|
53 |
+
'yhat': r['yhat'],
|
54 |
+
'yhat_lower': r['yhat_lower'],
|
55 |
+
'yhat_upper': r['yhat_upper']
|
56 |
+
} for r in forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(120).to_dict('records')
|
57 |
+
]
|
58 |
+
}
|
59 |
+
|
60 |
+
print(f"Forecast for {ticker} generated successfully.")
|
61 |
+
return forecast_data
|
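A minimal sketch of calling the forecaster directly; Prophet fits on two years of daily closes, so the first run can take several seconds, and the ticker is assumed to already carry its exchange suffix.

```python
# Illustrative call to the Prophet-based forecaster.
from tools.prediction_tools import generate_forecast

forecast = generate_forecast("INFY.NS")
if "error" in forecast:
    print(forecast["error"])
else:
    print(forecast["summary"])
    print(len(forecast["forecast_plot_data"]), "forecast points for the chart")
```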
docker-compose.yml
CHANGED
@@ -1,12 +1,14 @@
|
|
1 |
services:
|
|
|
2 |
redis:
|
3 |
image: redis:7-alpine
|
4 |
ports:
|
5 |
- "6379:6379"
|
|
|
6 |
|
7 |
backend:
|
8 |
build:
|
9 |
-
context: .
|
10 |
dockerfile: ./backend/Dockerfile
|
11 |
ports:
|
12 |
- "8000:8000"
|
@@ -14,19 +16,21 @@ services:
|
|
14 |
- ./backend:/code/app
|
15 |
env_file:
|
16 |
- .env
|
17 |
-
command: python -m uvicorn main:app --host 0.0.0.0 --port 8000
|
|
|
18 |
depends_on:
|
19 |
- redis
|
20 |
|
21 |
worker:
|
22 |
build:
|
23 |
-
context: .
|
24 |
dockerfile: ./backend/Dockerfile
|
25 |
volumes:
|
26 |
- ./backend:/code/app
|
27 |
env_file:
|
28 |
- .env
|
29 |
command: python -m celery -A celery_worker.celery worker --loglevel=info
|
|
|
30 |
depends_on:
|
31 |
- redis
|
32 |
- backend
|
@@ -40,5 +44,9 @@ services:
|
|
40 |
volumes:
|
41 |
- ./frontend:/app
|
42 |
- /app/node_modules
|
|
|
43 |
depends_on:
|
44 |
-
- backend
|
|
|
|
|
|
|
|
1 |
services:
|
2 |
+
# --- Application Services ---
|
3 |
redis:
|
4 |
image: redis:7-alpine
|
5 |
ports:
|
6 |
- "6379:6379"
|
7 |
+
restart: always
|
8 |
|
9 |
backend:
|
10 |
build:
|
11 |
+
context: .
|
12 |
dockerfile: ./backend/Dockerfile
|
13 |
ports:
|
14 |
- "8000:8000"
|
|
|
16 |
- ./backend:/code/app
|
17 |
env_file:
|
18 |
- .env
|
19 |
+
command: python -m uvicorn main:app --host 0.0.0.0 --port 8000 --reload
|
20 |
+
restart: always
|
21 |
depends_on:
|
22 |
- redis
|
23 |
|
24 |
worker:
|
25 |
build:
|
26 |
+
context: .
|
27 |
dockerfile: ./backend/Dockerfile
|
28 |
volumes:
|
29 |
- ./backend:/code/app
|
30 |
env_file:
|
31 |
- .env
|
32 |
command: python -m celery -A celery_worker.celery worker --loglevel=info
|
33 |
+
restart: always
|
34 |
depends_on:
|
35 |
- redis
|
36 |
- backend
|
|
|
44 |
volumes:
|
45 |
- ./frontend:/app
|
46 |
- /app/node_modules
|
47 |
+
restart: always
|
48 |
depends_on:
|
49 |
+
- backend
|
50 |
+
|
51 |
+
# No other services or volumes are needed.
|
52 |
+
# The 'mlflow' service and its 'mlflow_server_data' volume have been completely removed.
|
frontend/package-lock.json
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
frontend/package.json
CHANGED
@@ -10,18 +10,27 @@
|
|
10 |
"preview": "vite preview"
|
11 |
},
|
12 |
"dependencies": {
|
|
|
|
|
13 |
"react": "^19.1.1",
|
14 |
-
"react-dom": "^19.1.1"
|
|
|
|
|
15 |
},
|
16 |
"devDependencies": {
|
17 |
"@eslint/js": "^9.33.0",
|
|
|
|
|
18 |
"@types/react": "^19.1.10",
|
19 |
"@types/react-dom": "^19.1.7",
|
20 |
"@vitejs/plugin-react": "^5.0.0",
|
|
|
21 |
"eslint": "^9.33.0",
|
22 |
"eslint-plugin-react-hooks": "^5.2.0",
|
23 |
"eslint-plugin-react-refresh": "^0.4.20",
|
24 |
"globals": "^16.3.0",
|
|
|
|
|
25 |
"vite": "^7.1.2"
|
26 |
}
|
27 |
}
|
|
|
10 |
"preview": "vite preview"
|
11 |
},
|
12 |
"dependencies": {
|
13 |
+
"axios": "^1.11.0",
|
14 |
+
"lucide-react": "^0.542.0",
|
15 |
"react": "^19.1.1",
|
16 |
+
"react-dom": "^19.1.1",
|
17 |
+
"react-markdown": "^10.1.0",
|
18 |
+
"recharts": "^3.1.2"
|
19 |
},
|
20 |
"devDependencies": {
|
21 |
"@eslint/js": "^9.33.0",
|
22 |
+
"@tailwindcss/line-clamp": "^0.4.4",
|
23 |
+
"@tailwindcss/typography": "^0.5.16",
|
24 |
"@types/react": "^19.1.10",
|
25 |
"@types/react-dom": "^19.1.7",
|
26 |
"@vitejs/plugin-react": "^5.0.0",
|
27 |
+
"autoprefixer": "^10.4.21",
|
28 |
"eslint": "^9.33.0",
|
29 |
"eslint-plugin-react-hooks": "^5.2.0",
|
30 |
"eslint-plugin-react-refresh": "^0.4.20",
|
31 |
"globals": "^16.3.0",
|
32 |
+
"postcss": "^8.5.6",
|
33 |
+
"tailwindcss": "^3.4.0",
|
34 |
"vite": "^7.1.2"
|
35 |
}
|
36 |
}
|
frontend/postcss.config.js
ADDED
@@ -0,0 +1,6 @@
|
1 |
+
export default {
|
2 |
+
plugins: {
|
3 |
+
tailwindcss: {},
|
4 |
+
autoprefixer: {},
|
5 |
+
},
|
6 |
+
}
|
frontend/src/App.jsx
CHANGED
@@ -1,35 +1,113 @@
|
|
1 |
-
import { useState } from 'react'
|
2 |
-
import
|
3 |
-
import
|
4 |
-
import './
|
|
|
|
|
|
|
|
|
|
|
5 |
|
6 |
function App() {
|
7 |
-
const [
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
|
9 |
return (
|
10 |
-
|
11 |
-
<
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
<
|
16 |
-
<
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
}
|
34 |
|
35 |
-
export default App
|
|
|
1 |
+
import React, { useState, useEffect } from 'react';
|
2 |
+
import Header from './components/Header';
|
3 |
+
import JobForm from './components/JobForm';
|
4 |
+
import JobStatusCard from './components/JobStatusCard';
|
5 |
+
import ResultsDisplay from './components/ResultsDisplay';
|
6 |
+
import LoadingSkeleton from './components/LoadingSkeleton';
|
7 |
+
import HistoryPanel from './components/HistoryPanel';
|
8 |
+
import { createJob, getJob } from './services/api';
|
9 |
+
import { XCircle } from 'lucide-react';
|
10 |
|
11 |
function App() {
|
12 |
+
const [job, setJob] = useState(null);
|
13 |
+
const [isLoading, setIsLoading] = useState(false);
|
14 |
+
const [isPolling, setIsPolling] = useState(false);
|
15 |
+
const [error, setError] = useState(null);
|
16 |
+
|
17 |
+
const handleAnalysisRequest = async (ticker) => {
|
18 |
+
setIsLoading(true);
|
19 |
+
setIsPolling(true);
|
20 |
+
setError(null);
|
21 |
+
setJob(null);
|
22 |
+
try {
|
23 |
+
const response = await createJob(ticker);
|
24 |
+
setJob(response.data);
|
25 |
+
} catch (err) {
|
26 |
+
setError('Failed to create job. Please check the API server and try again.');
|
27 |
+
setIsLoading(false);
|
28 |
+
setIsPolling(false);
|
29 |
+
}
|
30 |
+
};
|
31 |
+
|
32 |
+
const handleSelectHistoryJob = (historyJob) => {
|
33 |
+
setIsLoading(false);
|
34 |
+
setIsPolling(false);
|
35 |
+
setError(null);
|
36 |
+
setJob(historyJob);
|
37 |
+
}
|
38 |
+
|
39 |
+
useEffect(() => {
|
40 |
+
if (!job?.id || !isPolling) return;
|
41 |
+
|
42 |
+
if (job.status !== 'PENDING') {
|
43 |
+
setIsLoading(false);
|
44 |
+
}
|
45 |
+
|
46 |
+
const intervalId = setInterval(async () => {
|
47 |
+
try {
|
48 |
+
const response = await getJob(job.id);
|
49 |
+
const updatedJob = response.data;
|
50 |
+
setJob(updatedJob);
|
51 |
+
|
52 |
+
if (updatedJob.status === 'SUCCESS' || updatedJob.status === 'FAILED') {
|
53 |
+
clearInterval(intervalId);
|
54 |
+
setIsPolling(false);
|
55 |
+
}
|
56 |
+
} catch (err) {
|
57 |
+
setError('Failed to poll job status.');
|
58 |
+
clearInterval(intervalId);
|
59 |
+
setIsPolling(false);
|
60 |
+
}
|
61 |
+
}, 3000);
|
62 |
+
|
63 |
+
return () => clearInterval(intervalId);
|
64 |
+
}, [job, isPolling]);
|
65 |
|
66 |
return (
|
67 |
+
<div className="min-h-screen bg-gray-900 text-white font-sans">
|
68 |
+
<Header />
|
69 |
+
<HistoryPanel onSelectJob={handleSelectHistoryJob} />
|
70 |
+
|
71 |
+
<main className="container mx-auto p-4 md:p-8">
|
72 |
+
<div className="max-w-4xl mx-auto">
|
73 |
+
<p className="text-lg text-gray-400 mb-8 text-center">
|
74 |
+
Enter an Indian stock ticker to receive a comprehensive, AI-powered analysis.
|
75 |
+
</p>
|
76 |
+
|
77 |
+
<JobForm onAnalyze={handleAnalysisRequest} isLoading={isLoading || isPolling} />
|
78 |
+
|
79 |
+
{error && <div className="my-6 p-4 bg-red-900/50 rounded-lg text-red-300 text-center">{error}</div>}
|
80 |
+
|
81 |
+
{isLoading && !job && <LoadingSkeleton />}
|
82 |
+
|
83 |
+
{job && !isLoading && <JobStatusCard job={job} />}
|
84 |
+
|
85 |
+
{job?.status === 'SUCCESS' && job.result && (
|
86 |
+
<ResultsDisplay result={job.result} />
|
87 |
+
)}
|
88 |
+
|
89 |
+
{job?.status === 'FAILED' && job.result?.error && (
|
90 |
+
<div className="mt-8 p-6 bg-gray-800/30 border border-red-500/30 rounded-lg text-center animate-fade-in">
|
91 |
+
<XCircle className="w-16 h-16 text-red-400 mx-auto mb-4" />
|
92 |
+
<h2 className="text-2xl font-bold text-red-300 mb-2">Analysis Failed</h2>
|
93 |
+
<p className="text-gray-400 max-w-lg mx-auto">
|
94 |
+
We couldn't complete the analysis for <strong className="text-white">{job.ticker}</strong>.
|
95 |
+
This usually means the stock symbol is incorrect or not listed.
|
96 |
+
</p>
|
97 |
+
<p className="text-xs text-gray-500 mt-4">Please double-check the ticker (e.g., RELIANCE.NS) and try again.</p>
|
98 |
+
|
99 |
+
<details className="mt-6 text-left w-full max-w-lg mx-auto">
|
100 |
+
<summary className="cursor-pointer text-xs text-gray-500 hover:text-gray-400 focus:outline-none">Show technical details</summary>
|
101 |
+
<pre className="mt-2 bg-gray-900 p-4 rounded-md text-gray-400 text-xs whitespace-pre-wrap font-mono">
|
102 |
+
{job.result.error}
|
103 |
+
</pre>
|
104 |
+
</details>
|
105 |
+
</div>
|
106 |
+
)}
|
107 |
+
</div>
|
108 |
+
</main>
|
109 |
+
</div>
|
110 |
+
);
|
111 |
}
|
112 |
|
113 |
+
export default App;
|
frontend/src/components/Header.jsx
ADDED
@@ -0,0 +1,17 @@
|
1 |
+
import React from 'react';
|
2 |
+
import { Bot } from 'lucide-react'; // Robot icon used as the app logo
|
3 |
+
|
4 |
+
function Header() {
|
5 |
+
return (
|
6 |
+
<header className="py-6 border-b border-gray-700">
|
7 |
+
<div className="container mx-auto flex items-center justify-center space-x-3">
|
8 |
+
<Bot className="w-8 h-8 text-green-400" />
|
9 |
+
<h1 className="text-3xl font-bold tracking-tight text-gray-100">
|
10 |
+
Quantitative Analysis Platform
|
11 |
+
</h1>
|
12 |
+
</div>
|
13 |
+
</header>
|
14 |
+
);
|
15 |
+
}
|
16 |
+
|
17 |
+
export default Header;
|
frontend/src/components/HistoricalChart.jsx
ADDED
@@ -0,0 +1,67 @@
|
1 |
+
import React, { useState, useEffect } from 'react';
|
2 |
+
import { LineChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer } from 'recharts';
|
3 |
+
import axios from 'axios';
|
4 |
+
|
5 |
+
// Public CORS proxy so the browser can fetch Yahoo Finance chart data despite CORS restrictions
|
6 |
+
const PROXY_URL = 'https://api.allorigins.win/raw?url=';
|
7 |
+
|
8 |
+
const fetchHistoricalData = async (ticker) => {
|
9 |
+
const yahooUrl = `https://query1.finance.yahoo.com/v8/finance/chart/${ticker}?range=100d&interval=1d`;
|
10 |
+
const encodedUrl = encodeURIComponent(yahooUrl);
|
11 |
+
|
12 |
+
try {
|
13 |
+
const response = await axios.get(PROXY_URL + encodedUrl);
|
14 |
+
const data = response.data.chart.result[0];
|
15 |
+
const timestamps = data.timestamp;
|
16 |
+
const prices = data.indicators.quote[0].close;
|
17 |
+
|
18 |
+
// Filter out any null price points which can crash the chart
|
19 |
+
return timestamps
|
20 |
+
.map((ts, i) => ({
|
21 |
+
date: new Date(ts * 1000).toLocaleDateString('en-IN', {day: 'numeric', month: 'short'}),
|
22 |
+
price: prices[i] ? prices[i].toFixed(2) : null,
|
23 |
+
}))
|
24 |
+
.filter(point => point.price !== null);
|
25 |
+
|
26 |
+
} catch (error) {
|
27 |
+
console.error("Failed to fetch historical data for chart:", error);
|
28 |
+
return [];
|
29 |
+
}
|
30 |
+
};
|
31 |
+
|
32 |
+
function HistoricalChart({ ticker }) {
|
33 |
+
const [data, setData] = useState([]);
|
34 |
+
const [loading, setLoading] = useState(true);
|
35 |
+
|
36 |
+
useEffect(() => {
|
37 |
+
if (ticker) {
|
38 |
+
setLoading(true);
|
39 |
+
fetchHistoricalData(ticker).then(chartData => {
|
40 |
+
setData(chartData);
|
41 |
+
setLoading(false);
|
42 |
+
});
|
43 |
+
}
|
44 |
+
}, [ticker]);
|
45 |
+
|
46 |
+
if (loading) return <div className="h-[300px] flex items-center justify-center"><p className="text-gray-400">Loading Chart Data...</p></div>;
|
47 |
+
if (data.length === 0) return <div className="h-[300px] flex items-center justify-center"><p className="text-gray-400">Could not load chart data.</p></div>;
|
48 |
+
|
49 |
+
return (
|
50 |
+
<div style={{ width: '100%', height: 300 }}>
|
51 |
+
<ResponsiveContainer>
|
52 |
+
<LineChart data={data} margin={{ top: 5, right: 10, left: -20, bottom: 5 }}>
|
53 |
+
<CartesianGrid strokeDasharray="3 3" stroke="rgba(255, 255, 255, 0.1)" />
|
54 |
+
<XAxis dataKey="date" stroke="#9CA3AF" tick={{ fontSize: 12 }} />
|
55 |
+
<YAxis stroke="#9CA3AF" tick={{ fontSize: 12 }} domain={['auto', 'auto']} />
|
56 |
+
<Tooltip
|
57 |
+
contentStyle={{ backgroundColor: '#1F2937', border: '1px solid #374151' }}
|
58 |
+
labelStyle={{ color: '#F3F4F6' }}
|
59 |
+
/>
|
60 |
+
<Line type="monotone" dataKey="price" stroke="#34D399" strokeWidth={2} name="Close Price" dot={false} />
|
61 |
+
</LineChart>
|
62 |
+
</ResponsiveContainer>
|
63 |
+
</div>
|
64 |
+
);
|
65 |
+
}
|
66 |
+
|
67 |
+
export default HistoricalChart;
|
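
Note: fetchHistoricalData assumes the Yahoo Finance chart endpoint (reached through the CORS proxy) responds with roughly the shape sketched below; only the fields the function actually reads are shown, and the values are illustrative only.

// Illustrative, trimmed response assumed from query1.finance.yahoo.com/v8/finance/chart/<ticker>
const exampleChartResponse = {
  chart: {
    result: [
      {
        timestamp: [1719792000, 1719878400],                  // UNIX seconds, one entry per trading day
        indicators: { quote: [{ close: [2450.55, null] }] },  // null closes are filtered out before charting
      },
    ],
  },
};
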
frontend/src/components/HistoryPanel.jsx
ADDED
@@ -0,0 +1,86 @@
import React, { useState, useEffect } from 'react';
import { getJobsHistory } from '../services/api';
import { History, LoaderCircle } from 'lucide-react';

function HistoryPanel({ onSelectJob }) {
  const [history, setHistory] = useState([]);
  const [isLoading, setIsLoading] = useState(false);
  const [isOpen, setIsOpen] = useState(false);

  const fetchHistory = () => {
    setIsLoading(true);
    getJobsHistory()
      .then(response => {
        // Filter for only completed jobs to make the list cleaner
        setHistory(response.data.filter(job => job.status === 'SUCCESS' || job.status === 'FAILED'));
      })
      .catch(error => console.error("Failed to fetch history:", error))
      .finally(() => setIsLoading(false));
  };

  // When the panel opens, fetch the history
  const togglePanel = () => {
    const newIsOpen = !isOpen;
    setIsOpen(newIsOpen);
    if (newIsOpen) {
      fetchHistory();
    }
  };

  const handleSelect = (job) => {
    onSelectJob(job);
    setIsOpen(false);
  };

  return (
    <>
      <button
        onClick={togglePanel}
        className="fixed bottom-6 right-6 bg-green-600 hover:bg-green-700 text-white p-4 rounded-full shadow-lg z-50 transition-transform hover:scale-110 focus:outline-none focus:ring-2 focus:ring-green-400"
        aria-label="Toggle analysis history"
      >
        <History className="w-8 h-8" />
      </button>

      {/* Overlay to close panel when clicking outside */}
      {isOpen && <div onClick={() => setIsOpen(false)} className="fixed inset-0 bg-black/50 z-30 transition-opacity"></div>}

      <div className={`fixed top-0 right-0 h-full bg-gray-900 border-l border-gray-700 shadow-2xl z-40 transition-transform duration-500 ease-in-out ${isOpen ? 'translate-x-0' : 'translate-x-full'} w-full md:w-96`}>
        <div className="p-4 border-b border-gray-700 flex justify-between items-center">
          <h2 className="text-xl font-bold">Analysis History</h2>
          <button onClick={() => setIsOpen(false)} className="text-gray-400 hover:text-white text-3xl">×</button>
        </div>
        <div className="p-4 overflow-y-auto h-[calc(100%-4rem)]">
          {isLoading ? (
            <div className="flex justify-center items-center h-full pt-20">
              <LoaderCircle className="w-8 h-8 animate-spin text-green-400" />
            </div>
          ) : history.length === 0 ? (
            <div className="text-center pt-20">
              <p className="text-gray-500">No past analyses found.</p>
              <p className="text-xs text-gray-600 mt-2">Complete an analysis to see it here.</p>
            </div>
          ) : (
            <ul className="space-y-3">
              {history.map(job => (
                <li
                  key={job.id}
                  onClick={() => handleSelect(job)}
                  className="p-3 bg-gray-800 rounded-md cursor-pointer hover:bg-green-800/50 border border-transparent hover:border-green-400/50 transition-all"
                >
                  <p className="font-bold text-white">{job.ticker}</p>
                  <div className="flex justify-between text-xs text-gray-400 mt-1">
                    <span>{new Date(job.created_at).toLocaleString('en-IN', { dateStyle: 'medium', timeStyle: 'short' })}</span>
                    <span className={`font-semibold px-2 py-0.5 rounded-full ${job.status === 'SUCCESS' ? 'bg-green-900/50 text-green-300' : 'bg-red-900/50 text-red-300'}`}>{job.status}</span>
                  </div>
                </li>
              ))}
            </ul>
          )}
        </div>
      </div>
    </>
  );
}

export default HistoryPanel;
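
HistoryPanel imports getJobsHistory from ../services/api, which does not appear in this part of the diff. A minimal sketch of what that helper might look like, assuming an axios instance pointed at the FastAPI backend and a jobs-history endpoint (the base URL and path below are assumptions, not confirmed by this commit):

// Hypothetical sketch of frontend/src/services/api.js
import axios from 'axios';

const api = axios.create({ baseURL: 'http://localhost:8000' }); // assumed backend address

export const getJobsHistory = () => api.get('/jobs'); // assumed history endpoint
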
frontend/src/components/JobForm.jsx
ADDED
@@ -0,0 +1,44 @@
import React, { useState } from 'react';
import { Search, LoaderCircle } from 'lucide-react';

// The component receives 'onAnalyze' and 'isLoading' as props
function JobForm({ onAnalyze, isLoading }) {
  const [ticker, setTicker] = useState('');

  const handleSubmit = (e) => {
    e.preventDefault();
    if (!ticker.trim() || isLoading) return;
    onAnalyze(ticker); // Call the function passed down from App.jsx
  };

  return (
    <form onSubmit={handleSubmit} className="mb-12">
      <div className="flex items-center bg-gray-800 border-2 border-gray-600 rounded-lg overflow-hidden focus-within:border-green-400 transition-colors duration-300">
        <span className="pl-4 text-gray-400">
          <Search className="w-6 h-6" />
        </span>
        <input
          type="text"
          value={ticker}
          onChange={(e) => setTicker(e.target.value.toUpperCase())}
          placeholder="e.g., RELIANCE.NS"
          className="w-full p-4 bg-transparent text-lg text-gray-200 placeholder-gray-500 focus:outline-none"
          disabled={isLoading}
        />
        <button
          type="submit"
          className="bg-green-600 hover:bg-green-700 text-white font-bold py-4 px-6 transition-colors duration-300 disabled:bg-gray-500 disabled:cursor-not-allowed"
          disabled={isLoading}
        >
          {isLoading ? (
            <LoaderCircle className="animate-spin w-6 h-6" />
          ) : (
            'Analyze'
          )}
        </button>
      </div>
    </form>
  );
}

export default JobForm;
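
JobForm is fully controlled by its parent: it only collects the ticker and hands it back. A minimal usage sketch follows; the handler and state names are illustrative, not taken from App.jsx.

// Hypothetical parent wiring for JobForm; startAnalysis and isLoading are illustrative names.
import React from 'react';
import JobForm from './JobForm';

function AnalysisScreen({ startAnalysis, isLoading }) {
  return <JobForm onAnalyze={(ticker) => startAnalysis(ticker)} isLoading={isLoading} />;
}

export default AnalysisScreen;
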
frontend/src/components/JobStatusCard.jsx
ADDED
@@ -0,0 +1,66 @@
import React from 'react';
import { LoaderCircle, CheckCircle2, XCircle, FileClock, Database, Search, Bot } from 'lucide-react';

function JobStatusCard({ job }) {
  const getStatusInfo = (status) => {
    // This map matches the status strings set in backend/tasks/main_task.py
    const statusMap = {
      'PENDING': {
        icon: <FileClock className="w-8 h-8 text-yellow-400" />,
        text: 'Your analysis is in the queue.',
        bgColor: 'bg-yellow-900/50 border-yellow-400/30',
      },
      'DATA_FETCHING': {
        icon: <Database className="w-8 h-8 text-blue-400 animate-pulse" />,
        text: 'Agent 1: Data Agent is fetching fundamentals...',
        bgColor: 'bg-blue-900/50 border-blue-400/30',
      },
      'INTELLIGENCE_GATHERING': {
        icon: <Search className="w-8 h-8 text-blue-400 animate-pulse" />,
        text: 'Agent 2: Intelligence Agent is scanning news...',
        bgColor: 'bg-blue-900/50 border-blue-400/30',
      },
      'ANALYZING': {
        icon: <Bot className="w-8 h-8 text-blue-400 animate-spin" />,
        text: 'Agent 3: Analyst Agent is generating a report with Gemini...',
        bgColor: 'bg-blue-900/50 border-blue-400/30',
      },
      'SUCCESS': {
        icon: <CheckCircle2 className="w-8 h-8 text-green-400" />,
        text: 'All agents have completed their tasks!',
        bgColor: 'bg-green-900/50 border-green-400/30',
      },
      'FAILED': {
        icon: <XCircle className="w-8 h-8 text-red-400" />,
        text: 'Analysis failed. See error below.',
        bgColor: 'bg-red-900/50 border-red-400/30',
      }
    };

    return statusMap[status] || {
      icon: <FileClock className="w-8 h-8 text-gray-400" />,
      text: 'Waiting for status...',
      bgColor: 'bg-gray-800 border-gray-600',
    };
  };

  const statusInfo = getStatusInfo(job.status);

  return (
    <div className="my-8">
      <div className={`p-6 rounded-lg border flex items-center space-x-4 transition-all duration-500 ${statusInfo.bgColor}`}>
        <div className="flex-shrink-0">
          {statusInfo.icon}
        </div>
        <div>
          <p className="font-bold text-lg text-gray-200">
            Analysis for {job.ticker}
          </p>
          <p className="text-gray-400">{statusInfo.text}</p>
        </div>
      </div>
    </div>
  );
}

export default JobStatusCard;
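
For reference, a job object with roughly the following shape would drive this card. Only ticker and status are read here; the remaining fields are assumptions based on how the other components (HistoryPanel, App.jsx) use the job, and the values are made up.

// Illustrative job payload; field names other than ticker and status are assumptions.
const exampleJob = {
  id: 42,
  ticker: 'RELIANCE.NS',
  status: 'ANALYZING', // PENDING | DATA_FETCHING | INTELLIGENCE_GATHERING | ANALYZING | SUCCESS | FAILED
  created_at: '2025-01-01T10:00:00Z',
  result: null,        // populated once the job reaches SUCCESS or FAILED
};
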
frontend/src/components/LoadingSkeleton.jsx
ADDED
@@ -0,0 +1,46 @@
import React from 'react';

// A simple helper for the pulsing effect
const SkeletonBlock = ({ className }) => (
  <div className={`bg-gray-700 rounded-md animate-pulse ${className}`} />
);

function LoadingSkeleton() {
  return (
    <div className="mt-8 p-6 bg-gray-800/50 border border-gray-700 rounded-lg">
      {/* Header Skeleton */}
      <div className="mb-6 pb-6 border-b border-gray-700">
        <SkeletonBlock className="h-12 w-3/4 mb-3" />
        <SkeletonBlock className="h-6 w-1/2" />
      </div>

      {/* Key Metrics Skeleton */}
      <div className="grid grid-cols-2 md:grid-cols-5 gap-4 mb-10">
        <SkeletonBlock className="h-20" />
        <SkeletonBlock className="h-20" />
        <SkeletonBlock className="h-20" />
        <SkeletonBlock className="h-20" />
        <SkeletonBlock className="h-20" />
      </div>

      {/* Chart Skeleton */}
      <div className="mb-12 bg-gray-900/40 p-6 rounded-xl">
        <SkeletonBlock className="h-8 w-1/3 mb-4" />
        <SkeletonBlock className="h-72 w-full" />
      </div>

      {/* AI Report Skeleton */}
      <div className="mb-12">
        <SkeletonBlock className="h-8 w-1/3 mb-4" />
        <div className="p-6 bg-gray-900/40 rounded-xl space-y-4">
          <SkeletonBlock className="h-6 w-full" />
          <SkeletonBlock className="h-6 w-5/6" />
          <SkeletonBlock className="h-6 w-full" />
          <SkeletonBlock className="h-6 w-3/4" />
        </div>
      </div>
    </div>
  );
}

export default LoadingSkeleton;

frontend/src/components/PredictionChart.jsx
ADDED
@@ -0,0 +1,36 @@
import React from 'react';
import { ComposedChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, Legend, ResponsiveContainer, Area } from 'recharts';

function PredictionChart({ data }) {
  if (!data || data.length === 0) return null;

  // Format the date for the X-axis
  const formatXAxis = (tickItem) => {
    return new Date(tickItem).toLocaleDateString('en-IN', { month: 'short', day: 'numeric' });
  };

  // ComposedChart is used (instead of LineChart) so the Area confidence band and the
  // forecast Line can be rendered together; recharts' LineChart only draws Line series.
  return (
    <div style={{ width: '100%', height: 400 }}>
      <ResponsiveContainer>
        <ComposedChart data={data} margin={{ top: 5, right: 20, left: -10, bottom: 5 }}>
          <CartesianGrid strokeDasharray="3 3" stroke="#4A5568" />
          <XAxis dataKey="ds" tickFormatter={formatXAxis} stroke="#A0AEC0" />
          <YAxis stroke="#A0AEC0" domain={['dataMin - 5', 'dataMax + 5']} />
          <Tooltip
            contentStyle={{ backgroundColor: '#1A202C', border: '1px solid #4A5568' }}
            labelFormatter={(label) => new Date(label).toLocaleDateString('en-IN')}
          />
          <Legend />
          {/* Confidence Interval Area */}
          <Area type="monotone" dataKey="yhat_upper" fill="#2F855A" stroke="#2F855A" fillOpacity={0.1} name="Upper Confidence" dot={false} />
          <Area type="monotone" dataKey="yhat_lower" fill="#2F855A" stroke="#2F855A" fillOpacity={0.1} name="Lower Confidence" dot={false} />

          {/* Forecast Line */}
          <Line type="monotone" dataKey="yhat" stroke="#48BB78" strokeWidth={2} name="Forecast" dot={false} />
        </ComposedChart>
      </ResponsiveContainer>
    </div>
  );
}

export default PredictionChart;
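
The chart expects an array of forecast points keyed by ds, yhat, yhat_lower, and yhat_upper. An illustrative payload is sketched below; the Prophet-style column names are assumed to come from the backend prediction task, and the values are made up.

// Illustrative forecast array for PredictionChart; values are not real output.
const exampleForecast = [
  { ds: '2025-01-01', yhat: 2451.3, yhat_lower: 2398.7, yhat_upper: 2503.9 },
  { ds: '2025-01-02', yhat: 2458.1, yhat_lower: 2402.2, yhat_upper: 2511.6 },
];
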