dhruv2842 committed on
Commit
643a619
·
verified ·
1 Parent(s): bc98cbc

Upload 29 files

Browse files
Dockerfile ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use a slim Python base image
FROM python:3.10-slim

# Prevent .pyc files on disk and force unbuffered stdout/stderr so logs appear immediately
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

# Set working directory in the container
WORKDIR /app

# Install system dependencies (ffmpeg for audio conversion, libsndfile1 for audio I/O).
# --no-install-recommends keeps the image small; the apt list cleanup must stay in the
# same RUN layer to actually shrink the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends ffmpeg libsndfile1 \
    && rm -rf /var/lib/apt/lists/*

# Copy dependency file first to leverage the Docker layer cache
COPY requirements.txt .

# Install Python dependencies (--no-cache-dir on both installs avoids a stale pip cache layer)
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt

# Copy the rest of the app
COPY . .

# Expose the FastAPI port (Hugging Face prefers 7860, but 8000 is also fine)
EXPOSE 8000

# Run the app using Uvicorn
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
core/__pycache__/database.cpython-38.pyc ADDED
Binary file (493 Bytes). View file
 
core/config.py ADDED
File without changes
core/database.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

# SQLite file in the process working directory; the relative path means the
# DB location depends on where the app is started from.
SQLALCHEMY_DATABASE_URL = "sqlite:///./appointments.db"

# check_same_thread=False is required for SQLite under FastAPI: a request may
# be serviced on a different thread than the one that created the connection.
engine = create_engine(
    SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)

# Session factory consumed by the per-request get_db() dependencies in routes/.
SessionLocal = sessionmaker(bind=engine, autocommit=False, autoflush=False)
# Declarative base shared by all ORM models (core.models.*).
Base = declarative_base()
core/models/__pycache__/appointment.cpython-38.pyc ADDED
Binary file (613 Bytes). View file
 
core/models/__pycache__/user.cpython-38.pyc ADDED
Binary file (619 Bytes). View file
 
core/models/appointment.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from sqlalchemy import Column, Integer, String
from core.database import Base

class Appointment(Base):
    """ORM model for one booked appointment row."""

    __tablename__ = "appointments"

    id = Column(Integer, primary_key=True, index=True)
    patient_name = Column(String, nullable=False)  # free-text patient name
    age = Column(Integer, nullable=False)          # patient age in years
    symptoms = Column(String, nullable=False)      # symptom text (typed or from STT)
    specialist = Column(String, nullable=False)    # chosen/predicted specialist label
12
+
13
+
core/models/user.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
from sqlalchemy import Column, Integer, String
from core.database import Base

class User(Base):
    """ORM model for an account (patient or doctor)."""

    __tablename__ = "users"

    id = Column(Integer, primary_key=True, index=True)
    full_name = Column(String, nullable=False)
    email = Column(String, unique=True, index=True, nullable=False)  # login identifier
    password = Column(String, nullable=False)  # bcrypt hash (see utils.security.hash_password)
    specialization = Column(String, nullable=True)  # Only for doctors; NULL for patients
main.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from core.database import engine
from core.models.appointment import Appointment
from core.models.user import User
from routes import appointments, users

# Create FastAPI app first
app = FastAPI()

# Enable CORS BEFORE including any routers so preflight (OPTIONS) requests
# are handled for every registered route.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000"],  # React dev server; add production origins here
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Create tables at import time; checkfirst=True makes this a no-op when the
# tables already exist.
# NOTE(review): Base.metadata.create_all(bind=engine) would cover all models
# in one call if more tables are added later.
Appointment.__table__.create(bind=engine, checkfirst=True)
User.__table__.create(bind=engine, checkfirst=True)

# Register routers
app.include_router(appointments.router)  # /appointments/*
app.include_router(users.router)         # /users/* (prefix declared on the router itself)
requirements.txt ADDED
Binary file (368 Bytes). View file
 
routes/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from . import appointments
2
+ from . import users # ✅ This should import the file
routes/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (191 Bytes). View file
 
routes/__pycache__/appointments.cpython-38.pyc ADDED
Binary file (2.03 kB). View file
 
routes/__pycache__/users.cpython-38.pyc ADDED
Binary file (1.67 kB). View file
 
routes/appointments.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, Depends
2
+ from sqlalchemy.orm import Session
3
+ from schemas.appointment import AppointmentInput
4
+ from core.database import SessionLocal
5
+ from core.models.appointment import Appointment
6
+
7
+ router = APIRouter()
8
+
9
+ # Dependency
10
+ def get_db():
11
+ db = SessionLocal()
12
+ try:
13
+ yield db
14
+ finally:
15
+ db.close()
16
+
17
+ @router.post("/appointments/create")
18
+ def create_appointment(data: AppointmentInput, db: Session = Depends(get_db)):
19
+ new_appointment = Appointment(
20
+ patient_name=data.patient_name,
21
+ age=data.age,
22
+ symptoms=data.symptoms,
23
+ specialist=data.specialist
24
+ )
25
+ db.add(new_appointment)
26
+ db.commit()
27
+ db.refresh(new_appointment)
28
+
29
+ return {
30
+ "message": "Appointment saved to database",
31
+ "data": {
32
+ "id": new_appointment.id,
33
+ "patient_name": new_appointment.patient_name,
34
+ "age": new_appointment.age,
35
+ "symptoms": new_appointment.symptoms,
36
+ "specialist": new_appointment.specialist
37
+ }
38
+ }
39
+ from fastapi import APIRouter, UploadFile, File
40
+ from utils.stt_processor import simulate_stt # make sure path is correct
41
+ import shutil
42
+ import os
43
+ from utils.specialist_predictor import predict_specialist
44
+
45
@router.post("/appointments/voice")
async def create_appointment_from_voice(audio: UploadFile = File(...), db: Session = Depends(get_db)):
    """Create an appointment from an uploaded audio file.

    Pipeline: save upload -> speech-to-text -> predict specialist -> persist row.

    Returns the stored appointment plus the predicted specialist and the
    cosine-similarity score of the prediction.
    """
    # 1. Save the upload to a temp file. basename() strips any directory
    #    components from the client-supplied filename, so a crafted name like
    #    "../../etc/x" cannot escape the working directory (path-traversal guard).
    temp_path = f"temp_{os.path.basename(audio.filename or 'upload')}"
    with open(temp_path, "wb") as buffer:
        shutil.copyfileobj(audio.file, buffer)

    try:
        # 2. Extract structured fields (name/age/symptoms) from the audio
        data = simulate_stt(temp_path)
        symptoms = data.get("symptoms", "")

        # 3. Predict specialist from the symptom text
        specialist, score = predict_specialist(symptoms)

        # 4. Persist the appointment
        new_appointment = Appointment(
            patient_name=data.get("patient_name", "Unknown"),
            age=data.get("age", 0),
            symptoms=symptoms,
            specialist=specialist
        )
        db.add(new_appointment)
        db.commit()
        db.refresh(new_appointment)

        # 5. Return the stored row plus prediction metadata
        return {
            "message": "Appointment created successfully from voice",
            "predicted_specialist": specialist,
            "similarity_score": round(score, 4),
            "appointment": {
                "id": new_appointment.id,
                "patient_name": new_appointment.patient_name,
                "age": new_appointment.age,
                "symptoms": symptoms,
                "specialist": specialist
            }
        }

    finally:
        # Always clean up the temp file, even when STT or prediction raised;
        # the existence check avoids a FileNotFoundError masking the real error.
        if os.path.exists(temp_path):
            os.remove(temp_path)
routes/users.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, Depends,HTTPException
2
+ from sqlalchemy.orm import Session
3
+ from core.database import SessionLocal
4
+ from core.models.user import User
5
+ from schemas.user import UserCreate,UserLogin
6
+ from utils.security import hash_password
7
+ from utils.auth import create_access_token # adjust based on your folder structure
8
+
9
+ from passlib.hash import bcrypt
10
# Router mounted by main.py; the prefix makes the endpoints /users/signup
# and /users/login.
router = APIRouter(
    prefix="/users",
    tags=["Users"]
)
def get_db():
    """FastAPI dependency: yield a DB session, always close it afterwards."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
# Import-time confirmation that this module was loaded (debugging aid).
print("✅ users.router loaded")
21
@router.post("/signup")
def create_user(user: UserCreate, db: Session = Depends(get_db)):
    """Register a new user; rejects an already-registered e-mail with HTTP 400."""
    # The email column is declared unique — check up front so the client gets
    # a clean 400 instead of a 500 from the database IntegrityError.
    if db.query(User).filter(User.email == user.email).first():
        raise HTTPException(status_code=400, detail="Email already registered")

    db_user = User(
        full_name=user.full_name,
        email=user.email,
        password=hash_password(user.password),  # store only the bcrypt hash, never plaintext
        specialization=user.specialization
    )
    db.add(db_user)
    db.commit()
    db.refresh(db_user)  # populate the auto-generated id
    return {"message": "User created successfully", "user_id": db_user.id}
33
+
34
+
35
@router.post("/login")
def login(user: UserLogin, db: Session = Depends(get_db)):
    """Authenticate by email + password and return a bearer JWT."""
    db_user = db.query(User).filter(User.email == user.email).first()
    # Identical message for unknown email and wrong password, so responses
    # don't reveal which accounts exist.
    if not db_user:
        raise HTTPException(status_code=400, detail="Invalid email or password")

    # NOTE(review): verification uses passlib's bcrypt handler directly while
    # hashing goes through utils.security's CryptContext — works because both
    # produce/consume bcrypt hashes, but consider utils.security.verify_password
    # for symmetry.
    if not bcrypt.verify(user.password, db_user.password):
        raise HTTPException(status_code=400, detail="Invalid email or password")

    # Generate JWT with the user's email as the subject claim
    token = create_access_token(data={"sub": db_user.email})

    return {"access_token": token, "token_type": "bearer", "user_id": db_user.id, "name": db_user.full_name}
schemas/__pycache__/appointment.cpython-38.pyc ADDED
Binary file (756 Bytes). View file
 
schemas/__pycache__/user.cpython-38.pyc ADDED
Binary file (585 Bytes). View file
 
schemas/appointment.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import APIRouter
from pydantic import BaseModel

# NOTE(review): a router inside a schemas module is unusual, and the endpoint
# below duplicates POST /appointments/create from routes/appointments.py.
# As far as visible here, main.py never includes this router, so the handler
# is dead code — confirm and consider removing it.
router = APIRouter()
class AppointmentInput(BaseModel):
    """Request body for creating an appointment manually."""
    patient_name: str
    age: int
    symptoms: str
    specialist: str

@router.post("/appointments/create")
async def create_appointment(data: AppointmentInput):
    # Debug-style handler: logs and echoes the payload; does NOT persist it.
    print(f"Received appointment: {data}")
    return {"message": "Appointment created", "data": data}
schemas/user.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Optional

from pydantic import BaseModel
2
+
3
class UserCreate(BaseModel):
    """Signup payload for POST /users/signup."""
    full_name: str
    email: str
    password: str
    # Optional: the users table stores specialization as nullable and it only
    # applies to doctors, so patients may omit it. Defaulting to None is
    # backward-compatible with clients that already send a string.
    specialization: Optional[str] = None
8
+
9
+
10
+
11
class UserLogin(BaseModel):
    """Login payload for POST /users/login."""
    email: str
    password: str
semantic_specialist_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9472ff0898bdaadba953f7e56549c804c21dbfb5dec7a2a1f57ade6dc80a8813
3
+ size 572198
utils/__pycache__/auth.cpython-38.pyc ADDED
Binary file (671 Bytes). View file
 
utils/__pycache__/security.cpython-38.pyc ADDED
Binary file (593 Bytes). View file
 
utils/__pycache__/specialist_predictor.cpython-38.pyc ADDED
Binary file (785 Bytes). View file
 
utils/__pycache__/stt_processor.cpython-38.pyc ADDED
Binary file (2.06 kB). View file
 
utils/auth.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from datetime import datetime, timedelta, timezone
from typing import Optional

from jose import JWTError, jwt

# Read the signing key from the environment; the hard-coded fallback is for
# local development only and MUST be overridden in production.
SECRET_KEY = os.environ.get("SECRET_KEY", "your-secret-key")
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30

def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
    """Return a signed JWT containing *data* plus an ``exp`` claim.

    Args:
        data: claims to embed (e.g. {"sub": email}); copied, never mutated.
        expires_delta: custom lifetime; defaults to ACCESS_TOKEN_EXPIRE_MINUTES.
    """
    to_encode = data.copy()
    # timezone-aware now() replaces the deprecated, naive datetime.utcnow()
    expire = datetime.now(timezone.utc) + (expires_delta or timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES))
    to_encode.update({"exp": expire})
    encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
    return encoded_jwt
utils/security.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
from passlib.context import CryptContext

# Single shared context; deprecated="auto" lets passlib flag hashes made with
# older schemes as needing a re-hash.
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")

def hash_password(password: str) -> str:
    """Return a salted bcrypt hash of *password*."""
    return pwd_context.hash(password)

def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Check *plain_password* against a stored bcrypt hash."""
    return pwd_context.verify(plain_password, hashed_password)
utils/specialist_predictor.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from sentence_transformers import SentenceTransformer, util
import torch
import joblib

# Load model components once at import time.
# NOTE(review): assumes the pickle bundle contains "model_name",
# "known_embeddings" and "symptom_specialist_pairs" — confirm against the
# training script. Unpickling is only acceptable because the .pkl ships with
# the repo; never joblib.load untrusted files.
bundle = joblib.load("semantic_specialist_model.pkl")
model = SentenceTransformer(bundle["model_name"])
known_embeddings = bundle["known_embeddings"]
symptom_specialist_pairs = bundle["symptom_specialist_pairs"]

def predict_specialist(symptom_text: str):
    """Return (specialist, cosine_score) for the closest known symptom text."""
    input_embedding = model.encode(symptom_text, convert_to_tensor=True)
    # Cosine similarity of the query against every known symptom embedding
    similarities = util.pytorch_cos_sim(input_embedding, known_embeddings)[0]
    top_idx = similarities.argmax().item()
    # Each pair appears to be (symptom_text, specialist_label) — index 1 is the label
    specialist = symptom_specialist_pairs[top_idx][1]
    score = similarities[top_idx].item()
    return specialist, score
utils/stt_processor.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import speech_recognition as sr
2
+ import re
3
+ import os
4
+ from pydub import AudioSegment
5
+
6
def convert_to_wav_pydub(input_path: str, output_path: str = "converted_temp.wav") -> str:
    """Re-encode any audio file as 16 kHz mono 16-bit PCM WAV; return its path."""
    segment = (
        AudioSegment.from_file(input_path)
        .set_frame_rate(16000)
        .set_channels(1)
        .set_sample_width(2)  # 2 bytes per sample = 16-bit PCM
    )
    segment.export(output_path, format="wav")
    return output_path
11
+
12
def transcribe_audio(audio_file_path: str) -> str:
    """Transcribe an audio file via Google's free speech API.

    Returns the recognized text, or a human-readable error string when the
    audio is unintelligible or the API request fails.
    """
    recognizer = sr.Recognizer()

    # Always re-encode to 16 kHz mono PCM WAV: speech_recognition requires PCM
    # and even a ".wav" upload may use a non-PCM codec. (The original branched
    # on the file extension, but both branches converted anyway — dead branch.)
    converted_path = convert_to_wav_pydub(audio_file_path)

    try:
        with sr.AudioFile(converted_path) as source:
            audio_data = recognizer.record(source)
            text = recognizer.recognize_google(audio_data)
            print("✅ Transcribed Text:", text)
            return text
    except sr.UnknownValueError:
        print("⚠️ Could not understand audio")
        return "Could not understand audio"
    except sr.RequestError as e:
        print("❌ API Request Error:", e)
        return f"Request failed: {e}"
    finally:
        # The converted file is always ours to delete
        if os.path.exists(converted_path):
            try:
                os.remove(converted_path)
            except PermissionError:
                print("⚠️ Warning: File could not be deleted, still in use.")
41
+
42
def simulate_stt(audio_file_path: str) -> dict:
    """Transcribe the audio, then pull out name / age / symptoms via regex."""
    raw_text = transcribe_audio(audio_file_path)

    # Pattern-match the expected phrasing, e.g.
    # "my name is John, I am 30 years old, suffering from headache"
    name_match = re.search(r"my name is ([a-zA-Z ]+?)(?= i am| and|,|\.|$)", raw_text, re.IGNORECASE)
    age_match = re.search(r"i am (\d+) years old", raw_text, re.IGNORECASE)
    symptoms_match = re.search(r"suffering from (.+)", raw_text, re.IGNORECASE)

    patient_name = name_match.group(1).strip() if name_match else "Unknown"
    age = int(age_match.group(1)) if age_match else 0
    symptoms = symptoms_match.group(1).strip() if symptoms_match else "Not mentioned"

    return {
        "patient_name": patient_name,
        "age": age,
        "symptoms": symptoms,
        "preferred_doctor": "Not specified",
    }