from flask import Flask, request, jsonify
from flask_cors import CORS
import os

# ===== Fix HF Spaces cache write error =====
# These cache paths must be set before transformers is imported, otherwise the
# library has already resolved its default (non-writable) cache location.
os.environ["TRANSFORMERS_CACHE"] = "./cache"
os.environ["HF_HOME"] = "./hf_cache"

from transformers import pipeline
import json
import re
import logging
import google.generativeai as genai
from tempfile import NamedTemporaryFile

# === Setup ===
app = Flask(__name__)
CORS(app, resources={r"/predict": {"origins": "*"}}, supports_credentials=True)

# Logging config
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s - %(message)s')

# === Load model ===
logging.info("Loading HuggingFace audio emotion recognition model...")
pipe = pipeline("audio-classification", model="superb/wav2vec2-base-superb-er")
logging.info("Model loaded successfully.")

# === Gemini setup ===
logging.info("Setting up Gemini API...")
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))  # Load from environment variable
model = genai.GenerativeModel("gemini-1.5-pro")
logging.info("Gemini API configured.")

# === Gemini recommendation logic ===
def get_gemini_recommendations(emotion):
    logging.info(f"Generating recommendations for emotion: {emotion}")
    prompt = f"""
    Given the user's emotion: "{emotion}", suggest a JSON object with personalized:
    - 3 books
    - 3 songs/music
    - 3 movies

    Each item must include:
    - The title
    - A link (Spotify for music, Goodreads for books, IMDb for movies)

    Respond strictly in this JSON format:
    {{
        "books": [
            {{"title": "book1", "link": "https://goodreads.com/..."}} ...
        ],
        "music": [
            ...
        ],
        "movies": [
            ...
        ]
    }}

    Return only the JSON response and nothing else.
    """
    try:
        response = model.generate_content(prompt)
        content = response.text.strip()
        match = re.search(r'\{.*\}', content, re.DOTALL)
        if match:
            return json.loads(match.group(0))
        else:
            return {"books": [], "music": [], "movies": []}
    except Exception as e:
        logging.error(f"Gemini API error: {e}")
        return {"books": [], "music": [], "movies": []}

# === Flask API route ===
@app.route("/predict", methods=["POST"])
def predict():
    logging.info("Received REST prediction request.")
    try:
        if 'file' not in request.files:
            return jsonify({'error': 'No file part'}), 400

        file = request.files['file']
        if file.filename == '':
            return jsonify({'error': 'No selected file'}), 400

        # Save file temporarily
        with NamedTemporaryFile(delete=False, suffix=".wav") as temp:
            file.save(temp.name)
            file_path = temp.name

        # Emotion prediction
        results = pipe(file_path)
        emotion = results[0]['label'].lower()

        # Get Gemini recommendations
        recommendations = get_gemini_recommendations(emotion)

        # Clean up temp file
        os.remove(file_path)

        return jsonify({
            "emotion": emotion,
            "recommendations": recommendations
        })
    except Exception as e:
        logging.error(f"Prediction error: {e}")
        return jsonify({'error': str(e)}), 500

# === Run Flask App ===
if __name__ == '__main__':
    logging.info("Starting EmoTune Flask API...")
    app.run(host="0.0.0.0", port=7860)
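
# === Example client call (reference only) ===
# A minimal sketch of how a client could exercise the /predict endpoint once the
# app is running; the URL and file name below are placeholders, not part of this Space.
#
#   import requests
#
#   with open("sample.wav", "rb") as f:
#       resp = requests.post(
#           "http://localhost:7860/predict",
#           files={"file": ("sample.wav", f, "audio/wav")},
#       )
#   print(resp.json())  # {"emotion": "<top model label>", "recommendations": {...}}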