mirror of
https://github.com/bnair123/MusicAnalyser.git
synced 2026-02-25 11:46:07 +00:00
Implement Phase 3 Music Analysis and LLM Engine
- Refactor Database: Add `Artist` model, M2M relationship, and `AnalysisSnapshot` model. - Backend Services: Implement `StatsService` for computable metrics and `NarrativeService` for Gemini LLM integration. - Fix Ingestion: Correctly handle multiple artists per track and backfill existing data. - Testing: Add unit tests for statistics logic and live verification scripts. - Documentation: Add `PHASE_4_FRONTEND_GUIDE.md`.
This commit is contained in:
67
backend/app/services/narrative_service.py
Normal file
67
backend/app/services/narrative_service.py
Normal file
@@ -0,0 +1,67 @@
|
||||
import os
|
||||
import json
|
||||
import google.generativeai as genai
|
||||
from typing import Dict, Any
|
||||
|
||||
class NarrativeService:
    """Turn pre-computed listening statistics into an LLM-written narrative.

    Sends a stats JSON payload to Google's Gemini API and returns the model's
    structured response (vibe check, patterns, persona, roast). The service
    never computes metrics itself — the prompt explicitly forbids the model
    from recalculating numbers.
    """

    def __init__(self, model_name: str = "gemini-2.5-flash"):
        """Read the Gemini API key from the environment and configure the client.

        Args:
            model_name: Gemini model identifier. The client library accepts
                both the short form ("gemini-2.5-flash") and the full path
                ("models/gemini-2.5-flash"), so no normalization is done here.
        """
        self.api_key = os.getenv("GEMINI_API_KEY")
        if not self.api_key:
            # Deliberately non-fatal: the service can still be constructed
            # without a key; generate_narrative() returns an error dict instead.
            print("WARNING: GEMINI_API_KEY not found. LLM features will fail.")
        else:
            genai.configure(api_key=self.api_key)

        self.model_name = model_name

    def generate_narrative(self, stats_json: Dict[str, Any]) -> Dict[str, Any]:
        """Generate the narrative analysis for a stats payload.

        Args:
            stats_json: JSON-serializable metrics summary (inlined verbatim
                into the prompt; never recalculated by the model).

        Returns:
            On success, the parsed model output — expected keys are
            "vibe_check" (str), "patterns" (list of str), "persona" (str) and
            "roast" (str). On failure, a dict with "error" and (for API or
            parse failures) "raw_response" for debugging.
        """
        if not self.api_key:
            return {"error": "Missing API Key"}

        prompt = self._build_prompt(stats_json)

        response = None
        try:
            model = genai.GenerativeModel(self.model_name)
            response = model.generate_content(prompt)
            return self._extract_json(response.text)
        except Exception as e:
            # Surface both the failure and the raw model output (if any) so
            # callers can debug malformed LLM responses.
            return {
                "error": str(e),
                "raw_response": response.text if response is not None else "No response",
            }

    @staticmethod
    def _build_prompt(stats_json: Dict[str, Any]) -> str:
        """Render the analysis prompt with the stats payload inlined as JSON."""
        return f"""
You are analyzing a user's Spotify listening data. Below is a JSON summary of metrics I've computed. Your job is to:

1. Write a narrative "Vibe Check" (2-3 paragraphs) describing their overall listening personality this period.
2. Identify 3-5 notable patterns or anomalies.
3. Provide a "Musical Persona" label (e.g., "Late-Night Binge Listener", "Genre Chameleon", "Album Purist").
4. Write a brief, playful "roast" (1-2 sentences) based on the data.

Guidelines:
- Do NOT recalculate any numbers.
- Use specific metrics to support observations (e.g., "Your whiplash score of 18.3 BPM suggests...").
- Keep tone conversational but insightful.
- Avoid mental health claims; stick to behavioral descriptors.
- Highlight both positive patterns and quirks.

Data:
{json.dumps(stats_json, indent=2)}

Output Format (return valid JSON):
{{
    "vibe_check": "...",
    "patterns": ["...", "..."],
    "persona": "...",
    "roast": "..."
}}
"""

    @staticmethod
    def _extract_json(text: str) -> Dict[str, Any]:
        """Parse the model's response text as JSON.

        LLMs frequently wrap JSON in markdown code fences; strip them before
        parsing (same replace-based cleanup as the original implementation).

        Raises:
            json.JSONDecodeError: if the cleaned text is still not valid JSON
                (caught by generate_narrative's handler).
        """
        text = text.strip()
        if text.startswith("```json"):
            text = text.replace("```json", "").replace("```", "")
        elif text.startswith("```"):
            text = text.replace("```", "")
        return json.loads(text)
|
||||
Reference in New Issue
Block a user