mirror of
https://github.com/bnair123/MusicAnalyser.git
synced 2026-02-25 11:46:07 +00:00
Add skip tracking, compressed heatmap, listening log, docs, tests, and OpenAI support

Major changes:
- Add skip tracking: poll currently-playing every 15s, detect skips (<30s listened)
- Add listening-log and sessions API endpoints
- Fix ReccoBeats client to extract spotify_id from href response
- Compress heatmap from 24 hours to 6 x 4-hour blocks
- Add OpenAI support in narrative service (use max_completion_tokens for new models)
- Add ListeningLog component with timeline and list views
- Update all frontend components to use real data (album art, play counts)
- Add docker-compose external network (dockernet) support
- Add comprehensive documentation (API, DATA_MODEL, ARCHITECTURE, FRONTEND)
- Add unit tests for ingest and API endpoints
This commit is contained in:
@@ -1,101 +1,154 @@
|
||||
import os
|
||||
import json
|
||||
import re
|
||||
from google import genai
|
||||
from typing import Dict, Any, List, Optional
|
||||
from typing import Dict, Any
|
||||
|
||||
try:
|
||||
from openai import OpenAI
|
||||
except ImportError:
|
||||
OpenAI = None
|
||||
|
||||
try:
|
||||
from google import genai
|
||||
except ImportError:
|
||||
genai = None
|
||||
|
||||
|
||||
class NarrativeService:
    """Generates an LLM-written narrative report from listening statistics.

    Supports both OpenAI (gpt-* models) and Google Gemini; the provider is
    chosen from the configured model name and whichever API keys / SDKs are
    available at runtime. Falls back to a static report when neither works.
    """

    def __init__(self, model_name: str = "gpt-5-mini-2025-08-07"):
        """Initialize the service for *model_name*.

        The diff residue contained two conflicting __init__ definitions (old
        Gemini-only and new provider-based); this is the consolidated new one.
        """
        # Model identifier; a "gpt" prefix steers provider detection to OpenAI.
        self.model_name = model_name
        # One of "openai", "gemini", or "none" (no usable key/SDK).
        self.provider = self._detect_provider()
        # SDK client for the detected provider, or None when provider == "none".
        # Missing-client warnings are emitted at generation time, not here.
        self.client = self._init_client()
def _detect_provider(self) -> str:
|
||||
openai_key = os.getenv("OPENAI_API_KEY") or os.getenv("OPENAI_APIKEY")
|
||||
gemini_key = os.getenv("GEMINI_API_KEY")
|
||||
|
||||
if self.model_name.startswith("gpt") and openai_key and OpenAI:
|
||||
return "openai"
|
||||
elif gemini_key and genai:
|
||||
return "gemini"
|
||||
elif openai_key and OpenAI:
|
||||
return "openai"
|
||||
elif gemini_key and genai:
|
||||
return "gemini"
|
||||
return "none"
|
||||
|
||||
def _init_client(self):
|
||||
if self.provider == "openai":
|
||||
api_key = os.getenv("OPENAI_API_KEY") or os.getenv("OPENAI_APIKEY")
|
||||
return OpenAI(api_key=api_key)
|
||||
elif self.provider == "gemini":
|
||||
api_key = os.getenv("GEMINI_API_KEY")
|
||||
return genai.Client(api_key=api_key)
|
||||
return None
|
||||
|
||||
def generate_full_narrative(self, stats_json: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
Orchestrates the generation of the full narrative report.
|
||||
Currently uses a single call for consistency and speed.
|
||||
"""
|
||||
if not self.api_key:
|
||||
if not self.client:
|
||||
print("WARNING: No LLM client available")
|
||||
return self._get_fallback_narrative()
|
||||
|
||||
clean_stats = self._shape_payload(stats_json)
|
||||
|
||||
prompt = f"""
|
||||
You are a witty, insightful, and slightly snarky music critic analyzing a user's Spotify listening data.
|
||||
Your goal is to generate a JSON report that acts as a deeper, more honest "Spotify Wrapped".
|
||||
prompt = self._build_prompt(clean_stats)
|
||||
|
||||
**CORE RULES:**
|
||||
1. **NO Mental Health Diagnoses:** Do not mention depression, anxiety, or therapy. Stick to behavioral descriptors (e.g., "introspective", "high-energy").
|
||||
2. **Be Specific:** Use the provided metrics. Don't say "You like pop," say "Your Mainstream Score of 85% suggests..."
|
||||
3. **Roast Gently:** Be playful but not cruel.
|
||||
4. **JSON Output Only:** Return strictly valid JSON.
|
||||
|
||||
**DATA TO ANALYZE:**
|
||||
{json.dumps(clean_stats, indent=2)}
|
||||
|
||||
**REQUIRED JSON STRUCTURE:**
|
||||
{{
|
||||
"vibe_check": "2-3 paragraphs describing their overall listening personality this period.",
|
||||
"patterns": ["Observation 1", "Observation 2", "Observation 3 (Look for specific habits like skipping or late-night sessions)"],
|
||||
"persona": "A creative label (e.g., 'The Genre Chameleon', 'Nostalgic Dad-Rocker').",
|
||||
"era_insight": "A specific comment on their Musical Age ({clean_stats.get('era', {}).get('musical_age', 'N/A')}) and Nostalgia Gap.",
|
||||
"roast": "A 1-2 sentence playful roast about their taste.",
|
||||
"comparison": "A short comment comparing this period to the previous one (if data exists)."
|
||||
}}
|
||||
"""
|
||||
try:
|
||||
response = self.client.models.generate_content(
|
||||
model=self.model_name,
|
||||
contents=prompt,
|
||||
config=genai.types.GenerateContentConfig(response_mime_type="application/json")
|
||||
)
|
||||
|
||||
return self._clean_and_parse_json(response.text)
|
||||
|
||||
if self.provider == "openai":
|
||||
return self._call_openai(prompt)
|
||||
elif self.provider == "gemini":
|
||||
return self._call_gemini(prompt)
|
||||
except Exception as e:
|
||||
print(f"LLM Generation Error: {e}")
|
||||
return self._get_fallback_narrative()
|
||||
|
||||
return self._get_fallback_narrative()
|
||||
|
||||
def _call_openai(self, prompt: str) -> Dict[str, Any]:
|
||||
response = self.client.chat.completions.create(
|
||||
model=self.model_name,
|
||||
messages=[
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are a witty music critic. Output only valid JSON.",
|
||||
},
|
||||
{"role": "user", "content": prompt},
|
||||
],
|
||||
response_format={"type": "json_object"},
|
||||
max_completion_tokens=1500,
|
||||
temperature=0.8,
|
||||
)
|
||||
return self._clean_and_parse_json(response.choices[0].message.content)
|
||||
|
||||
def _call_gemini(self, prompt: str) -> Dict[str, Any]:
|
||||
response = self.client.models.generate_content(
|
||||
model=self.model_name,
|
||||
contents=prompt,
|
||||
config=genai.types.GenerateContentConfig(
|
||||
response_mime_type="application/json"
|
||||
),
|
||||
)
|
||||
return self._clean_and_parse_json(response.text)
|
||||
|
||||
def _build_prompt(self, clean_stats: Dict[str, Any]) -> str:
|
||||
return f"""Analyze this Spotify listening data and generate a personalized report.
|
||||
|
||||
**RULES:**
|
||||
1. NO mental health diagnoses. Use behavioral descriptors only.
|
||||
2. Be specific - reference actual metrics from the data.
|
||||
3. Be playful but not cruel.
|
||||
4. Return ONLY valid JSON.
|
||||
|
||||
**DATA:**
|
||||
{json.dumps(clean_stats, indent=2)}
|
||||
|
||||
**REQUIRED JSON:**
|
||||
{{
|
||||
"vibe_check_short": "1-2 sentence hook for the hero banner.",
|
||||
"vibe_check": "2-3 paragraphs describing their overall listening personality.",
|
||||
"patterns": ["Observation 1", "Observation 2", "Observation 3"],
|
||||
"persona": "A creative label (e.g., 'The Genre Chameleon').",
|
||||
"era_insight": "Comment on Musical Age ({clean_stats.get("era", {}).get("musical_age", "N/A")}).",
|
||||
"roast": "1-2 sentence playful roast.",
|
||||
"comparison": "Compare to previous period if data exists."
|
||||
}}"""
|
||||
|
||||
def _shape_payload(self, stats: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
Compresses the stats JSON to save tokens and focus the LLM.
|
||||
Removes raw lists beyond top 5/10.
|
||||
"""
|
||||
s = stats.copy()
|
||||
|
||||
# Simplify Volume
|
||||
|
||||
if "volume" in s:
|
||||
s["volume"] = {
|
||||
k: v for k, v in s["volume"].items()
|
||||
volume_copy = {
|
||||
k: v
|
||||
for k, v in s["volume"].items()
|
||||
if k not in ["top_tracks", "top_artists", "top_albums", "top_genres"]
|
||||
}
|
||||
# Add back condensed top lists (just names)
|
||||
s["volume"]["top_tracks"] = [t["name"] for t in stats["volume"].get("top_tracks", [])[:5]]
|
||||
s["volume"]["top_artists"] = [a["name"] for a in stats["volume"].get("top_artists", [])[:5]]
|
||||
s["volume"]["top_genres"] = [g["name"] for g in stats["volume"].get("top_genres", [])[:5]]
|
||||
volume_copy["top_tracks"] = [
|
||||
t["name"] for t in stats["volume"].get("top_tracks", [])[:5]
|
||||
]
|
||||
volume_copy["top_artists"] = [
|
||||
a["name"] for a in stats["volume"].get("top_artists", [])[:5]
|
||||
]
|
||||
volume_copy["top_genres"] = [
|
||||
g["name"] for g in stats["volume"].get("top_genres", [])[:5]
|
||||
]
|
||||
s["volume"] = volume_copy
|
||||
|
||||
if "time_habits" in s:
|
||||
s["time_habits"] = {
|
||||
k: v for k, v in s["time_habits"].items() if k != "heatmap"
|
||||
}
|
||||
|
||||
if "sessions" in s:
|
||||
s["sessions"] = {
|
||||
k: v for k, v in s["sessions"].items() if k != "session_list"
|
||||
}
|
||||
|
||||
# Simplify Time (Keep distributions but maybe round them?)
|
||||
# Keeping hourly/daily is fine, they are small arrays.
|
||||
|
||||
# Simplify Vibe (Remove huge transition arrays if they accidentally leaked, though stats service handles this)
|
||||
|
||||
# Remove period details if verbose
|
||||
return s
|
||||
|
||||
def _clean_and_parse_json(self, raw_text: str) -> Dict[str, Any]:
|
||||
"""
|
||||
Robust JSON extractor.
|
||||
"""
|
||||
try:
|
||||
# 1. Try direct parse
|
||||
return json.loads(raw_text)
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
# 2. Extract between first { and last }
|
||||
try:
|
||||
match = re.search(r"\{.*\}", raw_text, re.DOTALL)
|
||||
if match:
|
||||
@@ -107,16 +160,11 @@ Your goal is to generate a JSON report that acts as a deeper, more honest "Spoti
|
||||
|
||||
def _get_fallback_narrative(self) -> Dict[str, Any]:
|
||||
return {
|
||||
"vibe_check": "Data processing error. You're too mysterious for us to analyze right now.",
|
||||
"vibe_check_short": "Your taste is... interesting.",
|
||||
"vibe_check": "Data processing error. You're too mysterious to analyze right now.",
|
||||
"patterns": [],
|
||||
"persona": "The Enigma",
|
||||
"era_insight": "Time is a flat circle.",
|
||||
"roast": "You broke the machine. Congratulations.",
|
||||
"comparison": "N/A"
|
||||
"comparison": "N/A",
|
||||
}
|
||||
|
||||
# Individual accessors if needed by frontend, though full_narrative is preferred
|
||||
def generate_vibe_check(self, stats): return self.generate_full_narrative(stats).get("vibe_check")
|
||||
def identify_patterns(self, stats): return self.generate_full_narrative(stats).get("patterns")
|
||||
def generate_persona(self, stats): return self.generate_full_narrative(stats).get("persona")
|
||||
def generate_roast(self, stats): return self.generate_full_narrative(stats).get("roast")
|
||||
Reference in New Issue
Block a user