feat: implement AI-curated playlist service and dashboard integration

- Added hierarchical AGENTS.md knowledge base
- Implemented PlaylistService with 6h themed and 24h devotion mix logic
- Integrated AI theme generation for 6h playlists via Gemini/OpenAI
- Added /playlists/refresh and metadata endpoints to API
- Updated background worker with scheduled playlist curation
- Created frontend PlaylistsSection and Tooltip components and integrated them into the Dashboard
- Added Alembic migration for playlist tracking columns
- Fixed Docker healthcheck by installing curl in the image
This commit is contained in:
bnair123
2025-12-30 09:45:19 +04:00
parent fa28b98c1a
commit 93e7c13f3d
18 changed files with 1037 additions and 295 deletions

View File

@@ -62,6 +62,78 @@ class NarrativeService:
return self._get_fallback_narrative()
def generate_playlist_theme(self, listening_data: Dict[str, Any]) -> Dict[str, Any]:
"""Generate playlist theme based on daily listening patterns."""
if not self.client:
return self._get_fallback_theme()
prompt = self._build_theme_prompt(listening_data)
try:
if self.provider == "openai":
return self._call_openai_for_theme(prompt)
elif self.provider == "gemini":
return self._call_gemini_for_theme(prompt)
except Exception as e:
print(f"Theme generation error: {e}")
return self._get_fallback_theme()
return self._get_fallback_theme()
def _call_openai_for_theme(self, prompt: str) -> Dict[str, Any]:
    """Request a themed-playlist JSON object from the OpenAI chat API."""
    system_message = {
        "role": "system",
        "content": "You are a specialized music curator. Output only valid JSON.",
    }
    user_message = {"role": "user", "content": prompt}
    # response_format forces JSON mode so the reply parses reliably.
    completion = self.client.chat.completions.create(
        model=self.model_name,
        messages=[system_message, user_message],
        response_format={"type": "json_object"},
    )
    raw_content = completion.choices[0].message.content
    return self._clean_and_parse_json(raw_content)
def _call_gemini_for_theme(self, prompt: str) -> Dict[str, Any]:
    """Request a themed-playlist JSON object from the Gemini API."""
    # Ask Gemini for a JSON MIME type so the reply is machine-parseable.
    json_config = genai.types.GenerateContentConfig(
        response_mime_type="application/json"
    )
    result = self.client.models.generate_content(
        model=self.model_name,
        contents=prompt,
        config=json_config,
    )
    return self._clean_and_parse_json(result.text)
def _build_theme_prompt(self, data: Dict[str, Any]) -> str:
return f"""Analyze this listening data from the last 6 hours and curate a specific "themed" playlist.
**DATA:**
- Peak hour: {data.get("peak_hour")}
- Avg energy: {data.get("avg_energy"):.2f}
- Avg valence: {data.get("avg_valence"):.2f}
- Top artists: {", ".join([a["name"] for a in data.get("top_artists", [])])}
- Total plays: {data.get("total_plays")}
**RULES:**
1. Create a "theme_name" (e.g. "Morning Coffee Jazz", "Midnight Deep Work").
2. Provide a "description" (2-3 sentences explaining why).
3. Identify 10-15 "curated_tracks" (song names only) that fit this vibe and the artists listed.
4. Return ONLY valid JSON.
**REQUIRED JSON:**
{{
"theme_name": "String",
"description": "String",
"curated_tracks": ["Track 1", "Track 2", ...]
}}"""
def _get_fallback_theme(self) -> Dict[str, Any]:
return {
"theme_name": "Daily Mix",
"description": "A curated mix of your recent favorites.",
"curated_tracks": [],
}
def _call_openai(self, prompt: str) -> Dict[str, Any]:
response = self.client.chat.completions.create(
model=self.model_name,
@@ -88,6 +160,31 @@ class NarrativeService:
return self._clean_and_parse_json(response.text)
def _build_prompt(self, clean_stats: Dict[str, Any]) -> str:
volume = clean_stats.get("volume", {})
concentration = volume.get("concentration", {})
time_habits = clean_stats.get("time_habits", {})
vibe = clean_stats.get("vibe", {})
peak_hour = time_habits.get("peak_hour")
if isinstance(peak_hour, int):
peak_listening = f"{peak_hour}:00"
else:
peak_listening = peak_hour or "N/A"
concentration_score = (
round(concentration.get("hhi", 0), 3)
if concentration and concentration.get("hhi") is not None
else "N/A"
)
playlist_diversity = (
round(1 - concentration.get("hhi", 0), 3)
if concentration and concentration.get("hhi") is not None
else "N/A"
)
avg_energy = vibe.get("avg_energy", 0)
avg_valence = vibe.get("avg_valence", 0)
top_artists = volume.get("top_artists", [])
top_artists_str = ", ".join(top_artists) if top_artists else "N/A"
era_label = clean_stats.get("era", {}).get("musical_age", "N/A")
return f"""Analyze this Spotify listening data and generate a personalized report.
**RULES:**
@@ -96,6 +193,14 @@ class NarrativeService:
3. Be playful but not cruel.
4. Return ONLY valid JSON.
**LISTENING HIGHLIGHTS:**
- Peak listening: {peak_listening}
- Concentration score: {concentration_score}
- Playlist diversity: {playlist_diversity}
- Average energy: {avg_energy:.2f}
- Average valence: {avg_valence:.2f}
- Top artists: {top_artists_str}
**DATA:**
{json.dumps(clean_stats, indent=2)}
@@ -105,7 +210,7 @@ class NarrativeService:
"vibe_check": "2-3 paragraphs describing their overall listening personality.",
"patterns": ["Observation 1", "Observation 2", "Observation 3"],
"persona": "A creative label (e.g., 'The Genre Chameleon').",
"era_insight": "Comment on Musical Age ({clean_stats.get("era", {}).get("musical_age", "N/A")}).",
"era_insight": "Comment on Musical Age ({era_label}).",
"roast": "1-2 sentence playful roast.",
"comparison": "Compare to previous period if data exists."
}}"""