mirror of
https://github.com/bnair123/MusicAnalyser.git
synced 2026-02-25 11:46:07 +00:00
- Migrate database from SQLite to PostgreSQL (100.91.248.114:5433)
- Fix playlist curation to use actual top tracks instead of AI name matching
- Add /playlists/history endpoint for historical playlist viewing
- Add Playlist Archives section to frontend with expandable history
- Add playlist-modify-* scopes to Spotify OAuth for playlist creation
- Rewrite Genius client to use official API (fixes 403 scraping blocks)
- Ensure playlists are created on Spotify before curation attempts
- Add DATABASE.md documentation for PostgreSQL schema
- Add migrations for PlaylistConfig and composition storage
127 lines
4.2 KiB
Python
127 lines
4.2 KiB
Python
import pytest
|
|
from unittest.mock import Mock, AsyncMock, MagicMock
|
|
from datetime import datetime
|
|
from app.services.playlist_service import PlaylistService
|
|
from app.models import PlaylistConfig, Track
|
|
|
|
|
|
@pytest.fixture
def mock_db():
    """MagicMock standing in for a SQLAlchemy-style session.

    By default every query(...).filter(...).first() chain finds nothing,
    i.e. the database looks empty.
    """
    db_session = MagicMock()
    db_session.query.return_value.filter.return_value.first.return_value = None
    return db_session


@pytest.fixture
def mock_spotify():
    """Async Spotify client stub: playlist creation succeeds, library is empty."""
    spotify = AsyncMock()
    spotify.create_playlist.return_value = {"id": "new_playlist_id"}
    spotify.get_tracks.return_value = []
    return spotify


@pytest.fixture
def mock_recco():
    """Bare async recommendation-client stub with no canned behaviour."""
    return AsyncMock()


@pytest.fixture
def mock_narrative():
    """Narrative service stub whose theme generation returns a fixed payload."""
    narrative = Mock()
    theme_payload = {
        "theme_name": "Test Theme",
        "description": "Test Description",
        "curated_tracks": [],
    }
    narrative.generate_playlist_theme.return_value = theme_payload
    return narrative


@pytest.fixture
def playlist_service(mock_db, mock_spotify, mock_recco, mock_narrative):
    """PlaylistService under test, wired to the mock collaborators above."""
    service = PlaylistService(mock_db, mock_spotify, mock_recco, mock_narrative)
    return service


@pytest.mark.asyncio
async def test_ensure_playlists_exist_creates_new(
    playlist_service, mock_db, mock_spotify
):
    """With no stored configs, both playlists are created and persisted."""
    # Database lookups find nothing (env-var fallbacks assumed empty/mocked).
    mock_db.query.return_value.filter.return_value.first.return_value = None

    result = await playlist_service.ensure_playlists_exist("user123")

    # Both slots resolve to the freshly created Spotify playlist.
    assert result["six_hour_id"] == "new_playlist_id"
    assert result["daily_id"] == "new_playlist_id"
    assert mock_spotify.create_playlist.call_count == 2

    # Each new playlist config is added and committed individually.
    assert mock_db.add.call_count == 2
    assert mock_db.commit.call_count == 2


@pytest.mark.asyncio
async def test_ensure_playlists_exist_loads_from_db(
    playlist_service, mock_db, mock_spotify
):
    """Stored configs are reused, so no Spotify playlists are created."""
    stored_six = PlaylistConfig(key="six_hour", spotify_id="db_six_id")
    stored_daily = PlaylistConfig(key="daily", spotify_id="db_daily_id")

    # The service issues two query(...).filter(...).first() lookups; feed the
    # stored rows back in call order (six_hour first, then daily).
    # NOTE(review): this leans on sequential execution order inside
    # ensure_playlists_exist — fragile, but acceptable for a unit test.
    filter_mock = mock_db.query.return_value.filter
    filter_mock.side_effect = None  # clear any side_effect set elsewhere
    filter_mock.return_value.first.side_effect = [stored_six, stored_daily]

    result = await playlist_service.ensure_playlists_exist("user123")

    assert result["six_hour_id"] == "db_six_id"
    assert result["daily_id"] == "db_daily_id"
    mock_spotify.create_playlist.assert_not_called()


def test_optimize_playlist_flow(playlist_service):
    """_optimize_playlist_flow arranges tracks as low -> high -> medium energy.

    Sorted by energy: 2(0.2), 5(0.3), 3(0.5), 1(0.8), 4(0.9). With 5 tracks,
    the low cut is int(5 * 0.3) = 1 -> low block [2]; the high cut is
    int(5 * 0.7) = 3 -> high block [1, 4]; medium block [5, 3]. Concatenating
    low + high + medium yields ids 2, 1, 4, 5, 3
    (energies 0.2, 0.8, 0.9, 0.3, 0.5).
    """
    tracks = [
        {"id": "1", "energy": 0.8},  # high
        {"id": "2", "energy": 0.2},  # low
        {"id": "3", "energy": 0.5},  # medium
        {"id": "4", "energy": 0.9},  # high
        {"id": "5", "energy": 0.3},  # low
    ]

    optimized = playlist_service._optimize_playlist_flow(tracks)

    assert [track["id"] for track in optimized] == ["2", "1", "4", "5", "3"]
    assert optimized[0]["energy"] == 0.2
    assert optimized[1]["energy"] == 0.8