test: add integration tests with real media for music video pipeline

Build real PNG, WAV, and MP4 fixtures (no AI models) and exercise the
full assembler and Creative Director pipeline end-to-end. Fix the MoviePy v2
crossfade API (vfx.CrossFadeIn) and font resolution (DejaVu Sans).

14 new integration tests — 638 total, all passing.

https://claude.ai/code/session_01KJm6jQkNi3aA3yoQJn636c
This commit is contained in:
Claude
2026-02-24 16:48:14 +00:00
parent 1103da339c
commit b098b00959
6 changed files with 905 additions and 4 deletions

View File

@@ -91,7 +91,7 @@ include = [
[tool.pytest.ini_options]
testpaths = ["tests"]
pythonpath = ["src"]
pythonpath = ["src", "tests"]
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "function"
addopts = "-v --tb=short"

View File

@@ -23,10 +23,14 @@ try:
CompositeVideoClip,
ImageClip,
concatenate_videoclips,
vfx,
)
except ImportError:
_MOVIEPY_AVAILABLE = False
# Resolve a font that actually exists on this system.
_DEFAULT_FONT = "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"
def _require_moviepy() -> None:
if not _MOVIEPY_AVAILABLE:
@@ -68,7 +72,7 @@ def stitch_clips(
for clip in clips[1:]:
clip = clip.with_start(
processed[-1].end - transition_duration
).crossfadein(transition_duration)
).with_effects([vfx.CrossFadeIn(transition_duration)])
processed.append(clip)
final = CompositeVideoClip(processed)
else:
@@ -163,7 +167,7 @@ def add_title_card(
color="white",
size=(w, h),
method="caption",
font="Arial",
font=_DEFAULT_FONT,
).with_duration(duration)
clips = [txt, video] if position == "start" else [video, txt]
@@ -213,7 +217,7 @@ def add_subtitles(
stroke_width=2,
size=(w - 40, None),
method="caption",
font="Arial",
font=_DEFAULT_FONT,
)
.with_start(cap["start"])
.with_end(cap["end"])

0
tests/fixtures/__init__.py vendored Normal file
View File

178
tests/fixtures/media.py vendored Normal file
View File

@@ -0,0 +1,178 @@
"""Real media file fixtures for integration tests.
Generates actual PNG images, WAV audio files, and MP4 video clips
using numpy, Pillow, and MoviePy — no AI models required.
"""
from __future__ import annotations
import wave
from pathlib import Path
import numpy as np
from PIL import Image, ImageDraw
# ── Color palettes for visual variety ─────────────────────────────────────────
# RGB triples cycled by scene index (see make_storyboard / make_scene_clips)
# so adjacent scenes get visually distinct backgrounds in generated media.
SCENE_COLORS = [
    (30, 60, 120),   # dark blue — "night sky"
    (200, 100, 30),  # warm orange — "sunrise"
    (50, 150, 50),   # forest green — "mountain forest"
    (20, 120, 180),  # teal blue — "river"
    (180, 60, 60),   # crimson — "sunset"
    (40, 40, 80),    # deep purple — "twilight"
]
def make_storyboard_frame(
    path: Path,
    label: str,
    color: tuple[int, int, int] = (60, 60, 60),
    width: int = 320,
    height: int = 180,
) -> Path:
    """Render a real PNG storyboard frame: solid background, centered
    white label, and a 2px white border.

    Returns the path to the written file.
    """
    canvas = Image.new("RGB", (width, height), color=color)
    pen = ImageDraw.Draw(canvas)
    # Center the label using its rendered bounding box.
    left, top, right, bottom = pen.textbbox((0, 0), label)
    origin = ((width - (right - left)) // 2, (height - (bottom - top)) // 2)
    pen.text(origin, label, fill=(255, 255, 255))
    # The border makes individual frames easy to tell apart visually.
    pen.rectangle([2, 2, width - 3, height - 3], outline=(255, 255, 255), width=2)
    path.parent.mkdir(parents=True, exist_ok=True)
    canvas.save(path)
    return path
def make_storyboard(
    output_dir: Path,
    scene_labels: list[str],
    width: int = 320,
    height: int = 180,
) -> list[Path]:
    """Generate a full storyboard — one PNG per scene.

    Colors are cycled from SCENE_COLORS by scene index.
    """
    return [
        make_storyboard_frame(
            output_dir / f"frame_{idx:03d}.png",
            text,
            color=SCENE_COLORS[idx % len(SCENE_COLORS)],
            width=width,
            height=height,
        )
        for idx, text in enumerate(scene_labels)
    ]
def make_audio_track(
    path: Path,
    duration_seconds: float = 10.0,
    sample_rate: int = 44100,
    frequency: float = 440.0,
    fade_in: float = 0.5,
    fade_out: float = 0.5,
) -> Path:
    """Create a real WAV audio file — a sine wave tone with fade in/out.

    The tone carries a second harmonic at 30% amplitude for a richer
    timbre, is linearly faded in and out, normalized to 80% of int16
    full scale, and written as 16-bit mono PCM.  Good enough to verify
    audio overlay, mixing, and codec encoding.

    Args:
        path: Destination file; parent directories are created as needed.
        duration_seconds: Length of the track in seconds.
        sample_rate: Samples per second.
        frequency: Fundamental frequency in Hz.
        fade_in: Fade-in duration in seconds (clamped to track length).
        fade_out: Fade-out duration in seconds (clamped to track length).

    Returns:
        The path to the written file.
    """
    n_samples = int(sample_rate * duration_seconds)
    t = np.linspace(0, duration_seconds, n_samples, endpoint=False)
    signal = np.sin(2 * np.pi * frequency * t)
    # Add a second harmonic for richness
    signal += 0.3 * np.sin(2 * np.pi * frequency * 2 * t)
    # Clamp fade windows to the signal length so a fade longer than the
    # track (e.g. fade_in=5 on a 0.1s tone) can't raise a broadcast error.
    fade_in_samples = min(int(sample_rate * fade_in), n_samples)
    fade_out_samples = min(int(sample_rate * fade_out), n_samples)
    if fade_in_samples > 0:
        signal[:fade_in_samples] *= np.linspace(0, 1, fade_in_samples)
    if fade_out_samples > 0:
        signal[-fade_out_samples:] *= np.linspace(1, 0, fade_out_samples)
    # Normalize to 80% full scale; guard against an all-zero signal
    # (frequency=0) or an empty one (duration=0), which would otherwise
    # divide by zero / call np.max on an empty array.
    peak = float(np.max(np.abs(signal))) if n_samples else 0.0
    if peak > 0:
        signal = signal / peak * 32767 * 0.8
    pcm = signal.astype(np.int16)
    path.parent.mkdir(parents=True, exist_ok=True)
    with wave.open(str(path), "w") as wf:
        wf.setnchannels(1)
        wf.setsampwidth(2)  # 2 bytes == 16-bit samples
        wf.setframerate(sample_rate)
        wf.writeframes(pcm.tobytes())
    return path
def make_video_clip(
    path: Path,
    duration_seconds: float = 3.0,
    fps: int = 12,
    width: int = 320,
    height: int = 180,
    color_start: tuple[int, int, int] = (30, 30, 80),
    color_end: tuple[int, int, int] = (80, 30, 30),
    label: str = "",
) -> Path:
    """Create a real MP4 video clip with a color gradient animation.

    Frames transition smoothly from ``color_start`` to ``color_end`` so
    the result is a visible animation that's easy to verify by eye.
    """
    from moviepy import ImageSequenceClip

    total = int(duration_seconds * fps)
    rendered = []
    for frame_idx in range(total):
        # Interpolation factor in [0, 1]; max() guards the 1-frame case.
        mix = frame_idx / max(1, total - 1)
        shade = tuple(
            int(lo + (hi - lo) * mix)
            for lo, hi in zip(color_start, color_end)
        )
        frame = Image.new("RGB", (width, height), color=shade)
        if label:
            pen = ImageDraw.Draw(frame)
            pen.text((10, 10), label, fill=(255, 255, 255))
            # Frame counter in the corner aids visual inspection.
            pen.text((10, height - 20), f"f{frame_idx}/{total}", fill=(200, 200, 200))
        rendered.append(np.array(frame))
    path.parent.mkdir(parents=True, exist_ok=True)
    movie = ImageSequenceClip(rendered, fps=fps)
    movie.write_videofile(str(path), codec="libx264", audio=False, logger=None)
    return path
def make_scene_clips(
    output_dir: Path,
    scene_labels: list[str],
    duration_per_clip: float = 3.0,
    fps: int = 12,
    width: int = 320,
    height: int = 180,
) -> list[Path]:
    """Generate one video clip per scene, each with a distinct color animation."""
    palette_size = len(SCENE_COLORS)
    paths: list[Path] = []
    for idx, text in enumerate(scene_labels):
        target = output_dir / f"clip_{idx:03d}.mp4"
        make_video_clip(
            target,
            duration_seconds=duration_per_clip,
            fps=fps,
            width=width,
            height=height,
            # Each clip blends from its own palette color toward the next.
            color_start=SCENE_COLORS[idx % palette_size],
            color_end=SCENE_COLORS[(idx + 1) % palette_size],
            label=text,
        )
        paths.append(target)
    return paths

View File

@@ -0,0 +1,275 @@
"""Integration tests for creative.assembler — real files, no mocks.
Every test creates actual media files (PNG, WAV, MP4), runs them through
the assembler functions, and inspects the output with MoviePy / Pillow.
"""
import pytest
from pathlib import Path
from moviepy import VideoFileClip, AudioFileClip
from creative.assembler import (
stitch_clips,
overlay_audio,
add_title_card,
add_subtitles,
export_final,
)
from fixtures.media import (
make_audio_track,
make_video_clip,
make_scene_clips,
)
# ── Fixtures ──────────────────────────────────────────────────────────────────
@pytest.fixture
def media_dir(tmp_path):
    """Per-test scratch directory for generated media files."""
    scratch = tmp_path / "media"
    scratch.mkdir()
    return scratch
@pytest.fixture
def two_clips(media_dir):
    """Two real 3-second MP4 clips."""
    labels = ["Scene A", "Scene B"]
    return make_scene_clips(
        media_dir,
        labels,
        duration_per_clip=3.0,
        fps=12,
        width=320,
        height=180,
    )
@pytest.fixture
def five_clips(media_dir):
    """Five real 2-second MP4 clips — enough for a short video."""
    labels = ["Dawn", "Sunrise", "Mountains", "River", "Sunset"]
    return make_scene_clips(
        media_dir,
        labels,
        duration_per_clip=2.0,
        fps=12,
        width=320,
        height=180,
    )
@pytest.fixture
def audio_10s(media_dir):
    """A real 10-second WAV audio track."""
    target = media_dir / "track.wav"
    return make_audio_track(target, duration_seconds=10.0)
@pytest.fixture
def audio_30s(media_dir):
    """A real 30-second WAV audio track (lower pitch than audio_10s)."""
    target = media_dir / "track_long.wav"
    return make_audio_track(target, duration_seconds=30.0, frequency=330.0)
# ── Stitch clips ─────────────────────────────────────────────────────────────
class TestStitchClipsReal:
    """Stitch real MP4 clips and inspect the output with MoviePy."""

    def test_stitch_two_clips_no_transition(self, two_clips, tmp_path):
        """Stitching 2 x 3s clips → ~6s video."""
        target = tmp_path / "stitched.mp4"
        outcome = stitch_clips(
            [str(c) for c in two_clips],
            transition_duration=0,
            output_path=str(target),
        )
        assert outcome["success"]
        assert outcome["clip_count"] == 2
        assert target.exists()
        # More than a kilobyte means real encoded frames, not a stub.
        assert target.stat().st_size > 1000
        rendered = VideoFileClip(str(target))
        assert rendered.duration == pytest.approx(6.0, abs=0.5)
        assert rendered.size == [320, 180]
        rendered.close()

    def test_stitch_with_crossfade(self, two_clips, tmp_path):
        """Cross-fade transition shortens total duration."""
        target = tmp_path / "crossfade.mp4"
        outcome = stitch_clips(
            [str(c) for c in two_clips],
            transition_duration=1.0,
            output_path=str(target),
        )
        assert outcome["success"]
        rendered = VideoFileClip(str(target))
        # Two 3s clips overlapping by 1s → roughly 5s total.
        assert rendered.duration == pytest.approx(5.0, abs=1.0)
        rendered.close()

    def test_stitch_five_clips(self, five_clips, tmp_path):
        """Stitch 5 clips → continuous video with correct frame count."""
        target = tmp_path / "five.mp4"
        outcome = stitch_clips(
            [str(c) for c in five_clips],
            transition_duration=0.5,
            output_path=str(target),
        )
        assert outcome["success"]
        assert outcome["clip_count"] == 5
        rendered = VideoFileClip(str(target))
        # 5 x 2s minus 4 x 0.5s of overlap = 8s.
        assert rendered.duration >= 7.0
        assert rendered.size == [320, 180]
        rendered.close()
# ── Audio overlay ─────────────────────────────────────────────────────────────
class TestOverlayAudioReal:
    """Overlay real WAV audio onto silent stitched videos."""

    def _stitch_silent(self, clips, out_path):
        """Stitch clips with no transition into out_path (no audio)."""
        stitch_clips(
            [str(c) for c in clips],
            transition_duration=0,
            output_path=str(out_path),
        )

    def test_overlay_adds_audio_stream(self, two_clips, audio_10s, tmp_path):
        """Overlaying audio onto a silent video produces audible output."""
        silent = tmp_path / "silent.mp4"
        self._stitch_silent(two_clips, silent)
        target = tmp_path / "with_audio.mp4"
        outcome = overlay_audio(str(silent), str(audio_10s), output_path=str(target))
        assert outcome["success"]
        assert target.exists()
        rendered = VideoFileClip(str(target))
        assert rendered.audio is not None  # audio stream attached
        assert rendered.duration == pytest.approx(6.0, abs=0.5)
        rendered.close()

    def test_audio_trimmed_to_video_length(self, two_clips, audio_30s, tmp_path):
        """30s audio track is trimmed to match ~6s video duration."""
        silent = tmp_path / "short.mp4"
        self._stitch_silent(two_clips, silent)
        target = tmp_path / "trimmed.mp4"
        outcome = overlay_audio(str(silent), str(audio_30s), output_path=str(target))
        assert outcome["success"]
        rendered = VideoFileClip(str(target))
        # Output length follows the video, not the 30s soundtrack.
        assert rendered.duration < 10.0
        rendered.close()
# ── Title cards ───────────────────────────────────────────────────────────────
class TestAddTitleCardReal:
    """Prepend/append real title cards and verify durations."""

    def test_prepend_title_card(self, two_clips, tmp_path):
        """Title card at start adds to total duration."""
        base = tmp_path / "base.mp4"
        stitch_clips(
            [str(c) for c in two_clips],
            transition_duration=0,
            output_path=str(base),
        )
        # Measure the base duration first so the assertion is relative.
        probe = VideoFileClip(str(base))
        base_duration = probe.duration
        probe.close()
        target = tmp_path / "titled.mp4"
        outcome = add_title_card(
            str(base),
            title="My Music Video",
            duration=3.0,
            position="start",
            output_path=str(target),
        )
        assert outcome["success"]
        assert outcome["title"] == "My Music Video"
        rendered = VideoFileClip(str(target))
        # 3s card plus the ~6s base video.
        assert rendered.duration == pytest.approx(base_duration + 3.0, abs=1.0)
        rendered.close()

    def test_append_credits(self, two_clips, tmp_path):
        """Credits card at end adds to total duration."""
        source = str(two_clips[0])  # single 3s clip
        target = tmp_path / "credits.mp4"
        outcome = add_title_card(
            source,
            title="THE END",
            duration=2.0,
            position="end",
            output_path=str(target),
        )
        assert outcome["success"]
        rendered = VideoFileClip(str(target))
        # 3s clip followed by a 2s card ≈ 5s.
        assert rendered.duration == pytest.approx(5.0, abs=1.0)
        rendered.close()
# ── Subtitles ─────────────────────────────────────────────────────────────────
class TestAddSubtitlesReal:
    """Burn real captions into a stitched video."""

    def test_burn_captions(self, two_clips, tmp_path):
        """Subtitles are burned onto the video (duration unchanged)."""
        base = tmp_path / "base.mp4"
        stitch_clips(
            [str(c) for c in two_clips],
            transition_duration=0,
            output_path=str(base),
        )
        captions = [
            {"text": "Welcome to the show", "start": 0.0, "end": 2.0},
            {"text": "Here we go!", "start": 2.5, "end": 4.5},
            {"text": "Finale", "start": 5.0, "end": 6.0},
        ]
        target = tmp_path / "subtitled.mp4"
        outcome = add_subtitles(str(base), captions, output_path=str(target))
        assert outcome["success"]
        assert outcome["caption_count"] == 3
        rendered = VideoFileClip(str(target))
        # Burning subtitles must not change the timeline.
        assert rendered.duration == pytest.approx(6.0, abs=0.5)
        assert rendered.size == [320, 180]
        rendered.close()
# ── Export final ──────────────────────────────────────────────────────────────
class TestExportFinalReal:
    """Re-encode a clip with explicit codec settings and verify."""

    def test_reencodes_video(self, two_clips, tmp_path):
        """Final export produces a valid re-encoded file."""
        source = str(two_clips[0])
        target = tmp_path / "final.mp4"
        outcome = export_final(
            source,
            output_path=str(target),
            codec="libx264",
            bitrate="2000k",
        )
        assert outcome["success"]
        assert outcome["codec"] == "libx264"
        assert target.exists()
        assert target.stat().st_size > 500
        rendered = VideoFileClip(str(target))
        assert rendered.duration == pytest.approx(3.0, abs=0.5)
        rendered.close()

View File

@@ -0,0 +1,444 @@
"""Integration test: end-to-end music video pipeline with real media files.
Exercises the Creative Director pipeline and Assembler with genuine PNG,
WAV, and MP4 files. Only AI model inference is replaced with fixture
generators; all MoviePy / FFmpeg operations run for real.
The final output video is inspected for:
- Duration — correct within tolerance
- Resolution — 320x180 (fixture default)
- Audio stream — present
- File size — non-trivial (>10 kB)
"""
import pytest
from pathlib import Path
from unittest.mock import patch
from moviepy import VideoFileClip
from creative.director import (
create_project,
run_storyboard,
run_music,
run_video_generation,
run_assembly,
run_full_pipeline,
_projects,
)
from creative.assembler import (
stitch_clips,
overlay_audio,
add_title_card,
add_subtitles,
export_final,
)
from fixtures.media import (
make_storyboard,
make_audio_track,
make_video_clip,
make_scene_clips,
)
# ── Fixtures ──────────────────────────────────────────────────────────────────
# Five-scene creative brief shared by every pipeline test below.
# Each entry follows the scene schema the director consumes here:
# a free-text description plus a per-scene duration in seconds.
SCENES = [
    {"description": "Dawn breaks over misty mountains", "duration": 4},
    {"description": "A river carves through green valleys", "duration": 4},
    {"description": "Wildflowers sway in warm sunlight", "duration": 4},
    {"description": "Clouds gather as evening approaches", "duration": 4},
    {"description": "Stars emerge over a quiet lake", "duration": 4},
]
@pytest.fixture(autouse=True)
def clear_projects():
    """Reset the in-memory project store around every test."""
    # Drop any state leaked by a previous test.
    _projects.clear()
    yield
    # Leave a clean store for the next test.
    _projects.clear()
@pytest.fixture
def media_dir(tmp_path):
    """Per-test scratch directory for generated media."""
    scratch = tmp_path / "media"
    scratch.mkdir()
    return scratch
@pytest.fixture
def scene_defs():
    """Five-scene creative brief for a short music video."""
    # Shallow-copy each scene so a test can mutate its copy
    # without corrupting the shared SCENES constant.
    return [dict(scene) for scene in SCENES]
@pytest.fixture
def storyboard_frames(media_dir):
    """Real PNG storyboard frames for all scenes."""
    # Truncated descriptions serve as the on-frame labels.
    labels = [scene["description"][:20] for scene in SCENES]
    return make_storyboard(media_dir / "frames", labels, width=320, height=180)
@pytest.fixture
def audio_track(media_dir):
    """Real 25-second WAV audio track (440 Hz tone)."""
    return make_audio_track(
        media_dir / "soundtrack.wav", duration_seconds=25.0, frequency=440.0
    )
@pytest.fixture
def video_clips(media_dir):
    """Real 4-second MP4 clips, one per scene (~20s total)."""
    labels = [scene["description"][:20] for scene in SCENES]
    return make_scene_clips(
        media_dir / "clips",
        labels,
        duration_per_clip=4.0,
        fps=12,
        width=320,
        height=180,
    )
# ── Direct assembly (zero AI mocking) ───────────────────────────────────────
class TestMusicVideoAssembly:
    """Build a real music video from fixture clips + audio, inspect output."""

    def test_full_music_video(self, video_clips, audio_track, tmp_path):
        """Stitch 5 clips -> overlay audio -> title -> credits -> inspect."""
        # 1. Stitch with crossfade.
        stitched = tmp_path / "stitched.mp4"
        stitch_info = stitch_clips(
            [str(c) for c in video_clips],
            transition_duration=0.5,
            output_path=str(stitched),
        )
        assert stitch_info["success"]
        assert stitch_info["clip_count"] == 5
        # 2. Overlay the soundtrack.
        scored = tmp_path / "with_audio.mp4"
        audio_info = overlay_audio(
            str(stitched), str(audio_track), output_path=str(scored)
        )
        assert audio_info["success"]
        # 3. Title card at the start.
        titled = tmp_path / "titled.mp4"
        title_info = add_title_card(
            str(scored),
            title="Dawn to Dusk",
            duration=3.0,
            position="start",
            output_path=str(titled),
        )
        assert title_info["success"]
        # 4. Credits at the end.
        final_path = tmp_path / "final_music_video.mp4"
        credits_info = add_title_card(
            str(titled),
            title="THE END",
            duration=2.0,
            position="end",
            output_path=str(final_path),
        )
        assert credits_info["success"]
        # ── Inspect the final video ──────────────────────────────────────
        assert final_path.exists()
        assert final_path.stat().st_size > 10_000  # non-trivial file
        movie = VideoFileClip(str(final_path))
        # Duration: 5x4s - 4x0.5s crossfade = 18s body, +3s title +2s credits.
        expected_body = 5 * 4.0 - 4 * 0.5  # 18s
        expected_total = expected_body + 3.0 + 2.0  # 23s
        assert movie.duration >= 15.0  # floor sanity check
        assert movie.duration == pytest.approx(expected_total, abs=3.0)
        assert movie.size == [320, 180]
        assert movie.audio is not None
        movie.close()

    def test_with_subtitles(self, video_clips, audio_track, tmp_path):
        """Full video with burned-in captions."""
        # Stitch without transitions for predictable duration.
        stitched = tmp_path / "stitched.mp4"
        stitch_clips(
            [str(c) for c in video_clips],
            transition_duration=0,
            output_path=str(stitched),
        )
        scored = tmp_path / "with_audio.mp4"
        overlay_audio(str(stitched), str(audio_track), output_path=str(scored))
        # One caption per scene.
        captions = [
            {"text": "Dawn breaks over misty mountains", "start": 0.0, "end": 3.5},
            {"text": "A river carves through green valleys", "start": 4.0, "end": 7.5},
            {"text": "Wildflowers sway in warm sunlight", "start": 8.0, "end": 11.5},
            {"text": "Clouds gather as evening approaches", "start": 12.0, "end": 15.5},
            {"text": "Stars emerge over a quiet lake", "start": 16.0, "end": 19.5},
        ]
        final = tmp_path / "subtitled_video.mp4"
        outcome = add_subtitles(str(scored), captions, output_path=str(final))
        assert outcome["success"]
        assert outcome["caption_count"] == 5
        movie = VideoFileClip(str(final))
        # 5 x 4s with no crossfade = 20s total.
        assert movie.duration == pytest.approx(20.0, abs=1.0)
        assert movie.size == [320, 180]
        assert movie.audio is not None
        movie.close()

    def test_export_final_quality(self, video_clips, tmp_path):
        """Export with specific codec/bitrate and verify."""
        raw = tmp_path / "raw.mp4"
        stitch_clips(
            [str(c) for c in video_clips[:2]],
            transition_duration=0,
            output_path=str(raw),
        )
        final = tmp_path / "hq.mp4"
        outcome = export_final(
            str(raw),
            output_path=str(final),
            codec="libx264",
            bitrate="5000k",
        )
        assert outcome["success"]
        assert outcome["codec"] == "libx264"
        assert final.stat().st_size > 5000
        movie = VideoFileClip(str(final))
        # Two 4s clips back-to-back = 8s.
        assert movie.duration == pytest.approx(8.0, abs=1.0)
        movie.close()
# ── Creative Director pipeline (AI calls replaced with fixtures) ────────────
class TestCreativeDirectorPipeline:
    """Run the full director pipeline; only AI model inference is stubbed
    with real-file fixture generators. All assembly runs for real."""

    # NOTE(review): the patch targets below ("tools.image_tools...", etc.)
    # must match where the director module looks the names up — presumably
    # it imports the tool modules, not the bare functions; verify against
    # creative/director.py if a stub is silently bypassed.

    def _make_storyboard_stub(self, frames_dir):
        """Return a callable that produces real PNGs in tool-result format."""
        def stub(descriptions):
            # Real PNGs on disk; only the AI image model is skipped.
            frames = make_storyboard(
                frames_dir, descriptions, width=320, height=180,
            )
            # Mirror the tool's result schema: one entry per frame with
            # its path, index, and originating prompt.
            return {
                "success": True,
                "frame_count": len(frames),
                "frames": [
                    {"path": str(f), "scene_index": i, "prompt": descriptions[i]}
                    for i, f in enumerate(frames)
                ],
            }
        return stub

    def _make_song_stub(self, audio_dir):
        """Return a callable that produces a real WAV in tool-result format."""
        def stub(lyrics="", genre="pop", duration=60, title=""):
            # Cap at 25s so test audio stays cheap to generate/encode.
            path = make_audio_track(
                audio_dir / "song.wav",
                duration_seconds=min(duration, 25),
            )
            return {
                "success": True,
                "path": str(path),
                "genre": genre,
                "duration": min(duration, 25),
            }
        return stub

    def _make_video_stub(self, clips_dir):
        """Return a callable that produces real MP4s in tool-result format."""
        # Mutable cell so the closure can number successive clips uniquely.
        counter = [0]
        def stub(image_path=None, prompt="scene", duration=4, **kwargs):
            path = make_video_clip(
                clips_dir / f"gen_{counter[0]:03d}.mp4",
                duration_seconds=duration,
                fps=12, width=320, height=180,
                label=prompt[:20],
            )
            counter[0] += 1
            return {
                "success": True,
                "path": str(path),
                "duration": duration,
            }
        return stub

    def test_full_pipeline_end_to_end(self, scene_defs, tmp_path):
        """run_full_pipeline with real fixtures at every stage."""
        # Separate directories per stage keep generated artifacts inspectable.
        frames_dir = tmp_path / "frames"
        frames_dir.mkdir()
        audio_dir = tmp_path / "audio"
        audio_dir.mkdir()
        clips_dir = tmp_path / "clips"
        clips_dir.mkdir()
        assembly_dir = tmp_path / "assembly"
        assembly_dir.mkdir()
        # Stub only AI inference and persistence; every MoviePy/FFmpeg
        # operation inside the pipeline runs for real.
        with (
            patch("tools.image_tools.generate_storyboard",
                  side_effect=self._make_storyboard_stub(frames_dir)),
            patch("tools.music_tools.generate_song",
                  side_effect=self._make_song_stub(audio_dir)),
            patch("tools.video_tools.image_to_video",
                  side_effect=self._make_video_stub(clips_dir)),
            patch("tools.video_tools.generate_video_clip",
                  side_effect=self._make_video_stub(clips_dir)),
            patch("creative.director._project_dir",
                  return_value=tmp_path / "project"),
            patch("creative.director._save_project"),
            patch("creative.assembler._output_dir",
                  return_value=assembly_dir),
        ):
            result = run_full_pipeline(
                title="Integration Test Video",
                description="End-to-end pipeline test",
                scenes=scene_defs,
                lyrics="Test lyrics for the song",
                genre="rock",
            )
        assert result["success"], f"Pipeline failed: {result}"
        assert result["project_id"]
        assert result["final_video"] is not None
        assert result["project"]["status"] == "complete"
        assert result["project"]["has_final"] is True
        assert result["project"]["clip_count"] == 5
        # Inspect the final video on disk, not just the result dict.
        final_path = Path(result["final_video"]["path"])
        assert final_path.exists()
        assert final_path.stat().st_size > 5000
        video = VideoFileClip(str(final_path))
        # 5x4s clips - 4x1s crossfade = 16s body + 4s title card ~= 20s;
        # only a loose floor is asserted since transition defaults may vary.
        assert video.duration >= 10.0
        assert video.size == [320, 180]
        assert video.audio is not None
        video.close()

    def test_step_by_step_pipeline(self, scene_defs, tmp_path):
        """Run each pipeline step individually — mirrors manual usage."""
        frames_dir = tmp_path / "frames"
        frames_dir.mkdir()
        audio_dir = tmp_path / "audio"
        audio_dir.mkdir()
        clips_dir = tmp_path / "clips"
        clips_dir.mkdir()
        assembly_dir = tmp_path / "assembly"
        assembly_dir.mkdir()
        # 1. Create project (persistence stubbed; in-memory store is real).
        with (
            patch("creative.director._project_dir",
                  return_value=tmp_path / "proj"),
            patch("creative.director._save_project"),
        ):
            proj = create_project(
                "Step-by-Step Video",
                "Manual pipeline test",
                scenes=scene_defs,
                lyrics="Step by step, we build it all",
            )
            pid = proj["project"]["id"]
        assert proj["success"]
        # 2. Storyboard — real PNGs via the stub.
        with (
            patch("tools.image_tools.generate_storyboard",
                  side_effect=self._make_storyboard_stub(frames_dir)),
            patch("creative.director._save_project"),
        ):
            sb = run_storyboard(pid)
        assert sb["success"]
        assert sb["frame_count"] == 5
        # 3. Music — real WAV via the stub.
        with (
            patch("tools.music_tools.generate_song",
                  side_effect=self._make_song_stub(audio_dir)),
            patch("creative.director._save_project"),
        ):
            mus = run_music(pid, genre="electronic")
        assert mus["success"]
        assert mus["genre"] == "electronic"
        # Verify the audio file exists and is valid
        audio_path = Path(mus["path"])
        assert audio_path.exists()
        assert audio_path.stat().st_size > 1000
        # 4. Video generation (uses storyboard frames → image_to_video)
        with (
            patch("tools.video_tools.image_to_video",
                  side_effect=self._make_video_stub(clips_dir)),
            patch("creative.director._save_project"),
        ):
            vid = run_video_generation(pid)
        assert vid["success"]
        assert vid["clip_count"] == 5
        # Verify each clip exists
        for clip_info in vid["clips"]:
            clip_path = Path(clip_info["path"])
            assert clip_path.exists()
            assert clip_path.stat().st_size > 1000
        # 5. Assembly (all real MoviePy operations)
        with (
            patch("creative.director._save_project"),
            patch("creative.assembler._output_dir",
                  return_value=assembly_dir),
        ):
            asm = run_assembly(pid, transition_duration=0.5)
        assert asm["success"]
        # Inspect final output
        final_path = Path(asm["path"])
        assert final_path.exists()
        assert final_path.stat().st_size > 5000
        video = VideoFileClip(str(final_path))
        # 5x4s - 4x0.5s = 18s body, + title card ~= 22s; loose floor only.
        assert video.duration >= 10.0
        assert video.size == [320, 180]
        assert video.audio is not None
        video.close()
        # Verify project reached completion in the in-memory store.
        project = _projects[pid]
        assert project.status == "complete"
        assert project.final_video is not None
        assert len(project.video_clips) == 5
        assert len(project.storyboard_frames) == 5
        assert project.music_track is not None