Skip to content

Transitions

Transitions combine two videos with visual effects at the boundary.

Usage

from videopython.base import Video, FadeTransition, BlurTransition

video1 = Video.from_path("clip1.mp4")
video2 = Video.from_path("clip2.mp4")

# Fluent API (recommended)
combined = video1.transition_to(video2, FadeTransition(effect_time_seconds=1.5))

# With blur transition
combined = video1.transition_to(video2, BlurTransition(effect_time_seconds=1.0))

# Direct apply (alternative)
fade = FadeTransition(effect_time_seconds=1.5)
combined = fade.apply((video1, video2))

Requirements

Both videos must have the same dimensions and frame rate to be combined. Use .resize() and .resample_fps() first if needed.

Transition (Base Class)

Transition

Bases: ABC

Abstract class for Transitions on Videos.

To build a new transition, you need to implement the _apply abstractmethod.

Source code in src/videopython/base/transitions.py
class Transition(ABC):
    """Abstract base class for transitions between two videos.

    Concrete transitions implement the `_apply` abstract method; callers go
    through `apply`, which first verifies the two videos can be merged.
    """

    @final
    def apply(self, videos: tuple[Video, Video]) -> Video:
        """Check metadata compatibility, then delegate to `_apply`."""
        first, second = videos
        if not first.metadata.can_be_merged_with(second.metadata):
            raise IncompatibleVideoError("Videos have incompatible metadata and cannot be merged")
        return self._apply(videos)

    @abstractmethod
    def _apply(self, videos: tuple[Video, Video]) -> Video:
        """Combine the two videos; implemented by each concrete transition."""

    @abstractmethod
    def to_dict(self) -> dict[str, Any]:
        """Serialize this transition to a JSON-compatible dict."""

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "Transition":
        """Reconstruct a transition from its `to_dict` representation.

        Raises:
            ValueError: If `data` is not a dict with a known 'type' key.
        """
        if not isinstance(data, dict) or "type" not in data:
            raise ValueError("Transition dict must have a 'type' key")
        transition_type = data["type"]
        try:
            transition_cls = _TRANSITION_REGISTRY[transition_type]
        except KeyError:
            raise ValueError(f"Unknown transition type: '{transition_type}'") from None
        return transition_cls._from_dict(data)

    @classmethod
    @abstractmethod
    def _from_dict(cls, data: dict[str, Any]) -> "Transition":
        """Deserialize subclass-specific fields; implemented per transition."""

FadeTransition

FadeTransition

Bases: Transition

Cross-dissolve between two videos by blending overlapping frames.

Each video must be at least as long as the overlap duration.

Source code in src/videopython/base/transitions.py
class FadeTransition(Transition):
    """Cross-dissolve between two videos by blending overlapping frames.

    Each video must be at least as long as the overlap duration.
    """

    def __init__(self, effect_time_seconds: float):
        """Initialize fade transition.

        Args:
            effect_time_seconds: Seconds of overlap where the first video
                fades out while the second fades in. Must be positive.

        Raises:
            ValueError: If `effect_time_seconds` is not positive.
        """
        if effect_time_seconds <= 0:
            raise ValueError(f"effect_time_seconds must be positive, got {effect_time_seconds}")
        self.effect_time_seconds = effect_time_seconds

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a dict compatible with `Transition.from_dict`."""
        return {"type": "fade", "effect_time_seconds": self.effect_time_seconds}

    @classmethod
    def _from_dict(cls, data: dict[str, Any]) -> "FadeTransition":
        return cls(effect_time_seconds=data["effect_time_seconds"])

    def fade(self, frames1, frames2):
        """Blend two equal-length frame sequences with linear weights.

        The first output frame is entirely `frames1[0]` and the last is
        entirely `frames2[-1]`; frames in between are weighted averages.

        Args:
            frames1: Outgoing frames, shape (t, height, width, channels).
            frames2: Incoming frames, same shape as `frames1`.

        Returns:
            Blended frames as a uint8 array of the same shape.

        Raises:
            ValueError: If the sequences have different lengths.
        """
        if len(frames1) != len(frames2):
            raise ValueError(f"Frame sequences must have equal length, got {len(frames1)} and {len(frames2)}")
        t = len(frames1)
        if t == 1:
            # A single overlapping frame: the linear ramp below would divide
            # by zero, so blend the two frames equally instead. Promote to
            # uint16 first to avoid uint8 overflow in the sum.
            return ((frames1.astype(np.uint16) + frames2.astype(np.uint16)) // 2).astype(np.uint8)
        # Per-frame weights: frames1 ramps (t-1)..0 while frames2 ramps
        # 0..(t-1); dividing by (t-1) normalizes each pair to sum to 1.
        ramp = np.arange(t)[:, np.newaxis, np.newaxis, np.newaxis]
        transitioned_frames = (frames1 * (t - 1 - ramp) + frames2 * ramp) / (t - 1)
        return transitioned_frames.astype(np.uint8)

    def _apply(self, videos: tuple[Video, Video]) -> Video:
        video_fps = videos[0].fps
        for video in videos:
            if video.total_seconds < self.effect_time_seconds:
                raise InsufficientDurationError("Not enough space to make transition!")

        effect_time_fps = math.floor(self.effect_time_seconds * video_fps)
        if effect_time_fps < 1:
            # Guard against a zero-frame overlap: `frames[-0:]` would select
            # ALL frames of the first video instead of none.
            raise ValueError(
                f"effect_time_seconds={self.effect_time_seconds} is shorter than one frame at {video_fps} fps"
            )
        transition = self.fade(videos[0].frames[-effect_time_fps:], videos[1].frames[:effect_time_fps])

        faded_videos = Video.from_frames(
            np.concatenate(
                [
                    videos[0].frames[:-effect_time_fps],
                    transition,
                    videos[1].frames[effect_time_fps:],
                ],
                axis=0,
            ),
            fps=video_fps,
        )
        # Overlap the audio with a crossfade matching the visual overlap.
        faded_videos.audio = videos[0].audio.concat(videos[1].audio, crossfade=(effect_time_fps / video_fps))
        return faded_videos

__init__

__init__(effect_time_seconds: float)

Initialize fade transition.

Parameters:

Name Type Description Default
effect_time_seconds float

Seconds of overlap where the first video fades out while the second fades in.

required
Source code in src/videopython/base/transitions.py
def __init__(self, effect_time_seconds: float) -> None:
    """Initialize fade transition.

    Args:
        effect_time_seconds: Seconds of overlap where the first video
            fades out while the second fades in.
    """
    self.effect_time_seconds = effect_time_seconds

BlurTransition

BlurTransition

Bases: Transition

Transitions between two videos by blurring the first out and the second in.

Source code in src/videopython/base/transitions.py
class BlurTransition(Transition):
    """Transitions between two videos by blurring the first out and the second in.

    Unlike `FadeTransition`, the two videos do not overlap: the tail of the
    first video is blurred with increasing strength and the head of the second
    with decreasing strength, so total duration is the sum of both inputs.
    """

    def __init__(
        self, effect_time_seconds: float = 1.5, blur_iterations: int = 400, blur_kernel_size: tuple[int, int] = (11, 11)
    ):
        """Initialize blur transition.

        Args:
            effect_time_seconds: Duration of the blur transition in seconds.
                Must be positive.
            blur_iterations: Blur strength at the peak of the transition.
                Higher values make the mid-point more heavily blurred.
            blur_kernel_size: Gaussian kernel (width, height) in pixels.
                Both values must be positive odd numbers. Larger values
                spread the blur wider.

        Raises:
            ValueError: If `effect_time_seconds` is not positive, or either
                kernel dimension is not a positive odd number.
        """
        if effect_time_seconds <= 0:
            raise ValueError(f"effect_time_seconds must be positive, got {effect_time_seconds}")
        # Gaussian kernels require positive odd dimensions; fail fast here
        # rather than deep inside the blur effect at apply time.
        if any(size < 1 or size % 2 == 0 for size in blur_kernel_size):
            raise ValueError(f"blur_kernel_size values must be positive odd numbers, got {blur_kernel_size}")
        self.effect_time_seconds = effect_time_seconds
        self.blur_iterations = blur_iterations
        self.blur_kernel_size = blur_kernel_size

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a dict compatible with `Transition.from_dict`."""
        return {
            "type": "blur",
            "effect_time_seconds": self.effect_time_seconds,
            "blur_iterations": self.blur_iterations,
            "blur_kernel_size": list(self.blur_kernel_size),
        }

    @classmethod
    def _from_dict(cls, data: dict[str, Any]) -> "BlurTransition":
        # Only forward the keys that are present so missing keys fall back
        # to the constructor defaults.
        kwargs: dict[str, Any] = {}
        if "effect_time_seconds" in data:
            kwargs["effect_time_seconds"] = data["effect_time_seconds"]
        if "blur_iterations" in data:
            kwargs["blur_iterations"] = data["blur_iterations"]
        if "blur_kernel_size" in data:
            kwargs["blur_kernel_size"] = tuple(data["blur_kernel_size"])
        return cls(**kwargs)

    def _apply(self, videos: tuple[Video, Video]) -> Video:
        video_fps = videos[0].fps
        for video in videos:
            if video.total_seconds < self.effect_time_seconds:
                raise InsufficientDurationError("Not enough space to make transition!")

        effect_time_fps = math.floor(self.effect_time_seconds * video_fps)
        if effect_time_fps < 1:
            # Guard against a zero-frame effect: `frames[-0:]` would select
            # ALL frames of the first video instead of none.
            raise ValueError(
                f"effect_time_seconds={self.effect_time_seconds} is shorter than one frame at {video_fps} fps"
            )

        # Create frame-only videos for blur effect (avoids audio slicing issues)
        end_frames = Video.from_frames(videos[0].frames[-effect_time_fps:], fps=video_fps)
        start_frames = Video.from_frames(videos[1].frames[:effect_time_fps], fps=video_fps)

        # Blur ramps up over the first video's tail, then back down over the
        # second video's head.
        ascending_blur = Blur("ascending", self.blur_iterations, self.blur_kernel_size)
        descending_blur = Blur("descending", self.blur_iterations, self.blur_kernel_size)
        transition = ascending_blur.apply(end_frames) + descending_blur.apply(start_frames)

        blurred_videos = Video.from_frames(
            np.concatenate(
                [
                    videos[0].frames[:-effect_time_fps],
                    transition.frames,
                    videos[1].frames[effect_time_fps:],
                ],
                axis=0,
            ),
            fps=video_fps,
        )
        # No crossfade: the videos are not overlapped, just played in sequence.
        blurred_videos.audio = videos[0].audio.concat(videos[1].audio)
        return blurred_videos

__init__

__init__(
    effect_time_seconds: float = 1.5,
    blur_iterations: int = 400,
    blur_kernel_size: tuple[int, int] = (11, 11),
)

Initialize blur transition.

Parameters:

Name Type Description Default
effect_time_seconds float

Duration of the blur transition in seconds.

1.5
blur_iterations int

Blur strength at the peak of the transition. Higher values make the mid-point more heavily blurred.

400
blur_kernel_size tuple[int, int]

Gaussian kernel [width, height] in pixels. Must be odd numbers. Larger values spread the blur wider.

(11, 11)
Source code in src/videopython/base/transitions.py
def __init__(
    self, effect_time_seconds: float = 1.5, blur_iterations: int = 400, blur_kernel_size: tuple[int, int] = (11, 11)
) -> None:
    """Initialize blur transition.

    Args:
        effect_time_seconds: Duration of the blur transition in seconds.
        blur_iterations: Blur strength at the peak of the transition.
            Higher values make the mid-point more heavily blurred.
        blur_kernel_size: Gaussian kernel [width, height] in pixels.
            Must be odd numbers. Larger values spread the blur wider.
    """
    self.effect_time_seconds = effect_time_seconds
    self.blur_iterations = blur_iterations
    self.blur_kernel_size = blur_kernel_size

InstantTransition

InstantTransition

Bases: Transition

Hard cut between two videos with no transition effect.

Source code in src/videopython/base/transitions.py
class InstantTransition(Transition):
    """Hard cut between two videos with no transition effect."""

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a dict compatible with `Transition.from_dict`."""
        return {"type": "instant"}

    @classmethod
    def _from_dict(cls, data: dict[str, Any]) -> "InstantTransition":
        """Build an instance; an instant cut has no parameters to restore."""
        return cls()

    def _apply(self, videos: tuple[Video, Video]) -> Video:
        """Concatenate the two videos back to back."""
        first, second = videos
        return first + second