Add two-clip support for Join Extend mode

SourcePrep gains an optional source_clip_2 input — when it is connected in Join
Extend mode, the node joins two separate clips instead of splitting one clip in half.
MergeBack gains an optional original_clip_2 input so that the full result can be
reconstructed from both originals. Single-clip behavior is unchanged.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-20 10:19:45 +01:00
parent 08d6f21f18
commit 32e28ac4b4
2 changed files with 35 additions and 9 deletions

View File

@@ -106,9 +106,12 @@ Blend methods:
"blend_method": (["optical_flow", "alpha", "none"], {"default": "optical_flow", "description": "Blending method at seams."}), "blend_method": (["optical_flow", "alpha", "none"], {"default": "optical_flow", "description": "Blending method at seams."}),
"of_preset": (["fast", "balanced", "quality", "max"], {"default": "balanced", "description": "Optical flow quality preset."}), "of_preset": (["fast", "balanced", "quality", "max"], {"default": "balanced", "description": "Optical flow quality preset."}),
}, },
"optional": {
"original_clip_2": ("IMAGE", {"description": "Second original clip for Join Extend with two separate clips."}),
},
} }
def merge(self, original_clip, vace_output, vace_pipe, blend_method, of_preset): def merge(self, original_clip, vace_output, vace_pipe, blend_method, of_preset, original_clip_2=None):
mode = vace_pipe["mode"] mode = vace_pipe["mode"]
trim_start = vace_pipe["trim_start"] trim_start = vace_pipe["trim_start"]
trim_end = vace_pipe["trim_end"] trim_end = vace_pipe["trim_end"]
@@ -120,9 +123,15 @@ Blend methods:
return (vace_output,) return (vace_output,)
# Splice modes: reconstruct full video # Splice modes: reconstruct full video
two_clip = vace_pipe.get("two_clip", False)
V = vace_output.shape[0] V = vace_output.shape[0]
head = original_clip[:trim_start] head = original_clip[:trim_start]
if two_clip and original_clip_2 is not None:
tail = original_clip_2[trim_end:]
right_orig = original_clip_2
else:
tail = original_clip[trim_end:] tail = original_clip[trim_end:]
right_orig = original_clip
result = torch.cat([head, vace_output, tail], dim=0) result = torch.cat([head, vace_output, tail], dim=0)
if blend_method == "none" or (left_ctx == 0 and right_ctx == 0): if blend_method == "none" or (left_ctx == 0 and right_ctx == 0):
@@ -142,7 +151,7 @@ Blend methods:
for j in range(right_ctx): for j in range(right_ctx):
alpha = 1.0 - (j + 1) / (right_ctx + 1) alpha = 1.0 - (j + 1) / (right_ctx + 1)
frame_idx = V - right_ctx + j frame_idx = V - right_ctx + j
result[trim_start + frame_idx] = blend_frame(original_clip[trim_end - right_ctx + j], vace_output[frame_idx], alpha) result[trim_start + frame_idx] = blend_frame(right_orig[trim_end - right_ctx + j], vace_output[frame_idx], alpha)
return (result,) return (result,)

View File

@@ -323,7 +323,7 @@ input_left / input_right (0 = use all available):
Pre Extend: input_right = leading reference frames to keep Pre Extend: input_right = leading reference frames to keep
Middle Extend: input_left/input_right = frames each side of split Middle Extend: input_left/input_right = frames each side of split
Edge Extend: input_left/input_right = start/end edge size (overrides edge_frames) Edge Extend: input_left/input_right = start/end edge size (overrides edge_frames)
Join Extend: input_left/input_right = edge context from each half Join Extend: input_left/input_right = edge context from each half (or each clip if source_clip_2 connected)
Bidirectional: input_left = trailing context frames to keep Bidirectional: input_left = trailing context frames to keep
Frame Interpolation: pass-through (no trimming) Frame Interpolation: pass-through (no trimming)
Replace/Inpaint: input_left/input_right = context frames around replace region Replace/Inpaint: input_left/input_right = context frames around replace region
@@ -385,6 +385,12 @@ input_left / input_right (0 = use all available):
), ),
}, },
"optional": { "optional": {
"source_clip_2": (
"IMAGE",
{
"description": "Second clip for Join Extend — join two separate clips instead of splitting one in half.",
},
),
"inpaint_mask": ( "inpaint_mask": (
"MASK", "MASK",
{ {
@@ -401,7 +407,7 @@ input_left / input_right (0 = use all available):
}, },
} }
def prepare(self, source_clip, mode, split_index, input_left, input_right, edge_frames, inpaint_mask=None, keyframe_positions=None): def prepare(self, source_clip, mode, split_index, input_left, input_right, edge_frames, source_clip_2=None, inpaint_mask=None, keyframe_positions=None):
B, H, W, C = source_clip.shape B, H, W, C = source_clip.shape
dev = source_clip.device dev = source_clip.device
@@ -464,6 +470,10 @@ input_left / input_right (0 = use all available):
return (output, mode, 0, sym, mask_ph(), kp_out, pipe) return (output, mode, 0, sym, mask_ph(), kp_out, pipe)
elif mode == "Join Extend": elif mode == "Join Extend":
if source_clip_2 is not None:
first_half = source_clip
second_half = source_clip_2
else:
half = B // 2 half = B // 2
first_half = source_clip[:half] first_half = source_clip[:half]
second_half = source_clip[half:] second_half = source_clip[half:]
@@ -475,7 +485,14 @@ input_left / input_right (0 = use all available):
part_2 = first_half[-sym:] part_2 = first_half[-sym:]
part_3 = second_half[:sym] part_3 = second_half[:sym]
output = torch.cat([part_2, part_3], dim=0) output = torch.cat([part_2, part_3], dim=0)
pipe = {"mode": mode, "trim_start": half - sym, "trim_end": half + sym, "left_ctx": sym, "right_ctx": sym} two_clip = source_clip_2 is not None
if two_clip:
trim_start = first_half.shape[0] - sym
trim_end = sym
else:
trim_start = half - sym
trim_end = half + sym
pipe = {"mode": mode, "trim_start": trim_start, "trim_end": trim_end, "left_ctx": sym, "right_ctx": sym, "two_clip": two_clip}
return (output, mode, 0, sym, mask_ph(), kp_out, pipe) return (output, mode, 0, sym, mask_ph(), kp_out, pipe)
elif mode == "Bidirectional Extend": elif mode == "Bidirectional Extend":