Merge pull request 'modular' (#1) from modular into main
Reviewed-on: #1
This commit was merged in pull request #1.
This commit is contained in:
@@ -1,13 +1,13 @@
|
||||
"""ComfyUI registration for the Sharp Frames custom nodes."""

from .sharp_node import SharpnessAnalyzer, SharpFrameSelector

# Map each class to an internal name ComfyUI recognizes.
NODE_CLASS_MAPPINGS = {
    "SharpnessAnalyzer": SharpnessAnalyzer,
    "SharpFrameSelector": SharpFrameSelector,
}

# Map the internal name to a human-readable label in the node menu.
# (The diff residue carried a duplicate "SharpFrameSelector" key and a
# missing comma; this is the merged, syntactically valid mapping.)
NODE_DISPLAY_NAME_MAPPINGS = {
    "SharpnessAnalyzer": "1. Sharpness Analyzer",
    "SharpFrameSelector": "2. Sharp Frame Selector",
}

__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]
|
||||
@@ -4,15 +4,17 @@ app.registerExtension({
|
||||
name: "SharpFrames.Tooltips",
|
||||
async beforeRegisterNodeDef(nodeType, nodeData, app) {
|
||||
if (nodeData.name === "SharpFrameSelector") {
|
||||
|
||||
// Define your tooltips here
|
||||
const tooltips = {
|
||||
"selection_method": "Strategy:\n'batched' = 1 best frame per time slot (Good for video).\n'best_n' = Top N sharpest frames globally.",
|
||||
"batch_size": "For 'batched' mode only.\nHow many frames to analyze at once.\nExample: 24fps video + batch 24 = 1 output frame per second.",
|
||||
"num_frames": "For 'best_n' mode only.\nTotal number of frames you want to keep."
|
||||
// Must match Python INPUT_TYPES keys exactly
|
||||
"selection_method": "Strategy:\n• 'batched': Best for video. Splits time into slots.\n• 'best_n': Global top sharpest frames.",
|
||||
"batch_size": "For 'batched' mode.\nSize of the analysis window (in frames).",
|
||||
"batch_buffer": "For 'batched' mode.\nFrames to skip AFTER each batch (dead zone).",
|
||||
"num_frames": "For 'best_n' mode.\nTotal frames to output.",
|
||||
"min_sharpness": "Threshold Filter.\nDiscard frames with score below this.\nNote: Scores are lower on resized images.",
|
||||
"images": "Input High-Res images.",
|
||||
"scores": "Input Sharpness Scores from Analyzer."
|
||||
};
|
||||
|
||||
// Hook into the node creation to apply them
|
||||
const onNodeCreated = nodeType.prototype.onNodeCreated;
|
||||
nodeType.prototype.onNodeCreated = function () {
|
||||
onNodeCreated?.apply(this, arguments);
|
||||
@@ -21,6 +23,9 @@ app.registerExtension({
|
||||
for (const w of this.widgets) {
|
||||
if (tooltips[w.name]) {
|
||||
w.tooltip = tooltips[w.name];
|
||||
// Force update for immediate feedback
|
||||
w.options = w.options || {};
|
||||
w.options.tooltip = tooltips[w.name];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
28
readme
28
readme
@@ -1,22 +1,26 @@
|
||||
# 🔪 ComfyUI Sharp Frame Selector

A suite of custom nodes for [ComfyUI](https://github.com/comfyanonymous/ComfyUI) designed to intelligently extract the sharpest frames from video footage.

Based on the [sharp-frames](https://github.com/Reflct/sharp-frames-python) logic, this tool uses **Laplacian Variance** to score image clarity. It is optimized for high-resolution video processing using a **Sidechain Workflow** that saves massive amounts of RAM.

## ✨ Key Features

* **Sidechain Optimization:** Analyze lightweight 512px proxy images to control the selection of heavy 4K raw frames.
* **Batched Extraction:** Splits video into time slots (e.g., 1 second) and picks the single best frame from each slot. Perfect for ensuring action scenes are not missed.
* **Threshold Filtering:** Automatically discards frames that are too blurry, even if they are the "winner" of their batch.
* **Buffer Control:** Optional dead-zones between batches to reduce frame count or ensure temporal separation.

---

## 🚀 Installation

### Option 1: ComfyUI Manager (Recommended)
1. Open ComfyUI Manager.
2. Search for **"Sharp Frame Selector"**.
3. Click **Install**.

### Option 2: Manual Installation
Clone this repository into your `custom_nodes` folder:
|
||||
|
||||
```bash
|
||||
|
||||
@@ -2,71 +2,93 @@ import torch
|
||||
import numpy as np
|
||||
import cv2
|
||||
|
||||
# --- NODE 1: ANALYZER (Unchanged) ---
class SharpnessAnalyzer:
    """Scores every input frame's sharpness via variance of the Laplacian."""

    @classmethod
    def INPUT_TYPES(s):
        # A single image batch in; nothing else to configure.
        return {"required": {"images": ("IMAGE",)}}

    RETURN_TYPES = ("SHARPNESS_SCORES",)
    RETURN_NAMES = ("scores",)
    FUNCTION = "analyze_sharpness"
    CATEGORY = "SharpFrames"

    def analyze_sharpness(self, images):
        """Return a 1-tuple holding one Laplacian-variance score per frame.

        OpenCV runs on the CPU, so each frame is moved off-device
        individually rather than copying the whole batch at once.
        """
        print(f"[SharpAnalyzer] Calculating scores for {len(images)} frames...")

        def _laplacian_variance(frame):
            # Rescale 0.0-1.0 floats to 0-255 bytes for OpenCV.
            pixels = (frame.cpu().numpy() * 255).astype(np.uint8)
            grayscale = cv2.cvtColor(pixels, cv2.COLOR_RGB2GRAY)
            # Higher variance of the Laplacian == more in-focus edges.
            return cv2.Laplacian(grayscale, cv2.CV_64F).var()

        return ([_laplacian_variance(frame) for frame in images],)
||||
# --- NODE 2: SELECTOR (Updated with Buffer) ---
class SharpFrameSelector:
    """Selects the sharpest frames from a batch using precomputed scores.

    Second half of the sidechain workflow: `scores` may come from a
    SharpnessAnalyzer run on low-res proxies while `images` holds the
    full-resolution frames, so the heavy tensor is only fancy-indexed once.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "images": ("IMAGE",),
                "scores": ("SHARPNESS_SCORES",),
                "selection_method": (["batched", "best_n"],),
                "batch_size": ("INT", {"default": 24, "min": 1, "max": 10000, "step": 1}),
                # Dead-zone skipped AFTER each batch ("batched" mode only).
                "batch_buffer": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1}),
                "num_frames": ("INT", {"default": 10, "min": 1, "max": 10000, "step": 1}),
                "min_sharpness": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10000.0, "step": 0.1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "INT")
    RETURN_NAMES = ("selected_images", "count")
    FUNCTION = "select_frames"
    CATEGORY = "SharpFrames"

    def select_frames(self, images, scores, selection_method, batch_size, batch_buffer, num_frames, min_sharpness):
        """Pick frames from `images` driven by `scores`.

        Args:
            images: Tensor [Batch, Height, Width, Channels], RGB, 0.0-1.0.
            scores: sequence of per-frame sharpness scores (same length).
            selection_method: "batched" (best frame per time slot) or
                "best_n" (global top sharpest frames).
            batch_size: slot width in frames ("batched" mode).
            batch_buffer: frames skipped after each slot ("batched" mode).
            num_frames: number of frames to keep ("best_n" mode).
            min_sharpness: discard any candidate scoring below this.

        Returns:
            (selected_images, count); a single black placeholder frame with
            count 0 when nothing passes the filters.
        """
        # Defensive: the image and score streams can disagree in length
        # (e.g. proxy pipeline dropped a frame); truncate both to match.
        if len(images) != len(scores):
            min_len = min(len(images), len(scores))
            images = images[:min_len]
            scores = scores[:min_len]

        selected_indices = []

        # --- SELECTION LOGIC ---
        if selection_method == "batched":
            total_frames = len(scores)
            # Step includes the buffer: batch=24, buffer=2 -> jump 26 frames.
            step_size = batch_size + batch_buffer

            for i in range(0, total_frames, step_size):
                # The analyzed chunk is strictly batch_size frames wide.
                chunk_end = min(i + batch_size, total_frames)
                chunk_scores = scores[i:chunk_end]

                if len(chunk_scores) > 0:
                    # argmax is chunk-relative; add i for the absolute index.
                    best_in_chunk_idx = int(np.argmax(chunk_scores))
                    best_score = chunk_scores[best_in_chunk_idx]
                    # Threshold filter: drop the slot winner if too blurry.
                    if best_score >= min_sharpness:
                        selected_indices.append(i + best_in_chunk_idx)

        elif selection_method == "best_n":
            # Buffer does not apply here; threshold first, then take top N.
            valid_indices = [i for i, s in enumerate(scores) if s >= min_sharpness]
            valid_scores = np.array([scores[i] for i in valid_indices])

            if len(valid_scores) > 0:
                target_count = min(num_frames, len(valid_scores))
                # argsort is ascending, so the last N entries are sharpest.
                top_local_indices = np.argsort(valid_scores)[-target_count:]
                top_global_indices = [valid_indices[i] for i in top_local_indices]
                # Restore original video order.
                selected_indices = sorted(top_global_indices)

        print(f"[SharpSelector] Selected {len(selected_indices)} frames.")

        if len(selected_indices) == 0:
            # FIX: the previous fallback indexed images[0], which raises
            # IndexError when the input batch itself is empty. Derive the
            # placeholder size from the tensor shape instead.
            if images.ndim == 4:
                h, w = images.shape[1], images.shape[2]
            else:
                h, w = 64, 64  # arbitrary size when no shape info is available
            empty = torch.zeros((1, h, w, 3), dtype=images.dtype, device=images.device)
            return (empty, 0)

        # Filter the original (possibly GPU) tensor in a single fancy-index.
        result_images = images[selected_indices]
        return (result_images, len(selected_indices))
|
||||
Reference in New Issue
Block a user