import torch
from typing import Tuple


class BlackBarsV3:
"""
ComfyUI custom node that applies industry-standard letterboxing/pillarboxing
with aspect ratio detection and padding options. Unlike V2, this version
always preserves the full image by padding instead of cropping.
"""
def __init__(self):
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.ASPECT_RATIOS = {
"2.39:1": 2.39,
"2.35:1": 2.35,
"1.85:1": 1.85,
"16:9": 1.77777778,
"3:2": 1.5,
"4:3": 1.33333333,
"1:1": 1.0,
"9:16": 0.5625
}
self.COMMON_RESOLUTIONS = {
(1920, 1080): "16:9",
(3840, 2160): "16:9",
(1280, 720): "16:9",
(720, 480): "4:3",
(720, 576): "4:3",
(1080, 1920): "9:16",
(1080, 1080): "1:1"
}
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"images": ("IMAGE",),
"target_ratio": ([
"auto",
"2.39:1 (Anamorphic)",
"2.35:1 (CinemaScope)",
"1.85:1 (Theatrical)",
"16:9 (HD)",
"4:3 (Classic)",
"1:1 (Square)",
"9:16 (Vertical)"
],),
"safe_area": ("BOOLEAN", {"default": False}),
"show_guides": ("BOOLEAN", {"default": False})
}
}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "apply_black_bars"
CATEGORY = "image/format"
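    # Standard ComfyUI node hooks: RETURN_TYPES declares the single IMAGE output,
    # FUNCTION names the method ComfyUI invokes, and CATEGORY places the node
    # under "image/format" in the node menu.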
def detect_aspect_ratio(self, width: int, height: int) -> str:
"""
Detects the aspect ratio of the input image and returns the closest standard ratio.
"""
# First check if it's a common resolution
if (width, height) in self.COMMON_RESOLUTIONS:
return self.COMMON_RESOLUTIONS[(width, height)]
# Calculate actual ratio
actual_ratio = width / height
# Find closest standard ratio
closest_ratio = min(self.ASPECT_RATIOS.items(),
key=lambda x: abs(x[1] - actual_ratio))
return closest_ratio[0]
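    # Informal example: a 1998x1080 DCI "flat" frame gives 1998 / 1080 = 1.85, so
    # detect_aspect_ratio(1998, 1080) returns "1.85:1"; exact sizes such as
    # 1920x1080 short-circuit through COMMON_RESOLUTIONS to "16:9".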
def calculate_padding(self, input_res: Tuple[int, int],
target_ratio: str) -> Tuple[int, int, int, int, int, int]:
"""
Calculates the required padding and new dimensions to achieve target ratio.
Returns (new_width, new_height, top, bottom, left, right) padding sizes.
"""
width, height = input_res
input_ratio = width / height
if target_ratio == "auto":
target_ratio = self.detect_aspect_ratio(width, height)
# Strip suffix from target ratio if present
target_ratio = target_ratio.split(" ")[0]
target_ratio_float = self.ASPECT_RATIOS[target_ratio]
if abs(input_ratio - target_ratio_float) < 0.01:
return width, height, 0, 0, 0, 0 # No padding needed
if input_ratio > target_ratio_float:
# Need to increase height
new_height = int(width / target_ratio_float)
pad_height = new_height - height
top_pad = pad_height // 2
bottom_pad = pad_height - top_pad
return width, new_height, top_pad, bottom_pad, 0, 0
else:
# Need to increase width
new_width = int(height * target_ratio_float)
pad_width = new_width - width
left_pad = pad_width // 2
right_pad = pad_width - left_pad
return new_width, height, 0, 0, left_pad, right_pad
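    # Worked example (sketch): padding a 1920x1080 frame (ratio ~1.778) to "2.39:1"
    # takes the widen branch: new_width = int(1080 * 2.39) = 2581, pad_width = 661,
    # returning (2581, 1080, 0, 0, 330, 331) -- pillarboxing. Padding the same frame
    # to "4:3" instead returns (1920, 1440, 180, 180, 0, 0) -- letterboxing.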
def apply_safe_area_guides(self, images: torch.Tensor) -> torch.Tensor:
"""
        Adds a safe-area guide overlay (90% action-safe and 80% title-safe markers).
"""
B, H, W, C = images.shape
guide_image = images.clone()
# Calculate safe area boundaries
action_safe = {
'top': int(H * 0.1),
'bottom': int(H * 0.9),
'left': int(W * 0.1),
'right': int(W * 0.9)
}
title_safe = {
'top': int(H * 0.2),
'bottom': int(H * 0.8),
'left': int(W * 0.2),
'right': int(W * 0.8)
}
# Draw guide lines
line_color = torch.tensor([0.8, 0.8, 0.8], device=self.device)
line_thickness = 2
# Action safe area (90%)
guide_image[:, action_safe['top']:action_safe['top'] + line_thickness, :, :] = line_color
guide_image[:, action_safe['bottom']-line_thickness:action_safe['bottom'], :, :] = line_color
guide_image[:, :, action_safe['left']:action_safe['left'] + line_thickness, :] = line_color
guide_image[:, :, action_safe['right']-line_thickness:action_safe['right'], :] = line_color
# Title safe area (80%)
guide_image[:, title_safe['top']:title_safe['top'] + line_thickness, :, :] = line_color
guide_image[:, title_safe['bottom']-line_thickness:title_safe['bottom'], :, :] = line_color
guide_image[:, :, title_safe['left']:title_safe['left'] + line_thickness, :] = line_color
guide_image[:, :, title_safe['right']-line_thickness:title_safe['right'], :] = line_color
return guide_image
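    # Note: the overlay draws full-width horizontal and full-height vertical lines
    # (a grid rather than closed rectangles), and it is applied to the padded
    # canvas, so the 90%/80% regions are measured including the black bars.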
def apply_black_bars(self, images: torch.Tensor,
target_ratio: str,
safe_area: bool,
show_guides: bool) -> Tuple[torch.Tensor]:
"""
Apply padding to achieve target aspect ratio while preserving the entire image.
Args:
images: Input tensor of shape (B, H, W, C)
target_ratio: Target aspect ratio (or "auto")
            safe_area: Whether to draw the safe-area guide overlay
            show_guides: Whether to draw the guide overlay (either flag enables it)
Returns:
Tuple containing the processed images tensor with padding
"""
# Ensure input is torch tensor on the correct device
if not isinstance(images, torch.Tensor):
images = torch.tensor(images, device=self.device)
else:
images = images.to(self.device)
B, H, W, C = images.shape
# Calculate required padding and new dimensions
new_width, new_height, top, bottom, left, right = self.calculate_padding((W, H), target_ratio)
# Create new tensor with black padding
        result = torch.zeros((B, new_height, new_width, C), dtype=images.dtype, device=self.device)
# Place original image in center
result[:, top:top+H, left:left+W, :] = images
# Apply safe area guides if requested
if safe_area or show_guides:
result = self.apply_safe_area_guides(result)
return (result,)
NODE_CLASS_MAPPINGS = {
"BlackBarsV3": BlackBarsV3
}
NODE_DISPLAY_NAME_MAPPINGS = {
"BlackBarsV3": "Black Bars V3"
}
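

if __name__ == "__main__":
    # Minimal standalone smoke test (a sketch, not part of the ComfyUI node API):
    # pads a random 16:9 batch to 2.39:1. Assumes only that torch is installed;
    # ComfyUI itself is not required to run this block.
    node = BlackBarsV3()
    dummy = torch.rand(1, 1080, 1920, 3)
    (padded,) = node.apply_black_bars(dummy, "2.39:1 (Anamorphic)",
                                      safe_area=False, show_guides=False)
    print(padded.shape)  # expected: torch.Size([1, 1080, 2581, 3])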