Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 8e6f296

Browse files
committed Mar 11, 2025
feat(server): make ComfyUI log level configurable
This commit introduces the `comfyui-log-level` and `comfyui-inference-log-level` arguments to the server, allowing users to adjust the log level and reduce excessive logging.
1 parent 7513393 commit 8e6f296

File tree

2 files changed

+38
-5
lines changed

2 files changed

+38
-5
lines changed
 

‎server/app.py

+23-1
Original file line numberDiff line numberDiff line change
@@ -287,7 +287,10 @@ async def on_startup(app: web.Application):
287287
patch_loop_datagram(app["media_ports"])
288288

289289
app["pipeline"] = Pipeline(
290-
cwd=app["workspace"], disable_cuda_malloc=True, gpu_only=True
290+
cwd=app["workspace"],
291+
disable_cuda_malloc=True,
292+
gpu_only=True,
293+
comfyui_inference_log_level=app.get("comfui_inference_log_level", None),
291294
)
292295
app["pcs"] = set()
293296
app["video_tracks"] = {}
@@ -328,6 +331,18 @@ async def on_shutdown(app: web.Application):
328331
action="store_true",
329332
help="Include stream ID as a label in Prometheus metrics.",
330333
)
334+
parser.add_argument(
335+
"--comfyui-log-level",
336+
default=None,
337+
choices=logging._nameToLevel.keys(),
338+
help="Set the global logging level for ComfyUI",
339+
)
340+
parser.add_argument(
341+
"--comfyui-inference-log-level",
342+
default=None,
343+
choices=logging._nameToLevel.keys(),
344+
help="Set the logging level for ComfyUI inference",
345+
)
331346
args = parser.parse_args()
332347

333348
logging.basicConfig(
@@ -377,4 +392,11 @@ def force_print(*args, **kwargs):
377392
print(*args, **kwargs, flush=True)
378393
sys.stdout.flush()
379394

395+
# Allow overriding of ComfyUI log levels.
396+
if args.comfyui_log_level:
397+
log_level = logging._nameToLevel.get(args.comfyui_log_level.upper())
398+
logging.getLogger("comfy").setLevel(log_level)
399+
if args.comfyui_inference_log_level:
400+
app["comfui_inference_log_level"] = args.comfyui_inference_log_level
401+
380402
web.run_app(app, host=args.host, port=int(args.port), print=force_print)

‎server/pipeline.py

+15-4
Original file line numberDiff line numberDiff line change
@@ -5,19 +5,28 @@
55

66
from typing import Any, Dict, Union, List
77
from comfystream.client import ComfyStreamClient
8+
from utils import temporary_log_level
89

910
WARMUP_RUNS = 5
1011

1112

1213
class Pipeline:
13-
def __init__(self, **kwargs):
14+
def __init__(self, comfyui_inference_log_level: int = None, **kwargs):
15+
"""Initialize the pipeline with the given configuration.
16+
Args:
17+
comfyui_inference_log_level: The logging level for ComfyUI inference.
18+
Defaults to None, using the global ComfyUI log level.
19+
**kwargs: Additional arguments to pass to the ComfyStreamClient
20+
"""
1421
self.client = ComfyStreamClient(**kwargs, max_workers=5) # TODO: hardcoded max workers, should it be configurable?
1522

1623
self.video_incoming_frames = asyncio.Queue()
1724
self.audio_incoming_frames = asyncio.Queue()
1825

1926
self.processed_audio_buffer = np.array([], dtype=np.int16)
2027

28+
self._comfyui_inference_log_level = comfyui_inference_log_level
29+
2130
async def warm_video(self):
2231
dummy_frame = av.VideoFrame()
2332
dummy_frame.side_data.input = torch.randn(1, 512, 512, 3)
@@ -76,7 +85,8 @@ def audio_postprocess(self, output: Union[torch.Tensor, np.ndarray]) -> av.Audio
7685

7786
async def get_processed_video_frame(self):
7887
# TODO: make it generic to support purely generative video cases
79-
out_tensor = await self.client.get_video_output()
88+
async with temporary_log_level("comfy", self._comfyui_inference_log_level):
89+
out_tensor = await self.client.get_video_output()
8090
frame = await self.video_incoming_frames.get()
8191
while frame.side_data.skipped:
8292
frame = await self.video_incoming_frames.get()
@@ -91,7 +101,8 @@ async def get_processed_audio_frame(self):
91101
# TODO: make it generic to support purely generative audio cases and also add frame skipping
92102
frame = await self.audio_incoming_frames.get()
93103
if frame.samples > len(self.processed_audio_buffer):
94-
out_tensor = await self.client.get_audio_output()
104+
async with temporary_log_level("comfy", self._comfyui_inference_log_level):
105+
out_tensor = await self.client.get_audio_output()
95106
self.processed_audio_buffer = np.concatenate([self.processed_audio_buffer, out_tensor])
96107
out_data = self.processed_audio_buffer[:frame.samples]
97108
self.processed_audio_buffer = self.processed_audio_buffer[frame.samples:]
@@ -109,4 +120,4 @@ async def get_nodes_info(self) -> Dict[str, Any]:
109120
return nodes_info
110121

111122
async def cleanup(self):
112-
await self.client.cleanup()
123+
await self.client.cleanup()

0 commit comments

Comments
 (0)
Please sign in to comment.