feat(server): make ComfyUI log level configurable #150

Open · wants to merge 2 commits into main
24 changes: 23 additions & 1 deletion server/app.py
@@ -345,7 +345,10 @@ async def on_startup(app: web.Application):
patch_loop_datagram(app["media_ports"])

app["pipeline"] = Pipeline(
cwd=app["workspace"], disable_cuda_malloc=True, gpu_only=True
cwd=app["workspace"],
disable_cuda_malloc=True,
gpu_only=True,
comfyui_inference_log_level=app.get("comfui_inference_log_level", None),
)
app["pcs"] = set()
app["video_tracks"] = {}
@@ -386,6 +389,18 @@ async def on_shutdown(app: web.Application):
action="store_true",
help="Include stream ID as a label in Prometheus metrics.",
)
parser.add_argument(
"--comfyui-log-level",
default=None,
choices=logging._nameToLevel.keys(),
help="Set the global logging level for ComfyUI",
)
parser.add_argument(
"--comfyui-inference-log-level",
default=None,
choices=logging._nameToLevel.keys(),
help="Set the logging level for ComfyUI inference",
)
args = parser.parse_args()

logging.basicConfig(
@@ -435,4 +450,11 @@ def force_print(*args, **kwargs):
print(*args, **kwargs, flush=True)
sys.stdout.flush()

    # Allow overriding of ComfyUI log levels.
    if args.comfyui_log_level:
        log_level = logging._nameToLevel.get(args.comfyui_log_level.upper())
        logging.getLogger("comfy").setLevel(log_level)
    if args.comfyui_inference_log_level:
        app["comfyui_inference_log_level"] = args.comfyui_inference_log_level

web.run_app(app, host=args.host, port=int(args.port), print=force_print)
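The two flags are independent: --comfyui-log-level immediately sets the "comfy" logger for the whole process, while --comfyui-inference-log-level is stashed on the aiohttp app and only applied by the pipeline around inference calls, e.g. python server/app.py --comfyui-log-level WARNING --comfyui-inference-log-level DEBUG (any other required flags elided). A minimal standalone sketch of the global half, assuming only stdlib logging and a hardcoded stand-in for the parsed flag:

import logging

comfyui_log_level = "WARNING"  # stand-in for args.comfyui_log_level

if comfyui_log_level:
    # logging._nameToLevel maps level names ("WARNING") to numeric levels (30);
    # it is private but stable, and the diff also uses it for the argparse choices.
    log_level = logging._nameToLevel.get(comfyui_log_level.upper())
    logging.getLogger("comfy").setLevel(log_level)

assert logging.getLogger("comfy").level == logging.WARNING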
19 changes: 15 additions & 4 deletions server/pipeline.py
@@ -5,18 +5,27 @@

from typing import Any, Dict, Optional, Union, List
from comfystream.client import ComfyStreamClient
from utils import temporary_log_level

WARMUP_RUNS = 5


class Pipeline:
def __init__(self, **kwargs):
    def __init__(self, comfyui_inference_log_level: Optional[int] = None, **kwargs):
"""Initialize the pipeline with the given configuration.
Args:
comfyui_inference_log_level: The logging level for ComfyUI inference.
Defaults to None, using the global ComfyUI log level.
**kwargs: Additional arguments to pass to the ComfyStreamClient
"""
self.client = ComfyStreamClient(**kwargs)
self.video_incoming_frames = asyncio.Queue()
self.audio_incoming_frames = asyncio.Queue()

self.processed_audio_buffer = np.array([], dtype=np.int16)

self._comfyui_inference_log_level = comfyui_inference_log_level

async def warm_video(self):
dummy_frame = av.VideoFrame()
dummy_frame.side_data.input = torch.randn(1, 512, 512, 3)
@@ -75,7 +84,8 @@ def audio_postprocess(self, output: Union[torch.Tensor, np.ndarray]) -> av.AudioFrame:

async def get_processed_video_frame(self):
# TODO: make it generic to support purely generative video cases
-        out_tensor = await self.client.get_video_output()
+        async with temporary_log_level("comfy", self._comfyui_inference_log_level):
+            out_tensor = await self.client.get_video_output()
frame = await self.video_incoming_frames.get()
while frame.side_data.skipped:
frame = await self.video_incoming_frames.get()
@@ -90,7 +100,8 @@ async def get_processed_audio_frame(self):
# TODO: make it generic to support purely generative audio cases and also add frame skipping
frame = await self.audio_incoming_frames.get()
if frame.samples > len(self.processed_audio_buffer):
-            out_tensor = await self.client.get_audio_output()
+            async with temporary_log_level("comfy", self._comfyui_inference_log_level):
+                out_tensor = await self.client.get_audio_output()
self.processed_audio_buffer = np.concatenate([self.processed_audio_buffer, out_tensor])
out_data = self.processed_audio_buffer[:frame.samples]
self.processed_audio_buffer = self.processed_audio_buffer[frame.samples:]
@@ -108,4 +119,4 @@ async def get_nodes_info(self) -> Dict[str, Any]:
return nodes_info

async def cleanup(self):
        await self.client.cleanup()
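Because the new keyword defaults to None, existing Pipeline callers keep their current behavior; opting in is a single extra argument. A hypothetical construction (the workspace path is invented, and the other kwargs are simply forwarded to ComfyStreamClient as before):

import logging
from pipeline import Pipeline

pipeline = Pipeline(
    cwd="/workspace/ComfyUI",  # hypothetical ComfyUI workspace path
    disable_cuda_malloc=True,
    gpu_only=True,
    # Raised to DEBUG only while get_video_output/get_audio_output run;
    # the global "comfy" level applies the rest of the time.
    comfyui_inference_log_level=logging.DEBUG,
)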
2 changes: 1 addition & 1 deletion server/utils/__init__.py
@@ -1,2 +1,2 @@
-from .utils import patch_loop_datagram, add_prefix_to_app_routes
+from .utils import patch_loop_datagram, add_prefix_to_app_routes, temporary_log_level
from .fps_meter import FPSMeter
20 changes: 20 additions & 0 deletions server/utils/utils.py
@@ -6,6 +6,7 @@
import logging
from aiohttp import web
from typing import List, Optional, Tuple
from contextlib import asynccontextmanager

logger = logging.getLogger(__name__)

@@ -63,3 +64,22 @@ def add_prefix_to_app_routes(app: web.Application, prefix: str):
for route in list(app.router.routes()):
new_path = prefix + route.resource.canonical
app.router.add_route(route.method, new_path, route.handler)


@asynccontextmanager
async def temporary_log_level(logger_name: str, level: Optional[int]):
"""Temporarily set the log level of a logger.

Args:
logger_name: The name of the logger to set the level for.
level: The log level to set.
"""
    if level is not None:
        # Use a local name so the module-level `logger` is not shadowed.
        target_logger = logging.getLogger(logger_name)
        original_level = target_logger.level
        target_logger.setLevel(level)
    try:
        yield
    finally:
        if level is not None:
            target_logger.setLevel(original_level)
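The context manager restores whatever level the logger had before, and treats None as a no-op; that no-op path is what lets Pipeline pass its possibly-unset inference level through unconditionally. A small self-contained check of both behaviors (the import assumes it runs from the server directory):

import asyncio
import logging

from utils import temporary_log_level

async def main():
    comfy_logger = logging.getLogger("comfy")
    comfy_logger.setLevel(logging.INFO)

    # Overridden inside the block, restored on exit.
    async with temporary_log_level("comfy", logging.ERROR):
        assert comfy_logger.level == logging.ERROR
    assert comfy_logger.level == logging.INFO

    # None leaves the logger untouched (the default wiring in Pipeline).
    async with temporary_log_level("comfy", None):
        assert comfy_logger.level == logging.INFO

asyncio.run(main())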