vllm.profiler.gpu_profiler

logger module-attribute

logger = init_logger(__name__)

CudaProfilerWrapper

Source code in vllm/profiler/gpu_profiler.py
class CudaProfilerWrapper:
    def __init__(self) -> None:
        self._profiler_running = False
        # Note: lazy import to avoid dependency issues if CUDA is not available.
        import torch.cuda.profiler as cuda_profiler

        self._cuda_profiler = cuda_profiler

    def start(self) -> None:
        try:
            self._cuda_profiler.start()
            self._profiler_running = True
            logger.info_once("Started CUDA profiler")
        except Exception as e:
            logger.warning_once("Failed to start CUDA profiler: %s", e)

    def stop(self) -> None:
        if self._profiler_running:
            try:
                self._cuda_profiler.stop()
                logger.info_once("Stopped CUDA profiler")
            except Exception as e:
                logger.warning_once("Failed to stop CUDA profiler: %s", e)
            finally:
                self._profiler_running = False

    def shutdown(self) -> None:
        """Ensure profiler is stopped when shutting down."""
        self.stop()
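
A minimal usage sketch (not part of the module itself): bracket the region of interest with start() and stop() so that an externally attached profiler records only that region. The workload function below is hypothetical.

from vllm.profiler.gpu_profiler import CudaProfilerWrapper

profiler = CudaProfilerWrapper()
profiler.start()
try:
    run_inference_step()  # hypothetical workload to be profiled
finally:
    profiler.stop()       # no-op if start() failed; guarded by _profiler_running
    profiler.shutdown()   # idempotent; simply calls stop() again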

_cuda_profiler instance-attribute

_cuda_profiler = cuda_profiler

_profiler_running instance-attribute

_profiler_running = False

__init__

__init__() -> None
Source code in vllm/profiler/gpu_profiler.py
def __init__(self) -> None:
    self._profiler_running = False
    # Note: lazy import to avoid dependency issues if CUDA is not available.
    import torch.cuda.profiler as cuda_profiler

    self._cuda_profiler = cuda_profiler

shutdown

shutdown() -> None

Ensure profiler is stopped when shutting down.

Source code in vllm/profiler/gpu_profiler.py
def shutdown(self) -> None:
    """Ensure profiler is stopped when shutting down."""
    self.stop()

start

start() -> None
Source code in vllm/profiler/gpu_profiler.py
def start(self) -> None:
    try:
        self._cuda_profiler.start()
        self._profiler_running = True
        logger.info_once("Started CUDA profiler")
    except Exception as e:
        logger.warning_once("Failed to start CUDA profiler: %s", e)
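
In current PyTorch releases, torch.cuda.profiler.start() and stop() wrap the CUDA runtime calls cudaProfilerStart() and cudaProfilerStop(), so they only take effect when the process runs under an external profiler that honors capture ranges (for example, Nsight Systems launched with --capture-range=cudaProfilerApi). A hedged sketch of an equivalent context-manager wrapper around the same calls, assuming CUDA is available; this helper is illustrative and not part of vllm:

from contextlib import contextmanager

@contextmanager
def cuda_profiler_range():
    # Hypothetical helper: brackets a region with cudaProfilerStart()/
    # cudaProfilerStop() via torch.cuda.profiler, mirroring start()/stop() above.
    import torch.cuda.profiler as cuda_profiler
    cuda_profiler.start()
    try:
        yield
    finally:
        cuda_profiler.stop()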

stop

stop() -> None
Source code in vllm/profiler/gpu_profiler.py
def stop(self) -> None:
    if self._profiler_running:
        try:
            self._cuda_profiler.stop()
            logger.info_once("Stopped CUDA profiler")
        except Exception as e:
            logger.warning_once("Failed to stop CUDA profiler: %s", e)
        finally:
            self._profiler_running = False