vllm (sha256:b3cc300a6f651b6ba190a722afce05de6e21ba18b3ed7d10849d891be3b2cde3)
Published 2026-03-15 01:20:56 +01:00 by hydrar
Installation
docker pull git.hydrar.se/hydrar/vllm@sha256:b3cc300a6f651b6ba190a722afce05de6e21ba18b3ed7d10849d891be3b2cde3
About this package
vLLM inference engine for CPU platforms
Image layers
| ARG RELEASE |
| ARG LAUNCHPAD_BUILD_ARCH |
| LABEL org.opencontainers.image.ref.name=ubuntu |
| LABEL org.opencontainers.image.version=22.04 |
| ADD file:52c0e467fa2e92f101018df01a0ff56580c752b7553fbe6df88e16b02af6d4ee in / |
| CMD ["/bin/bash"] |
| WORKDIR /workspace |
| ARG PYTHON_VERSION=3.12 |
| ARG PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu |
| RUN |2 PYTHON_VERSION=3.12 PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu /bin/sh -c apt-get update -y && apt-get install -y --no-install-recommends sudo ccache git curl wget ca-certificates gcc-12 g++-12 libtcmalloc-minimal4 libnuma-dev ffmpeg libsm6 libxext6 libgl1 jq lsof make xz-utils && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 && curl -LsSf https://astral.sh/uv/install.sh | sh # buildkit |
| ENV CC=/usr/bin/gcc-12 CXX=/usr/bin/g++-12 |
| ENV CCACHE_DIR=/root/.cache/ccache |
| ENV CMAKE_CXX_COMPILER_LAUNCHER=ccache |
| ENV PATH=/root/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin |
| ENV VIRTUAL_ENV=/opt/venv |
| ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python |
| RUN |2 PYTHON_VERSION=3.12 PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu /bin/sh -c uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV} # buildkit |
| ENV PATH=/opt/venv/bin:/root/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin |
| ENV UV_HTTP_TIMEOUT=500 |
| ENV PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu |
| ENV UV_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu |
| ENV UV_INDEX_STRATEGY=unsafe-best-match |
| ENV UV_LINK_MODE=copy |
| COPY requirements/common.txt requirements/common.txt # buildkit |
| COPY requirements/cpu.txt requirements/cpu.txt # buildkit |
| RUN |2 PYTHON_VERSION=3.12 PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu /bin/sh -c uv pip install --upgrade pip && uv pip install -r requirements/cpu.txt # buildkit |
| ARG TARGETARCH=amd64 |
| ENV TARGETARCH=amd64 |
| ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/opt/venv/lib/libiomp5.so |
| RUN |3 PYTHON_VERSION=3.12 PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu TARGETARCH=amd64 /bin/sh -c echo 'ulimit -c 0' >> ~/.bashrc # buildkit |
| WORKDIR /vllm-workspace |
| RUN |3 PYTHON_VERSION=3.12 PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu TARGETARCH=amd64 /bin/sh -c uv pip install dist/*.whl ray[default,adag] # buildkit |
| LABEL org.opencontainers.image.title=vLLM CPU |
| LABEL org.opencontainers.image.description=vLLM inference engine for CPU platforms |
| LABEL org.opencontainers.image.vendor=vLLM Project |
| LABEL org.opencontainers.image.source=https://github.com/vllm-project/vllm |
| ARG TARGETARCH=amd64 |
| ARG VLLM_CPU_X86 |
| ARG VLLM_CPU_ARM_BF16 |
| ARG PYTHON_VERSION |
| LABEL ai.vllm.build.target-arch=amd64 |
| LABEL ai.vllm.build.cpu-x86=false |
| LABEL ai.vllm.build.cpu-arm-bf16=false |
| LABEL ai.vllm.build.python-version=3.12 |
| ENTRYPOINT ["vllm", "serve"] |
Labels
| Key | Value |
|---|---|
| ai.vllm.build.cpu-arm-bf16 | false |
| ai.vllm.build.cpu-x86 | false |
| ai.vllm.build.python-version | 3.12 |
| ai.vllm.build.target-arch | amd64 |
| org.opencontainers.image.description | vLLM inference engine for CPU platforms |
| org.opencontainers.image.ref.name | ubuntu |
| org.opencontainers.image.source | https://github.com/vllm-project/vllm |
| org.opencontainers.image.title | vLLM CPU |
| org.opencontainers.image.vendor | vLLM Project |
| org.opencontainers.image.version | 22.04 |
Details
Versions (1)
View all
vllm-cpu-0.17.1
2026-03-15