Mirror of https://github.com/NVIDIA/nvidia-container-toolkit (synced 2025-06-26 18:18:24 +00:00)
Compare commits: v1.18.0-rc...pull-reque (1 commit)
Commit: 0db8ca9893
.common-ci.yml: 193 lines changed
@@ -22,7 +22,15 @@ variables:
  BUILD_MULTI_ARCH_IMAGES: "true"

stages:
  - pull
  - trigger
  - image
  - lint
  - go-checks
  - go-build
  - unit-tests
  - package-build
  - image-build
  - test
  - scan
  - release
  - sign
@@ -45,6 +53,108 @@ workflow:
      # We then add all the regular triggers
      - !reference [.pipeline-trigger-rules, rules]

# The main or manual job is used to filter out distributions or architectures that are not required on
# every build.
.main-or-manual:
  rules:
    - !reference [.pipeline-trigger-rules, rules]
    - if: $CI_PIPELINE_SOURCE == "schedule"
      when: manual

# The trigger-pipeline job adds a manually triggered job to the pipeline on merge requests.
trigger-pipeline:
  stage: trigger
  script:
    - echo "starting pipeline"
  rules:
    - !reference [.main-or-manual, rules]
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
      when: manual
      allow_failure: false
    - when: always
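GitLab's !reference tag splices the referenced list into the including job, so the rule lists in this file compose by concatenation. As a sketch, the trigger-pipeline rules above would expand to roughly the following; the body of .pipeline-trigger-rules is not shown in this diff, so its single rule here is an assumption for illustration:

trigger-pipeline:
  stage: trigger
  rules:
    - if: $CI_PIPELINE_SOURCE == "web"                 # assumed content of .pipeline-trigger-rules
    - if: $CI_PIPELINE_SOURCE == "schedule"            # spliced from .main-or-manual
      when: manual
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
      when: manual
      allow_failure: false
    - when: always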
# Define the distribution targets
.dist-centos7:
  rules:
    - !reference [.main-or-manual, rules]
  variables:
    DIST: centos7

.dist-centos8:
  variables:
    DIST: centos8

.dist-ubi8:
  rules:
    - !reference [.main-or-manual, rules]
  variables:
    DIST: ubi8

.dist-ubuntu18.04:
  variables:
    DIST: ubuntu18.04

.dist-ubuntu20.04:
  variables:
    DIST: ubuntu20.04

.dist-packaging:
  variables:
    DIST: packaging

# Define architecture targets
.arch-aarch64:
  variables:
    ARCH: aarch64

.arch-amd64:
  variables:
    ARCH: amd64

.arch-arm64:
  variables:
    ARCH: arm64

.arch-ppc64le:
  rules:
    - !reference [.main-or-manual, rules]
  variables:
    ARCH: ppc64le

.arch-x86_64:
  variables:
    ARCH: x86_64

# Define the platform targets
.platform-amd64:
  variables:
    PLATFORM: linux/amd64

.platform-arm64:
  variables:
    PLATFORM: linux/arm64
# Define test helpers
.integration:
  stage: test
  variables:
    IMAGE_NAME: "${CI_REGISTRY_IMAGE}/container-toolkit"
    VERSION: "${CI_COMMIT_SHORT_SHA}"
  before_script:
    - apk add --no-cache make bash jq
    - docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
    - docker pull "${IMAGE_NAME}:${VERSION}-${DIST}"
  script:
    - make -f deployments/container/Makefile test-${DIST}

# Define the test targets
test-packaging:
  extends:
    - .integration
    - .dist-packaging
  needs:
    - image-packaging
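Concrete jobs throughout these pipelines are assembled by listing several hidden templates under extends; GitLab deep-merges them in order. The test-packaging job above therefore resolves to approximately the following merged definition (a sketch of the merged result, not generated output):

test-packaging:
  stage: test                          # from .integration
  variables:
    IMAGE_NAME: "${CI_REGISTRY_IMAGE}/container-toolkit"
    VERSION: "${CI_COMMIT_SHORT_SHA}"
    DIST: packaging                    # from .dist-packaging
  before_script:
    - apk add --no-cache make bash jq
    - docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
    - docker pull "${IMAGE_NAME}:${VERSION}-${DIST}"
  script:
    - make -f deployments/container/Makefile test-${DIST}
  needs:
    - image-packaging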
# Download the regctl binary for use in the release steps
.regctl-setup:
  before_script:
@@ -54,3 +164,84 @@ workflow:
    - curl -sSLo bin/regctl https://github.com/regclient/regclient/releases/download/${REGCTL_VERSION}/regctl-linux-amd64
    - chmod a+x bin/regctl
    - export PATH=$(pwd)/bin:${PATH}

# .release forms the base of the deployment jobs which push images to the CI registry.
# This is extended with the version to be deployed (e.g. the SHA or TAG) and the
# target os.
.release:
  stage: release
  variables:
    # Define the source image for the release
    IMAGE_NAME: "${CI_REGISTRY_IMAGE}/container-toolkit"
    VERSION: "${CI_COMMIT_SHORT_SHA}"
    # OUT_IMAGE_VERSION is overridden for external releases
    OUT_IMAGE_VERSION: "${CI_COMMIT_SHORT_SHA}"
  before_script:
    - !reference [.regctl-setup, before_script]
    # We ensure that the components of the output image are set:
    - 'echo Image Name: ${OUT_IMAGE_NAME} ; [[ -n "${OUT_IMAGE_NAME}" ]] || exit 1'
    - 'echo Version: ${OUT_IMAGE_VERSION} ; [[ -n "${OUT_IMAGE_VERSION}" ]] || exit 1'

    - apk add --no-cache make bash
  script:
    # Log in to the "output" registry, tag the image and push the image
    - 'echo "Logging in to CI registry ${CI_REGISTRY}"'
    - regctl registry login "${CI_REGISTRY}" -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}"
    - '[ ${CI_REGISTRY} = ${OUT_REGISTRY} ] || echo "Logging in to output registry ${OUT_REGISTRY}"'
    - '[ ${CI_REGISTRY} = ${OUT_REGISTRY} ] || regctl registry login "${OUT_REGISTRY}" -u "${OUT_REGISTRY_USER}" -p "${OUT_REGISTRY_TOKEN}"'

    # Since OUT_IMAGE_NAME and OUT_IMAGE_VERSION are set, this will push the CI image to the
    # target
    - make -f deployments/container/Makefile push-${DIST}
# Define a staging release step that pushes an image to an internal "staging" repository
# This is triggered for all pipelines (i.e. not only tags) to test the pipeline steps
# outside of the release process.
.release:staging:
  extends:
    - .release
  variables:
    OUT_REGISTRY_USER: "${NGC_REGISTRY_USER}"
    OUT_REGISTRY_TOKEN: "${NGC_REGISTRY_TOKEN}"
    OUT_REGISTRY: "${NGC_REGISTRY}"
    OUT_IMAGE_NAME: "${NGC_REGISTRY_STAGING_IMAGE_NAME}"

# Define an external release step that pushes an image to an external repository.
# This includes a development image off main.
.release:external:
  extends:
    - .release
  variables:
    FORCE_PUBLISH_IMAGES: "yes"
  rules:
    - if: $CI_COMMIT_TAG
      variables:
        OUT_IMAGE_VERSION: "${CI_COMMIT_TAG}"
    - if: $CI_COMMIT_BRANCH == $RELEASE_DEVEL_BRANCH
      variables:
        OUT_IMAGE_VERSION: "${DEVEL_RELEASE_IMAGE_VERSION}"

# Define the release jobs
release:staging-ubi8:
  extends:
    - .release:staging
    - .dist-ubi8
  needs:
    - image-ubi8

release:staging-ubuntu20.04:
  extends:
    - .release:staging
    - .dist-ubuntu20.04
  needs:
    - test-toolkit-ubuntu20.04
    - test-containerd-ubuntu20.04
    - test-crio-ubuntu20.04
    - test-docker-ubuntu20.04

release:staging-packaging:
  extends:
    - .release:staging
    - .dist-packaging
  needs:
    - test-packaging
.github/workflows/e2e.yaml (vendored): 2 lines changed
@@ -72,7 +72,7 @@ jobs:
        env:
          E2E_INSTALL_CTK: "true"
          E2E_IMAGE_NAME: ghcr.io/nvidia/container-toolkit
          E2E_IMAGE_TAG: ${{ inputs.version }}
          E2E_IMAGE_TAG: ${{ inputs.version }}-ubuntu20.04
          E2E_SSH_USER: ${{ secrets.E2E_SSH_USER }}
          E2E_SSH_HOST: ${{ steps.holodeck_public_dns_name.outputs.result }}
        run: |
.github/workflows/image.yaml (vendored): 12 lines changed
@@ -79,9 +79,15 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target:
          - application
        dist:
          - ubuntu20.04
          - ubi8
          - packaging
        ispr:
          - ${{ github.ref_name != 'main' && !startsWith( github.ref_name, 'release-' ) }}
        exclude:
          - ispr: true
            dist: ubi8
    needs: packages
    steps:
      - uses: actions/checkout@v4

@@ -117,4 +123,4 @@ jobs:
        BUILD_MULTI_ARCH_IMAGES: ${{ inputs.build_multi_arch_images }}
      run: |
        echo "${VERSION}"
        make -f deployments/container/Makefile build-${{ matrix.target }}
        make -f deployments/container/Makefile build-${{ matrix.dist }}
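The ispr axis above is a small GitHub Actions idiom: a single-valued matrix dimension carries the result of an expression, and exclude then drops combinations where it is true, so ubi8 builds are skipped on pull-request refs. The pattern in isolation (dist names are illustrative):

strategy:
  matrix:
    dist: [ubuntu20.04, ubi8]
    ispr:
      - ${{ github.ref_name != 'main' }}
    exclude:
      - ispr: true
        dist: ubi8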
.gitlab-ci.yml (new file): 228 lines
@@ -0,0 +1,228 @@
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

include:
  - .common-ci.yml

# Define the package build helpers
.multi-arch-build:
  before_script:
    - apk add --no-cache coreutils build-base sed git bash make
    - '[[ -n "${SKIP_QEMU_SETUP}" ]] || docker run --rm --privileged multiarch/qemu-user-static --reset -p yes -c yes'
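The privileged multiarch/qemu-user-static run registers qemu interpreters with the host kernel's binfmt_misc handler, which is what lets the foreign-architecture package builds below execute on an amd64 runner. Whether the handlers took effect can be checked on the runner host with something like:

ls /proc/sys/fs/binfmt_misc/
cat /proc/sys/fs/binfmt_misc/qemu-aarch64    # should report "enabled" and the interpreter path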
.package-artifacts:
  variables:
    ARTIFACTS_NAME: "toolkit-container-${CI_PIPELINE_ID}"
    ARTIFACTS_ROOT: "toolkit-container-${CI_PIPELINE_ID}"
    DIST_DIR: ${CI_PROJECT_DIR}/${ARTIFACTS_ROOT}

.package-build:
  extends:
    - .multi-arch-build
    - .package-artifacts
  stage: package-build
  timeout: 3h
  script:
    - ./scripts/build-packages.sh ${DIST}-${ARCH}

  artifacts:
    name: ${ARTIFACTS_NAME}
    paths:
      - ${ARTIFACTS_ROOT}
  needs:
    - job: package-meta-packages
      artifacts: true

# Define the package build targets
package-meta-packages:
  extends:
    - .package-artifacts
  stage: package-build
  variables:
    SKIP_LIBNVIDIA_CONTAINER: "yes"
    SKIP_NVIDIA_CONTAINER_TOOLKIT: "yes"
  parallel:
    matrix:
      - PACKAGING: [deb, rpm]
  before_script:
    - apk add --no-cache coreutils build-base sed git bash make
  script:
    - ./scripts/build-packages.sh ${PACKAGING}
  artifacts:
    name: ${ARTIFACTS_NAME}
    paths:
      - ${ARTIFACTS_ROOT}
package-centos7-aarch64:
  extends:
    - .package-build
    - .dist-centos7
    - .arch-aarch64

package-centos7-x86_64:
  extends:
    - .package-build
    - .dist-centos7
    - .arch-x86_64

package-centos8-ppc64le:
  extends:
    - .package-build
    - .dist-centos8
    - .arch-ppc64le

package-ubuntu18.04-amd64:
  extends:
    - .package-build
    - .dist-ubuntu18.04
    - .arch-amd64

package-ubuntu18.04-arm64:
  extends:
    - .package-build
    - .dist-ubuntu18.04
    - .arch-arm64

package-ubuntu18.04-ppc64le:
  extends:
    - .package-build
    - .dist-ubuntu18.04
    - .arch-ppc64le
.buildx-setup:
  before_script:
    - export BUILDX_VERSION=v0.6.3
    - apk add --no-cache curl
    - mkdir -p ~/.docker/cli-plugins
    - curl -sSLo ~/.docker/cli-plugins/docker-buildx "https://github.com/docker/buildx/releases/download/${BUILDX_VERSION}/buildx-${BUILDX_VERSION}.linux-amd64"
    - chmod a+x ~/.docker/cli-plugins/docker-buildx

    - docker buildx create --use --platform=linux/amd64,linux/arm64

    - '[[ -n "${SKIP_QEMU_SETUP}" ]] || docker run --rm --privileged multiarch/qemu-user-static --reset -p yes'
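With this builder active, a multi-arch build-and-push collapses to one buildx invocation. The Makefile targets invoked below are not part of this diff, but they presumably wrap something along these lines (the Dockerfile path and tag are illustrative):

docker buildx build \
  --platform=linux/amd64,linux/arm64 \
  --push \
  --tag "${IMAGE_NAME}:${VERSION}-${DIST}" \
  -f deployments/container/Dockerfile .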
# Define the image build targets
.image-build:
  stage: image-build
  variables:
    IMAGE_NAME: "${CI_REGISTRY_IMAGE}/container-toolkit"
    VERSION: "${CI_COMMIT_SHORT_SHA}"
    PUSH_ON_BUILD: "true"
  before_script:
    - !reference [.buildx-setup, before_script]

    - apk add --no-cache bash make git
    - 'echo "Logging in to CI registry ${CI_REGISTRY}"'
    - docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
  script:
    - make -f deployments/container/Makefile build-${DIST}

image-ubi8:
  extends:
    - .image-build
    - .package-artifacts
    - .dist-ubi8
  needs:
    # Note: The ubi8 image uses the centos7 packages
    - package-centos7-aarch64
    - package-centos7-x86_64
image-ubuntu20.04:
  extends:
    - .image-build
    - .package-artifacts
    - .dist-ubuntu20.04
  needs:
    - package-ubuntu18.04-amd64
    - package-ubuntu18.04-arm64
    - job: package-ubuntu18.04-ppc64le
      optional: true

# The DIST=packaging target creates an image containing all built packages
image-packaging:
  extends:
    - .image-build
    - .package-artifacts
    - .dist-packaging
  needs:
    - job: package-ubuntu18.04-amd64
    - job: package-ubuntu18.04-arm64
    - job: package-amazonlinux2-aarch64
      optional: true
    - job: package-amazonlinux2-x86_64
      optional: true
    - job: package-centos7-aarch64
      optional: true
    - job: package-centos7-x86_64
      optional: true
    - job: package-centos8-ppc64le
      optional: true
    - job: package-debian10-amd64
      optional: true
    - job: package-opensuse-leap15.1-x86_64
      optional: true
    - job: package-ubuntu18.04-ppc64le
      optional: true
# Define publish test helpers
.test:docker:
  extends:
    - .integration
  variables:
    TEST_CASES: "docker"

.test:containerd:
  # TODO: The containerd tests fail due to issues with SIGHUP.
  # Until this is resolved we retry up to twice and allow failure here.
  retry: 2
  allow_failure: true
  extends:
    - .integration
  variables:
    TEST_CASES: "containerd"

.test:crio:
  extends:
    - .integration
  variables:
    TEST_CASES: "crio"

# Define the test targets
test-toolkit-ubuntu20.04:
  extends:
    - .test:toolkit
    - .dist-ubuntu20.04
  needs:
    - image-ubuntu20.04

test-containerd-ubuntu20.04:
  extends:
    - .test:containerd
    - .dist-ubuntu20.04
  needs:
    - image-ubuntu20.04

test-crio-ubuntu20.04:
  extends:
    - .test:crio
    - .dist-ubuntu20.04
  needs:
    - image-ubuntu20.04

test-docker-ubuntu20.04:
  extends:
    - .test:docker
    - .dist-ubuntu20.04
  needs:
    - image-ubuntu20.04
.nvidia-ci.yml: 309 lines changed
@@ -39,62 +39,19 @@ variables:
  KITMAKER_RELEASE_FOLDER: "kitmaker"
  PACKAGE_ARCHIVE_RELEASE_FOLDER: "releases"

# .copy-images copies the required application and packaging images from the
#   IN_IMAGE="${IN_IMAGE_NAME}:${IN_IMAGE_TAG}${TAG_SUFFIX}"
# to
#   OUT_IMAGE="${OUT_IMAGE_NAME}:${OUT_IMAGE_TAG}${TAG_SUFFIX}"
# The script also logs into IN_REGISTRY and OUT_REGISTRY using the supplied
# username and tokens.
.copy-images:
  parallel:
    matrix:
      - TAG_SUFFIX: ["", "-packaging"]
  before_script:
    - !reference [.regctl-setup, before_script]
    - apk add --no-cache make bash
  variables:
    REGCTL: regctl
  script:
    - |
      if [ -n ${IN_REGISTRY} ] && [ -n ${IN_REGISTRY_USER} ]; then
        echo "Logging in to ${IN_REGISTRY}"
        ${REGCTL} registry login "${IN_REGISTRY}" -u "${IN_REGISTRY_USER}" -p "${IN_REGISTRY_TOKEN}" || exit 1
      fi

      if [ -n ${OUT_REGISTRY} ] && [ -n ${OUT_REGISTRY_USER} ] && [ "${IN_REGISTRY}" != "${OUT_REGISTRY}" ]; then
        echo "Logging in to ${OUT_REGISTRY}"
        ${REGCTL} registry login "${OUT_REGISTRY}" -u "${OUT_REGISTRY_USER}" -p "${OUT_REGISTRY_TOKEN}" || exit 1
      fi

      export IN_IMAGE="${IN_IMAGE_NAME}:${IN_IMAGE_TAG}${TAG_SUFFIX}"
      export OUT_IMAGE="${OUT_IMAGE_NAME}:${OUT_IMAGE_TAG}${TAG_SUFFIX}"

      echo "Copying ${IN_IMAGE} to ${OUT_IMAGE}"
      ${REGCTL} image copy ${IN_IMAGE} ${OUT_IMAGE}

# pull-images pulls images from the public CI registry to the internal CI registry.
pull-images:
  extends:
    - .copy-images
  stage: pull
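regctl manipulates registry content directly over the registry API, so the copy above moves a multi-arch image (manifest list plus all platform images) without pulling it through the Docker daemon. A standalone equivalent of one iteration of the matrix, with hypothetical registry names and tag:

regctl registry login registry.example.com -u "$IN_USER" -p "$IN_TOKEN"
regctl registry login nvcr.io -u "$OUT_USER" -p "$OUT_TOKEN"
regctl image copy \
  registry.example.com/nvidia/container-toolkit:abc1234-packaging \
  nvcr.io/staging/container-toolkit:abc1234-packaging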
.image-pull:
  stage: image-build
  variables:
    IN_REGISTRY: "${STAGING_REGISTRY}"
    IN_IMAGE_NAME: ${STAGING_REGISTRY}/container-toolkit
    IN_IMAGE_TAG: "${STAGING_VERSION}"

    OUT_REGISTRY: "${CI_REGISTRY}"
    IN_IMAGE_NAME: container-toolkit
    IN_VERSION: "${STAGING_VERSION}"
    OUT_REGISTRY_USER: "${CI_REGISTRY_USER}"
    OUT_REGISTRY_TOKEN: "${CI_REGISTRY_PASSWORD}"
    OUT_REGISTRY: "${CI_REGISTRY}"
    OUT_IMAGE_NAME: "${CI_REGISTRY_IMAGE}/container-toolkit"
    OUT_IMAGE_TAG: "${CI_COMMIT_SHORT_SHA}"
    PUSH_MULTIPLE_TAGS: "false"
  # We delay the job start to allow the public pipeline to generate the required images.
  rules:
    # If the pipeline is triggered from a tag or the WEB UI we don't delay the
    # start of the pipeline.
    - if: $CI_COMMIT_TAG || $CI_PIPELINE_SOURCE == "web"
    # If the pipeline is triggered through other means (i.e. a branch or MR)
    # we add a 30 minute delay to ensure that the images are available in the
    # public CI registry.
    - when: delayed
      start_in: 30 minutes
  timeout: 30 minutes
@@ -103,6 +60,30 @@ pull-images:
    when:
      - job_execution_timeout
      - stuck_or_timeout_failure
  before_script:
    - !reference [.regctl-setup, before_script]
    - apk add --no-cache make bash
    - >
      regctl manifest get ${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST} --list > /dev/null && echo "${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST}" || ( echo "${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST} does not exist" && sleep infinity )
  script:
    - regctl registry login "${OUT_REGISTRY}" -u "${OUT_REGISTRY_USER}" -p "${OUT_REGISTRY_TOKEN}"
    - make -f deployments/container/Makefile IMAGE=${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST} OUT_IMAGE=${OUT_IMAGE_NAME}:${CI_COMMIT_SHORT_SHA}-${DIST} push-${DIST}

image-ubi8:
  extends:
    - .dist-ubi8
    - .image-pull

image-ubuntu20.04:
  extends:
    - .dist-ubuntu20.04
    - .image-pull

# The DIST=packaging target creates an image containing all built packages
image-packaging:
  extends:
    - .dist-packaging
    - .image-pull
# We skip the integration tests for the internal CI:
.integration:
@@ -114,37 +95,27 @@ pull-images:

# The .scan step forms the base of the image scan operation performed before releasing
# images.
scan-images:
.scan:
  stage: scan
  needs:
    - pull-images
  image: "${PULSE_IMAGE}"
  parallel:
    matrix:
      - TAG_SUFFIX: [""]
        PLATFORM: ["linux/amd64", "linux/arm64"]
      - TAG_SUFFIX: "-packaging"
        PLATFORM: "linux/amd64"
  variables:
    IMAGE: "${CI_REGISTRY_IMAGE}/container-toolkit:${CI_COMMIT_SHORT_SHA}"
    IMAGE_ARCHIVE: "container-toolkit-${CI_JOB_ID}.tar"
    IMAGE: "${CI_REGISTRY_IMAGE}/container-toolkit:${CI_COMMIT_SHORT_SHA}-${DIST}"
    IMAGE_ARCHIVE: "container-toolkit-${DIST}-${ARCH}-${CI_JOB_ID}.tar"
  rules:
    - if: $IGNORE_SCANS == "yes"
      allow_failure: true
    - when: on_success
  script:
    - |
      docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
      export SCAN_IMAGE=${IMAGE}${TAG_SUFFIX}
      echo "Scanning image ${SCAN_IMAGE} for ${PLATFORM}"
      docker pull --platform="${PLATFORM}" "${SCAN_IMAGE}"
      docker save "${SCAN_IMAGE}" -o "${IMAGE_ARCHIVE}"
      AuthHeader=$(echo -n $SSA_CLIENT_ID:$SSA_CLIENT_SECRET | base64 -w0)
    - if: $SKIP_SCANS != "yes"
    - when: manual
  before_script:
    - docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
    # TODO: We should specify the architecture here and scan all architectures
    - docker pull --platform="${PLATFORM}" "${IMAGE}"
    - docker save "${IMAGE}" -o "${IMAGE_ARCHIVE}"
    - AuthHeader=$(echo -n $SSA_CLIENT_ID:$SSA_CLIENT_SECRET | base64 -w0)
    - >
      export SSA_TOKEN=$(curl --request POST --header "Authorization: Basic $AuthHeader" --header "Content-Type: application/x-www-form-urlencoded" ${SSA_ISSUER_URL} | jq ".access_token" | tr -d '"')

      if [ -z "$SSA_TOKEN" ]; then exit 1; else echo "SSA_TOKEN set!"; fi

      pulse-cli -n $NSPECT_ID --ssa $SSA_TOKEN scan -i $IMAGE_ARCHIVE -p $CONTAINER_POLICY -o

      rm -f "${IMAGE_ARCHIVE}"
    - if [ -z "$SSA_TOKEN" ]; then exit 1; else echo "SSA_TOKEN set!"; fi
  script:
    - pulse-cli -n $NSPECT_ID --ssa $SSA_TOKEN scan -i $IMAGE_ARCHIVE -p $CONTAINER_POLICY -o
    - rm -f "${IMAGE_ARCHIVE}"
  artifacts:
    when: always
    expire_in: 1 week
@@ -155,10 +126,62 @@ scan-images:
      - vulns.json
      - policy_evaluation.json

upload-kitmaker-packages:
# Define the scan targets
scan-ubuntu20.04-amd64:
  extends:
    - .dist-ubuntu20.04
    - .platform-amd64
    - .scan
  needs:
    - image-ubuntu20.04

scan-ubuntu20.04-arm64:
  extends:
    - .dist-ubuntu20.04
    - .platform-arm64
    - .scan
  needs:
    - image-ubuntu20.04
    - scan-ubuntu20.04-amd64

scan-ubi8-amd64:
  extends:
    - .dist-ubi8
    - .platform-amd64
    - .scan
  needs:
    - image-ubi8

scan-ubi8-arm64:
  extends:
    - .dist-ubi8
    - .platform-arm64
    - .scan
  needs:
    - image-ubi8
    - scan-ubi8-amd64

scan-packaging:
  extends:
    - .dist-packaging
    - .scan
  needs:
    - image-packaging
# Define external release helpers
.release:ngc:
  extends:
    - .release:external
  variables:
    OUT_REGISTRY_USER: "${NGC_REGISTRY_USER}"
    OUT_REGISTRY_TOKEN: "${NGC_REGISTRY_TOKEN}"
    OUT_REGISTRY: "${NGC_REGISTRY}"
    OUT_IMAGE_NAME: "${NGC_REGISTRY_IMAGE}"

.release:packages:
  stage: release
  needs:
    - pull-images
    - image-packaging
  variables:
    VERSION: "${CI_COMMIT_SHORT_SHA}"
    PACKAGE_REGISTRY: "${CI_REGISTRY}"
@@ -176,81 +199,34 @@ upload-kitmaker-packages:
    - ./scripts/release-kitmaker-artifactory.sh "${KITMAKER_ARTIFACTORY_REPO}"
    - rm -rf ${ARTIFACTS_DIR}
push-images-to-staging:
# Define the package release targets
release:packages:kitmaker:
  extends:
    - .copy-images
  stage: release
    - .release:packages

release:staging-ubuntu20.04:
  extends:
    - .release:staging
    - .dist-ubuntu20.04
  needs:
    - scan-images
  variables:
    IN_REGISTRY: "${CI_REGISTRY}"
    IN_REGISTRY_USER: "${CI_REGISTRY_USER}"
    IN_REGISTRY_TOKEN: "${CI_REGISTRY_PASSWORD}"
    IN_IMAGE_NAME: "${CI_REGISTRY_IMAGE}/container-toolkit"
    IN_IMAGE_TAG: "${CI_COMMIT_SHORT_SHA}"
    - image-ubuntu20.04

    OUT_REGISTRY: "${NGC_REGISTRY}"
    OUT_REGISTRY_USER: "${NGC_REGISTRY_USER}"
    OUT_REGISTRY_TOKEN: "${NGC_REGISTRY_TOKEN}"
    OUT_IMAGE_NAME: "${NGC_STAGING_REGISTRY}/container-toolkit"
    OUT_IMAGE_TAG: "${CI_COMMIT_SHORT_SHA}"

.release-images:
# Define the external release targets
# Release to NGC
release:ngc-ubuntu20.04:
  extends:
    - .copy-images
  stage: release
  needs:
    - scan-images
    - push-images-to-staging
  variables:
    IN_REGISTRY: "${CI_REGISTRY}"
    IN_REGISTRY_USER: "${CI_REGISTRY_USER}"
    IN_REGISTRY_TOKEN: "${CI_REGISTRY_PASSWORD}"
    IN_IMAGE_NAME: "${CI_REGISTRY_IMAGE}/container-toolkit"
    IN_IMAGE_TAG: "${CI_COMMIT_SHORT_SHA}"
    - .dist-ubuntu20.04
    - .release:ngc

    OUT_REGISTRY: "${NGC_REGISTRY}"
    OUT_REGISTRY_USER: "${NGC_REGISTRY_USER}"
    OUT_REGISTRY_TOKEN: "${NGC_REGISTRY_TOKEN}"
    OUT_IMAGE_NAME: "${NGC_REGISTRY_IMAGE}"
    OUT_IMAGE_TAG: "${CI_COMMIT_TAG}"

release-images-to-ngc:
release:ngc-ubi8:
  extends:
    - .release-images
  rules:
    - if: $CI_COMMIT_TAG
    - .dist-ubi8
    - .release:ngc

release-images-dummy:
release:ngc-packaging:
  extends:
    - .release-images
  variables:
    REGCTL: "echo [DUMMY] regctl"
  rules:
    - if: $CI_COMMIT_TAG == null || $CI_COMMIT_TAG == ""
# .sign-images forms the base of the jobs which sign images in the NGC registry.
.sign-images:
  stage: sign
  image: ubuntu:latest
  parallel:
    matrix:
      - TAG_SUFFIX: ["", "-packaging"]
  variables:
    IMAGE_NAME: "${NGC_REGISTRY_IMAGE}"
    IMAGE_TAG: "${CI_COMMIT_TAG}"
    NGC_CLI: "ngc-cli/ngc"
  before_script:
    - !reference [.ngccli-setup, before_script]
  script:
    - |
      # We ensure that the IMAGE_NAME and IMAGE_TAG are set
      echo Image Name: ${IMAGE_NAME} && [[ -n "${IMAGE_NAME}" ]] || exit 1
      echo Image Tag: ${IMAGE_TAG} && [[ -n "${IMAGE_TAG}" ]] || exit 1

      export IMAGE=${IMAGE_NAME}:${IMAGE_TAG}${TAG_SUFFIX}
      echo "Signing the image ${IMAGE}"
      ${NGC_CLI} registry image publish --source ${IMAGE} ${IMAGE} --public --discoverable --allow-guest --sign --org nvidia
    - .dist-packaging
    - .release:ngc

# Define the external image signing steps for NGC
# Download the ngc cli binary for use in the sign steps
@@ -268,24 +244,45 @@ release-images-dummy:
    - unzip ngccli_linux.zip
    - chmod u+x ngc-cli/ngc

sign-ngc-images:
  extends:
    - .sign-images
  needs:
    - release-images-to-ngc
# .sign forms the base of the deployment jobs which sign images in the CI registry.
# This is extended with the image name and version to be deployed.
.sign:ngc:
  image: ubuntu:latest
  stage: sign
  rules:
    - if: $CI_COMMIT_TAG
  variables:
    NGC_CLI_API_KEY: "${NGC_REGISTRY_TOKEN}"
    IMAGE_NAME: "${NGC_REGISTRY_IMAGE}"
    IMAGE_TAG: "${CI_COMMIT_TAG}-${DIST}"
  retry:
    max: 2
  before_script:
    - !reference [.ngccli-setup, before_script]
    # We ensure that the IMAGE_NAME and IMAGE_TAG are set
    - 'echo Image Name: ${IMAGE_NAME} && [[ -n "${IMAGE_NAME}" ]] || exit 1'
    - 'echo Image Tag: ${IMAGE_TAG} && [[ -n "${IMAGE_TAG}" ]] || exit 1'
  script:
    - 'echo "Signing the image ${IMAGE_NAME}:${IMAGE_TAG}"'
    - ngc-cli/ngc registry image publish --source ${IMAGE_NAME}:${IMAGE_TAG} ${IMAGE_NAME}:${IMAGE_TAG} --public --discoverable --allow-guest --sign --org nvidia

sign-images-dummy:
sign:ngc-ubuntu20.04:
  extends:
    - .sign-images
    - .dist-ubuntu20.04
    - .sign:ngc
  needs:
    - release-images-dummy
  variables:
    NGC_CLI: "echo [DUMMY] ngc-cli/ngc"
  rules:
    - if: $CI_COMMIT_TAG == null || $CI_COMMIT_TAG == ""
    - release:ngc-ubuntu20.04

sign:ngc-ubi8:
  extends:
    - .dist-ubi8
    - .sign:ngc
  needs:
    - release:ngc-ubi8

sign:ngc-packaging:
  extends:
    - .dist-packaging
    - .sign:ngc
  needs:
    - release:ngc-packaging
CHANGELOG.md: 147 lines changed
@@ -1,139 +1,34 @@
# NVIDIA Container Toolkit Changelog

## v1.18.0-rc.1

- Add create-soname-symlinks hook
- Require matching version of libnvidia-container-tools
- Add envvar for libcuda.so parent dir to CDI spec
- Add EnvVar to Discover interface
- Resolve to legacy by default in nvidia-container-runtime-hook
- Default to jit-cdi mode in the nvidia runtime
- Use functional options to construct runtime mode resolver
- Add NVIDIA_CTK_CONFIG_FILE_PATH envvar
- Switch to cuda ubi9 base image
- Use single version tag for image
- BUGFIX: modifier: respect GPU volume-mount device requests
- Ensure consistent sorting of annotation devices
- Extract deb and rpm packages to single image
- Remove docker-run as default runtime candidate
- Return annotation devices from VisibleDevices
- Make CDI device requests consistent with other methods
- Construct container info once
- Add logic to extract annotation device requests to image type
- Add IsPrivileged function to CUDA container type
- Add device IDs to nvcdi.GetSpec API
- Refactor extracting requested devices from the container image
- Add EnvVars option for all nvidia-ctk cdi commands
- Add nvidia-cdi-refresh service
- Add discovery of arch-specific vulkan ICD
- Add disabled-device-node-modification hook to CDI spec
- Add a hook to disable device node creation in a container
- Remove redundant deduplication of search paths for WSL
- Added ability to disable specific (or all) CDI hooks
- Consolidate HookName functionality on internal/discover pkg
- Add envvar to control debug logging in CDI hooks
- Add FeatureFlags to the nvcdi API
- Reenable nvsandboxutils for driver discovery
- Edit discover.mounts to have a deterministic output
- Refactor the way we create CDI Hooks
- Issue warning on unsupported CDI hook
- Run update-ldcache in isolated namespaces
- Add cuda-compat-mode config option
- Fix mode detection on Thor-based systems
- Add rprivate to CDI mount options
- Skip nil discoverers in merge
- bump runc go dep to v1.3.0
- Fix resolution of libs in LDCache on ARM
- Updated .release:staging to stage images in nvstaging
- Refactor toolkit installer
- Allow container runtime executable path to be specified
- Add support for building ubuntu22.04 on arm64
- Fix race condition in mounts cache
- Add support for building ubuntu22.04 on amd64
- Fix update-ldcache arguments
- Remove positional arguments from nvidia-ctk-installer
- Remove deprecated --runtime-args from nvidia-ctk-installer
- Add version info to nvidia-ctk-installer
- Update nvidia-ctk-installer app name to match binary name
- Allow nvidia-ctk config --set to accept comma-separated lists
- Disable enable-cuda-compat hook for management containers
- Allow enable-cuda-compat hook to be disabled in CDI spec generation
- Add disable-cuda-compat-lib-hook feature flag
- Add basic integration tests for forward compat
- Ensure that mode hook is executed last
- Add enable-cuda-compat hook to CDI spec generation
- Add ldconfig hook in legacy mode
- Add enable-cuda-compat hook if required
- Add enable-cuda-compat hook to allow compat libs to be discovered
- Use libcontainer execseal to run ldconfig
- Add ignore-imex-channel-requests feature flag
- Disable nvsandboxutils in nvcdi API
- Allow cdi mode to work with --gpus flag
- Add E2E GitHub Action for Container Toolkit
- Add remote-test option for E2E
- Enable CDI in runtime if CDI_ENABLED is set
- Fix overwriting docker feature flags
- Add option in toolkit container to enable CDI in runtime
- Remove Set from engine config API
- Add EnableCDI() method to engine.Interface
- Add IMEX binaries to CDI discovery
- Rename test folder to tests

## v1.17.4

- Disable mounting of compat libs from container by default
- Add allow-cuda-compat-libs-from-container feature flag
- Disable mounting of compat libs from container
- Skip graphics modifier in CSV mode
- Move nvidia-toolkit to nvidia-ctk-installer
- Automated regression testing for the NVIDIA Container Toolkit
- Add support for containerd version 3 config
- Remove watch option from create-dev-char-symlinks
- Add string TOML source
- Improve the implementation for UseLegacyConfig
- Properly pass configSearchPaths to a Driver constructor
- Fix create-device-node test when devices exist
- Add imex mode to CDI spec generation
- Only allow host-relative LDConfig paths
- Fix NVIDIA_IMEX_CHANNELS handling on legacy images
- Fix bug in default config file path
- Fix fsnotify.Remove logic function.
- Force symlink creation in create-symlink hook

### Changes in the Toolkit Container

- Create /work/nvidia-toolkit symlink
- Use Apache license for images
- Switch to golang distroless image
- Switch to cuda ubi9 base image
- Use single version tag for image
- Extract deb and rpm packages to single image
- Bump nvidia/cuda in /deployments/container
- Bump nvidia/cuda in /deployments/container
- Add E2E GitHub Action for Container Toolkit
- Bump nvidia/cuda in /deployments/container
- Move nvidia-toolkit to nvidia-ctk-installer
- Add support for containerd version 3 config
- Improve the implementation for UseLegacyConfig
- Bump nvidia/cuda in /deployments/container
- Add imex mode to CDI spec generation
- Only allow host-relative LDConfig paths
- Fallback to file for runtime config
- Add string TOML source

### Changes in libnvidia-container

- Fix pointer accessing local variable out of scope
- Require version match between libnvidia-container-tools and libnvidia-container1
- Add libnvidia-gpucomp.so to the list of compute libs
- Use VERSION_ prefix for version parts in makefiles
- Add additional logging
- Do not discard container flags when --cuda-compat-mode is not specified
- Remove unneeded --no-cntlibs argument from list command
- Add cuda-compat-mode flag to configure command
- Skip files when user has insufficient permissions
- Fix building with Go 1.24
- Add no-cntlibs CLI option to nvidia-container-cli
- Fix always using fallback
- Add fallback for systems without memfd_create()
- Create virtual copy of host ldconfig binary before calling fexecve()
- Fix some typos in text.

### Changes in the Toolkit Container
- Bump CUDA base image version to 12.6.3

## v1.17.3
- Only allow host-relative LDConfig paths by default.
### Changes in libnvidia-container
- Create virtual copy of host ldconfig binary before calling fexecve()

## v1.17.2
- Fixed a bug where legacy images would set imex channels as `all`.

## v1.17.1
- Fixed a bug where specific symlinks existing in a container image could cause a container to fail to start.
- Fixed a bug on Tegra-based systems where a container would fail to start.
- Fixed a bug where the default container runtime config path was not properly set.

### Changes in the Toolkit Container
- Fallback to using a config file if the current runtime config can not be determined from the command line.

## v1.17.0
- Promote v1.17.0-rc.2 to v1.17.0
@@ -20,7 +20,6 @@ import (
	"github.com/urfave/cli/v2"

	"github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-cdi-hook/chmod"
	createsonamesymlinks "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-cdi-hook/create-soname-symlinks"
	symlinks "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-cdi-hook/create-symlinks"
	"github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-cdi-hook/cudacompat"
	disabledevicenodemodification "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-cdi-hook/disable-device-node-modification"
@@ -36,7 +35,6 @@ func New(logger logger.Interface) []*cli.Command {
		symlinks.NewCommand(logger),
		chmod.NewCommand(logger),
		cudacompat.NewCommand(logger),
		createsonamesymlinks.NewCommand(logger),
		disabledevicenodemodification.NewCommand(logger),
	}
}
@@ -1,166 +0,0 @@
/**
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
**/

package create_soname_symlinks

import (
	"errors"
	"fmt"
	"log"
	"os"

	"github.com/moby/sys/reexec"
	"github.com/urfave/cli/v2"

	"github.com/NVIDIA/nvidia-container-toolkit/internal/ldconfig"
	"github.com/NVIDIA/nvidia-container-toolkit/internal/logger"
	"github.com/NVIDIA/nvidia-container-toolkit/internal/oci"
)

const (
	reexecUpdateLdCacheCommandName = "reexec-create-soname-symlinks"
)

type command struct {
	logger logger.Interface
}

type options struct {
	folders       cli.StringSlice
	ldconfigPath  string
	containerSpec string
}

func init() {
	reexec.Register(reexecUpdateLdCacheCommandName, createSonameSymlinksHandler)
	if reexec.Init() {
		os.Exit(0)
	}
}

// NewCommand constructs a create-soname-symlinks command with the specified logger
func NewCommand(logger logger.Interface) *cli.Command {
	c := command{
		logger: logger,
	}
	return c.build()
}

// build the create-soname-symlinks command
func (m command) build() *cli.Command {
	cfg := options{}

	// Create the 'create-soname-symlinks' command
	c := cli.Command{
		Name:  "create-soname-symlinks",
		Usage: "Create soname symlinks libraries in specified directories",
		Before: func(c *cli.Context) error {
			return m.validateFlags(c, &cfg)
		},
		Action: func(c *cli.Context) error {
			return m.run(c, &cfg)
		},
	}

	c.Flags = []cli.Flag{
		&cli.StringSliceFlag{
			Name:        "folder",
			Usage:       "Specify a directory to generate soname symlinks in. Can be specified multiple times",
			Destination: &cfg.folders,
		},
		&cli.StringFlag{
			Name:        "ldconfig-path",
			Usage:       "Specify the path to ldconfig on the host",
			Destination: &cfg.ldconfigPath,
			Value:       "/sbin/ldconfig",
		},
		&cli.StringFlag{
			Name:        "container-spec",
			Usage:       "Specify the path to the OCI container spec. If empty or '-' the spec will be read from STDIN",
			Destination: &cfg.containerSpec,
		},
	}

	return &c
}

func (m command) validateFlags(c *cli.Context, cfg *options) error {
	if cfg.ldconfigPath == "" {
		return errors.New("ldconfig-path must be specified")
	}
	return nil
}

func (m command) run(c *cli.Context, cfg *options) error {
	s, err := oci.LoadContainerState(cfg.containerSpec)
	if err != nil {
		return fmt.Errorf("failed to load container state: %v", err)
	}

	containerRootDir, err := s.GetContainerRoot()
	if err != nil || containerRootDir == "" || containerRootDir == "/" {
		return fmt.Errorf("failed to determined container root: %v", err)
	}

	cmd, err := ldconfig.NewRunner(
		reexecUpdateLdCacheCommandName,
		cfg.ldconfigPath,
		containerRootDir,
		cfg.folders.Value()...,
	)
	if err != nil {
		return err
	}

	return cmd.Run()
}

// createSonameSymlinksHandler wraps createSonameSymlinks with error handling.
func createSonameSymlinksHandler() {
	if err := createSonameSymlinks(os.Args); err != nil {
		log.Printf("Error updating ldcache: %v", err)
		os.Exit(1)
	}
}

// createSonameSymlinks ensures that soname symlinks are created in the
// specified directories.
// It is invoked from a reexec'd handler and provides namespace isolation for
// the operations performed by this hook. At the point where this is invoked,
// we are in a new mount namespace that is cloned from the parent.
//
// args[0] is the reexec initializer function name
// args[1] is the path of the ldconfig binary on the host
// args[2] is the container root directory
// The remaining args are directories where soname symlinks need to be created.
func createSonameSymlinks(args []string) error {
	if len(args) < 3 {
		return fmt.Errorf("incorrect arguments: %v", args)
	}
	hostLdconfigPath := args[1]
	containerRootDirPath := args[2]

	ldconfig, err := ldconfig.New(
		hostLdconfigPath,
		containerRootDirPath,
	)
	if err != nil {
		return fmt.Errorf("failed to construct ldconfig runner: %w", err)
	}

	return ldconfig.CreateSonameSymlinks(args[3:]...)
}
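The init() in the removed file relies on the reexec pattern from github.com/moby/sys/reexec, which also underpins the update-ldcache hook further down in this commit: the binary registers a named initializer, re-invokes itself with that name as argv[0] (optionally under new namespaces on Linux), and the child dispatches to the handler before main ever runs. A minimal standalone sketch of the mechanism, with an illustrative command name and payload:

package main

import (
	"fmt"
	"os"

	"github.com/moby/sys/reexec"
)

func init() {
	// Register a handler keyed by argv[0]; when the process is re-invoked
	// under this name, the handler runs instead of main.
	reexec.Register("my-isolated-task", func() {
		fmt.Println("running in the re-exec'd child")
	})
	// In the child, Init() finds and runs the handler and returns true,
	// so we exit before main executes.
	if reexec.Init() {
		os.Exit(0)
	}
}

func main() {
	// reexec.Command re-invokes the current binary (/proc/self/exe on
	// Linux) with argv[0] set to the registered name; namespace clone
	// flags can be attached via cmd.SysProcAttr, as the toolkit does.
	cmd := reexec.Command("my-isolated-task")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "reexec failed: %v\n", err)
		os.Exit(1)
	}
}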
cmd/nvidia-cdi-hook/update-ldcache/container-root.go (new file): 46 lines
@@ -0,0 +1,46 @@
/**
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
**/

package ldcache

import (
	"os"
	"path/filepath"

	"github.com/moby/sys/symlink"
)

// A containerRoot represents the root filesystem of a container.
type containerRoot string

// hasPath checks whether the specified path exists in the root.
func (r containerRoot) hasPath(path string) bool {
	resolved, err := r.resolve(path)
	if err != nil {
		return false
	}
	if _, err := os.Stat(resolved); err != nil && os.IsNotExist(err) {
		return false
	}
	return true
}

// resolve returns the absolute path including root path.
// Symlinks are resolved, but are guaranteed to resolve in the root.
func (r containerRoot) resolve(path string) (string, error) {
	absolute := filepath.Clean(filepath.Join(string(r), path))
	return symlink.FollowSymlinkInScope(absolute, string(r))
}
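FollowSymlinkInScope guarantees that symlink resolution cannot escape the given root, which is what makes hasPath safe against hostile links inside an untrusted rootfs. A hypothetical, self-contained illustration of the same type outside the package (the rootfs path is made up):

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/moby/sys/symlink"
)

// containerRoot mirrors the type from the diff for illustration only.
type containerRoot string

func (r containerRoot) resolve(path string) (string, error) {
	absolute := filepath.Clean(filepath.Join(string(r), path))
	return symlink.FollowSymlinkInScope(absolute, string(r))
}

func main() {
	root := containerRoot("/var/lib/containers/rootfs-1234") // hypothetical rootfs
	// Even if /etc/ld.so.cache inside the rootfs is a symlink pointing at
	// an absolute host path, the resolved result stays under the root.
	resolved, err := root.resolve("/etc/ld.so.cache")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(resolved)
}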
@@ -17,7 +17,7 @@
# limitations under the License.
**/

package ldconfig
package ldcache

import (
	"errors"
@@ -29,8 +29,8 @@ import (
	"syscall"

	securejoin "github.com/cyphar/filepath-securejoin"
	"github.com/moby/sys/reexec"

	"github.com/moby/sys/reexec"
	"github.com/opencontainers/runc/libcontainer/utils"
	"golang.org/x/sys/unix"
)

@@ -182,7 +182,7 @@ func createTmpFs(target string, size int) error {
// createReexecCommand creates a command that can be used to trigger the reexec
// initializer.
// On linux this command runs in new namespaces.
func createReexecCommand(args []string) (*exec.Cmd, error) {
func createReexecCommand(args []string) *exec.Cmd {
	cmd := reexec.Command(args...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout

@@ -196,5 +196,5 @@ func createReexecCommand(args []string) (*exec.Cmd, error) {
		syscall.CLONE_NEWNET,
	}

	return cmd, nil
	return cmd
}
@@ -17,11 +17,14 @@
# limitations under the License.
**/

package ldconfig
package ldcache

import (
	"fmt"
	"os"
	"os/exec"

	"github.com/moby/sys/reexec"
)

func pivotRoot(newroot string) error {
@@ -36,6 +39,13 @@ func mountProc(newroot string) error {
	return fmt.Errorf("not supported")
}

func createReexecCommand(args []string) (*exec.Cmd, error) {
	return nil, fmt.Errorf("not supported")
// createReexecCommand creates a command that can be used to trigger the reexec
// initializer.
func createReexecCommand(args []string) *exec.Cmd {
	cmd := reexec.Command(args...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	return cmd
}
@@ -16,7 +16,7 @@
# limitations under the License.
**/

package ldconfig
package ldcache

import (
	"fmt"
@@ -16,7 +16,7 @@
# limitations under the License.
**/

package ldconfig
package ldcache

import "syscall"
@@ -21,16 +21,24 @@ import (
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/moby/sys/reexec"
	"github.com/urfave/cli/v2"

	"github.com/NVIDIA/nvidia-container-toolkit/internal/ldconfig"
	"github.com/NVIDIA/nvidia-container-toolkit/internal/config"
	"github.com/NVIDIA/nvidia-container-toolkit/internal/logger"
	"github.com/NVIDIA/nvidia-container-toolkit/internal/oci"
)

const (
	// ldsoconfdFilenamePattern specifies the pattern for the filename
	// in ld.so.conf.d that includes references to the specified directories.
	// The 00-nvcr prefix is chosen to ensure that these libraries have a
	// higher precedence than other libraries on the system, but lower than
	// the 00-cuda-compat that is included in some containers.
	ldsoconfdFilenamePattern = "00-nvcr-*.conf"

	reexecUpdateLdCacheCommandName = "reexec-update-ldcache"
)

@@ -115,15 +123,15 @@ func (m command) run(c *cli.Context, cfg *options) error {
		return fmt.Errorf("failed to determined container root: %v", err)
	}

	cmd, err := ldconfig.NewRunner(
	args := []string{
		reexecUpdateLdCacheCommandName,
		cfg.ldconfigPath,
		strings.TrimPrefix(config.NormalizeLDConfigPath("@"+cfg.ldconfigPath), "@"),
		containerRootDir,
		cfg.folders.Value()...,
	)
	if err != nil {
		return err
	}
	args = append(args, cfg.folders.Value()...)

	cmd := createReexecCommand(args)

	return cmd.Run()
}
@@ -135,16 +143,15 @@ func updateLdCacheHandler() {
	}
}

// updateLdCache ensures that the ldcache in the container is updated to include
// libraries that are mounted from the host.
// It is invoked from a reexec'd handler and provides namespace isolation for
// the operations performed by this hook. At the point where this is invoked,
// we are in a new mount namespace that is cloned from the parent.
// updateLdCache is invoked from a reexec'd handler and provides namespace
// isolation for the operations performed by this hook.
// At the point where this is invoked, we are in a new mount namespace that is
// cloned from the parent.
//
// args[0] is the reexec initializer function name
// args[1] is the path of the ldconfig binary on the host
// args[2] is the container root directory
// The remaining args are folders where soname symlinks need to be created.
// The remaining args are folders that need to be added to the ldcache.
func updateLdCache(args []string) error {
	if len(args) < 3 {
		return fmt.Errorf("incorrect arguments: %v", args)
@@ -152,13 +159,97 @@ func updateLdCache(args []string) error {
	hostLdconfigPath := args[1]
	containerRootDirPath := args[2]

	ldconfig, err := ldconfig.New(
		hostLdconfigPath,
		containerRootDirPath,
	)
	if err != nil {
		return fmt.Errorf("failed to construct ldconfig runner: %w", err)
	// To prevent leaking the parent proc filesystem, we create a new proc mount
	// in the container root.
	if err := mountProc(containerRootDirPath); err != nil {
		return fmt.Errorf("error mounting /proc: %w", err)
	}

	return ldconfig.UpdateLDCache(args[3:]...)
	// We mount the host ldconfig before we pivot root since host paths are not
	// visible after the pivot root operation.
	ldconfigPath, err := mountLdConfig(hostLdconfigPath, containerRootDirPath)
	if err != nil {
		return fmt.Errorf("error mounting host ldconfig: %w", err)
	}

	// We pivot to the container root for the new process, this further limits
	// access to the host.
	if err := pivotRoot(containerRootDirPath); err != nil {
		return fmt.Errorf("error running pivot_root: %w", err)
	}

	return runLdconfig(ldconfigPath, args[3:]...)
}
// runLdconfig runs the ldconfig binary and ensures that the specified directories
// are processed for the ldcache.
func runLdconfig(ldconfigPath string, directories ...string) error {
	args := []string{
		"ldconfig",
		// Explicitly specify using /etc/ld.so.conf since the host's ldconfig may
		// be configured to use a different config file by default.
		// Note that since we apply the `-r {{ .containerRootDir }}` argument, /etc/ld.so.conf is
		// in the container.
		"-f", "/etc/ld.so.conf",
	}

	containerRoot := containerRoot("/")

	if containerRoot.hasPath("/etc/ld.so.cache") {
		args = append(args, "-C", "/etc/ld.so.cache")
	} else {
		args = append(args, "-N")
	}

	if containerRoot.hasPath("/etc/ld.so.conf.d") {
		err := createLdsoconfdFile(ldsoconfdFilenamePattern, directories...)
		if err != nil {
			return fmt.Errorf("failed to update ld.so.conf.d: %w", err)
		}
	} else {
		args = append(args, directories...)
	}

	return SafeExec(ldconfigPath, args, nil)
}

// createLdsoconfdFile creates a file at /etc/ld.so.conf.d/.
// The file is created at /etc/ld.so.conf.d/{{ .pattern }} using `CreateTemp` and
// contains the specified directories on each line.
func createLdsoconfdFile(pattern string, dirs ...string) error {
	if len(dirs) == 0 {
		return nil
	}

	ldsoconfdDir := "/etc/ld.so.conf.d"
	if err := os.MkdirAll(ldsoconfdDir, 0755); err != nil {
		return fmt.Errorf("failed to create ld.so.conf.d: %w", err)
	}

	configFile, err := os.CreateTemp(ldsoconfdDir, pattern)
	if err != nil {
		return fmt.Errorf("failed to create config file: %w", err)
	}
	defer func() {
		_ = configFile.Close()
	}()

	added := make(map[string]bool)
	for _, dir := range dirs {
		if added[dir] {
			continue
		}
		_, err = fmt.Fprintf(configFile, "%s\n", dir)
		if err != nil {
			return fmt.Errorf("failed to update config file: %w", err)
		}
		added[dir] = true
	}

	// The created file needs to be world readable for the cases where the container is run as a non-root user.
	if err := configFile.Chmod(0644); err != nil {
		return fmt.Errorf("failed to chmod config file: %w", err)
	}

	return nil
}
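Taken together, for a container that has both /etc/ld.so.cache and /etc/ld.so.conf.d, the post-pivot effect of these two functions is a drop-in file plus a plain ldconfig invocation, roughly as follows (the temp-file suffix and library directory are illustrative):

# contents written to e.g. /etc/ld.so.conf.d/00-nvcr-123456.conf:
/usr/lib/x86_64-linux-gnu/nvidia

# and the resulting invocation inside the container root:
ldconfig -f /etc/ld.so.conf -C /etc/ld.so.cache

If the container lacks ld.so.conf.d, the directories are instead passed as positional arguments; if it lacks ld.so.cache, -N suppresses cache generation and only the symlinks are updated.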
@@ -242,14 +242,7 @@ func (hookConfig *hookConfig) getNvidiaConfig(image image.CUDA, privileged bool)
	}
}

func (hookConfig *hookConfig) getContainerConfig() (config *containerConfig) {
	hookConfig.Lock()
	defer hookConfig.Unlock()

	if hookConfig.containerConfig != nil {
		return hookConfig.containerConfig
	}

func (hookConfig *hookConfig) getContainerConfig() (config containerConfig) {
	var h HookState
	d := json.NewDecoder(os.Stdin)
	if err := d.Decode(&h); err != nil {
@@ -278,13 +271,10 @@ func (hookConfig *hookConfig) getContainerConfig() (config *containerConfig) {
		log.Panicln(err)
	}

	cc := containerConfig{
	return containerConfig{
		Pid:    h.Pid,
		Rootfs: s.Root.Path,
		Image:  i,
		Nvidia: hookConfig.getNvidiaConfig(i, privileged),
	}
	hookConfig.containerConfig = &cc

	return hookConfig.containerConfig
}

@@ -487,7 +487,7 @@ func TestGetNvidiaConfig(t *testing.T) {
			hookCfg := tc.hookConfig
			if hookCfg == nil {
				defaultConfig, _ := config.GetDefault()
				hookCfg = &hookConfig{Config: defaultConfig}
				hookCfg = &hookConfig{defaultConfig}
			}
			cfg = hookCfg.getNvidiaConfig(image, tc.privileged)
		}
@@ -4,46 +4,50 @@ import (
"fmt"
"log"
"os"
"path"
"reflect"
"strings"
"sync"

"github.com/NVIDIA/nvidia-container-toolkit/internal/config"
"github.com/NVIDIA/nvidia-container-toolkit/internal/config/image"
"github.com/NVIDIA/nvidia-container-toolkit/internal/info"
)

const (
configPath = "/etc/nvidia-container-runtime/config.toml"
driverPath = "/run/nvidia/driver"
)

// hookConfig wraps the toolkit config.
// This allows for functions to be defined on the local type.
type hookConfig struct {
sync.Mutex
*config.Config
containerConfig *containerConfig
}

// loadConfig loads the required paths for the hook config.
func loadConfig() (*config.Config, error) {
configFilePath, required := getConfigFilePath()
cfg, err := config.New(
config.WithConfigFile(configFilePath),
config.WithRequired(true),
)
if err == nil {
return cfg.Config()
} else if os.IsNotExist(err) && !required {
return config.GetDefault()
var configPaths []string
var required bool
if len(*configflag) != 0 {
configPaths = append(configPaths, *configflag)
required = true
} else {
configPaths = append(configPaths, path.Join(driverPath, configPath), configPath)
}
return nil, fmt.Errorf("couldn't open required configuration file: %v", err)
}

func getConfigFilePath() (string, bool) {
if configFromFlag := *configflag; configFromFlag != "" {
return configFromFlag, true
for _, p := range configPaths {
cfg, err := config.New(
config.WithConfigFile(p),
config.WithRequired(true),
)
if err == nil {
return cfg.Config()
} else if os.IsNotExist(err) && !required {
continue
}
return nil, fmt.Errorf("couldn't open required configuration file: %v", err)
}
if configFromEnvvar := os.Getenv(config.FilePathOverrideEnvVar); configFromEnvvar != "" {
return configFromEnvvar, true
}
return config.GetConfigFilePath(), false

return config.GetDefault()
}

func getHookConfig() (*hookConfig, error) {
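The two interleaved implementations above differ mainly in how the config file path is chosen. A simplified sketch of the precedence that the flag/env-var variant of getConfigFilePath implements, using only the standard library (the helper and its signature are illustrative, not the toolkit's code):

    package main

    import (
        "fmt"
        "os"
    )

    // resolveConfigFilePath mirrors the precedence sketched above: an explicit
    // flag value wins, then the NVIDIA_CTK_CONFIG_FILE_PATH environment
    // variable, then a computed default. The boolean reports whether the file
    // is required to exist.
    func resolveConfigFilePath(flagValue, defaultPath string) (string, bool) {
        if flagValue != "" {
            return flagValue, true
        }
        if fromEnv := os.Getenv("NVIDIA_CTK_CONFIG_FILE_PATH"); fromEnv != "" {
            return fromEnv, true
        }
        return defaultPath, false
    }

    func main() {
        path, required := resolveConfigFilePath("", "/etc/nvidia-container-runtime/config.toml")
        fmt.Printf("config: %s (required: %v)\n", path, required)
    }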
@@ -51,7 +55,7 @@ func getHookConfig() (*hookConfig, error) {
if err != nil {
return nil, fmt.Errorf("failed to load config: %v", err)
}
config := &hookConfig{Config: cfg}
config := &hookConfig{cfg}

allSupportedDriverCapabilities := image.SupportedDriverCapabilities
if config.SupportedDriverCapabilities == "all" {
@@ -69,8 +73,8 @@ func getHookConfig() (*hookConfig, error) {

// getConfigOption returns the toml config option associated with the
// specified struct field.
func (c *hookConfig) getConfigOption(fieldName string) string {
t := reflect.TypeOf(&c)
func (c hookConfig) getConfigOption(fieldName string) string {
t := reflect.TypeOf(c)
f, ok := t.FieldByName(fieldName)
if !ok {
return fieldName
@@ -123,21 +127,3 @@ func (c *hookConfig) nvidiaContainerCliCUDACompatModeFlags() []string {
}
return []string{flag}
}

func (c *hookConfig) assertModeIsLegacy() error {
if c.NVIDIAContainerRuntimeHookConfig.SkipModeDetection {
return nil
}

mr := info.NewRuntimeModeResolver(
info.WithLogger(&logInterceptor{}),
info.WithImage(&c.containerConfig.Image),
info.WithDefaultMode(info.LegacyRuntimeMode),
)

mode := mr.ResolveRuntimeMode(c.NVIDIAContainerRuntimeConfig.Mode)
if mode == "legacy" {
return nil
}
return fmt.Errorf("invoking the NVIDIA Container Runtime Hook directly (e.g. specifying the docker --gpus flag) is not supported. Please use the NVIDIA Container Runtime (e.g. specify the --runtime=nvidia flag) instead")
}

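getConfigOption above resolves a struct field name to its toml key via reflection. A self-contained sketch of that pattern, assuming a hypothetical settings struct rather than the toolkit's actual Config type:

    package main

    import (
        "fmt"
        "reflect"
    )

    // settings is an illustrative stand-in for a toml-tagged config struct.
    type settings struct {
        DebugFilePath string `toml:"debug"`
    }

    // configOption looks up a struct field by name and returns its toml tag,
    // falling back to the field name itself when the field or tag is missing.
    // v must be a struct value; this sketch does no kind checking.
    func configOption(v interface{}, fieldName string) string {
        t := reflect.TypeOf(v)
        f, ok := t.FieldByName(fieldName)
        if !ok {
            return fieldName
        }
        tag, ok := f.Tag.Lookup("toml")
        if !ok {
            return fieldName
        }
        return tag
    }

    func main() {
        fmt.Println(configOption(settings{}, "DebugFilePath")) // prints "debug"
    }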
@@ -90,10 +90,10 @@ func TestGetHookConfig(t *testing.T) {
}
}

var cfg *hookConfig
var cfg hookConfig
getHookConfig := func() {
c, _ := getHookConfig()
cfg = c
cfg = *c
}

if tc.expectedPanic {

@@ -55,7 +55,7 @@ func getCLIPath(config config.ContainerCLIConfig) string {
}

// getRootfsPath returns an absolute path. We don't need to resolve symlinks for now.
func getRootfsPath(config *containerConfig) string {
func getRootfsPath(config containerConfig) string {
rootfs, err := filepath.Abs(config.Rootfs)
if err != nil {
log.Panicln(err)
@@ -82,8 +82,8 @@ func doPrestart() {
return
}

if err := hook.assertModeIsLegacy(); err != nil {
log.Panicf("%v", err)
if !hook.NVIDIAContainerRuntimeHookConfig.SkipModeDetection && info.ResolveAutoMode(&logInterceptor{}, hook.NVIDIAContainerRuntimeConfig.Mode, container.Image) != "legacy" {
log.Panicln("invoking the NVIDIA Container Runtime Hook directly (e.g. specifying the docker --gpus flag) is not supported. Please use the NVIDIA Container Runtime (e.g. specify the --runtime=nvidia flag) instead.")
}

rootfs := getRootfsPath(container)

@@ -122,10 +122,11 @@ func TestGoodInput(t *testing.T) {
err = cmdCreate.Run()
require.NoError(t, err, "runtime should not return an error")

// Check config.json to ensure that the NVIDIA prestart was not inserted.
// Check config.json for NVIDIA prestart hook
spec, err = cfg.getRuntimeSpec()
require.NoError(t, err, "should be no errors when reading and parsing spec from config.json")
require.Empty(t, spec.Hooks, "there should be no hooks in config.json")
require.NotEmpty(t, spec.Hooks, "there should be hooks in config.json")
require.Equal(t, 1, nvidiaHookCount(spec.Hooks), "exactly one nvidia prestart hook should be inserted correctly into config.json")
}

// NVIDIA prestart hook already present in config file
@@ -167,10 +168,11 @@ func TestDuplicateHook(t *testing.T) {
output, err := cmdCreate.CombinedOutput()
require.NoErrorf(t, err, "runtime should not return an error", "output=%v", string(output))

// Check config.json to ensure that the NVIDIA prestart hook was removed.
// Check config.json for NVIDIA prestart hook
spec, err = cfg.getRuntimeSpec()
require.NoError(t, err, "should be no errors when reading and parsing spec from config.json")
require.Empty(t, spec.Hooks, "there should be no hooks in config.json")
require.NotEmpty(t, spec.Hooks, "there should be hooks in config.json")
require.Equal(t, 1, nvidiaHookCount(spec.Hooks), "exactly one nvidia prestart hook should be inserted correctly into config.json")
}

// addNVIDIAHook is a basic wrapper for an addHookModifier that is used for
@@ -238,3 +240,18 @@ func (c testConfig) generateNewRuntimeSpec() error {
}
return nil
}

// Return number of valid NVIDIA prestart hooks in runtime spec
func nvidiaHookCount(hooks *specs.Hooks) int {
if hooks == nil {
return 0
}

count := 0
for _, hook := range hooks.Prestart {
if strings.Contains(hook.Path, nvidiaHook) {
count++
}
}
return count
}

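The nvidiaHookCount helper added at the end of the hunk is small enough to exercise in isolation. A standalone sketch against a hand-built OCI spec; the hook path substring here is an assumption for illustration:

    package main

    import (
        "fmt"
        "strings"

        "github.com/opencontainers/runtime-spec/specs-go"
    )

    // nvidiaHookCount mirrors the helper from the test diff above, with the
    // matched substring hard-coded for the example.
    func nvidiaHookCount(hooks *specs.Hooks) int {
        if hooks == nil {
            return 0
        }
        count := 0
        for _, hook := range hooks.Prestart {
            if strings.Contains(hook.Path, "nvidia-container-runtime-hook") {
                count++
            }
        }
        return count
    }

    func main() {
        hooks := &specs.Hooks{
            Prestart: []specs.Hook{
                {Path: "/usr/bin/nvidia-container-runtime-hook"},
                {Path: "/usr/bin/unrelated-hook"},
            },
        }
        fmt.Println(nvidiaHookCount(hooks)) // 1
    }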
@@ -14,7 +14,6 @@ import (
"github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk-installer/toolkit"
"github.com/NVIDIA/nvidia-container-toolkit/internal/info"
"github.com/NVIDIA/nvidia-container-toolkit/internal/logger"
"github.com/NVIDIA/nvidia-container-toolkit/internal/lookup"
)

const (
@@ -37,11 +36,10 @@ var signalReceived = make(chan bool, 1)
type options struct {
toolkitInstallDir string

noDaemon bool
runtime string
pidFile string
sourceRoot string
packageType string
noDaemon bool
runtime string
pidFile string
sourceRoot string

toolkitOptions toolkit.Options
runtimeOptions runtime.Options
@@ -125,17 +123,11 @@ func (a app) build() *cli.App {
EnvVars: []string{"TOOLKIT_INSTALL_DIR", "ROOT"},
},
&cli.StringFlag{
Name: "toolkit-source-root",
Usage: "The folder where the required toolkit artifacts can be found. If this is not specified, the path /artifacts/{{ .ToolkitPackageType }} is used where ToolkitPackageType is the resolved package type",
Name: "source-root",
Value: "/",
Usage: "The folder where the required toolkit artifacts can be found",
Destination: &options.sourceRoot,
EnvVars: []string{"TOOLKIT_SOURCE_ROOT"},
},
&cli.StringFlag{
Name: "toolkit-package-type",
Usage: "specify the package type to use for the toolkit. One of ['deb', 'rpm', 'auto', '']. If 'auto' or '' are used, the type is inferred automatically.",
Value: "auto",
Destination: &options.packageType,
EnvVars: []string{"TOOLKIT_PACKAGE_TYPE"},
EnvVars: []string{"SOURCE_ROOT"},
},
&cli.StringFlag{
Name: "pid-file",
@@ -153,15 +145,6 @@ func (a app) build() *cli.App {
}

func (a *app) Before(c *cli.Context, o *options) error {
if o.sourceRoot == "" {
sourceRoot, err := a.resolveSourceRoot(o.runtimeOptions.HostRootMount, o.packageType)
if err != nil {
return fmt.Errorf("failed to resolve source root: %v", err)
}
a.logger.Infof("Resolved source root to %v", sourceRoot)
o.sourceRoot = sourceRoot
}

a.toolkit = toolkit.NewInstaller(
toolkit.WithLogger(a.logger),
toolkit.WithSourceRoot(o.sourceRoot),
@@ -294,35 +277,3 @@ func (a *app) shutdown(pidFile string) {
a.logger.Warningf("Unable to remove pidfile: %v", err)
}
}

func (a *app) resolveSourceRoot(hostRoot string, packageType string) (string, error) {
resolvedPackageType, err := a.resolvePackageType(hostRoot, packageType)
if err != nil {
return "", err
}
switch resolvedPackageType {
case "deb":
return "/artifacts/deb", nil
case "rpm":
return "/artifacts/rpm", nil
default:
return "", fmt.Errorf("invalid package type: %v", resolvedPackageType)
}
}

func (a *app) resolvePackageType(hostRoot string, packageType string) (rPackageTypes string, rerr error) {
if packageType != "" && packageType != "auto" {
return packageType, nil
}

locator := lookup.NewExecutableLocator(a.logger, hostRoot)
if candidates, err := locator.Locate("/usr/bin/rpm"); err == nil && len(candidates) > 0 {
return "rpm", nil
}

if candidates, err := locator.Locate("/usr/bin/dpkg"); err == nil && len(candidates) > 0 {
return "deb", nil
}

return "deb", nil
}

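The removed resolvePackageType above probes the mounted host root for the rpm and dpkg binaries to infer the package type. A simplified, stdlib-only analogue of that decision order, assuming plain os.Stat in place of the toolkit's executable locator:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // guessPackageType probes hostRoot for the rpm and dpkg binaries and
    // infers the package type, defaulting to "deb". This is a sketch of the
    // decision order only; the toolkit resolves executables via a locator.
    func guessPackageType(hostRoot string) string {
        if _, err := os.Stat(filepath.Join(hostRoot, "/usr/bin/rpm")); err == nil {
            return "rpm"
        }
        if _, err := os.Stat(filepath.Join(hostRoot, "/usr/bin/dpkg")); err == nil {
            return "deb"
        }
        return "deb"
    }

    func main() {
        fmt.Println(guessPackageType("/host"))
    }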
@@ -433,7 +433,7 @@ swarm-resource = ""
"--driver-root-ctr-path=" + hostRoot,
"--pid-file=" + filepath.Join(testRoot, "toolkit.pid"),
"--restart-mode=none",
"--toolkit-source-root=" + filepath.Join(artifactRoot, "deb"),
"--source-root=" + filepath.Join(artifactRoot, "deb"),
}

err := app.Run(append(testArgs, tc.args...))

@@ -28,7 +28,7 @@ type createDirectory struct {
logger logger.Interface
}

func (t *ToolkitInstaller) createDirectory() Installer {
func (t *toolkitInstaller) createDirectory() Installer {
return &createDirectory{
logger: t.logger,
}

@@ -28,18 +28,20 @@ import (
log "github.com/sirupsen/logrus"

"github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk-installer/container/operator"
"github.com/NVIDIA/nvidia-container-toolkit/internal/config"
)

type executable struct {
requiresKernelModule bool
path string
symlink string
args []string
env map[string]string
}

func (t *ToolkitInstaller) collectExecutables(destDir string) ([]Installer, error) {
configFilePath := t.ConfigFilePath(destDir)
func (t *toolkitInstaller) collectExecutables(destDir string) ([]Installer, error) {
configHome := filepath.Join(destDir, ".config")
configDir := filepath.Join(configHome, "nvidia-container-runtime")
configPath := filepath.Join(configDir, "config.toml")

executables := []executable{
{
@@ -54,7 +56,7 @@ func (t *ToolkitInstaller) collectExecutables(destDir string) ([]Installer, erro
path: runtime.Path,
requiresKernelModule: true,
env: map[string]string{
config.FilePathOverrideEnvVar: configFilePath,
"XDG_CONFIG_HOME": configHome,
},
}
executables = append(executables, e)
@@ -70,9 +72,7 @@ func (t *ToolkitInstaller) collectExecutables(destDir string) ([]Installer, erro
executable{
path: "nvidia-container-runtime-hook",
symlink: "nvidia-container-toolkit",
env: map[string]string{
config.FilePathOverrideEnvVar: configFilePath,
},
args: []string{fmt.Sprintf("-config %s", configPath)},
},
)

@@ -94,6 +94,7 @@ func (t *ToolkitInstaller) collectExecutables(destDir string) ([]Installer, erro
Source: executablePath,
WrappedExecutable: dotRealFilename,
CheckModules: executable.requiresKernelModule,
Args: executable.args,
Envvars: map[string]string{
"PATH": strings.Join([]string{destDir, "$PATH"}, ":"),
},
@@ -123,6 +124,7 @@ type wrapper struct {
Envvars map[string]string
WrappedExecutable string
CheckModules bool
Args []string
}

type render struct {
@@ -163,6 +165,9 @@ fi
{{$key}}={{$value}} \
{{- end }}
{{ .DestDir }}/{{ .WrappedExecutable }} \
{{- range $arg := .Args }}
{{$arg}} \
{{- end }}
"$@"
`


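The template fragment above renders the shell wrappers around the installed ".real" executables. A self-contained sketch of the same pattern with text/template; the struct and template here are simplified stand-ins, not the toolkit's exact definitions:

    package main

    import (
        "os"
        "text/template"
    )

    // wrapper holds the values interpolated into the shell-wrapper template.
    type wrapper struct {
        DestDir           string
        Envvars           map[string]string
        WrappedExecutable string
        Args              []string
    }

    // wrapperTemplate walks Envvars and Args to build a wrapper script that
    // execs the wrapped binary with "$@" appended.
    const wrapperTemplate = `#! /bin/sh
    {{- range $key, $value := .Envvars }}
    {{$key}}={{$value}} \
    {{- end }}
    {{ .DestDir }}/{{ .WrappedExecutable }} \
    {{- range $arg := .Args }}
    {{$arg}} \
    {{- end }}
    "$@"
    `

    func main() {
        w := wrapper{
            DestDir:           "/dest-dir",
            Envvars:           map[string]string{"PATH": "/dest-dir:$PATH"},
            WrappedExecutable: "nvidia-container-runtime-hook.real",
            Args:              []string{"-config /dest-dir/.config/nvidia-container-runtime/config.toml"},
        }
        tmpl := template.Must(template.New("wrapper").Parse(wrapperTemplate))
        if err := tmpl.Execute(os.Stdout, w); err != nil {
            panic(err)
        }
    }

Note that text/template iterates maps in sorted key order, which is what makes the rendered environment lines (and the test expectations later in this diff) deterministic.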
@@ -68,6 +68,19 @@ fi
PATH=/foo/bar/baz \
/dest-dir/some-runtime \
"$@"
`,
},
{
description: "args are added",
w: &wrapper{
WrappedExecutable: "some-runtime",
Args: []string{"--config foo", "bar"},
},
expected: `#! /bin/sh
/dest-dir/some-runtime \
--config foo \
bar \
"$@"
`,
},
}

@@ -33,7 +33,7 @@ type Installer interface {
Install(string) error
}

type ToolkitInstaller struct {
type toolkitInstaller struct {
logger logger.Interface
ignoreErrors bool
sourceRoot string
@@ -43,13 +43,11 @@ type ToolkitInstaller struct {
ensureTargetDirectory Installer
}

var _ Installer = (*ToolkitInstaller)(nil)
var _ Installer = (*toolkitInstaller)(nil)

// New creates a toolkit installer with the specified options.
func New(opts ...Option) (*ToolkitInstaller, error) {
t := &ToolkitInstaller{
sourceRoot: "/",
}
func New(opts ...Option) (Installer, error) {
t := &toolkitInstaller{}
for _, opt := range opts {
opt(t)
}
@@ -57,6 +55,9 @@ func New(opts ...Option) (*ToolkitInstaller, error) {
if t.logger == nil {
t.logger = logger.New()
}
if t.sourceRoot == "" {
t.sourceRoot = "/"
}
if t.artifactRoot == nil {
artifactRoot, err := newArtifactRoot(t.logger, t.sourceRoot)
if err != nil {
@@ -73,7 +74,7 @@ func New(opts ...Option) (*ToolkitInstaller, error) {
}

// Install ensures that the required toolkit files are installed in the specified directory.
func (t *ToolkitInstaller) Install(destDir string) error {
func (t *toolkitInstaller) Install(destDir string) error {
var installers []Installer

installers = append(installers, t.ensureTargetDirectory)
@@ -98,11 +99,6 @@ func (t *ToolkitInstaller) Install(destDir string) error {
return errs
}

func (t *ToolkitInstaller) ConfigFilePath(destDir string) string {
toolkitConfigDir := filepath.Join(destDir, ".config", "nvidia-container-runtime")
return filepath.Join(toolkitConfigDir, "config.toml")
}

type symlink struct {
linkname string
target string
@@ -112,7 +112,7 @@ func TestToolkitInstaller(t *testing.T) {
return nil
},
}
i := ToolkitInstaller{
i := toolkitInstaller{
logger: logger,
artifactRoot: r,
ensureTargetDirectory: createDirectory,
@@ -172,8 +172,8 @@ if [ "${?}" != "0" ]; then
echo "nvidia driver modules are not yet loaded, invoking runc directly"
exec runc "$@"
fi
NVIDIA_CTK_CONFIG_FILE_PATH=/foo/bar/baz/.config/nvidia-container-runtime/config.toml \
PATH=/foo/bar/baz:$PATH \
XDG_CONFIG_HOME=/foo/bar/baz/.config \
/foo/bar/baz/nvidia-container-runtime.real \
"$@"
`,
@@ -187,8 +187,8 @@ if [ "${?}" != "0" ]; then
echo "nvidia driver modules are not yet loaded, invoking runc directly"
exec runc "$@"
fi
NVIDIA_CTK_CONFIG_FILE_PATH=/foo/bar/baz/.config/nvidia-container-runtime/config.toml \
PATH=/foo/bar/baz:$PATH \
XDG_CONFIG_HOME=/foo/bar/baz/.config \
/foo/bar/baz/nvidia-container-runtime.cdi.real \
"$@"
`,
@@ -202,8 +202,8 @@ if [ "${?}" != "0" ]; then
echo "nvidia driver modules are not yet loaded, invoking runc directly"
exec runc "$@"
fi
NVIDIA_CTK_CONFIG_FILE_PATH=/foo/bar/baz/.config/nvidia-container-runtime/config.toml \
PATH=/foo/bar/baz:$PATH \
XDG_CONFIG_HOME=/foo/bar/baz/.config \
/foo/bar/baz/nvidia-container-runtime.legacy.real \
"$@"
`,
@@ -240,9 +240,9 @@ PATH=/foo/bar/baz:$PATH \
path: "/foo/bar/baz/nvidia-container-runtime-hook",
mode: 0777,
wrapper: `#! /bin/sh
NVIDIA_CTK_CONFIG_FILE_PATH=/foo/bar/baz/.config/nvidia-container-runtime/config.toml \
PATH=/foo/bar/baz:$PATH \
/foo/bar/baz/nvidia-container-runtime-hook.real \
-config /foo/bar/baz/.config/nvidia-container-runtime/config.toml \
"$@"
`,
},

@@ -28,7 +28,7 @@ import (
// A predefined set of library candidates are considered, with the first one
// resulting in success being installed to the toolkit folder. The install process
// resolves the symlink for the library and copies the versioned library itself.
func (t *ToolkitInstaller) collectLibraries() ([]Installer, error) {
func (t *toolkitInstaller) collectLibraries() ([]Installer, error) {
requiredLibraries := []string{
"libnvidia-container.so.1",
"libnvidia-container-go.so.1",

@@ -19,29 +19,29 @@ package installer

import "github.com/NVIDIA/nvidia-container-toolkit/internal/logger"

type Option func(*ToolkitInstaller)
type Option func(*toolkitInstaller)

func WithLogger(logger logger.Interface) Option {
return func(ti *ToolkitInstaller) {
return func(ti *toolkitInstaller) {
ti.logger = logger
}
}

func WithArtifactRoot(artifactRoot *artifactRoot) Option {
return func(ti *ToolkitInstaller) {
return func(ti *toolkitInstaller) {
ti.artifactRoot = artifactRoot
}
}

func WithIgnoreErrors(ignoreErrors bool) Option {
return func(ti *ToolkitInstaller) {
return func(ti *toolkitInstaller) {
ti.ignoreErrors = ignoreErrors
}
}

// WithSourceRoot sets the root directory for locating artifacts to be installed.
func WithSourceRoot(sourceRoot string) Option {
return func(ti *ToolkitInstaller) {
return func(ti *toolkitInstaller) {
ti.sourceRoot = sourceRoot
}
}

@@ -37,6 +37,8 @@ import (
const (
// DefaultNvidiaDriverRoot specifies the default NVIDIA driver run directory
DefaultNvidiaDriverRoot = "/run/nvidia/driver"

configFilename = "config.toml"
)

type cdiOptions struct {
@@ -213,8 +215,7 @@ func Flags(opts *Options) []cli.Flag {

// An Installer is used to install the NVIDIA Container Toolkit from the toolkit container.
type Installer struct {
logger logger.Interface

logger logger.Interface
sourceRoot string
// toolkitRoot specifies the destination path at which the toolkit is installed.
toolkitRoot string
@@ -314,7 +315,7 @@ func (t *Installer) Install(cli *cli.Context, opts *Options) error {
t.logger.Errorf("Ignoring error: %v", fmt.Errorf("could not install toolkit components: %w", err))
}

err = t.installToolkitConfig(cli, opts, toolkit.ConfigFilePath(t.toolkitRoot))
err = t.installToolkitConfig(cli, opts)
if err != nil && !opts.ignoreErrors {
return fmt.Errorf("error installing NVIDIA container toolkit config: %v", err)
} else if err != nil {
@@ -341,11 +342,13 @@ func (t *Installer) Install(cli *cli.Context, opts *Options) error {

// installToolkitConfig installs the config file for the NVIDIA container toolkit ensuring
// that the settings are updated to match the desired install and nvidia driver directories.
func (t *Installer) installToolkitConfig(c *cli.Context, opts *Options, toolkitConfigPath string) error {
func (t *Installer) installToolkitConfig(c *cli.Context, opts *Options) error {
toolkitConfigDir := filepath.Join(t.toolkitRoot, ".config", "nvidia-container-runtime")
toolkitConfigPath := filepath.Join(toolkitConfigDir, configFilename)

t.logger.Infof("Installing NVIDIA container toolkit config '%v'", toolkitConfigPath)

err := t.createDirectories(filepath.Dir(toolkitConfigPath))
err := t.createDirectories(toolkitConfigDir)
if err != nil && !opts.ignoreErrors {
return fmt.Errorf("could not create required directories: %v", err)
} else if err != nil {

@@ -86,7 +86,6 @@ devices:
hostPath: /host/driver/root/dev/nvidia-caps-imex-channels/channel2047
containerEdits:
env:
- NVIDIA_CTK_LIBCUDA_DIR=/lib/x86_64-linux-gnu
- NVIDIA_VISIBLE_DEVICES=void
hooks:
- hookName: createContainer
@@ -98,15 +97,6 @@ containerEdits:
- libcuda.so.1::/lib/x86_64-linux-gnu/libcuda.so
env:
- NVIDIA_CTK_DEBUG=false
- hookName: createContainer
path: {{ .toolkitRoot }}/nvidia-cdi-hook
args:
- nvidia-cdi-hook
- create-soname-symlinks
- --folder
- /lib/x86_64-linux-gnu
env:
- NVIDIA_CTK_DEBUG=false
- hookName: createContainer
path: {{ .toolkitRoot }}/nvidia-cdi-hook
args:

@@ -80,7 +80,6 @@ devices:
hostPath: {{ .driverRoot }}/dev/nvidia0
containerEdits:
env:
- NVIDIA_CTK_LIBCUDA_DIR=/lib/x86_64-linux-gnu
- NVIDIA_VISIBLE_DEVICES=void
deviceNodes:
- path: /dev/nvidiactl
@@ -103,15 +102,6 @@ containerEdits:
- --host-driver-version=999.88.77
env:
- NVIDIA_CTK_DEBUG=false
- hookName: createContainer
path: /usr/bin/nvidia-cdi-hook
args:
- nvidia-cdi-hook
- create-soname-symlinks
- --folder
- /lib/x86_64-linux-gnu
env:
- NVIDIA_CTK_DEBUG=false
- hookName: createContainer
path: /usr/bin/nvidia-cdi-hook
args:
@@ -174,7 +164,6 @@ devices:
hostPath: {{ .driverRoot }}/dev/nvidia0
containerEdits:
env:
- NVIDIA_CTK_LIBCUDA_DIR=/lib/x86_64-linux-gnu
- NVIDIA_VISIBLE_DEVICES=void
deviceNodes:
- path: /dev/nvidiactl
@@ -189,15 +178,6 @@ containerEdits:
- libcuda.so.1::/lib/x86_64-linux-gnu/libcuda.so
env:
- NVIDIA_CTK_DEBUG=false
- hookName: createContainer
path: /usr/bin/nvidia-cdi-hook
args:
- nvidia-cdi-hook
- create-soname-symlinks
- --folder
- /lib/x86_64-linux-gnu
env:
- NVIDIA_CTK_DEBUG=false
- hookName: createContainer
path: /usr/bin/nvidia-cdi-hook
args:
@@ -260,7 +240,6 @@ devices:
hostPath: {{ .driverRoot }}/dev/nvidia0
containerEdits:
env:
- NVIDIA_CTK_LIBCUDA_DIR=/lib/x86_64-linux-gnu
- NVIDIA_VISIBLE_DEVICES=void
deviceNodes:
- path: /dev/nvidiactl
@@ -275,15 +254,6 @@ containerEdits:
- libcuda.so.1::/lib/x86_64-linux-gnu/libcuda.so
env:
- NVIDIA_CTK_DEBUG=false
- hookName: createContainer
path: /usr/bin/nvidia-cdi-hook
args:
- nvidia-cdi-hook
- create-soname-symlinks
- --folder
- /lib/x86_64-linux-gnu
env:
- NVIDIA_CTK_DEBUG=false
- hookName: createContainer
path: /usr/bin/nvidia-cdi-hook
args:
@@ -337,7 +307,6 @@ devices:
hostPath: {{ .driverRoot }}/dev/nvidia0
containerEdits:
env:
- NVIDIA_CTK_LIBCUDA_DIR=/lib/x86_64-linux-gnu
- NVIDIA_VISIBLE_DEVICES=void
deviceNodes:
- path: /dev/nvidiactl

@@ -1,171 +0,0 @@
# SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARG GOLANG_VERSION=x.x.x
ARG VERSION="N/A"

FROM nvcr.io/nvidia/cuda:12.9.0-base-ubi9 AS build

RUN dnf install -y \
wget make git gcc \
&& \
rm -rf /var/cache/yum/*

ARG GOLANG_VERSION=x.x.x
RUN set -eux; \
\
arch="$(uname -m)"; \
case "${arch##*-}" in \
x86_64 | amd64) ARCH='amd64' ;; \
ppc64el | ppc64le) ARCH='ppc64le' ;; \
aarch64 | arm64) ARCH='arm64' ;; \
*) echo "unsupported architecture" ; exit 1 ;; \
esac; \
wget -nv -O - https://storage.googleapis.com/golang/go${GOLANG_VERSION}.linux-${ARCH}.tar.gz \
| tar -C /usr/local -xz


ENV GOPATH=/go
ENV PATH=$GOPATH/bin:/usr/local/go/bin:$PATH

WORKDIR /build
COPY . .

RUN mkdir -p /artifacts/bin
ARG VERSION="N/A"
ARG GIT_COMMIT="unknown"
RUN make PREFIX=/artifacts/bin cmd-nvidia-ctk-installer

# The packaging stage collects the deb and rpm packages built for
# supported architectures.
FROM nvcr.io/nvidia/distroless/go:v3.1.9-dev AS packaging

USER 0:0
SHELL ["/busybox/sh", "-c"]
RUN ln -s /busybox/sh /bin/sh

ARG ARTIFACTS_ROOT
COPY ${ARTIFACTS_ROOT} /artifacts/packages/

WORKDIR /artifacts

# build-args are added to the manifest.txt file below.
ARG PACKAGE_VERSION
ARG GIT_BRANCH
ARG GIT_COMMIT
ARG GIT_COMMIT_SHORT
ARG SOURCE_DATE_EPOCH
ARG VERSION

# Create a manifest.txt file with the absolute paths of all deb and rpm packages in the container
RUN echo "#IMAGE_EPOCH=$(date '+%s')" > /artifacts/manifest.txt && \
env | sed 's/^/#/g' >> /artifacts/manifest.txt && \
find /artifacts/packages -iname '*.deb' -o -iname '*.rpm' >> /artifacts/manifest.txt

LABEL name="NVIDIA Container Toolkit Packages"
LABEL vendor="NVIDIA"
LABEL version="${VERSION}"
LABEL release="N/A"
LABEL summary="deb and rpm packages for the NVIDIA Container Toolkit"
LABEL description="See summary"

COPY LICENSE /licenses/

# The debpackages stage is used to extract the contents of deb packages.
FROM nvcr.io/nvidia/cuda:12.9.0-base-ubuntu20.04 AS debpackages

ARG TARGETARCH
ARG PACKAGE_DIST_DEB=ubuntu18.04

COPY --from=packaging /artifacts/packages/${PACKAGE_DIST_DEB} /deb-packages

RUN mkdir -p /artifacts/deb
RUN set -eux; \
\
case "${TARGETARCH}" in \
x86_64 | amd64) ARCH='amd64' ;; \
ppc64el | ppc64le) ARCH='ppc64le' ;; \
aarch64 | arm64) ARCH='arm64' ;; \
*) echo "unsupported architecture" ; exit 1 ;; \
esac; \
for p in $(ls /deb-packages/${ARCH}/*.deb); do dpkg-deb -xv $p /artifacts/deb/; done

# The rpmpackages stage is used to extract the contents of the rpm packages.
FROM nvcr.io/nvidia/cuda:12.9.0-base-ubi9 AS rpmpackages
RUN dnf install -y cpio

ARG TARGETARCH
ARG PACKAGE_DIST_RPM=centos7

COPY --from=packaging /artifacts/packages/${PACKAGE_DIST_RPM} /rpm-packages

RUN mkdir -p /artifacts/rpm
RUN set -eux; \
\
case "${TARGETARCH}" in \
x86_64 | amd64) ARCH='x86_64' ;; \
ppc64el | ppc64le) ARCH='ppc64le' ;; \
aarch64 | arm64) ARCH='aarch64' ;; \
*) echo "unsupported architecture" ; exit 1 ;; \
esac; \
for p in $(ls /rpm-packages/${ARCH}/*.rpm); do rpm2cpio $p | cpio -idmv -D /artifacts/rpm; done

# The artifacts image serves as an intermediate stage to collect the artifacts
# From the previous stages:
# - The extracted deb packages
# - The extracted rpm packages
# - The nvidia-ctk-installer binary
FROM scratch AS artifacts

COPY --from=rpmpackages /artifacts/rpm /artifacts/rpm
COPY --from=debpackages /artifacts/deb /artifacts/deb
COPY --from=build /artifacts/bin /artifacts/build

# The application stage contains the application used as a GPU Operator
# operand.
FROM nvcr.io/nvidia/distroless/go:v3.1.9-dev AS application

USER 0:0
SHELL ["/busybox/sh", "-c"]
RUN ln -s /busybox/sh /bin/sh

ENV NVIDIA_DISABLE_REQUIRE="true"
ENV NVIDIA_VISIBLE_DEVICES=void
ENV NVIDIA_DRIVER_CAPABILITIES=utility

COPY --from=artifacts /artifacts/rpm /artifacts/rpm
COPY --from=artifacts /artifacts/deb /artifacts/deb
COPY --from=artifacts /artifacts/build /work

WORKDIR /work
ENV PATH=/work:$PATH

ARG VERSION
LABEL io.k8s.display-name="NVIDIA Container Runtime Config"
LABEL name="NVIDIA Container Runtime Config"
LABEL vendor="NVIDIA"
LABEL version="${VERSION}"
LABEL release="N/A"
LABEL summary="Automatically Configure your Container Runtime for GPU support."
LABEL description="See summary"

COPY LICENSE /licenses/

ENTRYPOINT ["/work/nvidia-ctk-installer"]

# The GPU Operator exec's nvidia-toolkit in its entrypoint.
# We create a symlink here to ensure compatibility with older
# GPU Operator versions.
RUN ln -s /work/nvidia-ctk-installer /work/nvidia-toolkit
deployments/container/Dockerfile.packaging (new file, 38 lines)
@@ -0,0 +1,38 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARG GOLANG_VERSION=x.x.x

FROM nvcr.io/nvidia/cuda:12.9.0-base-ubuntu20.04

ARG ARTIFACTS_ROOT
COPY ${ARTIFACTS_ROOT} /artifacts/packages/

WORKDIR /artifacts/packages

# build-args are added to the manifest.txt file below.
ARG PACKAGE_DIST
ARG PACKAGE_VERSION
ARG GIT_BRANCH
ARG GIT_COMMIT
ARG GIT_COMMIT_SHORT
ARG SOURCE_DATE_EPOCH
ARG VERSION

# Create a manifest.txt file with the absolute paths of all deb and rpm packages in the container
RUN echo "#IMAGE_EPOCH=$(date '+%s')" > /artifacts/manifest.txt && \
env | sed 's/^/#/g' >> /artifacts/manifest.txt && \
find /artifacts/packages -iname '*.deb' -o -iname '*.rpm' >> /artifacts/manifest.txt

RUN mkdir /licenses && mv /NGC-DL-CONTAINER-LICENSE /licenses/NGC-DL-CONTAINER-LICENSE
deployments/container/Dockerfile.ubi8 (new file, 90 lines)
@@ -0,0 +1,90 @@
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARG GOLANG_VERSION=x.x.x
ARG VERSION="N/A"

FROM nvcr.io/nvidia/cuda:12.9.0-base-ubi8 AS build

RUN yum install -y \
wget make git gcc \
&& \
rm -rf /var/cache/yum/*

ARG GOLANG_VERSION=x.x.x
RUN set -eux; \
\
arch="$(uname -m)"; \
case "${arch##*-}" in \
x86_64 | amd64) ARCH='amd64' ;; \
ppc64el | ppc64le) ARCH='ppc64le' ;; \
aarch64 | arm64) ARCH='arm64' ;; \
*) echo "unsupported architecture" ; exit 1 ;; \
esac; \
wget -nv -O - https://storage.googleapis.com/golang/go${GOLANG_VERSION}.linux-${ARCH}.tar.gz \
| tar -C /usr/local -xz


ENV GOPATH=/go
ENV PATH=$GOPATH/bin:/usr/local/go/bin:$PATH

WORKDIR /build
COPY . .

RUN mkdir /artifacts
ARG VERSION="N/A"
ARG GIT_COMMIT="unknown"
RUN make PREFIX=/artifacts cmd-nvidia-ctk-installer

FROM nvcr.io/nvidia/cuda:12.9.0-base-ubi8

ENV NVIDIA_DISABLE_REQUIRE="true"
ENV NVIDIA_VISIBLE_DEVICES=void
ENV NVIDIA_DRIVER_CAPABILITIES=utility

ARG ARTIFACTS_ROOT
ARG PACKAGE_DIST
COPY ${ARTIFACTS_ROOT}/${PACKAGE_DIST} /artifacts/packages/${PACKAGE_DIST}

WORKDIR /artifacts/packages

ARG PACKAGE_VERSION
ARG TARGETARCH
ENV PACKAGE_ARCH=${TARGETARCH}

RUN PACKAGE_ARCH=${PACKAGE_ARCH/amd64/x86_64} && PACKAGE_ARCH=${PACKAGE_ARCH/arm64/aarch64} && \
yum localinstall -y \
${PACKAGE_DIST}/${PACKAGE_ARCH}/libnvidia-container1-1.*.rpm \
${PACKAGE_DIST}/${PACKAGE_ARCH}/libnvidia-container-tools-1.*.rpm \
${PACKAGE_DIST}/${PACKAGE_ARCH}/nvidia-container-toolkit*-${PACKAGE_VERSION}*.rpm

WORKDIR /work

COPY --from=build /artifacts/nvidia-ctk-installer /work/nvidia-ctk-installer
RUN ln -s nvidia-ctk-installer nvidia-toolkit

ENV PATH=/work:$PATH

ARG VERSION
LABEL io.k8s.display-name="NVIDIA Container Runtime Config"
LABEL name="NVIDIA Container Runtime Config"
LABEL vendor="NVIDIA"
LABEL version="${VERSION}"
LABEL release="N/A"
LABEL summary="Automatically Configure your Container Runtime for GPU support."
LABEL description="See summary"

RUN mkdir /licenses && mv /NGC-DL-CONTAINER-LICENSE /licenses/NGC-DL-CONTAINER-LICENSE

ENTRYPOINT ["/work/nvidia-ctk-installer"]
deployments/container/Dockerfile.ubuntu (new file, 98 lines)
@@ -0,0 +1,98 @@
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARG GOLANG_VERSION=x.x.x
ARG VERSION="N/A"

FROM nvcr.io/nvidia/cuda:12.9.0-base-ubuntu20.04 AS build

RUN apt-get update && \
apt-get install -y wget make git gcc \
&& \
rm -rf /var/lib/apt/lists/*

ARG GOLANG_VERSION=x.x.x
RUN set -eux; \
\
arch="$(uname -m)"; \
case "${arch##*-}" in \
x86_64 | amd64) ARCH='amd64' ;; \
ppc64el | ppc64le) ARCH='ppc64le' ;; \
aarch64 | arm64) ARCH='arm64' ;; \
*) echo "unsupported architecture" ; exit 1 ;; \
esac; \
wget -nv -O - https://storage.googleapis.com/golang/go${GOLANG_VERSION}.linux-${ARCH}.tar.gz \
| tar -C /usr/local -xz

ENV GOPATH=/go
ENV PATH=$GOPATH/bin:/usr/local/go/bin:$PATH

WORKDIR /build
COPY . .

RUN mkdir /artifacts
ARG VERSION="N/A"
ARG GIT_COMMIT="unknown"
RUN make PREFIX=/artifacts cmd-nvidia-ctk-installer

FROM nvcr.io/nvidia/cuda:12.9.0-base-ubuntu20.04

# Remove the CUDA repository configurations to avoid issues with rotated GPG keys
RUN rm -f /etc/apt/sources.list.d/cuda.list

ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends \
libcap2 \
curl \
&& \
rm -rf /var/lib/apt/lists/*

ENV NVIDIA_DISABLE_REQUIRE="true"
ENV NVIDIA_VISIBLE_DEVICES=void
ENV NVIDIA_DRIVER_CAPABILITIES=utility

ARG ARTIFACTS_ROOT
ARG PACKAGE_DIST
COPY ${ARTIFACTS_ROOT}/${PACKAGE_DIST} /artifacts/packages/${PACKAGE_DIST}

WORKDIR /artifacts/packages

ARG PACKAGE_VERSION
ARG TARGETARCH
ENV PACKAGE_ARCH=${TARGETARCH}

RUN dpkg -i \
${PACKAGE_DIST}/${PACKAGE_ARCH}/libnvidia-container1_1.*.deb \
${PACKAGE_DIST}/${PACKAGE_ARCH}/libnvidia-container-tools_1.*.deb \
${PACKAGE_DIST}/${PACKAGE_ARCH}/nvidia-container-toolkit*_${PACKAGE_VERSION}*.deb

WORKDIR /work

COPY --from=build /artifacts/nvidia-ctk-installer /work/nvidia-ctk-installer
RUN ln -s nvidia-ctk-installer nvidia-toolkit

ENV PATH=/work:$PATH

ARG VERSION
LABEL io.k8s.display-name="NVIDIA Container Runtime Config"
LABEL name="NVIDIA Container Runtime Config"
LABEL vendor="NVIDIA"
LABEL version="${VERSION}"
LABEL release="N/A"
LABEL summary="Automatically Configure your Container Runtime for GPU support."
LABEL description="See summary"

RUN mkdir /licenses && mv /NGC-DL-CONTAINER-LICENSE /licenses/NGC-DL-CONTAINER-LICENSE

ENTRYPOINT ["/work/nvidia-ctk-installer"]
@@ -29,17 +29,17 @@ include $(CURDIR)/versions.mk

IMAGE_VERSION := $(VERSION)

IMAGE_TAG ?= $(VERSION)
IMAGE_TAG ?= $(VERSION)-$(DIST)
IMAGE = $(IMAGE_NAME):$(IMAGE_TAG)

OUT_IMAGE_NAME ?= $(IMAGE_NAME)
OUT_IMAGE_VERSION ?= $(IMAGE_VERSION)
OUT_IMAGE_TAG = $(OUT_IMAGE_VERSION)
OUT_IMAGE_TAG = $(OUT_IMAGE_VERSION)-$(DIST)
OUT_IMAGE = $(OUT_IMAGE_NAME):$(OUT_IMAGE_TAG)

##### Public rules #####
DEFAULT_PUSH_TARGET := application
DISTRIBUTIONS := $(DEFAULT_PUSH_TARGET)
DEFAULT_PUSH_TARGET := ubuntu20.04
DISTRIBUTIONS := ubuntu20.04 ubi8

META_TARGETS := packaging

@@ -56,16 +56,30 @@ else
include $(CURDIR)/deployments/container/multi-arch.mk
endif

# For the default push target we also push a short tag equal to the version.
# We skip this for the development release
DEVEL_RELEASE_IMAGE_VERSION ?= devel
PUSH_MULTIPLE_TAGS ?= true
ifeq ($(strip $(OUT_IMAGE_VERSION)),$(DEVEL_RELEASE_IMAGE_VERSION))
PUSH_MULTIPLE_TAGS = false
endif
ifeq ($(PUSH_MULTIPLE_TAGS),true)
push-$(DEFAULT_PUSH_TARGET): push-short
endif

push-%: DIST = $(*)
push-short: DIST = $(DEFAULT_PUSH_TARGET)

# Define the push targets
$(PUSH_TARGETS): push-%:
$(CURDIR)/scripts/publish-image.sh $(IMAGE) $(OUT_IMAGE)

DOCKERFILE = $(CURDIR)/deployments/container/Dockerfile
push-short:
$(CURDIR)/scripts/publish-image.sh $(IMAGE) $(OUT_IMAGE)

# For packaging targets we set the output image tag to include the -packaging suffix.
%-packaging: INTERMEDIATE_TARGET := --target=packaging
%-packaging: IMAGE_TAG = $(IMAGE_VERSION)-packaging
%-packaging: OUT_IMAGE_TAG = $(IMAGE_VERSION)-packaging

build-%: DIST = $(*)
build-%: DOCKERFILE = $(CURDIR)/deployments/container/Dockerfile.$(DOCKERFILE_SUFFIX)

ARTIFACTS_ROOT ?= $(shell realpath --relative-to=$(CURDIR) $(DIST_DIR))

@@ -76,12 +90,10 @@ $(IMAGE_TARGETS): image-%: $(ARTIFACTS_ROOT)
--provenance=false --sbom=false \
$(DOCKER_BUILD_OPTIONS) \
$(DOCKER_BUILD_PLATFORM_OPTIONS) \
$(INTERMEDIATE_TARGET) \
--tag $(IMAGE) \
--build-arg ARTIFACTS_ROOT="$(ARTIFACTS_ROOT)" \
--build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \
--build-arg PACKAGE_DIST_DEB="$(PACKAGE_DIST_DEB)" \
--build-arg PACKAGE_DIST_RPM="$(PACKAGE_DIST_RPM)" \
--build-arg PACKAGE_DIST="$(PACKAGE_DIST)" \
--build-arg PACKAGE_VERSION="$(PACKAGE_VERSION)" \
--build-arg VERSION="$(VERSION)" \
--build-arg GIT_COMMIT="$(GIT_COMMIT)" \
@@ -91,17 +103,25 @@ $(IMAGE_TARGETS): image-%: $(ARTIFACTS_ROOT)
-f $(DOCKERFILE) \
$(CURDIR)

build-ubuntu%: DOCKERFILE_SUFFIX := ubuntu
build-ubuntu%: PACKAGE_DIST = ubuntu18.04

PACKAGE_DIST_DEB = ubuntu18.04
# TODO: This needs to be set to centos8 for ppc64le builds
PACKAGE_DIST_RPM = centos7
build-ubi8: DOCKERFILE_SUFFIX := ubi8
build-ubi8: PACKAGE_DIST = centos7

# Handle the default build target.
.PHONY: build push
build: build-$(DEFAULT_PUSH_TARGET)
push: push-$(DEFAULT_PUSH_TARGET)
build-packaging: DOCKERFILE_SUFFIX := packaging
build-packaging: PACKAGE_ARCH := amd64
build-packaging: PACKAGE_DIST = all

# Test targets
test-%: DIST = $(*)

# Handle the default build target.
.PHONY: build
build: $(DEFAULT_PUSH_TARGET)
$(DEFAULT_PUSH_TARGET): build-$(DEFAULT_PUSH_TARGET)
$(DEFAULT_PUSH_TARGET): DIST = $(DEFAULT_PUSH_TARGET)

TEST_CASES ?= docker crio containerd
$(TEST_TARGETS): test-%:
TEST_CASES="$(TEST_CASES)" bash -x $(CURDIR)/test/container/main.sh run \

@@ -23,3 +23,11 @@ $(BUILD_TARGETS): build-%: image-%
else
$(BUILD_TARGETS): build-%: image-%
endif

# For the default distribution we also retag the image.
# Note: This needs to be updated for multi-arch images.
ifeq ($(IMAGE_TAG),$(VERSION)-$(DIST))
$(DEFAULT_PUSH_TARGET):
$(DOCKER) image inspect $(IMAGE) > /dev/null || $(DOCKER) pull $(IMAGE)
$(DOCKER) tag $(IMAGE) $(subst :$(IMAGE_TAG),:$(VERSION),$(IMAGE))
endif

@@ -57,6 +57,9 @@ WORKDIR $DIST_DIR
COPY packaging/debian ./debian
COPY deployments/systemd/ .

ARG LIBNVIDIA_CONTAINER_TOOLS_VERSION
ENV LIBNVIDIA_CONTAINER_TOOLS_VERSION ${LIBNVIDIA_CONTAINER_TOOLS_VERSION}

RUN dch --create --package="${PKG_NAME}" \
--newversion "${REVISION}" \
"See https://gitlab.com/nvidia/container-toolkit/container-toolkit/-/blob/${GIT_COMMIT}/CHANGELOG.md for the changelog" && \
@@ -65,6 +68,6 @@ RUN dch --create --package="${PKG_NAME}" \
if [ "$REVISION" != "$(dpkg-parsechangelog --show-field=Version)" ]; then exit 1; fi

CMD export DISTRIB="$(lsb_release -cs)" && \
debuild -eDISTRIB -eSECTION -eVERSION="${REVISION}" \
debuild -eDISTRIB -eSECTION -eLIBNVIDIA_CONTAINER_TOOLS_VERSION -eVERSION="${REVISION}" \
--dpkg-buildpackage-hook='sh debian/prepare' -i -us -uc -b && \
mv /tmp/*.deb /dist

@@ -48,12 +48,16 @@ WORKDIR $DIST_DIR/..
COPY packaging/rpm .
COPY deployments/systemd/ .

ARG LIBNVIDIA_CONTAINER_TOOLS_VERSION
ENV LIBNVIDIA_CONTAINER_TOOLS_VERSION ${LIBNVIDIA_CONTAINER_TOOLS_VERSION}

CMD arch=$(uname -m) && \
rpmbuild --clean --target=$arch -bb \
-D "_topdir $PWD" \
-D "release_date $(date +'%a %b %d %Y')" \
-D "git_commit ${GIT_COMMIT}" \
-D "version ${PKG_VERS}" \
-D "libnvidia_container_tools_version ${LIBNVIDIA_CONTAINER_TOOLS_VERSION}" \
-D "release ${PKG_REV}" \
SPECS/nvidia-container-toolkit.spec && \
mv RPMS/$arch/*.rpm /dist

@@ -73,12 +73,16 @@ WORKDIR $DIST_DIR/..
COPY packaging/rpm .
COPY deployments/systemd/ ${DIST_DIR}/

ARG LIBNVIDIA_CONTAINER_TOOLS_VERSION
ENV LIBNVIDIA_CONTAINER_TOOLS_VERSION ${LIBNVIDIA_CONTAINER_TOOLS_VERSION}

CMD arch=$(uname -m) && \
rpmbuild --clean --target=$arch -bb \
-D "_topdir $PWD" \
-D "release_date $(date +'%a %b %d %Y')" \
-D "git_commit ${GIT_COMMIT}" \
-D "version ${PKG_VERS}" \
-D "libnvidia_container_tools_version ${LIBNVIDIA_CONTAINER_TOOLS_VERSION}" \
-D "release ${PKG_REV}" \
SPECS/nvidia-container-toolkit.spec && \
mv RPMS/$arch/*.rpm /dist

@@ -55,14 +55,17 @@ WORKDIR $DIST_DIR
COPY packaging/debian ./debian
COPY deployments/systemd/ .

ARG LIBNVIDIA_CONTAINER_TOOLS_VERSION
ENV LIBNVIDIA_CONTAINER_TOOLS_VERSION ${LIBNVIDIA_CONTAINER_TOOLS_VERSION}

RUN dch --create --package="${PKG_NAME}" \
--newversion "${REVISION}" \
"See https://gitlab.com/nvidia/container-toolkit/container-toolkit/-/blob/${GIT_COMMIT}/CHANGELOG.md for the changelog" && \
dch --append "Bump libnvidia-container dependency to ${REVISION}" && \
dch --append "Bump libnvidia-container dependency to ${LIBNVIDIA_CONTAINER_TOOLS_VERSION}" && \
dch -r "" && \
if [ "$REVISION" != "$(dpkg-parsechangelog --show-field=Version)" ]; then exit 1; fi

CMD export DISTRIB="$(lsb_release -cs)" && \
debuild -eDISTRIB -eSECTION -eVERSION="${REVISION}" \
debuild -eDISTRIB -eSECTION -eLIBNVIDIA_CONTAINER_TOOLS_VERSION -eVERSION="${REVISION}" \
--dpkg-buildpackage-hook='sh debian/prepare' -i -us -uc -b && \
mv /tmp/*.deb /dist

@@ -85,6 +85,11 @@ docker-all: $(AMD64_TARGETS) $(X86_64_TARGETS) \
--%: docker-build-%
@

LIBNVIDIA_CONTAINER_VERSION ?= $(LIB_VERSION)
LIBNVIDIA_CONTAINER_TAG ?= $(LIB_TAG)

LIBNVIDIA_CONTAINER_TOOLS_VERSION := $(LIBNVIDIA_CONTAINER_VERSION)$(if $(LIBNVIDIA_CONTAINER_TAG),~$(LIBNVIDIA_CONTAINER_TAG))-1

# private ubuntu target
--ubuntu%: OS := ubuntu

@@ -124,6 +129,7 @@ docker-build-%:
--build-arg PKG_NAME="$(LIB_NAME)" \
--build-arg PKG_VERS="$(PACKAGE_VERSION)" \
--build-arg PKG_REV="$(PACKAGE_REVISION)" \
--build-arg LIBNVIDIA_CONTAINER_TOOLS_VERSION="$(LIBNVIDIA_CONTAINER_TOOLS_VERSION)" \
--build-arg GIT_COMMIT="$(GIT_COMMIT)" \
--tag $(BUILDIMAGE) \
--file $(DOCKERFILE) .

@@ -53,6 +53,6 @@ docker run --rm \
-v $(pwd):$(pwd) \
-w $(pwd) \
-u $(id -u):$(id -g) \
--entrypoint="sh" \
--entrypoint="bash" \
${IMAGE} \
-c "cp -p -R /artifacts/* ${DIST_DIR}"
-c "cp --preserve=timestamps -R /artifacts/* ${DIST_DIR}"

@@ -31,10 +31,8 @@ import (
)

const (
FilePathOverrideEnvVar = "NVIDIA_CTK_CONFIG_FILE_PATH"
RelativeFilePath = "nvidia-container-runtime/config.toml"

configRootOverride = "XDG_CONFIG_HOME"
configOverride = "XDG_CONFIG_HOME"
configFilePath = "nvidia-container-runtime/config.toml"

nvidiaCTKExecutable = "nvidia-ctk"
nvidiaCTKDefaultFilePath = "/usr/bin/nvidia-ctk"
@@ -76,15 +74,11 @@ type Config struct {

// GetConfigFilePath returns the path to the config file for the configured system
func GetConfigFilePath() string {
if configFilePathOverride := os.Getenv(FilePathOverrideEnvVar); configFilePathOverride != "" {
return configFilePathOverride
}
configRoot := "/etc"
if XDGConfigDir := os.Getenv(configRootOverride); len(XDGConfigDir) != 0 {
configRoot = XDGConfigDir
if XDGConfigDir := os.Getenv(configOverride); len(XDGConfigDir) != 0 {
return filepath.Join(XDGConfigDir, configFilePath)
}

return filepath.Join(configRoot, RelativeFilePath)
return filepath.Join("/etc", configFilePath)
}

// GetConfig sets up the config struct. Values are read from a toml file

@@ -27,26 +27,9 @@ import (

func TestGetConfigWithCustomConfig(t *testing.T) {
testDir := t.TempDir()
t.Setenv(configRootOverride, testDir)
t.Setenv(configOverride, testDir)

filename := filepath.Join(testDir, RelativeFilePath)

// By default debug is disabled
contents := []byte("[nvidia-container-runtime]\ndebug = \"/nvidia-container-toolkit.log\"")

require.NoError(t, os.MkdirAll(filepath.Dir(filename), 0766))
require.NoError(t, os.WriteFile(filename, contents, 0600))

cfg, err := GetConfig()
require.NoError(t, err)
require.Equal(t, "/nvidia-container-toolkit.log", cfg.NVIDIAContainerRuntimeConfig.DebugFilePath)
}

func TestGetConfigWithConfigFilePathOverride(t *testing.T) {
testDir := t.TempDir()
filename := filepath.Join(testDir, RelativeFilePath)

t.Setenv(FilePathOverrideEnvVar, filename)
filename := filepath.Join(testDir, configFilePath)

// By default debug is disabled
contents := []byte("[nvidia-container-runtime]\ndebug = \"/nvidia-container-toolkit.log\"")

@@ -19,7 +19,6 @@ package image
import (
"fmt"
"path/filepath"
"slices"
"strconv"
"strings"

@@ -144,8 +143,8 @@ func (i CUDA) HasDisableRequire() bool {
return false
}

// devicesFromEnvvars returns the devices requested by the image through environment variables
func (i CUDA) devicesFromEnvvars(envVars ...string) []string {
// DevicesFromEnvvars returns the devices requested by the image through environment variables
func (i CUDA) DevicesFromEnvvars(envVars ...string) VisibleDevices {
// We concatenate all the devices from the specified env.
var isSet bool
var devices []string
@@ -166,15 +165,15 @@ func (i CUDA) devicesFromEnvvars(envVars ...string) []string {

// Environment variable unset with legacy image: default to "all".
if !isSet && len(devices) == 0 && i.IsLegacy() {
devices = []string{"all"}
return NewVisibleDevices("all")
}

// Environment variable unset or empty or "void": return nil
if len(devices) == 0 || requested["void"] {
devices = []string{"void"}
return NewVisibleDevices("void")
}

return NewVisibleDevices(devices...).List()
return NewVisibleDevices(devices...)
}

// GetDriverCapabilities returns the requested driver capabilities.
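DevicesFromEnvvars concatenates device requests across the given environment variables, with two fallbacks: an unset variable on a legacy image defaults to "all", and an empty request collapses to "void". A stdlib-only sketch of that logic; the isLegacy flag stands in for the image inspection the toolkit performs, and the "void" handling is simplified:

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    // devicesFromEnv concatenates comma-separated device requests across the
    // named environment variables, applying the legacy-"all" and empty-"void"
    // fallbacks sketched in the diff above.
    func devicesFromEnv(isLegacy bool, envVars ...string) []string {
        var isSet bool
        var devices []string
        for _, name := range envVars {
            value, ok := os.LookupEnv(name)
            if !ok {
                continue
            }
            isSet = true
            for _, d := range strings.Split(value, ",") {
                if trimmed := strings.TrimSpace(d); trimmed != "" {
                    devices = append(devices, trimmed)
                }
            }
        }
        if !isSet && len(devices) == 0 && isLegacy {
            return []string{"all"}
        }
        if len(devices) == 0 {
            return []string{"void"}
        }
        return devices
    }

    func main() {
        os.Setenv("NVIDIA_VISIBLE_DEVICES", "GPU-12345,GPU-67890")
        fmt.Println(devicesFromEnv(false, "NVIDIA_VISIBLE_DEVICES"))
    }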
@@ -233,22 +232,6 @@ func (i CUDA) OnlyFullyQualifiedCDIDevices() bool {
return hasCDIdevice
}

// visibleEnvVars returns the environment variables that are used to determine device visibility.
// It returns the preferred environment variables that are set, or NVIDIA_VISIBLE_DEVICES if none are set.
func (i CUDA) visibleEnvVars() []string {
var envVars []string
for _, envVar := range i.preferredVisibleDeviceEnvVars {
if !i.HasEnvvar(envVar) {
continue
}
envVars = append(envVars, envVar)
}
if len(envVars) > 0 {
return envVars
}
return []string{EnvVarNvidiaVisibleDevices}
}

// VisibleDevices returns a list of devices requested in the container image.
// If volume mount requests are enabled these are returned if requested,
// otherwise device requests through environment variables are considered.
@@ -270,7 +253,7 @@ func (i CUDA) VisibleDevices() []string {
}

// Fall back to reading from the environment variable if privileges are correct
envVarDeviceRequests := i.visibleDevicesFromEnvVar()
envVarDeviceRequests := i.VisibleDevicesFromEnvVar()
if len(envVarDeviceRequests) == 0 {
return nil
}
@@ -282,10 +265,7 @@ func (i CUDA) VisibleDevices() []string {
}

// We log a warning if we are ignoring the environment variable requests.
envVars := i.visibleEnvVars()
if len(envVars) > 0 {
i.logger.Warningf("Ignoring devices requested by environment variable(s) in unprivileged container: %v", envVars)
}
i.logger.Warningf("Ignoring devices specified in NVIDIA_VISIBLE_DEVICES in unprivileged container")

return nil
}
@@ -301,34 +281,31 @@ func (i CUDA) cdiDeviceRequestsFromAnnotations() []string {
return nil
}

var annotationKeys []string
for key := range i.annotations {
var devices []string
for key, value := range i.annotations {
for _, prefix := range i.annotationsPrefixes {
if strings.HasPrefix(key, prefix) {
annotationKeys = append(annotationKeys, key)
devices = append(devices, strings.Split(value, ",")...)
// There is no need to check additional prefixes since we
// typically deduplicate devices in any case.
break
}
}
}
// We sort the annotationKeys for consistent results.
slices.Sort(annotationKeys)

var devices []string
for _, key := range annotationKeys {
devices = append(devices, strings.Split(i.annotations[key], ",")...)
}
return devices
}

// visibleDevicesFromEnvVar returns the set of visible devices requested through environment variables.
// VisibleDevicesFromEnvVar returns the set of visible devices requested through environment variables.
// If any of the preferredVisibleDeviceEnvVars are present in the image, they
// are used to determine the visible devices. If this is not the case, the
// NVIDIA_VISIBLE_DEVICES environment variable is used.
func (i CUDA) visibleDevicesFromEnvVar() []string {
envVars := i.visibleEnvVars()
return i.devicesFromEnvvars(envVars...)
func (i CUDA) VisibleDevicesFromEnvVar() []string {
for _, envVar := range i.preferredVisibleDeviceEnvVars {
if i.HasEnvvar(envVar) {
return i.DevicesFromEnvvars(i.preferredVisibleDeviceEnvVars...).List()
}
}
return i.DevicesFromEnvvars(EnvVarNvidiaVisibleDevices).List()
}

// visibleDevicesFromMounts returns the set of visible devices requested as mounts.
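The preference logic in VisibleDevicesFromEnvVar is worth isolating: if any preferred variable is set, the whole preferred set wins; otherwise NVIDIA_VISIBLE_DEVICES is consulted. A small sketch of that decision, with illustrative variable names:

    package main

    import (
        "fmt"
        "os"
    )

    // visibleDeviceEnvVars returns the environment variables to consult for
    // device requests: the preferred set if any of its members is set,
    // otherwise NVIDIA_VISIBLE_DEVICES. This mirrors the fallback in
    // VisibleDevicesFromEnvVar above in simplified form.
    func visibleDeviceEnvVars(preferred []string) []string {
        for _, envVar := range preferred {
            if _, ok := os.LookupEnv(envVar); ok {
                return preferred
            }
        }
        return []string{"NVIDIA_VISIBLE_DEVICES"}
    }

    func main() {
        os.Setenv("DOCKER_RESOURCE_GPUS", "GPU-12345")
        fmt.Println(visibleDeviceEnvVars([]string{"DOCKER_RESOURCE_GPUS"}))
    }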
@@ -414,7 +391,7 @@ func (m cdiDeviceMountRequest) qualifiedName() (string, error) {
|
||||
|
||||
// ImexChannelsFromEnvVar returns the list of IMEX channels requested for the image.
|
||||
func (i CUDA) ImexChannelsFromEnvVar() []string {
|
||||
imexChannels := i.devicesFromEnvvars(EnvVarNvidiaImexChannels)
|
||||
imexChannels := i.DevicesFromEnvvars(EnvVarNvidiaImexChannels).List()
|
||||
if len(imexChannels) == 1 && imexChannels[0] == "all" {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -429,7 +429,7 @@ func TestGetDevicesFromEnvvar(t *testing.T) {
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
devices := image.visibleDevicesFromEnvVar()
|
||||
devices := image.VisibleDevicesFromEnvVar()
|
||||
require.EqualValues(t, tc.expectedDevices, devices)
|
||||
})
|
||||
}
|
||||
@@ -508,15 +508,13 @@ func TestGetVisibleDevicesFromMounts(t *testing.T) {
 
 func TestVisibleDevices(t *testing.T) {
 	var tests = []struct {
-		description                   string
-		mountDevices                  []specs.Mount
-		envvarDevices                 string
-		privileged                    bool
-		acceptUnprivileged            bool
-		acceptMounts                  bool
-		preferredVisibleDeviceEnvVars []string
-		env                           map[string]string
-		expectedDevices               []string
+		description        string
+		mountDevices       []specs.Mount
+		envvarDevices      string
+		privileged         bool
+		acceptUnprivileged bool
+		acceptMounts       bool
+		expectedDevices    []string
 	}{
 		{
 			description: "Mount devices, unprivileged, no accept unprivileged",
@@ -599,92 +597,20 @@ func TestVisibleDevices(t *testing.T) {
 			acceptMounts:       false,
 			expectedDevices:    nil,
 		},
-		// New test cases for visibleEnvVars functionality
-		{
-			description:                   "preferred env var set and present in env, privileged",
-			mountDevices:                  nil,
-			envvarDevices:                 "",
-			privileged:                    true,
-			acceptUnprivileged:            false,
-			acceptMounts:                  true,
-			preferredVisibleDeviceEnvVars: []string{"DOCKER_RESOURCE_GPUS"},
-			env: map[string]string{
-				"DOCKER_RESOURCE_GPUS": "GPU-12345",
-			},
-			expectedDevices: []string{"GPU-12345"},
-		},
-		{
-			description:                   "preferred env var set and present in env, unprivileged but accepted",
-			mountDevices:                  nil,
-			envvarDevices:                 "",
-			privileged:                    false,
-			acceptUnprivileged:            true,
-			acceptMounts:                  true,
-			preferredVisibleDeviceEnvVars: []string{"DOCKER_RESOURCE_GPUS"},
-			env: map[string]string{
-				"DOCKER_RESOURCE_GPUS": "GPU-12345",
-			},
-			expectedDevices: []string{"GPU-12345"},
-		},
-		{
-			description:                   "preferred env var set and present in env, unprivileged and not accepted",
-			mountDevices:                  nil,
-			envvarDevices:                 "",
-			privileged:                    false,
-			acceptUnprivileged:            false,
-			acceptMounts:                  true,
-			preferredVisibleDeviceEnvVars: []string{"DOCKER_RESOURCE_GPUS"},
-			env: map[string]string{
-				"DOCKER_RESOURCE_GPUS": "GPU-12345",
-			},
-			expectedDevices: nil,
-		},
-		{
-			description:                   "multiple preferred env vars, both present, privileged",
-			mountDevices:                  nil,
-			envvarDevices:                 "",
-			privileged:                    true,
-			acceptUnprivileged:            false,
-			acceptMounts:                  true,
-			preferredVisibleDeviceEnvVars: []string{"DOCKER_RESOURCE_GPUS", "DOCKER_RESOURCE_GPUS_ADDITIONAL"},
-			env: map[string]string{
-				"DOCKER_RESOURCE_GPUS":            "GPU-12345",
-				"DOCKER_RESOURCE_GPUS_ADDITIONAL": "GPU-67890",
-			},
-			expectedDevices: []string{"GPU-12345", "GPU-67890"},
-		},
-		{
-			description:                   "preferred env var not present, fallback to NVIDIA_VISIBLE_DEVICES, privileged",
-			mountDevices:                  nil,
-			envvarDevices:                 "GPU-12345",
-			privileged:                    true,
-			acceptUnprivileged:            false,
-			acceptMounts:                  true,
-			preferredVisibleDeviceEnvVars: []string{"DOCKER_RESOURCE_GPUS"},
-			env: map[string]string{
-				EnvVarNvidiaVisibleDevices: "GPU-12345",
-			},
-			expectedDevices: []string{"GPU-12345"},
-		},
 	}
 	for _, tc := range tests {
 		t.Run(tc.description, func(t *testing.T) {
-			// Create env map with both NVIDIA_VISIBLE_DEVICES and any additional env vars
-			env := make(map[string]string)
-			if tc.envvarDevices != "" {
-				env[EnvVarNvidiaVisibleDevices] = tc.envvarDevices
-			}
-			for k, v := range tc.env {
-				env[k] = v
-			}
-
+			// Wrap the call to getDevices() in a closure.
 			image, err := New(
-				WithEnvMap(env),
+				WithEnvMap(
+					map[string]string{
+						EnvVarNvidiaVisibleDevices: tc.envvarDevices,
+					},
+				),
 				WithMounts(tc.mountDevices),
 				WithPrivileged(tc.privileged),
 				WithAcceptDeviceListAsVolumeMounts(tc.acceptMounts),
 				WithAcceptEnvvarUnprivileged(tc.acceptUnprivileged),
-				WithPreferredVisibleDevicesEnvVars(tc.preferredVisibleDeviceEnvVars...),
 			)
 			require.NoError(t, err)
 			require.Equal(t, tc.expectedDevices, image.VisibleDevices())

@@ -23,7 +23,6 @@ type cache struct {
 
 	sync.Mutex
 	devices []Device
-	envVars []EnvVar
 	hooks   []Hook
 	mounts  []Mount
 }
@@ -52,20 +51,6 @@ func (c *cache) Devices() ([]Device, error) {
 	return c.devices, nil
 }
 
-func (c *cache) EnvVars() ([]EnvVar, error) {
-	c.Lock()
-	defer c.Unlock()
-
-	if c.envVars == nil {
-		envVars, err := c.d.EnvVars()
-		if err != nil {
-			return nil, err
-		}
-		c.envVars = envVars
-	}
-	return c.envVars, nil
-}
-
 func (c *cache) Hooks() ([]Hook, error) {
 	c.Lock()
 	defer c.Unlock()

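
The removed EnvVars accessor follows the same lazy-memoization pattern as the surviving Devices and Hooks methods. A trimmed-down, hypothetical version of that pattern, with a function standing in for the wrapped discoverer:

// Sketch of the cache pattern used above: each accessor fills its slice
// at most once under the lock; later calls return the memoized result.
package main

import (
	"fmt"
	"sync"
)

type devicesSource func() ([]string, error)

type cache struct {
	sync.Mutex
	source  devicesSource
	devices []string
}

// Devices queries the underlying source at most once.
func (c *cache) Devices() ([]string, error) {
	c.Lock()
	defer c.Unlock()

	if c.devices == nil {
		devices, err := c.source()
		if err != nil {
			return nil, err
		}
		c.devices = devices
	}
	return c.devices, nil
}

func main() {
	calls := 0
	c := &cache{source: func() ([]string, error) {
		calls++
		return []string{"/dev/nvidia0"}, nil
	}}
	c.Devices()
	c.Devices()
	fmt.Println(calls) // 1: the source is consulted only once
}
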
@@ -22,12 +22,6 @@ type Device struct {
 	Path string
 }
 
-// EnvVar represents a discovered environment variable.
-type EnvVar struct {
-	Name  string
-	Value string
-}
-
 // Mount represents a discovered mount.
 type Mount struct {
 	HostPath string
@@ -48,7 +42,6 @@ type Hook struct {
 //go:generate moq -rm -fmt=goimports -stub -out discover_mock.go . Discover
 type Discover interface {
 	Devices() ([]Device, error)
-	EnvVars() ([]EnvVar, error)
 	Mounts() ([]Mount, error)
 	Hooks() ([]Hook, error)
 }

@@ -20,9 +20,6 @@ var _ Discover = &DiscoverMock{}
//		DevicesFunc: func() ([]Device, error) {
//			panic("mock out the Devices method")
//		},
-//		EnvVarsFunc: func() ([]EnvVar, error) {
-//			panic("mock out the EnvVars method")
-//		},
//		HooksFunc: func() ([]Hook, error) {
//			panic("mock out the Hooks method")
//		},
@@ -39,9 +36,6 @@ type DiscoverMock struct {
 	// DevicesFunc mocks the Devices method.
 	DevicesFunc func() ([]Device, error)
 
-	// EnvVarsFunc mocks the EnvVars method.
-	EnvVarsFunc func() ([]EnvVar, error)
-
 	// HooksFunc mocks the Hooks method.
 	HooksFunc func() ([]Hook, error)
 
@@ -53,9 +47,6 @@ type DiscoverMock struct {
 		// Devices holds details about calls to the Devices method.
 		Devices []struct {
 		}
-		// EnvVars holds details about calls to the EnvVars method.
-		EnvVars []struct {
-		}
 		// Hooks holds details about calls to the Hooks method.
 		Hooks []struct {
 		}
@@ -64,7 +55,6 @@ type DiscoverMock struct {
 		}
 	}
 	lockDevices sync.RWMutex
-	lockEnvVars sync.RWMutex
 	lockHooks   sync.RWMutex
 	lockMounts  sync.RWMutex
 }
@@ -100,37 +90,6 @@ func (mock *DiscoverMock) DevicesCalls() []struct {
 	return calls
 }
 
-// EnvVars calls EnvVarsFunc.
-func (mock *DiscoverMock) EnvVars() ([]EnvVar, error) {
-	callInfo := struct {
-	}{}
-	mock.lockEnvVars.Lock()
-	mock.calls.EnvVars = append(mock.calls.EnvVars, callInfo)
-	mock.lockEnvVars.Unlock()
-	if mock.EnvVarsFunc == nil {
-		var (
-			envVarsOut []EnvVar
-			errOut     error
-		)
-		return envVarsOut, errOut
-	}
-	return mock.EnvVarsFunc()
-}
-
-// EnvVarsCalls gets all the calls that were made to EnvVars.
-// Check the length with:
-//
-//	len(mockedDiscover.EnvVarsCalls())
-func (mock *DiscoverMock) EnvVarsCalls() []struct {
-} {
-	var calls []struct {
-	}
-	mock.lockEnvVars.RLock()
-	calls = mock.calls.EnvVars
-	mock.lockEnvVars.RUnlock()
-	return calls
-}
-
 // Hooks calls HooksFunc.
 func (mock *DiscoverMock) Hooks() ([]Hook, error) {
 	callInfo := struct {

@@ -1,41 +0,0 @@
-/**
-# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-**/
-
-package discover
-
-var _ Discover = (*EnvVar)(nil)
-
-// Devices returns an empty list of devices for a EnvVar discoverer.
-func (e EnvVar) Devices() ([]Device, error) {
-	return nil, nil
-}
-
-// EnvVars returns an empty list of envs for a EnvVar discoverer.
-func (e EnvVar) EnvVars() ([]EnvVar, error) {
-	return []EnvVar{e}, nil
-}
-
-// Mounts returns an empty list of mounts for a EnvVar discoverer.
-func (e EnvVar) Mounts() ([]Mount, error) {
-	return nil, nil
-}
-
-// Hooks allows the Hook type to also implement the Discoverer interface.
-// It returns a single hook
-func (e EnvVar) Hooks() ([]Hook, error) {
-	return nil, nil
-}
@@ -45,19 +45,6 @@ func (f firstOf) Devices() ([]Device, error) {
 	return nil, errs
 }
 
-func (f firstOf) EnvVars() ([]EnvVar, error) {
-	var errs error
-	for _, d := range f {
-		envs, err := d.EnvVars()
-		if err != nil {
-			errs = errors.Join(errs, err)
-			continue
-		}
-		return envs, nil
-	}
-	return nil, errs
-}
-
 func (f firstOf) Hooks() ([]Hook, error) {
 	var errs error
 	for _, d := range f {

@@ -29,10 +29,11 @@ type gdsDeviceDiscoverer struct {
 }
 
 // NewGDSDiscoverer creates a discoverer for GPUDirect Storage devices and mounts.
-func NewGDSDiscoverer(logger logger.Interface, driverRoot string, devRoot string) (Discover, error) {
+func NewGDSDiscoverer(logger logger.Interface, driverRoot string) (Discover, error) {
 	devices := NewCharDeviceDiscoverer(
 		logger,
-		devRoot,
+		// The /dev/nvidia-fs* devices are always created at /
+		"/",
 		[]string{"/dev/nvidia-fs*"},
 	)
 

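
The restored signature hard-codes "/" as the device root because the nvidia-fs nodes are created there. A quick standalone illustration of the device-node globbing this discoverer relies on (matchDeviceNodes is a hypothetical stand-in, not the toolkit's API):

// Matches device nodes such as /dev/nvidia-fs* under a configurable root.
package main

import (
	"fmt"
	"path/filepath"
)

func matchDeviceNodes(root string, patterns ...string) ([]string, error) {
	var nodes []string
	for _, p := range patterns {
		matches, err := filepath.Glob(filepath.Join(root, p))
		if err != nil {
			return nil, err
		}
		nodes = append(nodes, matches...)
	}
	return nodes, nil
}

func main() {
	nodes, err := matchDeviceNodes("/", "/dev/nvidia-fs*")
	if err != nil {
		panic(err)
	}
	fmt.Println(nodes) // empty on machines without GPUDirect Storage nodes
}
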
@@ -46,9 +46,6 @@ const (
 	// An UpdateLDCacheHook is the hook used to update the ldcache in the
 	// container. This allows injected libraries to be discoverable.
 	UpdateLDCacheHook = HookName("update-ldcache")
-	// A CreateSonameSymlinksHook is the hook used to ensure that soname symlinks
-	// for injected libraries exist in the container.
-	CreateSonameSymlinksHook = HookName("create-soname-symlinks")
 
 	defaultNvidiaCDIHookPath = "/usr/bin/nvidia-cdi-hook"
 )
@@ -60,11 +57,6 @@ func (h *Hook) Devices() ([]Device, error) {
 	return nil, nil
 }
 
-// EnvVars returns an empty list of envs for a Hook discoverer.
-func (h *Hook) EnvVars() ([]EnvVar, error) {
-	return nil, nil
-}
-
 // Mounts returns an empty list of mounts for a Hook discoverer.
 func (h *Hook) Mounts() ([]Mount, error) {
 	return nil, nil
@@ -51,24 +51,30 @@ func (d ldconfig) Hooks() ([]Hook, error) {
 		return nil, fmt.Errorf("failed to discover mounts for ldcache update: %v", err)
 	}
 
-	var args []string
-
-	if d.ldconfigPath != "" {
-		args = append(args, "--ldconfig-path", d.ldconfigPath)
-	}
-
-	for _, f := range uniqueFolders(getLibraryPaths(mounts)) {
-		args = append(args, "--folder", f)
-	}
-
-	h := Merge(
-		d.hookCreator.Create(CreateSonameSymlinksHook, args...),
-		d.hookCreator.Create(UpdateLDCacheHook, args...),
+	h := createLDCacheUpdateHook(
+		d.hookCreator,
+		d.ldconfigPath,
+		getLibraryPaths(mounts),
 	)
 
 	return h.Hooks()
 }
 
+// createLDCacheUpdateHook locates the NVIDIA Container Toolkit CLI and creates a hook for updating the LD Cache
+func createLDCacheUpdateHook(hookCreator HookCreator, ldconfig string, libraries []string) *Hook {
+	var args []string
+
+	if ldconfig != "" {
+		args = append(args, "--ldconfig-path", ldconfig)
+	}
+
+	for _, f := range uniqueFolders(libraries) {
+		args = append(args, "--folder", f)
+	}
+
+	return hookCreator.Create(UpdateLDCacheHook, args...)
+}
+
 // getLibraryPaths extracts the library dirs from the specified mounts
 func getLibraryPaths(mounts []Mount) []string {
 	var paths []string

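
The restored createLDCacheUpdateHook simply assembles a CLI argument list from deduplicated library folders. A self-contained sketch of that assembly; uniqueFolders here is a stand-in reimplementation under the assumption that the package helper of the same name behaves like this:

package main

import (
	"fmt"
	"path/filepath"
)

// uniqueFolders collects each library's directory once, preserving order.
func uniqueFolders(libraries []string) []string {
	seen := make(map[string]bool)
	var folders []string
	for _, l := range libraries {
		dir := filepath.Dir(l)
		if !seen[dir] {
			seen[dir] = true
			folders = append(folders, dir)
		}
	}
	return folders
}

func main() {
	args := []string{"nvidia-cdi-hook", "update-ldcache"}
	for _, f := range uniqueFolders([]string{
		"/usr/local/lib/libfoo.so",
		"/usr/local/lib/libbar.so",
	}) {
		args = append(args, "--folder", f)
	}
	// [nvidia-cdi-hook update-ldcache --folder /usr/local/lib]
	fmt.Println(args)
}
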
@@ -39,24 +39,11 @@ func TestLDCacheUpdateHook(t *testing.T) {
 		mounts        []Mount
 		mountError    error
 		expectedError error
-		expectedHooks []Hook
+		expectedArgs  []string
 	}{
 		{
-			description: "empty mounts",
-			expectedHooks: []Hook{
-				{
-					Lifecycle: "createContainer",
-					Path:      testNvidiaCDIHookPath,
-					Args:      []string{"nvidia-cdi-hook", "create-soname-symlinks"},
-					Env:       []string{"NVIDIA_CTK_DEBUG=false"},
-				},
-				{
-					Lifecycle: "createContainer",
-					Path:      testNvidiaCDIHookPath,
-					Args:      []string{"nvidia-cdi-hook", "update-ldcache"},
-					Env:       []string{"NVIDIA_CTK_DEBUG=false"},
-				},
-			},
+			description:  "empty mounts",
+			expectedArgs: []string{"nvidia-cdi-hook", "update-ldcache"},
 		},
 		{
 			description: "mount error",
@@ -79,20 +66,7 @@ func TestLDCacheUpdateHook(t *testing.T) {
 					Path: "/usr/local/lib/libbar.so",
 				},
 			},
-			expectedHooks: []Hook{
-				{
-					Lifecycle: "createContainer",
-					Path:      testNvidiaCDIHookPath,
-					Args:      []string{"nvidia-cdi-hook", "create-soname-symlinks", "--folder", "/usr/local/lib", "--folder", "/usr/local/libother"},
-					Env:       []string{"NVIDIA_CTK_DEBUG=false"},
-				},
-				{
-					Lifecycle: "createContainer",
-					Path:      testNvidiaCDIHookPath,
-					Args:      []string{"nvidia-cdi-hook", "update-ldcache", "--folder", "/usr/local/lib", "--folder", "/usr/local/libother"},
-					Env:       []string{"NVIDIA_CTK_DEBUG=false"},
-				},
-			},
+			expectedArgs: []string{"nvidia-cdi-hook", "update-ldcache", "--folder", "/usr/local/lib", "--folder", "/usr/local/libother"},
 		},
 		{
 			description: "host paths are ignored",
@@ -102,38 +76,12 @@ func TestLDCacheUpdateHook(t *testing.T) {
 					Path: "/usr/local/lib/libfoo.so",
 				},
 			},
-			expectedHooks: []Hook{
-				{
-					Lifecycle: "createContainer",
-					Path:      testNvidiaCDIHookPath,
-					Args:      []string{"nvidia-cdi-hook", "create-soname-symlinks", "--folder", "/usr/local/lib"},
-					Env:       []string{"NVIDIA_CTK_DEBUG=false"},
-				},
-				{
-					Lifecycle: "createContainer",
-					Path:      testNvidiaCDIHookPath,
-					Args:      []string{"nvidia-cdi-hook", "update-ldcache", "--folder", "/usr/local/lib"},
-					Env:       []string{"NVIDIA_CTK_DEBUG=false"},
-				},
-			},
+			expectedArgs: []string{"nvidia-cdi-hook", "update-ldcache", "--folder", "/usr/local/lib"},
 		},
 		{
 			description:  "explicit ldconfig path is passed",
 			ldconfigPath: testLdconfigPath,
-			expectedHooks: []Hook{
-				{
-					Lifecycle: "createContainer",
-					Path:      testNvidiaCDIHookPath,
-					Args:      []string{"nvidia-cdi-hook", "create-soname-symlinks", "--ldconfig-path", testLdconfigPath},
-					Env:       []string{"NVIDIA_CTK_DEBUG=false"},
-				},
-				{
-					Lifecycle: "createContainer",
-					Path:      testNvidiaCDIHookPath,
-					Args:      []string{"nvidia-cdi-hook", "update-ldcache", "--ldconfig-path", testLdconfigPath},
-					Env:       []string{"NVIDIA_CTK_DEBUG=false"},
-				},
-			},
+			expectedArgs: []string{"nvidia-cdi-hook", "update-ldcache", "--ldconfig-path", testLdconfigPath},
 		},
 	}
 
@@ -144,6 +92,13 @@ func TestLDCacheUpdateHook(t *testing.T) {
 					return tc.mounts, tc.mountError
 				},
 			}
+			expectedHook := Hook{
+				Path:      testNvidiaCDIHookPath,
+				Args:      tc.expectedArgs,
+				Lifecycle: "createContainer",
+				Env:       []string{"NVIDIA_CTK_DEBUG=false"},
+			}
 
 			d, err := NewLDCacheUpdateHook(logger, mountMock, hookCreator, tc.ldconfigPath)
 			require.NoError(t, err)

@@ -157,7 +112,9 @@ func TestLDCacheUpdateHook(t *testing.T) {
 			}
 
 			require.NoError(t, err)
-			require.EqualValues(t, tc.expectedHooks, hooks)
+			require.Len(t, hooks, 1)
+
+			require.EqualValues(t, hooks[0], expectedHook)
 
 			devices, err := d.Devices()
 			require.NoError(t, err)

@@ -53,21 +53,6 @@ func (d list) Devices() ([]Device, error) {
 	return allDevices, nil
 }
 
-// EnvVars returns all environment variables from the included discoverers.
-func (d list) EnvVars() ([]EnvVar, error) {
-	var allEnvs []EnvVar
-
-	for i, di := range d {
-		envs, err := di.EnvVars()
-		if err != nil {
-			return nil, fmt.Errorf("error discovering envs for discoverer %v: %w", i, err)
-		}
-		allEnvs = append(allEnvs, envs...)
-	}
-
-	return allEnvs, nil
-}
-
 // Mounts returns all mounts from the included discoverers
 func (d list) Mounts() ([]Mount, error) {
 	var allMounts []Mount

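
The removed method used the same aggregation pattern as the remaining Devices and Mounts implementations. A minimal sketch of that pattern, with closures standing in for discoverers (names here are illustrative only):

package main

import "fmt"

type discoverer func() ([]string, error)

// allMounts collects results from each member in order, failing fast on the
// first error, mirroring the list discoverer above.
func allMounts(list []discoverer) ([]string, error) {
	var all []string
	for i, d := range list {
		mounts, err := d()
		if err != nil {
			return nil, fmt.Errorf("error discovering mounts for discoverer %v: %w", i, err)
		}
		all = append(all, mounts...)
	}
	return all, nil
}

func main() {
	list := []discoverer{
		func() ([]string, error) { return []string{"/a"}, nil },
		func() ([]string, error) { return []string{"/b", "/c"}, nil },
	}
	mounts, _ := allMounts(list)
	fmt.Println(mounts) // [/a /b /c]
}
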
@@ -27,11 +27,6 @@ func (e None) Devices() ([]Device, error) {
 	return nil, nil
 }
 
-// EnvVars returns an empty list of devices
-func (e None) EnvVars() ([]EnvVar, error) {
-	return nil, nil
-}
-
 // Mounts returns an empty list of mounts
 func (e None) Mounts() ([]Mount, error) {
 	return nil, nil
@@ -55,11 +55,6 @@ func FromDiscoverer(d discover.Discover) (*cdi.ContainerEdits, error) {
 		return nil, fmt.Errorf("failed to discover devices: %v", err)
 	}
 
-	envs, err := d.EnvVars()
-	if err != nil {
-		return nil, fmt.Errorf("failed to discover environment variables: %w", err)
-	}
-
 	mounts, err := d.Mounts()
 	if err != nil {
 		return nil, fmt.Errorf("failed to discover mounts: %v", err)
@@ -79,10 +74,6 @@ func FromDiscoverer(d discover.Discover) (*cdi.ContainerEdits, error) {
 		c.Append(edits)
 	}
 
-	for _, e := range envs {
-		c.Append(envvar(e).toEdits())
-	}
-
 	for _, m := range mounts {
 		c.Append(mount(m).toEdits())
 	}
 
@@ -1,39 +0,0 @@
-/**
-# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-**/
-
-package edits
-
-import (
-	"fmt"
-
-	"tags.cncf.io/container-device-interface/pkg/cdi"
-	"tags.cncf.io/container-device-interface/specs-go"
-
-	"github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
-)
-
-type envvar discover.EnvVar
-
-// toEdits converts a discovered envvar to CDI Container Edits.
-func (d envvar) toEdits() *cdi.ContainerEdits {
-	e := cdi.ContainerEdits{
-		ContainerEdits: &specs.ContainerEdits{
-			Env: []string{fmt.Sprintf("%s=%s", d.Name, d.Value)},
-		},
-	}
-	return &e
-}
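
The deleted edits helper boils down to formatting NAME=VALUE pairs for the CDI container edits. A minimal stand-in without the CDI types, purely for illustration:

package main

import "fmt"

type envVar struct {
	Name  string
	Value string
}

// toEnvEntry renders the pair in the NAME=VALUE form that CDI env edits use.
func toEnvEntry(e envVar) string {
	return fmt.Sprintf("%s=%s", e.Name, e.Value)
}

func main() {
	fmt.Println(toEnvEntry(envVar{Name: "NVIDIA_CTK_LIBCUDA_DIR", Value: "/usr/lib64"}))
}
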
@@ -23,114 +23,34 @@ import (
 	"github.com/NVIDIA/nvidia-container-toolkit/internal/logger"
 )
 
-// A RuntimeMode is used to select a specific mode of operation for the NVIDIA Container Runtime.
-type RuntimeMode string
-
-const (
-	// In LegacyRuntimeMode the nvidia-container-runtime injects the
-	// nvidia-container-runtime-hook as a prestart hook into the incoming
-	// container config. This hook invokes the nvidia-container-cli to perform
-	// the required modifications to the container.
-	LegacyRuntimeMode = RuntimeMode("legacy")
-	// In CSVRuntimeMode the nvidia-container-runtime processes a set of CSV
-	// files to determine which container modification are required. The
-	// contents of these CSV files are used to generate an in-memory CDI
-	// specification which is used to modify the container config.
-	CSVRuntimeMode = RuntimeMode("csv")
-	// In CDIRuntimeMode the nvidia-container-runtime applies the modifications
-	// to the container config required for the requested CDI devices in the
-	// same way that other CDI clients would.
-	CDIRuntimeMode = RuntimeMode("cdi")
-	// In JitCDIRuntimeMode the nvidia-container-runtime generates in-memory CDI
-	// specifications for requested NVIDIA devices.
-	JitCDIRuntimeMode = RuntimeMode("jit-cdi")
-)
-
-type RuntimeModeResolver interface {
-	ResolveRuntimeMode(string) RuntimeMode
-}
-
-type modeResolver struct {
-	logger logger.Interface
-	// TODO: This only needs to consider the requested devices.
-	image             *image.CUDA
-	propertyExtractor info.PropertyExtractor
-	defaultMode       RuntimeMode
-}
-
-type Option func(*modeResolver)
-
-func WithDefaultMode(defaultMode RuntimeMode) Option {
-	return func(mr *modeResolver) {
-		mr.defaultMode = defaultMode
-	}
-}
-
-func WithLogger(logger logger.Interface) Option {
-	return func(mr *modeResolver) {
-		mr.logger = logger
-	}
-}
-
-func WithImage(image *image.CUDA) Option {
-	return func(mr *modeResolver) {
-		mr.image = image
-	}
-}
-
-func WithPropertyExtractor(propertyExtractor info.PropertyExtractor) Option {
-	return func(mr *modeResolver) {
-		mr.propertyExtractor = propertyExtractor
-	}
-}
-
-func NewRuntimeModeResolver(opts ...Option) RuntimeModeResolver {
-	r := &modeResolver{
-		defaultMode: JitCDIRuntimeMode,
-	}
-	for _, opt := range opts {
-		opt(r)
-	}
-	if r.logger == nil {
-		r.logger = &logger.NullLogger{}
-	}
-
-	return r
-}
-
 // ResolveAutoMode determines the correct mode for the platform if set to "auto"
-func ResolveAutoMode(logger logger.Interface, mode string, image image.CUDA) (rmode RuntimeMode) {
-	r := modeResolver{
-		logger:            logger,
-		image:             &image,
-		propertyExtractor: nil,
-	}
-	return r.ResolveRuntimeMode(mode)
+func ResolveAutoMode(logger logger.Interface, mode string, image image.CUDA) (rmode string) {
+	return resolveMode(logger, mode, image, nil)
 }
 
-func (m *modeResolver) ResolveRuntimeMode(mode string) (rmode RuntimeMode) {
+func resolveMode(logger logger.Interface, mode string, image image.CUDA, propertyExtractor info.PropertyExtractor) (rmode string) {
 	if mode != "auto" {
-		m.logger.Infof("Using requested mode '%s'", mode)
-		return RuntimeMode(mode)
+		logger.Infof("Using requested mode '%s'", mode)
+		return mode
 	}
 	defer func() {
-		m.logger.Infof("Auto-detected mode as '%v'", rmode)
+		logger.Infof("Auto-detected mode as '%v'", rmode)
 	}()
 
-	if m.image.OnlyFullyQualifiedCDIDevices() {
-		return CDIRuntimeMode
+	if image.OnlyFullyQualifiedCDIDevices() {
+		return "cdi"
 	}
 
 	nvinfo := info.New(
-		info.WithLogger(m.logger),
-		info.WithPropertyExtractor(m.propertyExtractor),
+		info.WithLogger(logger),
+		info.WithPropertyExtractor(propertyExtractor),
 	)
 
 	switch nvinfo.ResolvePlatform() {
 	case info.PlatformNVML, info.PlatformWSL:
-		return m.defaultMode
+		return "legacy"
 	case info.PlatformTegra:
-		return CSVRuntimeMode
+		return "csv"
 	}
-	return m.defaultMode
+	return "legacy"
 }

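
The restored resolveMode is a small decision table: an explicit mode wins, fully qualified CDI devices force "cdi", and otherwise the detected platform picks "legacy" or "csv". A minimal sketch of that table with the platform probe replaced by a plain value (the real code consults info.ResolvePlatform instead):

package main

import "fmt"

func resolve(mode string, onlyCDIDevices bool, platform string) string {
	if mode != "auto" {
		return mode // an explicit mode is always honoured
	}
	if onlyCDIDevices {
		return "cdi"
	}
	switch platform {
	case "nvml", "wsl":
		return "legacy"
	case "tegra":
		return "csv"
	}
	return "legacy" // fallback when the platform is unknown
}

func main() {
	fmt.Println(resolve("auto", false, "nvml"))  // legacy
	fmt.Println(resolve("auto", false, "tegra")) // csv
	fmt.Println(resolve("auto", true, "nvml"))   // cdi
}
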
@@ -43,16 +43,11 @@ func TestResolveAutoMode(t *testing.T) {
 			mode:         "not-auto",
 			expectedMode: "not-auto",
 		},
-		{
-			description:  "legacy resolves to legacy",
-			mode:         "legacy",
-			expectedMode: "legacy",
-		},
 		{
 			description:  "no info defaults to legacy",
 			mode:         "auto",
 			info:         map[string]bool{},
-			expectedMode: "jit-cdi",
+			expectedMode: "legacy",
 		},
 		{
 			description: "non-nvml, non-tegra, nvgpu resolves to csv",
@@ -85,14 +80,14 @@ func TestResolveAutoMode(t *testing.T) {
 			expectedMode: "csv",
 		},
 		{
-			description: "nvml, non-tegra, non-nvgpu resolves to jit-cdi",
+			description: "nvml, non-tegra, non-nvgpu resolves to legacy",
 			mode:        "auto",
 			info: map[string]bool{
 				"nvml":  true,
 				"tegra": false,
 				"nvgpu": false,
 			},
-			expectedMode: "jit-cdi",
+			expectedMode: "legacy",
 		},
 		{
 			description: "nvml, non-tegra, nvgpu resolves to csv",
|
||||
expectedMode: "csv",
|
||||
},
|
||||
{
|
||||
description: "nvml, tegra, non-nvgpu resolves to jit-cdi",
|
||||
description: "nvml, tegra, non-nvgpu resolves to legacy",
|
||||
mode: "auto",
|
||||
info: map[string]bool{
|
||||
"nvml": true,
|
||||
"tegra": true,
|
||||
"nvgpu": false,
|
||||
},
|
||||
expectedMode: "jit-cdi",
|
||||
expectedMode: "legacy",
|
||||
},
|
||||
{
|
||||
description: "nvml, tegra, nvgpu resolves to csv",
|
||||
@@ -141,7 +136,7 @@ func TestResolveAutoMode(t *testing.T) {
 			},
 		},
 		{
-			description: "at least one non-cdi device resolves to jit-cdi",
+			description: "at least one non-cdi device resolves to legacy",
 			mode:        "auto",
 			envmap: map[string]string{
 				"NVIDIA_VISIBLE_DEVICES": "nvidia.com/gpu=0,0",
@@ -151,7 +146,7 @@ func TestResolveAutoMode(t *testing.T) {
 				"tegra": false,
 				"nvgpu": false,
 			},
-			expectedMode: "jit-cdi",
+			expectedMode: "legacy",
 		},
 		{
 			description: "at least one non-cdi device resolves to csv",
@@ -175,7 +170,7 @@ func TestResolveAutoMode(t *testing.T) {
 			expectedMode: "cdi",
 		},
 		{
-			description: "cdi mount and non-CDI devices resolves to jit-cdi",
+			description: "cdi mount and non-CDI devices resolves to legacy",
 			mode:        "auto",
 			mounts: []string{
 				"/var/run/nvidia-container-devices/cdi/nvidia.com/gpu/0",
@@ -186,7 +181,7 @@ func TestResolveAutoMode(t *testing.T) {
 				"tegra": false,
 				"nvgpu": false,
 			},
-			expectedMode: "jit-cdi",
+			expectedMode: "legacy",
 		},
 		{
 			description: "cdi mount and non-CDI envvar resolves to cdi",
@@ -204,6 +199,22 @@ func TestResolveAutoMode(t *testing.T) {
 			},
 			expectedMode: "cdi",
 		},
+		{
+			description: "non-cdi mount and CDI envvar resolves to legacy",
+			mode:        "auto",
+			envmap: map[string]string{
+				"NVIDIA_VISIBLE_DEVICES": "nvidia.com/gpu=0",
+			},
+			mounts: []string{
+				"/var/run/nvidia-container-devices/0",
+			},
+			info: map[string]bool{
+				"nvml":  true,
+				"tegra": false,
+				"nvgpu": false,
+			},
+			expectedMode: "legacy",
+		},
 	}
 
 	for _, tc := range testCases {
@@ -240,12 +251,7 @@ func TestResolveAutoMode(t *testing.T) {
 				image.WithAcceptDeviceListAsVolumeMounts(true),
 				image.WithAcceptEnvvarUnprivileged(true),
 			)
-			mr := NewRuntimeModeResolver(
-				WithLogger(logger),
-				WithImage(&image),
-				WithPropertyExtractor(properties),
-			)
-			mode := mr.ResolveRuntimeMode(tc.mode)
+			mode := resolveMode(logger, tc.mode, image, properties)
 			require.EqualValues(t, tc.expectedMode, mode)
 		})
 	}

@@ -1,206 +0,0 @@
-/**
-# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-**/
-
-package ldconfig
-
-import (
-	"fmt"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strings"
-
-	"github.com/NVIDIA/nvidia-container-toolkit/internal/config"
-)
-
-const (
-	// ldsoconfdFilenamePattern specifies the pattern for the filename
-	// in ld.so.conf.d that includes references to the specified directories.
-	// The 00-nvcr prefix is chosen to ensure that these libraries have a
-	// higher precedence than other libraries on the system, but lower than
-	// the 00-cuda-compat that is included in some containers.
-	ldsoconfdFilenamePattern = "00-nvcr-*.conf"
-)
-
-type Ldconfig struct {
-	ldconfigPath string
-	inRoot       string
-}
-
-// NewRunner creates an exec.Cmd that can be used to run ldconfig.
-func NewRunner(id string, ldconfigPath string, containerRoot string, additionalargs ...string) (*exec.Cmd, error) {
-	args := []string{
-		id,
-		strings.TrimPrefix(config.NormalizeLDConfigPath("@"+ldconfigPath), "@"),
-		containerRoot,
-	}
-	args = append(args, additionalargs...)
-
-	return createReexecCommand(args)
-}
-
-// New creates an Ldconfig struct that is used to perform operations on the
-// ldcache and libraries in a particular root (e.g. a container).
-func New(ldconfigPath string, inRoot string) (*Ldconfig, error) {
-	l := &Ldconfig{
-		ldconfigPath: ldconfigPath,
-		inRoot:       inRoot,
-	}
-	if ldconfigPath == "" {
-		return nil, fmt.Errorf("an ldconfig path must be specified")
-	}
-	if inRoot == "" || inRoot == "/" {
-		return nil, fmt.Errorf("ldconfig must be run in the non-system root")
-	}
-	return l, nil
-}
-
-// CreateSonameSymlinks uses ldconfig to create the soname symlinks in the
-// specified directories.
-func (l *Ldconfig) CreateSonameSymlinks(directories ...string) error {
-	if len(directories) == 0 {
-		return nil
-	}
-	ldconfigPath, err := l.prepareRoot()
-	if err != nil {
-		return err
-	}
-
-	args := []string{
-		filepath.Base(ldconfigPath),
-		// Explicitly disable updating the LDCache.
-		"-N",
-		// Specify -n to only process the specified directories.
-		"-n",
-	}
-	args = append(args, directories...)
-
-	return SafeExec(ldconfigPath, args, nil)
-}
-
-func (l *Ldconfig) UpdateLDCache(directories ...string) error {
-	ldconfigPath, err := l.prepareRoot()
-	if err != nil {
-		return err
-	}
-
-	args := []string{
-		filepath.Base(ldconfigPath),
-		// Explicitly specify using /etc/ld.so.conf since the host's ldconfig may
-		// be configured to use a different config file by default.
-		"-f", "/etc/ld.so.conf",
-	}
-
-	if l.ldcacheExists() {
-		args = append(args, "-C", "/etc/ld.so.cache")
-	} else {
-		args = append(args, "-N")
-	}
-
-	// If the ld.so.conf.d directory exists, we create a config file there
-	// containing the required directories, otherwise we add the specified
-	// directories to the ldconfig command directly.
-	if l.ldsoconfdDirectoryExists() {
-		err := createLdsoconfdFile(ldsoconfdFilenamePattern, directories...)
-		if err != nil {
-			return fmt.Errorf("failed to update ld.so.conf.d: %w", err)
-		}
-	} else {
-		args = append(args, directories...)
-	}
-
-	return SafeExec(ldconfigPath, args, nil)
-}
-
-func (l *Ldconfig) prepareRoot() (string, error) {
-	// To prevent leaking the parent proc filesystem, we create a new proc mount
-	// in the specified root.
-	if err := mountProc(l.inRoot); err != nil {
-		return "", fmt.Errorf("error mounting /proc: %w", err)
-	}
-
-	// We mount the host ldconfig before we pivot root since host paths are not
-	// visible after the pivot root operation.
-	ldconfigPath, err := mountLdConfig(l.ldconfigPath, l.inRoot)
-	if err != nil {
-		return "", fmt.Errorf("error mounting host ldconfig: %w", err)
-	}
-
-	// We pivot to the container root for the new process, this further limits
-	// access to the host.
-	if err := pivotRoot(l.inRoot); err != nil {
-		return "", fmt.Errorf("error running pivot_root: %w", err)
-	}
-
-	return ldconfigPath, nil
-}
-
-func (l *Ldconfig) ldcacheExists() bool {
-	if _, err := os.Stat("/etc/ld.so.cache"); err != nil && os.IsNotExist(err) {
-		return false
-	}
-	return true
-}
-
-func (l *Ldconfig) ldsoconfdDirectoryExists() bool {
-	info, err := os.Stat("/etc/ld.so.conf.d")
-	if os.IsNotExist(err) {
-		return false
-	}
-	return info.IsDir()
-}
-
-// createLdsoconfdFile creates a file at /etc/ld.so.conf.d/.
-// The file is created at /etc/ld.so.conf.d/{{ .pattern }} using `CreateTemp` and
-// contains the specified directories on each line.
-func createLdsoconfdFile(pattern string, dirs ...string) error {
-	if len(dirs) == 0 {
-		return nil
-	}
-
-	ldsoconfdDir := "/etc/ld.so.conf.d"
-	if err := os.MkdirAll(ldsoconfdDir, 0755); err != nil {
-		return fmt.Errorf("failed to create ld.so.conf.d: %w", err)
-	}
-
-	configFile, err := os.CreateTemp(ldsoconfdDir, pattern)
-	if err != nil {
-		return fmt.Errorf("failed to create config file: %w", err)
-	}
-	defer func() {
-		_ = configFile.Close()
-	}()
-
-	added := make(map[string]bool)
-	for _, dir := range dirs {
-		if added[dir] {
-			continue
-		}
-		_, err = fmt.Fprintf(configFile, "%s\n", dir)
-		if err != nil {
-			return fmt.Errorf("failed to update config file: %w", err)
-		}
-		added[dir] = true
-	}
-
-	// The created file needs to be world readable for the cases where the container is run as a non-root user.
-	if err := configFile.Chmod(0644); err != nil {
-		return fmt.Errorf("failed to chmod config file: %w", err)
-	}
-
-	return nil
-}
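
For reference, the core of the deleted createLdsoconfdFile is deduplicated, world-readable config writing. The following standalone sketch reproduces that behaviour under a configurable parent directory so it can be exercised without touching /etc; the function name and parent parameter are assumptions for the sketch:

package main

import (
	"fmt"
	"os"
)

// writeLdsoconfd writes each directory once to a CreateTemp-named config
// file under parent and makes it world readable, mirroring the original.
func writeLdsoconfd(parent, pattern string, dirs ...string) (string, error) {
	f, err := os.CreateTemp(parent, pattern)
	if err != nil {
		return "", fmt.Errorf("failed to create config file: %w", err)
	}
	defer f.Close()

	added := make(map[string]bool)
	for _, dir := range dirs {
		if added[dir] {
			continue // skip duplicates, as the original does
		}
		if _, err := fmt.Fprintln(f, dir); err != nil {
			return "", err
		}
		added[dir] = true
	}
	// World-readable for containers that run as a non-root user.
	if err := f.Chmod(0644); err != nil {
		return "", err
	}
	return f.Name(), nil
}

func main() {
	name, err := writeLdsoconfd(os.TempDir(), "00-nvcr-*.conf",
		"/usr/lib64", "/usr/lib64", "/usr/lib64/vdpau")
	if err != nil {
		panic(err)
	}
	fmt.Println("wrote", name) // lists /usr/lib64 and /usr/lib64/vdpau once each
}
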
@@ -18,7 +18,6 @@ package modifier
 
 import (
 	"fmt"
-	"strings"
 
 	"tags.cncf.io/container-device-interface/pkg/parser"
 
@@ -28,27 +27,17 @@ import (
 	"github.com/NVIDIA/nvidia-container-toolkit/internal/modifier/cdi"
 	"github.com/NVIDIA/nvidia-container-toolkit/internal/oci"
 	"github.com/NVIDIA/nvidia-container-toolkit/pkg/nvcdi"
-)
-
-const (
-	automaticDeviceVendor = "runtime.nvidia.com"
-	automaticDeviceClass  = "gpu"
-	automaticDeviceKind   = automaticDeviceVendor + "/" + automaticDeviceClass
-	automaticDevicePrefix = automaticDeviceKind + "="
+	"github.com/NVIDIA/nvidia-container-toolkit/pkg/nvcdi/spec"
 )
 
 // NewCDIModifier creates an OCI spec modifier that determines the modifications to make based on the
 // CDI specifications available on the system. The NVIDIA_VISIBLE_DEVICES environment variable is
 // used to select the devices to include.
-func NewCDIModifier(logger logger.Interface, cfg *config.Config, image image.CUDA, isJitCDI bool) (oci.SpecModifier, error) {
-	defaultKind := cfg.NVIDIAContainerRuntimeConfig.Modes.CDI.DefaultKind
-	if isJitCDI {
-		defaultKind = automaticDeviceKind
-	}
+func NewCDIModifier(logger logger.Interface, cfg *config.Config, image image.CUDA) (oci.SpecModifier, error) {
 	deviceRequestor := newCDIDeviceRequestor(
 		logger,
 		image,
-		defaultKind,
+		cfg.NVIDIAContainerRuntimeConfig.Modes.CDI.DefaultKind,
 	)
 	devices := deviceRequestor.DeviceRequests()
 	if len(devices) == 0 {
@@ -118,34 +107,17 @@ func (c *cdiDeviceRequestor) DeviceRequests() []string {
 func filterAutomaticDevices(devices []string) []string {
 	var automatic []string
 	for _, device := range devices {
-		if !strings.HasPrefix(device, automaticDevicePrefix) {
-			continue
+		vendor, class, _ := parser.ParseDevice(device)
+		if vendor == "runtime.nvidia.com" && class == "gpu" {
+			automatic = append(automatic, device)
 		}
-		automatic = append(automatic, device)
 	}
 	return automatic
 }
 
 func newAutomaticCDISpecModifier(logger logger.Interface, cfg *config.Config, devices []string) (oci.SpecModifier, error) {
 	logger.Debugf("Generating in-memory CDI specs for devices %v", devices)
 
-	var identifiers []string
-	for _, device := range devices {
-		identifiers = append(identifiers, strings.TrimPrefix(device, automaticDevicePrefix))
-	}
-
-	cdilib, err := nvcdi.New(
-		nvcdi.WithLogger(logger),
-		nvcdi.WithNVIDIACDIHookPath(cfg.NVIDIACTKConfig.Path),
-		nvcdi.WithDriverRoot(cfg.NVIDIAContainerCLIConfig.Root),
-		nvcdi.WithVendor(automaticDeviceVendor),
-		nvcdi.WithClass(automaticDeviceClass),
-	)
-	if err != nil {
-		return nil, fmt.Errorf("failed to construct CDI library: %w", err)
-	}
-
-	spec, err := cdilib.GetSpec(identifiers...)
+	spec, err := generateAutomaticCDISpec(logger, cfg, devices)
 	if err != nil {
 		return nil, fmt.Errorf("failed to generate CDI spec: %w", err)
 	}
@@ -160,6 +132,27 @@ func newAutomaticCDISpecModifier(logger logger.Interface, cfg *config.Config, de
 	return cdiDeviceRequestor, nil
 }
 
+func generateAutomaticCDISpec(logger logger.Interface, cfg *config.Config, devices []string) (spec.Interface, error) {
+	cdilib, err := nvcdi.New(
+		nvcdi.WithLogger(logger),
+		nvcdi.WithNVIDIACDIHookPath(cfg.NVIDIACTKConfig.Path),
+		nvcdi.WithDriverRoot(cfg.NVIDIAContainerCLIConfig.Root),
+		nvcdi.WithVendor("runtime.nvidia.com"),
+		nvcdi.WithClass("gpu"),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct CDI library: %w", err)
+	}
+
+	var identifiers []string
+	for _, device := range devices {
+		_, _, id := parser.ParseDevice(device)
+		identifiers = append(identifiers, id)
+	}
+
+	return cdilib.GetSpec(identifiers...)
+}
+
 type deduplicatedDeviceRequestor struct {
 	deviceRequestor
 }

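
Both hunks above lean on parser.ParseDevice splitting a fully qualified CDI device name into vendor, class, and device id. A self-contained sketch of what that split does for the names handled here; the real parser also validates each component, which this toy version omits:

package main

import (
	"fmt"
	"strings"
)

// parseDevice splits "vendor/class=name" into its three parts.
func parseDevice(device string) (vendor, class, name string) {
	parts := strings.SplitN(device, "=", 2)
	if len(parts) != 2 {
		return "", "", device
	}
	kind := strings.SplitN(parts[0], "/", 2)
	if len(kind) != 2 {
		return "", "", device
	}
	return kind[0], kind[1], parts[1]
}

func main() {
	vendor, class, name := parseDevice("runtime.nvidia.com/gpu=all")
	fmt.Println(vendor, class, name) // runtime.nvidia.com gpu all
}
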
@@ -70,18 +70,6 @@ func TestDeviceRequests(t *testing.T) {
 			},
 			expectedDevices: []string{"nvidia.com/gpu=0", "example.com/class=device"},
 		},
-		{
-			description: "cdi devices from envvar with default kind",
-			input: cdiDeviceRequestor{
-				defaultKind: "runtime.nvidia.com/gpu",
-			},
-			spec: &specs.Spec{
-				Process: &specs.Process{
-					Env: []string{"NVIDIA_VISIBLE_DEVICES=all"},
-				},
-			},
-			expectedDevices: []string{"runtime.nvidia.com/gpu=all"},
-		},
 		{
 			description: "no matching annotations",
 			prefixes:    []string{"not-prefix/"},
@@ -110,7 +98,7 @@ func TestDeviceRequests(t *testing.T) {
 				"another-prefix/bar": "example.com/device=baz",
 			},
 		},
-		expectedDevices: []string{"example.com/device=baz", "example.com/device=bar"},
+		expectedDevices: []string{"example.com/device=bar", "example.com/device=baz"},
 	},
 	{
 		description: "multiple matching annotations with duplicate devices",

@@ -33,7 +33,7 @@ import (
 // NewCSVModifier creates a modifier that applies modications to an OCI spec if required by the runtime wrapper.
 // The modifications are defined by CSV MountSpecs.
 func NewCSVModifier(logger logger.Interface, cfg *config.Config, container image.CUDA) (oci.SpecModifier, error) {
-	if devices := container.VisibleDevices(); len(devices) == 0 {
+	if devices := container.VisibleDevicesFromEnvVar(); len(devices) == 0 {
 		logger.Infof("No modification required; no devices requested")
 		return nil, nil
 	}

@@ -37,7 +37,7 @@ import (
 //
 // If not devices are selected, no changes are made.
 func NewFeatureGatedModifier(logger logger.Interface, cfg *config.Config, image image.CUDA, driver *root.Driver, hookCreator discover.HookCreator) (oci.SpecModifier, error) {
-	if devices := image.VisibleDevices(); len(devices) == 0 {
+	if devices := image.VisibleDevicesFromEnvVar(); len(devices) == 0 {
 		logger.Infof("No modification required; no devices requested")
 		return nil, nil
 	}
@@ -48,7 +48,7 @@ func NewFeatureGatedModifier(logger logger.Interface, cfg *config.Config, image
 	devRoot := cfg.NVIDIAContainerCLIConfig.Root
 
 	if image.Getenv("NVIDIA_GDS") == "enabled" {
-		d, err := discover.NewGDSDiscoverer(logger, driverRoot, devRoot)
+		d, err := discover.NewGDSDiscoverer(logger, driverRoot)
 		if err != nil {
 			return nil, fmt.Errorf("failed to construct discoverer for GDS devices: %w", err)
 		}

@@ -29,10 +29,9 @@ import (
 
 // NewGraphicsModifier constructs a modifier that injects graphics-related modifications into an OCI runtime specification.
 // The value of the NVIDIA_DRIVER_CAPABILITIES environment variable is checked to determine if this modification should be made.
-func NewGraphicsModifier(logger logger.Interface, cfg *config.Config, container image.CUDA, driver *root.Driver, hookCreator discover.HookCreator) (oci.SpecModifier, error) {
-	devices, reason := requiresGraphicsModifier(container)
-	if len(devices) == 0 {
-		logger.Infof("No graphics modifier required; %v", reason)
+func NewGraphicsModifier(logger logger.Interface, cfg *config.Config, containerImage image.CUDA, driver *root.Driver, hookCreator discover.HookCreator) (oci.SpecModifier, error) {
+	if required, reason := requiresGraphicsModifier(containerImage); !required {
+		logger.Infof("No graphics modifier required: %v", reason)
 		return nil, nil
 	}

@@ -49,7 +48,7 @@ func NewGraphicsModifier(logger logger.Interface, cfg *config.Config, container
 	devRoot := driver.Root
 	drmNodes, err := discover.NewDRMNodesDiscoverer(
 		logger,
-		image.NewVisibleDevices(devices...),
+		containerImage.DevicesFromEnvvars(image.EnvVarNvidiaVisibleDevices),
 		devRoot,
 		hookCreator,
 	)
@@ -65,15 +64,14 @@ func NewGraphicsModifier(logger logger.Interface, cfg *config.Config, container
 }
 
 // requiresGraphicsModifier determines whether a graphics modifier is required.
-func requiresGraphicsModifier(cudaImage image.CUDA) ([]string, string) {
-	devices := cudaImage.VisibleDevices()
-	if len(devices) == 0 {
-		return nil, "no devices requested"
+func requiresGraphicsModifier(cudaImage image.CUDA) (bool, string) {
+	if devices := cudaImage.VisibleDevicesFromEnvVar(); len(devices) == 0 {
+		return false, "no devices requested"
 	}
 
 	if !cudaImage.GetDriverCapabilities().Any(image.DriverCapabilityGraphics, image.DriverCapabilityDisplay) {
-		return nil, "no required capabilities requested"
+		return false, "no required capabilities requested"
 	}
 
-	return devices, ""
+	return true, ""
 }

@@ -26,9 +26,9 @@ import (
 
 func TestGraphicsModifier(t *testing.T) {
 	testCases := []struct {
-		description     string
-		envmap          map[string]string
-		expectedDevices []string
+		description      string
+		envmap           map[string]string
+		expectedRequired bool
 	}{
 		{
 			description: "empty image does not create modifier",
@@ -52,7 +52,7 @@ func TestGraphicsModifier(t *testing.T) {
 				"NVIDIA_VISIBLE_DEVICES":     "all",
 				"NVIDIA_DRIVER_CAPABILITIES": "all",
 			},
-			expectedDevices: []string{"all"},
+			expectedRequired: true,
 		},
 		{
 			description: "devices with graphics capability creates modifier",
@@ -60,7 +60,7 @@ func TestGraphicsModifier(t *testing.T) {
 				"NVIDIA_VISIBLE_DEVICES":     "all",
 				"NVIDIA_DRIVER_CAPABILITIES": "graphics",
 			},
-			expectedDevices: []string{"all"},
+			expectedRequired: true,
 		},
 		{
 			description: "devices with compute,graphics capability creates modifier",
@@ -68,7 +68,7 @@ func TestGraphicsModifier(t *testing.T) {
 				"NVIDIA_VISIBLE_DEVICES":     "all",
 				"NVIDIA_DRIVER_CAPABILITIES": "compute,graphics",
 			},
-			expectedDevices: []string{"all"},
+			expectedRequired: true,
 		},
 		{
 			description: "devices with display capability creates modifier",
@@ -76,7 +76,7 @@ func TestGraphicsModifier(t *testing.T) {
 				"NVIDIA_VISIBLE_DEVICES":     "all",
 				"NVIDIA_DRIVER_CAPABILITIES": "display",
 			},
-			expectedDevices: []string{"all"},
+			expectedRequired: true,
 		},
 		{
 			description: "devices with display,graphics capability creates modifier",
@@ -84,7 +84,7 @@ func TestGraphicsModifier(t *testing.T) {
 				"NVIDIA_VISIBLE_DEVICES":     "all",
 				"NVIDIA_DRIVER_CAPABILITIES": "display,graphics",
 			},
-			expectedDevices: []string{"all"},
+			expectedRequired: true,
 		},
 	}
 
@@ -94,7 +94,7 @@ func TestGraphicsModifier(t *testing.T) {
 				image.WithEnvMap(tc.envmap),
 			)
 			required, _ := requiresGraphicsModifier(image)
-			require.EqualValues(t, tc.expectedDevices, required)
+			require.EqualValues(t, tc.expectedRequired, required)
 		})
 	}
 }

@@ -41,11 +41,6 @@ func (d *byPathHookDiscoverer) Devices() ([]discover.Device, error) {
 	return nil, nil
 }
 
-// EnvVars returns the empty list for the by-path hook discoverer
-func (d *byPathHookDiscoverer) EnvVars() ([]discover.EnvVar, error) {
-	return nil, nil
-}
-
 // Hooks returns the hooks for the GPU device.
 // The following hooks are detected:
 //  1. A hook to create /dev/dri/by-path symlinks

@@ -106,10 +106,6 @@ func (d *nvsandboxutilsDGPU) Devices() ([]discover.Device, error) {
 	return devices, nil
 }
 
-func (d *nvsandboxutilsDGPU) EnvVars() ([]discover.EnvVar, error) {
-	return nil, nil
-}
-
 // Hooks returns a hook to create the by-path symlinks for the discovered devices.
 func (d *nvsandboxutilsDGPU) Hooks() ([]discover.Hook, error) {
 	if len(d.deviceLinks) == 0 {

@@ -101,14 +101,14 @@ func newSpecModifier(logger logger.Interface, cfg *config.Config, ociSpec oci.Sp
 	return modifiers, nil
 }
 
-func newModeModifier(logger logger.Interface, mode info.RuntimeMode, cfg *config.Config, image image.CUDA) (oci.SpecModifier, error) {
+func newModeModifier(logger logger.Interface, mode string, cfg *config.Config, image image.CUDA) (oci.SpecModifier, error) {
 	switch mode {
-	case info.LegacyRuntimeMode:
+	case "legacy":
 		return modifier.NewStableRuntimeModifier(logger, cfg.NVIDIAContainerRuntimeHookConfig.Path), nil
-	case info.CSVRuntimeMode:
+	case "csv":
 		return modifier.NewCSVModifier(logger, cfg, image)
-	case info.CDIRuntimeMode, info.JitCDIRuntimeMode:
-		return modifier.NewCDIModifier(logger, cfg, image, mode == info.JitCDIRuntimeMode)
+	case "cdi":
+		return modifier.NewCDIModifier(logger, cfg, image)
 	}
 
 	return nil, fmt.Errorf("invalid runtime mode: %v", cfg.NVIDIAContainerRuntimeConfig.Mode)
@@ -119,7 +119,7 @@ func newModeModifier(logger logger.Interface, mode info.RuntimeMode, cfg *config
 // The image is also used to determine the runtime mode to apply.
 // If a non-CDI mode is detected we ensure that the image does not process
 // annotation devices.
-func initRuntimeModeAndImage(logger logger.Interface, cfg *config.Config, ociSpec oci.Spec) (info.RuntimeMode, *image.CUDA, error) {
+func initRuntimeModeAndImage(logger logger.Interface, cfg *config.Config, ociSpec oci.Spec) (string, *image.CUDA, error) {
 	rawSpec, err := ociSpec.Load()
 	if err != nil {
 		return "", nil, fmt.Errorf("failed to load OCI spec: %v", err)
@@ -136,13 +136,9 @@ func initRuntimeModeAndImage(logger logger.Interface, cfg *config.Config, ociSpe
 		return "", nil, err
 	}
 
-	modeResolver := info.NewRuntimeModeResolver(
-		info.WithLogger(logger),
-		info.WithImage(&image),
-	)
-	mode := modeResolver.ResolveRuntimeMode(cfg.NVIDIAContainerRuntimeConfig.Mode)
+	mode := info.ResolveAutoMode(logger, cfg.NVIDIAContainerRuntimeConfig.Mode, image)
 	// We update the mode here so that we can continue passing just the config to other functions.
-	cfg.NVIDIAContainerRuntimeConfig.Mode = string(mode)
+	cfg.NVIDIAContainerRuntimeConfig.Mode = mode
 
 	if mode == "cdi" || len(cfg.NVIDIAContainerRuntimeConfig.Modes.CDI.AnnotationPrefixes) == 0 {
 		return mode, &image, nil
@@ -158,12 +154,12 @@ func initRuntimeModeAndImage(logger logger.Interface, cfg *config.Config, ociSpe
 }
 
 // supportedModifierTypes returns the modifiers supported for a specific runtime mode.
-func supportedModifierTypes(mode info.RuntimeMode) []string {
+func supportedModifierTypes(mode string) []string {
 	switch mode {
-	case info.CDIRuntimeMode, info.JitCDIRuntimeMode:
+	case "cdi":
 		// For CDI mode we make no additional modifications.
 		return []string{"nvidia-hook-remover", "mode"}
-	case info.CSVRuntimeMode:
+	case "csv":
 		// For CSV mode we support mode and feature-gated modification.
 		return []string{"nvidia-hook-remover", "feature-gated", "mode"}
 	default:

@@ -10,7 +10,7 @@ Build-Depends: debhelper (>= 9)
 
 Package: nvidia-container-toolkit
 Architecture: any
-Depends: ${misc:Depends}, nvidia-container-toolkit-base (= @VERSION@), libnvidia-container-tools (= @VERSION@), libnvidia-container-tools (<< 2.0.0)
+Depends: ${misc:Depends}, nvidia-container-toolkit-base (= @VERSION@), libnvidia-container-tools (>= @LIBNVIDIA_CONTAINER_TOOLS_VERSION@), libnvidia-container-tools (<< 2.0.0)
 Breaks: nvidia-container-runtime (<= 3.5.0-1), nvidia-container-runtime-hook
 Replaces: nvidia-container-runtime (<= 3.5.0-1), nvidia-container-runtime-hook
 Description: NVIDIA Container toolkit

@@ -3,6 +3,7 @@
 set -e
 
 sed -i "s;@SECTION@;${SECTION:+$SECTION/};g" debian/control
+sed -i "s;@LIBNVIDIA_CONTAINER_TOOLS_VERSION@;${LIBNVIDIA_CONTAINER_TOOLS_VERSION:+$LIBNVIDIA_CONTAINER_TOOLS_VERSION};g" debian/control
 sed -i "s;@VERSION@;${VERSION:+$VERSION};g" debian/control
 
 if [ -n "$DISTRIB" ]; then

@@ -23,7 +23,7 @@ Source8: nvidia-cdi-refresh.path
 Obsoletes: nvidia-container-runtime <= 3.5.0-1, nvidia-container-runtime-hook <= 1.4.0-2
 Provides: nvidia-container-runtime
 Provides: nvidia-container-runtime-hook
-Requires: libnvidia-container-tools == %{version}-%{release}, libnvidia-container-tools < 2.0.0
+Requires: libnvidia-container-tools >= %{libnvidia_container_tools_version}, libnvidia-container-tools < 2.0.0
 Requires: nvidia-container-toolkit-base == %{version}-%{release}
 
 %description
@@ -86,7 +86,7 @@ fi
 # As of 1.10.0-1 we generate the release information automatically
 * %{release_date} NVIDIA CORPORATION <cudatools@nvidia.com> %{version}-%{release}
 - See https://gitlab.com/nvidia/container-toolkit/container-toolkit/-/blob/%{git_commit}/CHANGELOG.md
-- Bump libnvidia-container dependency to libnvidia-container-tools == %{version}-%{release}
+- Bump libnvidia-container dependency to libnvidia-container-tools >= %{libnvidia_container_tools_version}
 
 # The BASE package consists of the NVIDIA Container Runtime and the NVIDIA Container Toolkit CLI.
 # This allows the package to be installed on systems where no NVIDIA Container CLI is available.

|
||||
EnableCudaCompatHook = discover.EnableCudaCompatHook
|
||||
// An UpdateLDCacheHook is used to update the ldcache in the container.
|
||||
UpdateLDCacheHook = discover.UpdateLDCacheHook
|
||||
// A CreateSonameSymlinksHook is the hook used to ensure that soname symlinks
|
||||
// for injected libraries exist in the container.
|
||||
CreateSonameSymlinksHook = discover.CreateSonameSymlinksHook
|
||||
|
||||
// Deprecated: Use CreateSymlinksHook instead.
|
||||
HookCreateSymlinks = CreateSymlinksHook
|
||||
|
||||
@@ -82,7 +82,7 @@ func (l *nvcdilib) newDriverVersionDiscoverer(version string) (discover.Discover
 
 // NewDriverLibraryDiscoverer creates a discoverer for the libraries associated with the specified driver version.
 func (l *nvcdilib) NewDriverLibraryDiscoverer(version string) (discover.Discover, error) {
-	libraryPaths, libCudaDirectoryPath, err := getVersionLibs(l.logger, l.driver, version)
+	libraryPaths, err := getVersionLibs(l.logger, l.driver, version)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get libraries for driver version: %v", err)
 	}
@@ -116,12 +116,6 @@ func (l *nvcdilib) NewDriverLibraryDiscoverer(version string) (discover.Discover
 	disableDeviceNodeModification := l.hookCreator.Create(DisableDeviceNodeModificationHook)
 	discoverers = append(discoverers, disableDeviceNodeModification)
 
-	environmentVariable := &discover.EnvVar{
-		Name:  "NVIDIA_CTK_LIBCUDA_DIR",
-		Value: libCudaDirectoryPath,
-	}
-	discoverers = append(discoverers, environmentVariable)
-
 	d := discover.Merge(discoverers...)
 
 	return d, nil
@@ -209,41 +203,39 @@ func NewDriverBinariesDiscoverer(logger logger.Interface, driverRoot string) dis
 // getVersionLibs checks the LDCache for libraries ending in the specified driver version.
 // Although the ldcache at the specified driverRoot is queried, the paths are returned relative to this driverRoot.
 // This allows the standard mount location logic to be used for resolving the mounts.
-func getVersionLibs(logger logger.Interface, driver *root.Driver, version string) ([]string, string, error) {
+func getVersionLibs(logger logger.Interface, driver *root.Driver, version string) ([]string, error) {
 	logger.Infof("Using driver version %v", version)

 	libCudaPaths, err := cuda.New(
 		driver.Libraries(),
 	).Locate("." + version)
 	if err != nil {
-		return nil, "", fmt.Errorf("failed to locate libcuda.so.%v: %v", version, err)
+		return nil, fmt.Errorf("failed to locate libcuda.so.%v: %v", version, err)
 	}
-	libCudaDirectoryPath := filepath.Dir(libCudaPaths[0])
+	libRoot := filepath.Dir(libCudaPaths[0])

 	libraries := lookup.NewFileLocator(
 		lookup.WithLogger(logger),
 		lookup.WithSearchPaths(
-			libCudaDirectoryPath,
-			filepath.Join(libCudaDirectoryPath, "vdpau"),
+			libRoot,
+			filepath.Join(libRoot, "vdpau"),
 		),
 		lookup.WithOptional(true),
 	)

 	libs, err := libraries.Locate("*.so." + version)
 	if err != nil {
-		return nil, "", fmt.Errorf("failed to locate libraries for driver version %v: %v", version, err)
+		return nil, fmt.Errorf("failed to locate libraries for driver version %v: %v", version, err)
 	}

 	if driver.Root == "/" || driver.Root == "" {
-		return libs, libCudaDirectoryPath, nil
+		return libs, nil
 	}

-	libCudaDirectoryPath = driver.RelativeToRoot(libCudaDirectoryPath)
-
 	var relative []string
 	for _, l := range libs {
 		relative = append(relative, strings.TrimPrefix(l, driver.Root))
 	}

-	return relative, libCudaDirectoryPath, nil
+	return relative, nil
 }
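With the second return value gone, getVersionLibs is again only responsible for rewriting library paths relative to the driver root. A minimal sketch of just that prefix-stripping step, using only the standard library (the paths are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// relativeToRoot strips a driver root prefix from each library path, so
// "/run/nvidia/driver/usr/lib64/libcuda.so.550.54.14" becomes
// "/usr/lib64/libcuda.so.550.54.14" when root is "/run/nvidia/driver".
func relativeToRoot(root string, libs []string) []string {
	if root == "/" || root == "" {
		return libs // nothing to strip for the default root
	}
	relative := make([]string, 0, len(libs))
	for _, l := range libs {
		relative = append(relative, strings.TrimPrefix(l, root))
	}
	return relative
}

func main() {
	libs := []string{"/run/nvidia/driver/usr/lib64/libcuda.so.550.54.14"}
	fmt.Println(relativeToRoot("/run/nvidia/driver", libs))
}
```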
@@ -34,7 +34,7 @@ var _ Interface = (*gdslib)(nil)

 // GetAllDeviceSpecs returns the device specs for all available devices.
 func (l *gdslib) GetAllDeviceSpecs() ([]specs.Device, error) {
-	discoverer, err := discover.NewGDSDiscoverer(l.logger, l.driverRoot, l.devRoot)
+	discoverer, err := discover.NewGDSDiscoverer(l.logger, l.driverRoot)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create GPUDirect Storage discoverer: %v", err)
 	}
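The hunk context shows `var _ Interface = (*gdslib)(nil)`, the compile-time assertion that catches exactly this kind of signature drift. A runnable sketch with a simplified, hypothetical Interface (the real one returns CDI device specs, not strings):

```go
package main

import "fmt"

// Interface is a stand-in for the real nvcdi interface.
type Interface interface {
	GetAllDeviceSpecs() ([]string, error)
}

type gdslib struct{}

func (l *gdslib) GetAllDeviceSpecs() ([]string, error) {
	return []string{"nvidia.com/gds=all"}, nil
}

// The blank-identifier assignment fails to compile the moment *gdslib
// stops satisfying Interface -- e.g. if a signature change like the one
// in this diff ripples into the method set.
var _ Interface = (*gdslib)(nil)

func main() {
	specs, _ := (&gdslib{}).GetAllDeviceSpecs()
	fmt.Println(specs)
}
```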
@@ -55,11 +55,6 @@ func (d *deviceFolderPermissions) Devices() ([]discover.Device, error) {
 	return nil, nil
 }

-// EnvVars are empty for this discoverer
-func (d *deviceFolderPermissions) EnvVars() ([]discover.EnvVar, error) {
-	return nil, nil
-}
-
 // Hooks returns a set of hooks that sets the file mode to 755 of parent folders for nested device nodes.
 func (d *deviceFolderPermissions) Hooks() ([]discover.Hook, error) {
 	folders, err := d.getDeviceSubfolders()
@@ -70,9 +70,9 @@ function copy-file() {
 			-v "$(pwd):$(pwd)" \
 			-w "$(pwd)" \
 			-u "$(id -u):$(id -g)" \
-			--entrypoint="sh" \
+			--entrypoint="bash" \
 			"${image}" \
-			-c "cp -p ${path_in_image} ${path_on_host}"
+			-c "cp ${path_in_image} ${path_on_host}"
 	fi
 }
@@ -96,9 +96,9 @@ function copy_file() {
 			-v "$(pwd):$(pwd)" \
 			-w "$(pwd)" \
 			-u "$(id -u):$(id -g)" \
-			--entrypoint="sh" \
+			--entrypoint="bash" \
 			"${image}" \
-			-c "cp -p ${path_in_image} ${path_on_host}"
+			-c "cp ${path_in_image} ${path_on_host}"
 	fi
 }
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 # SPDX-License-Identifier: Apache-2.0
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -28,4 +28,3 @@ spec:
     install: false
   nvidiaDriver:
     install: true
-    branch: 550
@@ -173,10 +173,10 @@ var _ = Describe("docker", Ordered, ContinueOnFailure, func() {

 	When("Testing CUDA Forward compatibility", Ordered, func() {
 		BeforeAll(func(ctx context.Context) {
-			_, _, err := runner.Run("docker pull nvcr.io/nvidia/cuda:12.9.0-base-ubi8")
+			_, _, err := runner.Run("docker pull nvcr.io/nvidia/cuda:12.8.0-base-ubi8")
 			Expect(err).ToNot(HaveOccurred())

-			compatOutput, _, err := runner.Run("docker run --rm -i -e NVIDIA_VISIBLE_DEVICES=void nvcr.io/nvidia/cuda:12.9.0-base-ubi8 bash -c \"ls /usr/local/cuda/compat/libcuda.*.*\"")
+			compatOutput, _, err := runner.Run("docker run --rm -i -e NVIDIA_VISIBLE_DEVICES=void nvcr.io/nvidia/cuda:12.8.0-base-ubi8 bash -c \"ls /usr/local/cuda/compat/libcuda.*.*\"")
 			Expect(err).ToNot(HaveOccurred())
 			Expect(compatOutput).ToNot(BeEmpty())
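Every test in this file drives Docker through runner.Run, which, judging from its call sites, returns captured stdout, stderr, and an error. A hypothetical stand-in built on os/exec, for readers who want to trace the test flow locally:

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// run mimics the runner.Run call shape seen in the tests: execute a
// shell command and return captured stdout, stderr, and any error.
func run(cmd string) (string, string, error) {
	var stdout, stderr bytes.Buffer
	c := exec.Command("sh", "-c", cmd)
	c.Stdout = &stdout
	c.Stderr = &stderr
	err := c.Run()
	return stdout.String(), stderr.String(), err
}

func main() {
	out, _, err := run("echo libcuda.so.1")
	fmt.Printf("out=%q err=%v\n", out, err)
}
```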
@@ -199,21 +199,21 @@ var _ = Describe("docker", Ordered, ContinueOnFailure, func() {
 		})

 		It("should work with the nvidia runtime in legacy mode", func(ctx context.Context) {
-			ldconfigOut, _, err := runner.Run("docker run --rm -i -e NVIDIA_DISABLE_REQUIRE=true --runtime=nvidia --gpus all nvcr.io/nvidia/cuda:12.9.0-base-ubi8 bash -c \"ldconfig -p | grep libcuda.so.1\"")
+			ldconfigOut, _, err := runner.Run("docker run --rm -i -e NVIDIA_DISABLE_REQUIRE=true --runtime=nvidia --gpus all nvcr.io/nvidia/cuda:12.8.0-base-ubi8 bash -c \"ldconfig -p | grep libcuda.so.1\"")
 			Expect(err).ToNot(HaveOccurred())
-			Expect(ldconfigOut).To(ContainSubstring("/usr/local/cuda-12.9/compat/"))
+			Expect(ldconfigOut).To(ContainSubstring("/usr/local/cuda/compat"))
 		})

 		It("should work with the nvidia runtime in CDI mode", func(ctx context.Context) {
-			ldconfigOut, _, err := runner.Run("docker run --rm -i -e NVIDIA_DISABLE_REQUIRE=true --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=runtime.nvidia.com/gpu=all nvcr.io/nvidia/cuda:12.9.0-base-ubi8 bash -c \"ldconfig -p | grep libcuda.so.1\"")
+			ldconfigOut, _, err := runner.Run("docker run --rm -i -e NVIDIA_DISABLE_REQUIRE=true --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=runtime.nvidia.com/gpu=all nvcr.io/nvidia/cuda:12.8.0-base-ubi8 bash -c \"ldconfig -p | grep libcuda.so.1\"")
 			Expect(err).ToNot(HaveOccurred())
-			Expect(ldconfigOut).To(ContainSubstring("/usr/local/cuda-12.9/compat/"))
+			Expect(ldconfigOut).To(ContainSubstring("/usr/local/cuda/compat"))
 		})

-		It("should work with nvidia-container-runtime-hook", func(ctx context.Context) {
-			ldconfigOut, _, err := runner.Run("docker run --rm -i -e NVIDIA_DISABLE_REQUIRE=true --runtime=runc --gpus all nvcr.io/nvidia/cuda:12.9.0-base-ubi8 bash -c \"ldconfig -p | grep libcuda.so.1\"")
+		It("should NOT work with nvidia-container-runtime-hook", func(ctx context.Context) {
+			ldconfigOut, _, err := runner.Run("docker run --rm -i -e NVIDIA_DISABLE_REQUIRE=true --runtime=runc --gpus all nvcr.io/nvidia/cuda:12.8.0-base-ubi8 bash -c \"ldconfig -p | grep libcuda.so.1\"")
 			Expect(err).ToNot(HaveOccurred())
-			Expect(ldconfigOut).To(ContainSubstring("/usr/local/cuda-12.9/compat/"))
+			Expect(ldconfigOut).To(ContainSubstring("/usr/lib64"))
 		})
 	})
@@ -235,26 +235,4 @@ var _ = Describe("docker", Ordered, ContinueOnFailure, func() {
 			Expect(output).To(Equal("ModifyDeviceFiles: 0\n"))
 		})
 	})

-	When("A container is run using CDI", Ordered, func() {
-		BeforeAll(func(ctx context.Context) {
-			_, _, err := runner.Run("docker pull ubuntu")
-			Expect(err).ToNot(HaveOccurred())
-		})
-
-		It("should include libcuda.so in the ldcache", func(ctx context.Context) {
-			ldcacheOutput, _, err := runner.Run("docker run --rm -i --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=runtime.nvidia.com/gpu=all ubuntu bash -c \"ldconfig -p | grep 'libcuda.so'\"")
-			Expect(err).ToNot(HaveOccurred())
-			Expect(ldcacheOutput).ToNot(BeEmpty())
-
-			ldcacheLines := strings.Split(ldcacheOutput, "\n")
-			var libs []string
-			for _, line := range ldcacheLines {
-				parts := strings.SplitN(line, " (", 2)
-				libs = append(libs, strings.TrimSpace(parts[0]))
-			}
-
-			Expect(libs).To(ContainElements([]string{"libcuda.so", "libcuda.so.1"}))
-		})
-	})
 })
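The deleted CDI test parsed `ldconfig -p` output by splitting each line at the first " (". That parsing step, extracted into a standalone sketch with sample input:

```go
package main

import (
	"fmt"
	"strings"
)

// libNames extracts the library name from each `ldconfig -p` line, which
// looks like "\tlibcuda.so.1 (libc6,x86-64) => /usr/lib64/libcuda.so.1".
func libNames(ldcacheOutput string) []string {
	var libs []string
	for _, line := range strings.Split(ldcacheOutput, "\n") {
		parts := strings.SplitN(line, " (", 2)
		if name := strings.TrimSpace(parts[0]); name != "" {
			libs = append(libs, name)
		}
	}
	return libs
}

func main() {
	sample := "\tlibcuda.so.1 (libc6,x86-64) => /usr/lib64/libcuda.so.1\n" +
		"\tlibcuda.so (libc6,x86-64) => /usr/lib64/libcuda.so"
	fmt.Println(libNames(sample)) // [libcuda.so.1 libcuda.so]
}
```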
Submodule third_party/libnvidia-container updated: 9d6a23b996...710a0f1304
@@ -13,8 +13,8 @@
 # limitations under the License.

 LIB_NAME := nvidia-container-toolkit
-LIB_VERSION := 1.18.0
-LIB_TAG := rc.1
+LIB_VERSION := 1.17.4
+LIB_TAG :=

 # The package version is the combination of the library version and tag.
 # If the tag is specified the two components are joined with a tilde (~).
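Per the Makefile comment, the package version joins LIB_VERSION and LIB_TAG with a tilde when the tag is non-empty. A hypothetical helper making that rule explicit:

```go
package main

import "fmt"

// packageVersion composes the package version as the Makefile comment
// describes: "1.18.0" + "rc.1" -> "1.18.0~rc.1", while an empty tag
// yields just the version ("1.17.4").
func packageVersion(version, tag string) string {
	if tag == "" {
		return version
	}
	return version + "~" + tag
}

func main() {
	fmt.Println(packageVersion("1.18.0", "rc.1")) // 1.18.0~rc.1
	fmt.Println(packageVersion("1.17.4", ""))     // 1.17.4
}
```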