Merge branch 'build-from-repo' into 'master'

Add logic to build all packages from the nvidia-container-toolkit repo

See merge request nvidia/container-toolkit/container-toolkit!49
Evan Lezar 2021-10-07 14:30:13 +00:00
commit 845701447c
25 changed files with 639 additions and 14 deletions

.gitmodules vendored Normal file

@@ -0,0 +1,9 @@
[submodule "third_party/libnvidia-container"]
path = third_party/libnvidia-container
url = https://gitlab.com/nvidia/container-toolkit/libnvidia-container.git
[submodule "third_party/nvidia-container-runtime"]
path = third_party/nvidia-container-runtime
url = https://gitlab.com/nvidia/container-toolkit/container-runtime.git
[submodule "third_party/nvidia-docker"]
path = third_party/nvidia-docker
url = https://gitlab.com/nvidia/container-toolkit/nvidia-docker.git

DEVELOPMENT.md Normal file

@@ -0,0 +1,94 @@
# NVIDIA Container Toolkit Release Tooling
This repository allows for the components of the NVIDIA container stack to be
built and released as the NVIDIA Container Toolkit from a single repository. The following components
are included as submodules in the `third_party` folder:
* `libnvidia-container`
* `nvidia-container-runtime`
* `nvidia-docker`

The `nvidia-container-toolkit` component itself resides directly in this repository.
## Building
In order to build the packages, the following command is executed:
```sh
./scripts/build-all-components.sh TARGET
```
where `TARGET` is a make target that is valid for each of the sub-components.
These include:
* `ubuntu18.04-amd64`
* `docker-all`
with the latter generating packages for all supported distribution and platform combinations.
The packages are generated in the `dist` folder.
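For example, building the Ubuntu 18.04 packages for `amd64` (a minimal sketch using one of the targets listed above):
```sh
./scripts/build-all-components.sh ubuntu18.04-amd64
# The generated packages are then available under dist/, e.g.:
ls dist/ubuntu18.04/amd64
```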
## Testing local changes
To use the same build logic to generate packages that include local changes, the locations of the
individual components can be overridden using the `LIBNVIDIA_CONTAINER_ROOT`,
`NVIDIA_CONTAINER_TOOLKIT_ROOT`, `NVIDIA_CONTAINER_RUNTIME_ROOT`, and `NVIDIA_DOCKER_ROOT`
environment variables.
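For example, a build against a local checkout of `libnvidia-container` might look as follows (a sketch; the checkout path is hypothetical):
```sh
LIBNVIDIA_CONTAINER_ROOT=$HOME/src/libnvidia-container \
    ./scripts/build-all-components.sh ubuntu18.04-amd64
```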
## Testing packages locally
### Ubuntu
Launch a docker container:
```
docker run --rm -it \
-v $(pwd):/work \
-v $(pwd)/dist/ubuntu18.04/amd64:/local-repository \
-w /work \
ubuntu:18.04
```
```
apt-get update && apt-get install -y apt-utils
```
```
echo "deb [trusted=yes] file:/local-repository/ ./" > /etc/apt/sources.list.d/local.list
```
```
cd /local-repository && apt-ftparchive packages . > Packages
```
```
apt-get update
```
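With the local repository configured, the packages can be installed as usual, for example (assuming the `nvidia-container-toolkit` package was among those built):
```
apt-get install -y nvidia-container-toolkit
```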
### CentOS
```
docker run --rm -it \
-v $(pwd):/work \
-v $(pwd)/dist/centos8/x86_64:/local-repository \
-w /work \
centos:8
```
```
yum install -y createrepo
```
```
createrepo /local-repository
```
```
cat >/etc/yum.repos.d/local.repo <<EOL
[local]
name=NVIDIA Container Toolkit Local Packages
baseurl=file:///local-repository
enabled=1
gpgcheck=0
protect=1
EOL
```
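With the repository defined, the locally built packages can be installed as usual, for example:
```
yum install -y nvidia-container-toolkit
```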

Makefile

@@ -17,7 +17,7 @@ MKDIR ?= mkdir
DIST_DIR ?= $(CURDIR)/dist
LIB_NAME := nvidia-container-toolkit
-LIB_VERSION := 1.5.2
+LIB_VERSION := 1.6.0
LIB_TAG ?= rc.1
GOLANG_VERSION := 1.16.3
@@ -43,7 +43,7 @@ CMD_TARGETS := $(patsubst %,cmd-%, $(CMDS))
$(info CMD_TARGETS=$(CMD_TARGETS))
CHECK_TARGETS := assert-fmt vet lint ineffassign misspell
-MAKE_TARGETS := binaries build all check fmt lint-internal test examples cmds coverage generate $(CHECK_TARGETS)
+MAKE_TARGETS := binaries build check fmt lint-internal test examples cmds coverage generate $(CHECK_TARGETS)
TARGETS := $(MAKE_TARGETS) $(EXAMPLE_TARGETS) $(CMD_TARGETS)


@@ -59,6 +59,7 @@ CMD arch=$(uname -m) && \
rpmbuild --clean --target=$arch -bb \
-D "_topdir $PWD" \
-D "version $VERSION" \
-D "libnvidia_container_version ${VERSION}-${RELEASE}" \
-D "release $RELEASE" \
SPECS/nvidia-container-toolkit.spec && \
mv RPMS/$arch/*.rpm /dist


@@ -57,6 +57,7 @@ CMD arch=$(uname -m) && \
rpmbuild --clean --target=$arch -bb \
-D "_topdir $PWD" \
-D "version $VERSION" \
-D "libnvidia_container_version ${VERSION}-${RELEASE}" \
-D "release $RELEASE" \
SPECS/nvidia-container-toolkit.spec && \
mv RPMS/$arch/*.rpm /dist


@@ -64,5 +64,6 @@ RUN sed -i "s;@VERSION@;${REVISION};" debian/changelog && \
if [ "$REVISION" != "$(dpkg-parsechangelog --show-field=Version)" ]; then exit 1; fi
CMD export DISTRIB="$(lsb_release -cs)" && \
-debuild -eDISTRIB -eSECTION --dpkg-buildpackage-hook='sh debian/prepare' -i -us -uc -b && \
+debuild -eDISTRIB -eSECTION -eLIBNVIDIA_CONTAINER_VERSION="${REVISION}" \
+--dpkg-buildpackage-hook='sh debian/prepare' -i -us -uc -b && \
mv /tmp/nvidia-container-toolkit_*.deb /dist


@@ -56,6 +56,7 @@ CMD arch=$(uname -m) && \
rpmbuild --clean --target=$arch -bb \
-D "_topdir $PWD" \
-D "version $VERSION" \
-D "libnvidia_container_version ${VERSION}-${RELEASE}" \
-D "release $RELEASE" \
SPECS/nvidia-container-toolkit.spec && \
mv RPMS/$arch/*.rpm /dist


@@ -57,5 +57,6 @@ RUN sed -i "s;@VERSION@;${REVISION};" debian/changelog && \
if [ "$REVISION" != "$(dpkg-parsechangelog --show-field=Version)" ]; then exit 1; fi
CMD export DISTRIB="$(lsb_release -cs)" && \
-debuild -eDISTRIB -eSECTION --dpkg-buildpackage-hook='sh debian/prepare' -i -us -uc -b && \
+debuild -eDISTRIB -eSECTION -eLIBNVIDIA_CONTAINER_VERSION="${REVISION}" \
+--dpkg-buildpackage-hook='sh debian/prepare' -i -us -uc -b && \
mv /tmp/*.deb /dist


@@ -97,11 +97,11 @@ docker-all: $(AMD64_TARGETS) $(X86_64_TARGETS) \
# private centos target
--centos%: OS := centos
---centos%: PKG_REV := $(if $(LIB_TAG),0.1.$(LIB_TAG),2)
+--centos%: PKG_REV := $(if $(LIB_TAG),0.1.$(LIB_TAG),1)
# private amazonlinux target
--amazonlinux%: OS := amazonlinux
---amazonlinux%: PKG_REV = $(if $(LIB_TAG),0.1.$(LIB_TAG).amzn$(VERSION),2.amzn$(VERSION))
+--amazonlinux%: PKG_REV := $(if $(LIB_TAG),0.1.$(LIB_TAG),1)
# private opensuse-leap target
--opensuse-leap%: OS = opensuse-leap
@@ -110,7 +110,7 @@ docker-all: $(AMD64_TARGETS) $(X86_64_TARGETS) \
# private rhel target (actually built on centos)
--rhel%: OS := centos
---rhel%: PKG_REV := $(if $(LIB_TAG),0.1.$(LIB_TAG),2)
+--rhel%: PKG_REV := $(if $(LIB_TAG),0.1.$(LIB_TAG),1)
--rhel%: VERSION = $(patsubst rhel%-$(ARCH),%,$(TARGET_PLATFORM))
--rhel%: ARTIFACTS_DIR = $(DIST_DIR)/rhel$(VERSION)/$(ARCH)

debian/changelog

@@ -1,4 +1,4 @@
-nvidia-container-toolkit (1.5.2~rc.1-1) experimental; urgency=medium
+nvidia-container-toolkit (1.6.0~rc.1-1) experimental; urgency=medium
* Include nvidia-container-runtime into nvidia-container-toolkit package

debian/control

@@ -10,8 +10,8 @@ Build-Depends: debhelper (>= 9)
Package: nvidia-container-toolkit
Architecture: any
-Depends: ${misc:Depends}, libnvidia-container-tools (>= 1.4.0), libnvidia-container-tools (<< 2.0.0), libseccomp2
-Breaks: nvidia-container-runtime (<= 3.5.0), nvidia-container-runtime-hook
-Replaces: nvidia-container-runtime (<= 3.5.0), nvidia-container-runtime-hook
+Depends: ${misc:Depends}, libnvidia-container-tools (>= @LIBNVIDIA_CONTAINER_VERSION@), libnvidia-container-tools (<< 2.0.0), libseccomp2
+Breaks: nvidia-container-runtime (<= 3.5.0-1), nvidia-container-runtime-hook
+Replaces: nvidia-container-runtime (<= 3.5.0-1), nvidia-container-runtime-hook
Description: NVIDIA container runtime hook
Provides an OCI hook to enable GPU support in containers.

debian/prepare

@@ -3,6 +3,7 @@
set -e
sed -i "s;@SECTION@;${SECTION:+$SECTION/};g" debian/control
sed -i "s;@LIBNVIDIA_CONTAINER_VERSION@;${LIBNVIDIA_CONTAINER_VERSION:+$LIBNVIDIA_CONTAINER_VERSION};g" debian/control
if [ -n "$DISTRIB" ]; then
sed -i "s;UNRELEASED;$DISTRIB;" debian/changelog

nvidia-container-toolkit.spec

@@ -17,10 +17,10 @@ Source3: oci-nvidia-hook
Source4: oci-nvidia-hook.json
Source5: LICENSE
-Obsoletes: nvidia-container-runtime <= 3.5.0, nvidia-container-runtime-hook
+Obsoletes: nvidia-container-runtime <= 3.5.0-1, nvidia-container-runtime-hook
Provides: nvidia-container-runtime
Provides: nvidia-container-runtime-hook
-Requires: libnvidia-container-tools >= 1.4.0, libnvidia-container-tools < 2.0.0
+Requires: libnvidia-container-tools >= %{libnvidia_container_version}, libnvidia-container-tools < 2.0.0
%if 0%{?suse_version}
Requires: libseccomp2
@@ -64,7 +64,7 @@ rm -f %{_bindir}/nvidia-container-runtime-hook
/usr/share/containers/oci/hooks.d/oci-nvidia-hook.json
%changelog
-* Mon Sep 06 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.5.2-0.1.rc.1
+* Mon Sep 06 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.6.0-0.1.rc.1
- Include nvidia-container-runtime into nvidia-container-toolkit package

scripts/build-all-components.sh Executable file

@@ -0,0 +1,67 @@
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is used to build the packages for the components of the NVIDIA
# Container Stack. These include the nvidia-container-toolkit in this repository
# as well as the components included in the third_party folder.
# All required packages are generated in the specified dist folder.
function assert_usage() {
echo "Missing argument $1"
echo "$(basename ${BASH_SOURCE[0]}) TARGET"
exit 1
}
set -e -x
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
PROJECT_ROOT="$( cd ${SCRIPTS_DIR}/.. && pwd )"
if [[ $# -ne 1 ]]; then
assert_usage "TARGET"
fi
TARGET=$1
: ${DIST_DIR:=${PROJECT_ROOT}/dist}
export DIST_DIR
echo "Building ${TARGET} for all packages to ${DIST_DIR}"
: ${LIBNVIDIA_CONTAINER_ROOT:=${PROJECT_ROOT}/third_party/libnvidia-container}
: ${NVIDIA_CONTAINER_TOOLKIT_ROOT:=${PROJECT_ROOT}}
: ${NVIDIA_CONTAINER_RUNTIME_ROOT:=${PROJECT_ROOT}/third_party/nvidia-container-runtime}
: ${NVIDIA_DOCKER_ROOT:=${PROJECT_ROOT}/third_party/nvidia-docker}
${SCRIPTS_DIR}/get-component-versions.sh
# Build libnvidia-container
make -C ${LIBNVIDIA_CONTAINER_ROOT} -f mk/docker.mk ${TARGET}
# Build nvidia-container-toolkit
make -C ${NVIDIA_CONTAINER_TOOLKIT_ROOT} ${TARGET}
# We set the TOOLKIT_VERSION for the nvidia-container-runtime and nvidia-docker targets
# TODO: This is not yet enabled in the makefiles below
: ${PREVIOUS_TOOLKIT_VERSION:=1.5.1}
echo "Using TOOLKIT_VERSION=${PREVIOUS_TOOLKIT_VERSION} as previous nvidia-container-toolkit version"
# Build nvidia-container-runtime
make -C ${NVIDIA_CONTAINER_RUNTIME_ROOT} TOOLKIT_VERSION=${PREVIOUS_TOOLKIT_VERSION} ${TARGET}
# Build nvidia-docker2
make -C ${NVIDIA_DOCKER_ROOT} TOOLKIT_VERSION=${PREVIOUS_TOOLKIT_VERSION} ${TARGET}

scripts/get-component-versions.sh Executable file

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script determines the versions of the components of the NVIDIA
# Container Stack: the nvidia-container-toolkit in this repository as well as
# the components included in the third_party folder.
function assert_usage() {
exit 1
}
set -e
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
PROJECT_ROOT="$( cd ${SCRIPTS_DIR}/.. && pwd )"
: ${LIBNVIDIA_CONTAINER_ROOT:=${PROJECT_ROOT}/third_party/libnvidia-container}
: ${NVIDIA_CONTAINER_TOOLKIT_ROOT:=${PROJECT_ROOT}}
: ${NVIDIA_CONTAINER_RUNTIME_ROOT:=${PROJECT_ROOT}/third_party/nvidia-container-runtime}
: ${NVIDIA_DOCKER_ROOT:=${PROJECT_ROOT}/third_party/nvidia-docker}
# Get version for libnvidia-container
libnvidia_container_version=$(grep "#define NVC_VERSION" ${LIBNVIDIA_CONTAINER_ROOT}/src/nvc.h \
| sed -e 's/#define NVC_VERSION[[:space:]]"\(.*\)"/\1/')
# Get version for nvidia-container-toolkit
nvidia_container_toolkit_version=$(grep -m 1 "^LIB_VERSION := " ${NVIDIA_CONTAINER_TOOLKIT_ROOT}/Makefile | sed -e 's/LIB_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_container_toolkit_tag=$(grep -m 1 "^LIB_TAG .= " ${NVIDIA_CONTAINER_TOOLKIT_ROOT}/Makefile | sed -e 's/LIB_TAG .=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_container_toolkit_version="${nvidia_container_toolkit_version}${nvidia_container_toolkit_tag:+~${nvidia_container_toolkit_tag}}"
# Get version for nvidia-container-runtime
nvidia_container_runtime_version=$(grep -m 1 "^LIB_VERSION := " ${NVIDIA_CONTAINER_RUNTIME_ROOT}/Makefile | sed -e 's/LIB_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_container_runtime_tag=$(grep -m 1 "^LIB_TAG .= " ${NVIDIA_CONTAINER_RUNTIME_ROOT}/Makefile | sed -e 's/LIB_TAG .=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_container_runtime_version="${nvidia_container_runtime_version}${nvidia_container_runtime_tag:+~${nvidia_container_runtime_tag}}"
# Get version for nvidia-docker
nvidia_docker_version=$(grep -m 1 "^LIB_VERSION := " ${NVIDIA_DOCKER_ROOT}/Makefile | sed -e 's/LIB_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_docker_tag=$(grep -m 1 "^LIB_TAG .= " ${NVIDIA_DOCKER_ROOT}/Makefile | sed -e 's/LIB_TAG .=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_docker_version="${nvidia_docker_version}${nvidia_docker_tag:+~${nvidia_docker_tag}}"
echo "LIBNVIDIA_CONTAINER_VERSION=${libnvidia_container_version}"
echo "NVIDIA_CONTAINER_TOOLKIT_VERSION=${nvidia_container_toolkit_version}"
if [[ "${libnvidia_container_version}" != "${nvidia_container_toolkit_version}" ]]; then
>&2 echo "WARNING: The libnvidia-container and nvidia-container-toolkit versions do not match"
fi
echo "NVIDIA_CONTAINER_RUNTIME_VERSION=${nvidia_container_runtime_version}"
echo "NVIDIA_DOCKER_VERSION=${nvidia_docker_version}"

scripts/release.sh Executable file

@@ -0,0 +1,56 @@
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is used to build the packages for the components of the NVIDIA
# Container Stack. These include the nvidia-container-toolkit in this repository
# as well as the components included in the third_party folder.
# All required packages are generated in the specified dist folder.
set -e -x
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
PROJECT_ROOT="$( cd ${SCRIPTS_DIR}/.. && pwd )"
# This list represents the distribution-architecture pairs that are actually published
# to the relevant repositories. The targets forwarded to the build-all-components.sh script
# can be overridden by specifying command-line arguments.
all=(
amazonlinux1-x86_64
amazonlinux2-x86_64
centos7-ppc64le
centos7-x86_64
centos8-aarch64
centos8-ppc64le
centos8-x86_64
debian10-amd64
debian9-amd64
opensuse-leap15.1-x86_64
ubuntu16.04-amd64
ubuntu16.04-ppc64le
ubuntu18.04-amd64
ubuntu18.04-arm64
ubuntu18.04-ppc64le
)
if [[ $# -gt 0 ]]; then
targets=($*)
else
targets=${all[@]}
fi
for target in ${targets[@]}; do
${SCRIPTS_DIR}/build-all-components.sh ${target}
done

test/release/Makefile Normal file

@@ -0,0 +1,59 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKFLOW ?= nvidia-docker
TEST_REPO ?= elezar.github.io
DISTRIBUTIONS := ubuntu18.04 centos8
IMAGE_TARGETS := $(patsubst %,image-%, $(DISTRIBUTIONS))
RUN_TARGETS := $(patsubst %,run-%, $(DISTRIBUTIONS))
RELEASE_TARGETS := $(patsubst %,release-%, $(DISTRIBUTIONS))
LOCAL_TARGETS := $(patsubst %,local-%, $(DISTRIBUTIONS))
.PHONY: $(IMAGE_TARGETS)
image-%: DOCKERFILE = docker/$(*)/Dockerfile
images: $(IMAGE_TARGETS)
$(IMAGE_TARGETS): image-%:
docker build \
--build-arg WORKFLOW="$(WORKFLOW)" \
--build-arg TEST_REPO="$(TEST_REPO)" \
-t nvidia-container-toolkit-repo-test:$(*) \
-f $(DOCKERFILE) \
$(shell dirname $(DOCKERFILE))
%-ubuntu18.04: ARCH = amd64
%-centos8: ARCH = x86_64
RELEASE_TEST_DIR := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
PROJECT_ROOT := $(RELEASE_TEST_DIR)/../..
LOCAL_PACKAGE_ROOT := $(PROJECT_ROOT)/dist
local-%: DIST = $(*)
local-%: LOCAL_REPO_ARGS = -v $(LOCAL_PACKAGE_ROOT)/$(DIST)/$(ARCH):/local-repository
$(LOCAL_TARGETS): local-%: release-% run-% | release-%
run-%: DIST = $(*)
$(RUN_TARGETS): run-%:
docker run --rm -ti \
$(LOCAL_REPO_ARGS) \
nvidia-container-toolkit-repo-test:$(*)
# Ensure that the local package root exists
$(RELEASE_TARGETS): release-%: $(LOCAL_PACKAGE_ROOT)/$(*)/$(ARCH)
$(PROJECT_ROOT)/scripts/release.sh $(*)-$(ARCH)

test/release/README.md Normal file

@@ -0,0 +1,107 @@
# Testing Packaging Workflows
## Building the Docker images:
```bash
make images
```
This assumes that the `nvidia-docker` workflow is being tested and that the test packages have been published to the `elezar.github.io` GitHub Pages site. Both can be overridden
using make variables.
Valid values for `WORKFLOW` are:
* `nvidia-docker`
* `nvidia-container-runtime`
This follows the instructions for setting up the [`nvidia-docker` repository](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#setting-up-nvidia-container-toolkit).
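For example, to build the test image for the `nvidia-container-runtime` workflow instead (a sketch; `TEST_REPO` is shown with the default noted above):
```bash
make WORKFLOW=nvidia-container-runtime TEST_REPO=elezar.github.io images
```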
## Testing local package changes
Running:
```bash
make local-ubuntu18.04
```
will build the `ubuntu18.04-amd64` packages for release and launch a docker container with these packages added to a local APT repository.
The various `apt` workflows can then be tested as if the packages were released to the `libnvidia-container` experimental repository.
The `local-centos8` make target is available for testing the `centos8-x86_64` workflows as representative of `yum`-based installation workflows.
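Similarly, running (a sketch mirroring the Ubuntu flow above):
```bash
make local-centos8
```
will build the `centos8-x86_64` packages and launch a container with them mounted at `/local-repository`.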
### Example
In the `centos8`-based container above we see the following `nvidia-docker2` packages available with the `local-repository` disabled:
```bash
$ yum list --showduplicates nvidia-docker2 | tail -2
nvidia-docker2.noarch 2.5.0-1 nvidia-docker
nvidia-docker2.noarch 2.6.0-1 nvidia-docker
```
Installing `nvidia-docker2`:
```bash
$ yum install -y nvidia-docker2
```
installs the following packages:
```bash
$ yum list installed | grep nvidia
libnvidia-container-tools.x86_64 1.5.1-1 @libnvidia-container
libnvidia-container1.x86_64 1.5.1-1 @libnvidia-container
nvidia-container-runtime.x86_64 3.5.0-1 @nvidia-container-runtime
nvidia-container-toolkit.x86_64 1.5.1-2 @nvidia-container-runtime
nvidia-docker2.noarch 2.6.0-1 @nvidia-docker
```
Note the repositories where these packages were installed from.
We now enable the `local-repository` to simulate the new packages being published to the `libnvidia-container` experimental repository and check the available `nvidia-docker2` versions:
```bash
$ yum-config-manager --enable local-repository
$ yum list --showduplicates nvidia-docker2 | tail -2
nvidia-docker2.noarch 2.6.0-1 nvidia-docker
nvidia-docker2.noarch 2.6.1-0.1.rc.1 local-repository
```
This shows the new version available from the local repository.
Running:
```
$ yum install nvidia-docker2
Last metadata expiration check: 0:01:15 ago on Fri Sep 24 12:49:20 2021.
Package nvidia-docker2-2.6.0-1.noarch is already installed.
Dependencies resolved.
===============================================================================================================================
Package Architecture Version Repository Size
===============================================================================================================================
Upgrading:
libnvidia-container-tools x86_64 1.6.0-0.1.rc.1 local-repository 48 k
libnvidia-container1 x86_64 1.6.0-0.1.rc.1 local-repository 95 k
nvidia-container-toolkit x86_64 1.6.0-0.1.rc.1 local-repository 1.5 M
replacing nvidia-container-runtime.x86_64 3.5.0-1
nvidia-docker2 noarch 2.6.1-0.1.rc.1 local-repository 13 k
Transaction Summary
===============================================================================================================================
Upgrade 4 Packages
Total size: 1.7 M
```
This shows that all components of the stack will be updated with versions from the `local-repository`.
After installation, the installed packages are:
```bash
$ yum list installed | grep nvidia
libnvidia-container-tools.x86_64 1.6.0-0.1.rc.1 @local-repository
libnvidia-container1.x86_64 1.6.0-0.1.rc.1 @local-repository
nvidia-container-toolkit.x86_64 1.6.0-0.1.rc.1 @local-repository
nvidia-docker2.noarch 2.6.1-0.1.rc.1 @local-repository
```
This shows that:
1. All versions have been installed from the same repository, and
2. The `nvidia-container-runtime` package was removed, as it is no longer required.
The `nvidia-container-runtime` executable is, however, still present on the system:
```bash
# ls -l /usr/bin/nvidia-container-runtime
-rwxr-xr-x 1 root root 2256280 Sep 24 12:42 /usr/bin/nvidia-container-runtime
```

test/release/docker/centos8/Dockerfile Normal file

@@ -0,0 +1,35 @@
FROM centos:8
RUN yum install -y \
yum-utils \
ruby-devel \
gcc \
make \
rpm-build \
rubygems \
createrepo
RUN gem install --no-document fpm
# We create and install a dummy docker package since these dependencies are out of
# scope for the tests performed here.
RUN fpm -s empty \
-t rpm \
--description "A dummy package for docker-ce_18.06.3.ce-3.el7" \
-n docker-ce --version 18.06.3.ce-3.el7 \
-p /tmp/docker.rpm \
&& \
yum localinstall -y /tmp/docker.rpm \
&& \
rm -f /tmp/docker.rpm
ARG WORKFLOW=nvidia-docker
ARG TEST_REPO=nvidia.github.io
ENV TEST_REPO ${TEST_REPO}
RUN curl -s -L https://nvidia.github.io/${WORKFLOW}/centos8/nvidia-docker.repo \
| tee /etc/yum.repos.d/nvidia-docker.repo
COPY entrypoint.sh /
ENTRYPOINT [ "/entrypoint.sh" ]

test/release/docker/centos8/entrypoint.sh Executable file

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This entrypoint configures the package repositories used for testing: either
# a local repository mounted into the container or the specified TEST
# repository.
: ${LOCAL_REPO_DIRECTORY:=/local-repository}
if [[ -d ${LOCAL_REPO_DIRECTORY} ]]; then
echo "Setting up local-repository"
createrepo /local-repository
cat >/etc/yum.repos.d/local.repo <<EOL
[local-repository]
name=NVIDIA Container Toolkit Local Packages
baseurl=file:///local-repository
enabled=0
gpgcheck=0
protect=1
EOL
yum-config-manager --enable local-repository
else
echo "Setting up TEST repo: ${TEST_REPO}"
sed -i -e "s#nvidia\.github\.io/libnvidia-container#${TEST_REPO}/libnvidia-container#g" /etc/yum.repos.d/nvidia-docker.repo
yum-config-manager --enable libnvidia-container-experimental
fi
exec bash $@

test/release/docker/ubuntu18.04/Dockerfile Normal file

@@ -0,0 +1,50 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install --no-install-recommends -y \
curl \
gnupg2 \
apt-transport-https \
ca-certificates \
apt-utils \
ruby ruby-dev rubygems build-essential
RUN gem install --no-document fpm
# We create and install a dummy docker package since these dependencies are out of
# scope for the tests performed here.
RUN fpm -s empty \
-t deb \
--description "A dummy package for docker.io_18.06.0" \
-n docker.io --version 18.06.0 \
-p /tmp/docker.deb \
--deb-no-default-config-files \
&& \
dpkg -i /tmp/docker.deb \
&& \
rm -f /tmp/docker.deb
ARG WORKFLOW=nvidia-docker
ARG TEST_REPO=nvidia.github.io
RUN curl -s -L https://nvidia.github.io/${WORKFLOW}/gpgkey | apt-key add - \
&& curl -s -L https://nvidia.github.io/${WORKFLOW}/ubuntu18.04/nvidia-docker.list | tee /etc/apt/sources.list.d/nvidia-docker.list \
&& apt-get update
COPY entrypoint.sh /
ENTRYPOINT [ "/entrypoint.sh" ]

test/release/docker/ubuntu18.04/entrypoint.sh Executable file

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This entrypoint configures the package repositories used for testing: either
# a local repository mounted into the container or the specified TEST
# repository.
: ${LOCAL_REPO_DIRECTORY:=/local-repository}
if [[ -d ${LOCAL_REPO_DIRECTORY} ]]; then
echo "Setting up local-repository"
echo "deb [trusted=yes] file:/local-repository ./" > /etc/apt/sources.list.d/local.list
$(cd /local-repository && apt-ftparchive packages . > Packages)
apt-get update
else
echo "Setting up TEST repo: ${TEST_REPO}"
sed -i -e "s#nvidia\.github\.io/libnvidia-container#${TEST_REPO}/libnvidia-container#g" /etc/apt/sources.list.d/nvidia-docker.list
sed -i -e '/experimental/ s/^#//g' /etc/apt/sources.list.d/nvidia-docker.list
apt-get update
fi
exec bash $@

third_party/libnvidia-container vendored Submodule

@@ -0,0 +1 @@
Subproject commit d78f1f4afa7e8d809434e71cf717f0817a450b37

third_party/nvidia-container-runtime vendored Submodule

@@ -0,0 +1 @@
Subproject commit 2e721ab3b2202470e940fd5a29c1f681e761fbf0

third_party/nvidia-docker vendored Submodule

@@ -0,0 +1 @@
Subproject commit 52ed1032c80be535c44822f67151a4b0a0e72658