Compare commits

...

164 Commits

Author SHA1 Message Date
Evan Lezar
266b752b02 Merge branch 'al2-aarch64' into 'master'
Add aarch64 build for Amazon Linux 2

See merge request nvidia/container-toolkit/container-toolkit!55
2021-10-19 14:31:51 +00:00
Evan Lezar
7fc33d02b4 Update submodules
* libnvidia-container: @1fa138a694b3667fd89ac89dfdb26fcd06ab0bb9
* nvidia-container-runtime: @cd6aef41126b5409c2329b66803b278a697aaaf3
* nvidia-docker: @4613cdae34c3e106ef124c9b86e4cf998569bbd6

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-10-19 15:30:29 +02:00
Evan Lezar
9be9b89f9f Add aarch64 build for Amazon Linux 2
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-10-18 12:03:22 +02:00
Evan Lezar
a036a83afa Merge branch 'improve-release-tests' into 'master'
Extend release testing tooling to allow for upgrade testing

See merge request nvidia/container-toolkit/container-toolkit!54
2021-10-14 17:23:49 +00:00
Evan Lezar
ee0b908613 Extend release testing tooling to allow for upgrade testing
This change allows for upgrade workflows to be tested in the
release test containers. To achieve this, a script is added
to configure the test repositories, leaving the defaults installed
initially.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-10-14 17:10:43 +02:00
Evan Lezar
28f6b7c02c Merge branch 'update-ci' into 'master'
Update CI to use build image directly

See merge request nvidia/container-toolkit/container-toolkit!53
2021-10-12 12:55:48 +00:00
Evan Lezar
f7e9d1ca45 Use build image directly in CI
This change uses the build image directly in CI instead of
using dind and invoking the docker-* make targets.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-10-12 13:53:11 +02:00
Evan Lezar
229f9c3730 Update DEVELOPMENT.md
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-10-12 13:53:06 +02:00
Evan Lezar
845701447c Merge branch 'build-from-repo' into 'master'
Add logic to build all packages from nvidia-container-toolkit repo

See merge request nvidia/container-toolkit/container-toolkit!49
2021-10-07 14:30:13 +00:00
Evan Lezar
1ad98df39f Update submodules for packaging fixes
This change updates the git submodules for nvidia-docker and
nvidia-container-runtime to contain the package fixes and
code cleanup.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-10-07 15:28:52 +02:00
Evan Lezar
22a958fae7 Add docker-based tests for package installation workflows
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-24 14:58:17 +02:00
Evan Lezar
f10fa7b292 FIXUP: Update development
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-23 21:38:03 +02:00
Evan Lezar
fa7dc8cb31 Require at least a matching libnvidia-container-tools version
This change ensures that at least the same libnvidia-container-tools
version is required when installing nvidia-container-toolkit.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-23 21:34:09 +02:00
Evan Lezar
3fef6bb5ab Merge branch 'update-readme' into 'master'
Add README to nvidia-container-toolkit repository

See merge request nvidia/container-toolkit/container-toolkit!50
2021-09-23 19:07:22 +00:00
Evan Lezar
2dc85de5d4 Use consistent package revisions for all rpm-based packages
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-23 15:36:27 +02:00
Evan Lezar
bb6f4745e9 Fix nvidia-container-runtime breaks / replaces dependency
The relationship between packages also considers the package revision
when determining validity. This means that 3.5.0-1 is considered
greater than 3.5.0. This change adds the package revision to the
nvidia-container-runtime breaks / replaces relationship.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-23 14:56:55 +02:00
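
The version ordering described above can be verified directly with `dpkg`; a minimal sketch (the exact package stanza is not shown in this log):

```sh
# Debian version comparison treats 3.5.0-1 (with a revision) as greater than
# 3.5.0 (without one), which is why the revision must appear in the
# Breaks/Replaces relationship.
dpkg --compare-versions "3.5.0-1" gt "3.5.0" && echo "3.5.0-1 > 3.5.0"
```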
Evan Lezar
77740c2a80 Add release script for specific targets
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-23 14:38:37 +02:00
Evan Lezar
f0fb4739ff Remove docker-all target from Makefile
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-23 14:38:37 +02:00
Evan Lezar
5ee2150eaa Add basic version checks
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-23 14:38:37 +02:00
Evan Lezar
34e023361b Add nvidia-docker as a git submodule
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-23 14:38:37 +02:00
Evan Lezar
2ed7d86709 Add nvidia-container-runtime as a git submodule
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-23 14:38:37 +02:00
Evan Lezar
e729e74fe5 Add libnvidia-container as a git submodule
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-23 14:38:37 +02:00
Evan Lezar
b551d0f4f4 Apply edits for the NVIDIA container toolkit
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-22 16:49:58 +02:00
Evan Lezar
1d674783b0 Copy README.md from nvidia-docker
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-22 15:22:53 +02:00
Evan Lezar
cc9c3c0d28 Copy scripts from nvidia-container-toolkit-release
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-22 13:42:32 +02:00
Evan Lezar
78f137a5ef Bump version for 1.6.0 development
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-22 13:42:32 +02:00
Evan Lezar
00258f14fb Merge branch 'include-nvidia-container-runtime' into 'master'
Include nvidia-container-runtime executable in nvidia-container-toolkit

See merge request nvidia/container-toolkit/container-toolkit!45
2021-09-20 14:54:06 +00:00
Evan Lezar
e828697f90 Update debian and rpm package definitions
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-20 14:54:54 +02:00
Evan Lezar
923344d376 Add PREFIX make variable to control command output
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-13 17:15:34 +02:00
Evan Lezar
35c6559013 Make all commands and copy executables
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-13 17:15:19 +02:00
Evan Lezar
eb67968911 Add cmds target to makefile to build all go commands
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-13 17:08:31 +02:00
Evan Lezar
6e1436cefb Update go vendoring
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-07 13:13:03 +02:00
Evan Lezar
10cd42273e Update package references
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-07 13:13:03 +02:00
Evan Lezar
b6a585c77d Copy code from nvidia-container-runtime
This change copies the cmd/nvidia-container-runtime, internal, and test
folders from github.com/NVIDIA/nvidia-container-runtime@8a63b4b34f3ce3b4167f0516aa3f7207ca280dfb

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-07 13:13:03 +02:00
Evan Lezar
58e707fed6 Bump version for post 1.5.1 development
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-09-07 13:12:25 +02:00
Evan Lezar
28ee3d5fd5 Merge branch 'revert-nvlink' into 'master'
Revert support for NVIDIA_FABRIC_DEVICES

See merge request nvidia/container-toolkit/container-toolkit!41
2021-08-20 08:39:49 +00:00
Evan Lezar
c2ac6db43b Revert "Add support for NVIDIA_FABRIC_DEVICES"
This reverts commit f828efcf64.
2021-08-18 15:17:59 +02:00
Evan Lezar
620bd806e8 Revert "Bump version to 1.6.0~rc.1"
This reverts commit 2001d66f9b.
2021-08-18 15:17:37 +02:00
Evan Lezar
afe0f8b61f Merge branch 'release-1.6.0-rc.1' into 'master'
Bump version to 1.6.0~rc.1

See merge request nvidia/container-toolkit/container-toolkit!40
2021-08-16 10:27:11 +00:00
Evan Lezar
2001d66f9b Bump version to 1.6.0~rc.1
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-08-13 14:55:28 +02:00
Evan Lezar
7626578b8e Merge branch 'nvlink' into 'master'
Add support for NVIDIA_FABRIC_DEVICES

See merge request nvidia/container-toolkit/container-toolkit!39
2021-08-12 14:04:10 +00:00
Evan Lezar
f828efcf64 Add support for NVIDIA_FABRIC_DEVICES
This change adds support for the NVIDIA_FABRIC_DEVICES envvar. The (non-empty)
value of this envvar is passed to the NVIDIA Container CLI using the --fabric-device
command line flag and allows for nvswitch and nvlink devices to be mounted
into the container.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-08-11 10:33:55 +02:00
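
A hypothetical invocation of this feature (since reverted, see c2ac6db43b above); the image name is an example and the envvar value follows the commit message:

```sh
# NVIDIA_FABRIC_DEVICES would be forwarded to nvidia-container-cli via the
# --fabric-device flag so that nvswitch/nvlink devices are mounted into the
# container.
docker run --rm --runtime=nvidia \
    -e NVIDIA_VISIBLE_DEVICES=all \
    -e NVIDIA_FABRIC_DEVICES=all \
    ubuntu:20.04 ls /dev
```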
Evan Lezar
faf0df66c7 Merge branch 'improve-ci' into 'master'
Improve CI for container toolkit

See merge request nvidia/container-toolkit/container-toolkit!38
2021-07-15 15:40:56 +00:00
Evan Lezar
1ef4b1a14a Use extends keyword for build-one and build-all
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-07-15 16:27:17 +02:00
Evan Lezar
3df0969349 Improve CI for container toolkit
This change improves the CI for the container toolkit. The go targets are
executed in a docker container, which allows for reproducible behaviour on
local systems as well as in CI. The Makefile is updated to facilitate this.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-07-15 16:27:15 +02:00
Evan Lezar
22fcd022f3 Merge branch 'upstream-bump-v1.5.1' into 'master'
Bump to version 1.5.1

See merge request nvidia/container-toolkit/container-toolkit!35
2021-06-14 18:35:45 +00:00
Evan Lezar
492905de38 Bump to version 1.5.1
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-06-14 17:16:44 +02:00
Evan Lezar
17e76cad4d Merge branch 'make-binary-goos-explicit' into 'master'
Explicitly set GOOS when building binary

See merge request nvidia/container-toolkit/container-toolkit!33
2021-06-14 10:12:24 +00:00
Evan Lezar
c728bf4b1e Explicitly set GOOS when building binary
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-06-10 10:40:06 +02:00
Evan Lezar
f05e4e81c5 Merge branch 'fix-go-install' into 'master'
Move pkg to cmd/nvidia-container-toolkit

See merge request nvidia/container-toolkit/container-toolkit!32
2021-06-08 15:21:06 +00:00
Evan Lezar
14cd7c1833 Add coverage step to CI
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-06-08 15:23:19 +02:00
Evan Lezar
f72b79cc2a Move pkg to cmd/nvidia-container-toolkit
This change moves the pkg folder to `cmd/nvidia-container-toolkit` to
better match go best practices. This allows, for example, for the
`cmd/nvidia-container-toolkit` to be go installed.

The only package included in `pkg` was `main`.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-06-08 15:20:59 +02:00
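
With the command under `cmd/`, it can be fetched with the standard Go tooling; a sketch assuming the module path declared in the Makefile (`github.com/NVIDIA/nvidia-container-toolkit`) and Go 1.16 or newer:

```sh
# Installs the nvidia-container-toolkit hook binary into $(go env GOPATH)/bin.
go install github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-container-toolkit@latest
```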
Evan Lezar
f25698e96e Run go mod vendor and go mod tidy
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-06-08 10:51:33 +02:00
Evan Lezar
a02f7f8f6f Merge branch 'CNT-1554/docker-swarm' into 'master'
Fix bug where docker swarm device selection is overridden by NVIDIA_VISIBLE_DEVICES

See merge request nvidia/container-toolkit/container-toolkit!31
2021-06-08 05:31:05 +00:00
Evan Lezar
2a92d6acb7 Fix bug where docker swarm device selection is overridden by NVIDIA_VISIBLE_DEVICES
This change fixes a bug where the value of NVIDIA_VISIBLE_DEVICES would be used to
select devices even if the `swarm-resource` config option is specified.

Note that this does not change the value of NVIDIA_VISIBLE_DEVICES in the container.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-06-07 14:10:08 +02:00
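
The `swarm-resource` option referenced above lives in the toolkit configuration; a hypothetical excerpt (the path and value are assumptions, only the option name comes from this log):

```sh
# /etc/nvidia-container-runtime/config.toml (assumed path):
#   swarm-resource = "DOCKER_RESOURCE_GPU"
#
# With the option set, devices are selected from the envvar that Docker Swarm
# injects for generic resources, and NVIDIA_VISIBLE_DEVICES no longer takes
# precedence (its value inside the container is left untouched).
grep -n "swarm-resource" /etc/nvidia-container-runtime/config.toml
```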
Evan Lezar
602eaf0e60 Use require package for tests
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-06-07 13:31:41 +02:00
Evan Lezar
b930487dc5 Add coverage to go tests
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-06-07 13:21:28 +02:00
Evan Lezar
9aac07fe64 Update vendoring
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-06-07 13:20:34 +02:00
Evan Lezar
825990ba41 Merge branch 'CNT-1334-publish-tags-to-artifactory' into 'master'
Add artifactory publish step

See merge request nvidia/container-toolkit/container-toolkit!30
2021-05-18 17:25:07 +00:00
Evan Lezar
03d9c1d698 Update to Golang 1.16.3
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-05-18 11:25:52 +02:00
Evan Lezar
de172674b1 Add artifactory publish step
This change simplifies the build process by only targeting ubuntu20.04-amd64
and adds logic to push tagged builds to artifactory.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-05-18 11:25:48 +02:00
Kevin Klues
b71a9ed153 Merge branch 'upstream-bump-v1.5.0' into 'master'
Bump version to 1.5.0

See merge request nvidia/container-toolkit/container-toolkit!29
2021-04-29 14:08:23 +00:00
Kevin Klues
dde7159e11 Bump version to 1.5.0
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2021-04-29 10:16:44 +00:00
Evan Lezar
46de426cc4 Merge branch 'CNT-1330/jenkins-ci' into 'master'
Add Jenkins file for CI build steps

See merge request nvidia/container-toolkit/container-toolkit!28
2021-03-18 10:06:44 +00:00
Evan Lezar
1c7d6a233a Add golang check targets
This change adds check targets for Golang to the Makefile. These are also
added as stages to the Jenkinsfile definition, and the GitLab CI
is modified to use them too.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-03-17 16:58:39 +01:00
Evan Lezar
635aeb8343 Add Jenkinsfile definition for build targets
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-03-17 13:52:19 +01:00
Evan Lezar
ec9d296afe Move docker.mk to docker folder
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-03-17 13:52:14 +01:00
Evan Lezar
ff44395b31 Merge branch 'upstream-bump-v1.4.2' into 'master'
Bump version to 1.4.2

See merge request nvidia/container-toolkit/container-toolkit!27
2021-02-05 12:47:01 +00:00
Kevin Klues
8571e5ac5d Bump version to 1.4.2
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2021-02-05 10:26:10 +00:00
Kevin Klues
108c99bb9b Merge branch 'upstream-bump-v1.4.1' into 'master'
Bump version to 1.4.1

See merge request nvidia/container-toolkit/container-toolkit!26
2021-01-25 13:35:42 +00:00
Kevin Klues
dfb5daf200 Bump version to 1.4.1
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2021-01-25 10:42:32 +00:00
Kevin Klues
e8aa3cc8c3 Merge branch 'ignore-nvidia-visible-devices' into 'master'
Ignore NVIDIA_VISIBLE_DEVICES for containers with insufficient privileges

See merge request nvidia/container-toolkit/container-toolkit!25
2021-01-25 10:25:00 +00:00
Evan Lezar
fc408a32c7 Add utility function to get config name from struct
Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-01-22 16:08:45 +01:00
Evan Lezar
f6b1b1afad Ignore NVIDIA_VISIBLE_DEVICES for containers with insufficient privileges
This change ignores the value of NVIDIA_VISIBLE_DEVICES instead of
raising an error when launching a container with insufficient permissions.

This changes the behaviour under the following conditions:

NVIDIA_VISIBLE_DEVICES is set
and

accept-nvidia-visible-devices-envvar-when-unprivileged = false (default: true)

or

privileged = false (default: false)

This means that a user need not explicitly clear the NVIDIA_VISIBLE_DEVICES
environment variable if no GPUs are to be used in unprivileged containers.
Note that this envvar is set to 'all' by default in many CUDA images that
are used as base images.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
2021-01-22 15:34:52 +01:00
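
A sketch of the resulting behaviour, assuming the non-default configuration quoted above (the config path and image tag are assumptions):

```sh
# /etc/nvidia-container-runtime/config.toml (assumed path):
#   accept-nvidia-visible-devices-envvar-when-unprivileged = false
#
# An unprivileged container whose base image sets NVIDIA_VISIBLE_DEVICES=all
# now starts without GPUs instead of failing at create time.
docker run --rm --runtime=nvidia nvidia/cuda:11.4.2-base-ubuntu20.04 \
    sh -c 'ls /dev/nvidia* 2>/dev/null || echo "no GPU devices injected"'
```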
Kevin Klues
97516467c0 Merge branch 'upstream-bump-v1.4.0' into 'master'
Bump version to 1.4.0

See merge request nvidia/container-toolkit/container-toolkit!24
2020-12-14 14:41:02 +00:00
Kevin Klues
01063c0433 Bump version to 1.4.0
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-12-11 18:05:49 +00:00
Kevin Klues
119f75dcf8 Merge branch 'upstream-add-compute-to-default-capabilities' into 'master'
Add 'compute' capability to list of defaults.

See merge request nvidia/container-toolkit/container-toolkit!23
2020-12-08 11:31:27 +00:00
Kevin Klues
20604621e4 Add 'compute' capability to list of defaults.
For most practical purposes, it should be fine to set
NVIDIA_DRIVER_CAPABILITIES=all nowadays.

Historically, these different capabilities exist because they were added
incrementally, with varying degrees of stability. It's fairly common to
run with GPUs in containers today, but a few years ago the driver didn't
support them very well, and it was important to make sure the libraries
being injected into the container actually worked in a containerized
environment. When they didn't, it was common to get information leaks,
crashes, or even silent failures.

In the past, whenever a new set of libraries was being vetted for
injection, a new capability was added to make sure that users had control
to explicitly include only those libraries they were comfortable having
injected into their containers.

The idea is that whoever puts together a container image for use with
GPUs should have the knowledge of what capabilities the software in that
container image requires, and can set the NVIDIA_DRIVER_CAPABILITIES
envvar in that image appropriately.

After some back and forth, we've decided it doesn't quite make sense to
set it to "all" just yet, but we should set it to "utility, compute"
instead of just "utility", so that at least the core CUDA libraries work
by default (once installed in the container).

Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-12-07 12:10:23 +00:00
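
Images or users that need something other than the new `utility,compute` default can still set the envvar explicitly; a usage sketch (the image tag is an example):

```sh
# Request only the capabilities a workload needs ...
docker run --rm --runtime=nvidia \
    -e NVIDIA_DRIVER_CAPABILITIES=utility,compute \
    nvidia/cuda:11.0.3-base-ubuntu20.04 nvidia-smi

# ... or opt in to everything, as discussed above.
docker run --rm --runtime=nvidia \
    -e NVIDIA_DRIVER_CAPABILITIES=all \
    nvidia/cuda:11.0.3-base-ubuntu20.04 nvidia-smi
```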
Kevin Klues
8cfb3c29f6 Merge branch 'upstream-bump-v1.3.0' into 'master'
Bump to version 1.3.0

See merge request nvidia/container-toolkit/container-toolkit!22
2020-09-16 13:34:37 +00:00
Kevin Klues
98e202d0d8 Bump to version 1.3.0
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-09-16 11:45:31 +00:00
Kevin Klues
26668097c4 Merge branch 'upstream-bump-1.3.0-rc.2' into 'master'
Bump to version 1.3.0 rc.2

See merge request nvidia/container-toolkit/container-toolkit!21
2020-08-10 15:33:25 +00:00
Kevin Klues
caf2792463 Update changelogs for 1.3.0-rc.2
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-08-10 13:08:17 +00:00
Kevin Klues
b2be0b08ac Bump version to 1.3.0-rc.2
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-08-10 13:03:00 +00:00
Kevin Klues
edc5041636 Merge branch 'upstream-update-devices-from-volume-mounts-semantics' into 'master'
Refactor accepting device lists from volume mounts as a boolean

See merge request nvidia/container-toolkit/container-toolkit!20
2020-08-07 18:40:56 +00:00
Kevin Klues
2c1809475c Add more tests for new semantics with device list from volume mounts
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-08-07 16:30:31 +00:00
Kevin Klues
7c00385797 Refactor accepting device lists from volume mounts as a boolean
Also hard code the "root" path where these volume mounts will be looked
for rather than making it configurable.

Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-08-07 16:30:19 +00:00
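
Roughly, the mount-based convention requests devices via files mounted under a fixed root inside the container rather than via NVIDIA_VISIBLE_DEVICES; a hypothetical illustration (the hard-coded root path is not stated in this log and is an assumption here):

```sh
# Request GPUs 0 and 1 via volume mounts; only the file names (one per
# requested device) matter, not their contents.
docker run --rm --runtime=nvidia \
    -v /dev/null:/var/run/nvidia-container-devices/0 \
    -v /dev/null:/var/run/nvidia-container-devices/1 \
    ubuntu:18.04 ls /dev
```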
Kevin Klues
322006c361 Merge branch 'upstream-bump-1.3.0-rc.1' into 'master'
Bump version to 1.3.0-rc.1

See merge request nvidia/container-toolkit/container-toolkit!19
2020-07-24 20:36:38 +00:00
Kevin Klues
a25017fb8a Merge branch 'upstream-build-prerelease' into 'master'
Update build system to accept a TAG variable for things like rc.x

See merge request nvidia/container-toolkit/container-toolkit!18
2020-07-24 20:22:00 +00:00
Kevin Klues
928905ce94 Update changelogs for 1.3.0-rc.1
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 20:10:42 +00:00
Kevin Klues
7ed17bb9ca Bump version to 1.3.0-rc.1
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 20:03:48 +00:00
Kevin Klues
b50d86c174 Update build system to accept a TAG variable for things like rc.x
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 19:54:29 +00:00
Kevin Klues
bf342fb4c9 Merge branch 'upstream-fix-ci' into 'master'
Generalize CI variables

See merge request nvidia/container-toolkit/container-toolkit!17
2020-07-24 14:28:49 +00:00
Kevin Klues
1791372f22 Generalize CI variables
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 14:01:39 +00:00
Kevin Klues
4448319605 Merge branch 'upstream-add-alternate-device-list' into 'master'
Add the ability to pull the device list from mounted files instead of just Envvars

See merge request nvidia/container-toolkit/container-toolkit!15
2020-07-24 13:18:53 +00:00
Kevin Klues
2ea3150b60 Merge branch 'upstream-simplify-nvidia-config-generation' into 'master'
Simplify logic for `nvidiaConfig` generation

See merge request nvidia/container-toolkit/container-toolkit!14
2020-07-24 13:18:35 +00:00
Kevin Klues
32b4b09bc9 Add tests to verify priority of device list from mounts vs. envvar
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
cc0a22a6d9 Consolidate logic for building nvidiaConfig into a single function
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
e48d23d107 Add test for getDevicesFromMounts()
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
430dda41e9 Remove getNvidiaConfigLegacy() function
A subsequent commit will add equivalent functionality back in

Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
8bcd02ee5d Add logic implementing getDevicesFromMounts()
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
4791fab747 Simplify getMigConfigDevices() and getMigMonitorDevices()
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
7313069d4c Update getDevices() to account for getting the devices list from mounts
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
a24b0c8b4e Split isLegacyCUDAImage() into its own helper function
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
f46d1861d3 Add stub implementation for getDevicesFromMounts()
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
0a9dc3c653 Add test to make sure that getNvidiaConfig() operates as expected
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
889ebae1fe Pull logic to get the device list from ENVVARs out to its own function
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
e4b9318de3 Only run gofmt over go files under pkg/ in CI
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
aec9a28bc3 Push HookConfig and privileged flags down to getDevices() call
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
2ae7cb07cf Add ability to consider container mounts to generate nvidiaConfig
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
da36874e91 Add new config options to pull device list from mounted files not ENVVAR
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
b9ef2db205 Remove unnecessary files from version control
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:50:05 +00:00
Kevin Klues
da6fbb343a Revert "Add ability to merge envars of the form NVIDIA_VISIBLE_DEVICES_*"
This reverts commit 01b4381282.
2020-07-24 12:50:05 +00:00
Kevin Klues
647a805341 Merge branch 'upstream-add-ci-tests' into 'master'
Add common CI tests for things like golint, gofmt, unit tests, etc.

See merge request nvidia/container-toolkit/container-toolkit!16
2020-07-24 12:39:45 +00:00
Kevin Klues
fe65573bdf Add common CI tests for things like golint, gofmt, unit tests, etc
This commit also fixes the minor issues uncovered while running these
tests locally.

Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:14:26 +00:00
Kevin Klues
a7fb33301c Flip build-all targets to run automatically on merge requests
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 12:14:26 +00:00
Kevin Klues
8b248b6631 Rename github.com/NVIDIA/container-toolkit to nvidia-container-toolkit
The repo name on github recently changed, so all references here should
be updated as well.

Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-24 11:40:45 +00:00
Kevin Klues
d10144b3b1 Merge branch 'upstream-add-ngx-all-driver-caps' into 'master'
Add 'ngx' to list of *all* driver capabilities -- Prepare patch release for 1.2.1

See merge request nvidia/container-toolkit/container-toolkit!13
2020-07-22 15:21:11 +00:00
Kevin Klues
ba9758c7ff Update changelogs for 1.2.1
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-22 13:41:44 +00:00
Kevin Klues
d467b87ef9 Bump version to 1.2.1
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-22 13:39:31 +00:00
Kevin Klues
2f4af74320 List config.toml as a config file in the RPM SPEC
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-22 13:39:22 +00:00
Kevin Klues
4e6e0ed4f1 Add 'ngx' to list of *all* driver capabilities
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-22 13:29:39 +00:00
Kevin Klues
7ec9e84369 Merge branch 'upstream-bump-v1.2.0' into 'master'
Bump to version 1.2.0

See merge request nvidia/container-toolkit/container-toolkit!12
2020-07-08 20:29:41 +00:00
Kevin Klues
023af3729f Update changelogs for 1.2.0
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-08 18:11:44 +00:00
Kevin Klues
a63bef2281 Bump version to 1.2.0
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-08 16:22:01 +00:00
Kevin Klues
320bb6e4dc Update dependence on libnvidia-container to 1.2.0
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-08 16:22:01 +00:00
Kevin Klues
8e0aab4607 Fix repo listed in changelog for debian distributions
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-07-08 16:22:01 +00:00
Kevin Klues
ad7d3dda83 Merge branch 'upstream-add-ngx' into 'master'
Add the 'ngx' driver capability

See merge request nvidia/container-toolkit/container-toolkit!11
2020-06-24 18:35:52 +00:00
Kevin Klues
d3aee3e092 Add the 'ngx' driver capability
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-06-24 17:53:42 +00:00
Renaud Gaubert
e7dc3cbbab Fix debian copyright file
Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
2020-06-10 21:29:39 +00:00
Renaud Gaubert
0d0f3bfa56 Merge branch 'license' into 'master'
Update package license to match source license

See merge request nvidia/container-toolkit/container-toolkit!10
2020-06-10 19:25:50 +00:00
Renaud Gaubert
6cfc80975c Update package license to match source license
Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
2020-06-09 03:03:27 +00:00
Kevin Klues
d112fbd98a Merge branch 'upstream-fix-for-oci-1.0.0-rc2' into 'master'
Add support for parsing Linux Capabilities for older OCI specs

See merge request nvidia/container-toolkit/container-toolkit!9
2020-06-03 22:35:58 +00:00
Kevin Klues
9d66665d4b Update for patch release 1.1.2
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-06-03 21:26:25 +00:00
Kevin Klues
c32237f39c Add support for parsing Linux Capabilities for older OCI specs
This was added to fix a regression with support for the default runc
shipped with CentOS 7.

The version of runc that is installed by default on CentOS 7 is
1.0.0-rc2 which uses OCI spec 1.0.0-rc2-dev.

This is a prerelease of the OCI spec, which defines the capabilities
section of a process configuration to be a flat list of capabilities
(e.g. SYS_ADMIN, SYS_PTRACE, SYS_RAWIO, etc.)
https://github.com/opencontainers/runtime-spec/blob/v1.0.0-rc2/config.md#process-configuration

By the time the official 1.0.0 version of the OCI spec came out, the
capabilities section of a process configuration was expanded to include
embedded fields for effective, bounding, inheritable, permitted and
ambient (each of which can contain a flat list of capabilities of the
form SYS_ADMIN, SYS_PTRACE, SYS_RAWIO, etc.)
https://github.com/opencontainers/runtime-spec/blob/v1.0.0/config.md#linux-process

Previously, we only inspected the capabilities section of a process
configuration assuming it was in the format of OCI spec 1.0.0.

This patch makes sure we can parse the capabilities in either format.

Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-06-03 21:25:13 +00:00
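
The two layouts described above are visible directly in a container's OCI `config.json`; a small `jq` sketch (the file path is illustrative) that extracts a capability list from either shape:

```sh
# OCI 1.0.0-rc2 stores .process.capabilities as a flat array of names, while
# OCI 1.0.0 nests per-set lists (bounding, effective, ...). Print a list that
# works in either case:
jq -r '.process.capabilities
       | if type == "array" then . else (.bounding // []) end
       | .[]' config.json
```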
Kevin Klues
39a985ce96 Update vendored packages
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-06-03 21:25:13 +00:00
Renaud Gaubert
809dd1855a Merge branch 'upstream-patch-1.1.1' into 'master'
Update for patch release 1.1.1

See merge request nvidia/container-toolkit/container-toolkit!8
2020-05-19 19:51:44 +00:00
Kevin Klues
ffa82d90b4 Update changelog for 1.1.1
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-05-19 14:55:40 +00:00
Kevin Klues
d202adedec Update version to 1.1.1
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-05-19 14:04:59 +00:00
Kevin Klues
8f74fabc83 Update dependence on libnvidia-container to 1.1.1
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-05-19 14:04:29 +00:00
Kevin Klues
9c2c610fcd Update changelog for 1.1.0 release
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-05-18 13:21:14 +02:00
Renaud Gaubert
976428af2c Merge branch '1.1.0-staging' into 'master'
1.1.0 staging

See merge request nvidia/container-toolkit/container-toolkit!7
2020-05-15 19:39:41 +00:00
Kevin Klues
2c15e81822 Bump version to 1.1.0
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-05-15 21:34:41 +02:00
Kevin Klues
fcc1d116f0 Merge branch 'internal-add-mig-config-monitor' into 'master'
Add support for mig-config and mig-monitor as privileged capabilities

See merge request dl/container-dev/nvidia-container-toolkit!3
2020-05-15 19:04:10 +00:00
Renaud Gaubert
d4ff0416d8 Merge branch 'add-mergeable-visible-devices-envvar' into 'master'
Add ability to merge envars of the form NVIDIA_VISIBLE_DEVICES_*

See merge request dl/container-dev/nvidia-container-toolkit!2
2020-05-15 19:04:10 +00:00
Kevin Klues
8f387816bc Add support for mig-config and mig-monitor as privileged flags
These flags can only be injected into privileged containers. If the
container is unprivileged, and one of these flags is specified, then we
exit with an error.

Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-05-15 19:04:10 +00:00
Kevin Klues
05012e7b7f Extend fields we inspect in the runc spec to include linux capabilities
This also includes a helper to look through the capabilities contained
in the spec to determine if the container is privileged or not.

Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-05-15 19:04:10 +00:00
Kevin Klues
01b4381282 Add ability to merge envars of the form NVIDIA_VISIBLE_DEVICES_*
This allows someone to (for example) pass the following environment
variables:

NVIDIA_VISIBLE_DEVICES_0="0,1"
NVIDIA_VISIBLE_DEVICES_1="2,3"
NVIDIA_VISIBLE_DEVICES_WHATEVER="4,5"

and have the nvidia-container-toolkit automatically merge these into:

NVIDIA_VISIBLE_DEVICES="0,1,2,3,4,5"

This is useful (for example) if the full list of devices comes
from multiple, disparate sources.

Note: the merged result replaces any pre-existing value of
NVIDIA_VISIBLE_DEVICES, and that original value is *excluded* from the
merge. The original value is excluded to ensure that there is a way to
override the default value of NVIDIA_VISIBLE_DEVICES, which is set to
"all" inside many container images.

Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-05-15 19:04:05 +00:00
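
Put together as a usage sketch (this merge was later reverted in da6fbb343a, higher up in this log):

```sh
# The toolkit would merge these into NVIDIA_VISIBLE_DEVICES="0,1,2,3,4,5",
# dropping any NVIDIA_VISIBLE_DEVICES value baked into the image.
docker run --rm --runtime=nvidia \
    -e NVIDIA_VISIBLE_DEVICES_0="0,1" \
    -e NVIDIA_VISIBLE_DEVICES_1="2,3" \
    -e NVIDIA_VISIBLE_DEVICES_WHATEVER="4,5" \
    ubuntu:18.04 nvidia-smi -L
```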
Renaud Gaubert
4e4de762b7 Merge branch 'upstream-cross-build' into 'master'
Update build system to match libnvidia-container

See merge request nvidia/container-toolkit/container-toolkit!6
2020-05-15 18:10:57 +00:00
Kevin Klues
6589f9f28d Update .gitlab-ci.yml to match that of libnvidia-container
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-05-14 21:42:14 +02:00
Kevin Klues
3353d7530c Update build system to match libnvidia-container
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2020-05-14 19:27:57 +00:00
Renaud Gaubert
f7a19bb301 Split docker targets into the docker.mk file
Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
2020-04-11 17:20:28 -07:00
Renaud Gaubert
87c8a868f9 Add binary target and use go mod
Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
2020-04-11 17:18:14 -07:00
Renaud Gaubert
60f165ad69 Merge branch 'toolkit_no_pivot' into 'master'
add no-pivot option to toolkit

See merge request nvidia/container-toolkit/toolkit!3
2020-02-01 01:17:49 +00:00
Kathryn Baldauf
5beddd6705 add no-pivot option to toolkit
Signed-off-by: Kathryn Baldauf <kabaldau@microsoft.com>
2020-01-31 16:43:41 -08:00
Jon Mayo
2155c2d587 Merge branch 'cleanup-driver-caps' into 'master'
Cleanup naming of constants and functions

See merge request nvidia/container-toolkit/toolkit!2
2019-12-20 17:24:26 +00:00
Kevin Klues
c84d80d5ea Make all references to 'Capabilities' explicit to 'DriverCapabilities'
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2019-12-20 16:27:56 +00:00
Kevin Klues
b3de846f66 Cleanup names of constants to better match ENVVARs
Signed-off-by: Kevin Klues <kklues@nvidia.com>
2019-12-20 16:08:47 +00:00
Renaud Gaubert
8da6d14a20 Ensure LICENSE and CONTRIBUTING.md files are present
Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
2019-10-31 12:56:46 -07:00
Renaud Gaubert
a8acc86ddf Login to gitlab registry
Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
2019-10-22 16:39:54 -07:00
Renaud Gaubert
9e1c8c1bbb Rename images
Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
2019-10-22 16:11:35 -07:00
Renaud Gaubert
f94c718c62 Correct push target
Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
2019-10-22 15:16:38 -07:00
Renaud Gaubert
fea67a50b8 Install make in the job
Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
2019-10-22 14:58:54 -07:00
Renaud Gaubert
713ffee7a3 Use the dind service
Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
2019-10-22 14:56:05 -07:00
Renaud Gaubert
d84ddccc68 Fix gitlab-ci syntax error
Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
2019-10-22 14:38:10 -07:00
544 changed files with 285020 additions and 597 deletions

.common-ci.yml (new file, 97 lines)

@@ -0,0 +1,97 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
default:
image: docker:stable
services:
- name: docker:stable-dind
command: ["--experimental"]
variables:
IMAGE: "${CI_REGISTRY_IMAGE}"
IMAGE_TAG: "${CI_COMMIT_REF_SLUG}"
BUILDIMAGE: "${CI_REGISTRY_IMAGE}/build:${CI_COMMIT_SHORT_SHA}"
stages:
- image
- lint
- go-checks
- go-build
- unit-tests
- build
- build-long
- scan
- release
build-dev-image:
stage: image
script:
- apk --no-cache add make bash
- make .build-image
- docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
- make .push-build-image
.requires-build-image:
image: "${BUILDIMAGE}"
.go-check:
extends:
- .requires-build-image
stage: go-checks
fmt:
extends:
- .go-check
script:
- make assert-fmt
vet:
extends:
- .go-check
script:
- make vet
lint:
extends:
- .go-check
script:
- make lint
allow_failure: true
ineffassign:
extends:
- .go-check
script:
- make ineffassign
allow_failure: true
misspell:
extends:
- .go-check
script:
- make misspell
go-build:
extends:
- .requires-build-image
stage: go-build
script:
- make build
unit-tests:
extends:
- .requires-build-image
stage: unit-tests
script:
- make coverage

.dockerignore (new file, 2 lines)

@@ -0,0 +1,2 @@
.git
dist

.gitignore (4 changed lines)

@@ -1,3 +1,7 @@
dist
*.swp
*.swo
/coverage.out
/test/output/
/nvidia-container-runtime
/nvidia-container-toolkit

.gitlab-ci.yml

@@ -1,37 +1,121 @@
stages:
- build
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.build_template: &build_definition
include:
- .common-ci.yml
.build-setup:
before_script:
- apk update
- apk upgrade
- apk add coreutils build-base sed git bash make
- docker run --rm --privileged multiarch/qemu-user-static --reset -p yes -c yes
# build-one jobs build packages for a single OS / ARCH combination.
#
# They are run during the first stage of the pipeline as a smoke test to ensure
# that we can successfully build packages on all of our architectures for a
# single OS. They are triggered on any change to an MR. No artifacts are
# produced as part of build-one jobs.
.build-one-setup:
extends:
- .build-setup
stage: build
script:
- OS="${CI_JOB_NAME#*:}"
- make REGISTRY=${CI_REGISTRY_IMAGE} ${OS}
- make REGISTRY=${CI_REGISTRY_IMAGE} push-${OS}
rules:
- if: $CI_MERGE_REQUEST_ID
# build-all jobs build packages for every OS / ARCH combination we support.
#
# They are run under two conditions:
# 1) Automatically whenever a new tag is pushed to the repo (e.g. v1.1.0)
# 2) Manually by a reviewer just before merging a MR.
#
# Unlike build-one jobs, it takes a long time to build the full suite
# OS / ARCH combinations, so this is optimized to only run once per MR
# (assuming it all passes). A full set of artifacts including the packages
# built for each OS / ARCH are produced as a result of these jobs.
.build-all-setup:
extends:
- .build-setup
stage: build-long
timeout: 2h 30m
rules:
- if: $CI_COMMIT_TAG
when: always
- if: $CI_MERGE_REQUEST_ID
when: always
variables:
ARTIFACTS_NAME: "${CI_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}-${CI_JOB_NAME}-artifacts-${CI_PIPELINE_ID}"
ARTIFACTS_DIR: "${CI_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}-artifacts-${CI_PIPELINE_ID}"
DIST_DIR: "${CI_PROJECT_DIR}/${ARTIFACTS_DIR}"
artifacts:
expire_in: 1 week
name: ${ARTIFACTS_NAME}
paths:
- dist/
- ${ARTIFACTS_DIR}
amd64:amzn1
<<: *build_definition
# The full set of build-one jobs organizes to build
# ubuntu18.04 in parallel on each of our supported ARCHs.
build-one-amd64:
extends:
- .build-one-setup
script:
- make ubuntu18.04-amd64
rules:
- when: always
amd64:amzn2
<<: *build_definition
build-one-ppc64le:
extends:
- .build-one-setup
script:
- make ubuntu18.04-ppc64le
amd64:centos7:
<<: *build_definition
build-one-arm64:
extends:
- .build-one-setup
script:
- make ubuntu18.04-arm64
amd64:opensuse-leap15.1
<<: *build_definition
# The full set of build-all jobs organized to
# have builds for each ARCH run in parallel.
build-all-amd64:
extends:
- .build-all-setup
script:
- make docker-amd64
amd64:debian9
<<: *build_definition
build-all-x86_64:
extends:
- .build-all-setup
script:
- make docker-x86_64
amd64:debian10
<<: *build_definition
build-all-ppc64le:
extends:
- .build-all-setup
script:
- make docker-ppc64le
amd64:ubuntu16.04
<<: *build_definition
build-all-arm64:
extends:
- .build-all-setup
script:
- make docker-arm64
amd64:ubuntu18.04
<<: *build_definition
build-all-aarch64:
extends:
- .build-all-setup
script:
- make docker-aarch64

.gitmodules (new file, 9 lines)

@@ -0,0 +1,9 @@
[submodule "third_party/libnvidia-container"]
path = third_party/libnvidia-container
url = https://gitlab.com/nvidia/container-toolkit/libnvidia-container.git
[submodule "third_party/nvidia-container-runtime"]
path = third_party/nvidia-container-runtime
url = https://gitlab.com/nvidia/container-toolkit/container-runtime.git
[submodule "third_party/nvidia-docker"]
path = third_party/nvidia-docker
url = https://gitlab.com/nvidia/container-toolkit/nvidia-docker.git

CONTRIBUTING.md (new file, 60 lines)

@@ -0,0 +1,60 @@
# Contribute to the NVIDIA Container Toolkit
Want to hack on the NVIDIA Container Toolkit Project? Awesome!
We only require you to sign your work; the section below describes how.
## Sign your work
The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
Then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe.smith@email.com>
Use your real name (sorry, no pseudonyms or anonymous contributions.)
If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

DEVELOPMENT.md (new file, 45 lines)

@@ -0,0 +1,45 @@
# NVIDIA Container Toolkit Release Tooling
This repository allows for the components of the NVIDIA container stack to be
built and released as the NVIDIA Container Toolkit from a single repository. The components:
* `libnvidia-container`
* `nvidia-container-runtime`
* `nvidia-docker`
are included as submodules in the `third_party` folder.
The `nvidia-container-toolkit` resides in this repo directly.
## Building
In order to build the packages, the following command is executed:
```sh
./scripts/build-all-components.sh TARGET
```
where `TARGET` is a make target that is valid for each of the sub-components.
These include:
* `ubuntu18.04-amd64`
* `centos8-x86_64`
The packages are generated in the `dist` folder.
## Testing local changes
In order to use the same build logic to generate packages with local changes,
the locations of the individual components can be overridden using the `LIBNVIDIA_CONTAINER_ROOT`,
`NVIDIA_CONTAINER_TOOLKIT_ROOT`, `NVIDIA_CONTAINER_RUNTIME_ROOT`, and `NVIDIA_DOCKER_ROOT`
environment variables.
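For example, to build against a local `libnvidia-container` checkout (the path is illustrative):
```sh
LIBNVIDIA_CONTAINER_ROOT=$HOME/src/libnvidia-container \
    ./scripts/build-all-components.sh ubuntu18.04-amd64
```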
## Testing packages locally
The [test/release](./test/release/) folder contains documentation on how the installation of local or staged packages can be tested.
## Releasing
A utility script [`scripts/release.sh`](./scripts/release.sh) is provided to build
packages required for release. If run without arguments, all supported distribution-architecture combinations are built. A specific distribution-architecture pair can also be provided
```sh
./scripts/release.sh ubuntu18.04-amd64
```
where the `amd64` builds for `ubuntu18.04` are provided as an example.

Jenkinsfile (new file, 142 lines)

@@ -0,0 +1,142 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
podTemplate (cloud:'sw-gpu-cloudnative',
containers: [
containerTemplate(name: 'docker', image: 'docker:dind', ttyEnabled: true, privileged: true),
containerTemplate(name: 'golang', image: 'golang:1.16.3', ttyEnabled: true)
]) {
node(POD_LABEL) {
def scmInfo
stage('checkout') {
scmInfo = checkout(scm)
}
stage('dependencies') {
container('golang') {
sh 'GO111MODULE=off go get -u github.com/client9/misspell/cmd/misspell'
sh 'GO111MODULE=off go get -u github.com/gordonklaus/ineffassign'
sh 'GO111MODULE=off go get -u golang.org/x/lint/golint'
}
container('docker') {
sh 'apk add --no-cache make bash git'
}
}
stage('check') {
parallel (
getGolangStages(["assert-fmt", "lint", "vet", "ineffassign", "misspell"])
)
}
stage('test') {
parallel (
getGolangStages(["test"])
)
}
def versionInfo
stage('version') {
container('docker') {
versionInfo = getVersionInfo(scmInfo)
println "versionInfo=${versionInfo}"
}
}
def dist = 'ubuntu20.04'
def arch = 'amd64'
def stageLabel = "${dist}-${arch}"
stage('build-one') {
container('docker') {
stage (stageLabel) {
sh "make ${dist}-${arch}"
}
}
}
stage('release') {
container('docker') {
stage (stageLabel) {
def component = 'main'
def repository = 'sw-gpu-cloudnative-debian-local/pool/main/'
def uploadSpec = """{
"files":
[ {
"pattern": "./dist/${dist}/${arch}/*.deb",
"target": "${repository}",
"props": "deb.distribution=${dist};deb.component=${component};deb.architecture=${arch}"
}
]
}"""
sh "echo starting release with versionInfo=${versionInfo}"
if (versionInfo.isTag) {
// upload to artifactory repository
def server = Artifactory.server 'sw-gpu-artifactory'
server.upload spec: uploadSpec
} else {
sh "echo skipping release for non-tagged build"
}
}
}
}
}
}
def getGolangStages(def targets) {
stages = [:]
for (t in targets) {
stages[t] = getLintClosure(t)
}
return stages
}
def getLintClosure(def target) {
return {
container('golang') {
stage(target) {
sh "make ${target}"
}
}
}
}
// getVersionInfo returns a hash of version info
def getVersionInfo(def scmInfo) {
def versionInfo = [
isTag: isTag(scmInfo.GIT_BRANCH)
]
scmInfo.each { k, v -> versionInfo[k] = v }
return versionInfo
}
def isTag(def branch) {
if (!branch.startsWith('v')) {
return false
}
def version = shOutput('git describe --all --exact-match --always')
return version == "tags/${branch}"
}
def shOutput(def script) {
return sh(script: script, returnStdout: true).trim()
}

LICENSE (new file, 202 lines)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Makefile (210 changed lines)

@@ -1,87 +1,147 @@
# Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCKER ?= docker
MKDIR ?= mkdir
REGISTRY ?= nvidia/toolkit
DIST_DIR ?= $(CURDIR)/dist
GOLANG_VERSION := 1.10.3
VERSION := 1.0.5
DIST_DIR := $(CURDIR)/dist
LIB_NAME := nvidia-container-toolkit
LIB_VERSION := 1.6.0
LIB_TAG ?= rc.1
.NOTPARALLEL:
.PHONY: all
GOLANG_VERSION := 1.16.3
MODULE := github.com/NVIDIA/nvidia-container-toolkit
all: ubuntu18.04 ubuntu16.04 debian10 debian9 centos7 amzn2 amzn1 opensuse-leap15.1
# By default run all native docker-based targets
docker-native:
include $(CURDIR)/docker/docker.mk
push%:
docker push "$(REGISTRY)/ubuntu:$*"
ifeq ($(IMAGE_NAME),)
REGISTRY ?= nvidia
IMAGE_NAME = $(REGISTRY)/container-toolkit
endif
ubuntu%: ARCH := amd64
ubuntu%:
$(DOCKER) build --pull \
--build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \
--build-arg VERSION_ID="$*" \
--build-arg PKG_VERS="$(VERSION)" \
--build-arg PKG_REV="1" \
--tag "$(REGISTRY)/ubuntu:$*" \
--file docker/Dockerfile.ubuntu .
$(MKDIR) -p $(DIST_DIR)/$@/$(ARCH)
$(DOCKER) run --cidfile $@.cid "$(REGISTRY)/ubuntu:$*"
$(DOCKER) cp $$(cat $@.cid):/dist/. $(DIST_DIR)/$@/$(ARCH)/
$(DOCKER) rm $$(cat $@.cid) && rm $@.cid
BUILDIMAGE_TAG ?= golang$(GOLANG_VERSION)
BUILDIMAGE ?= $(IMAGE_NAME)-build:$(BUILDIMAGE_TAG)
debian%: ARCH := amd64
debian%:
$(DOCKER) build --pull \
--build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \
--build-arg VERSION_ID="$*" \
--build-arg PKG_VERS="$(VERSION)" \
--build-arg PKG_REV="1" \
--tag "$(REGISTRY)/debian:$*" \
--file docker/Dockerfile.debian .
$(MKDIR) -p $(DIST_DIR)/$@/$(ARCH)
$(DOCKER) run --cidfile $@.cid "$(REGISTRY)/debian:$*"
$(DOCKER) cp $$(cat $@.cid):/dist/. $(DIST_DIR)/$@/$(ARCH)/
$(DOCKER) rm $$(cat $@.cid) && rm $@.cid
EXAMPLES := $(patsubst ./examples/%/,%,$(sort $(dir $(wildcard ./examples/*/))))
EXAMPLE_TARGETS := $(patsubst %,example-%, $(EXAMPLES))
centos%: ARCH := x86_64
centos%:
$(DOCKER) build --pull \
--build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \
--build-arg VERSION_ID="$*" \
--build-arg PKG_VERS="$(VERSION)" \
--build-arg PKG_REV="2" \
--tag "$(REGISTRY)/centos:$*" \
--file docker/Dockerfile.centos .
$(MKDIR) -p $(DIST_DIR)/$@/$(ARCH)
$(DOCKER) run --cidfile $@.cid "$(REGISTRY)/centos:$*"
$(DOCKER) cp $$(cat $@.cid):/dist/. $(DIST_DIR)/$@/$(ARCH)/
$(DOCKER) rm $$(cat $@.cid) && rm $@.cid
CMDS := $(patsubst ./cmd/%/,%,$(sort $(dir $(wildcard ./cmd/*/))))
CMD_TARGETS := $(patsubst %,cmd-%, $(CMDS))
amzn%: ARCH := x86_64
amzn%:
$(DOCKER) build --pull \
--build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \
--build-arg VERSION_ID="$*" \
--build-arg PKG_VERS="$(VERSION)" \
--build-arg PKG_REV="2.amzn$*" \
--tag "$(REGISTRY)/amzn:$*" \
--file docker/Dockerfile.amzn .
$(MKDIR) -p $(DIST_DIR)/$@/$(ARCH)
$(DOCKER) run --cidfile $@.cid "$(REGISTRY)/amzn:$*"
$(DOCKER) cp $$(cat $@.cid):/dist/. $(DIST_DIR)/$@/$(ARCH)/
$(DOCKER) rm $$(cat $@.cid) && rm $@.cid
$(info CMD_TARGETS=$(CMD_TARGETS))
opensuse-leap%: ARCH := x86_64
opensuse-leap%:
$(DOCKER) build --pull \
--build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \
--build-arg VERSION_ID="$*" \
--build-arg PKG_VERS="$(VERSION)" \
--build-arg PKG_REV="1" \
--tag "$(REGISTRY)/opensuse-leap:$*" \
--file docker/Dockerfile.opensuse-leap .
$(MKDIR) -p $(DIST_DIR)/$@/$(ARCH)
$(DOCKER) run --cidfile $@.cid "$(REGISTRY)/opensuse-leap:$*"
$(DOCKER) cp $$(cat $@.cid):/dist/. $(DIST_DIR)/$@/$(ARCH)/
$(DOCKER) rm $$(cat $@.cid) && rm $@.cid
CHECK_TARGETS := assert-fmt vet lint ineffassign misspell
MAKE_TARGETS := binaries build check fmt lint-internal test examples cmds coverage generate $(CHECK_TARGETS)
TARGETS := $(MAKE_TARGETS) $(EXAMPLE_TARGETS) $(CMD_TARGETS)
DOCKER_TARGETS := $(patsubst %,docker-%, $(TARGETS))
.PHONY: $(TARGETS) $(DOCKER_TARGETS)
GOOS ?= linux
binaries: cmds
ifneq ($(PREFIX),)
cmd-%: COMMAND_BUILD_OPTIONS = -o $(PREFIX)/$(*)
endif
cmds: $(CMD_TARGETS)
$(CMD_TARGETS): cmd-%:
GOOS=$(GOOS) go build -ldflags "-s -w" $(COMMAND_BUILD_OPTIONS) $(MODULE)/cmd/$(*)
build:
GOOS=$(GOOS) go build ./...
examples: $(EXAMPLE_TARGETS)
$(EXAMPLE_TARGETS): example-%:
GOOS=$(GOOS) go build ./examples/$(*)
all: check test build binary
check: $(CHECK_TARGETS)
# Apply go fmt to the codebase
fmt:
go list -f '{{.Dir}}' $(MODULE)/... \
| xargs gofmt -s -l -w
assert-fmt:
go list -f '{{.Dir}}' $(MODULE)/... \
| xargs gofmt -s -l > fmt.out
@if [ -s fmt.out ]; then \
echo "\nERROR: The following files are not formatted:\n"; \
cat fmt.out; \
rm fmt.out; \
exit 1; \
else \
rm fmt.out; \
fi
ineffassign:
ineffassign $(MODULE)/...
lint:
# We use `go list -f '{{.Dir}}' $(MODULE)/...` to skip the `vendor` folder.
go list -f '{{.Dir}}' $(MODULE)/... | grep -v /internal/ | xargs golint -set_exit_status
lint-internal:
# We use `go list -f '{{.Dir}}' $(MODULE)/...` to skip the `vendor` folder.
go list -f '{{.Dir}}' $(MODULE)/internal/... | xargs golint -set_exit_status
misspell:
misspell $(MODULE)/...
vet:
go vet $(MODULE)/...
COVERAGE_FILE := coverage.out
test: build cmds
go test -v -coverprofile=$(COVERAGE_FILE) $(MODULE)/...
coverage: test
cat $(COVERAGE_FILE) | grep -v "_mock.go" > $(COVERAGE_FILE).no-mocks
go tool cover -func=$(COVERAGE_FILE).no-mocks
generate:
go generate $(MODULE)/...
# Generate an image for containerized builds
# Note: This image is local only
.PHONY: .build-image .pull-build-image .push-build-image
.build-image: docker/Dockerfile.devel
if [ x"$(SKIP_IMAGE_BUILD)" = x"" ]; then \
$(DOCKER) build \
--progress=plain \
--build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \
--tag $(BUILDIMAGE) \
-f $(^) \
docker; \
fi
.pull-build-image:
$(DOCKER) pull $(BUILDIMAGE)
.push-build-image:
$(DOCKER) push $(BUILDIMAGE)
$(DOCKER_TARGETS): docker-%: .build-image
@echo "Running 'make $(*)' in docker container $(BUILDIMAGE)"
$(DOCKER) run \
--rm \
-e GOCACHE=/tmp/.cache \
-v $(PWD):$(PWD) \
-w $(PWD) \
--user $$(id -u):$$(id -g) \
$(BUILDIMAGE) \
make $(*)
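As a usage sketch (illustrative; the available targets follow from the pattern rules above, and the docker-% targets simply rerun the same target inside the containerized build image):

make cmds          # build all commands under ./cmd natively
make docker-cmds   # run the same build inside the BUILDIMAGE container
make test          # build the commands, then run the unit tests with coverage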

README.md

@@ -0,0 +1,31 @@
# NVIDIA Container Toolkit
[![GitHub license](https://img.shields.io/github/license/NVIDIA/nvidia-container-toolkit?style=flat-square)](https://raw.githubusercontent.com/NVIDIA/nvidia-container-toolkit/master/LICENSE)
[![Documentation](https://img.shields.io/badge/documentation-wiki-blue.svg?style=flat-square)](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/overview.html)
[![Package repository](https://img.shields.io/badge/packages-repository-b956e8.svg?style=flat-square)](https://nvidia.github.io/libnvidia-container)
![nvidia-container-stack](https://cloud.githubusercontent.com/assets/3028125/12213714/5b208976-b632-11e5-8406-38d379ec46aa.png)
## Introduction
The NVIDIA Container Toolkit allows users to build and run GPU accelerated containers. The toolkit includes a container runtime [library](https://github.com/NVIDIA/libnvidia-container) and utilities to automatically configure containers to leverage NVIDIA GPUs.
Product documentation including an architecture overview, platform support, and installation and usage guides can be found in the [documentation repository](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/overview.html).
## Getting Started
**Make sure you have installed the [NVIDIA driver](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#nvidia-drivers) for your Linux Distribution**
**Note that you do not need to install the CUDA Toolkit on the host system, but the NVIDIA driver needs to be installed**
For instructions on getting started with the NVIDIA Container Toolkit, refer to the [installation guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installation-guide).
## Usage
The [user guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html) provides information on the configuration and command line options available when running GPU containers with Docker.
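For example, once the toolkit and the NVIDIA driver are installed, a typical invocation with Docker 19.03 or later looks like the following (illustrative; the image tag depends on your CUDA version): `docker run --rm --gpus all nvidia/cuda:11.0-base nvidia-smi`.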
## Issues and Contributing
[Check out the Contributing document!](CONTRIBUTING.md)
* Please let us know by [filing a new issue](https://github.com/NVIDIA/nvidia-container-toolkit/issues/new)
* You can contribute by creating a [merge request](https://gitlab.com/nvidia/container-toolkit/container-toolkit/-/merge_requests/new) to our public GitLab repository


@@ -0,0 +1,79 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package main
import (
"fmt"
"io"
"os"
"github.com/sirupsen/logrus"
"github.com/tsaikd/KDGoLib/logrusutil"
)
// Logger wraps a logrus.Logger and adds the ability to redirect output to a log file
type Logger struct {
*logrus.Logger
previousOutput io.Writer
logFile *os.File
}
// NewLogger constructs a Logger with a predefined formatter
func NewLogger() *Logger {
logrusLogger := logrus.New()
formatter := &logrusutil.ConsoleLogFormatter{
TimestampFormat: "2006/01/02 15:04:05",
Flag: logrusutil.Ltime,
}
logger := &Logger{
Logger: logrusLogger,
}
logger.SetFormatter(formatter)
return logger
}
// LogToFile opens the specified file for appending and sets the logger to
// output to the opened file. A reference to the file pointer is stored to
// allow this to be closed.
func (l *Logger) LogToFile(filename string) error {
logFile, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return fmt.Errorf("error opening debug log file: %v", err)
}
l.logFile = logFile
l.previousOutput = l.Out
l.SetOutput(logFile)
return nil
}
// CloseFile closes the log file (if any) and resets the logger output to what it
// was before LogToFile was called.
func (l *Logger) CloseFile() error {
if l.logFile == nil {
return nil
}
logFile := l.logFile
l.SetOutput(l.previousOutput)
l.logFile = nil
return logFile.Close()
}
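A brief usage sketch of this type (illustrative only; the log file path below is an assumption, not taken from the repository): the embedded logrus.Logger supplies the usual logging methods, while LogToFile and CloseFile bracket the period during which output is redirected to a file.

logger := NewLogger()
if err := logger.LogToFile("/var/log/nvidia-container-runtime.log"); err != nil {
    logger.Warnf("could not redirect logging to file: %v", err)
}
defer logger.CloseFile()
logger.Infof("runtime invoked")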


@@ -0,0 +1,89 @@
package main
import (
"fmt"
"os"
"path"
"github.com/pelletier/go-toml"
)
const (
configOverride = "XDG_CONFIG_HOME"
configFilePath = "nvidia-container-runtime/config.toml"
hookDefaultFilePath = "/usr/bin/nvidia-container-runtime-hook"
)
var (
configDir = "/etc/"
)
var logger = NewLogger()
func main() {
err := run(os.Args)
if err != nil {
logger.Errorf("Error running %v: %v", os.Args, err)
os.Exit(1)
}
}
// run is an entry point that allows for idiomatic handling of errors
// when calling from the main function.
func run(argv []string) (err error) {
cfg, err := getConfig()
if err != nil {
return fmt.Errorf("error loading config: %v", err)
}
err = logger.LogToFile(cfg.debugFilePath)
if err != nil {
return fmt.Errorf("error opening debug log file: %v", err)
}
defer func() {
// We capture and log a returning error before closing the log file.
if err != nil {
logger.Errorf("Error running %v: %v", argv, err)
}
logger.CloseFile()
}()
r, err := newRuntime(argv)
if err != nil {
return fmt.Errorf("error creating runtime: %v", err)
}
logger.Printf("Running %s\n", argv[0])
return r.Exec(argv)
}
type config struct {
debugFilePath string
}
// getConfig sets up the config struct. Values are read from a toml file
// or set via the environment.
func getConfig() (*config, error) {
cfg := &config{}
if XDGConfigDir := os.Getenv(configOverride); len(XDGConfigDir) != 0 {
configDir = XDGConfigDir
}
configFilePath := path.Join(configDir, configFilePath)
tomlContent, err := os.ReadFile(configFilePath)
if err != nil {
return nil, err
}
toml, err := toml.Load(string(tomlContent))
if err != nil {
return nil, err
}
cfg.debugFilePath = toml.GetDefault("nvidia-container-runtime.debug", "/dev/null").(string)
return cfg, nil
}
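For reference, a minimal configuration that getConfig accepts looks like the sketch below (the log path is illustrative). By default the file is read from /etc/nvidia-container-runtime/config.toml, or from $XDG_CONFIG_HOME/nvidia-container-runtime/config.toml when that variable is set; if the debug key is absent it defaults to /dev/null.

[nvidia-container-runtime]
debug = "/var/log/nvidia-container-runtime.log"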


@@ -0,0 +1,293 @@
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/require"
)
const (
nvidiaRuntime = "nvidia-container-runtime"
nvidiaHook = "nvidia-container-runtime-hook"
bundlePathSuffix = "test/output/bundle/"
specFile = "config.json"
unmodifiedSpecFileSuffix = "test/input/test_spec.json"
)
type testConfig struct {
root string
binPath string
}
var cfg *testConfig
func TestMain(m *testing.M) {
// TEST SETUP
// Determine the module root and the test binary path
var err error
moduleRoot, err := getModuleRoot()
if err != nil {
logger.Fatalf("error in test setup: could not get module root: %v", err)
}
testBinPath := filepath.Join(moduleRoot, "test", "bin")
testInputPath := filepath.Join(moduleRoot, "test", "input")
// Set the environment variables for the test
os.Setenv("PATH", prependToPath(testBinPath, moduleRoot))
os.Setenv("XDG_CONFIG_HOME", testInputPath)
// Confirm that the environment is configured correctly
runcPath, err := exec.LookPath(runcExecutableName)
if err != nil || filepath.Join(testBinPath, runcExecutableName) != runcPath {
logger.Fatalf("error in test setup: mock runc path set incorrectly in TestMain(): %v", err)
}
hookPath, err := exec.LookPath(nvidiaHook)
if err != nil || filepath.Join(testBinPath, nvidiaHook) != hookPath {
logger.Fatalf("error in test setup: mock hook path set incorrectly in TestMain(): %v", err)
}
// Store the root and binary paths in the test Config
cfg = &testConfig{
root: moduleRoot,
binPath: testBinPath,
}
// RUN TESTS
exitCode := m.Run()
// TEST CLEANUP
os.Remove(specFile)
os.Exit(exitCode)
}
func getModuleRoot() (string, error) {
_, filename, _, _ := runtime.Caller(0)
return hasGoMod(filename)
}
func hasGoMod(dir string) (string, error) {
if dir == "" || dir == "/" {
return "", fmt.Errorf("module root not found")
}
_, err := os.Stat(filepath.Join(dir, "go.mod"))
if err != nil {
return hasGoMod(filepath.Dir(dir))
}
return dir, nil
}
func prependToPath(additionalPaths ...string) string {
paths := strings.Split(os.Getenv("PATH"), ":")
paths = append(additionalPaths, paths...)
return strings.Join(paths, ":")
}
// case 1) nvidia-container-runtime run --bundle
// case 2) nvidia-container-runtime create --bundle
// - Confirm the runtime handles bad input correctly
func TestBadInput(t *testing.T) {
err := cfg.generateNewRuntimeSpec()
if err != nil {
t.Fatal(err)
}
cmdRun := exec.Command(nvidiaRuntime, "run", "--bundle")
t.Logf("executing: %s\n", strings.Join(cmdRun.Args, " "))
output, err := cmdRun.CombinedOutput()
require.Errorf(t, err, "runtime should return an error", "output=%v", string(output))
cmdCreate := exec.Command(nvidiaRuntime, "create", "--bundle")
t.Logf("executing: %s\n", strings.Join(cmdCreate.Args, " "))
err = cmdCreate.Run()
require.Error(t, err, "runtime should return an error")
}
// case 1) nvidia-container-runtime run --bundle <bundle-name> <ctr-name>
// - Confirm the runtime runs with no errors
// case 2) nvidia-container-runtime create --bundle <bundle-name> <ctr-name>
// - Confirm the runtime inserts the NVIDIA prestart hook correctly
func TestGoodInput(t *testing.T) {
err := cfg.generateNewRuntimeSpec()
if err != nil {
t.Fatalf("error generating runtime spec: %v", err)
}
cmdRun := exec.Command(nvidiaRuntime, "run", "--bundle", cfg.bundlePath(), "testcontainer")
t.Logf("executing: %s\n", strings.Join(cmdRun.Args, " "))
output, err := cmdRun.CombinedOutput()
require.NoErrorf(t, err, "runtime should not return an error", "output=%v", string(output))
// Check config.json and confirm there are no hooks
spec, err := cfg.getRuntimeSpec()
require.NoError(t, err, "should be no errors when reading and parsing spec from config.json")
require.Empty(t, spec.Hooks, "there should be no hooks in config.json")
cmdCreate := exec.Command(nvidiaRuntime, "create", "--bundle", cfg.bundlePath(), "testcontainer")
t.Logf("executing: %s\n", strings.Join(cmdCreate.Args, " "))
err = cmdCreate.Run()
require.NoError(t, err, "runtime should not return an error")
// Check config.json for NVIDIA prestart hook
spec, err = cfg.getRuntimeSpec()
require.NoError(t, err, "should be no errors when reading and parsing spec from config.json")
require.NotEmpty(t, spec.Hooks, "there should be hooks in config.json")
require.Equal(t, 1, nvidiaHookCount(spec.Hooks), "exactly one nvidia prestart hook should be inserted correctly into config.json")
}
// NVIDIA prestart hook already present in config file
func TestDuplicateHook(t *testing.T) {
err := cfg.generateNewRuntimeSpec()
if err != nil {
t.Fatal(err)
}
var spec specs.Spec
spec, err = cfg.getRuntimeSpec()
if err != nil {
t.Fatal(err)
}
t.Logf("inserting nvidia prestart hook to config.json")
if err = addNVIDIAHook(&spec); err != nil {
t.Fatal(err)
}
jsonOutput, err := json.MarshalIndent(spec, "", "\t")
if err != nil {
t.Fatal(err)
}
jsonFile, err := os.OpenFile(cfg.specFilePath(), os.O_RDWR, 0644)
if err != nil {
t.Fatal(err)
}
_, err = jsonFile.WriteAt(jsonOutput, 0)
if err != nil {
t.Fatal(err)
}
// Test how runtime handles already existing prestart hook in config.json
cmdCreate := exec.Command(nvidiaRuntime, "create", "--bundle", cfg.bundlePath(), "testcontainer")
t.Logf("executing: %s\n", strings.Join(cmdCreate.Args, " "))
output, err := cmdCreate.CombinedOutput()
require.NoErrorf(t, err, "runtime should not return an error", "output=%v", string(output))
// Check config.json for NVIDIA prestart hook
spec, err = cfg.getRuntimeSpec()
require.NoError(t, err, "should be no errors when reading and parsing spec from config.json")
require.NotEmpty(t, spec.Hooks, "there should be hooks in config.json")
require.Equal(t, 1, nvidiaHookCount(spec.Hooks), "exactly one nvidia prestart hook should be inserted correctly into config.json")
}
// addNVIDIAHook is a basic wrapper for nvidiaContainerRuntime.addNVIDIAHook that is used for
// testing.
func addNVIDIAHook(spec *specs.Spec) error {
r := nvidiaContainerRuntime{logger: logger.Logger}
return r.addNVIDIAHook(spec)
}
func (c testConfig) getRuntimeSpec() (specs.Spec, error) {
filePath := c.specFilePath()
var spec specs.Spec
jsonFile, err := os.OpenFile(filePath, os.O_RDWR, 0644)
if err != nil {
return spec, err
}
defer jsonFile.Close()
jsonContent, err := ioutil.ReadAll(jsonFile)
if err != nil {
return spec, err
} else if json.Valid(jsonContent) {
err = json.Unmarshal(jsonContent, &spec)
if err != nil {
return spec, err
}
} else {
err = json.NewDecoder(bytes.NewReader(jsonContent)).Decode(&spec)
if err != nil {
return spec, err
}
}
return spec, err
}
func (c testConfig) bundlePath() string {
return filepath.Join(c.root, bundlePathSuffix)
}
func (c testConfig) specFilePath() string {
return filepath.Join(c.bundlePath(), specFile)
}
func (c testConfig) unmodifiedSpecFile() string {
return filepath.Join(c.root, unmodifiedSpecFileSuffix)
}
func (c testConfig) generateNewRuntimeSpec() error {
var err error
err = os.MkdirAll(c.bundlePath(), 0755)
if err != nil {
return err
}
cmd := exec.Command("cp", c.unmodifiedSpecFile(), c.specFilePath())
err = cmd.Run()
if err != nil {
return err
}
return nil
}
// Return number of valid NVIDIA prestart hooks in runtime spec
func nvidiaHookCount(hooks *specs.Hooks) int {
if hooks == nil {
return 0
}
count := 0
for _, hook := range hooks.Prestart {
if strings.Contains(hook.Path, nvidiaHook) {
count++
}
}
return count
}
func TestGetConfigWithCustomConfig(t *testing.T) {
wd, err := os.Getwd()
require.NoError(t, err)
// The default configuration disables debug logging; this custom config enables it
contents := []byte("[nvidia-container-runtime]\ndebug = \"/nvidia-container-toolkit.log\"")
testDir := filepath.Join(wd, "test")
filename := filepath.Join(testDir, configFilePath)
os.Setenv(configOverride, testDir)
require.NoError(t, os.MkdirAll(filepath.Dir(filename), 0766))
require.NoError(t, ioutil.WriteFile(filename, contents, 0766))
defer func() { require.NoError(t, os.RemoveAll(testDir)) }()
cfg, err := getConfig()
require.NoError(t, err)
require.Equal(t, cfg.debugFilePath, "/nvidia-container-toolkit.log")
}


@@ -0,0 +1,145 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package main
import (
"fmt"
"os"
"os/exec"
"strings"
"github.com/NVIDIA/nvidia-container-toolkit/internal/oci"
"github.com/opencontainers/runtime-spec/specs-go"
log "github.com/sirupsen/logrus"
)
// nvidiaContainerRuntime encapsulates the NVIDIA Container Runtime. It wraps the specified runtime, conditionally
// modifying the specified OCI specification before invoking the runtime.
type nvidiaContainerRuntime struct {
logger *log.Logger
runtime oci.Runtime
ociSpec oci.Spec
}
var _ oci.Runtime = (*nvidiaContainerRuntime)(nil)
// newNvidiaContainerRuntimeWithLogger is a constructor for a standard runtime shim.
func newNvidiaContainerRuntimeWithLogger(logger *log.Logger, runtime oci.Runtime, ociSpec oci.Spec) (oci.Runtime, error) {
r := nvidiaContainerRuntime{
logger: logger,
runtime: runtime,
ociSpec: ociSpec,
}
return &r, nil
}
// Exec defines the entrypoint for the NVIDIA Container Runtime. A check is performed to see whether modifications
// to the OCI spec are required and, if so, the applicable modifications are applied. The supplied arguments are then
// forwarded to the underlying runtime's Exec method.
func (r nvidiaContainerRuntime) Exec(args []string) error {
if r.modificationRequired(args) {
err := r.modifyOCISpec()
if err != nil {
return fmt.Errorf("error modifying OCI spec: %v", err)
}
}
r.logger.Println("Forwarding command to runtime")
return r.runtime.Exec(args)
}
// modificationRequired checks the input arguments to determine whether a modification
// to the OCI spec is required.
func (r nvidiaContainerRuntime) modificationRequired(args []string) bool {
var previousWasBundle bool
for _, a := range args {
// We check for '--bundle create' explicitly to ensure that we
// don't inadvertently trigger a modification if the bundle directory
// is specified as `create`
if !previousWasBundle && isBundleFlag(a) {
previousWasBundle = true
continue
}
if !previousWasBundle && a == "create" {
r.logger.Infof("'create' command detected; modification required")
return true
}
previousWasBundle = false
}
r.logger.Infof("No modification required")
return false
}
// modifyOCISpec loads and modifies the OCI spec specified in the nvidiaContainerRuntime
// struct. The spec is modified in-place and written to the same file as the input after
// modifications are applied.
func (r nvidiaContainerRuntime) modifyOCISpec() error {
err := r.ociSpec.Load()
if err != nil {
return fmt.Errorf("error loading OCI specification for modification: %v", err)
}
err = r.ociSpec.Modify(r.addNVIDIAHook)
if err != nil {
return fmt.Errorf("error injecting NVIDIA Container Runtime hook: %v", err)
}
err = r.ociSpec.Flush()
if err != nil {
return fmt.Errorf("error writing modified OCI specification: %v", err)
}
return nil
}
// addNVIDIAHook modifies the specified OCI specification in-place, inserting a
// prestart hook.
func (r nvidiaContainerRuntime) addNVIDIAHook(spec *specs.Spec) error {
path, err := exec.LookPath("nvidia-container-runtime-hook")
if err != nil {
path = hookDefaultFilePath
_, err = os.Stat(path)
if err != nil {
return err
}
}
r.logger.Printf("prestart hook path: %s\n", path)
args := []string{path}
if spec.Hooks == nil {
spec.Hooks = &specs.Hooks{}
} else if len(spec.Hooks.Prestart) != 0 {
for _, hook := range spec.Hooks.Prestart {
if !strings.Contains(hook.Path, "nvidia-container-runtime-hook") {
continue
}
r.logger.Println("existing nvidia prestart hook in OCI spec file")
return nil
}
}
spec.Hooks.Prestart = append(spec.Hooks.Prestart, specs.Hook{
Path: path,
Args: append(args, "prestart"),
})
return nil
}
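To illustrate the effect of addNVIDIAHook, the prestart hook entry appended to the bundle's config.json looks roughly as follows (the path shown is hookDefaultFilePath; in practice it is whatever exec.LookPath resolves for nvidia-container-runtime-hook):

"hooks": {
  "prestart": [
    {
      "path": "/usr/bin/nvidia-container-runtime-hook",
      "args": ["/usr/bin/nvidia-container-runtime-hook", "prestart"]
    }
  ]
}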


@@ -0,0 +1,230 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package main
import (
"fmt"
"os"
"strings"
"testing"
"github.com/NVIDIA/nvidia-container-toolkit/internal/oci"
"github.com/opencontainers/runtime-spec/specs-go"
testlog "github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/require"
)
func TestArgsGetConfigFilePath(t *testing.T) {
wd, err := os.Getwd()
require.NoError(t, err)
testCases := []struct {
bundleDir string
ociSpecPath string
}{
{
ociSpecPath: fmt.Sprintf("%v/config.json", wd),
},
{
bundleDir: "/foo/bar",
ociSpecPath: "/foo/bar/config.json",
},
{
bundleDir: "/foo/bar/",
ociSpecPath: "/foo/bar/config.json",
},
}
for i, tc := range testCases {
cp, err := getOCISpecFilePath(tc.bundleDir)
require.NoErrorf(t, err, "%d: %v", i, tc)
require.Equalf(t, tc.ociSpecPath, cp, "%d: %v", i, tc)
}
}
func TestAddNvidiaHook(t *testing.T) {
logger, logHook := testlog.NewNullLogger()
shim := nvidiaContainerRuntime{
logger: logger,
}
testCases := []struct {
spec *specs.Spec
errorPrefix string
shouldNotAdd bool
}{
{
spec: &specs.Spec{},
},
{
spec: &specs.Spec{
Hooks: &specs.Hooks{},
},
},
{
spec: &specs.Spec{
Hooks: &specs.Hooks{
Prestart: []specs.Hook{{
Path: "some-hook",
}},
},
},
},
{
spec: &specs.Spec{
Hooks: &specs.Hooks{
Prestart: []specs.Hook{{
Path: "nvidia-container-runtime-hook",
}},
},
},
shouldNotAdd: true,
},
}
for i, tc := range testCases {
logHook.Reset()
var numPrestartHooks int
if tc.spec.Hooks != nil {
numPrestartHooks = len(tc.spec.Hooks.Prestart)
}
err := shim.addNVIDIAHook(tc.spec)
if tc.errorPrefix == "" {
require.NoErrorf(t, err, "%d: %v", i, tc)
} else {
require.Truef(t, strings.HasPrefix(err.Error(), tc.errorPrefix), "%d: %v", i, tc)
}
require.NotNilf(t, tc.spec.Hooks, "%d: %v", i, tc)
require.Equalf(t, 1, nvidiaHookCount(tc.spec.Hooks), "%d: %v", i, tc)
if tc.shouldNotAdd {
require.Equal(t, numPrestartHooks, len(tc.spec.Hooks.Prestart), "%d: %v", i, tc)
} else {
require.Equal(t, numPrestartHooks+1, len(tc.spec.Hooks.Prestart), "%d: %v", i, tc)
nvidiaHook := tc.spec.Hooks.Prestart[len(tc.spec.Hooks.Prestart)-1]
// TODO: This assumes that the hook has been set up in the makefile
expectedPath := "/usr/bin/nvidia-container-runtime-hook"
require.Equalf(t, expectedPath, nvidiaHook.Path, "%d: %v", i, tc)
require.Equalf(t, []string{expectedPath, "prestart"}, nvidiaHook.Args, "%d: %v", i, tc)
require.Emptyf(t, nvidiaHook.Env, "%d: %v", i, tc)
require.Nilf(t, nvidiaHook.Timeout, "%d: %v", i, tc)
}
}
}
func TestNvidiaContainerRuntime(t *testing.T) {
logger, hook := testlog.NewNullLogger()
testCases := []struct {
shim nvidiaContainerRuntime
shouldModify bool
args []string
modifyError error
writeError error
}{
{
shim: nvidiaContainerRuntime{},
shouldModify: false,
},
{
shim: nvidiaContainerRuntime{},
args: []string{"create"},
shouldModify: true,
},
{
shim: nvidiaContainerRuntime{},
args: []string{"--bundle=create"},
shouldModify: false,
},
{
shim: nvidiaContainerRuntime{},
args: []string{"--bundle", "create"},
shouldModify: false,
},
{
shim: nvidiaContainerRuntime{},
args: []string{"create"},
shouldModify: true,
},
{
shim: nvidiaContainerRuntime{},
args: []string{"create"},
modifyError: fmt.Errorf("error modifying"),
shouldModify: true,
},
{
shim: nvidiaContainerRuntime{},
args: []string{"create"},
writeError: fmt.Errorf("error writing"),
shouldModify: true,
},
}
for i, tc := range testCases {
tc.shim.logger = logger
hook.Reset()
spec := &specs.Spec{}
ociMock := oci.NewMockSpec(spec, tc.writeError, tc.modifyError)
require.Equal(t, tc.shouldModify, tc.shim.modificationRequired(tc.args), "%d: %v", i, tc)
tc.shim.ociSpec = ociMock
tc.shim.runtime = &MockShim{}
err := tc.shim.Exec(tc.args)
if tc.modifyError != nil || tc.writeError != nil {
require.Error(t, err, "%d: %v", i, tc)
} else {
require.NoError(t, err, "%d: %v", i, tc)
}
if tc.shouldModify {
require.Equal(t, 1, ociMock.MockModify.Callcount, "%d: %v", i, tc)
require.Equal(t, 1, nvidiaHookCount(spec.Hooks), "%d: %v", i, tc)
} else {
require.Equal(t, 0, ociMock.MockModify.Callcount, "%d: %v", i, tc)
require.Nil(t, spec.Hooks, "%d: %v", i, tc)
}
writeExpected := tc.shouldModify && tc.modifyError == nil
if writeExpected {
require.Equal(t, 1, ociMock.MockFlush.Callcount, "%d: %v", i, tc)
} else {
require.Equal(t, 0, ociMock.MockFlush.Callcount, "%d: %v", i, tc)
}
}
}
type MockShim struct {
called bool
args []string
returnError error
}
func (m *MockShim) Exec(args []string) error {
m.called = true
m.args = args
return m.returnError
}


@@ -0,0 +1,176 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/NVIDIA/nvidia-container-toolkit/internal/oci"
)
const (
ociSpecFileName = "config.json"
dockerRuncExecutableName = "docker-runc"
runcExecutableName = "runc"
)
// newRuntime is a factory method that constructs a runtime based on the selected configuration.
func newRuntime(argv []string) (oci.Runtime, error) {
ociSpec, err := newOCISpec(argv)
if err != nil {
return nil, fmt.Errorf("error constructing OCI specification: %v", err)
}
runc, err := newRuncRuntime()
if err != nil {
return nil, fmt.Errorf("error constructing runc runtime: %v", err)
}
r, err := newNvidiaContainerRuntimeWithLogger(logger.Logger, runc, ociSpec)
if err != nil {
return nil, fmt.Errorf("error constructing NVIDIA Container Runtime: %v", err)
}
return r, nil
}
// newOCISpec constructs an OCI spec for the provided arguments
func newOCISpec(argv []string) (oci.Spec, error) {
bundlePath, err := getBundlePath(argv)
if err != nil {
return nil, fmt.Errorf("error parsing command line arguments: %v", err)
}
ociSpecPath, err := getOCISpecFilePath(bundlePath)
if err != nil {
return nil, fmt.Errorf("error getting OCI specification file path: %v", err)
}
ociSpec := oci.NewSpecFromFile(ociSpecPath)
return ociSpec, nil
}
// newRuncRuntime locates the runc binary and wraps it in a SyscallExecRuntime
func newRuncRuntime() (oci.Runtime, error) {
runtimePath, err := findRunc()
if err != nil {
return nil, fmt.Errorf("error locating runtime: %v", err)
}
runc, err := oci.NewSyscallExecRuntimeWithLogger(logger.Logger, runtimePath)
if err != nil {
return nil, fmt.Errorf("error constructing runtime: %v", err)
}
return runc, nil
}
// getBundlePath checks the specified slice of strings (argv) for a 'bundle' flag as allowed by runc.
// The following are supported:
// --bundle{{SEP}}BUNDLE_PATH
// -bundle{{SEP}}BUNDLE_PATH
// -b{{SEP}}BUNDLE_PATH
// where {{SEP}} is either ' ' or '='
func getBundlePath(argv []string) (string, error) {
var bundlePath string
for i := 0; i < len(argv); i++ {
param := argv[i]
parts := strings.SplitN(param, "=", 2)
if !isBundleFlag(parts[0]) {
continue
}
// The flag has the format --bundle=/path
if len(parts) == 2 {
bundlePath = parts[1]
continue
}
// The flag has the format --bundle /path
if i+1 < len(argv) {
bundlePath = argv[i+1]
i++
continue
}
// --bundle / -b was the last element of argv
return "", fmt.Errorf("bundle option requires an argument")
}
return bundlePath, nil
}
// findRunc locates runc in the path, returning the full path to the
// binary or an error.
func findRunc() (string, error) {
runtimeCandidates := []string{
dockerRuncExecutableName,
runcExecutableName,
}
return findRuntime(runtimeCandidates)
}
func findRuntime(runtimeCandidates []string) (string, error) {
for _, candidate := range runtimeCandidates {
logger.Infof("Looking for runtime binary '%v'", candidate)
runcPath, err := exec.LookPath(candidate)
if err == nil {
logger.Infof("Found runtime binary '%v'", runcPath)
return runcPath, nil
}
logger.Warnf("Runtime binary '%v' not found: %v", candidate, err)
}
return "", fmt.Errorf("no runtime binary found from candidate list: %v", runtimeCandidates)
}
func isBundleFlag(arg string) bool {
if !strings.HasPrefix(arg, "-") {
return false
}
trimmed := strings.TrimLeft(arg, "-")
return trimmed == "b" || trimmed == "bundle"
}
// getOCISpecFilePath returns the expected path to the OCI specification file for the given
// bundle directory or the current working directory if not specified.
func getOCISpecFilePath(bundleDir string) (string, error) {
if bundleDir == "" {
logger.Infof("Bundle directory path is empty, using working directory.")
workingDirectory, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("error getting working directory: %v", err)
}
bundleDir = workingDirectory
}
logger.Infof("Using bundle directory: %v", bundleDir)
OCISpecFilePath := filepath.Join(bundleDir, ociSpecFileName)
logger.Infof("Using OCI specification file path: %v", OCISpecFilePath)
return OCISpecFilePath, nil
}
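A small worked example of the flag handling above (the bundle path is illustrative, not taken from the repository):

// Given a typical containerd/Docker invocation:
args := []string{"create", "--bundle", "/run/containerd/io.containerd.runtime.v2.task/moby/abc123", "abc123"}
bundle, _ := getBundlePath(args)          // "/run/containerd/io.containerd.runtime.v2.task/moby/abc123"
specPath, _ := getOCISpecFilePath(bundle) // "<bundle>/config.json"
// With no --bundle / -b flag, getBundlePath returns "" and the spec is expected
// in the current working directory.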


@@ -0,0 +1,192 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package main
import (
"path/filepath"
"testing"
testlog "github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/require"
)
func TestConstructor(t *testing.T) {
shim, err := newRuntime([]string{})
require.NoError(t, err)
require.NotNil(t, shim)
}
func TestGetBundlePath(t *testing.T) {
type expected struct {
bundle string
isError bool
}
testCases := []struct {
argv []string
expected expected
}{
{
argv: []string{},
},
{
argv: []string{"create"},
},
{
argv: []string{"--bundle"},
expected: expected{
isError: true,
},
},
{
argv: []string{"-b"},
expected: expected{
isError: true,
},
},
{
argv: []string{"--bundle", "/foo/bar"},
expected: expected{
bundle: "/foo/bar",
},
},
{
argv: []string{"--not-bundle", "/foo/bar"},
},
{
argv: []string{"--"},
},
{
argv: []string{"-bundle", "/foo/bar"},
expected: expected{
bundle: "/foo/bar",
},
},
{
argv: []string{"--bundle=/foo/bar"},
expected: expected{
bundle: "/foo/bar",
},
},
{
argv: []string{"-b=/foo/bar"},
expected: expected{
bundle: "/foo/bar",
},
},
{
argv: []string{"-b=/foo/=bar"},
expected: expected{
bundle: "/foo/=bar",
},
},
{
argv: []string{"-b", "/foo/bar"},
expected: expected{
bundle: "/foo/bar",
},
},
{
argv: []string{"create", "-b", "/foo/bar"},
expected: expected{
bundle: "/foo/bar",
},
},
{
argv: []string{"-b", "create", "create"},
expected: expected{
bundle: "create",
},
},
{
argv: []string{"-b=create", "create"},
expected: expected{
bundle: "create",
},
},
{
argv: []string{"-b", "create"},
expected: expected{
bundle: "create",
},
},
}
for i, tc := range testCases {
bundle, err := getBundlePath(tc.argv)
if tc.expected.isError {
require.Errorf(t, err, "%d: %v", i, tc)
} else {
require.NoErrorf(t, err, "%d: %v", i, tc)
}
require.Equalf(t, tc.expected.bundle, bundle, "%d: %v", i, tc)
}
}
func TestFindRunc(t *testing.T) {
testLogger, _ := testlog.NewNullLogger()
logger.Logger = testLogger
runcPath, err := findRunc()
require.NoError(t, err)
require.Equal(t, filepath.Join(cfg.binPath, runcExecutableName), runcPath)
}
func TestFindRuntime(t *testing.T) {
testLogger, _ := testlog.NewNullLogger()
logger.Logger = testLogger
testCases := []struct {
candidates []string
expectedPath string
}{
{
candidates: []string{},
},
{
candidates: []string{"not-runc"},
},
{
candidates: []string{"not-runc", "also-not-runc"},
},
{
candidates: []string{runcExecutableName},
expectedPath: filepath.Join(cfg.binPath, runcExecutableName),
},
{
candidates: []string{runcExecutableName, "not-runc"},
expectedPath: filepath.Join(cfg.binPath, runcExecutableName),
},
{
candidates: []string{"not-runc", runcExecutableName},
expectedPath: filepath.Join(cfg.binPath, runcExecutableName),
},
}
for i, tc := range testCases {
runcPath, err := findRuntime(tc.candidates)
if tc.expectedPath == "" {
require.Error(t, err, "%d: %v", i, tc)
} else {
require.NoError(t, err, "%d: %v", i, tc)
}
require.Equal(t, tc.expectedPath, runcPath, "%d: %v", i, tc)
}
}


@@ -18,6 +18,8 @@ func capabilityToCLI(cap string) string {
return "--video"
case "display":
return "--display"
case "ngx":
return "--ngx"
default:
log.Panicln("unknown driver capability:", cap)
}


@@ -0,0 +1,435 @@
package main
import (
"encoding/json"
"fmt"
"log"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"golang.org/x/mod/semver"
)
var envSwarmGPU *string
const (
envCUDAVersion = "CUDA_VERSION"
envNVRequirePrefix = "NVIDIA_REQUIRE_"
envNVRequireCUDA = envNVRequirePrefix + "CUDA"
envNVDisableRequire = "NVIDIA_DISABLE_REQUIRE"
envNVVisibleDevices = "NVIDIA_VISIBLE_DEVICES"
envNVMigConfigDevices = "NVIDIA_MIG_CONFIG_DEVICES"
envNVMigMonitorDevices = "NVIDIA_MIG_MONITOR_DEVICES"
envNVDriverCapabilities = "NVIDIA_DRIVER_CAPABILITIES"
)
const (
allDriverCapabilities = "compute,compat32,graphics,utility,video,display,ngx"
defaultDriverCapabilities = "utility,compute"
)
const (
capSysAdmin = "CAP_SYS_ADMIN"
)
const (
deviceListAsVolumeMountsRoot = "/var/run/nvidia-container-devices"
)
type nvidiaConfig struct {
Devices string
MigConfigDevices string
MigMonitorDevices string
DriverCapabilities string
Requirements []string
DisableRequire bool
}
type containerConfig struct {
Pid int
Rootfs string
Env map[string]string
Nvidia *nvidiaConfig
}
// Root from OCI runtime spec
// github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L94-L100
type Root struct {
Path string `json:"path"`
}
// Process from OCI runtime spec
// github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L30-L57
type Process struct {
Env []string `json:"env,omitempty"`
Capabilities *json.RawMessage `json:"capabilities,omitempty" platform:"linux"`
}
// LinuxCapabilities from OCI runtime spec
// https://github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L61
type LinuxCapabilities struct {
Bounding []string `json:"bounding,omitempty" platform:"linux"`
Effective []string `json:"effective,omitempty" platform:"linux"`
Inheritable []string `json:"inheritable,omitempty" platform:"linux"`
Permitted []string `json:"permitted,omitempty" platform:"linux"`
Ambient []string `json:"ambient,omitempty" platform:"linux"`
}
// Mount from OCI runtime spec
// https://github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L103
type Mount struct {
Destination string `json:"destination"`
Type string `json:"type,omitempty" platform:"linux,solaris"`
Source string `json:"source,omitempty"`
Options []string `json:"options,omitempty"`
}
// Spec from OCI runtime spec
// We use pointers to structs, similarly to the latest version of runtime-spec:
// https://github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L5-L28
type Spec struct {
Version *string `json:"ociVersion"`
Process *Process `json:"process,omitempty"`
Root *Root `json:"root,omitempty"`
Mounts []Mount `json:"mounts,omitempty"`
}
// HookState holds state information about the hook
type HookState struct {
Pid int `json:"pid,omitempty"`
// After 17.06, runc is using the runtime spec:
// github.com/docker/runc/blob/17.06/libcontainer/configs/config.go#L262-L263
// github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/state.go#L3-L17
Bundle string `json:"bundle"`
// Before 17.06, runc used a custom struct that didn't conform to the spec:
// github.com/docker/runc/blob/17.03.x/libcontainer/configs/config.go#L245-L252
BundlePath string `json:"bundlePath"`
}
func parseCudaVersion(cudaVersion string) (vmaj, vmin, vpatch uint32) {
if _, err := fmt.Sscanf(cudaVersion, "%d.%d.%d\n", &vmaj, &vmin, &vpatch); err != nil {
vpatch = 0
if _, err := fmt.Sscanf(cudaVersion, "%d.%d\n", &vmaj, &vmin); err != nil {
vmin = 0
if _, err := fmt.Sscanf(cudaVersion, "%d\n", &vmaj); err != nil {
log.Panicln("invalid CUDA version:", cudaVersion)
}
}
}
return
}
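// Worked examples of the fallback parsing above (illustrative, derived from the
// Sscanf calls): parseCudaVersion("11.4.2") yields (11, 4, 2), "9.0" yields
// (9, 0, 0), "10" yields (10, 0, 0), and a non-numeric value panics.
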
func getEnvMap(e []string) (m map[string]string) {
m = make(map[string]string)
for _, s := range e {
p := strings.SplitN(s, "=", 2)
if len(p) != 2 {
log.Panicln("environment error")
}
m[p[0]] = p[1]
}
return
}
func loadSpec(path string) (spec *Spec) {
f, err := os.Open(path)
if err != nil {
log.Panicln("could not open OCI spec:", err)
}
defer f.Close()
if err = json.NewDecoder(f).Decode(&spec); err != nil {
log.Panicln("could not decode OCI spec:", err)
}
if spec.Version == nil {
log.Panicln("Version is empty in OCI spec")
}
if spec.Process == nil {
log.Panicln("Process is empty in OCI spec")
}
if spec.Root == nil {
log.Panicln("Root is empty in OCI spec")
}
return
}
func isPrivileged(s *Spec) bool {
if s.Process.Capabilities == nil {
return false
}
var caps []string
// If v1.0.0-rc1 <= OCI version < v1.0.0-rc5, parse s.Process.Capabilities as:
// github.com/opencontainers/runtime-spec/blob/v1.0.0-rc1/specs-go/config.go#L30-L54
rc1cmp := semver.Compare("v"+*s.Version, "v1.0.0-rc1")
rc5cmp := semver.Compare("v"+*s.Version, "v1.0.0-rc5")
if (rc1cmp == 1 || rc1cmp == 0) && (rc5cmp == -1) {
err := json.Unmarshal(*s.Process.Capabilities, &caps)
if err != nil {
log.Panicln("could not decode Process.Capabilities in OCI spec:", err)
}
// Otherwise, parse s.Process.Capabilities as:
// github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L30-L54
} else {
var lc LinuxCapabilities
err := json.Unmarshal(*s.Process.Capabilities, &lc)
if err != nil {
log.Panicln("could not decode Process.Capabilities in OCI spec:", err)
}
// We only make sure that the bounding capability set has
// CAP_SYS_ADMIN. This allows us to make sure that the container was
// actually started as '--privileged', but also allows non-root users to
// access the privileged NVIDIA capabilities.
caps = lc.Bounding
}
for _, c := range caps {
if c == capSysAdmin {
return true
}
}
return false
}
func isLegacyCUDAImage(env map[string]string) bool {
legacyCudaVersion := env[envCUDAVersion]
cudaRequire := env[envNVRequireCUDA]
return len(legacyCudaVersion) > 0 && len(cudaRequire) == 0
}
func getDevicesFromEnvvar(env map[string]string, legacyImage bool) *string {
// Build a list of envvars to consider.
envVars := []string{envNVVisibleDevices}
if envSwarmGPU != nil {
// The Swarm envvar has higher precedence.
envVars = append([]string{*envSwarmGPU}, envVars...)
}
// Grab a reference to devices from the first envvar
// in the list that actually exists in the environment.
var devices *string
for _, envVar := range envVars {
if devs, ok := env[envVar]; ok {
devices = &devs
break
}
}
// Environment variable unset with legacy image: default to "all".
if devices == nil && legacyImage {
all := "all"
return &all
}
// Environment variable unset or empty or "void": return nil
if devices == nil || len(*devices) == 0 || *devices == "void" {
return nil
}
// Environment variable set to "none": reset to "".
if *devices == "none" {
empty := ""
return &empty
}
// Any other value.
return devices
}
func getDevicesFromMounts(mounts []Mount) *string {
var devices []string
for _, m := range mounts {
root := filepath.Clean(deviceListAsVolumeMountsRoot)
source := filepath.Clean(m.Source)
destination := filepath.Clean(m.Destination)
// Only consider mounts whose host volume is /dev/null
if source != "/dev/null" {
continue
}
// Only consider container mount points that begin with 'root'
if len(destination) < len(root) {
continue
}
if destination[:len(root)] != root {
continue
}
// Grab the full path beyond 'root' and add it to the list of devices
device := destination[len(root):]
if len(device) > 0 && device[0] == '/' {
device = device[1:]
}
if len(device) == 0 {
continue
}
devices = append(devices, device)
}
if devices == nil {
return nil
}
ret := strings.Join(devices, ",")
return &ret
}
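// Illustrative example (not part of the original source): with
// AcceptDeviceListAsVolumeMounts enabled, a mount entry such as
//   Mount{Source: "/dev/null", Destination: "/var/run/nvidia-container-devices/GPU-0"}
// makes getDevicesFromMounts return a pointer to "GPU-0"; mounts whose source is
// not /dev/null or whose destination lies outside that root are ignored.
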
func getDevices(hookConfig *HookConfig, env map[string]string, mounts []Mount, privileged bool, legacyImage bool) *string {
// If enabled, try and get the device list from volume mounts first
if hookConfig.AcceptDeviceListAsVolumeMounts {
devices := getDevicesFromMounts(mounts)
if devices != nil {
return devices
}
}
// Fallback to reading from the environment variable if privileges are correct
devices := getDevicesFromEnvvar(env, legacyImage)
if devices == nil {
return nil
}
if privileged || hookConfig.AcceptEnvvarUnprivileged {
return devices
}
configName := hookConfig.getConfigOption("AcceptEnvvarUnprivileged")
log.Printf("Ignoring devices specified in NVIDIA_VISIBLE_DEVICES (privileged=%v, %v=%v) ", privileged, configName, hookConfig.AcceptEnvvarUnprivileged)
return nil
}
func getMigConfigDevices(env map[string]string) *string {
if devices, ok := env[envNVMigConfigDevices]; ok {
return &devices
}
return nil
}
func getMigMonitorDevices(env map[string]string) *string {
if devices, ok := env[envNVMigMonitorDevices]; ok {
return &devices
}
return nil
}
func getDriverCapabilities(env map[string]string, legacyImage bool) *string {
// Grab a reference to the capabilities from the envvar
// if it actually exists in the environment.
var capabilities *string
if caps, ok := env[envNVDriverCapabilities]; ok {
capabilities = &caps
}
// Environment variable unset with legacy image: set all capabilities.
if capabilities == nil && legacyImage {
allCaps := allDriverCapabilities
return &allCaps
}
// Environment variable unset or set but empty: set default capabilities.
if capabilities == nil || len(*capabilities) == 0 {
defaultCaps := defaultDriverCapabilities
return &defaultCaps
}
// Environment variable set to "all": set all capabilities.
if *capabilities == "all" {
allCaps := allDriverCapabilities
return &allCaps
}
// Any other value
return capabilities
}
func getRequirements(env map[string]string, legacyImage bool) []string {
// All variables with the "NVIDIA_REQUIRE_" prefix are passed to nvidia-container-cli
var requirements []string
for name, value := range env {
if strings.HasPrefix(name, envNVRequirePrefix) {
requirements = append(requirements, value)
}
}
if legacyImage {
vmaj, vmin, _ := parseCudaVersion(env[envCUDAVersion])
cudaRequire := fmt.Sprintf("cuda>=%d.%d", vmaj, vmin)
requirements = append(requirements, cudaRequire)
}
return requirements
}
func getNvidiaConfig(hookConfig *HookConfig, env map[string]string, mounts []Mount, privileged bool) *nvidiaConfig {
legacyImage := isLegacyCUDAImage(env)
var devices string
if d := getDevices(hookConfig, env, mounts, privileged, legacyImage); d != nil {
devices = *d
} else {
// 'nil' devices means this is not a GPU container.
return nil
}
var migConfigDevices string
if d := getMigConfigDevices(env); d != nil {
migConfigDevices = *d
}
if !privileged && migConfigDevices != "" {
log.Panicln("cannot set MIG_CONFIG_DEVICES in non privileged container")
}
var migMonitorDevices string
if d := getMigMonitorDevices(env); d != nil {
migMonitorDevices = *d
}
if !privileged && migMonitorDevices != "" {
log.Panicln("cannot set MIG_MONITOR_DEVICES in non privileged container")
}
var driverCapabilities string
if c := getDriverCapabilities(env, legacyImage); c != nil {
driverCapabilities = *c
}
requirements := getRequirements(env, legacyImage)
// Don't fail on invalid values.
disableRequire, _ := strconv.ParseBool(env[envNVDisableRequire])
return &nvidiaConfig{
Devices: devices,
MigConfigDevices: migConfigDevices,
MigMonitorDevices: migMonitorDevices,
DriverCapabilities: driverCapabilities,
Requirements: requirements,
DisableRequire: disableRequire,
}
}
func getContainerConfig(hook HookConfig) (config containerConfig) {
var h HookState
d := json.NewDecoder(os.Stdin)
if err := d.Decode(&h); err != nil {
log.Panicln("could not decode container state:", err)
}
b := h.Bundle
if len(b) == 0 {
b = h.BundlePath
}
s := loadSpec(path.Join(b, "config.json"))
env := getEnvMap(s.Process.Env)
privileged := isPrivileged(s)
envSwarmGPU = hook.SwarmResource
return containerConfig{
Pid: h.Pid,
Rootfs: s.Root.Path,
Env: env,
Nvidia: getNvidiaConfig(&hook, env, s.Mounts, privileged),
}
}


@@ -0,0 +1,824 @@
package main
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
func TestGetNvidiaConfig(t *testing.T) {
var tests = []struct {
description string
env map[string]string
privileged bool
expectedConfig *nvidiaConfig
expectedPanic bool
}{
{
description: "No environment, unprivileged",
env: map[string]string{},
privileged: false,
expectedConfig: nil,
},
{
description: "No environment, privileged",
env: map[string]string{},
privileged: true,
expectedConfig: nil,
},
{
description: "Legacy image, no devices, no capabilities, no requirements",
env: map[string]string{
envCUDAVersion: "9.0",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "all",
DriverCapabilities: allDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Legacy image, devices 'all', no capabilities, no requirements",
env: map[string]string{
envCUDAVersion: "9.0",
envNVVisibleDevices: "all",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "all",
DriverCapabilities: allDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Legacy image, devices 'empty', no capabilities, no requirements",
env: map[string]string{
envCUDAVersion: "9.0",
envNVVisibleDevices: "",
},
privileged: false,
expectedConfig: nil,
},
{
description: "Legacy image, devices 'void', no capabilities, no requirements",
env: map[string]string{
envCUDAVersion: "9.0",
envNVVisibleDevices: "void",
},
privileged: false,
expectedConfig: nil,
},
{
description: "Legacy image, devices 'none', no capabilities, no requirements",
env: map[string]string{
envCUDAVersion: "9.0",
envNVVisibleDevices: "none",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "",
DriverCapabilities: allDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Legacy image, devices set, no capabilities, no requirements",
env: map[string]string{
envCUDAVersion: "9.0",
envNVVisibleDevices: "gpu0,gpu1",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: allDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Legacy image, devices set, capabilities 'empty', no requirements",
env: map[string]string{
envCUDAVersion: "9.0",
envNVVisibleDevices: "gpu0,gpu1",
envNVDriverCapabilities: "",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: defaultDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Legacy image, devices set, capabilities 'all', no requirements",
env: map[string]string{
envCUDAVersion: "9.0",
envNVVisibleDevices: "gpu0,gpu1",
envNVDriverCapabilities: "all",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: allDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Legacy image, devices set, capabilities set, no requirements",
env: map[string]string{
envCUDAVersion: "9.0",
envNVVisibleDevices: "gpu0,gpu1",
envNVDriverCapabilities: "cap0,cap1",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: "cap0,cap1",
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Legacy image, devices set, capabilities set, requirements set",
env: map[string]string{
envCUDAVersion: "9.0",
envNVVisibleDevices: "gpu0,gpu1",
envNVDriverCapabilities: "cap0,cap1",
envNVRequirePrefix + "REQ0": "req0=true",
envNVRequirePrefix + "REQ1": "req1=false",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: "cap0,cap1",
Requirements: []string{"cuda>=9.0", "req0=true", "req1=false"},
DisableRequire: false,
},
},
{
description: "Legacy image, devices set, capabilities set, requirements set, disable requirements",
env: map[string]string{
envCUDAVersion: "9.0",
envNVVisibleDevices: "gpu0,gpu1",
envNVDriverCapabilities: "cap0,cap1",
envNVRequirePrefix + "REQ0": "req0=true",
envNVRequirePrefix + "REQ1": "req1=false",
envNVDisableRequire: "true",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: "cap0,cap1",
Requirements: []string{"cuda>=9.0", "req0=true", "req1=false"},
DisableRequire: true,
},
},
{
description: "Modern image, no devices, no capabilities, no requirements, no envCUDAVersion",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
},
privileged: false,
expectedConfig: nil,
},
{
description: "Modern image, no devices, no capabilities, no requirement, envCUDAVersion set",
env: map[string]string{
envCUDAVersion: "9.0",
envNVRequireCUDA: "cuda>=9.0",
},
privileged: false,
expectedConfig: nil,
},
{
description: "Modern image, devices 'all', no capabilities, no requirements",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "all",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "all",
DriverCapabilities: defaultDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Modern image, devices 'empty', no capabilities, no requirements",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "",
},
privileged: false,
expectedConfig: nil,
},
{
description: "Modern image, devices 'void', no capabilities, no requirements",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "void",
},
privileged: false,
expectedConfig: nil,
},
{
description: "Modern image, devices 'none', no capabilities, no requirements",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "none",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "",
DriverCapabilities: defaultDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Modern image, devices set, no capabilities, no requirements",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "gpu0,gpu1",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: defaultDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Modern image, devices set, capabilities 'empty', no requirements",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "gpu0,gpu1",
envNVDriverCapabilities: "",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: defaultDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Modern image, devices set, capabilities 'all', no requirements",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "gpu0,gpu1",
envNVDriverCapabilities: "all",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: allDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Modern image, devices set, capabilities set, no requirements",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "gpu0,gpu1",
envNVDriverCapabilities: "cap0,cap1",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: "cap0,cap1",
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Modern image, devices set, capabilities set, requirements set",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "gpu0,gpu1",
envNVDriverCapabilities: "cap0,cap1",
envNVRequirePrefix + "REQ0": "req0=true",
envNVRequirePrefix + "REQ1": "req1=false",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: "cap0,cap1",
Requirements: []string{"cuda>=9.0", "req0=true", "req1=false"},
DisableRequire: false,
},
},
{
description: "Modern image, devices set, capabilities set, requirements set, disable requirements",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "gpu0,gpu1",
envNVDriverCapabilities: "cap0,cap1",
envNVRequirePrefix + "REQ0": "req0=true",
envNVRequirePrefix + "REQ1": "req1=false",
envNVDisableRequire: "true",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "gpu0,gpu1",
DriverCapabilities: "cap0,cap1",
Requirements: []string{"cuda>=9.0", "req0=true", "req1=false"},
DisableRequire: true,
},
},
{
description: "No cuda envs, devices 'all'",
env: map[string]string{
envNVVisibleDevices: "all",
},
privileged: false,
expectedConfig: &nvidiaConfig{
Devices: "all",
DriverCapabilities: defaultDriverCapabilities,
Requirements: []string{},
DisableRequire: false,
},
},
{
description: "Modern image, devices 'all', migConfig set, privileged",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "all",
envNVMigConfigDevices: "mig0,mig1",
},
privileged: true,
expectedConfig: &nvidiaConfig{
Devices: "all",
MigConfigDevices: "mig0,mig1",
DriverCapabilities: defaultDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Modern image, devices 'all', migConfig set, unprivileged",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "all",
envNVMigConfigDevices: "mig0,mig1",
},
privileged: false,
expectedPanic: true,
},
{
description: "Modern image, devices 'all', migMonitor set, privileged",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "all",
envNVMigMonitorDevices: "mig0,mig1",
},
privileged: true,
expectedConfig: &nvidiaConfig{
Devices: "all",
MigMonitorDevices: "mig0,mig1",
DriverCapabilities: defaultDriverCapabilities,
Requirements: []string{"cuda>=9.0"},
DisableRequire: false,
},
},
{
description: "Modern image, devices 'all', migMonitor set, unprivileged",
env: map[string]string{
envNVRequireCUDA: "cuda>=9.0",
envNVVisibleDevices: "all",
envNVMigMonitorDevices: "mig0,mig1",
},
privileged: false,
expectedPanic: true,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
// Wrap the call to getNvidiaConfig() in a closure.
var config *nvidiaConfig
getConfig := func() {
hookConfig := getDefaultHookConfig()
config = getNvidiaConfig(&hookConfig, tc.env, nil, tc.privileged)
}
// For any tests that are expected to panic, make sure they do.
if tc.expectedPanic {
require.Panics(t, getConfig)
return
}
// For all other tests, just grab the config
getConfig()
// And start comparing the test results to the expected results.
if tc.expectedConfig == nil {
require.Nil(t, config, tc.description)
return
}
require.NotNil(t, config, tc.description)
require.Equal(t, tc.expectedConfig.Devices, config.Devices)
require.Equal(t, tc.expectedConfig.MigConfigDevices, config.MigConfigDevices)
require.Equal(t, tc.expectedConfig.MigMonitorDevices, config.MigMonitorDevices)
require.Equal(t, tc.expectedConfig.DriverCapabilities, config.DriverCapabilities)
require.ElementsMatch(t, tc.expectedConfig.Requirements, config.Requirements)
require.Equal(t, tc.expectedConfig.DisableRequire, config.DisableRequire)
})
}
}
func TestGetDevicesFromMounts(t *testing.T) {
var tests = []struct {
description string
mounts []Mount
expectedDevices *string
}{
{
description: "No mounts",
mounts: nil,
expectedDevices: nil,
},
{
description: "Host path is not /dev/null",
mounts: []Mount{
{
Source: "/not/dev/null",
Destination: filepath.Join(deviceListAsVolumeMountsRoot, "GPU0"),
},
},
expectedDevices: nil,
},
{
description: "Container path is not prefixed by 'root'",
mounts: []Mount{
{
Source: "/dev/null",
Destination: filepath.Join("/other/prefix", "GPU0"),
},
},
expectedDevices: nil,
},
{
description: "Container path is only 'root'",
mounts: []Mount{
{
Source: "/dev/null",
Destination: deviceListAsVolumeMountsRoot,
},
},
expectedDevices: nil,
},
{
description: "Discover 2 devices",
mounts: []Mount{
{
Source: "/dev/null",
Destination: filepath.Join(deviceListAsVolumeMountsRoot, "GPU0"),
},
{
Source: "/dev/null",
Destination: filepath.Join(deviceListAsVolumeMountsRoot, "GPU1"),
},
},
expectedDevices: &[]string{"GPU0,GPU1"}[0],
},
{
description: "Discover 2 devices with slashes in the name",
mounts: []Mount{
{
Source: "/dev/null",
Destination: filepath.Join(deviceListAsVolumeMountsRoot, "GPU0-MIG0/0/1"),
},
{
Source: "/dev/null",
Destination: filepath.Join(deviceListAsVolumeMountsRoot, "GPU1-MIG0/0/1"),
},
},
expectedDevices: &[]string{"GPU0-MIG0/0/1,GPU1-MIG0/0/1"}[0],
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
devices := getDevicesFromMounts(tc.mounts)
require.Equal(t, tc.expectedDevices, devices)
})
}
}
func TestDeviceListSourcePriority(t *testing.T) {
var tests = []struct {
description string
mountDevices []Mount
envvarDevices string
privileged bool
acceptUnprivileged bool
acceptMounts bool
expectedDevices *string
}{
{
description: "Mount devices, unprivileged, no accept unprivileged",
mountDevices: []Mount{
{
Source: "/dev/null",
Destination: filepath.Join(deviceListAsVolumeMountsRoot, "GPU0"),
},
{
Source: "/dev/null",
Destination: filepath.Join(deviceListAsVolumeMountsRoot, "GPU1"),
},
},
envvarDevices: "GPU2,GPU3",
privileged: false,
acceptUnprivileged: false,
acceptMounts: true,
expectedDevices: &[]string{"GPU0,GPU1"}[0],
},
{
description: "No mount devices, unprivileged, no accept unprivileged",
mountDevices: nil,
envvarDevices: "GPU0,GPU1",
privileged: false,
acceptUnprivileged: false,
acceptMounts: true,
expectedDevices: nil,
},
{
description: "No mount devices, privileged, no accept unprivileged",
mountDevices: nil,
envvarDevices: "GPU0,GPU1",
privileged: true,
acceptUnprivileged: false,
acceptMounts: true,
expectedDevices: &[]string{"GPU0,GPU1"}[0],
},
{
description: "No mount devices, unprivileged, accept unprivileged",
mountDevices: nil,
envvarDevices: "GPU0,GPU1",
privileged: false,
acceptUnprivileged: true,
acceptMounts: true,
expectedDevices: &[]string{"GPU0,GPU1"}[0],
},
{
description: "Mount devices, unprivileged, accept unprivileged, no accept mounts",
mountDevices: []Mount{
{
Source: "/dev/null",
Destination: filepath.Join(deviceListAsVolumeMountsRoot, "GPU0"),
},
{
Source: "/dev/null",
Destination: filepath.Join(deviceListAsVolumeMountsRoot, "GPU1"),
},
},
envvarDevices: "GPU2,GPU3",
privileged: false,
acceptUnprivileged: true,
acceptMounts: false,
expectedDevices: &[]string{"GPU2,GPU3"}[0],
},
{
description: "Mount devices, unprivileged, no accept unprivileged, no accept mounts",
mountDevices: []Mount{
{
Source: "/dev/null",
Destination: filepath.Join(deviceListAsVolumeMountsRoot, "GPU0"),
},
{
Source: "/dev/null",
Destination: filepath.Join(deviceListAsVolumeMountsRoot, "GPU1"),
},
},
envvarDevices: "GPU2,GPU3",
privileged: false,
acceptUnprivileged: false,
acceptMounts: false,
expectedDevices: nil,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
// Wrap the call to getDevices() in a closure.
var devices *string
getDevices := func() {
env := map[string]string{
envNVVisibleDevices: tc.envvarDevices,
}
hookConfig := getDefaultHookConfig()
hookConfig.AcceptEnvvarUnprivileged = tc.acceptUnprivileged
hookConfig.AcceptDeviceListAsVolumeMounts = tc.acceptMounts
devices = getDevices(&hookConfig, env, tc.mountDevices, tc.privileged, false)
}
// Grab the devices and check the results
getDevices()
require.Equal(t, tc.expectedDevices, devices)
})
}
}
func TestGetDevicesFromEnvvar(t *testing.T) {
all := "all"
empty := ""
envDockerResourceGPUs := "DOCKER_RESOURCE_GPUS"
gpuID := "GPU-12345"
anotherGPUID := "GPU-67890"
var tests = []struct {
description string
envSwarmGPU *string
env map[string]string
legacyImage bool
expectedDevices *string
}{
{
description: "empty env returns nil for non-legacy image",
},
{
description: "blank NVIDIA_VISIBLE_DEVICES returns nil for non-legacy image",
env: map[string]string{
envNVVisibleDevices: "",
},
},
{
description: "'void' NVIDIA_VISIBLE_DEVICES returns nil for non-legacy image",
env: map[string]string{
envNVVisibleDevices: "void",
},
},
{
description: "'none' NVIDIA_VISIBLE_DEVICES returns empty for non-legacy image",
env: map[string]string{
envNVVisibleDevices: "none",
},
expectedDevices: &empty,
},
{
description: "NVIDIA_VISIBLE_DEVICES set returns value for non-legacy image",
env: map[string]string{
envNVVisibleDevices: gpuID,
},
expectedDevices: &gpuID,
},
{
description: "NVIDIA_VISIBLE_DEVICES set returns value for legacy image",
env: map[string]string{
envNVVisibleDevices: gpuID,
},
legacyImage: true,
expectedDevices: &gpuID,
},
{
description: "empty env returns all for legacy image",
legacyImage: true,
expectedDevices: &all,
},
// Add the `DOCKER_RESOURCE_GPUS` envvar and ensure that this is ignored when
// not enabled
{
description: "missing NVIDIA_VISIBLE_DEVICES returns nil for non-legacy image",
env: map[string]string{
envDockerResourceGPUs: anotherGPUID,
},
},
{
description: "blank NVIDIA_VISIBLE_DEVICES returns nil for non-legacy image",
env: map[string]string{
envNVVisibleDevices: "",
envDockerResourceGPUs: anotherGPUID,
},
},
{
description: "'void' NVIDIA_VISIBLE_DEVICES returns nil for non-legacy image",
env: map[string]string{
envNVVisibleDevices: "void",
envDockerResourceGPUs: anotherGPUID,
},
},
{
description: "'none' NVIDIA_VISIBLE_DEVICES returns empty for non-legacy image",
env: map[string]string{
envNVVisibleDevices: "none",
envDockerResourceGPUs: anotherGPUID,
},
expectedDevices: &empty,
},
{
description: "NVIDIA_VISIBLE_DEVICES set returns value for non-legacy image",
env: map[string]string{
envNVVisibleDevices: gpuID,
envDockerResourceGPUs: anotherGPUID,
},
expectedDevices: &gpuID,
},
{
description: "NVIDIA_VISIBLE_DEVICES set returns value for legacy image",
env: map[string]string{
envNVVisibleDevices: gpuID,
envDockerResourceGPUs: anotherGPUID,
},
legacyImage: true,
expectedDevices: &gpuID,
},
{
description: "empty env returns all for legacy image",
env: map[string]string{
envDockerResourceGPUs: anotherGPUID,
},
legacyImage: true,
expectedDevices: &all,
},
// Add the `DOCKER_RESOURCE_GPUS` envvar and ensure that this is selected when
// enabled
{
description: "empty env returns nil for non-legacy image",
envSwarmGPU: &envDockerResourceGPUs,
},
{
description: "blank DOCKER_RESOURCE_GPUS returns nil for non-legacy image",
envSwarmGPU: &envDockerResourceGPUs,
env: map[string]string{
envDockerResourceGPUs: "",
},
},
{
description: "'void' DOCKER_RESOURCE_GPUS returns nil for non-legacy image",
envSwarmGPU: &envDockerResourceGPUs,
env: map[string]string{
envDockerResourceGPUs: "void",
},
},
{
description: "'none' DOCKER_RESOURCE_GPUS returns empty for non-legacy image",
envSwarmGPU: &envDockerResourceGPUs,
env: map[string]string{
envDockerResourceGPUs: "none",
},
expectedDevices: &empty,
},
{
description: "DOCKER_RESOURCE_GPUS set returns value for non-legacy image",
envSwarmGPU: &envDockerResourceGPUs,
env: map[string]string{
envDockerResourceGPUs: gpuID,
},
expectedDevices: &gpuID,
},
{
description: "DOCKER_RESOURCE_GPUS set returns value for legacy image",
envSwarmGPU: &envDockerResourceGPUs,
env: map[string]string{
envDockerResourceGPUs: gpuID,
},
legacyImage: true,
expectedDevices: &gpuID,
},
{
description: "DOCKER_RESOURCE_GPUS is selected if present",
envSwarmGPU: &envDockerResourceGPUs,
env: map[string]string{
envDockerResourceGPUs: anotherGPUID,
},
expectedDevices: &anotherGPUID,
},
{
description: "DOCKER_RESOURCE_GPUS overrides NVIDIA_VISIBLE_DEVICES if present",
envSwarmGPU: &envDockerResourceGPUs,
env: map[string]string{
envNVVisibleDevices: gpuID,
envDockerResourceGPUs: anotherGPUID,
},
expectedDevices: &anotherGPUID,
},
}
for i, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
envSwarmGPU = tc.envSwarmGPU
devices := getDevicesFromEnvvar(tc.env, tc.legacyImage)
if tc.expectedDevices == nil {
require.Nil(t, devices, "%d: %v", i, tc)
return
}
require.NotNil(t, devices, "%d: %v", i, tc)
require.Equal(t, *tc.expectedDevices, *devices, "%d: %v", i, tc)
})
}
}

View File

@@ -4,6 +4,7 @@ import (
"log"
"os"
"path"
"reflect"
"github.com/BurntSushi/toml"
)
@@ -18,7 +19,7 @@ var defaultPaths = [...]string{
configPath,
}
// CLIConfig: options for nvidia-container-cli.
// CLIConfig : options for nvidia-container-cli.
type CLIConfig struct {
Root *string `toml:"root"`
Path *string `toml:"path"`
@@ -26,22 +27,28 @@ type CLIConfig struct {
Debug *string `toml:"debug"`
Ldcache *string `toml:"ldcache"`
LoadKmods bool `toml:"load-kmods"`
NoPivot bool `toml:"no-pivot"`
NoCgroups bool `toml:"no-cgroups"`
User *string `toml:"user"`
Ldconfig *string `toml:"ldconfig"`
}
// HookConfig : options for the nvidia-container-toolkit.
type HookConfig struct {
DisableRequire bool `toml:"disable-require"`
SwarmResource *string `toml:"swarm-resource"`
DisableRequire bool `toml:"disable-require"`
SwarmResource *string `toml:"swarm-resource"`
AcceptEnvvarUnprivileged bool `toml:"accept-nvidia-visible-devices-envvar-when-unprivileged"`
AcceptDeviceListAsVolumeMounts bool `toml:"accept-nvidia-visible-devices-as-volume-mounts"`
NvidiaContainerCLI CLIConfig `toml:"nvidia-container-cli"`
}
func getDefaultHookConfig() (config HookConfig) {
return HookConfig{
DisableRequire: false,
SwarmResource: nil,
DisableRequire: false,
SwarmResource: nil,
AcceptEnvvarUnprivileged: true,
AcceptDeviceListAsVolumeMounts: false,
NvidiaContainerCLI: CLIConfig{
Root: nil,
Path: nil,
@@ -49,6 +56,7 @@ func getDefaultHookConfig() (config HookConfig) {
Debug: nil,
Ldcache: nil,
LoadKmods: true,
NoPivot: false,
NoCgroups: false,
User: nil,
Ldconfig: nil,
@@ -79,3 +87,18 @@ func getHookConfig() (config HookConfig) {
return config
}
// getConfigOption returns the toml config option associated with the
// specified struct field.
func (c HookConfig) getConfigOption(fieldName string) string {
t := reflect.TypeOf(c)
f, ok := t.FieldByName(fieldName)
if !ok {
return fieldName
}
v, ok := f.Tag.Lookup("toml")
if !ok {
return fieldName
}
return v
}
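For orientation, the new getConfigOption helper above maps a Go struct field name to its toml tag via reflection, falling back to the field name when no tag is found. A minimal usage sketch, assuming it sits in the same package as HookConfig (the function name and the fmt import are only for illustration):

```go
// Illustration only: assumes the same package as HookConfig above.
func exampleConfigOptionLookup() {
	cfg := getDefaultHookConfig()

	// "SwarmResource" is declared with the tag `toml:"swarm-resource"`,
	// so the tag value is returned.
	fmt.Println(cfg.getConfigOption("SwarmResource")) // swarm-resource

	// An unknown field name falls back to the name itself.
	fmt.Println(cfg.getConfigOption("NoSuchField")) // NoSuchField
}
```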

View File

@@ -0,0 +1,134 @@
package main
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
)
func TestParseCudaVersionValid(t *testing.T) {
var tests = []struct {
version string
expected [3]uint32
}{
{"0", [3]uint32{0, 0, 0}},
{"8", [3]uint32{8, 0, 0}},
{"7.5", [3]uint32{7, 5, 0}},
{"9.0.116", [3]uint32{9, 0, 116}},
{"4294967295.4294967295.4294967295", [3]uint32{4294967295, 4294967295, 4294967295}},
}
for i, c := range tests {
vmaj, vmin, vpatch := parseCudaVersion(c.version)
version := [3]uint32{vmaj, vmin, vpatch}
require.Equal(t, c.expected, version, "%d: %v", i, c)
}
}
func TestParseCudaVersionInvalid(t *testing.T) {
var tests = []string{
"foo",
"foo.5.10",
"9.0.116.50",
"9.0.116foo",
"7.foo",
"9.0.bar",
"9.4294967296",
"9.0.116.",
"9..0",
"9.",
".5.10",
"-9",
"+9",
"-9.1.116",
"-9.-1.-116",
}
for _, c := range tests {
require.Panics(t, func() {
parseCudaVersion(c)
}, "parseCudaVersion(%v)", c)
}
}
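The two tests above pin down parseCudaVersion's behaviour: one to three dot-separated base-10 uint32 components are accepted, and anything else panics. A minimal standalone sketch consistent with those cases (the function name is made up here, and the toolkit's actual implementation may differ in details such as error messages):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCudaVersionSketch accepts "major[.minor[.patch]]" where each component
// is a base-10 uint32, and panics on any other input. It mirrors the test
// cases above; it is not the toolkit's actual implementation.
func parseCudaVersionSketch(version string) (uint32, uint32, uint32) {
	parts := strings.Split(version, ".")
	if len(parts) > 3 {
		panic(fmt.Errorf("invalid CUDA version %q", version))
	}
	var out [3]uint32
	for i, p := range parts {
		v, err := strconv.ParseUint(p, 10, 32)
		if err != nil {
			panic(fmt.Errorf("invalid CUDA version %q: %v", version, err))
		}
		out[i] = uint32(v)
	}
	return out[0], out[1], out[2]
}

func main() {
	maj, min, patch := parseCudaVersionSketch("9.0.116")
	fmt.Println(maj, min, patch) // 9 0 116
}
```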
func TestIsPrivileged(t *testing.T) {
var tests = []struct {
spec string
expected bool
}{
{
`
{
"ociVersion": "1.0.0",
"process": {
"capabilities": {
"bounding": [ "CAP_SYS_ADMIN" ]
}
}
}
`,
true,
},
{
`
{
"ociVersion": "1.0.0",
"process": {
"capabilities": {
"bounding": [ "CAP_SYS_OTHER" ]
}
}
}
`,
false,
},
{
`
{
"ociVersion": "1.0.0",
"process": {}
}
`,
false,
},
{
`
{
"ociVersion": "1.0.0-rc2-dev",
"process": {
"capabilities": [ "CAP_SYS_ADMIN" ]
}
}
`,
true,
},
{
`
{
"ociVersion": "1.0.0-rc2-dev",
"process": {
"capabilities": [ "CAP_SYS_OTHER" ]
}
}
`,
false,
},
{
`
{
"ociVersion": "1.0.0-rc2-dev",
"process": {}
}
`,
false,
},
}
for i, tc := range tests {
var spec Spec
_ = json.Unmarshal([]byte(tc.spec), &spec)
privileged := isPrivileged(&spec)
require.Equal(t, tc.expected, privileged, "%d: %v", i, tc)
}
}
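The fixtures above cover both capability layouts seen in OCI runtime specs: the 1.0.0 form nests a "bounding" list under process.capabilities, while the 1.0.0-rc2 form uses a flat list. A standalone sketch of a check that satisfies these cases, with types and names invented purely for illustration (the toolkit's own Spec type differs):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// sketchSpec mirrors only the fields the privilege check needs; the name is
// hypothetical and not the toolkit's Spec type.
type sketchSpec struct {
	Process *struct {
		Capabilities json.RawMessage `json:"capabilities"`
	} `json:"process"`
}

// isPrivilegedSketch reports whether CAP_SYS_ADMIN is in the bounding set,
// accepting both the flat-array form (1.0.0-rc2 specs) and the object form
// with a "bounding" list (1.0.0 specs).
func isPrivilegedSketch(raw []byte) bool {
	var spec sketchSpec
	if err := json.Unmarshal(raw, &spec); err != nil || spec.Process == nil || len(spec.Process.Capabilities) == 0 {
		return false
	}

	var flat []string
	if err := json.Unmarshal(spec.Process.Capabilities, &flat); err == nil {
		return hasCap(flat, "CAP_SYS_ADMIN")
	}

	var nested struct {
		Bounding []string `json:"bounding"`
	}
	if err := json.Unmarshal(spec.Process.Capabilities, &nested); err == nil {
		return hasCap(nested.Bounding, "CAP_SYS_ADMIN")
	}
	return false
}

func hasCap(caps []string, want string) bool {
	for _, c := range caps {
		if c == want {
			return true
		}
	}
	return false
}

func main() {
	spec := `{"ociVersion": "1.0.0", "process": {"capabilities": {"bounding": ["CAP_SYS_ADMIN"]}}}`
	fmt.Println(isPrivilegedSketch([]byte(spec))) // true
}
```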

View File

@@ -101,6 +101,9 @@ func doPrestart() {
if cli.LoadKmods {
args = append(args, "--load-kmods")
}
if cli.NoPivot {
args = append(args, "--no-pivot")
}
if *debugflag {
args = append(args, "--debug=/dev/stderr")
} else if cli.Debug != nil {
@@ -123,8 +126,14 @@ func doPrestart() {
if len(nvidia.Devices) > 0 {
args = append(args, fmt.Sprintf("--device=%s", nvidia.Devices))
}
if len(nvidia.MigConfigDevices) > 0 {
args = append(args, fmt.Sprintf("--mig-config=%s", nvidia.MigConfigDevices))
}
if len(nvidia.MigMonitorDevices) > 0 {
args = append(args, fmt.Sprintf("--mig-monitor=%s", nvidia.MigMonitorDevices))
}
for _, cap := range strings.Split(nvidia.Capabilities, ",") {
for _, cap := range strings.Split(nvidia.DriverCapabilities, ",") {
if len(cap) == 0 {
break
}

View File

@@ -1,5 +1,7 @@
disable-require = false
#swarm-resource = "DOCKER_RESOURCE_GPU"
#accept-nvidia-visible-devices-envvar-when-unprivileged = true
#accept-nvidia-visible-devices-as-volume-mounts = false
[nvidia-container-cli]
#root = "/run/nvidia/driver"

View File

@@ -1,5 +1,7 @@
disable-require = false
#swarm-resource = "DOCKER_RESOURCE_GPU"
#accept-nvidia-visible-devices-envvar-when-unprivileged = true
#accept-nvidia-visible-devices-as-volume-mounts = false
[nvidia-container-cli]
#root = "/run/nvidia/driver"

View File

@@ -1,5 +1,7 @@
disable-require = false
#swarm-resource = "DOCKER_RESOURCE_GPU"
#accept-nvidia-visible-devices-envvar-when-unprivileged = true
#accept-nvidia-visible-devices-as-volume-mounts = false
[nvidia-container-cli]
#root = "/run/nvidia/driver"

View File

@@ -1,5 +1,7 @@
disable-require = false
#swarm-resource = "DOCKER_RESOURCE_GPU"
#accept-nvidia-visible-devices-envvar-when-unprivileged = true
#accept-nvidia-visible-devices-as-volume-mounts = false
[nvidia-container-cli]
#root = "/run/nvidia/driver"

View File

@@ -1,5 +1,7 @@
disable-require = false
#swarm-resource = "DOCKER_RESOURCE_GPU"
#accept-nvidia-visible-devices-envvar-when-unprivileged = true
#accept-nvidia-visible-devices-as-volume-mounts = false
[nvidia-container-cli]
#root = "/run/nvidia/driver"

View File

@@ -1,5 +1,5 @@
ARG VERSION_ID
FROM amazonlinux:${VERSION_ID}
ARG BASEIMAGE
FROM ${BASEIMAGE}
RUN yum install -y \
ca-certificates \
@@ -37,10 +37,10 @@ ENV DIST_DIR=/tmp/nvidia-container-toolkit-$PKG_VERS/SOURCES
RUN mkdir -p $DIST_DIR /dist
# nvidia-container-toolkit
COPY nvidia-container-toolkit/ $GOPATH/src/nvidia-container-toolkit
WORKDIR $GOPATH/src/nvidia-container-toolkit
COPY . .
RUN go get -ldflags "-s -w" -v nvidia-container-toolkit && \
mv $GOPATH/bin/nvidia-container-toolkit $DIST_DIR/nvidia-container-toolkit
RUN make PREFIX=${DIST_DIR} cmds
COPY config/config.toml.amzn $DIST_DIR/config.toml
@@ -53,12 +53,13 @@ COPY oci-nvidia-hook $DIST_DIR/oci-nvidia-hook
COPY oci-nvidia-hook.json $DIST_DIR/oci-nvidia-hook.json
WORKDIR $DIST_DIR/..
COPY pkg/rpm .
COPY packaging/rpm .
CMD arch=$(uname -m) && \
rpmbuild --clean --target=$arch -bb \
-D "_topdir $PWD" \
-D "version $VERSION" \
-D "libnvidia_container_version ${VERSION}-${RELEASE}" \
-D "release $RELEASE" \
SPECS/nvidia-container-toolkit.spec && \
mv RPMS/$arch/*.rpm /dist

View File

@@ -1,10 +1,11 @@
ARG VERSION_ID
FROM centos:${VERSION_ID}
ARG BASEIMAGE
FROM ${BASEIMAGE}
RUN yum install -y \
ca-certificates \
wget \
git \
make \
rpm-build && \
rm -rf /var/cache/yum/*
@@ -36,10 +37,10 @@ ENV DIST_DIR=/tmp/nvidia-container-toolkit-$PKG_VERS/SOURCES
RUN mkdir -p $DIST_DIR /dist
# nvidia-container-toolkit
COPY nvidia-container-toolkit/ $GOPATH/src/nvidia-container-toolkit
WORKDIR $GOPATH/src/nvidia-container-toolkit
COPY . .
RUN go get -ldflags "-s -w" -v nvidia-container-toolkit && \
mv $GOPATH/bin/nvidia-container-toolkit $DIST_DIR/nvidia-container-toolkit
RUN make PREFIX=${DIST_DIR} cmds
COPY config/config.toml.centos $DIST_DIR/config.toml
@@ -50,12 +51,13 @@ COPY oci-nvidia-hook $DIST_DIR/oci-nvidia-hook
COPY oci-nvidia-hook.json $DIST_DIR/oci-nvidia-hook.json
WORKDIR $DIST_DIR/..
COPY pkg/rpm .
COPY packaging/rpm .
CMD arch=$(uname -m) && \
rpmbuild --clean --target=$arch -bb \
-D "_topdir $PWD" \
-D "version $VERSION" \
-D "libnvidia_container_version ${VERSION}-${RELEASE}" \
-D "release $RELEASE" \
SPECS/nvidia-container-toolkit.spec && \
mv RPMS/$arch/*.rpm /dist

View File

@@ -1,6 +1,7 @@
ARG VERSION_ID
FROM debian:${VERSION_ID}
ARG BASEIMAGE
FROM ${BASEIMAGE}
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends \
wget \
ca-certificates \
@@ -44,10 +45,10 @@ ENV DIST_DIR=/tmp/nvidia-container-toolkit-$PKG_VERS
RUN mkdir -p $DIST_DIR /dist
# nvidia-container-toolkit
COPY nvidia-container-toolkit/ $GOPATH/src/nvidia-container-toolkit
WORKDIR $GOPATH/src/nvidia-container-toolkit
COPY . .
RUN go get -ldflags "-s -w" -v nvidia-container-toolkit && \
mv $GOPATH/bin/nvidia-container-toolkit $DIST_DIR/nvidia-container-toolkit
RUN make PREFIX=${DIST_DIR} cmds
COPY config/config.toml.debian $DIST_DIR/config.toml
@@ -57,11 +58,12 @@ RUN if [ "$(lsb_release -cs)" = "jessie" ]; then \
fi
WORKDIR $DIST_DIR
COPY pkg/debian ./debian
COPY packaging/debian ./debian
RUN sed -i "s;@VERSION@;${REVISION};" debian/changelog && \
if [ "$REVISION" != "$(dpkg-parsechangelog --show-field=Version)" ]; then exit 1; fi
CMD export DISTRIB="unstable" && \
debuild -eDISTRIB -eSECTION --dpkg-buildpackage-hook='sh debian/prepare' -i -us -uc -b && \
CMD export DISTRIB="$(lsb_release -cs)" && \
debuild -eDISTRIB -eSECTION -eLIBNVIDIA_CONTAINER_VERSION="${REVISION}" \
--dpkg-buildpackage-hook='sh debian/prepare' -i -us -uc -b && \
mv /tmp/nvidia-container-toolkit_*.deb /dist

20
docker/Dockerfile.devel Normal file
View File

@@ -0,0 +1,20 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ARG GOLANG_VERSION=x.x.x
FROM golang:${GOLANG_VERSION}
RUN go get -u golang.org/x/lint/golint
RUN go get -u github.com/matryer/moq
RUN go get -u github.com/gordonklaus/ineffassign
RUN go get -u github.com/client9/misspell/cmd/misspell

View File

@@ -1,5 +1,5 @@
ARG VERSION_ID
FROM opensuse/leap:${VERSION_ID}
ARG BASEIMAGE
FROM ${BASEIMAGE}
RUN zypper install -y \
ca-certificates \
@@ -36,7 +36,10 @@ ENV DIST_DIR=/tmp/nvidia-container-toolkit-$PKG_VERS/SOURCES
RUN mkdir -p $DIST_DIR /dist
# nvidia-container-toolkit
COPY nvidia-container-toolkit/ $GOPATH/src/nvidia-container-toolkit
WORKDIR $GOPATH/src/nvidia-container-toolkit
COPY . .
RUN make PREFIX=${DIST_DIR} cmds
# Hook for Project Atomic's fork of Docker: https://github.com/projectatomic/docker/tree/docker-1.13.1-rhel#add-dockerhooks-exec-custom-hooks-for-prestartpoststop-containerspatch
COPY oci-nvidia-hook $DIST_DIR/oci-nvidia-hook
@@ -44,18 +47,16 @@ COPY oci-nvidia-hook $DIST_DIR/oci-nvidia-hook
# Hook for libpod/CRI-O: https://github.com/containers/libpod/blob/v0.8.5/pkg/hooks/docs/oci-hooks.5.md
COPY oci-nvidia-hook.json $DIST_DIR/oci-nvidia-hook.json
RUN go get -ldflags "-s -w" -v nvidia-container-toolkit && \
mv $GOPATH/bin/nvidia-container-toolkit $DIST_DIR/nvidia-container-toolkit
COPY config/config.toml.opensuse-leap $DIST_DIR/config.toml
WORKDIR $DIST_DIR/..
COPY pkg/rpm .
COPY packaging/rpm .
CMD arch=$(uname -m) && \
rpmbuild --clean --target=$arch -bb \
-D "_topdir $PWD" \
-D "version $VERSION" \
-D "libnvidia_container_version ${VERSION}-${RELEASE}" \
-D "release $RELEASE" \
SPECS/nvidia-container-toolkit.spec && \
mv RPMS/$arch/*.rpm /dist

View File

@@ -1,6 +1,7 @@
ARG VERSION_ID
FROM ubuntu:${VERSION_ID}
ARG BASEIMAGE
FROM ${BASEIMAGE}
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends \
wget \
ca-certificates \
@@ -42,19 +43,20 @@ ENV DIST_DIR=/tmp/nvidia-container-toolkit-$PKG_VERS
RUN mkdir -p $DIST_DIR /dist
# nvidia-container-toolkit
COPY nvidia-container-toolkit/ $GOPATH/src/nvidia-container-toolkit
WORKDIR $GOPATH/src/nvidia-container-toolkit
COPY . .
RUN go get -ldflags "-s -w" -v nvidia-container-toolkit && \
mv $GOPATH/bin/nvidia-container-toolkit $DIST_DIR/nvidia-container-toolkit
RUN make PREFIX=${DIST_DIR} cmds
COPY config/config.toml.ubuntu $DIST_DIR/config.toml
WORKDIR $DIST_DIR
COPY pkg/debian ./debian
COPY packaging/debian ./debian
RUN sed -i "s;@VERSION@;${REVISION};" debian/changelog && \
if [ "$REVISION" != "$(dpkg-parsechangelog --show-field=Version)" ]; then exit 1; fi
CMD export DISTRIB="$(lsb_release -cs)" && \
debuild -eDISTRIB -eSECTION --dpkg-buildpackage-hook='sh debian/prepare' -i -us -uc -b && \
debuild -eDISTRIB -eSECTION -eLIBNVIDIA_CONTAINER_VERSION="${REVISION}" \
--dpkg-buildpackage-hook='sh debian/prepare' -i -us -uc -b && \
mv /tmp/*.deb /dist

142
docker/docker.mk Normal file
View File

@@ -0,0 +1,142 @@
# Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Supported OSs by architecture
AMD64_TARGETS := ubuntu20.04 ubuntu18.04 ubuntu16.04 debian10 debian9
X86_64_TARGETS := centos7 centos8 rhel7 rhel8 amazonlinux1 amazonlinux2 opensuse-leap15.1
PPC64LE_TARGETS := ubuntu18.04 ubuntu16.04 centos7 centos8 rhel7 rhel8
ARM64_TARGETS := ubuntu20.04 ubuntu18.04
AARCH64_TARGETS := centos8 rhel8 amazonlinux2
# Define top-level build targets
docker%: SHELL:=/bin/bash
# Native targets
PLATFORM ?= $(shell uname -m)
ifeq ($(PLATFORM),x86_64)
NATIVE_TARGETS := $(AMD64_TARGETS) $(X86_64_TARGETS)
$(AMD64_TARGETS): %: %-amd64
$(X86_64_TARGETS): %: %-x86_64
else ifeq ($(PLATFORM),ppc64le)
NATIVE_TARGETS := $(PPC64LE_TARGETS)
$(PPC64LE_TARGETS): %: %-ppc64le
else ifeq ($(PLATFORM),aarch64)
NATIVE_TARGETS := $(ARM64_TARGETS) $(AARCH64_TARGETS)
$(ARM64_TARGETS): %: %-arm64
$(AARCH64_TARGETS): %: %-aarch64
endif
docker-native: $(NATIVE_TARGETS)
# amd64 targets
AMD64_TARGETS := $(patsubst %, %-amd64, $(AMD64_TARGETS))
$(AMD64_TARGETS): ARCH := amd64
$(AMD64_TARGETS): %: --%
docker-amd64: $(AMD64_TARGETS)
# x86_64 targets
X86_64_TARGETS := $(patsubst %, %-x86_64, $(X86_64_TARGETS))
$(X86_64_TARGETS): ARCH := x86_64
$(X86_64_TARGETS): %: --%
docker-x86_64: $(X86_64_TARGETS)
# arm64 targets
ARM64_TARGETS := $(patsubst %, %-arm64, $(ARM64_TARGETS))
$(ARM64_TARGETS): ARCH := arm64
$(ARM64_TARGETS): %: --%
docker-arm64: $(ARM64_TARGETS)
# aarch64 targets
AARCH64_TARGETS := $(patsubst %, %-aarch64, $(AARCH64_TARGETS))
$(AARCH64_TARGETS): ARCH := aarch64
$(AARCH64_TARGETS): %: --%
docker-aarch64: $(AARCH64_TARGETS)
# ppc64le targets
PPC64LE_TARGETS := $(patsubst %, %-ppc64le, $(PPC64LE_TARGETS))
$(PPC64LE_TARGETS): ARCH := ppc64le
$(PPC64LE_TARGETS): WITH_LIBELF := yes
$(PPC64LE_TARGETS): %: --%
docker-ppc64le: $(PPC64LE_TARGETS)
# docker target to build for all os/arch combinations
docker-all: $(AMD64_TARGETS) $(X86_64_TARGETS) \
$(ARM64_TARGETS) $(AARCH64_TARGETS) \
$(PPC64LE_TARGETS)
# Default variables for all private '--' targets below.
# One private target is defined for each OS we support.
--%: TARGET_PLATFORM = $(*)
--%: VERSION = $(patsubst $(OS)%-$(ARCH),%,$(TARGET_PLATFORM))
--%: BASEIMAGE = $(OS):$(VERSION)
--%: BUILDIMAGE = nvidia/$(LIB_NAME)/$(OS)$(VERSION)-$(ARCH)
--%: DOCKERFILE = $(CURDIR)/docker/Dockerfile.$(OS)
--%: ARTIFACTS_DIR = $(DIST_DIR)/$(OS)$(VERSION)/$(ARCH)
--%: docker-build-%
@
# private ubuntu target
--ubuntu%: OS := ubuntu
--ubuntu%: LIB_VERSION := $(LIB_VERSION)$(if $(LIB_TAG),~$(LIB_TAG))
--ubuntu%: PKG_REV := 1
# private debian target
--debian%: OS := debian
--debian%: LIB_VERSION := $(LIB_VERSION)$(if $(LIB_TAG),~$(LIB_TAG))
--debian%: PKG_REV := 1
# private centos target
--centos%: OS := centos
--centos%: PKG_REV := $(if $(LIB_TAG),0.1.$(LIB_TAG),1)
# private amazonlinux target
--amazonlinux%: OS := amazonlinux
--amazonlinux%: PKG_REV := $(if $(LIB_TAG),0.1.$(LIB_TAG),1)
# private opensuse-leap target
--opensuse-leap%: OS = opensuse-leap
--opensuse-leap%: BASEIMAGE = opensuse/leap:$(VERSION)
--opensuse-leap%: PKG_REV := $(if $(LIB_TAG),0.1.$(LIB_TAG),1)
# private rhel target (actually built on centos)
--rhel%: OS := centos
--rhel%: PKG_REV := $(if $(LIB_TAG),0.1.$(LIB_TAG),1)
--rhel%: VERSION = $(patsubst rhel%-$(ARCH),%,$(TARGET_PLATFORM))
--rhel%: ARTIFACTS_DIR = $(DIST_DIR)/rhel$(VERSION)/$(ARCH)
docker-build-%:
@echo "Building for $(TARGET_PLATFORM)"
docker pull --platform=linux/$(ARCH) $(BASEIMAGE)
DOCKER_BUILDKIT=1 \
$(DOCKER) build \
--progress=plain \
--build-arg BASEIMAGE="$(BASEIMAGE)" \
--build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \
--build-arg PKG_VERS="$(LIB_VERSION)" \
--build-arg PKG_REV="$(PKG_REV)" \
--tag $(BUILDIMAGE) \
--file $(DOCKERFILE) .
$(DOCKER) run \
-e DISTRIB \
-e SECTION \
-v $(ARTIFACTS_DIR):/dist \
$(BUILDIMAGE)
docker-clean:
IMAGES=$$(docker images "nvidia/$(LIB_NAME)/*" --format="{{.ID}}"); \
if [ "$${IMAGES}" != "" ]; then \
docker rmi -f $${IMAGES}; \
fi
distclean:
rm -rf $(DIST_DIR)

13
go.mod Normal file
View File

@@ -0,0 +1,13 @@
module github.com/NVIDIA/nvidia-container-toolkit
go 1.14
require (
github.com/BurntSushi/toml v0.3.1
github.com/opencontainers/runtime-spec v1.0.2
github.com/pelletier/go-toml v1.9.3
github.com/sirupsen/logrus v1.8.1
github.com/stretchr/testify v1.7.0
github.com/tsaikd/KDGoLib v0.0.0-20191001134900-7f3cf518e07d
golang.org/x/mod v0.3.0
)

250
go.sum Normal file
View File

@@ -0,0 +1,250 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nlopes/slack v0.6.0/go.mod h1:JzQ9m3PMAqcpeCam7UaHSuBuupz7CmpjehYMayT6YOk=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c/go.mod h1:X07ZCGwUbLaax7L0S3Tw4hpejzu63ZrrQiUe6W0hcy0=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tsaikd/KDGoLib v0.0.0-20191001134900-7f3cf518e07d/go.mod h1:oFPCwcQpP90RVZxlBdgPN+iu2tPkboPUa4xaVEI6pO4=
github.com/tsaikd/govalidator v0.0.0-20161031084447-986f2244fc69/go.mod h1:yJymgtZhuWi1Ih5t37Ej381BGZFZvlb9YMTwBxB/QjU=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191001123449-8b695b21ef34/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=

23
internal/oci/runtime.go Normal file
View File

@@ -0,0 +1,23 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package oci
// Runtime is an interface for a runtime shim. The Exec method accepts a list
// of command line arguments, and returns an error / nil.
type Runtime interface {
Exec([]string) error
}
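Since Runtime is a single-method interface, test doubles are cheap to write. A hedged sketch of one, assuming it lives in the same oci package (the recordingRuntime name is invented and not part of the package):

```go
// Illustration only: a stand-in Runtime that records its arguments instead
// of exec'ing into anything. The type name is hypothetical.
type recordingRuntime struct {
	args []string
}

func (r *recordingRuntime) Exec(args []string) error {
	r.args = append([]string(nil), args...) // copy to avoid aliasing the caller's slice
	return nil
}

// Compile-time check that the sketch satisfies the interface.
var _ Runtime = (*recordingRuntime)(nil)
```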

View File

@@ -0,0 +1,79 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package oci
import (
"fmt"
"os"
"syscall"
log "github.com/sirupsen/logrus"
)
// SyscallExecRuntime wraps the path to a binary and defines the semantics for how to exec into it.
// This can be used to wrap an OCI-compliant low-level runtime binary, allowing it to be used through the
// Runtime interface.
type SyscallExecRuntime struct {
logger *log.Logger
path string
// exec is used for testing. This defaults to syscall.Exec
exec func(argv0 string, argv []string, envv []string) error
}
var _ Runtime = (*SyscallExecRuntime)(nil)
// NewSyscallExecRuntime creates a SyscallExecRuntime for the specified path with the standard logger
func NewSyscallExecRuntime(path string) (Runtime, error) {
return NewSyscallExecRuntimeWithLogger(log.StandardLogger(), path)
}
// NewSyscallExecRuntimeWithLogger creates a SyscallExecRuntime for the specified logger and path
func NewSyscallExecRuntimeWithLogger(logger *log.Logger, path string) (Runtime, error) {
info, err := os.Stat(path)
if err != nil {
return nil, fmt.Errorf("invalid path '%v': %v", path, err)
}
if info.IsDir() || info.Mode()&0111 == 0 {
return nil, fmt.Errorf("specified path '%v' is not an executable file", path)
}
shim := SyscallExecRuntime{
logger: logger,
path: path,
exec: syscall.Exec,
}
return &shim, nil
}
// Exec execs into the binary at the path from the SyscallExecRuntime struct, passing it the supplied arguments
// after ensuring that the first argument is the path of the target binary.
func (s SyscallExecRuntime) Exec(args []string) error {
runtimeArgs := []string{s.path}
if len(args) > 1 {
runtimeArgs = append(runtimeArgs, args[1:]...)
}
err := s.exec(s.path, runtimeArgs, os.Environ())
if err != nil {
return fmt.Errorf("could not exec '%v': %v", s.path, err)
}
// syscall.Exec is not expected to return. This is an error state regardless of whether
// err is nil or not.
return fmt.Errorf("unexpected return from exec '%v'", s.path)
}
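
As a usage sketch (not part of this change; the runc path and the logging calls are illustrative assumptions), the shim would typically be constructed for a low-level runtime and handed the process's own arguments:

shim, err := NewSyscallExecRuntime("/usr/bin/runc") // hypothetical runtime path
if err != nil {
	log.Fatalf("error creating runtime shim: %v", err)
}
// Exec substitutes the shim's path for argv[0] and forwards the remaining arguments.
// On success the call never returns, so any return value is treated as an error.
if err := shim.Exec(os.Args); err != nil {
	log.Fatalf("error executing runtime: %v", err)
}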


@@ -0,0 +1,100 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package oci
import (
"fmt"
"strings"
"testing"
testlog "github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/require"
)
func TestSyscallExecConstructor(t *testing.T) {
r, err := NewSyscallExecRuntime("////an/invalid/path")
require.Error(t, err)
require.Nil(t, r)
r, err = NewSyscallExecRuntime("/tmp")
require.Error(t, err)
require.Nil(t, r)
r, err = NewSyscallExecRuntime("/dev/null")
require.Error(t, err)
require.Nil(t, r)
r, err = NewSyscallExecRuntime("/bin/sh")
require.NoError(t, err)
f, ok := r.(*SyscallExecRuntime)
require.True(t, ok)
require.Equal(t, "/bin/sh", f.path)
}
func TestSyscallExecForwardsArgs(t *testing.T) {
logger, _ := testlog.NewNullLogger()
f := SyscallExecRuntime{
logger: logger,
path: "runtime",
}
testCases := []struct {
returnError error
args []string
errorPrefix string
}{
{
returnError: nil,
errorPrefix: "unexpected return from exec",
},
{
returnError: fmt.Errorf("error from exec"),
errorPrefix: "could not exec",
},
{
returnError: nil,
args: []string{"otherargv0"},
errorPrefix: "unexpected return from exec",
},
{
returnError: nil,
args: []string{"otherargv0", "arg1", "arg2", "arg3"},
errorPrefix: "unexpected return from exec",
},
}
for i, tc := range testCases {
execMock := WithMockExec(f, tc.returnError)
err := execMock.Exec(tc.args)
require.Errorf(t, err, "%d: %v", i, tc)
require.Truef(t, strings.HasPrefix(err.Error(), tc.errorPrefix), "%d: %v", i, tc)
if tc.returnError != nil {
require.Truef(t, strings.HasSuffix(err.Error(), tc.returnError.Error()), "%d: %v", i, tc)
}
require.Equalf(t, f.path, execMock.argv0, "%d: %v", i, tc)
require.Equalf(t, f.path, execMock.argv[0], "%d: %v", i, tc)
require.LessOrEqualf(t, len(tc.args), len(execMock.argv), "%d: %v", i, tc)
if len(tc.args) > 1 {
require.Equalf(t, tc.args[1:], execMock.argv[1:], "%d: %v", i, tc)
}
}
}


@@ -0,0 +1,49 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package oci
// MockExecRuntime wraps a SyscallExecRuntime, intercepting the exec call for testing
type MockExecRuntime struct {
SyscallExecRuntime
execMock
}
// WithMockExec wraps a specified SyscallExecRuntime with a mocked exec function for testing
func WithMockExec(e SyscallExecRuntime, execResult error) *MockExecRuntime {
m := MockExecRuntime{
SyscallExecRuntime: e,
execMock: execMock{result: execResult},
}
// override the exec function with the mocked exec function.
m.SyscallExecRuntime.exec = m.execMock.exec
return &m
}
type execMock struct {
argv0 string
argv []string
envv []string
result error
}
func (m *execMock) exec(argv0 string, argv []string, envv []string) error {
m.argv0 = argv0
m.argv = argv
m.envv = envv
return m.result
}

internal/oci/spec.go Normal file

@@ -0,0 +1,102 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package oci
import (
"encoding/json"
"fmt"
"os"
oci "github.com/opencontainers/runtime-spec/specs-go"
)
// SpecModifier is a function that accepts a pointer to an OCI Spec and returns an
// error. The intention is that the function would modify the spec in-place.
type SpecModifier func(*oci.Spec) error
// Spec defines the operations to be performed on an OCI specification
type Spec interface {
Load() error
Flush() error
Modify(SpecModifier) error
}
type fileSpec struct {
*oci.Spec
path string
}
var _ Spec = (*fileSpec)(nil)
// NewSpecFromFile creates an object that encapsulates a file-backed OCI spec.
// This can be used to read from the file, modify the spec, and write to the
// same file.
func NewSpecFromFile(filepath string) Spec {
oci := fileSpec{
path: filepath,
}
return &oci
}
// Load reads the contents of an OCI spec from file to be referenced internally.
// The file is opened "read-only"
func (s *fileSpec) Load() error {
specFile, err := os.Open(s.path)
if err != nil {
return fmt.Errorf("error opening OCI specification file: %v", err)
}
defer specFile.Close()
decoder := json.NewDecoder(specFile)
var spec oci.Spec
err = decoder.Decode(&spec)
if err != nil {
return fmt.Errorf("error reading OCI specification from file: %v", err)
}
s.Spec = &spec
return nil
}
// Modify applies the specified SpecModifier to the stored OCI specification.
func (s *fileSpec) Modify(f SpecModifier) error {
if s.Spec == nil {
return fmt.Errorf("no spec loaded for modification")
}
return f(s.Spec)
}
// Flush writes the stored OCI specification to the filepath specified by the path member.
// The file is truncated upon opening, overwriting any existing contents.
func (s fileSpec) Flush() error {
specFile, err := os.Create(s.path)
if err != nil {
return fmt.Errorf("error opening OCI specification file: %v", err)
}
defer specFile.Close()
encoder := json.NewEncoder(specFile)
err = encoder.Encode(s.Spec)
if err != nil {
return fmt.Errorf("error writing OCI specification to file: %v", err)
}
return nil
}
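
Putting the spec primitives together, a minimal sketch of the load-modify-flush cycle, written as the body of a helper returning an error (the bundle path and injected variable are illustrative, and it assumes spec.Process is non-nil):

spec := NewSpecFromFile("/run/bundle/config.json") // hypothetical bundle path
if err := spec.Load(); err != nil {
	return err
}
// A SpecModifier mutates the spec in place; this one appends an environment variable.
addVisibleDevices := func(s *oci.Spec) error {
	s.Process.Env = append(s.Process.Env, "NVIDIA_VISIBLE_DEVICES=all")
	return nil
}
if err := spec.Modify(addVisibleDevices); err != nil {
	return err
}
return spec.Flush()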

internal/oci/spec_mock.go Normal file

@@ -0,0 +1,70 @@
/*
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package oci
import (
oci "github.com/opencontainers/runtime-spec/specs-go"
)
// MockSpec provides a simple mock for an OCI spec to be used in testing.
// It implements the Spec interface.
type MockSpec struct {
*oci.Spec
MockLoad mockFunc
MockFlush mockFunc
MockModify mockFunc
}
var _ Spec = (*MockSpec)(nil)
// NewMockSpec constructs a MockSpec to be used in testing as a Spec
func NewMockSpec(spec *oci.Spec, flushResult error, modifyResult error) *MockSpec {
s := MockSpec{
Spec: spec,
MockFlush: mockFunc{result: flushResult},
MockModify: mockFunc{result: modifyResult},
}
return &s
}
// Load invokes the mocked Load function to return the predefined error / result
func (s *MockSpec) Load() error {
return s.MockLoad.call()
}
// Flush invokes the mocked Flush function to return the predefined error / result
func (s *MockSpec) Flush() error {
return s.MockFlush.call()
}
// Modify applies the specified SpecModifier to the spec and invokes the
// mocked modify function to return the predefined error / result.
func (s *MockSpec) Modify(f SpecModifier) error {
f(s.Spec)
return s.MockModify.call()
}
type mockFunc struct {
Callcount int
result error
}
func (m *mockFunc) call() error {
m.Callcount++
return m.result
}
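
A short test sketch (inside a testing.T-based test; the names are illustrative) showing how MockSpec lets a modifier be exercised without touching the filesystem:

spec := NewMockSpec(&oci.Spec{Process: &oci.Process{}}, nil, nil)

setMarker := func(s *oci.Spec) error {
	s.Process.Env = append(s.Process.Env, "MODIFIED=true")
	return nil
}

require.NoError(t, spec.Modify(setMarker))
require.Equal(t, 1, spec.MockModify.Callcount)
require.Contains(t, spec.Process.Env, "MODIFIED=true")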


@@ -1,12 +0,0 @@
{
"ImportPath": "github.com/nvidia/nvidia-container-runtime/toolkit/nvidia-container-toolkit",
"GoVersion": "go1.9",
"GodepVersion": "v80",
"Deps": [
{
"ImportPath": "github.com/BurntSushi/toml",
"Comment": "v0.3.0-7-ga368813",
"Rev": "a368813c5e648fee92e5f6c30e3944ff9d5e8895"
}
]
}


@@ -1,262 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"log"
"os"
"path"
"strconv"
"strings"
)
var envSwarmGPU *string
const (
envNVRequirePrefix = "NVIDIA_REQUIRE_"
envLegacyCUDAVersion = "CUDA_VERSION"
envNVRequireCUDA = envNVRequirePrefix + "CUDA"
envNVGPU = "NVIDIA_VISIBLE_DEVICES"
envNVDriverCapabilities = "NVIDIA_DRIVER_CAPABILITIES"
defaultCapability = "utility"
allCapabilities = "compute,compat32,graphics,utility,video,display"
envNVDisableRequire = "NVIDIA_DISABLE_REQUIRE"
)
type nvidiaConfig struct {
Devices string
Capabilities string
Requirements []string
DisableRequire bool
}
type containerConfig struct {
Pid int
Rootfs string
Env map[string]string
Nvidia *nvidiaConfig
}
// github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L94-L100
type Root struct {
Path string `json:"path"`
}
// github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L30-L57
type Process struct {
Env []string `json:"env,omitempty"`
}
// We use pointers to structs, similarly to the latest version of runtime-spec:
// https://github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L5-L28
type Spec struct {
Process *Process `json:"process,omitempty"`
Root *Root `json:"root,omitempty"`
}
type HookState struct {
Pid int `json:"pid,omitempty"`
// After 17.06, runc is using the runtime spec:
// github.com/docker/runc/blob/17.06/libcontainer/configs/config.go#L262-L263
// github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/state.go#L3-L17
Bundle string `json:"bundle"`
// Before 17.06, runc used a custom struct that didn't conform to the spec:
// github.com/docker/runc/blob/17.03.x/libcontainer/configs/config.go#L245-L252
BundlePath string `json:"bundlePath"`
}
func parseCudaVersion(cudaVersion string) (vmaj, vmin, vpatch uint32) {
if _, err := fmt.Sscanf(cudaVersion, "%d.%d.%d\n", &vmaj, &vmin, &vpatch); err != nil {
vpatch = 0
if _, err := fmt.Sscanf(cudaVersion, "%d.%d\n", &vmaj, &vmin); err != nil {
vmin = 0
if _, err := fmt.Sscanf(cudaVersion, "%d\n", &vmaj); err != nil {
log.Panicln("invalid CUDA version:", cudaVersion)
}
}
}
return
}
func getEnvMap(e []string) (m map[string]string) {
m = make(map[string]string)
for _, s := range e {
p := strings.SplitN(s, "=", 2)
if len(p) != 2 {
log.Panicln("environment error")
}
m[p[0]] = p[1]
}
return
}
func loadSpec(path string) (spec *Spec) {
f, err := os.Open(path)
if err != nil {
log.Panicln("could not open OCI spec:", err)
}
defer f.Close()
if err = json.NewDecoder(f).Decode(&spec); err != nil {
log.Panicln("could not decode OCI spec:", err)
}
if spec.Process == nil {
log.Panicln("Process is empty in OCI spec")
}
if spec.Root == nil {
log.Panicln("Root is empty in OCI spec")
}
return
}
func getDevices(env map[string]string) *string {
gpuVars := []string{envNVGPU}
if envSwarmGPU != nil {
// The Swarm resource has higher precedence.
gpuVars = append([]string{*envSwarmGPU}, gpuVars...)
}
for _, gpuVar := range gpuVars {
if devices, ok := env[gpuVar]; ok {
return &devices
}
}
return nil
}
func getCapabilities(env map[string]string) *string {
if capabilities, ok := env[envNVDriverCapabilities]; ok {
return &capabilities
}
return nil
}
func getRequirements(env map[string]string) []string {
// All variables with the "NVIDIA_REQUIRE_" prefix are passed to nvidia-container-cli
var requirements []string
for name, value := range env {
if strings.HasPrefix(name, envNVRequirePrefix) {
requirements = append(requirements, value)
}
}
return requirements
}
// Mimic the new CUDA images if no capabilities or devices are specified.
func getNvidiaConfigLegacy(env map[string]string) *nvidiaConfig {
var devices string
if d := getDevices(env); d == nil {
// Environment variable unset: default to "all".
devices = "all"
} else if len(*d) == 0 || *d == "void" {
// Environment variable empty or "void": not a GPU container.
return nil
} else {
// Environment variable non-empty and not "void".
devices = *d
}
if devices == "none" {
devices = ""
}
var capabilities string
if c := getCapabilities(env); c == nil {
// Environment variable unset: default to "all".
capabilities = allCapabilities
} else if len(*c) == 0 {
// Environment variable empty: use default capability.
capabilities = defaultCapability
} else {
// Environment variable non-empty.
capabilities = *c
}
if capabilities == "all" {
capabilities = allCapabilities
}
requirements := getRequirements(env)
vmaj, vmin, _ := parseCudaVersion(env[envLegacyCUDAVersion])
cudaRequire := fmt.Sprintf("cuda>=%d.%d", vmaj, vmin)
requirements = append(requirements, cudaRequire)
// Don't fail on invalid values.
disableRequire, _ := strconv.ParseBool(env[envNVDisableRequire])
return &nvidiaConfig{
Devices: devices,
Capabilities: capabilities,
Requirements: requirements,
DisableRequire: disableRequire,
}
}
func getNvidiaConfig(env map[string]string) *nvidiaConfig {
legacyCudaVersion := env[envLegacyCUDAVersion]
cudaRequire := env[envNVRequireCUDA]
if len(legacyCudaVersion) > 0 && len(cudaRequire) == 0 {
// Legacy CUDA image detected.
return getNvidiaConfigLegacy(env)
}
var devices string
if d := getDevices(env); d == nil || len(*d) == 0 || *d == "void" {
// Environment variable unset or empty or "void": not a GPU container.
return nil
} else {
// Environment variable non-empty and not "void".
devices = *d
}
if devices == "none" {
devices = ""
}
var capabilities string
if c := getCapabilities(env); c == nil || len(*c) == 0 {
// Environment variable unset or set but empty: use default capability.
capabilities = defaultCapability
} else {
// Environment variable set and non-empty.
capabilities = *c
}
if capabilities == "all" {
capabilities = allCapabilities
}
requirements := getRequirements(env)
// Don't fail on invalid values.
disableRequire, _ := strconv.ParseBool(env[envNVDisableRequire])
return &nvidiaConfig{
Devices: devices,
Capabilities: capabilities,
Requirements: requirements,
DisableRequire: disableRequire,
}
}
func getContainerConfig(hook HookConfig) (config containerConfig) {
var h HookState
d := json.NewDecoder(os.Stdin)
if err := d.Decode(&h); err != nil {
log.Panicln("could not decode container state:", err)
}
b := h.Bundle
if len(b) == 0 {
b = h.BundlePath
}
s := loadSpec(path.Join(b, "config.json"))
env := getEnvMap(s.Process.Env)
envSwarmGPU = hook.SwarmResource
return containerConfig{
Pid: h.Pid,
Rootfs: s.Root.Path,
Env: env,
Nvidia: getNvidiaConfig(env),
}
}
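
For context on the behaviour being removed here, a hedged sketch of how the hook's environment variables mapped to its configuration (values are illustrative and it assumes envSwarmGPU is unset):

env := map[string]string{
	"NVIDIA_VISIBLE_DEVICES":     "0,1",
	"NVIDIA_DRIVER_CAPABILITIES": "compute,utility",
	"NVIDIA_REQUIRE_CUDA":        "cuda>=11.0",
}
cfg := getNvidiaConfig(env)
// cfg.Devices == "0,1"
// cfg.Capabilities == "compute,utility"
// cfg.Requirements == []string{"cuda>=11.0"}
// cfg.DisableRequire == false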


@@ -1,60 +0,0 @@
package main
import (
"testing"
)
func TestParseCudaVersionValid(t *testing.T) {
var tests = []struct {
version string
expected [3]uint32
}{
{"0", [3]uint32{0, 0, 0}},
{"8", [3]uint32{8, 0, 0}},
{"7.5", [3]uint32{7, 5, 0}},
{"9.0.116", [3]uint32{9, 0, 116}},
{"4294967295.4294967295.4294967295", [3]uint32{4294967295, 4294967295, 4294967295}},
}
for _, c := range tests {
vmaj, vmin, vpatch := parseCudaVersion(c.version)
if vmaj != c.expected[0] || vmin != c.expected[1] || vpatch != c.expected[2] {
t.Errorf("parseCudaVersion(%s): %d.%d.%d (expected: %v)", c.version, vmaj, vmin, vpatch, c.expected)
}
}
}
func mustPanic(t *testing.T, f func()) {
defer func() {
if err := recover(); err == nil {
t.Error("Test didn't panic!")
}
}()
f()
}
func TestParseCudaVersionInvalid(t *testing.T) {
var tests = []string{
"foo",
"foo.5.10",
"9.0.116.50",
"9.0.116foo",
"7.foo",
"9.0.bar",
"9.4294967296",
"9.0.116.",
"9..0",
"9.",
".5.10",
"-9",
"+9",
"-9.1.116",
"-9.-1.-116",
}
for _, c := range tests {
mustPanic(t, func() {
t.Logf("parseCudaVersion(%s)", c)
parseCudaVersion(c)
})
}
}

packaging/debian/changelog Normal file

@@ -0,0 +1,115 @@
nvidia-container-toolkit (1.6.0~rc.1-1) experimental; urgency=medium
* Add AARCH64 package for Amazon Linux 2
* Include nvidia-container-runtime into nvidia-container-toolkit package
-- NVIDIA CORPORATION <cudatools@nvidia.com> Mon, 06 Sep 2021 12:24:05 +0200
nvidia-container-toolkit (1.5.1-1) UNRELEASED; urgency=medium
* Fix bug where Docker Swarm device selection is ignored if
NVIDIA_VISIBLE_DEVICES is also set
* Improve unit testing by using require package and adding coverage reports
* Remove unneeded go dependencies by running go mod tidy
* Move contents of pkg directory to cmd for CLI tools
* Ensure make binary target explicitly sets GOOS
-- NVIDIA CORPORATION <cudatools@nvidia.com> Mon, 14 Jun 2021 09:00:00 -0700
nvidia-container-toolkit (1.5.0-1) UNRELEASED; urgency=medium
* Add dependence on libnvidia-container-tools >= 1.4.0
* Add golang check targets to Makefile
* Add Jenkinsfile definition for build targets
* Move docker.mk to docker folder
-- NVIDIA CORPORATION <cudatools@nvidia.com> Thu, 29 Apr 2021 03:12:43 -0700
nvidia-container-toolkit (1.4.2-1) UNRELEASED; urgency=medium
* Add dependence on libnvidia-container-tools >= 1.3.3
-- NVIDIA CORPORATION <cudatools@nvidia.com> Fri, 05 Feb 2021 02:24:36 -0700
nvidia-container-toolkit (1.4.1-1) UNRELEASED; urgency=medium
* Ignore NVIDIA_VISIBLE_DEVICES for containers with insufficient privileges
* Add dependence on libnvidia-container-tools >= 1.3.2
-- NVIDIA CORPORATION <cudatools@nvidia.com> Mon, 25 Jan 2021 02:18:04 -0700
nvidia-container-toolkit (1.4.0-1) UNRELEASED; urgency=medium
* Add 'compute' capability to list of defaults
* Add dependence on libnvidia-container-tools >= 1.3.1
-- NVIDIA CORPORATION <cudatools@nvidia.com> Fri, 11 Dec 2020 18:29:23 -0700
nvidia-container-toolkit (1.3.0-1) UNRELEASED; urgency=medium
* Promote 1.3.0~rc.2-1 to 1.3.0-1
* Add dependence on libnvidia-container-tools >= 1.3.0
-- NVIDIA CORPORATION <cudatools@nvidia.com> Wed, 16 Sep 2020 13:40:29 -0700
nvidia-container-toolkit (1.3.0~rc.2-1) experimental; urgency=medium
* 2c180947 Add more tests for new semantics with device list from volume mounts
* 7c003857 Refactor accepting device lists from volume mounts as a boolean
-- NVIDIA CORPORATION <cudatools@nvidia.com> Mon, 10 Aug 2020 15:05:34 -0700
nvidia-container-toolkit (1.3.0~rc.1-1) experimental; urgency=medium
* b50d86c1 Update build system to accept a TAG variable for things like rc.x
* fe65573b Add common CI tests for things like golint, gofmt, unit tests, etc.
* da6fbb34 Revert "Add ability to merge envars of the form NVIDIA_VISIBLE_DEVICES_*"
* a7fb3330 Flip build-all targets to run automatically on merge requests
* 8b248b66 Rename github.com/NVIDIA/container-toolkit to nvidia-container-toolkit
* da36874e Add new config options to pull device list from mounted files instead of ENVVAR
-- NVIDIA CORPORATION <cudatools@nvidia.com> Fri, 24 Jul 2020 22:21:49 -0700
nvidia-container-toolkit (1.2.1-1) UNRELEASED; urgency=medium
* 4e6e0ed4 Add 'ngx' to list of *all* driver capabilities
* 2f4af743 List config.toml as a config file in the RPM SPEC
-- NVIDIA CORPORATION <cudatools@nvidia.com> Wed, 22 Jul 2020 15:36:12 -0700
nvidia-container-toolkit (1.2.0-1) UNRELEASED; urgency=medium
* 8e0aab46 Fix repo listed in changelog for debian distributions
* 320bb6e4 Update dependence on libnvidia-container to 1.2.0
* 6cfc8097 Update package license to match source license
* e7dc3cbb Fix debian copyright file
* d3aee3e0 Add the 'ngx' driver capability
-- NVIDIA CORPORATION <cudatools@nvidia.com> Wed, 08 Jul 2020 18:11:19 -0700
nvidia-container-toolkit (1.1.2-1) UNRELEASED; urgency=medium
* c32237f3 Add support for parsing Linux Capabilities for older OCI specs
-- NVIDIA CORPORATION <cudatools@nvidia.com> Wed, 03 Jun 2020 12:05:32 -0700
nvidia-container-toolkit (1.1.1-1) UNRELEASED; urgency=medium
* d202aded Update dependence to libnvidia-container 1.1.1
-- NVIDIA CORPORATION <cudatools@nvidia.com> Tue, 19 May 2020 12:05:32 -0700
nvidia-container-toolkit (1.1.0-1) UNRELEASED; urgency=medium
* 4e4de762 Update build system to support multi-arch builds
* fcc1d116 Add support for MIG (Multi-Instance GPUs)
* d4ff0416 Add ability to merge envars of the form NVIDIA_VISIBLE_DEVICES_*
* 60f165ad Add no-pivot option to toolkit
-- NVIDIA CORPORATION <cudatools@nvidia.com> Fri, 15 May 2020 12:05:32 -0700
nvidia-container-toolkit (1.0.5-1) UNRELEASED; urgency=medium
* Initial release. Replaces older package nvidia-container-runtime-hook. (Closes: #XXXXXX)
-- Rajat Chopra <rajatc@nvidia.com> Wed, 10 Jul 2019 11:31:11 -0700


@@ -10,8 +10,8 @@ Build-Depends: debhelper (>= 9)
Package: nvidia-container-toolkit
Architecture: any
Depends: ${misc:Depends}, libnvidia-container-tools (>= 0.1.0), libnvidia-container-tools (<< 2.0.0)
Breaks: nvidia-container-runtime (<< 2.0.0), nvidia-container-runtime-hook
Replaces: nvidia-container-runtime (<< 2.0.0), nvidia-container-runtime-hook
Depends: ${misc:Depends}, libnvidia-container-tools (>= @LIBNVIDIA_CONTAINER_VERSION@), libnvidia-container-tools (<< 2.0.0), libseccomp2
Breaks: nvidia-container-runtime (<= 3.5.0-1), nvidia-container-runtime-hook
Replaces: nvidia-container-runtime (<= 3.5.0-1), nvidia-container-runtime-hook
Description: NVIDIA container runtime hook
Provides an OCI hook to enable GPU support in containers.

packaging/debian/copyright Normal file

@@ -0,0 +1,185 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: nvidia-container-toolkit
Source: https://github.com/NVIDIA/nvidia-container-toolkit
Files: *
Copyright: 2017-2020 NVIDIA CORPORATION <cudatools@nvidia.com>
License: Apache-2.0
License: Apache-2.0
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS


@@ -1,2 +1,3 @@
config.toml /etc/nvidia-container-runtime
nvidia-container-toolkit /usr/bin
nvidia-container-runtime /usr/bin


@@ -3,6 +3,7 @@
set -e
sed -i "s;@SECTION@;${SECTION:+$SECTION/};g" debian/control
sed -i "s;@LIBNVIDIA_CONTAINER_VERSION@;${LIBNVIDIA_CONTAINER_VERSION:+$LIBNVIDIA_CONTAINER_VERSION};g" debian/control
if [ -n "$DISTRIB" ]; then
sed -i "s;UNRELEASED;$DISTRIB;" debian/changelog


@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,134 @@
Name: nvidia-container-toolkit
Version: %{version}
Release: %{release}
Group: Development Tools
Vendor: NVIDIA CORPORATION
Packager: NVIDIA CORPORATION <cudatools@nvidia.com>
Summary: NVIDIA container runtime hook
URL: https://github.com/NVIDIA/nvidia-container-runtime
License: Apache-2.0
Source0: nvidia-container-toolkit
Source1: nvidia-container-runtime
Source2: config.toml
Source3: oci-nvidia-hook
Source4: oci-nvidia-hook.json
Source5: LICENSE
Obsoletes: nvidia-container-runtime <= 3.5.0-1, nvidia-container-runtime-hook
Provides: nvidia-container-runtime
Provides: nvidia-container-runtime-hook
Requires: libnvidia-container-tools >= %{libnvidia_container_version}, libnvidia-container-tools < 2.0.0
%if 0%{?suse_version}
Requires: libseccomp2
Requires: libapparmor1
%else
Requires: libseccomp
%endif
%description
Provides an OCI hook to enable GPU support in containers.
%prep
cp %{SOURCE0} %{SOURCE1} %{SOURCE2} %{SOURCE3} %{SOURCE4} %{SOURCE5} .
%install
mkdir -p %{buildroot}%{_bindir}
install -m 755 -t %{buildroot}%{_bindir} nvidia-container-toolkit
install -m 755 -t %{buildroot}%{_bindir} nvidia-container-runtime
mkdir -p %{buildroot}/etc/nvidia-container-runtime
install -m 644 -t %{buildroot}/etc/nvidia-container-runtime config.toml
mkdir -p %{buildroot}/usr/libexec/oci/hooks.d
install -m 755 -t %{buildroot}/usr/libexec/oci/hooks.d oci-nvidia-hook
mkdir -p %{buildroot}/usr/share/containers/oci/hooks.d
install -m 644 -t %{buildroot}/usr/share/containers/oci/hooks.d oci-nvidia-hook.json
%posttrans
ln -sf %{_bindir}/nvidia-container-toolkit %{_bindir}/nvidia-container-runtime-hook
%postun
rm -f %{_bindir}/nvidia-container-runtime-hook
%files
%license LICENSE
%{_bindir}/nvidia-container-toolkit
%{_bindir}/nvidia-container-runtime
%config /etc/nvidia-container-runtime/config.toml
/usr/libexec/oci/hooks.d/oci-nvidia-hook
/usr/share/containers/oci/hooks.d/oci-nvidia-hook.json
%changelog
* Mon Sep 06 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.6.0-0.1.rc.1
- Add AARCH64 package for Amazon Linux 2
- Include nvidia-container-runtime into nvidia-container-toolkit package
* Mon Jun 14 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.5.1-1
- Fix bug where Docker Swarm device selection is ignored if NVIDIA_VISIBLE_DEVICES is also set
- Improve unit testing by using require package and adding coverage reports
- Remove unneeded go dependencies by running go mod tidy
- Move contents of pkg directory to cmd for CLI tools
- Ensure make binary target explicitly sets GOOS
* Thu Apr 29 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.5.0-1
- Add dependence on libnvidia-container-tools >= 1.4.0
- Add golang check targets to Makefile
- Add Jenkinsfile definition for build targets
- Move docker.mk to docker folder
* Fri Feb 05 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.4.2-1
- Add dependence on libnvidia-container-tools >= 1.3.3
* Mon Jan 25 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.4.1-1
- Ignore NVIDIA_VISIBLE_DEVICES for containers with insufficient privileges
- Add dependence on libnvidia-container-tools >= 1.3.2
* Fri Dec 11 2020 NVIDIA CORPORATION <cudatools@nvidia.com> 1.4.0-1
- Add 'compute' capability to list of defaults
- Add dependence on libnvidia-container-tools >= 1.3.1
* Wed Sep 16 2020 NVIDIA CORPORATION <cudatools@nvidia.com> 1.3.0-1
- Promote 1.3.0-0.1.rc.2 to 1.3.0-1
- Add dependence on libnvidia-container-tools >= 1.3.0
* Mon Aug 10 2020 NVIDIA CORPORATION <cudatools@nvidia.com> 1.3.0-0.1.rc.2
- 2c180947 Add more tests for new semantics with device list from volume mounts
- 7c003857 Refactor accepting device lists from volume mounts as a boolean
* Fri Jul 24 2020 NVIDIA CORPORATION <cudatools@nvidia.com> 1.3.0-0.1.rc.1
- b50d86c1 Update build system to accept a TAG variable for things like rc.x
- fe65573b Add common CI tests for things like golint, gofmt, unit tests, etc.
- da6fbb34 Revert "Add ability to merge envars of the form NVIDIA_VISIBLE_DEVICES_*"
- a7fb3330 Flip build-all targets to run automatically on merge requests
- 8b248b66 Rename github.com/NVIDIA/container-toolkit to nvidia-container-toolkit
- da36874e Add new config options to pull device list from mounted files instead of ENVVAR
* Wed Jul 22 2020 NVIDIA CORPORATION <cudatools@nvidia.com> 1.2.1-1
- 4e6e0ed4 Add 'ngx' to list of *all* driver capabilities
- 2f4af743 List config.toml as a config file in the RPM SPEC
* Wed Jul 08 2020 NVIDIA CORPORATION <cudatools@nvidia.com> 1.2.0-1
- 8e0aab46 Fix repo listed in changelog for debian distributions
- 320bb6e4 Update dependence on libnvidia-container to 1.2.0
- 6cfc8097 Update package license to match source license
- e7dc3cbb Fix debian copyright file
- d3aee3e0 Add the 'ngx' driver capability
* Wed Jun 03 2020 NVIDIA CORPORATION <cudatools@nvidia.com> 1.1.2-1
- c32237f3 Add support for parsing Linux Capabilities for older OCI specs
* Tue May 19 2020 NVIDIA CORPORATION <cudatools@nvidia.com> 1.1.1-1
- d202aded Update dependence to libnvidia-container 1.1.1
* Fri May 15 2020 NVIDIA CORPORATION <cudatools@nvidia.com> 1.1.0-1
- 4e4de762 Update build system to support multi-arch builds
- fcc1d116 Add support for MIG (Multi-Instance GPUs)
- d4ff0416 Add ability to merge envars of the form NVIDIA_VISIBLE_DEVICES_*
- 60f165ad Add no-pivot option to toolkit


@@ -1,5 +0,0 @@
nvidia-container-toolkit (@VERSION@) UNRELEASED; urgency=medium
* Initial release. Replaces older package nvidia-container-runtime-hook. (Closes: #XXXXXX)
-- Rajat Chopra <rajatc@nvidia.com> Wed, 10 Jul 2019 11:31:11 -0700


@@ -1,35 +0,0 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: nvidia-container-toolkit
Source: https://github.com/NVIDIA/nvidia-container-runtime
Files: *
Copyright: 2017-2018 NVIDIA CORPORATION <cudatools@nvidia.com>
License: BSD-3-Clause
License: BSD-3-clause
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
.
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
.
Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,25 +0,0 @@
Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of NVIDIA CORPORATION nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,55 +0,0 @@
Name: nvidia-container-toolkit
Version: %{version}
Release: %{release}
Group: Development Tools
Vendor: NVIDIA CORPORATION
Packager: NVIDIA CORPORATION <cudatools@nvidia.com>
Summary: NVIDIA container runtime hook
URL: https://github.com/NVIDIA/nvidia-container-runtime
License: BSD
Source0: nvidia-container-toolkit
Source1: config.toml
Source2: oci-nvidia-hook
Source3: oci-nvidia-hook.json
Source4: LICENSE
Obsoletes: nvidia-container-runtime < 2.0.0, nvidia-container-runtime-hook
Provides: nvidia-container-runtime-hook
Requires: libnvidia-container-tools >= 0.1.0, libnvidia-container-tools < 2.0.0
%description
Provides a OCI hook to enable GPU support in containers.
%prep
cp %{SOURCE0} %{SOURCE1} %{SOURCE2} %{SOURCE3} %{SOURCE4} .
%install
mkdir -p %{buildroot}%{_bindir}
install -m 755 -t %{buildroot}%{_bindir} nvidia-container-toolkit
mkdir -p %{buildroot}/etc/nvidia-container-runtime
install -m 644 -t %{buildroot}/etc/nvidia-container-runtime config.toml
mkdir -p %{buildroot}/usr/libexec/oci/hooks.d
install -m 755 -t %{buildroot}/usr/libexec/oci/hooks.d oci-nvidia-hook
mkdir -p %{buildroot}/usr/share/containers/oci/hooks.d
install -m 644 -t %{buildroot}/usr/share/containers/oci/hooks.d oci-nvidia-hook.json
%posttrans
ln -sf %{_bindir}/nvidia-container-toolkit %{_bindir}/nvidia-container-runtime-hook
%postun
rm -f %{_bindir}/nvidia-container-runtime-hook
%files
%license LICENSE
%{_bindir}/nvidia-container-toolkit
/etc/nvidia-container-runtime/config.toml
/usr/libexec/oci/hooks.d/oci-nvidia-hook
/usr/share/containers/oci/hooks.d/oci-nvidia-hook.json
%changelog

scripts/build-all-components.sh Executable file

@@ -0,0 +1,67 @@
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is used to build the packages for the components of the NVIDIA
# Container Stack. These include the nvidia-container-toolkit in this repository
# as well as the components included in the third_party folder.
# All required packages are generated in the specified dist folder.
function assert_usage() {
echo "Missing argument $1"
echo "$(basename ${BASH_SOURCE[0]}) TARGET"
exit 1
}
set -e -x
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
PROJECT_ROOT="$( cd ${SCRIPTS_DIR}/.. && pwd )"
if [[ $# -ne 1 ]]; then
assert_usage "TARGET"
fi
TARGET=$1
: ${DIST_DIR:=${PROJECT_ROOT}/dist}
export DIST_DIR
echo "Building ${TARGET} for all packages to ${DIST_DIR}"
: ${LIBNVIDIA_CONTAINER_ROOT:=${PROJECT_ROOT}/third_party/libnvidia-container}
: ${NVIDIA_CONTAINER_TOOLKIT_ROOT:=${PROJECT_ROOT}}
: ${NVIDIA_CONTAINER_RUNTIME_ROOT:=${PROJECT_ROOT}/third_party/nvidia-container-runtime}
: ${NVIDIA_DOCKER_ROOT:=${PROJECT_ROOT}/third_party/nvidia-docker}
${SCRIPTS_DIR}/get-component-versions.sh
# Build libnvidia-container
make -C ${LIBNVIDIA_CONTAINER_ROOT} -f mk/docker.mk ${TARGET}
# Build nvidia-container-toolkit
make -C ${NVIDIA_CONTAINER_TOOLKIT_ROOT} ${TARGET}
# We set the TOOLKIT_VERSION for the nvidia-container-runtime and nvidia-docker targets
# TODO: This is not yet enabled in the makefiles below
: ${PREVIOUS_TOOLKIT_VERSION:=1.5.1}
echo "Using TOOLKIT_VERSION=${PREVIOUS_TOOLKIT_VERSION} as previous nvidia-container-toolkit version"
# Build nvidia-container-runtime
make -C ${NVIDIA_CONTAINER_RUNTIME_ROOT} TOOLKIT_VERSION=${PREVIOUS_TOOLKIT_VERSION} ${TARGET}
# Build nvidia-docker2
make -C ${NVIDIA_DOCKER_ROOT} TOOLKIT_VERSION=${PREVIOUS_TOOLKIT_VERSION} ${TARGET}


@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script determines the versions (and tags) of the components of the NVIDIA
# Container Stack: the nvidia-container-toolkit in this repository as well as
# the components included in the third_party folder. The resulting versions are
# printed as variable assignments.
function assert_usage() {
exit 1
}
set -e
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
PROJECT_ROOT="$( cd ${SCRIPTS_DIR}/.. && pwd )"
: ${LIBNVIDIA_CONTAINER_ROOT:=${PROJECT_ROOT}/third_party/libnvidia-container}
: ${NVIDIA_CONTAINER_TOOLKIT_ROOT:=${PROJECT_ROOT}}
: ${NVIDIA_CONTAINER_RUNTIME_ROOT:=${PROJECT_ROOT}/third_party/nvidia-container-runtime}
: ${NVIDIA_DOCKER_ROOT:=${PROJECT_ROOT}/third_party/nvidia-docker}
# Get version for libnvidia-container
libnvidia_container_version=$(grep "#define NVC_VERSION" ${LIBNVIDIA_CONTAINER_ROOT}/src/nvc.h \
| sed -e 's/#define NVC_VERSION[[:space:]]"\(.*\)"/\1/')
# Get version for nvidia-container-toolkit
nvidia_container_toolkit_version=$(grep -m 1 "^LIB_VERSION := " ${NVIDIA_CONTAINER_TOOLKIT_ROOT}/Makefile | sed -e 's/LIB_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_container_toolkit_tag=$(grep -m 1 "^LIB_TAG .= " ${NVIDIA_CONTAINER_TOOLKIT_ROOT}/Makefile | sed -e 's/LIB_TAG .=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_container_toolkit_version="${nvidia_container_toolkit_version}${nvidia_container_toolkit_tag:+~${nvidia_container_toolkit_tag}}"
# Get version for nvidia-container-runtime
nvidia_container_runtime_version=$(grep -m 1 "^LIB_VERSION := " ${NVIDIA_CONTAINER_RUNTIME_ROOT}/Makefile | sed -e 's/LIB_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_container_runtime_tag=$(grep -m 1 "^LIB_TAG .= " ${NVIDIA_CONTAINER_RUNTIME_ROOT}/Makefile | sed -e 's/LIB_TAG .=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_container_runtime_version="${nvidia_container_runtime_version}${nvidia_container_runtime_tag:+~${nvidia_container_runtime_tag}}"
# Get version for nvidia-docker
nvidia_docker_version=$(grep -m 1 "^LIB_VERSION := " ${NVIDIA_DOCKER_ROOT}/Makefile | sed -e 's/LIB_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_docker_tag=$(grep -m 1 "^LIB_TAG .= " ${NVIDIA_DOCKER_ROOT}/Makefile | sed -e 's/LIB_TAG .=[[:space:]]\(.*\)[[:space:]]*/\1/')
nvidia_docker_version="${nvidia_docker_version}${nvidia_docker_tag:+~${nvidia_docker_tag}}"
echo "LIBNVIDIA_CONTAINER_VERSION=${libnvidia_container_version}"
echo "NVIDIA_CONTAINER_TOOLKIT_VERSION=${nvidia_container_toolkit_version}"
if [[ "${libnvidia_container_version}" != "${nvidia_container_toolkit_version}" ]]; then
>&2 echo "WARNING: The libnvidia-container and nvidia-container-toolkit versions do not match"
fi
echo "NVIDIA_CONTAINER_RUNTIME_VERSION=${nvidia_container_runtime_version}"
echo "NVIDIA_DOCKER_VERSION=${nvidia_docker_version}"

57
scripts/release.sh Executable file

@@ -0,0 +1,57 @@
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is used to build the packages for the components of the NVIDIA
# Container Stack. These include the nvidia-container-toolkit in this repository
# as well as the components included in the third_party folder.
# All required packages are generated in the specified dist folder.
set -e -x
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
PROJECT_ROOT="$( cd ${SCRIPTS_DIR}/.. && pwd )"
# This list represents the distribution-architecture pairs that are actually published
# to the relevant repositories. The targets forwarded to the build-all-components script
# can be overridden by specifying command line arguments.
all=(
amazonlinux1-x86_64
amazonlinux2-aarch64
amazonlinux2-x86_64
centos7-ppc64le
centos7-x86_64
centos8-aarch64
centos8-ppc64le
centos8-x86_64
debian10-amd64
debian9-amd64
opensuse-leap15.1-x86_64
ubuntu16.04-amd64
ubuntu16.04-ppc64le
ubuntu18.04-amd64
ubuntu18.04-arm64
ubuntu18.04-ppc64le
)
if [[ $# -gt 0 ]]; then
targets=($*)
else
targets=${all[@]}
fi
for target in ${targets[@]}; do
${SCRIPTS_DIR}/build-all-components.sh ${target}
done
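A brief usage sketch: with no arguments the script builds every published distribution-architecture pair listed above, while explicit arguments restrict the build to those targets.
```bash
# Build packages for the full default target list.
./scripts/release.sh

# Build only the targets used by the release tests under test/release.
./scripts/release.sh ubuntu18.04-amd64 centos8-x86_64
```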


@@ -0,0 +1,2 @@
#!/bin/bash
echo mock hook

2
test/bin/runc Executable file

@@ -0,0 +1,2 @@
#!/bin/bash
echo mock runc

178
test/input/test_spec.json Normal file

@@ -0,0 +1,178 @@
{
"ociVersion": "1.0.1-dev",
"process": {
"terminal": true,
"user": {
"uid": 0,
"gid": 0
},
"args": [
"sh"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm"
],
"cwd": "/",
"capabilities": {
"bounding": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"effective": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"inheritable": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"permitted": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
],
"ambient": [
"CAP_AUDIT_WRITE",
"CAP_KILL",
"CAP_NET_BIND_SERVICE"
]
},
"rlimits": [
{
"type": "RLIMIT_NOFILE",
"hard": 1024,
"soft": 1024
}
],
"noNewPrivileges": true
},
"root": {
"path": "rootfs",
"readonly": true
},
"hostname": "runc",
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc"
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": [
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination": "/dev/shm",
"type": "tmpfs",
"source": "shm",
"options": [
"nosuid",
"noexec",
"nodev",
"mode=1777",
"size=65536k"
]
},
{
"destination": "/dev/mqueue",
"type": "mqueue",
"source": "mqueue",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
},
{
"destination": "/sys/fs/cgroup",
"type": "cgroup",
"source": "cgroup",
"options": [
"nosuid",
"noexec",
"nodev",
"relatime",
"ro"
]
}
],
"linux": {
"resources": {
"devices": [
{
"allow": false,
"access": "rwm"
}
]
},
"namespaces": [
{
"type": "pid"
},
{
"type": "network"
},
{
"type": "ipc"
},
{
"type": "uts"
},
{
"type": "mount"
}
],
"maskedPaths": [
"/proc/kcore",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/sys/firmware",
"/proc/scsi"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
}
}

59
test/release/Makefile Normal file

@@ -0,0 +1,59 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKFLOW ?= nvidia-docker
TEST_REPO ?= elezar.github.io
DISTRIBUTIONS := ubuntu18.04 centos8
IMAGE_TARGETS := $(patsubst %,image-%, $(DISTRIBUTIONS))
RUN_TARGETS := $(patsubst %,run-%, $(DISTRIBUTIONS))
RELEASE_TARGETS := $(patsubst %,release-%, $(DISTRIBUTIONS))
LOCAL_TARGETS := $(patsubst %,local-%, $(DISTRIBUTIONS))
.PHONY: $(IMAGE_TARGETS)
image-%: DOCKERFILE = docker/$(*)/Dockerfile
images: $(IMAGE_TARGETS)
$(IMAGE_TARGETS): image-%:
docker build \
--build-arg WORKFLOW="$(WORKFLOW)" \
--build-arg TEST_REPO="$(TEST_REPO)" \
-t nvidia-container-toolkit-repo-test:$(*) \
-f $(DOCKERFILE) \
$(shell dirname $(DOCKERFILE))
%-ubuntu18.04: ARCH = amd64
%-centos8: ARCH = x86_64
RELEASE_TEST_DIR := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
PROJECT_ROOT := $(RELEASE_TEST_DIR)/../..
LOCAL_PACKAGE_ROOT := $(PROJECT_ROOT)/dist
local-%: DIST = $(*)
local-%: LOCAL_REPO_ARGS = -v $(LOCAL_PACKAGE_ROOT)/$(DIST)/$(ARCH):/local-repository
$(LOCAL_TARGETS): local-%: release-% run-% | release-%
run-%: DIST = $(*)
$(RUN_TARGETS): run-%:
docker run --rm -ti \
$(LOCAL_REPO_ARGS) \
nvidia-container-toolkit-repo-test:$(*)
# Ensure that the local package root exists
$(LOCAL_PACKAGE_ROOT)/%:
mkdir -p $(@)
$(RELEASE_TARGETS): release-%: $(LOCAL_PACKAGE_ROOT)/$(*)/$(ARCH)
$(PROJECT_ROOT)/scripts/release.sh $(*)-$(ARCH)
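As a rough illustration of what the `local-%` targets do, the following is approximately equivalent to `make local-centos8`, assuming the test image has already been built with `make images` and the commands are run from the repository root:
```bash
# Build the centos8-x86_64 packages, then start the test container with the
# local packages mounted where the entrypoint expects a local repository.
./scripts/release.sh centos8-x86_64
docker run --rm -ti \
    -v "$(pwd)/dist/centos8/x86_64:/local-repository" \
    nvidia-container-toolkit-repo-test:centos8
```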

107
test/release/README.md Normal file

@@ -0,0 +1,107 @@
# Testing Packaging Workflows
## Building the Docker images:
```bash
make images
```
This assumes that the `nvidia-docker` workflow is being tested and that the test packages have been published to the `elezar.github.io` GitHub Pages site. Both can be overridden
using the `WORKFLOW` and `TEST_REPO` make variables.
Valid values for `WORKFLOW` are:
* `nvidia-docker`
* `nvidia-container-runtime`
This follows the instructions for setting up the [`nvidia-docker` repository](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#setting-up-nvidia-container-toolkit).
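For example, to build the images for the `nvidia-container-runtime` workflow against a different test repository (the repository host below is illustrative):
```bash
make images WORKFLOW=nvidia-container-runtime TEST_REPO=my-user.github.io
```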
## Testing local package changes
Running:
```bash
make local-ubuntu18.04
```
will build the `ubuntu18.04-amd64` packages for release and launch a docker container with these packages added to a local APT repository.
The various `apt` workflows can then be tested as if the packages were released to the `libnvidia-container` experimental repository.
The `local-centos8` make target is available for testing the `centos8-x86_64` workflows, which are representative of `yum`-based installation workflows.
### Example
In the `centos8`-based container above we see the following `nvidia-docker2` packages available with the `local-repository` disabled:
```bash
$ yum list --showduplicates nvidia-docker2 | tail -2
nvidia-docker2.noarch 2.5.0-1 nvidia-docker
nvidia-docker2.noarch 2.6.0-1 nvidia-docker
```
Installing `nvidia-docker2` with:
```bash
$ yum install -y nvidia-docker2
```
installs the following packages:
```bash
$ yum list installed | grep nvidia
libnvidia-container-tools.x86_64 1.5.1-1 @libnvidia-container
libnvidia-container1.x86_64 1.5.1-1 @libnvidia-container
nvidia-container-runtime.x86_64 3.5.0-1 @nvidia-container-runtime
nvidia-container-toolkit.x86_64 1.5.1-2 @nvidia-container-runtime
nvidia-docker2.noarch 2.6.0-1 @nvidia-docker
```
Note the repositories from which these packages were installed.
We now enable the `local-repository` to simulate the new packages being published to the `libnvidia-container` experimental repository and check the available `nvidia-docker2` versions:
```bash
$ yum-config-manager --enable local-repository
$ yum list --showduplicates nvidia-docker2 | tail -2
nvidia-docker2.noarch 2.6.0-1 nvidia-docker
nvidia-docker2.noarch 2.6.1-0.1.rc.1 local-repository
```
This shows the new version available from the local repository.
Running:
```
$ yum install nvidia-docker2
Last metadata expiration check: 0:01:15 ago on Fri Sep 24 12:49:20 2021.
Package nvidia-docker2-2.6.0-1.noarch is already installed.
Dependencies resolved.
===============================================================================================================================
Package Architecture Version Repository Size
===============================================================================================================================
Upgrading:
libnvidia-container-tools x86_64 1.6.0-0.1.rc.1 local-repository 48 k
libnvidia-container1 x86_64 1.6.0-0.1.rc.1 local-repository 95 k
nvidia-container-toolkit x86_64 1.6.0-0.1.rc.1 local-repository 1.5 M
replacing nvidia-container-runtime.x86_64 3.5.0-1
nvidia-docker2 noarch 2.6.1-0.1.rc.1 local-repository 13 k
Transaction Summary
===============================================================================================================================
Upgrade 4 Packages
Total size: 1.7 M
```
This shows that all the components of the stack will be updated with versions from the `local-repository`.
After installation the installed packages are shown as:
```bash
$ yum list installed | grep nvidia
libnvidia-container-tools.x86_64 1.6.0-0.1.rc.1 @local-repository
libnvidia-container1.x86_64 1.6.0-0.1.rc.1 @local-repository
nvidia-container-toolkit.x86_64 1.6.0-0.1.rc.1 @local-repository
nvidia-docker2.noarch 2.6.1-0.1.rc.1 @local-repository
```
This shows that:
1. All versions have been installed from the same repository
2. The `nvidia-container-runtime` package was removed as it is no longer required.
The `nvidia-container-runtime` executable is, however, still present on the system:
```bash
# ls -l /usr/bin/nvidia-container-runtime
-rwxr-xr-x 1 root root 2256280 Sep 24 12:42 /usr/bin/nvidia-container-runtime
```

35
test/release/docker/centos8/Dockerfile Normal file

@@ -0,0 +1,35 @@
FROM centos:8
RUN yum install -y \
yum-utils \
ruby-devel \
gcc \
make \
rpm-build \
rubygems \
createrepo
RUN gem install --no-document fpm
# We create and install a dummy docker package since these dependencies are out of
# scope for the tests performed here.
RUN fpm -s empty \
-t rpm \
--description "A dummy package for docker-ce_18.06.3.ce-3.el7" \
-n docker-ce --version 18.06.3.ce-3.el7 \
-p /tmp/docker.rpm \
&& \
yum localinstall -y /tmp/docker.rpm \
&& \
rm -f /tmp/docker.rpm
ARG WORKFLOW=nvidia-docker
ARG TEST_REPO=nvidia.github.io
ENV TEST_REPO ${TEST_REPO}
RUN curl -s -L https://nvidia.github.io/${WORKFLOW}/centos8/nvidia-docker.repo \
| tee /etc/yum.repos.d/nvidia-docker.repo
COPY entrypoint.sh /
ENTRYPOINT [ "/entrypoint.sh" ]

42
test/release/docker/centos8/entrypoint.sh Executable file

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is the entrypoint for the release test containers. It configures
# either a local package repository (when one is mounted at /local-repository)
# or the specified test repository before handing control to an interactive shell.
: ${LOCAL_REPO_DIRECTORY:=/local-repository}
if [[ -d ${LOCAL_REPO_DIRECTORY} ]]; then
echo "Setting up local-repository"
createrepo /local-repository
cat >/etc/yum.repos.d/local.repo <<EOL
[local-repository]
name=NVIDIA Container Toolkit Local Packages
baseurl=file:///local-repository
enabled=0
gpgcheck=0
protect=1
EOL
yum-config-manager --enable local-repository
else
echo "Setting up TEST repo: ${TEST_REPO}"
sed -i -e "s#nvidia\.github\.io/libnvidia-container#${TEST_REPO}/libnvidia-container#g" /etc/yum.repos.d/nvidia-docker.repo
yum-config-manager --enable libnvidia-container-experimental
fi
exec bash $@

50
test/release/docker/ubuntu18.04/Dockerfile Normal file

@@ -0,0 +1,50 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM ubuntu:18.04
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install --no-install-recommends -y \
curl \
gnupg2 \
apt-transport-https \
ca-certificates \
apt-utils \
ruby ruby-dev rubygems build-essential
RUN gem install --no-document fpm
# We create and install a dummy docker package since these dependencies are out of
# scope for the tests performed here.
RUN fpm -s empty \
-t deb \
--description "A dummy package for docker.io_18.06.0" \
-n docker.io --version 18.06.0 \
-p /tmp/docker.deb \
--deb-no-default-config-files \
&& \
dpkg -i /tmp/docker.deb \
&& \
rm -f /tmp/docker.deb
ARG WORKFLOW=nvidia-docker
RUN curl -s -L https://nvidia.github.io/${WORKFLOW}/gpgkey | apt-key add - \
&& curl -s -L https://nvidia.github.io/${WORKFLOW}/ubuntu18.04/nvidia-docker.list | tee /etc/apt/sources.list.d/nvidia-docker.list \
&& apt-get update
COPY entrypoint.sh /
COPY install_repo.sh /
ENTRYPOINT [ "/entrypoint.sh" ]

34
test/release/docker/ubuntu18.04/entrypoint.sh Executable file

@@ -0,0 +1,34 @@
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is the entrypoint for the release test containers. It configures
# either a local package repository (when one is mounted at /local-repository)
# or a remote test repository before handing control to an interactive shell.
: ${LOCAL_REPO_DIRECTORY:=/local-repository}
if [[ -d ${LOCAL_REPO_DIRECTORY} ]]; then
echo "Setting up local-repository"
echo "deb [trusted=yes] file:/local-repository ./" > /etc/apt/sources.list.d/local.list
(cd /local-repository && apt-ftparchive packages . > Packages)
elif [[ -n ${TEST_REPO} ]]; then
./install_repo.sh ${TEST_REPO}
else
echo "Skipping repo setup"
fi
apt-get update
exec bash $@
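Once the container is running with a local repository mounted, the local packages should be visible to `apt`. A quick check inside the container (assuming the `Packages` index was generated as above):
```bash
# List the candidate versions of nvidia-docker2; entries served from the
# file:/local-repository source should appear alongside the remote ones.
apt-get update
apt-cache madison nvidia-docker2
```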

25
test/release/docker/ubuntu18.04/install_repo.sh Executable file

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script points the apt sources at the given test repository for
# libnvidia-container and enables the experimental packages defined in the
# nvidia-docker.list file.
test_repo=$1
echo "Setting up TEST repo: ${test_repo}"
sed -i -e "s#nvidia\.github\.io/libnvidia-container#${test_repo}/libnvidia-container#g" /etc/apt/sources.list.d/nvidia-docker.list
sed -i -e '/experimental/ s/^#//g' /etc/apt/sources.list.d/nvidia-docker.list
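A usage sketch matching how the entrypoint invokes this script; the repository host is the default `TEST_REPO` from the test Makefile:
```bash
# Point the apt sources at the test repository and enable the experimental
# packages defined in nvidia-docker.list.
./install_repo.sh elezar.github.io
```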

1
third_party/nvidia-docker vendored Submodule

15
vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file

@@ -0,0 +1,15 @@
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

145
vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file

@@ -0,0 +1,145 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which make the implementation of unsafeReflectValue more complex.
// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
type flag uintptr
var (
// flagRO indicates whether the value field of a reflect.Value
// is read-only.
flagRO flag
// flagAddr indicates whether the address of the reflect.Value's
// value may be taken.
flagAddr flag
)
// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)
// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations.
var okFlags = []struct {
ro, addr flag
}{{
// From Go 1.4 to 1.5
ro: 1 << 5,
addr: 1 << 7,
}, {
// Up to Go tip.
ro: 1<<5 | 1<<6,
addr: 1 << 8,
}}
var flagValOffset = func() uintptr {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
return field.Offset
}()
// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) reflect.Value {
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
return v
}
flagFieldPtr := flagField(&v)
*flagFieldPtr &^= flagRO
*flagFieldPtr |= flagAddr
return v
}
// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
func init() {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
panic("reflect.Value flag field has changed kind")
}
type t0 int
var t struct {
A t0
// t0 will have flagEmbedRO set.
t0
// a will have flagStickyRO set
a t0
}
vA := reflect.ValueOf(t).FieldByName("A")
va := reflect.ValueOf(t).FieldByName("a")
vt0 := reflect.ValueOf(t).FieldByName("t0")
// Infer flagRO from the difference between the flags
// for the (otherwise identical) fields in t.
flagPublic := *flagField(&vA)
flagWithRO := *flagField(&va) | *flagField(&vt0)
flagRO = flagPublic ^ flagWithRO
// Infer flagAddr from the difference between a value
// taken from a pointer and not.
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
flagNoPtr := *flagField(&vA)
flagPtr := *flagField(&vPtrA)
flagAddr = flagNoPtr ^ flagPtr
// Check that the inferred flags tally with one of the known versions.
for _, f := range okFlags {
if flagRO == f.ro && flagAddr == f.addr {
return
}
}
panic("reflect.Value read-only flag has changed semantics")
}

Some files were not shown because too many files have changed in this diff.