Add binary target and use go mod

Signed-off-by: Renaud Gaubert <rgaubert@nvidia.com>
Renaud Gaubert
2020-04-11 17:18:00 -07:00
parent 60f165ad69
commit 87c8a868f9
48 changed files with 123 additions and 25 deletions

12 pkg/Godeps/Godeps.json generated Normal file

@@ -0,0 +1,12 @@
{
"ImportPath": "github.com/nvidia/nvidia-container-runtime/toolkit/nvidia-container-toolkit",
"GoVersion": "go1.9",
"GodepVersion": "v80",
"Deps": [
{
"ImportPath": "github.com/BurntSushi/toml",
"Comment": "v0.3.0-7-ga368813",
"Rev": "a368813c5e648fee92e5f6c30e3944ff9d5e8895"
}
]
}
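
The commit title mentions go mod, while the manifest above is still in Godeps form. The equivalent go.mod would be just a module line and one pinned dependency — a sketch, with an illustrative version string rather than the exact pseudo-version for commit a368813:

module github.com/nvidia/nvidia-container-runtime/toolkit/nvidia-container-toolkit

require github.com/BurntSushi/toml v0.3.0 // illustrative; a real go.mod would pin a pseudo-version encoding commit a368813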

25 pkg/capabilities.go Normal file

@@ -0,0 +1,25 @@
package main
import (
"log"
)
func capabilityToCLI(cap string) string {
switch cap {
case "compute":
return "--compute"
case "compat32":
return "--compat32"
case "graphics":
return "--graphics"
case "utility":
return "--utility"
case "video":
return "--video"
case "display":
return "--display"
default:
log.Panicln("unknown driver capability:", cap)
}
return ""
}
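
Illustration only, not part of the commit: each capability maps to a CLI flag of the same name, and an unknown capability panics. A hypothetical example test in the same package (fmt and strings imports assumed; allDriverCapabilities is the constant defined in pkg/container_config.go below):

func Example_capabilityToCLI() {
	for _, c := range strings.Split(allDriverCapabilities, ",") {
		fmt.Println(c, "->", capabilityToCLI(c))
	}
	// Output:
	// compute -> --compute
	// compat32 -> --compat32
	// graphics -> --graphics
	// utility -> --utility
	// video -> --video
	// display -> --display
}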

BIN pkg/container-toolkit Executable file

Binary file not shown.

265 pkg/container_config.go Normal file

@@ -0,0 +1,265 @@
package main
import (
"encoding/json"
"fmt"
"log"
"os"
"path"
"strconv"
"strings"
)
var envSwarmGPU *string
const (
envCUDAVersion = "CUDA_VERSION"
envNVRequirePrefix = "NVIDIA_REQUIRE_"
envNVRequireCUDA = envNVRequirePrefix + "CUDA"
envNVDisableRequire = "NVIDIA_DISABLE_REQUIRE"
envNVVisibleDevices = "NVIDIA_VISIBLE_DEVICES"
envNVDriverCapabilities = "NVIDIA_DRIVER_CAPABILITIES"
)
const (
allDriverCapabilities = "compute,compat32,graphics,utility,video,display"
defaultDriverCapabilities = "utility"
)
type nvidiaConfig struct {
Devices string
DriverCapabilities string
Requirements []string
DisableRequire bool
}
type containerConfig struct {
Pid int
Rootfs string
Env map[string]string
Nvidia *nvidiaConfig
}
// github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L94-L100
type Root struct {
Path string `json:"path"`
}
// github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L30-L57
type Process struct {
Env []string `json:"env,omitempty"`
}
// We use pointers to structs, similarly to the latest version of runtime-spec:
// https://github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/config.go#L5-L28
type Spec struct {
Process *Process `json:"process,omitempty"`
Root *Root `json:"root,omitempty"`
}
type HookState struct {
Pid int `json:"pid,omitempty"`
// After 17.06, runc is using the runtime spec:
// github.com/docker/runc/blob/17.06/libcontainer/configs/config.go#L262-L263
// github.com/opencontainers/runtime-spec/blob/v1.0.0/specs-go/state.go#L3-L17
Bundle string `json:"bundle"`
// Before 17.06, runc used a custom struct that didn't conform to the spec:
// github.com/docker/runc/blob/17.03.x/libcontainer/configs/config.go#L245-L252
BundlePath string `json:"bundlePath"`
}
func parseCudaVersion(cudaVersion string) (vmaj, vmin, vpatch uint32) {
if _, err := fmt.Sscanf(cudaVersion, "%d.%d.%d\n", &vmaj, &vmin, &vpatch); err != nil {
vpatch = 0
if _, err := fmt.Sscanf(cudaVersion, "%d.%d\n", &vmaj, &vmin); err != nil {
vmin = 0
if _, err := fmt.Sscanf(cudaVersion, "%d\n", &vmaj); err != nil {
log.Panicln("invalid CUDA version:", cudaVersion)
}
}
}
return
}
func getEnvMap(e []string) (m map[string]string) {
m = make(map[string]string)
for _, s := range e {
p := strings.SplitN(s, "=", 2)
if len(p) != 2 {
log.Panicln("environment error")
}
m[p[0]] = p[1]
}
return
}
func loadSpec(path string) (spec *Spec) {
f, err := os.Open(path)
if err != nil {
log.Panicln("could not open OCI spec:", err)
}
defer f.Close()
if err = json.NewDecoder(f).Decode(&spec); err != nil {
log.Panicln("could not decode OCI spec:", err)
}
if spec.Process == nil {
log.Panicln("Process is empty in OCI spec")
}
if spec.Root == nil {
log.Panicln("Root is empty in OCI spec")
}
return
}
func getDevices(env map[string]string) *string {
gpuVars := []string{envNVVisibleDevices}
if envSwarmGPU != nil {
// The Swarm resource has higher precedence.
gpuVars = append([]string{*envSwarmGPU}, gpuVars...)
}
for _, gpuVar := range gpuVars {
if devices, ok := env[gpuVar]; ok {
return &devices
}
}
return nil
}
func getDriverCapabilities(env map[string]string) *string {
if capabilities, ok := env[envNVDriverCapabilities]; ok {
return &capabilities
}
return nil
}
func getRequirements(env map[string]string) []string {
// All variables with the "NVIDIA_REQUIRE_" prefix are passed to nvidia-container-cli
var requirements []string
for name, value := range env {
if strings.HasPrefix(name, envNVRequirePrefix) {
requirements = append(requirements, value)
}
}
return requirements
}
// Mimic the new CUDA images if no capabilities or devices are specified.
func getNvidiaConfigLegacy(env map[string]string) *nvidiaConfig {
var devices string
if d := getDevices(env); d == nil {
// Environment variable unset: default to "all".
devices = "all"
} else if len(*d) == 0 || *d == "void" {
// Environment variable empty or "void": not a GPU container.
return nil
} else {
// Environment variable non-empty and not "void".
devices = *d
}
if devices == "none" {
devices = ""
}
var driverCapabilities string
if c := getDriverCapabilities(env); c == nil {
// Environment variable unset: default to "all".
driverCapabilities = allDriverCapabilities
} else if len(*c) == 0 {
// Environment variable empty: use default capability.
driverCapabilities = defaultDriverCapabilities
} else {
// Environment variable non-empty.
driverCapabilities = *c
}
if driverCapabilities == "all" {
driverCapabilities = allDriverCapabilities
}
requirements := getRequirements(env)
vmaj, vmin, _ := parseCudaVersion(env[envCUDAVersion])
cudaRequire := fmt.Sprintf("cuda>=%d.%d", vmaj, vmin)
requirements = append(requirements, cudaRequire)
// Don't fail on invalid values.
disableRequire, _ := strconv.ParseBool(env[envNVDisableRequire])
return &nvidiaConfig{
Devices: devices,
DriverCapabilities: driverCapabilities,
Requirements: requirements,
DisableRequire: disableRequire,
}
}
func getNvidiaConfig(env map[string]string) *nvidiaConfig {
legacyCudaVersion := env[envCUDAVersion]
cudaRequire := env[envNVRequireCUDA]
if len(legacyCudaVersion) > 0 && len(cudaRequire) == 0 {
// Legacy CUDA image detected.
return getNvidiaConfigLegacy(env)
}
var devices string
if d := getDevices(env); d == nil || len(*d) == 0 || *d == "void" {
// Environment variable unset or empty or "void": not a GPU container.
return nil
} else {
// Environment variable non-empty and not "void".
devices = *d
}
if devices == "none" {
devices = ""
}
var driverCapabilities string
if c := getDriverCapabilities(env); c == nil || len(*c) == 0 {
// Environment variable unset or set but empty: use default capability.
driverCapabilities = defaultDriverCapabilities
} else {
// Environment variable set and non-empty.
driverCapabilities = *c
}
if driverCapabilities == "all" {
driverCapabilities = allDriverCapabilities
}
requirements := getRequirements(env)
// Don't fail on invalid values.
disableRequire, _ := strconv.ParseBool(env[envNVDisableRequire])
return &nvidiaConfig{
Devices: devices,
DriverCapabilities: driverCapabilities,
Requirements: requirements,
DisableRequire: disableRequire,
}
}
func getContainerConfig(hook HookConfig) (config containerConfig) {
var h HookState
d := json.NewDecoder(os.Stdin)
if err := d.Decode(&h); err != nil {
log.Panicln("could not decode container state:", err)
}
b := h.Bundle
if len(b) == 0 {
b = h.BundlePath
}
s := loadSpec(path.Join(b, "config.json"))
env := getEnvMap(s.Process.Env)
envSwarmGPU = hook.SwarmResource
return containerConfig{
Pid: h.Pid,
Rootfs: s.Root.Path,
Env: env,
Nvidia: getNvidiaConfig(env),
}
}
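
A hypothetical walkthrough of the resolution above, not part of the commit (same package assumed, values illustrative):

func exampleNvidiaConfig() {
	env := map[string]string{
		envNVVisibleDevices:     "0,1",
		envNVDriverCapabilities: "compute,utility",
		envNVRequireCUDA:        "cuda>=10.0",
	}
	cfg := getNvidiaConfig(env)
	// cfg.Devices            == "0,1"
	// cfg.DriverCapabilities == "compute,utility"
	// cfg.Requirements       == []string{"cuda>=10.0"}
	// cfg.DisableRequire     == false
	_ = cfg
	// With NVIDIA_VISIBLE_DEVICES unset, empty, or "void",
	// getNvidiaConfig returns nil: not a GPU container.
}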


@@ -1,5 +0,0 @@
nvidia-container-toolkit (@VERSION@) UNRELEASED; urgency=medium
* Initial release. Replaces older package nvidia-container-runtime-hook. (Closes: #XXXXXX)
-- Rajat Chopra <rajatc@nvidia.com> Wed, 10 Jul 2019 11:31:11 -0700


@@ -1,21 +0,0 @@
nvidia-container-runtime-hook (1.4.0-1) UNRELEASED; urgency=medium
* 2f562d5 Add support for the --no-cgroups argument
* 25bd7c4 Add flag to specify configuration file
* 300aaf9 Add support for the --user argument
-- NVIDIA CORPORATION <cudatools@nvidia.com> Thu, 21 Jun 2018 18:15:20 +0000
nvidia-container-runtime-hook (1.3.0-1) UNRELEASED; urgency=medium
* b4d7682 Search for nvidia-container-cli in the custom root directory
* 6e589e0 Add support for display capability
* b406749 Add support for Ubuntu 14.04, Debian Jessie and Amazon Linux 2
-- NVIDIA CORPORATION <cudatools@nvidia.com> Thu, 08 Mar 2018 04:17:34 +0000
nvidia-container-runtime-hook (1.2.1-1) UNRELEASED; urgency=medium
* Extract hook from nvidia-container-runtime package
-- NVIDIA CORPORATION <cudatools@nvidia.com> Wed, 07 Mar 2018 02:43:03 +0000


@@ -1 +0,0 @@
9


@@ -1,17 +0,0 @@
Source: nvidia-container-toolkit
Section: @SECTION@utils
Priority: optional
Maintainer: NVIDIA CORPORATION <cudatools@nvidia.com>
Standards-Version: 3.9.8
Homepage: https://github.com/NVIDIA/nvidia-container-runtime/wiki
Vcs-Git: https://github.com/NVIDIA/nvidia-container-runtime
Vcs-Browser: https://github.com/NVIDIA/nvidia-container-runtime
Build-Depends: debhelper (>= 9)
Package: nvidia-container-toolkit
Architecture: any
Depends: ${misc:Depends}, libnvidia-container-tools (>= 0.1.0), libnvidia-container-tools (<< 2.0.0)
Breaks: nvidia-container-runtime (<< 2.0.0), nvidia-container-runtime-hook
Replaces: nvidia-container-runtime (<< 2.0.0), nvidia-container-runtime-hook
Description: NVIDIA container runtime hook
Provides a OCI hook to enable GPU support in containers.


@@ -1,35 +0,0 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: nvidia-container-toolkit
Source: https://github.com/NVIDIA/nvidia-container-runtime
Files: *
Copyright: 2017-2018 NVIDIA CORPORATION <cudatools@nvidia.com>
License: BSD-3-Clause
License: BSD-3-clause
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
.
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
.
Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,2 +0,0 @@
config.toml /etc/nvidia-container-runtime
nvidia-container-toolkit /usr/bin


@@ -1,4 +0,0 @@
new-package-should-close-itp-bug
binary-without-manpage
statically-linked-binary
description-is-pkg-name


@@ -1,28 +0,0 @@
#!/bin/sh
set -e
NVIDIA_CONTAINER_RUNTIME_HOOK=/usr/bin/nvidia-container-runtime-hook
NVIDIA_CONTAINER_TOOLKIT=/usr/bin/nvidia-container-toolkit
case "$1" in
configure)
if [ -f "${NVIDIA_CONTAINER_TOOLKIT}" ]; then
if [ ! -e "${NVIDIA_CONTAINER_RUNTIME_HOOK}" ]; then
ln -s ${NVIDIA_CONTAINER_TOOLKIT} ${NVIDIA_CONTAINER_RUNTIME_HOOK}
fi
fi
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
#DEBHELPER#
exit 0


@@ -1,24 +0,0 @@
#!/bin/sh
set -e
NVIDIA_CONTAINER_RUNTIME_HOOK=/usr/bin/nvidia-container-runtime-hook
NVIDIA_CONTAINER_TOOLKIT=/usr/bin/nvidia-container-toolkit
case "$1" in
purge)
[ -L "${NVIDIA_CONTAINER_RUNTIME_HOOK}" ] && rm ${NVIDIA_CONTAINER_RUNTIME_HOOK}
;;
upgrade|failed-upgrade|remove|abort-install|abort-upgrade|disappear)
;;
*)
echo "postrm called with unknown argument \`$1'" >&2
exit 1
;;
esac
#DEBHELPER#
exit 0


@@ -1,9 +0,0 @@
#! /bin/bash
set -e
sed -i "s;@SECTION@;${SECTION:+$SECTION/};g" debian/control
if [ -n "$DISTRIB" ]; then
sed -i "s;UNRELEASED;$DISTRIB;" debian/changelog
fi


@@ -1,7 +0,0 @@
#!/usr/bin/make -f
# -*- makefile -*-
#export DH_VERBOSE=1
%:
dh $@

83 pkg/hook_config.go Normal file

@@ -0,0 +1,83 @@
package main
import (
"log"
"os"
"path"
"github.com/BurntSushi/toml"
)
const (
configPath = "/etc/nvidia-container-runtime/config.toml"
driverPath = "/run/nvidia/driver"
)
var defaultPaths = [...]string{
path.Join(driverPath, configPath),
configPath,
}
// CLIConfig: options for nvidia-container-cli.
type CLIConfig struct {
Root *string `toml:"root"`
Path *string `toml:"path"`
Environment []string `toml:"environment"`
Debug *string `toml:"debug"`
Ldcache *string `toml:"ldcache"`
LoadKmods bool `toml:"load-kmods"`
NoPivot bool `toml:"no-pivot"`
NoCgroups bool `toml:"no-cgroups"`
User *string `toml:"user"`
Ldconfig *string `toml:"ldconfig"`
}
type HookConfig struct {
DisableRequire bool `toml:"disable-require"`
SwarmResource *string `toml:"swarm-resource"`
NvidiaContainerCLI CLIConfig `toml:"nvidia-container-cli"`
}
func getDefaultHookConfig() (config HookConfig) {
return HookConfig{
DisableRequire: false,
SwarmResource: nil,
NvidiaContainerCLI: CLIConfig{
Root: nil,
Path: nil,
Environment: []string{},
Debug: nil,
Ldcache: nil,
LoadKmods: true,
NoPivot: false,
NoCgroups: false,
User: nil,
Ldconfig: nil,
},
}
}
func getHookConfig() (config HookConfig) {
var err error
if len(*configflag) > 0 {
config = getDefaultHookConfig()
_, err = toml.DecodeFile(*configflag, &config)
if err != nil {
log.Panicln("couldn't open configuration file:", err)
}
} else {
for _, p := range defaultPaths {
config = getDefaultHookConfig()
_, err = toml.DecodeFile(p, &config)
if err == nil {
break
} else if !os.IsNotExist(err) {
log.Panicln("couldn't open default configuration file:", err)
}
}
}
return config
}
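
A minimal sketch of the config.toml shape this decoder accepts, not part of the commit (same package and imports assumed; the keys come from the toml tags above, the values are illustrative):

const sampleConfig = `
disable-require = false
# swarm-resource = "DOCKER_RESOURCE_GPU"

[nvidia-container-cli]
environment = []
load-kmods = true
# root = "/run/nvidia/driver"
# user = "root:video"
`

func decodeSampleConfig() HookConfig {
	config := getDefaultHookConfig()
	if _, err := toml.Decode(sampleConfig, &config); err != nil {
		log.Panicln("couldn't parse sample configuration:", err)
	}
	return config
}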

60 pkg/hook_test.go Normal file

@@ -0,0 +1,60 @@
package main
import (
"testing"
)
func TestParseCudaVersionValid(t *testing.T) {
var tests = []struct {
version string
expected [3]uint32
}{
{"0", [3]uint32{0, 0, 0}},
{"8", [3]uint32{8, 0, 0}},
{"7.5", [3]uint32{7, 5, 0}},
{"9.0.116", [3]uint32{9, 0, 116}},
{"4294967295.4294967295.4294967295", [3]uint32{4294967295, 4294967295, 4294967295}},
}
for _, c := range tests {
vmaj, vmin, vpatch := parseCudaVersion(c.version)
if vmaj != c.expected[0] || vmin != c.expected[1] || vpatch != c.expected[2] {
t.Errorf("parseCudaVersion(%s): %d.%d.%d (expected: %v)", c.version, vmaj, vmin, vpatch, c.expected)
}
}
}
func mustPanic(t *testing.T, f func()) {
defer func() {
if err := recover(); err == nil {
t.Error("Test didn't panic!")
}
}()
f()
}
func TestParseCudaVersionInvalid(t *testing.T) {
var tests = []string{
"foo",
"foo.5.10",
"9.0.116.50",
"9.0.116foo",
"7.foo",
"9.0.bar",
"9.4294967296",
"9.0.116.",
"9..0",
"9.",
".5.10",
"-9",
"+9",
"-9.1.116",
"-9.-1.-116",
}
for _, c := range tests {
mustPanic(t, func() {
t.Logf("parseCudaVersion(%s)", c)
parseCudaVersion(c)
})
}
}
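
These are plain table-driven tests; assuming the layout above, something like go test ./pkg/ from the repository root runs them.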

182 pkg/main.go Normal file

@@ -0,0 +1,182 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"syscall"
)
var (
debugflag = flag.Bool("debug", false, "enable debug output")
configflag = flag.String("config", "", "configuration file")
defaultPATH = []string{"/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", "/bin"}
)
func exit() {
if err := recover(); err != nil {
if _, ok := err.(runtime.Error); ok {
log.Println(err)
}
if *debugflag {
log.Printf("%s", debug.Stack())
}
os.Exit(1)
}
os.Exit(0)
}
func getPATH(config CLIConfig) string {
dirs := filepath.SplitList(os.Getenv("PATH"))
// directories from the hook environment have higher precedence
dirs = append(dirs, defaultPATH...)
if config.Root != nil {
rootDirs := []string{}
for _, dir := range dirs {
rootDirs = append(rootDirs, path.Join(*config.Root, dir))
}
// directories with the root prefix have higher precedence
dirs = append(rootDirs, dirs...)
}
return strings.Join(dirs, ":")
}
func getCLIPath(config CLIConfig) string {
if config.Path != nil {
return *config.Path
}
if err := os.Setenv("PATH", getPATH(config)); err != nil {
log.Panicln("couldn't set PATH variable:", err)
}
path, err := exec.LookPath("nvidia-container-cli")
if err != nil {
log.Panicln("couldn't find binary nvidia-container-cli in", os.Getenv("PATH"), ":", err)
}
return path
}
// getRootfsPath returns an absolute path. We don't need to resolve symlinks for now.
func getRootfsPath(config containerConfig) string {
rootfs, err := filepath.Abs(config.Rootfs)
if err != nil {
log.Panicln(err)
}
return rootfs
}
func doPrestart() {
var err error
defer exit()
log.SetFlags(0)
hook := getHookConfig()
cli := hook.NvidiaContainerCLI
container := getContainerConfig(hook)
nvidia := container.Nvidia
if nvidia == nil {
// Not a GPU container, nothing to do.
return
}
rootfs := getRootfsPath(container)
args := []string{getCLIPath(cli)}
if cli.Root != nil {
args = append(args, fmt.Sprintf("--root=%s", *cli.Root))
}
if cli.LoadKmods {
args = append(args, "--load-kmods")
}
if cli.NoPivot {
args = append(args, "--no-pivot")
}
if *debugflag {
args = append(args, "--debug=/dev/stderr")
} else if cli.Debug != nil {
args = append(args, fmt.Sprintf("--debug=%s", *cli.Debug))
}
if cli.Ldcache != nil {
args = append(args, fmt.Sprintf("--ldcache=%s", *cli.Ldcache))
}
if cli.User != nil {
args = append(args, fmt.Sprintf("--user=%s", *cli.User))
}
args = append(args, "configure")
if cli.Ldconfig != nil {
args = append(args, fmt.Sprintf("--ldconfig=%s", *cli.Ldconfig))
}
if cli.NoCgroups {
args = append(args, "--no-cgroups")
}
if len(nvidia.Devices) > 0 {
args = append(args, fmt.Sprintf("--device=%s", nvidia.Devices))
}
for _, cap := range strings.Split(nvidia.DriverCapabilities, ",") {
if len(cap) == 0 {
break
}
args = append(args, capabilityToCLI(cap))
}
if !hook.DisableRequire && !nvidia.DisableRequire {
for _, req := range nvidia.Requirements {
args = append(args, fmt.Sprintf("--require=%s", req))
}
}
args = append(args, fmt.Sprintf("--pid=%s", strconv.FormatUint(uint64(container.Pid), 10)))
args = append(args, rootfs)
env := append(os.Environ(), cli.Environment...)
err = syscall.Exec(args[0], args, env)
log.Panicln("exec failed:", err)
}
func usage() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
flag.PrintDefaults()
fmt.Fprintf(os.Stderr, "\nCommands:\n")
fmt.Fprintf(os.Stderr, " prestart\n run the prestart hook\n")
fmt.Fprintf(os.Stderr, " poststart\n no-op\n")
fmt.Fprintf(os.Stderr, " poststop\n no-op\n")
}
func main() {
flag.Usage = usage
flag.Parse()
args := flag.Args()
if len(args) == 0 {
flag.Usage()
os.Exit(2)
}
switch args[0] {
case "prestart":
doPrestart()
os.Exit(0)
case "poststart":
fallthrough
case "poststop":
os.Exit(0)
default:
flag.Usage()
os.Exit(2)
}
}
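
Tracing doPrestart above with the default hook config (load-kmods enabled, everything else unset) and a legacy image that sets only CUDA_VERSION=9.0, the hook replaces itself with a command line along these lines (CLI path, PID, and rootfs hypothetical):

/usr/bin/nvidia-container-cli --load-kmods configure \
    --device=all \
    --compute --compat32 --graphics --utility --video --display \
    --require=cuda>=9.0 \
    --pid=1234 /var/lib/docker/overlay2/<id>/merged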


@@ -1,25 +0,0 @@
Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of NVIDIA CORPORATION nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,55 +0,0 @@
Name: nvidia-container-toolkit
Version: %{version}
Release: %{release}
Group: Development Tools
Vendor: NVIDIA CORPORATION
Packager: NVIDIA CORPORATION <cudatools@nvidia.com>
Summary: NVIDIA container runtime hook
URL: https://github.com/NVIDIA/nvidia-container-runtime
License: BSD
Source0: nvidia-container-toolkit
Source1: config.toml
Source2: oci-nvidia-hook
Source3: oci-nvidia-hook.json
Source4: LICENSE
Obsoletes: nvidia-container-runtime < 2.0.0, nvidia-container-runtime-hook
Provides: nvidia-container-runtime-hook
Requires: libnvidia-container-tools >= 0.1.0, libnvidia-container-tools < 2.0.0
%description
Provides a OCI hook to enable GPU support in containers.
%prep
cp %{SOURCE0} %{SOURCE1} %{SOURCE2} %{SOURCE3} %{SOURCE4} .
%install
mkdir -p %{buildroot}%{_bindir}
install -m 755 -t %{buildroot}%{_bindir} nvidia-container-toolkit
mkdir -p %{buildroot}/etc/nvidia-container-runtime
install -m 644 -t %{buildroot}/etc/nvidia-container-runtime config.toml
mkdir -p %{buildroot}/usr/libexec/oci/hooks.d
install -m 755 -t %{buildroot}/usr/libexec/oci/hooks.d oci-nvidia-hook
mkdir -p %{buildroot}/usr/share/containers/oci/hooks.d
install -m 644 -t %{buildroot}/usr/share/containers/oci/hooks.d oci-nvidia-hook.json
%posttrans
ln -sf %{_bindir}/nvidia-container-toolkit %{_bindir}/nvidia-container-runtime-hook
%postun
rm -f %{_bindir}/nvidia-container-runtime-hook
%files
%license LICENSE
%{_bindir}/nvidia-container-toolkit
/etc/nvidia-container-runtime/config.toml
/usr/libexec/oci/hooks.d/oci-nvidia-hook
/usr/share/containers/oci/hooks.d/oci-nvidia-hook.json
%changelog