Mirror of https://github.com/NVIDIA/nvidia-container-toolkit, synced 2025-06-26 18:18:24 +00:00
Merge pull request #910 from elezar/default-to-cdi
Some checks failed
Use just-in-time CDI spec generation by default in the NVIDIA Container Runtime
This commit is contained in: commit 0dddd5cfd8
@@ -242,7 +242,14 @@ func (hookConfig *hookConfig) getNvidiaConfig(image image.CUDA, privileged bool)
 	}
 }
 
-func (hookConfig *hookConfig) getContainerConfig() (config containerConfig) {
+func (hookConfig *hookConfig) getContainerConfig() (config *containerConfig) {
+	hookConfig.Lock()
+	defer hookConfig.Unlock()
+
+	if hookConfig.containerConfig != nil {
+		return hookConfig.containerConfig
+	}
+
 	var h HookState
 	d := json.NewDecoder(os.Stdin)
 	if err := d.Decode(&h); err != nil {
@@ -271,10 +278,13 @@ func (hookConfig *hookConfig) getContainerConfig() (config containerConfig) {
 		log.Panicln(err)
 	}
 
-	return containerConfig{
+	cc := containerConfig{
 		Pid:    h.Pid,
 		Rootfs: s.Root.Path,
 		Image:  i,
 		Nvidia: hookConfig.getNvidiaConfig(i, privileged),
 	}
+	hookConfig.containerConfig = &cc
+
+	return hookConfig.containerConfig
 }
@@ -487,7 +487,7 @@ func TestGetNvidiaConfig(t *testing.T) {
 			hookCfg := tc.hookConfig
 			if hookCfg == nil {
 				defaultConfig, _ := config.GetDefault()
-				hookCfg = &hookConfig{defaultConfig}
+				hookCfg = &hookConfig{Config: defaultConfig}
 			}
 			cfg = hookCfg.getNvidiaConfig(image, tc.privileged)
 		}
@@ -7,9 +7,11 @@ import (
 	"path"
 	"reflect"
 	"strings"
+	"sync"
 
 	"github.com/NVIDIA/nvidia-container-toolkit/internal/config"
 	"github.com/NVIDIA/nvidia-container-toolkit/internal/config/image"
+	"github.com/NVIDIA/nvidia-container-toolkit/internal/info"
 )
 
 const (
@@ -20,7 +22,9 @@ const (
 // hookConfig wraps the toolkit config.
 // This allows for functions to be defined on the local type.
 type hookConfig struct {
+	sync.Mutex
 	*config.Config
+	containerConfig *containerConfig
 }
 
 // loadConfig loads the required paths for the hook config.
@@ -55,7 +59,7 @@ func getHookConfig() (*hookConfig, error) {
 	if err != nil {
 		return nil, fmt.Errorf("failed to load config: %v", err)
 	}
-	config := &hookConfig{cfg}
+	config := &hookConfig{Config: cfg}
 
 	allSupportedDriverCapabilities := image.SupportedDriverCapabilities
 	if config.SupportedDriverCapabilities == "all" {
@@ -73,8 +77,8 @@ func getHookConfig() (*hookConfig, error) {
 
 // getConfigOption returns the toml config option associated with the
 // specified struct field.
-func (c hookConfig) getConfigOption(fieldName string) string {
-	t := reflect.TypeOf(c)
+func (c *hookConfig) getConfigOption(fieldName string) string {
+	t := reflect.TypeOf(&c)
 	f, ok := t.FieldByName(fieldName)
 	if !ok {
 		return fieldName
@@ -127,3 +131,21 @@ func (c *hookConfig) nvidiaContainerCliCUDACompatModeFlags() []string {
 	}
 	return []string{flag}
 }
+
+func (c *hookConfig) assertModeIsLegacy() error {
+	if c.NVIDIAContainerRuntimeHookConfig.SkipModeDetection {
+		return nil
+	}
+
+	mr := info.NewRuntimeModeResolver(
+		info.WithLogger(&logInterceptor{}),
+		info.WithImage(&c.containerConfig.Image),
+		info.WithDefaultMode(info.LegacyRuntimeMode),
+	)
+
+	mode := mr.ResolveRuntimeMode(c.NVIDIAContainerRuntimeConfig.Mode)
+	if mode == "legacy" {
+		return nil
+	}
+	return fmt.Errorf("invoking the NVIDIA Container Runtime Hook directly (e.g. specifying the docker --gpus flag) is not supported. Please use the NVIDIA Container Runtime (e.g. specify the --runtime=nvidia flag) instead")
+}
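For orientation, a hedged sketch of how this new helper is consumed: assertModeIsLegacy resolves the mode with a legacy default, so the standalone hook keeps its old behaviour even when platform detection is inconclusive, while the runtime itself now defaults to jit-cdi. The actual call site is the doPrestart hunk further down; the snippet below simply mirrors it and assumes getHookConfig succeeds.

	// Sketch only; mirrors the doPrestart change later in this diff.
	hook, err := getHookConfig()
	if err != nil {
		log.Panicln(err)
	}
	if err := hook.assertModeIsLegacy(); err != nil {
		log.Panicf("%v", err)
	}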
@@ -90,10 +90,10 @@ func TestGetHookConfig(t *testing.T) {
 		}
 	}
 
-	var cfg hookConfig
+	var cfg *hookConfig
 	getHookConfig := func() {
 		c, _ := getHookConfig()
-		cfg = *c
+		cfg = c
 	}
 
 	if tc.expectedPanic {
@@ -55,7 +55,7 @@ func getCLIPath(config config.ContainerCLIConfig) string {
 }
 
 // getRootfsPath returns an absolute path. We don't need to resolve symlinks for now.
-func getRootfsPath(config containerConfig) string {
+func getRootfsPath(config *containerConfig) string {
 	rootfs, err := filepath.Abs(config.Rootfs)
 	if err != nil {
 		log.Panicln(err)
@@ -82,8 +82,8 @@ func doPrestart() {
 		return
 	}
 
-	if !hook.NVIDIAContainerRuntimeHookConfig.SkipModeDetection && info.ResolveAutoMode(&logInterceptor{}, hook.NVIDIAContainerRuntimeConfig.Mode, container.Image) != "legacy" {
-		log.Panicln("invoking the NVIDIA Container Runtime Hook directly (e.g. specifying the docker --gpus flag) is not supported. Please use the NVIDIA Container Runtime (e.g. specify the --runtime=nvidia flag) instead.")
+	if err := hook.assertModeIsLegacy(); err != nil {
+		log.Panicf("%v", err)
 	}
 
 	rootfs := getRootfsPath(container)
@@ -122,11 +122,10 @@ func TestGoodInput(t *testing.T) {
 	err = cmdCreate.Run()
 	require.NoError(t, err, "runtime should not return an error")
 
-	// Check config.json for NVIDIA prestart hook
+	// Check config.json to ensure that the NVIDIA prestart was not inserted.
 	spec, err = cfg.getRuntimeSpec()
 	require.NoError(t, err, "should be no errors when reading and parsing spec from config.json")
-	require.NotEmpty(t, spec.Hooks, "there should be hooks in config.json")
-	require.Equal(t, 1, nvidiaHookCount(spec.Hooks), "exactly one nvidia prestart hook should be inserted correctly into config.json")
+	require.Empty(t, spec.Hooks, "there should be no hooks in config.json")
 }
 
 // NVIDIA prestart hook already present in config file
@@ -168,11 +167,10 @@ func TestDuplicateHook(t *testing.T) {
 	output, err := cmdCreate.CombinedOutput()
 	require.NoErrorf(t, err, "runtime should not return an error", "output=%v", string(output))
 
-	// Check config.json for NVIDIA prestart hook
+	// Check config.json to ensure that the NVIDIA prestart hook was removed.
 	spec, err = cfg.getRuntimeSpec()
 	require.NoError(t, err, "should be no errors when reading and parsing spec from config.json")
-	require.NotEmpty(t, spec.Hooks, "there should be hooks in config.json")
-	require.Equal(t, 1, nvidiaHookCount(spec.Hooks), "exactly one nvidia prestart hook should be inserted correctly into config.json")
+	require.Empty(t, spec.Hooks, "there should be no hooks in config.json")
 }
 
 // addNVIDIAHook is a basic wrapper for an addHookModifier that is used for
@@ -240,18 +238,3 @@ func (c testConfig) generateNewRuntimeSpec() error {
 	}
 	return nil
 }
-
-// Return number of valid NVIDIA prestart hooks in runtime spec
-func nvidiaHookCount(hooks *specs.Hooks) int {
-	if hooks == nil {
-		return 0
-	}
-
-	count := 0
-	for _, hook := range hooks.Prestart {
-		if strings.Contains(hook.Path, nvidiaHook) {
-			count++
-		}
-	}
-	return count
-}
@@ -23,34 +23,114 @@ import (
 	"github.com/NVIDIA/nvidia-container-toolkit/internal/logger"
 )
 
-// ResolveAutoMode determines the correct mode for the platform if set to "auto"
-func ResolveAutoMode(logger logger.Interface, mode string, image image.CUDA) (rmode string) {
-	return resolveMode(logger, mode, image, nil)
+// A RuntimeMode is used to select a specific mode of operation for the NVIDIA Container Runtime.
+type RuntimeMode string
+
+const (
+	// In LegacyRuntimeMode the nvidia-container-runtime injects the
+	// nvidia-container-runtime-hook as a prestart hook into the incoming
+	// container config. This hook invokes the nvidia-container-cli to perform
+	// the required modifications to the container.
+	LegacyRuntimeMode = RuntimeMode("legacy")
+	// In CSVRuntimeMode the nvidia-container-runtime processes a set of CSV
+	// files to determine which container modification are required. The
+	// contents of these CSV files are used to generate an in-memory CDI
+	// specification which is used to modify the container config.
+	CSVRuntimeMode = RuntimeMode("csv")
+	// In CDIRuntimeMode the nvidia-container-runtime applies the modifications
+	// to the container config required for the requested CDI devices in the
+	// same way that other CDI clients would.
+	CDIRuntimeMode = RuntimeMode("cdi")
+	// In JitCDIRuntimeMode the nvidia-container-runtime generates in-memory CDI
+	// specifications for requested NVIDIA devices.
+	JitCDIRuntimeMode = RuntimeMode("jit-cdi")
+)
+
+type RuntimeModeResolver interface {
+	ResolveRuntimeMode(string) RuntimeMode
 }
 
-func resolveMode(logger logger.Interface, mode string, image image.CUDA, propertyExtractor info.PropertyExtractor) (rmode string) {
+type modeResolver struct {
+	logger logger.Interface
+	// TODO: This only needs to consider the requested devices.
+	image             *image.CUDA
+	propertyExtractor info.PropertyExtractor
+	defaultMode       RuntimeMode
+}
+
+type Option func(*modeResolver)
+
+func WithDefaultMode(defaultMode RuntimeMode) Option {
+	return func(mr *modeResolver) {
+		mr.defaultMode = defaultMode
+	}
+}
+
+func WithLogger(logger logger.Interface) Option {
+	return func(mr *modeResolver) {
+		mr.logger = logger
+	}
+}
+
+func WithImage(image *image.CUDA) Option {
+	return func(mr *modeResolver) {
+		mr.image = image
+	}
+}
+
+func WithPropertyExtractor(propertyExtractor info.PropertyExtractor) Option {
+	return func(mr *modeResolver) {
+		mr.propertyExtractor = propertyExtractor
+	}
+}
+
+func NewRuntimeModeResolver(opts ...Option) RuntimeModeResolver {
+	r := &modeResolver{
+		defaultMode: JitCDIRuntimeMode,
+	}
+	for _, opt := range opts {
+		opt(r)
+	}
+	if r.logger == nil {
+		r.logger = &logger.NullLogger{}
+	}
+
+	return r
+}
+
+// ResolveAutoMode determines the correct mode for the platform if set to "auto"
+func ResolveAutoMode(logger logger.Interface, mode string, image image.CUDA) (rmode RuntimeMode) {
+	r := modeResolver{
+		logger:            logger,
+		image:             &image,
+		propertyExtractor: nil,
+	}
+	return r.ResolveRuntimeMode(mode)
+}
+
+func (m *modeResolver) ResolveRuntimeMode(mode string) (rmode RuntimeMode) {
 	if mode != "auto" {
-		logger.Infof("Using requested mode '%s'", mode)
-		return mode
+		m.logger.Infof("Using requested mode '%s'", mode)
+		return RuntimeMode(mode)
 	}
 	defer func() {
-		logger.Infof("Auto-detected mode as '%v'", rmode)
+		m.logger.Infof("Auto-detected mode as '%v'", rmode)
 	}()
 
-	if image.OnlyFullyQualifiedCDIDevices() {
-		return "cdi"
+	if m.image.OnlyFullyQualifiedCDIDevices() {
+		return CDIRuntimeMode
 	}
 
 	nvinfo := info.New(
-		info.WithLogger(logger),
-		info.WithPropertyExtractor(propertyExtractor),
+		info.WithLogger(m.logger),
+		info.WithPropertyExtractor(m.propertyExtractor),
 	)
 
 	switch nvinfo.ResolvePlatform() {
 	case info.PlatformNVML, info.PlatformWSL:
-		return "legacy"
+		return m.defaultMode
 	case info.PlatformTegra:
-		return "csv"
+		return CSVRuntimeMode
 	}
-	return "legacy"
+	return m.defaultMode
 }
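The new resolver is a small functional-options API. A hedged sketch of typical use inside the toolkit module (internal/info cannot be imported from outside it); logger, image, and cfg stand for whatever the caller already holds, and omitting WithDefaultMode leaves the new jit-cdi default in place:

	resolver := info.NewRuntimeModeResolver(
		info.WithLogger(logger),
		info.WithImage(&image),
	)
	// "auto" is resolved against the detected platform; any other value is
	// passed through unchanged as a RuntimeMode (e.g. "legacy" stays legacy).
	mode := resolver.ResolveRuntimeMode(cfg.NVIDIAContainerRuntimeConfig.Mode)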
@@ -43,11 +43,16 @@ func TestResolveAutoMode(t *testing.T) {
 			mode:         "not-auto",
 			expectedMode: "not-auto",
 		},
+		{
+			description:  "legacy resolves to legacy",
+			mode:         "legacy",
+			expectedMode: "legacy",
+		},
 		{
 			description:  "no info defaults to legacy",
 			mode:         "auto",
 			info:         map[string]bool{},
-			expectedMode: "legacy",
+			expectedMode: "jit-cdi",
 		},
 		{
 			description: "non-nvml, non-tegra, nvgpu resolves to csv",
@@ -80,14 +85,14 @@ func TestResolveAutoMode(t *testing.T) {
 			expectedMode: "csv",
 		},
 		{
-			description: "nvml, non-tegra, non-nvgpu resolves to legacy",
+			description: "nvml, non-tegra, non-nvgpu resolves to jit-cdi",
 			mode:        "auto",
 			info: map[string]bool{
 				"nvml":  true,
 				"tegra": false,
 				"nvgpu": false,
 			},
-			expectedMode: "legacy",
+			expectedMode: "jit-cdi",
 		},
 		{
 			description: "nvml, non-tegra, nvgpu resolves to csv",
@@ -100,14 +105,14 @@ func TestResolveAutoMode(t *testing.T) {
 			expectedMode: "csv",
 		},
 		{
-			description: "nvml, tegra, non-nvgpu resolves to legacy",
+			description: "nvml, tegra, non-nvgpu resolves to jit-cdi",
 			mode:        "auto",
 			info: map[string]bool{
 				"nvml":  true,
 				"tegra": true,
 				"nvgpu": false,
 			},
-			expectedMode: "legacy",
+			expectedMode: "jit-cdi",
 		},
 		{
 			description: "nvml, tegra, nvgpu resolves to csv",
@@ -136,7 +141,7 @@ func TestResolveAutoMode(t *testing.T) {
 			},
 		},
 		{
-			description: "at least one non-cdi device resolves to legacy",
+			description: "at least one non-cdi device resolves to jit-cdi",
 			mode:        "auto",
 			envmap: map[string]string{
 				"NVIDIA_VISIBLE_DEVICES": "nvidia.com/gpu=0,0",
@@ -146,7 +151,7 @@ func TestResolveAutoMode(t *testing.T) {
 				"tegra": false,
 				"nvgpu": false,
 			},
-			expectedMode: "legacy",
+			expectedMode: "jit-cdi",
 		},
 		{
 			description: "at least one non-cdi device resolves to csv",
@@ -170,7 +175,7 @@ func TestResolveAutoMode(t *testing.T) {
 			expectedMode: "cdi",
 		},
 		{
-			description: "cdi mount and non-CDI devices resolves to legacy",
+			description: "cdi mount and non-CDI devices resolves to jit-cdi",
 			mode:        "auto",
 			mounts: []string{
 				"/var/run/nvidia-container-devices/cdi/nvidia.com/gpu/0",
@@ -181,7 +186,7 @@ func TestResolveAutoMode(t *testing.T) {
 				"tegra": false,
 				"nvgpu": false,
 			},
-			expectedMode: "legacy",
+			expectedMode: "jit-cdi",
 		},
 		{
 			description: "cdi mount and non-CDI envvar resolves to cdi",
@@ -199,22 +204,6 @@ func TestResolveAutoMode(t *testing.T) {
 			},
 			expectedMode: "cdi",
 		},
-		{
-			description: "non-cdi mount and CDI envvar resolves to legacy",
-			mode:        "auto",
-			envmap: map[string]string{
-				"NVIDIA_VISIBLE_DEVICES": "nvidia.com/gpu=0",
-			},
-			mounts: []string{
-				"/var/run/nvidia-container-devices/0",
-			},
-			info: map[string]bool{
-				"nvml":  true,
-				"tegra": false,
-				"nvgpu": false,
-			},
-			expectedMode: "legacy",
-		},
 	}
 
 	for _, tc := range testCases {
@@ -251,7 +240,12 @@ func TestResolveAutoMode(t *testing.T) {
 				image.WithAcceptDeviceListAsVolumeMounts(true),
 				image.WithAcceptEnvvarUnprivileged(true),
 			)
-			mode := resolveMode(logger, tc.mode, image, properties)
+			mr := NewRuntimeModeResolver(
+				WithLogger(logger),
+				WithImage(&image),
+				WithPropertyExtractor(properties),
+			)
+			mode := mr.ResolveRuntimeMode(tc.mode)
 			require.EqualValues(t, tc.expectedMode, mode)
 		})
 	}
@@ -18,6 +18,7 @@ package modifier
 
 import (
 	"fmt"
+	"strings"
 
 	"tags.cncf.io/container-device-interface/pkg/parser"
 
@@ -27,17 +28,27 @@ import (
 	"github.com/NVIDIA/nvidia-container-toolkit/internal/modifier/cdi"
 	"github.com/NVIDIA/nvidia-container-toolkit/internal/oci"
 	"github.com/NVIDIA/nvidia-container-toolkit/pkg/nvcdi"
-	"github.com/NVIDIA/nvidia-container-toolkit/pkg/nvcdi/spec"
 )
 
+const (
+	automaticDeviceVendor = "runtime.nvidia.com"
+	automaticDeviceClass  = "gpu"
+	automaticDeviceKind   = automaticDeviceVendor + "/" + automaticDeviceClass
+	automaticDevicePrefix = automaticDeviceKind + "="
+)
+
 // NewCDIModifier creates an OCI spec modifier that determines the modifications to make based on the
 // CDI specifications available on the system. The NVIDIA_VISIBLE_DEVICES environment variable is
 // used to select the devices to include.
-func NewCDIModifier(logger logger.Interface, cfg *config.Config, image image.CUDA) (oci.SpecModifier, error) {
+func NewCDIModifier(logger logger.Interface, cfg *config.Config, image image.CUDA, isJitCDI bool) (oci.SpecModifier, error) {
+	defaultKind := cfg.NVIDIAContainerRuntimeConfig.Modes.CDI.DefaultKind
+	if isJitCDI {
+		defaultKind = automaticDeviceKind
+	}
 	deviceRequestor := newCDIDeviceRequestor(
 		logger,
 		image,
-		cfg.NVIDIAContainerRuntimeConfig.Modes.CDI.DefaultKind,
+		defaultKind,
 	)
 	devices := deviceRequestor.DeviceRequests()
 	if len(devices) == 0 {
@@ -107,17 +118,34 @@ func (c *cdiDeviceRequestor) DeviceRequests() []string {
 func filterAutomaticDevices(devices []string) []string {
 	var automatic []string
 	for _, device := range devices {
-		vendor, class, _ := parser.ParseDevice(device)
-		if vendor == "runtime.nvidia.com" && class == "gpu" {
-			automatic = append(automatic, device)
+		if !strings.HasPrefix(device, automaticDevicePrefix) {
+			continue
 		}
+		automatic = append(automatic, device)
 	}
 	return automatic
 }
 
 func newAutomaticCDISpecModifier(logger logger.Interface, cfg *config.Config, devices []string) (oci.SpecModifier, error) {
 	logger.Debugf("Generating in-memory CDI specs for devices %v", devices)
-	spec, err := generateAutomaticCDISpec(logger, cfg, devices)
+
+	var identifiers []string
+	for _, device := range devices {
+		identifiers = append(identifiers, strings.TrimPrefix(device, automaticDevicePrefix))
+	}
+
+	cdilib, err := nvcdi.New(
+		nvcdi.WithLogger(logger),
+		nvcdi.WithNVIDIACDIHookPath(cfg.NVIDIACTKConfig.Path),
+		nvcdi.WithDriverRoot(cfg.NVIDIAContainerCLIConfig.Root),
+		nvcdi.WithVendor(automaticDeviceVendor),
+		nvcdi.WithClass(automaticDeviceClass),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to construct CDI library: %w", err)
+	}
+
+	spec, err := cdilib.GetSpec(identifiers...)
 	if err != nil {
 		return nil, fmt.Errorf("failed to generate CDI spec: %w", err)
 	}
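Since jit-cdi requests arrive under the runtime.nvidia.com/gpu kind, the prefix filter above decides which devices get an in-memory spec. An illustrative example of its behaviour (not part of the diff):

	devices := []string{
		"runtime.nvidia.com/gpu=all", // carries automaticDevicePrefix: kept
		"nvidia.com/gpu=0",           // different kind: ignored by this filter
	}
	automatic := filterAutomaticDevices(devices)
	// automatic == ["runtime.nvidia.com/gpu=all"]; the trimmed identifier "all"
	// is what newAutomaticCDISpecModifier passes to the nvcdi library.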
@@ -132,27 +160,6 @@ func newAutomaticCDISpecModifier(logger logger.Interface, cfg *config.Config, de
 	return cdiDeviceRequestor, nil
 }
 
-func generateAutomaticCDISpec(logger logger.Interface, cfg *config.Config, devices []string) (spec.Interface, error) {
-	cdilib, err := nvcdi.New(
-		nvcdi.WithLogger(logger),
-		nvcdi.WithNVIDIACDIHookPath(cfg.NVIDIACTKConfig.Path),
-		nvcdi.WithDriverRoot(cfg.NVIDIAContainerCLIConfig.Root),
-		nvcdi.WithVendor("runtime.nvidia.com"),
-		nvcdi.WithClass("gpu"),
-	)
-	if err != nil {
-		return nil, fmt.Errorf("failed to construct CDI library: %w", err)
-	}
-
-	var identifiers []string
-	for _, device := range devices {
-		_, _, id := parser.ParseDevice(device)
-		identifiers = append(identifiers, id)
-	}
-
-	return cdilib.GetSpec(identifiers...)
-}
-
 type deduplicatedDeviceRequestor struct {
 	deviceRequestor
 }
@@ -70,6 +70,18 @@ func TestDeviceRequests(t *testing.T) {
 			},
 			expectedDevices: []string{"nvidia.com/gpu=0", "example.com/class=device"},
 		},
+		{
+			description: "cdi devices from envvar with default kind",
+			input: cdiDeviceRequestor{
+				defaultKind: "runtime.nvidia.com/gpu",
+			},
+			spec: &specs.Spec{
+				Process: &specs.Process{
+					Env: []string{"NVIDIA_VISIBLE_DEVICES=all"},
+				},
+			},
+			expectedDevices: []string{"runtime.nvidia.com/gpu=all"},
+		},
 		{
 			description: "no matching annotations",
 			prefixes:    []string{"not-prefix/"},
@@ -101,14 +101,14 @@ func newSpecModifier(logger logger.Interface, cfg *config.Config, ociSpec oci.Sp
 	return modifiers, nil
 }
 
-func newModeModifier(logger logger.Interface, mode string, cfg *config.Config, image image.CUDA) (oci.SpecModifier, error) {
+func newModeModifier(logger logger.Interface, mode info.RuntimeMode, cfg *config.Config, image image.CUDA) (oci.SpecModifier, error) {
 	switch mode {
-	case "legacy":
+	case info.LegacyRuntimeMode:
 		return modifier.NewStableRuntimeModifier(logger, cfg.NVIDIAContainerRuntimeHookConfig.Path), nil
-	case "csv":
+	case info.CSVRuntimeMode:
 		return modifier.NewCSVModifier(logger, cfg, image)
-	case "cdi":
-		return modifier.NewCDIModifier(logger, cfg, image)
+	case info.CDIRuntimeMode, info.JitCDIRuntimeMode:
+		return modifier.NewCDIModifier(logger, cfg, image, mode == info.JitCDIRuntimeMode)
 	}
 
 	return nil, fmt.Errorf("invalid runtime mode: %v", cfg.NVIDIAContainerRuntimeConfig.Mode)
@@ -119,7 +119,7 @@ func newModeModifier(logger logger.Interface, mode string, cfg *config.Config, i
 // The image is also used to determine the runtime mode to apply.
 // If a non-CDI mode is detected we ensure that the image does not process
 // annotation devices.
-func initRuntimeModeAndImage(logger logger.Interface, cfg *config.Config, ociSpec oci.Spec) (string, *image.CUDA, error) {
+func initRuntimeModeAndImage(logger logger.Interface, cfg *config.Config, ociSpec oci.Spec) (info.RuntimeMode, *image.CUDA, error) {
 	rawSpec, err := ociSpec.Load()
 	if err != nil {
 		return "", nil, fmt.Errorf("failed to load OCI spec: %v", err)
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
mode := info.ResolveAutoMode(logger, cfg.NVIDIAContainerRuntimeConfig.Mode, image)
|
||||
modeResolver := info.NewRuntimeModeResolver(
|
||||
info.WithLogger(logger),
|
||||
info.WithImage(&image),
|
||||
)
|
||||
mode := modeResolver.ResolveRuntimeMode(cfg.NVIDIAContainerRuntimeConfig.Mode)
|
||||
// We update the mode here so that we can continue passing just the config to other functions.
|
||||
cfg.NVIDIAContainerRuntimeConfig.Mode = mode
|
||||
cfg.NVIDIAContainerRuntimeConfig.Mode = string(mode)
|
||||
|
||||
if mode == "cdi" || len(cfg.NVIDIAContainerRuntimeConfig.Modes.CDI.AnnotationPrefixes) == 0 {
|
||||
return mode, &image, nil
|
||||
@@ -154,12 +158,12 @@ func initRuntimeModeAndImage(logger logger.Interface, cfg *config.Config, ociSpe
 }
 
 // supportedModifierTypes returns the modifiers supported for a specific runtime mode.
-func supportedModifierTypes(mode string) []string {
+func supportedModifierTypes(mode info.RuntimeMode) []string {
 	switch mode {
-	case "cdi":
+	case info.CDIRuntimeMode, info.JitCDIRuntimeMode:
 		// For CDI mode we make no additional modifications.
 		return []string{"nvidia-hook-remover", "mode"}
-	case "csv":
+	case info.CSVRuntimeMode:
 		// For CSV mode we support mode and feature-gated modification.
 		return []string{"nvidia-hook-remover", "feature-gated", "mode"}
 	default: