Fix generation of management CDI spec in containers

Since we rely on finding libcuda.so in the LDCache to determine both the CUDA
version and the expected directory for the driver libraries, generation of the
management CDI specification fails in containers where the LDCache has not been updated.

This change falls back to searching a set of predefined paths when the lookup of
libcuda.so in the cache fails.

Signed-off-by: Evan Lezar <elezar@nvidia.com>
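
A minimal sketch of the fallback described above, assuming hypothetical candidate directories and a hypothetical findLibcuda helper; the actual search paths and lookup code in the toolkit may differ:

package main

import (
	"fmt"
	"path/filepath"
)

// candidateDirs is a hypothetical list of directories to search when the
// libcuda.so lookup in the LDCache fails.
var candidateDirs = []string{
	"/usr/lib64",
	"/usr/lib/x86_64-linux-gnu",
	"/usr/lib/aarch64-linux-gnu",
}

// findLibcuda globs each candidate directory for a versioned libcuda library
// and returns the first match found.
func findLibcuda() (string, error) {
	for _, dir := range candidateDirs {
		matches, err := filepath.Glob(filepath.Join(dir, "libcuda.so.*"))
		if err != nil || len(matches) == 0 {
			continue
		}
		return matches[0], nil
	}
	return "", fmt.Errorf("libcuda.so not found in any candidate directory")
}

func main() {
	if path, err := findLibcuda(); err == nil {
		fmt.Println("found driver library at", path)
	} else {
		fmt.Println(err)
	}
}
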
Evan Lezar
2023-03-23 11:50:11 +02:00
parent 5e0684e99d
commit 9506bd9da0
4 changed files with 105 additions and 34 deletions


@@ -17,6 +17,8 @@
package nvcdi

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
	"github.com/NVIDIA/nvidia-container-toolkit/pkg/nvcdi/spec"
	"github.com/sirupsen/logrus"
	"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvlib/device"
@@ -151,3 +153,24 @@ func (l *nvcdilib) resolveMode() (rmode string) {
	return ModeNvml
}

// getCudaVersion returns the CUDA version of the current system.
func (l *nvcdilib) getCudaVersion() (string, error) {
	if hasNVML, reason := l.infolib.HasNvml(); !hasNVML {
		return "", fmt.Errorf("nvml not detected: %v", reason)
	}
	if l.nvmllib == nil {
		return "", fmt.Errorf("nvml library not initialized")
	}
	r := l.nvmllib.Init()
	if r != nvml.SUCCESS {
		return "", fmt.Errorf("failed to initialize nvml: %v", r)
	}
	defer l.nvmllib.Shutdown()

	version, r := l.nvmllib.SystemGetDriverVersion()
	if r != nvml.SUCCESS {
		return "", fmt.Errorf("failed to get driver version: %v", r)
	}
	return version, nil
}
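
A hedged usage sketch of the new helper: a hypothetical caller treats a failure to query NVML (for example, in a container where the driver is not mounted) as non-fatal and falls back to an empty version; the real error-handling policy in the toolkit may differ.

// Hypothetical caller; the fallback policy here is illustrative only.
cudaVersion, err := l.getCudaVersion()
if err != nil {
	// NVML may be unavailable in a container without the driver mounted;
	// fall back to an empty version rather than failing spec generation.
	cudaVersion = ""
}
_ = cudaVersion // e.g. recorded in the generated CDI spec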