mirror of
https://github.com/NVIDIA/nvidia-container-toolkit
synced 2024-11-23 20:53:30 +00:00
Merge pull request #722 from tariq1890/use-go-api-for-toolkit-install-rebase
This commit is contained in:
commit
4f440dedda
@ -1,260 +0,0 @@
|
||||
/**
|
||||
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
cli "github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/info"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/pkg/config/engine/containerd"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/pkg/config/toml"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/tools/container"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultConfig = "/etc/containerd/config.toml"
|
||||
defaultSocket = "/run/containerd/containerd.sock"
|
||||
defaultRuntimeClass = "nvidia"
|
||||
defaultRuntmeType = "io.containerd.runc.v2"
|
||||
defaultSetAsDefault = true
|
||||
defaultRestartMode = "signal"
|
||||
defaultHostRootMount = "/host"
|
||||
)
|
||||
|
||||
// options stores the configuration from the command line or environment variables
|
||||
type options struct {
|
||||
container.Options
|
||||
|
||||
// containerd-specific options
|
||||
useLegacyConfig bool
|
||||
runtimeType string
|
||||
|
||||
ContainerRuntimeModesCDIAnnotationPrefixes cli.StringSlice
|
||||
|
||||
runtimeConfigOverrideJSON string
|
||||
}
|
||||
|
||||
func main() {
|
||||
options := options{}
|
||||
|
||||
// Create the top-level CLI
|
||||
c := cli.NewApp()
|
||||
c.Name = "containerd"
|
||||
c.Usage = "Update a containerd config with the nvidia-container-runtime"
|
||||
c.Version = info.GetVersionString()
|
||||
|
||||
// Create the 'setup' subcommand
|
||||
setup := cli.Command{}
|
||||
setup.Name = "setup"
|
||||
setup.Usage = "Trigger a containerd config to be updated"
|
||||
setup.ArgsUsage = "<runtime_dirname>"
|
||||
setup.Action = func(c *cli.Context) error {
|
||||
return Setup(c, &options)
|
||||
}
|
||||
setup.Before = func(c *cli.Context) error {
|
||||
return container.ParseArgs(c, &options.Options)
|
||||
}
|
||||
|
||||
// Create the 'cleanup' subcommand
|
||||
cleanup := cli.Command{}
|
||||
cleanup.Name = "cleanup"
|
||||
cleanup.Usage = "Trigger any updates made to a containerd config to be undone"
|
||||
cleanup.ArgsUsage = "<runtime_dirname>"
|
||||
cleanup.Action = func(c *cli.Context) error {
|
||||
return Cleanup(c, &options)
|
||||
}
|
||||
cleanup.Before = func(c *cli.Context) error {
|
||||
return container.ParseArgs(c, &options.Options)
|
||||
}
|
||||
|
||||
// Register the subcommands with the top-level CLI
|
||||
c.Commands = []*cli.Command{
|
||||
&setup,
|
||||
&cleanup,
|
||||
}
|
||||
|
||||
// Setup common flags across both subcommands. All subcommands get the same
|
||||
// set of flags even if they don't use some of them. This is so that we
|
||||
// only require the user to specify one set of flags for both 'startup'
|
||||
// and 'cleanup' to simplify things.
|
||||
commonFlags := []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "Path to the containerd config file",
|
||||
Value: defaultConfig,
|
||||
Destination: &options.Config,
|
||||
EnvVars: []string{"RUNTIME_CONFIG", "CONTAINERD_CONFIG"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "socket",
|
||||
Usage: "Path to the containerd socket file",
|
||||
Value: defaultSocket,
|
||||
Destination: &options.Socket,
|
||||
EnvVars: []string{"RUNTIME_SOCKET", "CONTAINERD_SOCKET"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "restart-mode",
|
||||
Usage: "Specify how containerd should be restarted; If 'none' is selected, it will not be restarted [signal | systemd | none]",
|
||||
Value: defaultRestartMode,
|
||||
Destination: &options.RestartMode,
|
||||
EnvVars: []string{"RUNTIME_RESTART_MODE", "CONTAINERD_RESTART_MODE"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "runtime-name",
|
||||
Aliases: []string{"nvidia-runtime-name", "runtime-class"},
|
||||
Usage: "The name of the runtime class to set for the nvidia-container-runtime",
|
||||
Value: defaultRuntimeClass,
|
||||
Destination: &options.RuntimeName,
|
||||
EnvVars: []string{"NVIDIA_RUNTIME_NAME", "CONTAINERD_RUNTIME_CLASS"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "nvidia-runtime-dir",
|
||||
Aliases: []string{"runtime-dir"},
|
||||
Usage: "The path where the nvidia-container-runtime binaries are located. If this is not specified, the first argument will be used instead",
|
||||
Destination: &options.RuntimeDir,
|
||||
EnvVars: []string{"NVIDIA_RUNTIME_DIR"},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "set-as-default",
|
||||
Usage: "Set nvidia-container-runtime as the default runtime",
|
||||
Value: defaultSetAsDefault,
|
||||
Destination: &options.SetAsDefault,
|
||||
EnvVars: []string{"NVIDIA_RUNTIME_SET_AS_DEFAULT", "CONTAINERD_SET_AS_DEFAULT"},
|
||||
Hidden: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "host-root",
|
||||
Usage: "Specify the path to the host root to be used when restarting containerd using systemd",
|
||||
Value: defaultHostRootMount,
|
||||
Destination: &options.HostRootMount,
|
||||
EnvVars: []string{"HOST_ROOT_MOUNT"},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "use-legacy-config",
|
||||
Usage: "Specify whether a legacy (pre v1.3) config should be used",
|
||||
Destination: &options.useLegacyConfig,
|
||||
EnvVars: []string{"CONTAINERD_USE_LEGACY_CONFIG"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "runtime-type",
|
||||
Usage: "The runtime_type to use for the configured runtime classes",
|
||||
Value: defaultRuntmeType,
|
||||
Destination: &options.runtimeType,
|
||||
EnvVars: []string{"CONTAINERD_RUNTIME_TYPE"},
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "nvidia-container-runtime-modes.cdi.annotation-prefixes",
|
||||
Destination: &options.ContainerRuntimeModesCDIAnnotationPrefixes,
|
||||
EnvVars: []string{"NVIDIA_CONTAINER_RUNTIME_MODES_CDI_ANNOTATION_PREFIXES"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "runtime-config-override",
|
||||
Destination: &options.runtimeConfigOverrideJSON,
|
||||
Usage: "specify additional runtime options as a JSON string. The paths are relative to the runtime config.",
|
||||
Value: "{}",
|
||||
EnvVars: []string{"RUNTIME_CONFIG_OVERRIDE", "CONTAINERD_RUNTIME_CONFIG_OVERRIDE"},
|
||||
},
|
||||
}
|
||||
|
||||
// Update the subcommand flags with the common subcommand flags
|
||||
setup.Flags = append([]cli.Flag{}, commonFlags...)
|
||||
cleanup.Flags = append([]cli.Flag{}, commonFlags...)
|
||||
|
||||
// Run the top-level CLI
|
||||
if err := c.Run(os.Args); err != nil {
|
||||
log.Fatal(fmt.Errorf("error: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
// Setup updates a containerd configuration to include the nvidia-containerd-runtime and reloads it
|
||||
func Setup(c *cli.Context, o *options) error {
|
||||
log.Infof("Starting 'setup' for %v", c.App.Name)
|
||||
|
||||
cfg, err := containerd.New(
|
||||
containerd.WithPath(o.Config),
|
||||
containerd.WithConfigSource(toml.FromFile(o.Config)),
|
||||
containerd.WithRuntimeType(o.runtimeType),
|
||||
containerd.WithUseLegacyConfig(o.useLegacyConfig),
|
||||
containerd.WithContainerAnnotations(o.containerAnnotationsFromCDIPrefixes()...),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Configure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to configure containerd: %v", err)
|
||||
}
|
||||
|
||||
err = RestartContainerd(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to restart containerd: %v", err)
|
||||
}
|
||||
|
||||
log.Infof("Completed 'setup' for %v", c.App.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup reverts a containerd configuration to remove the nvidia-containerd-runtime and reloads it
|
||||
func Cleanup(c *cli.Context, o *options) error {
|
||||
log.Infof("Starting 'cleanup' for %v", c.App.Name)
|
||||
|
||||
cfg, err := containerd.New(
|
||||
containerd.WithPath(o.Config),
|
||||
containerd.WithConfigSource(toml.FromFile(o.Config)),
|
||||
containerd.WithRuntimeType(o.runtimeType),
|
||||
containerd.WithUseLegacyConfig(o.useLegacyConfig),
|
||||
containerd.WithContainerAnnotations(o.containerAnnotationsFromCDIPrefixes()...),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Unconfigure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to unconfigure containerd: %v", err)
|
||||
}
|
||||
|
||||
err = RestartContainerd(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to restart containerd: %v", err)
|
||||
}
|
||||
|
||||
log.Infof("Completed 'cleanup' for %v", c.App.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RestartContainerd restarts containerd depending on the value of restartModeFlag
|
||||
func RestartContainerd(o *options) error {
|
||||
return o.Restart("containerd", SignalContainerd)
|
||||
}
|
||||
|
||||
// containerAnnotationsFromCDIPrefixes returns the container annotations to set for the given CDI prefixes.
|
||||
func (o *options) containerAnnotationsFromCDIPrefixes() []string {
|
||||
var annotations []string
|
||||
for _, prefix := range o.ContainerRuntimeModesCDIAnnotationPrefixes.Value() {
|
||||
annotations = append(annotations, prefix+"*")
|
||||
}
|
||||
|
||||
return annotations
|
||||
}
|
@ -1,300 +0,0 @@
|
||||
/**
|
||||
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
cli "github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/config"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/info"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/pkg/config/engine/crio"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/pkg/config/ocihook"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/pkg/config/toml"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/tools/container"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultConfigMode = "hook"
|
||||
|
||||
// Hook-based settings
|
||||
defaultHooksDir = "/usr/share/containers/oci/hooks.d"
|
||||
defaultHookFilename = "oci-nvidia-hook.json"
|
||||
|
||||
// Config-based settings
|
||||
defaultConfig = "/etc/crio/crio.conf"
|
||||
defaultSocket = "/var/run/crio/crio.sock"
|
||||
defaultRuntimeClass = "nvidia"
|
||||
defaultSetAsDefault = true
|
||||
defaultRestartMode = "systemd"
|
||||
defaultHostRootMount = "/host"
|
||||
)
|
||||
|
||||
// options stores the configuration from the command linek or environment variables
|
||||
type options struct {
|
||||
container.Options
|
||||
|
||||
configMode string
|
||||
|
||||
// hook-specific options
|
||||
hooksDir string
|
||||
hookFilename string
|
||||
}
|
||||
|
||||
func main() {
|
||||
options := options{}
|
||||
|
||||
// Create the top-level CLI
|
||||
c := cli.NewApp()
|
||||
c.Name = "crio"
|
||||
c.Usage = "Update cri-o hooks to include the NVIDIA runtime hook"
|
||||
c.Version = info.GetVersionString()
|
||||
|
||||
// Create the 'setup' subcommand
|
||||
setup := cli.Command{}
|
||||
setup.Name = "setup"
|
||||
setup.Usage = "Configure cri-o for NVIDIA GPU containers"
|
||||
setup.ArgsUsage = "<toolkit_dirname>"
|
||||
setup.Action = func(c *cli.Context) error {
|
||||
return Setup(c, &options)
|
||||
}
|
||||
setup.Before = func(c *cli.Context) error {
|
||||
return container.ParseArgs(c, &options.Options)
|
||||
}
|
||||
|
||||
// Create the 'cleanup' subcommand
|
||||
cleanup := cli.Command{}
|
||||
cleanup.Name = "cleanup"
|
||||
cleanup.Usage = "Remove the NVIDIA-specific cri-o configuration"
|
||||
cleanup.Action = func(c *cli.Context) error {
|
||||
return Cleanup(c, &options)
|
||||
}
|
||||
cleanup.Before = func(c *cli.Context) error {
|
||||
return container.ParseArgs(c, &options.Options)
|
||||
}
|
||||
|
||||
// Register the subcommands with the top-level CLI
|
||||
c.Commands = []*cli.Command{
|
||||
&setup,
|
||||
&cleanup,
|
||||
}
|
||||
|
||||
// Setup common flags across both subcommands. All subcommands get the same
|
||||
// set of flags even if they don't use some of them. This is so that we
|
||||
// only require the user to specify one set of flags for both 'startup'
|
||||
// and 'cleanup' to simplify things.
|
||||
commonFlags := []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "Path to the cri-o config file",
|
||||
Value: defaultConfig,
|
||||
Destination: &options.Config,
|
||||
EnvVars: []string{"RUNTIME_CONFIG", "CRIO_CONFIG"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "socket",
|
||||
Usage: "Path to the crio socket file",
|
||||
Value: "",
|
||||
Destination: &options.Socket,
|
||||
EnvVars: []string{"RUNTIME_SOCKET", "CRIO_SOCKET"},
|
||||
// Note: We hide this option since restarting cri-o via a socket is not supported.
|
||||
Hidden: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "restart-mode",
|
||||
Usage: "Specify how cri-o should be restarted; If 'none' is selected, it will not be restarted [systemd | none]",
|
||||
Value: defaultRestartMode,
|
||||
Destination: &options.RestartMode,
|
||||
EnvVars: []string{"RUNTIME_RESTART_MODE", "CRIO_RESTART_MODE"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "runtime-name",
|
||||
Aliases: []string{"nvidia-runtime-name", "runtime-class"},
|
||||
Usage: "The name of the runtime class to set for the nvidia-container-runtime",
|
||||
Value: defaultRuntimeClass,
|
||||
Destination: &options.RuntimeName,
|
||||
EnvVars: []string{"NVIDIA_RUNTIME_NAME", "CRIO_RUNTIME_CLASS"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "nvidia-runtime-dir",
|
||||
Aliases: []string{"runtime-dir"},
|
||||
Usage: "The path where the nvidia-container-runtime binaries are located. If this is not specified, the first argument will be used instead",
|
||||
Destination: &options.RuntimeDir,
|
||||
EnvVars: []string{"NVIDIA_RUNTIME_DIR"},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "set-as-default",
|
||||
Usage: "Set nvidia-container-runtime as the default runtime",
|
||||
Value: defaultSetAsDefault,
|
||||
Destination: &options.SetAsDefault,
|
||||
EnvVars: []string{"NVIDIA_RUNTIME_SET_AS_DEFAULT", "CRIO_SET_AS_DEFAULT"},
|
||||
Hidden: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "host-root",
|
||||
Usage: "Specify the path to the host root to be used when restarting crio using systemd",
|
||||
Value: defaultHostRootMount,
|
||||
Destination: &options.HostRootMount,
|
||||
EnvVars: []string{"HOST_ROOT_MOUNT"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "hooks-dir",
|
||||
Usage: "path to the cri-o hooks directory",
|
||||
Value: defaultHooksDir,
|
||||
Destination: &options.hooksDir,
|
||||
EnvVars: []string{"CRIO_HOOKS_DIR"},
|
||||
DefaultText: defaultHooksDir,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "hook-filename",
|
||||
Usage: "filename of the cri-o hook that will be created / removed in the hooks directory",
|
||||
Value: defaultHookFilename,
|
||||
Destination: &options.hookFilename,
|
||||
EnvVars: []string{"CRIO_HOOK_FILENAME"},
|
||||
DefaultText: defaultHookFilename,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "config-mode",
|
||||
Usage: "the configuration mode to use. One of [hook | config]",
|
||||
Value: defaultConfigMode,
|
||||
Destination: &options.configMode,
|
||||
EnvVars: []string{"CRIO_CONFIG_MODE"},
|
||||
},
|
||||
}
|
||||
|
||||
// Update the subcommand flags with the common subcommand flags
|
||||
setup.Flags = append([]cli.Flag{}, commonFlags...)
|
||||
cleanup.Flags = append([]cli.Flag{}, commonFlags...)
|
||||
|
||||
// Run the top-level CLI
|
||||
if err := c.Run(os.Args); err != nil {
|
||||
log.Fatal(fmt.Errorf("error: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
// Setup installs the prestart hook required to launch GPU-enabled containers
|
||||
func Setup(c *cli.Context, o *options) error {
|
||||
log.Infof("Starting 'setup' for %v", c.App.Name)
|
||||
|
||||
switch o.configMode {
|
||||
case "hook":
|
||||
return setupHook(o)
|
||||
case "config":
|
||||
return setupConfig(o)
|
||||
default:
|
||||
return fmt.Errorf("invalid config-mode '%v'", o.configMode)
|
||||
}
|
||||
}
|
||||
|
||||
// setupHook installs the prestart hook required to launch GPU-enabled containers
|
||||
func setupHook(o *options) error {
|
||||
log.Infof("Installing prestart hook")
|
||||
|
||||
hookPath := filepath.Join(o.hooksDir, o.hookFilename)
|
||||
err := ocihook.CreateHook(hookPath, filepath.Join(o.RuntimeDir, config.NVIDIAContainerRuntimeHookExecutable))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating hook: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// setupConfig updates the cri-o config for the NVIDIA container runtime
|
||||
func setupConfig(o *options) error {
|
||||
log.Infof("Updating config file")
|
||||
|
||||
cfg, err := crio.New(
|
||||
crio.WithPath(o.Config),
|
||||
crio.WithConfigSource(toml.FromFile(o.Config)),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Configure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to configure cri-o: %v", err)
|
||||
}
|
||||
|
||||
err = RestartCrio(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to restart crio: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup removes the specified prestart hook
|
||||
func Cleanup(c *cli.Context, o *options) error {
|
||||
log.Infof("Starting 'cleanup' for %v", c.App.Name)
|
||||
|
||||
switch o.configMode {
|
||||
case "hook":
|
||||
return cleanupHook(o)
|
||||
case "config":
|
||||
return cleanupConfig(o)
|
||||
default:
|
||||
return fmt.Errorf("invalid config-mode '%v'", o.configMode)
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupHook removes the prestart hook
|
||||
func cleanupHook(o *options) error {
|
||||
log.Infof("Removing prestart hook")
|
||||
|
||||
hookPath := filepath.Join(o.hooksDir, o.hookFilename)
|
||||
err := os.Remove(hookPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error removing hook '%v': %v", hookPath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupConfig removes the NVIDIA container runtime from the cri-o config
|
||||
func cleanupConfig(o *options) error {
|
||||
log.Infof("Reverting config file modifications")
|
||||
|
||||
cfg, err := crio.New(
|
||||
crio.WithPath(o.Config),
|
||||
crio.WithConfigSource(toml.FromFile(o.Config)),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Unconfigure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to unconfigure cri-o: %v", err)
|
||||
}
|
||||
|
||||
err = RestartCrio(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to restart crio: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RestartCrio restarts crio depending on the value of restartModeFlag
|
||||
func RestartCrio(o *options) error {
|
||||
return o.Restart("crio", func(string) error { return fmt.Errorf("supporting crio via signal is unsupported") })
|
||||
}
|
@ -1,212 +0,0 @@
|
||||
/**
|
||||
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
cli "github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/info"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/pkg/config/engine/docker"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/tools/container"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultConfig = "/etc/docker/daemon.json"
|
||||
defaultSocket = "/var/run/docker.sock"
|
||||
defaultSetAsDefault = true
|
||||
// defaultRuntimeName specifies the NVIDIA runtime to be use as the default runtime if setting the default runtime is enabled
|
||||
defaultRuntimeName = "nvidia"
|
||||
defaultRestartMode = "signal"
|
||||
defaultHostRootMount = "/host"
|
||||
)
|
||||
|
||||
// options stores the configuration from the command line or environment variables
|
||||
type options struct {
|
||||
container.Options
|
||||
}
|
||||
|
||||
func main() {
|
||||
options := options{}
|
||||
|
||||
// Create the top-level CLI
|
||||
c := cli.NewApp()
|
||||
c.Name = "docker"
|
||||
c.Usage = "Update docker config with the nvidia runtime"
|
||||
c.Version = info.GetVersionString()
|
||||
|
||||
// Create the 'setup' subcommand
|
||||
setup := cli.Command{}
|
||||
setup.Name = "setup"
|
||||
setup.Usage = "Trigger docker config to be updated"
|
||||
setup.ArgsUsage = "<runtime_dirname>"
|
||||
setup.Action = func(c *cli.Context) error {
|
||||
return Setup(c, &options)
|
||||
}
|
||||
setup.Before = func(c *cli.Context) error {
|
||||
return container.ParseArgs(c, &options.Options)
|
||||
}
|
||||
|
||||
// Create the 'cleanup' subcommand
|
||||
cleanup := cli.Command{}
|
||||
cleanup.Name = "cleanup"
|
||||
cleanup.Usage = "Trigger any updates made to docker config to be undone"
|
||||
cleanup.ArgsUsage = "<runtime_dirname>"
|
||||
cleanup.Action = func(c *cli.Context) error {
|
||||
return Cleanup(c, &options)
|
||||
}
|
||||
cleanup.Before = func(c *cli.Context) error {
|
||||
return container.ParseArgs(c, &options.Options)
|
||||
}
|
||||
|
||||
// Register the subcommands with the top-level CLI
|
||||
c.Commands = []*cli.Command{
|
||||
&setup,
|
||||
&cleanup,
|
||||
}
|
||||
|
||||
// Setup common flags across both subcommands. All subcommands get the same
|
||||
// set of flags even if they don't use some of them. This is so that we
|
||||
// only require the user to specify one set of flags for both 'startup'
|
||||
// and 'cleanup' to simplify things.
|
||||
commonFlags := []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "Path to docker config file",
|
||||
Value: defaultConfig,
|
||||
Destination: &options.Config,
|
||||
EnvVars: []string{"RUNTIME_CONFIG", "DOCKER_CONFIG"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "socket",
|
||||
Usage: "Path to the docker socket file",
|
||||
Value: defaultSocket,
|
||||
Destination: &options.Socket,
|
||||
EnvVars: []string{"RUNTIME_SOCKET", "DOCKER_SOCKET"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "restart-mode",
|
||||
Usage: "Specify how docker should be restarted; If 'none' is selected it will not be restarted [signal | systemd | none ]",
|
||||
Value: defaultRestartMode,
|
||||
Destination: &options.RestartMode,
|
||||
EnvVars: []string{"RUNTIME_RESTART_MODE", "DOCKER_RESTART_MODE"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "host-root",
|
||||
Usage: "Specify the path to the host root to be used when restarting docker using systemd",
|
||||
Value: defaultHostRootMount,
|
||||
Destination: &options.HostRootMount,
|
||||
EnvVars: []string{"HOST_ROOT_MOUNT"},
|
||||
// Restart using systemd is currently not supported.
|
||||
// We hide this option for the time being.
|
||||
Hidden: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "runtime-name",
|
||||
Aliases: []string{"nvidia-runtime-name", "runtime-class"},
|
||||
Usage: "Specify the name of the `nvidia` runtime. If set-as-default is selected, the runtime is used as the default runtime.",
|
||||
Value: defaultRuntimeName,
|
||||
Destination: &options.RuntimeName,
|
||||
EnvVars: []string{"NVIDIA_RUNTIME_NAME", "DOCKER_RUNTIME_NAME"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "nvidia-runtime-dir",
|
||||
Aliases: []string{"runtime-dir"},
|
||||
Usage: "The path where the nvidia-container-runtime binaries are located. If this is not specified, the first argument will be used instead",
|
||||
Destination: &options.RuntimeDir,
|
||||
EnvVars: []string{"NVIDIA_RUNTIME_DIR"},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "set-as-default",
|
||||
Usage: "Set the `nvidia` runtime as the default runtime.",
|
||||
Value: defaultSetAsDefault,
|
||||
Destination: &options.SetAsDefault,
|
||||
EnvVars: []string{"NVIDIA_RUNTIME_SET_AS_DEFAULT", "DOCKER_SET_AS_DEFAULT"},
|
||||
Hidden: true,
|
||||
},
|
||||
}
|
||||
|
||||
// Update the subcommand flags with the common subcommand flags
|
||||
setup.Flags = append([]cli.Flag{}, commonFlags...)
|
||||
cleanup.Flags = append([]cli.Flag{}, commonFlags...)
|
||||
|
||||
// Run the top-level CLI
|
||||
if err := c.Run(os.Args); err != nil {
|
||||
log.Errorf("Error running docker configuration: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Setup updates docker configuration to include the nvidia runtime and reloads it
|
||||
func Setup(c *cli.Context, o *options) error {
|
||||
log.Infof("Starting 'setup' for %v", c.App.Name)
|
||||
|
||||
cfg, err := docker.New(
|
||||
docker.WithPath(o.Config),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Configure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to configure docker: %v", err)
|
||||
}
|
||||
|
||||
err = RestartDocker(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to restart docker: %v", err)
|
||||
}
|
||||
|
||||
log.Infof("Completed 'setup' for %v", c.App.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup reverts docker configuration to remove the nvidia runtime and reloads it
|
||||
func Cleanup(c *cli.Context, o *options) error {
|
||||
log.Infof("Starting 'cleanup' for %v", c.App.Name)
|
||||
|
||||
cfg, err := docker.New(
|
||||
docker.WithPath(o.Config),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Unconfigure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to unconfigure docker: %v", err)
|
||||
}
|
||||
|
||||
err = RestartDocker(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to signal docker: %v", err)
|
||||
}
|
||||
|
||||
log.Infof("Completed 'cleanup' for %v", c.App.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RestartDocker restarts docker depending on the value of restartModeFlag
|
||||
func RestartDocker(o *options) error {
|
||||
return o.Restart("docker", SignalDocker)
|
||||
}
|
@ -3,7 +3,6 @@ package main
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@ -12,6 +11,9 @@ import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
cli "github.com/urfave/cli/v2"
|
||||
unix "golang.org/x/sys/unix"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/tools/container/runtime"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/tools/container/toolkit"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -20,7 +22,6 @@ const (
|
||||
toolkitCommand = "toolkit"
|
||||
toolkitSubDir = "toolkit"
|
||||
|
||||
defaultToolkitArgs = ""
|
||||
defaultRuntime = "docker"
|
||||
defaultRuntimeArgs = ""
|
||||
)
|
||||
@ -37,6 +38,13 @@ type options struct {
|
||||
runtimeArgs string
|
||||
root string
|
||||
pidFile string
|
||||
|
||||
toolkitOptions toolkit.Options
|
||||
runtimeOptions runtime.Options
|
||||
}
|
||||
|
||||
func (o options) toolkitRoot() string {
|
||||
return filepath.Join(o.root, toolkitSubDir)
|
||||
}
|
||||
|
||||
// Version defines the CLI version. This is set at build time using LD FLAGS
|
||||
@ -49,7 +57,9 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
options := options{}
|
||||
options := options{
|
||||
toolkitOptions: toolkit.Options{},
|
||||
}
|
||||
// Create the top-level CLI
|
||||
c := cli.NewApp()
|
||||
c.Name = "nvidia-toolkit"
|
||||
@ -81,6 +91,7 @@ func main() {
|
||||
Destination: &options.runtime,
|
||||
EnvVars: []string{"RUNTIME"},
|
||||
},
|
||||
// TODO: Remove runtime-args
|
||||
&cli.StringFlag{
|
||||
Name: "runtime-args",
|
||||
Aliases: []string{"u"},
|
||||
@ -105,6 +116,9 @@ func main() {
|
||||
},
|
||||
}
|
||||
|
||||
c.Flags = append(c.Flags, toolkit.Flags(&options.toolkitOptions)...)
|
||||
c.Flags = append(c.Flags, runtime.Flags(&options.runtimeOptions)...)
|
||||
|
||||
// Run the CLI
|
||||
log.Infof("Starting %v", c.Name)
|
||||
if err := c.Run(remainingArgs); err != nil {
|
||||
@ -119,7 +133,12 @@ func validateFlags(_ *cli.Context, o *options) error {
|
||||
if filepath.Base(o.pidFile) != toolkitPidFilename {
|
||||
return fmt.Errorf("invalid toolkit.pid path %v", o.pidFile)
|
||||
}
|
||||
|
||||
if err := toolkit.ValidateOptions(&o.toolkitOptions, o.toolkitRoot()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := runtime.ValidateOptions(&o.runtimeOptions, o.runtime, o.toolkitRoot()); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -136,12 +155,12 @@ func Run(c *cli.Context, o *options) error {
|
||||
}
|
||||
defer shutdown(o.pidFile)
|
||||
|
||||
err = installToolkit(o)
|
||||
err = toolkit.Install(c, &o.toolkitOptions, o.toolkitRoot())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to install toolkit: %v", err)
|
||||
}
|
||||
|
||||
err = setupRuntime(o)
|
||||
err = runtime.Setup(c, &o.runtimeOptions, o.runtime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to setup runtime: %v", err)
|
||||
}
|
||||
@ -152,7 +171,7 @@ func Run(c *cli.Context, o *options) error {
|
||||
return fmt.Errorf("unable to wait for signal: %v", err)
|
||||
}
|
||||
|
||||
err = cleanupRuntime(o)
|
||||
err = runtime.Cleanup(c, &o.runtimeOptions, o.runtime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to cleanup runtime: %v", err)
|
||||
}
|
||||
@ -245,47 +264,6 @@ func initialize(pidFile string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func installToolkit(o *options) error {
|
||||
log.Infof("Installing toolkit")
|
||||
|
||||
cmdline := []string{
|
||||
toolkitCommand,
|
||||
"install",
|
||||
"--toolkit-root",
|
||||
filepath.Join(o.root, toolkitSubDir),
|
||||
}
|
||||
|
||||
//nolint:gosec // TODO: Can we harden this so that there is less risk of command injection
|
||||
cmd := exec.Command("sh", "-c", strings.Join(cmdline, " "))
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error running %v command: %v", cmdline, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func setupRuntime(o *options) error {
|
||||
toolkitDir := filepath.Join(o.root, toolkitSubDir)
|
||||
|
||||
log.Infof("Setting up runtime")
|
||||
|
||||
cmdline := fmt.Sprintf("%v setup %v %v\n", o.runtime, o.runtimeArgs, toolkitDir)
|
||||
|
||||
//nolint:gosec // TODO: Can we harden this so that there is less risk of command injection
|
||||
cmd := exec.Command("sh", "-c", cmdline)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error running %v command: %v", o.runtime, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func waitForSignal() error {
|
||||
log.Infof("Waiting for signal")
|
||||
waitingForSignal <- true
|
||||
@ -293,25 +271,6 @@ func waitForSignal() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanupRuntime(o *options) error {
|
||||
toolkitDir := filepath.Join(o.root, toolkitSubDir)
|
||||
|
||||
log.Infof("Cleaning up Runtime")
|
||||
|
||||
cmdline := fmt.Sprintf("%v cleanup %v %v\n", o.runtime, o.runtimeArgs, toolkitDir)
|
||||
|
||||
//nolint:gosec // TODO: Can we harden this so that there is less risk of command injection
|
||||
cmd := exec.Command("sh", "-c", cmdline)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error running %v command: %v", o.runtime, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func shutdown(pidFile string) {
|
||||
log.Infof("Shutting Down")
|
||||
|
||||
|
@ -14,7 +14,7 @@
|
||||
# limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@ -82,14 +82,10 @@ func TestUpdateV1ConfigDefaultRuntime(t *testing.T) {
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
o := &options{
|
||||
Options: container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
SetAsDefault: tc.setAsDefault,
|
||||
},
|
||||
runtimeType: runtimeType,
|
||||
useLegacyConfig: tc.legacyConfig,
|
||||
o := &container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
SetAsDefault: tc.setAsDefault,
|
||||
}
|
||||
|
||||
cfg, err := toml.Empty.Load()
|
||||
@ -233,11 +229,9 @@ func TestUpdateV1Config(t *testing.T) {
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
o := &options{
|
||||
Options: container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
},
|
||||
o := &container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
}
|
||||
|
||||
cfg, err := toml.Empty.Load()
|
||||
@ -394,11 +388,9 @@ func TestUpdateV1ConfigWithRuncPresent(t *testing.T) {
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
o := &options{
|
||||
Options: container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
},
|
||||
o := &container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
}
|
||||
|
||||
cfg, err := toml.TreeFromMap(runcConfigMapV1("/runc-binary"))
|
||||
@ -473,10 +465,8 @@ func TestRevertV1Config(t *testing.T) {
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
o := &options{
|
||||
Options: container.Options{
|
||||
RuntimeName: "nvidia",
|
||||
},
|
||||
o := &container.Options{
|
||||
RuntimeName: "nvidia",
|
||||
}
|
||||
|
||||
cfg, err := toml.LoadMap(tc.config)
|
@ -14,7 +14,7 @@
|
||||
# limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@ -66,12 +66,10 @@ func TestUpdateV2ConfigDefaultRuntime(t *testing.T) {
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
o := &options{
|
||||
Options: container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
SetAsDefault: tc.setAsDefault,
|
||||
},
|
||||
o := &container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
SetAsDefault: tc.setAsDefault,
|
||||
}
|
||||
|
||||
cfg, err := toml.LoadMap(map[string]interface{}{})
|
||||
@ -192,12 +190,9 @@ func TestUpdateV2Config(t *testing.T) {
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
o := &options{
|
||||
Options: container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
},
|
||||
runtimeType: runtimeType,
|
||||
o := &container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
}
|
||||
|
||||
cfg, err := toml.LoadMap(map[string]interface{}{})
|
||||
@ -206,7 +201,7 @@ func TestUpdateV2Config(t *testing.T) {
|
||||
v2 := &containerd.Config{
|
||||
Logger: logger,
|
||||
Tree: cfg,
|
||||
RuntimeType: o.runtimeType,
|
||||
RuntimeType: runtimeType,
|
||||
ContainerAnnotations: []string{"cdi.k8s.io/*"},
|
||||
}
|
||||
|
||||
@ -348,11 +343,9 @@ func TestUpdateV2ConfigWithRuncPresent(t *testing.T) {
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
o := &options{
|
||||
Options: container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
},
|
||||
o := &container.Options{
|
||||
RuntimeName: tc.runtimeName,
|
||||
RuntimeDir: runtimeDir,
|
||||
}
|
||||
|
||||
cfg, err := toml.LoadMap(runcConfigMapV2("/runc-binary"))
|
||||
@ -421,10 +414,8 @@ func TestRevertV2Config(t *testing.T) {
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
o := &options{
|
||||
Options: container.Options{
|
||||
RuntimeName: "nvidia",
|
||||
},
|
||||
o := &container.Options{
|
||||
RuntimeName: "nvidia",
|
||||
}
|
||||
|
||||
cfg, err := toml.LoadMap(tc.config)
|
166
tools/container/runtime/containerd/containerd.go
Normal file
166
tools/container/runtime/containerd/containerd.go
Normal file
@ -0,0 +1,166 @@
|
||||
/**
|
||||
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
*/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
cli "github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/pkg/config/engine/containerd"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/tools/container"
|
||||
)
|
||||
|
||||
const (
|
||||
Name = "containerd"
|
||||
|
||||
DefaultConfig = "/etc/containerd/config.toml"
|
||||
DefaultSocket = "/run/containerd/containerd.sock"
|
||||
DefaultRestartMode = "signal"
|
||||
|
||||
defaultRuntmeType = "io.containerd.runc.v2"
|
||||
)
|
||||
|
||||
// Options stores the containerd-specific options
|
||||
type Options struct {
|
||||
useLegacyConfig bool
|
||||
runtimeType string
|
||||
|
||||
ContainerRuntimeModesCDIAnnotationPrefixes cli.StringSlice
|
||||
|
||||
runtimeConfigOverrideJSON string
|
||||
}
|
||||
|
||||
func Flags(opts *Options) []cli.Flag {
|
||||
flags := []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "use-legacy-config",
|
||||
Usage: "Specify whether a legacy (pre v1.3) config should be used",
|
||||
Destination: &opts.useLegacyConfig,
|
||||
EnvVars: []string{"CONTAINERD_USE_LEGACY_CONFIG"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "runtime-type",
|
||||
Usage: "The runtime_type to use for the configured runtime classes",
|
||||
Value: defaultRuntmeType,
|
||||
Destination: &opts.runtimeType,
|
||||
EnvVars: []string{"CONTAINERD_RUNTIME_TYPE"},
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "nvidia-container-runtime-modes.cdi.annotation-prefixes",
|
||||
Destination: &opts.ContainerRuntimeModesCDIAnnotationPrefixes,
|
||||
EnvVars: []string{"NVIDIA_CONTAINER_RUNTIME_MODES_CDI_ANNOTATION_PREFIXES"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "runtime-config-override",
|
||||
Destination: &opts.runtimeConfigOverrideJSON,
|
||||
Usage: "specify additional runtime options as a JSON string. The paths are relative to the runtime config.",
|
||||
Value: "{}",
|
||||
EnvVars: []string{"RUNTIME_CONFIG_OVERRIDE", "CONTAINERD_RUNTIME_CONFIG_OVERRIDE"},
|
||||
},
|
||||
}
|
||||
|
||||
return flags
|
||||
}
|
||||
|
||||
// Setup updates a containerd configuration to include the nvidia-containerd-runtime and reloads it
|
||||
func Setup(c *cli.Context, o *container.Options, co *Options) error {
|
||||
log.Infof("Starting 'setup' for %v", c.App.Name)
|
||||
|
||||
cfg, err := containerd.New(
|
||||
containerd.WithPath(o.Config),
|
||||
containerd.WithRuntimeType(co.runtimeType),
|
||||
containerd.WithUseLegacyConfig(co.useLegacyConfig),
|
||||
containerd.WithContainerAnnotations(co.containerAnnotationsFromCDIPrefixes()...),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Configure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to configure containerd: %v", err)
|
||||
}
|
||||
|
||||
err = RestartContainerd(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to restart containerd: %v", err)
|
||||
}
|
||||
|
||||
log.Infof("Completed 'setup' for %v", c.App.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup reverts a containerd configuration to remove the nvidia-containerd-runtime and reloads it
|
||||
func Cleanup(c *cli.Context, o *container.Options, co *Options) error {
|
||||
log.Infof("Starting 'cleanup' for %v", c.App.Name)
|
||||
|
||||
cfg, err := containerd.New(
|
||||
containerd.WithPath(o.Config),
|
||||
containerd.WithRuntimeType(co.runtimeType),
|
||||
containerd.WithUseLegacyConfig(co.useLegacyConfig),
|
||||
containerd.WithContainerAnnotations(co.containerAnnotationsFromCDIPrefixes()...),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Unconfigure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to unconfigure containerd: %v", err)
|
||||
}
|
||||
|
||||
err = RestartContainerd(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to restart containerd: %v", err)
|
||||
}
|
||||
|
||||
log.Infof("Completed 'cleanup' for %v", c.App.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RestartContainerd restarts containerd depending on the value of restartModeFlag
|
||||
func RestartContainerd(o *container.Options) error {
|
||||
return o.Restart("containerd", SignalContainerd)
|
||||
}
|
||||
|
||||
// containerAnnotationsFromCDIPrefixes returns the container annotations to set for the given CDI prefixes.
|
||||
func (o *Options) containerAnnotationsFromCDIPrefixes() []string {
|
||||
var annotations []string
|
||||
for _, prefix := range o.ContainerRuntimeModesCDIAnnotationPrefixes.Value() {
|
||||
annotations = append(annotations, prefix+"*")
|
||||
}
|
||||
|
||||
return annotations
|
||||
}
|
||||
|
||||
func (o *Options) runtimeConfigOverride() (map[string]interface{}, error) {
|
||||
if o.runtimeConfigOverrideJSON == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
runtimeOptions := make(map[string]interface{})
|
||||
if err := json.Unmarshal([]byte(o.runtimeConfigOverrideJSON), &runtimeOptions); err != nil {
|
||||
return nil, fmt.Errorf("failed to read %v as JSON: %w", o.runtimeConfigOverrideJSON, err)
|
||||
}
|
||||
|
||||
return runtimeOptions, nil
|
||||
}
|
@ -14,7 +14,7 @@
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package main
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"fmt"
|
@ -17,7 +17,7 @@
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package main
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"errors"
|
72
tools/container/runtime/containerd/containerd_test.go
Normal file
72
tools/container/runtime/containerd/containerd_test.go
Normal file
@ -0,0 +1,72 @@
|
||||
/**
|
||||
# Copyright 2024 NVIDIA CORPORATION
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestRuntimeOptions(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
options Options
|
||||
expected map[string]interface{}
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
description: "empty is nil",
|
||||
},
|
||||
{
|
||||
description: "empty json",
|
||||
options: Options{
|
||||
runtimeConfigOverrideJSON: "{}",
|
||||
},
|
||||
expected: map[string]interface{}{},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
description: "SystemdCgroup is true",
|
||||
options: Options{
|
||||
runtimeConfigOverrideJSON: "{\"SystemdCgroup\": true}",
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
"SystemdCgroup": true,
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
description: "SystemdCgroup is false",
|
||||
options: Options{
|
||||
runtimeConfigOverrideJSON: "{\"SystemdCgroup\": false}",
|
||||
},
|
||||
expected: map[string]interface{}{
|
||||
"SystemdCgroup": false,
|
||||
},
|
||||
expectedError: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
runtimeOptions, err := tc.options.runtimeConfigOverride()
|
||||
require.ErrorIs(t, tc.expectedError, err)
|
||||
require.EqualValues(t, tc.expected, runtimeOptions)
|
||||
})
|
||||
}
|
||||
}
|
192
tools/container/runtime/crio/crio.go
Normal file
192
tools/container/runtime/crio/crio.go
Normal file
@ -0,0 +1,192 @@
|
||||
/**
|
||||
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package crio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
cli "github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/config"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/pkg/config/engine/crio"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/pkg/config/ocihook"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/tools/container"
|
||||
)
|
||||
|
||||
const (
|
||||
Name = "crio"
|
||||
|
||||
defaultConfigMode = "hook"
|
||||
|
||||
// Hook-based settings
|
||||
defaultHooksDir = "/usr/share/containers/oci/hooks.d"
|
||||
defaultHookFilename = "oci-nvidia-hook.json"
|
||||
|
||||
// Config-based settings
|
||||
DefaultConfig = "/etc/crio/crio.conf"
|
||||
DefaultSocket = "/var/run/crio/crio.sock"
|
||||
DefaultRestartMode = "systemd"
|
||||
)
|
||||
|
||||
// Options defines the cri-o specific options.
|
||||
type Options struct {
|
||||
configMode string
|
||||
|
||||
// hook-specific options
|
||||
hooksDir string
|
||||
hookFilename string
|
||||
}
|
||||
|
||||
func Flags(opts *Options) []cli.Flag {
|
||||
flags := []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "hooks-dir",
|
||||
Usage: "path to the cri-o hooks directory",
|
||||
Value: defaultHooksDir,
|
||||
Destination: &opts.hooksDir,
|
||||
EnvVars: []string{"CRIO_HOOKS_DIR"},
|
||||
DefaultText: defaultHooksDir,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "hook-filename",
|
||||
Usage: "filename of the cri-o hook that will be created / removed in the hooks directory",
|
||||
Value: defaultHookFilename,
|
||||
Destination: &opts.hookFilename,
|
||||
EnvVars: []string{"CRIO_HOOK_FILENAME"},
|
||||
DefaultText: defaultHookFilename,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "config-mode",
|
||||
Usage: "the configuration mode to use. One of [hook | config]",
|
||||
Value: defaultConfigMode,
|
||||
Destination: &opts.configMode,
|
||||
EnvVars: []string{"CRIO_CONFIG_MODE"},
|
||||
},
|
||||
}
|
||||
|
||||
return flags
|
||||
}
|
||||
|
||||
// Setup installs the prestart hook required to launch GPU-enabled containers
|
||||
func Setup(c *cli.Context, o *container.Options, co *Options) error {
|
||||
log.Infof("Starting 'setup' for %v", c.App.Name)
|
||||
|
||||
switch co.configMode {
|
||||
case "hook":
|
||||
return setupHook(o, co)
|
||||
case "config":
|
||||
return setupConfig(o)
|
||||
default:
|
||||
return fmt.Errorf("invalid config-mode '%v'", co.configMode)
|
||||
}
|
||||
}
|
||||
|
||||
// setupHook installs the prestart hook required to launch GPU-enabled containers
|
||||
func setupHook(o *container.Options, co *Options) error {
|
||||
log.Infof("Installing prestart hook")
|
||||
|
||||
hookPath := filepath.Join(co.hooksDir, co.hookFilename)
|
||||
err := ocihook.CreateHook(hookPath, filepath.Join(o.RuntimeDir, config.NVIDIAContainerRuntimeHookExecutable))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating hook: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// setupConfig updates the cri-o config for the NVIDIA container runtime
|
||||
func setupConfig(o *container.Options) error {
|
||||
log.Infof("Updating config file")
|
||||
|
||||
cfg, err := crio.New(
|
||||
crio.WithPath(o.Config),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Configure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to configure cri-o: %v", err)
|
||||
}
|
||||
|
||||
err = RestartCrio(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to restart crio: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup removes the specified prestart hook
|
||||
func Cleanup(c *cli.Context, o *container.Options, co *Options) error {
|
||||
log.Infof("Starting 'cleanup' for %v", c.App.Name)
|
||||
|
||||
switch co.configMode {
|
||||
case "hook":
|
||||
return cleanupHook(co)
|
||||
case "config":
|
||||
return cleanupConfig(o)
|
||||
default:
|
||||
return fmt.Errorf("invalid config-mode '%v'", co.configMode)
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupHook removes the prestart hook
|
||||
func cleanupHook(co *Options) error {
|
||||
log.Infof("Removing prestart hook")
|
||||
|
||||
hookPath := filepath.Join(co.hooksDir, co.hookFilename)
|
||||
err := os.Remove(hookPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error removing hook '%v': %v", hookPath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupConfig removes the NVIDIA container runtime from the cri-o config
|
||||
func cleanupConfig(o *container.Options) error {
|
||||
log.Infof("Reverting config file modifications")
|
||||
|
||||
cfg, err := crio.New(
|
||||
crio.WithPath(o.Config),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Unconfigure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to unconfigure cri-o: %v", err)
|
||||
}
|
||||
|
||||
err = RestartCrio(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to restart crio: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RestartCrio restarts crio depending on the value of restartModeFlag
|
||||
func RestartCrio(o *container.Options) error {
|
||||
return o.Restart("crio", func(string) error { return fmt.Errorf("supporting crio via signal is unsupported") })
|
||||
}
|
98
tools/container/runtime/docker/docker.go
Normal file
98
tools/container/runtime/docker/docker.go
Normal file
@ -0,0 +1,98 @@
|
||||
/**
|
||||
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
cli "github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/pkg/config/engine/docker"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/tools/container"
|
||||
)
|
||||
|
||||
const (
|
||||
Name = "docker"
|
||||
|
||||
DefaultConfig = "/etc/docker/daemon.json"
|
||||
DefaultSocket = "/var/run/docker.sock"
|
||||
DefaultRestartMode = "signal"
|
||||
)
|
||||
|
||||
type Options struct{}
|
||||
|
||||
func Flags(opts *Options) []cli.Flag {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Setup updates docker configuration to include the nvidia runtime and reloads it
|
||||
func Setup(c *cli.Context, o *container.Options) error {
|
||||
log.Infof("Starting 'setup' for %v", c.App.Name)
|
||||
|
||||
cfg, err := docker.New(
|
||||
docker.WithPath(o.Config),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Configure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to configure docker: %v", err)
|
||||
}
|
||||
|
||||
err = RestartDocker(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to restart docker: %v", err)
|
||||
}
|
||||
|
||||
log.Infof("Completed 'setup' for %v", c.App.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup reverts docker configuration to remove the nvidia runtime and reloads it
|
||||
func Cleanup(c *cli.Context, o *container.Options) error {
|
||||
log.Infof("Starting 'cleanup' for %v", c.App.Name)
|
||||
|
||||
cfg, err := docker.New(
|
||||
docker.WithPath(o.Config),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to load config: %v", err)
|
||||
}
|
||||
|
||||
err = o.Unconfigure(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to unconfigure docker: %v", err)
|
||||
}
|
||||
|
||||
err = RestartDocker(o)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to signal docker: %v", err)
|
||||
}
|
||||
|
||||
log.Infof("Completed 'cleanup' for %v", c.App.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RestartDocker restarts docker depending on the value of restartModeFlag
|
||||
func RestartDocker(o *container.Options) error {
|
||||
return o.Restart("docker", SignalDocker)
|
||||
}
|
@ -14,7 +14,7 @@
# limitations under the License.
**/

-package main
+package docker

import (
	"fmt"
@ -17,7 +17,7 @@
# limitations under the License.
**/

-package main
+package docker

import (
	"errors"
@ -14,7 +14,7 @@
# limitations under the License.
*/

-package main
+package docker

import (
	"encoding/json"
@ -52,12 +52,10 @@ func TestUpdateConfigDefaultRuntime(t *testing.T) {
	}

	for i, tc := range testCases {
-		o := &options{
-			Options: container.Options{
-				RuntimeName:  tc.runtimeName,
-				RuntimeDir:   runtimeDir,
-				SetAsDefault: tc.setAsDefault,
-			},
+		o := &container.Options{
+			RuntimeName:  tc.runtimeName,
+			RuntimeDir:   runtimeDir,
+			SetAsDefault: tc.setAsDefault,
		}

		config := docker.Config(map[string]interface{}{})
@ -238,12 +236,10 @@ func TestUpdateConfig(t *testing.T) {
	for i, tc := range testCases {
		tc := tc

-		o := &options{
-			Options: container.Options{
-				RuntimeName:  tc.runtimeName,
-				RuntimeDir:   runtimeDir,
-				SetAsDefault: tc.setAsDefault,
-			},
+		o := &container.Options{
+			RuntimeName:  tc.runtimeName,
+			RuntimeDir:   runtimeDir,
+			SetAsDefault: tc.setAsDefault,
		}

		err := o.UpdateConfig(&tc.config)
@ -365,7 +361,7 @@ func TestRevertConfig(t *testing.T) {

	for i, tc := range testCases {
		tc := tc
-		o := &options{}
+		o := &container.Options{}
		err := o.RevertConfig(&tc.config)

		require.NoError(t, err, "%d: %v", i, tc)
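The refactor above means tests (and other callers) construct container.Options directly instead of wrapping it in a package-local options struct. Below is a small self-contained sketch of that pattern, assuming the config type accepted by UpdateConfig and RevertConfig matches the docker.Config used in these tests.

package options_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/NVIDIA/nvidia-container-toolkit/pkg/config/engine/docker"
	"github.com/NVIDIA/nvidia-container-toolkit/tools/container"
)

// TestOptionsRoundTrip exercises container.Options against an in-memory
// docker config: the nvidia runtime is added and then reverted again.
func TestOptionsRoundTrip(t *testing.T) {
	o := &container.Options{
		RuntimeName:  "nvidia",
		RuntimeDir:   "/usr/local/nvidia/toolkit", // hypothetical runtime dir
		SetAsDefault: true,
	}

	// An empty daemon.json represented as the engine's config type.
	config := docker.Config(map[string]interface{}{})

	require.NoError(t, o.UpdateConfig(&config))
	require.NoError(t, o.RevertConfig(&config))
}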
168 tools/container/runtime/runtime.go Normal file
@ -0,0 +1,168 @@
/**
# Copyright 2024 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
**/

package runtime

import (
	"fmt"

	"github.com/urfave/cli/v2"

	"github.com/NVIDIA/nvidia-container-toolkit/tools/container"
	"github.com/NVIDIA/nvidia-container-toolkit/tools/container/runtime/containerd"
	"github.com/NVIDIA/nvidia-container-toolkit/tools/container/runtime/crio"
	"github.com/NVIDIA/nvidia-container-toolkit/tools/container/runtime/docker"
)

const (
	defaultSetAsDefault = true
	// defaultRuntimeName specifies the NVIDIA runtime to be used as the default runtime if setting the default runtime is enabled
	defaultRuntimeName   = "nvidia"
	defaultHostRootMount = "/host"

	runtimeSpecificDefault = "RUNTIME_SPECIFIC_DEFAULT"
)

type Options struct {
	container.Options

	containerdOptions containerd.Options
	crioOptions       crio.Options
}

func Flags(opts *Options) []cli.Flag {
	flags := []cli.Flag{
		&cli.StringFlag{
			Name:        "config",
			Usage:       "Path to the runtime config file",
			Value:       runtimeSpecificDefault,
			Destination: &opts.Config,
			EnvVars:     []string{"RUNTIME_CONFIG"},
		},
		&cli.StringFlag{
			Name:        "socket",
			Usage:       "Path to the runtime socket file",
			Value:       runtimeSpecificDefault,
			Destination: &opts.Socket,
			EnvVars:     []string{"RUNTIME_SOCKET"},
		},
		&cli.StringFlag{
			Name:        "restart-mode",
			Usage:       "Specify how the runtime should be restarted; If 'none' is selected it will not be restarted [signal | systemd | none ]",
			Value:       runtimeSpecificDefault,
			Destination: &opts.RestartMode,
			EnvVars:     []string{"RUNTIME_RESTART_MODE"},
		},
		&cli.StringFlag{
			Name:        "host-root",
			Usage:       "Specify the path to the host root to be used when restarting the runtime using systemd",
			Value:       defaultHostRootMount,
			Destination: &opts.HostRootMount,
			EnvVars:     []string{"HOST_ROOT_MOUNT"},
		},
		&cli.StringFlag{
			Name:        "runtime-name",
			Aliases:     []string{"nvidia-runtime-name", "runtime-class"},
			Usage:       "Specify the name of the `nvidia` runtime. If set-as-default is selected, the runtime is used as the default runtime.",
			Value:       defaultRuntimeName,
			Destination: &opts.RuntimeName,
			EnvVars:     []string{"NVIDIA_RUNTIME_NAME"},
		},
		&cli.BoolFlag{
			Name:        "set-as-default",
			Usage:       "Set the `nvidia` runtime as the default runtime.",
			Value:       defaultSetAsDefault,
			Destination: &opts.SetAsDefault,
			EnvVars:     []string{"NVIDIA_RUNTIME_SET_AS_DEFAULT"},
			Hidden:      true,
		},
	}

	flags = append(flags, containerd.Flags(&opts.containerdOptions)...)
	flags = append(flags, crio.Flags(&opts.crioOptions)...)

	return flags
}

// ValidateOptions checks whether the specified options are valid
func ValidateOptions(opts *Options, runtime string, toolkitRoot string) error {
	// We set this option here to ensure that it is available in future calls.
	opts.RuntimeDir = toolkitRoot

	// Apply the runtime-specific config changes.
	switch runtime {
	case containerd.Name:
		if opts.Config == runtimeSpecificDefault {
			opts.Config = containerd.DefaultConfig
		}
		if opts.Socket == runtimeSpecificDefault {
			opts.Socket = containerd.DefaultSocket
		}
		if opts.RestartMode == runtimeSpecificDefault {
			opts.RestartMode = containerd.DefaultRestartMode
		}
	case crio.Name:
		if opts.Config == runtimeSpecificDefault {
			opts.Config = crio.DefaultConfig
		}
		if opts.Socket == runtimeSpecificDefault {
			opts.Socket = crio.DefaultSocket
		}
		if opts.RestartMode == runtimeSpecificDefault {
			opts.RestartMode = crio.DefaultRestartMode
		}
	case docker.Name:
		if opts.Config == runtimeSpecificDefault {
			opts.Config = docker.DefaultConfig
		}
		if opts.Socket == runtimeSpecificDefault {
			opts.Socket = docker.DefaultSocket
		}
		if opts.RestartMode == runtimeSpecificDefault {
			opts.RestartMode = docker.DefaultRestartMode
		}
	default:
		return fmt.Errorf("undefined runtime %v", runtime)
	}

	return nil
}

func Setup(c *cli.Context, opts *Options, runtime string) error {
	switch runtime {
	case containerd.Name:
		return containerd.Setup(c, &opts.Options, &opts.containerdOptions)
	case crio.Name:
		return crio.Setup(c, &opts.Options, &opts.crioOptions)
	case docker.Name:
		return docker.Setup(c, &opts.Options)
	default:
		return fmt.Errorf("undefined runtime %v", runtime)
	}
}

func Cleanup(c *cli.Context, opts *Options, runtime string) error {
	switch runtime {
	case containerd.Name:
		return containerd.Cleanup(c, &opts.Options, &opts.containerdOptions)
	case crio.Name:
		return crio.Cleanup(c, &opts.Options, &opts.crioOptions)
	case docker.Name:
		return docker.Cleanup(c, &opts.Options)
	default:
		return fmt.Errorf("undefined runtime %v", runtime)
	}
}
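A minimal sketch (not part of this commit) of how Flags, ValidateOptions and Setup from this package might be composed into a command. The application name, the hard-coded runtime selection and the toolkit root value are assumptions for illustration; the function signatures are those introduced above.

package main

import (
	"log"
	"os"

	cli "github.com/urfave/cli/v2"

	"github.com/NVIDIA/nvidia-container-toolkit/tools/container/runtime"
)

func main() {
	opts := runtime.Options{}
	runtimeName := "docker"                    // assumed: which engine to configure
	toolkitRoot := "/usr/local/nvidia/toolkit" // assumed install location

	app := cli.NewApp()
	app.Name = "runtime-configure-sketch"
	// Registers the shared --config/--socket/--restart-mode/... flags plus the
	// containerd- and crio-specific ones.
	app.Flags = runtime.Flags(&opts)
	app.Before = func(c *cli.Context) error {
		// Resolves the RUNTIME_SPECIFIC_DEFAULT placeholders to the chosen
		// engine's defaults and records the runtime directory.
		return runtime.ValidateOptions(&opts, runtimeName, toolkitRoot)
	}
	app.Action = func(c *cli.Context) error {
		return runtime.Setup(c, &opts, runtimeName)
	}

	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}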
@ -14,7 +14,7 @@
# limitations under the License.
*/

-package main
+package toolkit

import (
	"fmt"
@ -14,7 +14,7 @@
# limitations under the License.
*/

-package main
+package toolkit

import (
	"bytes"
@ -14,7 +14,7 @@
# limitations under the License.
*/

-package main
+package toolkit

import "strings"

@ -14,7 +14,7 @@
# limitations under the License.
*/

-package main
+package toolkit

import (
	"fmt"
@ -14,7 +14,7 @@
# limitations under the License.
*/

-package main
+package toolkit

import (
	"bytes"
@ -14,7 +14,7 @@
# limitations under the License.
*/

-package main
+package toolkit

import (
	"errors"
@ -49,7 +49,7 @@ const (
	toolkitPidFilename = "toolkit.pid"
)

-type options struct {
+type Options struct {
	DriverRoot        string
	DevRoot           string
	DriverRootCtrPath string
@ -67,7 +67,6 @@ type options struct {
	ContainerRuntimeHookSkipModeDetection bool

	ContainerCLIDebug string
-	toolkitRoot       string

	cdiEnabled   bool
	cdiOutputDir string
@ -83,46 +82,7 @@ type options struct {
	ignoreErrors bool
}

-func main() {
-
-	opts := options{}
-
-	// Create the top-level CLI
-	c := cli.NewApp()
-	c.Name = "toolkit"
-	c.Usage = "Manage the NVIDIA container toolkit"
-	c.Version = "0.1.0"
-
-	// Create the 'install' subcommand
-	install := cli.Command{}
-	install.Name = "install"
-	install.Usage = "Install the components of the NVIDIA container toolkit"
-	install.ArgsUsage = "<toolkit_directory>"
-	install.Before = func(c *cli.Context) error {
-		return validateOptions(c, &opts)
-	}
-	install.Action = func(c *cli.Context) error {
-		return Install(c, &opts)
-	}
-
-	// Create the 'delete' command
-	delete := cli.Command{}
-	delete.Name = "delete"
-	delete.Usage = "Delete the NVIDIA container toolkit"
-	delete.ArgsUsage = "<toolkit_directory>"
-	delete.Before = func(c *cli.Context) error {
-		return validateOptions(c, &opts)
-	}
-	delete.Action = func(c *cli.Context) error {
-		return TryDelete(c, &opts)
-	}
-
-	// Register the subcommand with the top-level CLI
-	c.Commands = []*cli.Command{
-		&install,
-		&delete,
-	}
-
+func Flags(opts *Options) []cli.Flag {
	flags := []cli.Flag{
		&cli.StringFlag{
			Name: "driver-root",
@ -209,13 +169,6 @@ func main() {
			Destination: &opts.acceptNVIDIAVisibleDevicesAsVolumeMounts,
			EnvVars:     []string{"ACCEPT_NVIDIA_VISIBLE_DEVICES_AS_VOLUME_MOUNTS"},
		},
-		&cli.StringFlag{
-			Name:        "toolkit-root",
-			Usage:       "The directory where the NVIDIA Container toolkit is to be installed",
-			Required:    true,
-			Destination: &opts.toolkitRoot,
-			EnvVars:     []string{"TOOLKIT_ROOT"},
-		},
		&cli.BoolFlag{
			Name:    "cdi-enabled",
			Aliases: []string{"enable-cdi"},
@ -252,20 +205,13 @@ func main() {
		},
	}

-	// Update the subcommand flags with the common subcommand flags
-	install.Flags = append([]cli.Flag{}, flags...)
-	delete.Flags = append([]cli.Flag{}, flags...)
-
-	// Run the top-level CLI
-	if err := c.Run(os.Args); err != nil {
-		log.Fatal(fmt.Errorf("error: %v", err))
-	}
+	return flags
}

-// validateOptions checks whether the specified options are valid
-func validateOptions(c *cli.Context, opts *options) error {
-	if opts.toolkitRoot == "" {
-		return fmt.Errorf("invalid --toolkit-root option: %v", opts.toolkitRoot)
+// ValidateOptions checks whether the specified options are valid
+func ValidateOptions(opts *Options, toolkitRoot string) error {
+	if toolkitRoot == "" {
+		return fmt.Errorf("invalid --toolkit-root option: %v", toolkitRoot)
	}

	vendor, class := parser.ParseQualifier(opts.cdiKind)
@ -306,90 +252,90 @@ func validateOptions(c *cli.Context, opts *options) error {

// TryDelete attempts to remove the specified toolkit folder.
// A toolkit.pid file -- if present -- is skipped.
-func TryDelete(cli *cli.Context, opts *options) error {
-	log.Infof("Attempting to delete NVIDIA container toolkit from '%v'", opts.toolkitRoot)
+func TryDelete(cli *cli.Context, toolkitRoot string) error {
+	log.Infof("Attempting to delete NVIDIA container toolkit from '%v'", toolkitRoot)

-	contents, err := os.ReadDir(opts.toolkitRoot)
+	contents, err := os.ReadDir(toolkitRoot)
	if err != nil && errors.Is(err, os.ErrNotExist) {
		return nil
	} else if err != nil {
-		return fmt.Errorf("failed to read the contents of %v: %w", opts.toolkitRoot, err)
+		return fmt.Errorf("failed to read the contents of %v: %w", toolkitRoot, err)
	}

	for _, content := range contents {
		if content.Name() == toolkitPidFilename {
			continue
		}
-		name := filepath.Join(opts.toolkitRoot, content.Name())
+		name := filepath.Join(toolkitRoot, content.Name())
		if err := os.RemoveAll(name); err != nil {
			log.Warningf("could not remove %v: %v", name, err)
		}
	}
-	if err := os.RemoveAll(opts.toolkitRoot); err != nil {
-		log.Warningf("could not remove %v: %v", opts.toolkitRoot, err)
+	if err := os.RemoveAll(toolkitRoot); err != nil {
+		log.Warningf("could not remove %v: %v", toolkitRoot, err)
	}
	return nil
}

// Install installs the components of the NVIDIA container toolkit.
// Any existing installation is removed.
-func Install(cli *cli.Context, opts *options) error {
-	log.Infof("Installing NVIDIA container toolkit to '%v'", opts.toolkitRoot)
+func Install(cli *cli.Context, opts *Options, toolkitRoot string) error {
+	log.Infof("Installing NVIDIA container toolkit to '%v'", toolkitRoot)

	log.Infof("Removing existing NVIDIA container toolkit installation")
-	err := os.RemoveAll(opts.toolkitRoot)
+	err := os.RemoveAll(toolkitRoot)
	if err != nil && !opts.ignoreErrors {
		return fmt.Errorf("error removing toolkit directory: %v", err)
	} else if err != nil {
		log.Errorf("Ignoring error: %v", fmt.Errorf("error removing toolkit directory: %v", err))
	}

-	toolkitConfigDir := filepath.Join(opts.toolkitRoot, ".config", "nvidia-container-runtime")
+	toolkitConfigDir := filepath.Join(toolkitRoot, ".config", "nvidia-container-runtime")
	toolkitConfigPath := filepath.Join(toolkitConfigDir, configFilename)

-	err = createDirectories(opts.toolkitRoot, toolkitConfigDir)
+	err = createDirectories(toolkitRoot, toolkitConfigDir)
	if err != nil && !opts.ignoreErrors {
		return fmt.Errorf("could not create required directories: %v", err)
	} else if err != nil {
		log.Errorf("Ignoring error: %v", fmt.Errorf("could not create required directories: %v", err))
	}

-	err = installContainerLibraries(opts.toolkitRoot)
+	err = installContainerLibraries(toolkitRoot)
	if err != nil && !opts.ignoreErrors {
		return fmt.Errorf("error installing NVIDIA container library: %v", err)
	} else if err != nil {
		log.Errorf("Ignoring error: %v", fmt.Errorf("error installing NVIDIA container library: %v", err))
	}

-	err = installContainerRuntimes(opts.toolkitRoot, opts.DriverRoot)
+	err = installContainerRuntimes(toolkitRoot, opts.DriverRoot)
	if err != nil && !opts.ignoreErrors {
		return fmt.Errorf("error installing NVIDIA container runtime: %v", err)
	} else if err != nil {
		log.Errorf("Ignoring error: %v", fmt.Errorf("error installing NVIDIA container runtime: %v", err))
	}

-	nvidiaContainerCliExecutable, err := installContainerCLI(opts.toolkitRoot)
+	nvidiaContainerCliExecutable, err := installContainerCLI(toolkitRoot)
	if err != nil && !opts.ignoreErrors {
		return fmt.Errorf("error installing NVIDIA container CLI: %v", err)
	} else if err != nil {
		log.Errorf("Ignoring error: %v", fmt.Errorf("error installing NVIDIA container CLI: %v", err))
	}

-	nvidiaContainerRuntimeHookPath, err := installRuntimeHook(opts.toolkitRoot, toolkitConfigPath)
+	nvidiaContainerRuntimeHookPath, err := installRuntimeHook(toolkitRoot, toolkitConfigPath)
	if err != nil && !opts.ignoreErrors {
		return fmt.Errorf("error installing NVIDIA container runtime hook: %v", err)
	} else if err != nil {
		log.Errorf("Ignoring error: %v", fmt.Errorf("error installing NVIDIA container runtime hook: %v", err))
	}

-	nvidiaCTKPath, err := installContainerToolkitCLI(opts.toolkitRoot)
+	nvidiaCTKPath, err := installContainerToolkitCLI(toolkitRoot)
	if err != nil && !opts.ignoreErrors {
		return fmt.Errorf("error installing NVIDIA Container Toolkit CLI: %v", err)
	} else if err != nil {
		log.Errorf("Ignoring error: %v", fmt.Errorf("error installing NVIDIA Container Toolkit CLI: %v", err))
	}

-	nvidiaCDIHookPath, err := installContainerCDIHookCLI(opts.toolkitRoot)
+	nvidiaCDIHookPath, err := installContainerCDIHookCLI(toolkitRoot)
	if err != nil && !opts.ignoreErrors {
		return fmt.Errorf("error installing NVIDIA Container CDI Hook CLI: %v", err)
	} else if err != nil {
@ -470,7 +416,7 @@ func installLibrary(libName string, toolkitRoot string) error {

// installToolkitConfig installs the config file for the NVIDIA container toolkit ensuring
// that the settings are updated to match the desired install and nvidia driver directories.
-func installToolkitConfig(c *cli.Context, toolkitConfigPath string, nvidiaContainerCliExecutablePath string, nvidiaCTKPath string, nvidaContainerRuntimeHookPath string, opts *options) error {
+func installToolkitConfig(c *cli.Context, toolkitConfigPath string, nvidiaContainerCliExecutablePath string, nvidiaCTKPath string, nvidaContainerRuntimeHookPath string, opts *Options) error {
	log.Infof("Installing NVIDIA container toolkit config '%v'", toolkitConfigPath)

	cfg, err := loadConfig(nvidiaContainerToolkitConfigSource)
@ -777,7 +723,7 @@ func createDirectories(dir ...string) error {
	return nil
}

-func createDeviceNodes(opts *options) error {
+func createDeviceNodes(opts *Options) error {
	modes := opts.createDeviceNodes.Value()
	if len(modes) == 0 {
		return nil
@ -804,7 +750,7 @@ func createDeviceNodes(opts *options) error {
}

// generateCDISpec generates a CDI spec for use in management containers
-func generateCDISpec(opts *options, nvidiaCDIHookPath string) error {
+func generateCDISpec(opts *Options, nvidiaCDIHookPath string) error {
	if !opts.cdiEnabled {
		return nil
	}
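Taken together, the toolkit refactor replaces the old self-contained main() with an importable API: Flags binds the options, ValidateOptions checks them against an explicitly supplied toolkit root, and Install/TryDelete receive that root as a parameter rather than reading it from the removed --toolkit-root flag. Below is a minimal sketch of a caller, with the import path, command names and toolkit root assumed for illustration.

package main

import (
	"log"
	"os"

	cli "github.com/urfave/cli/v2"

	// Import path assumed for illustration.
	"github.com/NVIDIA/nvidia-container-toolkit/tools/container/toolkit"
)

func main() {
	opts := toolkit.Options{}
	toolkitRoot := "/usr/local/nvidia/toolkit" // assumed install target

	install := &cli.Command{
		Name:  "install",
		Flags: toolkit.Flags(&opts),
		Before: func(c *cli.Context) error {
			// The toolkit root is now passed in explicitly rather than bound to a flag.
			return toolkit.ValidateOptions(&opts, toolkitRoot)
		},
		Action: func(c *cli.Context) error {
			return toolkit.Install(c, &opts, toolkitRoot)
		},
	}
	remove := &cli.Command{
		Name: "delete",
		Action: func(c *cli.Context) error {
			return toolkit.TryDelete(c, toolkitRoot)
		},
	}

	app := cli.NewApp()
	app.Name = "toolkit-sketch"
	app.Commands = []*cli.Command{install, remove}

	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}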