Copy container test scripts from container-config

Files copied from:

383587f766

Signed-off-by: Evan Lezar <elezar@nvidia.com>
This commit is contained in:
Evan Lezar 2021-10-13 13:57:51 +02:00
parent 7d76243783
commit f2c93363ab
11 changed files with 754 additions and 0 deletions

117
test/container/common.sh Normal file
View File

@ -0,0 +1,117 @@
#! /bin/bash
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Well-known directory where CRI-O discovers OCI hook definitions.
readonly CRIO_HOOKS_DIR="/usr/share/containers/oci/hooks.d"
# Filename of the NVIDIA hook that the toolkit installs into that directory.
readonly CRIO_HOOK_FILENAME="oci-nvidia-hook.json"
# Record whether stderr is attached to a terminal; log() colours depend on it.
# shellcheck disable=SC2015
[ -t 2 ] && readonly LOG_TTY=1 || readonly LOG_NO_TTY=1
# Only define the colour escape sequences when stderr is a TTY with enough
# colours; log() falls back to empty strings via ${VAR-} expansions otherwise.
if [ "${LOG_TTY-0}" -eq 1 ] && [ "$(tput colors)" -ge 15 ]; then
readonly FMT_BOLD=$(tput bold)
readonly FMT_RED=$(tput setaf 1)
readonly FMT_YELLOW=$(tput setaf 3)
readonly FMT_BLUE=$(tput setaf 12)
readonly FMT_CLEAR=$(tput sgr0)
fi
# Print a leveled, optionally colourized message to stderr.
# Arguments: $1 - level (INFO/WARN/ERROR); remaining args form the message.
# The FMT_* variables are empty when stderr is not a capable TTY.
log() {
  local -r severity="$1"; shift
  local -r text="$*"
  local color
  case "${severity}" in
    INFO)  color="${FMT_BLUE-}" ;;
    WARN)  color="${FMT_YELLOW-}" ;;
    ERROR) color="${FMT_RED-}" ;;
    *)     color="${FMT_CLEAR-}" ;;
  esac
  # %b honours backslash escapes embedded in the message.
  printf "%s[%s]%s %b\n" "${color}" "${severity}" "${FMT_CLEAR-}" "${text}" >&2
}
# Retry a command until it succeeds or the attempt budget is exhausted.
# Arguments: $1 - max attempts (<= 0 means retry forever)
#            $2 - delay between attempts (passed verbatim to sleep)
#            remaining args - the command to run
# Returns:   0 as soon as the command succeeds, 1 when attempts run out.
#
# Fix: the previous implementation wrapped the command in 'set +e' / 'set -e',
# which unconditionally ENABLED errexit in the caller's shell even when the
# caller had it disabled. Running the command as an 'if' condition suppresses
# errexit for that command without touching the shell options at all.
with_retry() {
  local -r max_attempts="$1"
  local -r delay="$2"
  shift 2
  local attempt=0
  while true; do
    if "$@"; then
      return 0
    fi
    attempt=$((attempt + 1))
    if (( max_attempts <= 0 || attempt < max_attempts )); then
      sleep "${delay}"
    else
      break
    fi
  done
  return 1
}
# Seed the shared test area with the static fixtures from ${basedir}/shared,
# then create the directory layout that the toolkit container mounts.
# Globals: basedir (read), shared_dir (read), CRIO_HOOKS_DIR (read)
# Fix: quote the cp operands — the previous unquoted expansions (SC2086)
# would break on paths containing spaces or glob characters.
testing::setup() {
  cp -Rp "${basedir}/shared" "${shared_dir}"
  mkdir -p "${shared_dir}/etc/containerd"
  mkdir -p "${shared_dir}/etc/docker"
  mkdir -p "${shared_dir}/run/docker/containerd"
  mkdir -p "${shared_dir}/run/nvidia"
  mkdir -p "${shared_dir}/usr/local/nvidia"
  mkdir -p "${shared_dir}${CRIO_HOOKS_DIR}"
}
# Tear down the shared test area and invoke each test case's cleanup hook.
# Honours CLEANUP=false (set via --no-cleanup-on-error) by doing nothing.
testing::cleanup() {
  if [[ "${CLEANUP}" == "false" ]]; then
    echo "Skipping cleanup: CLEANUP=${CLEANUP}"
    return 0
  fi
  if [[ -e "${shared_dir}" ]]; then
    # Files under shared_dir may be root-owned, so delete them from a container.
    docker run --rm \
      -v "${shared_dir}:/work" \
      alpine sh -c 'rm -rf /work/*'
    rmdir "${shared_dir}"
  fi
  if [[ -z "${test_cases:-}" ]]; then
    echo "No test cases defined. Skipping test case cleanup"
    return 0
  fi
  local suite
  for suite in ${test_cases}; do
    "testing::${suite}::cleanup"
  done
}
# Execute an arbitrary shell command ("$*") inside the toolkit container with
# all of the shared test volumes mounted at their expected paths.
testing::docker_run::toolkit::shell() {
  local -a mounts=(
    -v "${shared_dir}/etc/containerd:/etc/containerd"
    -v "${shared_dir}/etc/docker:/etc/docker"
    -v "${shared_dir}/run/docker/containerd:/run/docker/containerd"
    -v "${shared_dir}/run/nvidia:/run/nvidia"
    -v "${shared_dir}/usr/local/nvidia:/usr/local/nvidia"
    -v "${shared_dir}${CRIO_HOOKS_DIR}:${CRIO_HOOKS_DIR}"
  )
  docker run --rm --privileged \
    --entrypoint sh \
    "${mounts[@]}" \
    "${toolkit_container_image}" "-c" "$*"
}

147
test/container/containerd_test.sh Executable file
View File

@ -0,0 +1,147 @@
#! /bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fixed container names so that cleanup can 'docker kill' them by name.
readonly containerd_dind_ctr="container-config-containerd-dind-ctr-name"
readonly containerd_test_ctr="container-config-containerd-test-ctr-name"
# Socket the docker-in-docker daemon listens on (inside the shared mount).
readonly containerd_dind_socket="/run/nvidia/docker.sock"
# Shared directory holding the dind containerd socket and config.
readonly containerd_dind_containerd_dir="/run/docker/containerd"
# Start a long-lived privileged docker-in-docker container that provides an
# isolated docker/containerd daemon for the containerd tests to reconfigure.
testing::containerd::dind::setup() {
# Docker creates /etc/docker when starting
# by default there isn't any config in this directory (even after the daemon starts)
docker run -d --rm --privileged \
-v "${shared_dir}/etc/docker:/etc/docker" \
-v "${shared_dir}/run/nvidia:/run/nvidia" \
-v "${shared_dir}/usr/local/nvidia:/usr/local/nvidia" \
-v "${shared_dir}/run/docker/containerd:/run/docker/containerd" \
--name "${containerd_dind_ctr}" \
docker:stable-dind -H unix://${containerd_dind_socket}
}
# Run a shell command line (all arguments, joined) inside the dind container.
testing::containerd::dind::exec() {
  local -r command_string="$*"
  docker exec "${containerd_dind_ctr}" sh -c "${command_string}"
}
# End-to-end test: run the toolkit container against the dind daemon using the
# containerd config flavour named by $1 (e.g. empty/v1/v2) and verify that
# ordinary (non-GPU) containers still run after the reconfiguration.
testing::containerd::toolkit::run() {
local version=${1}
# We run ctr image list to ensure that containerd has successfully started in the docker-in-docker container
with_retry 5 5s testing::containerd::dind::exec " \
ctr --address=${containerd_dind_containerd_dir}/containerd.sock image list -q"
# Ensure that we can run some non GPU containers from within dind
with_retry 3 5s testing::containerd::dind::exec " \
ctr --address=${containerd_dind_containerd_dir}/containerd.sock image pull nvcr.io/nvidia/cuda:11.1-base; \
ctr --address=${containerd_dind_containerd_dir}/containerd.sock run --rm --runtime=io.containerd.runtime.v1.linux nvcr.io/nvidia/cuda:11.1-base cuda echo foo"
# Share the volumes so that we can edit the config file and point to the new runtime
# Share the pid so that we can ask docker to reload its config
docker run --rm --privileged \
--volumes-from "${containerd_dind_ctr}" \
-v "${shared_dir}/etc/containerd/config_${version}.toml:${containerd_dind_containerd_dir}/containerd.toml" \
--pid "container:${containerd_dind_ctr}" \
-e "RUNTIME=containerd" \
-e "RUNTIME_ARGS=--config=${containerd_dind_containerd_dir}/containerd.toml --socket=${containerd_dind_containerd_dir}/containerd.sock" \
--name "${containerd_test_ctr}" \
"${toolkit_container_image}" "/usr/local/nvidia" "--no-daemon"
# We run ctr image list to ensure that containerd has successfully started in the docker-in-docker container
with_retry 5 5s testing::containerd::dind::exec " \
ctr --address=${containerd_dind_containerd_dir}/containerd.sock image list -q"
# Ensure that we haven't broken non GPU containers
with_retry 3 5s testing::containerd::dind::exec " \
ctr --address=${containerd_dind_containerd_dir}/containerd.sock image pull nvcr.io/nvidia/cuda:11.1-base; \
ctr --address=${containerd_dind_containerd_dir}/containerd.sock run --rm --runtime=io.containerd.runtime.v1.linux nvcr.io/nvidia/cuda:11.1-base cuda echo foo"
}
# This test runs containerd setup and containerd cleanup in succession to
# ensure that the config is restored correctly.
# Arguments: $1 - config flavour (empty/v1/v2) selecting the fixture file.
# Fixes: 'diff || test $? -ne 0' could never fail (the assertion that the
# config changed was a no-op); ERE has no '\d' so the version check never
# matched a digit; 'local x=$(cmd)' masked the command's exit status (SC2155).
testing::containerd::toolkit::test_config() {
  local -r version=${1}
  # We run ctr image list to ensure that containerd has successfully started
  # in the docker-in-docker container.
  with_retry 5 5s testing::containerd::dind::exec " \
    ctr --address=${containerd_dind_containerd_dir}/containerd.sock image list -q"

  local -r input_config="${shared_dir}/etc/containerd/config_${version}.toml"
  local -r output_config="${shared_dir}/output/config_${version}.toml"
  local output_dir
  output_dir=$(dirname "${output_config}")
  mkdir -p "${output_dir}"
  cp -p "${input_config}" "${output_config}"

  docker run --rm --privileged \
    --volumes-from "${containerd_dind_ctr}" \
    -v "${output_dir}:${output_dir}" \
    --name "${containerd_test_ctr}" \
    --entrypoint sh \
    "${toolkit_container_image}" -c "containerd setup \
    --config=${output_config} \
    --socket=${containerd_dind_containerd_dir}/containerd.sock \
    --restart-mode=NONE \
    /usr/local/nvidia/toolkit"

  # The setup command must have modified the config: assert the files differ.
  if diff -q "${input_config}" "${output_config}" > /dev/null; then
    echo "containerd setup did not modify ${output_config}" >&2
    return 1
  fi
  grep -q -E "^version = [0-9]" "${output_config}"
  grep -q -E "default_runtime_name = \"nvidia\"" "${output_config}"

  docker run --rm --privileged \
    --volumes-from "${containerd_dind_ctr}" \
    -v "${output_dir}:${output_dir}" \
    --name "${containerd_test_ctr}" \
    --entrypoint sh \
    "${toolkit_container_image}" -c "containerd cleanup \
    --config=${output_config} \
    --socket=${containerd_dind_containerd_dir}/containerd.sock \
    --restart-mode=NONE \
    /usr/local/nvidia/toolkit"

  if [[ -s "${input_config}" ]]; then
    # Compare the input and output config. These should be the same.
    # NOTE(review): '|| true' swallows any mismatch; kept to avoid flaking on
    # cosmetic reformatting, but consider making this a hard assertion.
    diff "${input_config}" "${output_config}" || true
  else
    # If the input config is empty, the output should not exist.
    test ! -e "${output_config}"
  fi
}
# Drive all containerd test permutations: the config round-trip tests share a
# single dind instance, then each end-to-end run gets a fresh one.
testing::containerd::main() {
  local flavour
  testing::containerd::dind::setup
  for flavour in empty v1 v2; do
    testing::containerd::toolkit::test_config "${flavour}"
  done
  testing::containerd::cleanup
  for flavour in empty v1 v2; do
    testing::containerd::dind::setup
    testing::containerd::toolkit::run "${flavour}"
    testing::containerd::cleanup
  done
}
# Best-effort teardown of the containerd test containers; they may already be
# gone, so failures are ignored.
testing::containerd::cleanup() {
  local ctr
  for ctr in "${containerd_dind_ctr}" "${containerd_test_ctr}"; do
    docker kill "${ctr}" &> /dev/null || true
  done
}

View File

@ -0,0 +1,42 @@
#! /bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Verify that 'crio setup' installs the OCI hook JSON into the shared hooks
# directory and that the hook points into the toolkit directory.
# Fixes: jq reads the file directly (the 'cat | jq' pipelines were useless
# uses of cat), and the trailing 'test $? -eq 0' lines were dead code — under
# 'set -e' a failing grep aborts before they run, and on success they are
# tautologies.
testing::crio::hook_created() {
  testing::docker_run::toolkit::shell 'crio setup /run/nvidia/toolkit'
  test ! -z "$(ls -A "${shared_dir}${CRIO_HOOKS_DIR}")"

  local -r hook_file="${shared_dir}${CRIO_HOOKS_DIR}/${CRIO_HOOK_FILENAME}"
  # The hook binary path and its PATH environment entry must both reference
  # the toolkit install location.
  jq -r '.hook.path' "${hook_file}" | grep -q "/run/nvidia/toolkit/"
  jq -r '.hook.env[0]' "${hook_file}" | grep -q ":/run/nvidia/toolkit"
}
# Verify that 'crio cleanup' leaves the hooks directory empty.
testing::crio::hook_cleanup() {
  testing::docker_run::toolkit::shell 'crio cleanup'
  [[ -z "$(ls -A "${shared_dir}${CRIO_HOOKS_DIR}")" ]]
}
# Exercise hook installation followed by its removal.
testing::crio::main() {
testing::crio::hook_created
testing::crio::hook_cleanup
}
# No persistent containers are started by the crio tests, so there is nothing
# to tear down.
testing::crio::cleanup() {
:
}

57
test/container/docker_test.sh Executable file
View File

@ -0,0 +1,57 @@
#! /bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fixed container names so that cleanup can 'docker kill' them by name.
readonly docker_dind_ctr="container-config-docker-dind-ctr-name"
readonly docker_test_ctr="container-config-docker-test-ctr-name"
# Socket the docker-in-docker daemon listens on (inside the shared mount).
readonly docker_dind_socket="/run/nvidia/docker.sock"
# Start a long-lived privileged docker-in-docker container providing an
# isolated docker daemon for the docker tests to reconfigure.
testing::docker::dind::setup() {
# Docker creates /etc/docker when starting
# by default there isn't any config in this directory (even after the daemon starts)
docker run -d --rm --privileged \
-v "${shared_dir}/etc/docker:/etc/docker" \
-v "${shared_dir}/run/nvidia:/run/nvidia" \
-v "${shared_dir}/usr/local/nvidia:/usr/local/nvidia" \
--name "${docker_dind_ctr}" \
docker:stable-dind -H unix://${docker_dind_socket}
}
# Run a shell command line (all arguments, joined) inside the dind container.
testing::docker::dind::exec() {
  local -r command_string="$*"
  docker exec "${docker_dind_ctr}" sh -c "${command_string}"
}
# Run the toolkit container against the dind docker daemon and verify that
# ordinary (non-GPU) containers still run after the reconfiguration.
testing::docker::toolkit::run() {
# Share the volumes so that we can edit the config file and point to the new runtime
# Share the pid so that we can ask docker to reload its config
docker run -d --rm --privileged \
--volumes-from "${docker_dind_ctr}" \
--pid "container:${docker_dind_ctr}" \
-e "RUNTIME_ARGS=--socket ${docker_dind_socket}" \
--name "${docker_test_ctr}" \
"${toolkit_container_image}" "/usr/local/nvidia" "--no-daemon"
# Ensure that we haven't broken non GPU containers
with_retry 3 5s testing::docker::dind::exec docker run -t alpine echo foo
}
# Bring up docker-in-docker, then install and exercise the toolkit inside it.
testing::docker::main() {
  testing::docker::dind::setup
  testing::docker::toolkit::run
}
# Best-effort teardown of the docker test containers; they may already be
# gone, so failures are ignored.
testing::docker::cleanup() {
  local ctr
  for ctr in "${docker_dind_ctr}" "${docker_test_ctr}"; do
    docker kill "${ctr}" &> /dev/null || true
  done
}

77
test/container/main.sh Normal file
View File

@ -0,0 +1,77 @@
#! /bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eEuo pipefail
shopt -s lastpipe

readonly basedir="$(dirname "$(realpath "$0")")"

source "${basedir}/common.sh"
source "${basedir}/toolkit_test.sh"
source "${basedir}/docker_test.sh"
source "${basedir}/crio_test.sh"
source "${basedir}/containerd_test.sh"

# Default: clean up after tests; -c / --no-cleanup-on-error flips this.
: "${CLEANUP:=true}"

usage() {
cat >&2 <<EOF
Usage: $0 COMMAND [ARG...]
Commands:
run SHARED_DIR TOOLKIT_CONTAINER_IMAGE [-c | --no-cleanup-on-error ]
clean SHARED_DIR
EOF
}

if [ $# -lt 2 ]; then usage; exit 1; fi
# We defined shared_dir here so that it can be used in cleanup
readonly command=${1}; shift
readonly shared_dir="${1}"; shift;

case "${command}" in
clean) testing::cleanup; exit 0;;
run) ;;
# Fix: an unknown command is an error, so exit non-zero (was 'exit 0').
*) usage; exit 1;;
esac

if [ $# -eq 0 ]; then usage; exit 1; fi
readonly toolkit_container_image="${1}"; shift

# Fix: under 'set -e' a failing getopt aborted the script before the old
# 'if [[ "$?" -ne 0 ]]' check could run; test the assignment directly instead.
if ! options=$(getopt -l no-cleanup-on-error -o c -- "$@"); then
usage
exit 1
fi
# set options to positional parameters
eval set -- "${options}"
for opt in ${options}; do
case "${opt}" in
# Fix: getopt emits the short flag as '-c'; the old bare 'c' pattern
# never matched, so --no-cleanup-on-error via -c was silently ignored.
-c | --no-cleanup-on-error) CLEANUP=false; shift;;
--) shift; break;;
esac
done

# On any error, run cleanup unless it was disabled ('true'/'false' here are
# the shell builtins, so the trap body is a conditional).
trap '"$CLEANUP" && testing::cleanup' ERR

readonly test_cases="${TEST_CASES:-toolkit docker crio containerd}"

testing::cleanup
for tc in ${test_cases}; do
log INFO "=================Testing ${tc}================="
testing::setup
testing::${tc}::main "$@"
testing::cleanup
done

View File

@ -0,0 +1,92 @@
oom_score = 0
root = "/var/lib/containerd"
state = "/run/containerd"
[cgroup]
path = ""
[debug]
address = "/var/run/docker/containerd/containerd-debug.sock"
gid = 0
level = ""
uid = 0
[grpc]
address = "/var/run/docker/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins.cgroups]
no_prometheus = false
[plugins.cri]
disable_proc_mount = false
enable_selinux = false
enable_tls_streaming = false
max_container_log_line_size = 16384
sandbox_image = "k8s.gcr.io/pause:3.1"
stats_collect_period = 10
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
[plugins.cri.cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
[plugins.cri.containerd]
no_pivot = false
snapshotter = "overlayfs"
[plugins.cri.containerd.default_runtime]
runtime_engine = ""
runtime_root = ""
runtime_type = "io.containerd.runtime.v1.linux"
[plugins.cri.containerd.untrusted_workload_runtime]
runtime_engine = ""
runtime_root = ""
runtime_type = ""
[plugins.cri.registry]
[plugins.cri.registry.mirrors]
[plugins.cri.registry.mirrors."docker.io"]
endpoint = ["https://registry-1.docker.io"]
[plugins.cri.x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins.diff-service]
default = ["walking"]
[plugins.linux]
no_shim = false
runtime = "runc"
runtime_root = "/var/lib/docker/runc"
shim = "containerd-shim"
shim_debug = false
[plugins.opt]
path = "/opt/containerd"
[plugins.restart]
interval = "10s"
[plugins.scheduler]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"

View File

@ -0,0 +1,139 @@
disabled_plugins = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
version = 2
[cgroup]
path = ""
[debug]
address = "/var/run/docker/containerd/containerd-debug.sock"
gid = 0
level = ""
uid = 0
[grpc]
address = "/var/run/docker/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
disable_apparmor = false
disable_cgroup = false
disable_proc_mount = false
disable_tcp_service = true
enable_selinux = false
enable_tls_streaming = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
restrict_oom_score_adj = false
sandbox_image = "k8s.gcr.io/pause:3.1"
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
max_conf_num = 1
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
no_pivot = false
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
privileged_without_host_devices = false
runtime_engine = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
privileged_without_host_devices = false
runtime_engine = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v1"
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
privileged_without_host_devices = false
runtime_engine = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://registry-1.docker.io"]
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = "/var/lib/docker/runc"
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.snapshotter.v1.devmapper"]
base_image_size = ""
pool_name = ""
root_path = ""
[timeouts]
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[ttrpc]
address = ""
gid = 0
uid = 0

View File

@ -0,0 +1,3 @@
{
"registry-mirrors": ["https://mirror.gcr.io"]
}

View File

@ -0,0 +1 @@
# This is a dummy lib file to test nvidia-runtime-experimental

View File

@ -0,0 +1,79 @@
#! /bin/bash
# Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Install the toolkit into /usr/local/nvidia/toolkit inside the container and
# assert the expected files, wrapper-script contents, and config values exist.
testing::toolkit::install() {
local -r uid=$(id -u)
local -r gid=$(id -g)
# GNU readlink is installed as 'greadlink' (coreutils) on macOS.
local READLINK="readlink"
local -r platform=$(uname)
if [[ "${platform}" == "Darwin" ]]; then
READLINK="greadlink"
fi
testing::docker_run::toolkit::shell 'toolkit install /usr/local/nvidia/toolkit'
# The container wrote files as root; reclaim ownership for the checks below.
docker run --rm -v "${shared_dir}:/work" alpine sh -c "chown -R ${uid}:${gid} /work/"
# Ensure toolkit dir is correctly setup
test ! -z "$(ls -A "${shared_dir}/usr/local/nvidia/toolkit")"
test -L "${shared_dir}/usr/local/nvidia/toolkit/libnvidia-container.so.1"
test -e "$(${READLINK} -f "${shared_dir}/usr/local/nvidia/toolkit/libnvidia-container.so.1")"
test -e "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-cli"
test -e "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-toolkit"
test -e "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-runtime"
# NOTE(review): '".@"' appears intended to match the escaped '"$@"' in the
# generated wrapper script ('.' matching '$') — confirm against toolkit output.
grep -q -E "nvidia driver modules are not yet loaded, invoking runc directly" "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-runtime"
grep -q -E "exec runc \".@\"" "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-runtime"
test -e "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-cli.real"
test -e "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-toolkit.real"
test -e "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-runtime.real"
test -e "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-runtime.experimental"
test -e "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-runtime-experimental"
grep -q -E "nvidia driver modules are not yet loaded, invoking runc directly" "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-runtime-experimental"
grep -q -E "exec runc \".@\"" "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-runtime-experimental"
grep -q -E "LD_LIBRARY_PATH=/run/nvidia/driver/usr/lib64:\\\$LD_LIBRARY_PATH " "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-runtime-experimental"
test -e "${shared_dir}/usr/local/nvidia/toolkit/.config/nvidia-container-runtime/config.toml"
# Ensure that the config file has the required contents.
# NOTE: This assumes that RUN_DIR is '/run/nvidia'
local -r nvidia_run_dir="/run/nvidia"
grep -q -E "^\s*ldconfig = \"@${nvidia_run_dir}/driver/sbin/ldconfig(.real)?\"" "${shared_dir}/usr/local/nvidia/toolkit/.config/nvidia-container-runtime/config.toml"
grep -q -E "^\s*root = \"${nvidia_run_dir}/driver\"" "${shared_dir}/usr/local/nvidia/toolkit/.config/nvidia-container-runtime/config.toml"
grep -q -E "^\s*path = \"/usr/local/nvidia/toolkit/nvidia-container-cli\"" "${shared_dir}/usr/local/nvidia/toolkit/.config/nvidia-container-runtime/config.toml"
}
# Create a dummy toolkit directory, then verify that 'toolkit delete' removes
# it without disturbing its sibling directories.
testing::toolkit::delete() {
  testing::docker_run::toolkit::shell 'mkdir -p /usr/local/nvidia/delete-toolkit'
  testing::docker_run::toolkit::shell 'touch /usr/local/nvidia/delete-toolkit/test.file'
  testing::docker_run::toolkit::shell 'toolkit delete /usr/local/nvidia/delete-toolkit'
  [[ -n "$(ls -A "${shared_dir}/usr/local/nvidia")" ]]
  [[ ! -e "${shared_dir}/usr/local/nvidia/delete-toolkit" ]]
}
# Install the toolkit into the shared dir, then exercise deletion.
testing::toolkit::main() {
testing::toolkit::install
testing::toolkit::delete
}
# Nothing to tear down: install/delete only touch the shared directory, which
# the global testing::cleanup removes.
testing::toolkit::cleanup() {
:
}