Mirror of https://github.com/NVIDIA/nvidia-container-toolkit (synced 2025-06-26 18:18:24 +00:00)

Compare commits: v1.12.0-rc...v1.12.1 (146 commits)
Commits (SHA1 only):

0bedf07e7b 5c86ca17ef 0741252a7c 2d9e4cb720 12aca454ab 57a4072fae bac1222871 0023db2150 c03258325b 3ffc8cf86d
a6d02feba7 9fc25ac641 7df553ad09 d5ab1c0ba0 bc37b97c73 2081750bea a2d1f4cfb4 944922a541 5995305554 bc81d5e68b
d50e7f1f4f 62bd015475 ac5c62c116 80fe1065ad fea195cc8d 9ef314e1e3 95f859118b daceac9117 cfa2647260 03cdf3b5d7
f8f415a605 fe117d3916 069536d598 5f53ca0af5 9a06768863 0c8379f681 92dc0506fe 7045a223d2 763e4936cd f0c7491029
ba5c4b2831 9c73438682 37f7337d2b 98285c27ab 5750881cea 95ca1c2e50 e4031ced39 7f6d21c53b 846ac347fe 50afd443fc
14bcebd8b7 d091d3c7f4 eb0ef8ab31 9c5c12a1bc 8b197b27ed 8c57e55b59 6d1639a513 5e6f72e8f4 707e3479f8 201232dae3
f768bb5783 f0de3ccd9c 09e8d4c4f3 8188400c97 962d38e9dd 9fc2c59122 540f4349f5 1d7e419008 95394e0fc8 f9330a4c2c
be0e4667a5 408eeae70f 27c82c19ea 937f3d0d78 bc3cc71f90 ad4531db1e e5d8d10d4f 89bf81a9db 6237477ba3 6706024687
7649126248 104dca867f 881b1c0e08 3537d76726 ccd1961c60 f350f0c0bb 80672d33af 7a1cfb48b9 ae3b213b0e eaf9bdaeb4
bc4bfb94a2 a77331f8f0 94b7add334 9c9e6cd324 f50efca73f 19cfb2774d 27347c98d9 ebbc47702d 09d42f0ad9 35df24d63a
f93b6a13f4 50d7fb8f41 311e7a1feb 14e587d55f 66ec967de2 252693aeac 079b47ed94 d2952b07aa 41f1b93422 3140810c95
046d761f4c 0a2083df72 80c810bf9e 82ba424212 c131b99cb3 64a85fb832 ebf1772068 8604c255c4 bea8321205 db962c4bf2
d1a3de7671 8da7e74408 55eb898186 a7fc29d4bd fdb3e51294 0582180cab 46667b5a8c e4e1de82ec d51c8fcfa7 9b33c34a57
0b6cd7e90e 029a04c37d 60c1df4e9c 3e35312537 932b39fd08 78cafe45d4 584e792a5a f0bcfa0415 d45ec7bd28 153f2f6300
9df3975740 5575b391ff 9faf11ddf3 d3ed27722e 07a3f3040a 749ab2a746
@@ -37,6 +37,7 @@ stages:
|
||||
.main-or-manual:
|
||||
rules:
|
||||
- if: $CI_COMMIT_BRANCH == "main"
|
||||
- if: $CI_COMMIT_BRANCH =~ /^release-.*$/
|
||||
- if: $CI_COMMIT_TAG && $CI_COMMIT_TAG != ""
|
||||
- if: $CI_PIPELINE_SOURCE == "schedule"
|
||||
when: manual
|
||||
@@ -70,20 +71,6 @@ stages:
|
||||
DIST: debian10
|
||||
PACKAGE_REPO_TYPE: debian
|
||||
|
||||
.dist-debian9:
|
||||
rules:
|
||||
- !reference [.main-or-manual, rules]
|
||||
variables:
|
||||
DIST: debian9
|
||||
PACKAGE_REPO_TYPE: debian
|
||||
|
||||
.dist-fedora35:
|
||||
rules:
|
||||
- !reference [.main-or-manual, rules]
|
||||
variables:
|
||||
DIST: fedora35
|
||||
PACKAGE_REPO_TYPE: rpm
|
||||
|
||||
.dist-opensuse-leap15.1:
|
||||
rules:
|
||||
- !reference [.main-or-manual, rules]
|
||||
@@ -99,13 +86,6 @@ stages:
|
||||
CVE_UPDATES: "cyrus-sasl-lib"
|
||||
PACKAGE_REPO_TYPE: rpm
|
||||
|
||||
.dist-ubuntu16.04:
|
||||
rules:
|
||||
- !reference [.main-or-manual, rules]
|
||||
variables:
|
||||
DIST: ubuntu16.04
|
||||
PACKAGE_REPO_TYPE: debian
|
||||
|
||||
.dist-ubuntu18.04:
|
||||
variables:
|
||||
DIST: ubuntu18.04
|
||||
@@ -113,8 +93,6 @@ stages:
|
||||
PACKAGE_REPO_TYPE: debian
|
||||
|
||||
.dist-ubuntu20.04:
|
||||
rules:
|
||||
- !reference [.main-or-manual, rules]
|
||||
variables:
|
||||
DIST: ubuntu20.04
|
||||
CVE_UPDATES: "libsasl2-2 libsasl2-modules-db"
|
||||
@@ -259,22 +237,15 @@ release:staging-ubi8:
|
||||
needs:
|
||||
- image-ubi8
|
||||
|
||||
release:staging-ubuntu18.04:
|
||||
extends:
|
||||
- .release:staging
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- test-toolkit-ubuntu18.04
|
||||
- test-containerd-ubuntu18.04
|
||||
- test-crio-ubuntu18.04
|
||||
- test-docker-ubuntu18.04
|
||||
|
||||
release:staging-ubuntu20.04:
|
||||
extends:
|
||||
- .release:staging
|
||||
- .dist-ubuntu20.04
|
||||
needs:
|
||||
- image-ubuntu20.04
|
||||
- test-toolkit-ubuntu20.04
|
||||
- test-containerd-ubuntu20.04
|
||||
- test-crio-ubuntu20.04
|
||||
- test-docker-ubuntu20.04
|
||||
|
||||
release:staging-packaging:
|
||||
extends:
|
||||
|
||||
@@ -116,12 +116,6 @@ package-amazonlinux2-x86_64:
|
||||
- .dist-amazonlinux2
|
||||
- .arch-x86_64
|
||||
|
||||
package-centos7-ppc64le:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-centos7
|
||||
- .arch-ppc64le
|
||||
|
||||
package-centos7-x86_64:
|
||||
extends:
|
||||
- .package-build
|
||||
@@ -152,42 +146,12 @@ package-debian10-amd64:
|
||||
- .dist-debian10
|
||||
- .arch-amd64
|
||||
|
||||
package-debian9-amd64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-debian9
|
||||
- .arch-amd64
|
||||
|
||||
package-fedora35-aarch64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-fedora35
|
||||
- .arch-aarch64
|
||||
|
||||
package-fedora35-x86_64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-fedora35
|
||||
- .arch-x86_64
|
||||
|
||||
package-opensuse-leap15.1-x86_64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-opensuse-leap15.1
|
||||
- .arch-x86_64
|
||||
|
||||
package-ubuntu16.04-amd64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-ubuntu16.04
|
||||
- .arch-amd64
|
||||
|
||||
package-ubuntu16.04-ppc64le:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-ubuntu16.04
|
||||
- .arch-ppc64le
|
||||
|
||||
package-ubuntu18.04-amd64:
|
||||
extends:
|
||||
- .package-build
|
||||
@@ -228,7 +192,7 @@ package-ubuntu18.04-ppc64le:
|
||||
before_script:
|
||||
- !reference [.buildx-setup, before_script]
|
||||
|
||||
- apk add --no-cache bash make
|
||||
- apk add --no-cache bash make git
|
||||
- 'echo "Logging in to CI registry ${CI_REGISTRY}"'
|
||||
- docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
|
||||
script:
|
||||
@@ -240,7 +204,6 @@ image-centos7:
|
||||
- .package-artifacts
|
||||
- .dist-centos7
|
||||
needs:
|
||||
- package-centos7-ppc64le
|
||||
- package-centos7-x86_64
|
||||
|
||||
image-ubi8:
|
||||
@@ -254,17 +217,6 @@ image-ubi8:
|
||||
- package-centos8-x86_64
|
||||
- package-centos8-ppc64le
|
||||
|
||||
image-ubuntu18.04:
|
||||
extends:
|
||||
- .image-build
|
||||
- .package-artifacts
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- package-ubuntu18.04-amd64
|
||||
- package-ubuntu18.04-arm64
|
||||
- job: package-ubuntu18.04-ppc64le
|
||||
optional: true
|
||||
|
||||
image-ubuntu20.04:
|
||||
extends:
|
||||
- .image-build
|
||||
@@ -273,7 +225,8 @@ image-ubuntu20.04:
|
||||
needs:
|
||||
- package-ubuntu18.04-amd64
|
||||
- package-ubuntu18.04-arm64
|
||||
- package-ubuntu18.04-ppc64le
|
||||
- job: package-ubuntu18.04-ppc64le
|
||||
optional: true
|
||||
|
||||
# The DIST=packaging target creates an image containing all built packages
|
||||
image-packaging:
|
||||
@@ -290,26 +243,14 @@ image-packaging:
|
||||
optional: true
|
||||
- job: package-amazonlinux2-x86_64
|
||||
optional: true
|
||||
- job: package-centos7-ppc64le
|
||||
optional: true
|
||||
- job: package-centos7-x86_64
|
||||
optional: true
|
||||
- job: package-centos8-ppc64le
|
||||
optional: true
|
||||
- job: package-debian10-amd64
|
||||
optional: true
|
||||
- job: package-debian9-amd64
|
||||
optional: true
|
||||
- job: package-fedora35-aarch64
|
||||
optional: true
|
||||
- job: package-fedora35-x86_64
|
||||
optional: true
|
||||
- job: package-opensuse-leap15.1-x86_64
|
||||
optional: true
|
||||
- job: package-ubuntu16.04-amd64
|
||||
optional: true
|
||||
- job: package-ubuntu16.04-ppc64le
|
||||
optional: true
|
||||
- job: package-ubuntu18.04-ppc64le
|
||||
optional: true
|
||||
|
||||
@@ -343,31 +284,31 @@ image-packaging:
|
||||
TEST_CASES: "crio"
|
||||
|
||||
# Define the test targets
|
||||
test-toolkit-ubuntu18.04:
|
||||
test-toolkit-ubuntu20.04:
|
||||
extends:
|
||||
- .test:toolkit
|
||||
- .dist-ubuntu18.04
|
||||
- .dist-ubuntu20.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
- image-ubuntu20.04
|
||||
|
||||
test-containerd-ubuntu18.04:
|
||||
test-containerd-ubuntu20.04:
|
||||
extends:
|
||||
- .test:containerd
|
||||
- .dist-ubuntu18.04
|
||||
- .dist-ubuntu20.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
- image-ubuntu20.04
|
||||
|
||||
test-crio-ubuntu18.04:
|
||||
test-crio-ubuntu20.04:
|
||||
extends:
|
||||
- .test:crio
|
||||
- .dist-ubuntu18.04
|
||||
- .dist-ubuntu20.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
- image-ubuntu20.04
|
||||
|
||||
test-docker-ubuntu18.04:
|
||||
test-docker-ubuntu20.04:
|
||||
extends:
|
||||
- .test:docker
|
||||
- .dist-ubuntu18.04
|
||||
- .dist-ubuntu20.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
- image-ubuntu20.04
|
||||
|
||||
|
||||
.nvidia-ci.yml (141 lines changed)
@@ -79,11 +79,6 @@ image-ubi8:
|
||||
- .dist-ubi8
|
||||
- .image-pull
|
||||
|
||||
image-ubuntu18.04:
|
||||
extends:
|
||||
- .dist-ubuntu18.04
|
||||
- .image-pull
|
||||
|
||||
image-ubuntu20.04:
|
||||
extends:
|
||||
- .dist-ubuntu20.04
|
||||
@@ -95,109 +90,6 @@ image-packaging:
|
||||
- .dist-packaging
|
||||
- .image-pull
|
||||
|
||||
# Define the package release targets
|
||||
release:packages:amazonlinux2-aarch64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-amazonlinux2
|
||||
- .arch-aarch64
|
||||
|
||||
release:packages:amazonlinux2-x86_64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-amazonlinux2
|
||||
- .arch-x86_64
|
||||
|
||||
release:packages:centos7-ppc64le:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-centos7
|
||||
- .arch-ppc64le
|
||||
|
||||
release:packages:centos7-x86_64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-centos7
|
||||
- .arch-x86_64
|
||||
|
||||
release:packages:centos8-aarch64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-centos8
|
||||
- .arch-aarch64
|
||||
|
||||
release:packages:centos8-ppc64le:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-centos8
|
||||
- .arch-ppc64le
|
||||
|
||||
release:packages:centos8-x86_64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-centos8
|
||||
- .arch-x86_64
|
||||
|
||||
release:packages:debian10-amd64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-debian10
|
||||
- .arch-amd64
|
||||
|
||||
release:packages:debian9-amd64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-debian9
|
||||
- .arch-amd64
|
||||
|
||||
release:packages:fedora35-aarch64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-fedora35
|
||||
- .arch-aarch64
|
||||
|
||||
release:packages:fedora35-x86_64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-fedora35
|
||||
- .arch-x86_64
|
||||
|
||||
release:packages:opensuse-leap15.1-x86_64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-opensuse-leap15.1
|
||||
- .arch-x86_64
|
||||
|
||||
release:packages:ubuntu16.04-amd64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-ubuntu16.04
|
||||
- .arch-amd64
|
||||
|
||||
release:packages:ubuntu16.04-ppc64le:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-ubuntu16.04
|
||||
- .arch-ppc64le
|
||||
|
||||
release:packages:ubuntu18.04-amd64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-ubuntu18.04
|
||||
- .arch-amd64
|
||||
|
||||
release:packages:ubuntu18.04-arm64:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-ubuntu18.04
|
||||
- .arch-arm64
|
||||
|
||||
release:packages:ubuntu18.04-ppc64le:
|
||||
extends:
|
||||
- .release:packages
|
||||
- .dist-ubuntu18.04
|
||||
- .arch-ppc64le
|
||||
|
||||
# We skip the integration tests for the internal CI:
|
||||
.integration:
|
||||
stage: test
|
||||
@@ -213,7 +105,7 @@ release:packages:ubuntu18.04-ppc64le:
|
||||
image: "${PULSE_IMAGE}"
|
||||
variables:
|
||||
IMAGE: "${CI_REGISTRY_IMAGE}/container-toolkit:${CI_COMMIT_SHORT_SHA}-${DIST}"
|
||||
IMAGE_ARCHIVE: "container-toolkit.tar"
|
||||
IMAGE_ARCHIVE: "container-toolkit-${DIST}-${ARCH}-${CI_JOB_ID}.tar"
|
||||
rules:
|
||||
- if: $SKIP_SCANS != "yes"
|
||||
- when: manual
|
||||
@@ -256,14 +148,6 @@ scan-centos7-arm64:
|
||||
- image-centos7
|
||||
- scan-centos7-amd64
|
||||
|
||||
scan-ubuntu18.04-amd64:
|
||||
extends:
|
||||
- .dist-ubuntu18.04
|
||||
- .platform-amd64
|
||||
- .scan
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
|
||||
scan-ubuntu20.04-amd64:
|
||||
extends:
|
||||
- .dist-ubuntu20.04
|
||||
@@ -319,22 +203,26 @@ scan-ubi8-arm64:
|
||||
PACKAGE_REGISTRY_TOKEN: "${CI_REGISTRY_PASSWORD}"
|
||||
PACKAGE_IMAGE_NAME: "${CI_REGISTRY_IMAGE}/container-toolkit"
|
||||
PACKAGE_IMAGE_TAG: "${CI_COMMIT_SHORT_SHA}-packaging"
|
||||
PACKAGE_ARTIFACTORY_REPO: "${ARTIFACTORY_REPO_BASE}-${PACKAGE_REPO_TYPE}-local"
|
||||
KITMAKER_ARTIFACTORY_REPO: "${ARTIFACTORY_REPO_BASE}-generic-local/${KITMAKER_RELEASE_FOLDER}"
|
||||
script:
|
||||
- !reference [.regctl-setup, before_script]
|
||||
- apk add --no-cache bash
|
||||
- apk add --no-cache bash git
|
||||
- regctl registry login "${PACKAGE_REGISTRY}" -u "${PACKAGE_REGISTRY_USER}" -p "${PACKAGE_REGISTRY_TOKEN}"
|
||||
- ./scripts/extract-packages.sh "${PACKAGE_IMAGE_NAME}:${PACKAGE_IMAGE_TAG}" "${DIST}-${ARCH}"
|
||||
- ./scripts/extract-packages.sh "${PACKAGE_IMAGE_NAME}:${PACKAGE_IMAGE_TAG}"
|
||||
# TODO: ./scripts/release-packages-artifactory.sh "${DIST}-${ARCH}" "${PACKAGE_ARTIFACTORY_REPO}"
|
||||
- ./scripts/release-kitmaker-artifactory.sh "${DIST}-${ARCH}" "${KITMAKER_ARTIFACTORY_REPO}"
|
||||
- ./scripts/release-kitmaker-artifactory.sh "${KITMAKER_ARTIFACTORY_REPO}"
|
||||
|
||||
release:staging-ubuntu18.04:
|
||||
# Define the package release targets
|
||||
release:packages:kitmaker:
|
||||
extends:
|
||||
- .release:packages
|
||||
|
||||
release:staging-ubuntu20.04:
|
||||
extends:
|
||||
- .release:staging
|
||||
- .dist-ubuntu18.04
|
||||
- .dist-ubuntu20.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
- image-ubuntu20.04
|
||||
|
||||
# Define the external release targets
|
||||
# Release to NGC
|
||||
@@ -343,11 +231,6 @@ release:ngc-centos7:
|
||||
- .dist-centos7
|
||||
- .release:ngc
|
||||
|
||||
release:ngc-ubuntu18.04:
|
||||
extends:
|
||||
- .dist-ubuntu18.04
|
||||
- .release:ngc
|
||||
|
||||
release:ngc-ubuntu20.04:
|
||||
extends:
|
||||
- .dist-ubuntu20.04
|
||||
|
||||
CHANGELOG.md (40 lines changed)

@@ -1,5 +1,45 @@
# NVIDIA Container Toolkit Changelog

## v1.12.1

* Don't fail chmod hook if paths are not injected
* Fix possible blank `nvidia-ctk` path in generated CDI specifications
* Fix error in postun scriptlet on RPM-based systems
* Fix missing NVML symbols when running `nvidia-ctk` on some platforms [#49]
* Discover all `gsp*.bin` GSP firmware files when generating CDI specification.
* Remove `fedora35` packaging targets
* [libnvidia-container] Include all `gsp*.bin` firmware files if present
* [toolkit-container] Install `nvidia-ctk` from toolkit container
* [toolkit-container] Use installed `nvidia-ctk` path in NVIDIA Container Toolkit config
* [toolkit-container] Bump CUDA base images to 12.1.0

## v1.12.0

* Promote `v1.12.0-rc.5` to `v1.12.0`
* Rename `nvidia cdi generate` `--root` flag to `--driver-root` to better indicate intent
* [libnvidia-container] Add nvcubins.bin to DriverStore components under WSL2
* [toolkit-container] Bump CUDA base images to 12.0.1

## v1.12.0-rc.5

* Fix bug where the `nvidia-ctk` path was not properly resolved. This caused failures to run containers when the runtime is configured in `csv` mode or if the `NVIDIA_DRIVER_CAPABILITIES` includes `graphics` or `display` (e.g. `all`).

## v1.12.0-rc.4

* Generate a minimum CDI spec version for improved compatibility.
* Add `--device-name-strategy` options to the `nvidia-ctk cdi generate` command that can be used to control how device names are constructed.
* Set default for CDI device name generation to `index` to generate device names such as `nvidia.com/gpu=0` or `nvidia.com/gpu=1:0` by default.

## v1.12.0-rc.3

* Don't fail if by-path symlinks for DRM devices do not exist
* Replace the --json flag with a --format [json|yaml] flag for the nvidia-ctk cdi generate command
* Ensure that the CDI output folder is created if required
* When generating a CDI specification, use a blank host path for devices to ensure compatibility with the v0.4.0 CDI specification
* Add injection of Wayland JSON files
* Add GSP firmware paths to generated CDI specification
* Add --root flag to nvidia-ctk cdi generate command

## v1.12.0-rc.2

* Inject Direct Rendering Manager (DRM) devices into a container using the NVIDIA Container Runtime
Makefile (2 lines changed)

@@ -61,7 +61,7 @@ cmd-%: COMMAND_BUILD_OPTIONS = -o $(PREFIX)/$(*)
endif
cmds: $(CMD_TARGETS)
$(CMD_TARGETS): cmd-%:
GOOS=$(GOOS) go build -ldflags "-s -w -X $(CLI_VERSION_PACKAGE).gitCommit=$(GIT_COMMIT) -X $(CLI_VERSION_PACKAGE).version=$(CLI_VERSION)" $(COMMAND_BUILD_OPTIONS) $(MODULE)/cmd/$(*)
GOOS=$(GOOS) go build -ldflags "-extldflags=-Wl,-z,lazy -s -w -X $(CLI_VERSION_PACKAGE).gitCommit=$(GIT_COMMIT) -X $(CLI_VERSION_PACKAGE).version=$(CLI_VERSION)" $(COMMAND_BUILD_OPTIONS) $(MODULE)/cmd/$(*)

build:
GOOS=$(GOOS) go build ./...
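The added `-extldflags=-Wl,-z,lazy` appears to correspond to the v1.12.1 changelog entry about missing NVML symbols on some platforms: with lazy binding, driver symbols are resolved when first used rather than at load time, so the CLI can start even where the NVML library is absent or incomplete. The sketch below is not part of this diff; it only illustrates the runtime check this enables, using go-nvlib calls that appear elsewhere in this change set.

package main

import (
	"fmt"

	"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml"
)

func main() {
	// With lazy symbol resolution, a missing or incomplete NVML installation
	// surfaces here as an error return instead of a loader failure at startup.
	nvmllib := nvml.New()
	if r := nvmllib.Init(); r != nvml.SUCCESS {
		fmt.Printf("NVML is not available: %v\n", r)
		return
	}

	version, r := nvmllib.SystemGetDriverVersion()
	if r != nvml.SUCCESS {
		fmt.Printf("failed to query driver version: %v\n", r)
		return
	}
	fmt.Printf("driver version: %v\n", version)
}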
@@ -15,17 +15,26 @@
ARG BASE_DIST
ARG CUDA_VERSION
ARG GOLANG_VERSION=x.x.x
ARG VERSION="N/A"

FROM nvidia/cuda:${CUDA_VERSION}-base-${BASE_DIST}

ENV NVIDIA_CONTAINER_TOOLKIT_VERSION="${VERSION}"

ARG ARTIFACTS_ROOT
COPY ${ARTIFACTS_ROOT} /artifacts/packages/

WORKDIR /artifacts/packages

# build-args are added to the manifest.txt file below.
ARG BASE_DIST
ARG PACKAGE_DIST
ARG PACKAGE_VERSION
ARG GIT_BRANCH
ARG GIT_COMMIT
ARG SOURCE_DATE_EPOCH
ARG VERSION

# Create a manifest.txt file with the absolute paths of all deb and rpm packages in the container
RUN find /artifacts/packages -iname '*.deb' -o -iname '*.rpm' > /artifacts/manifest.txt
RUN echo "#IMAGE_EPOCH=$(date '+%s')" > /artifacts/manifest.txt && \
    env | sed 's/^/#/g' >> /artifacts/manifest.txt && \
    find /artifacts/packages -iname '*.deb' -o -iname '*.rpm' >> /artifacts/manifest.txt

RUN mkdir /licenses && mv /NGC-DL-CONTAINER-LICENSE /licenses/NGC-DL-CONTAINER-LICENSE
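With this change, manifest.txt starts with an #IMAGE_EPOCH=... line and the build environment (including the build args declared above) as #-prefixed lines, followed by the absolute paths of all .deb and .rpm packages. A consumer therefore only needs to skip comment lines. The sketch below is a hypothetical consumer, not part of the repository scripts.

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// Hypothetical consumer of /artifacts/manifest.txt: metadata lines start with
// '#', every other non-empty line is an absolute package path.
func main() {
	f, err := os.Open("/artifacts/manifest.txt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	var packages []string
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // skip the epoch and environment metadata
		}
		packages = append(packages, line)
	}
	if err := scanner.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Printf("found %d packages\n", len(packages))
}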
@@ -44,7 +44,7 @@ OUT_IMAGE = $(OUT_IMAGE_NAME):$(OUT_IMAGE_TAG)

##### Public rules #####
DEFAULT_PUSH_TARGET := ubuntu20.04
DISTRIBUTIONS := ubuntu20.04 ubuntu18.04 ubi8 centos7
DISTRIBUTIONS := ubuntu20.04 ubi8 centos7

META_TARGETS := packaging

@@ -94,6 +94,9 @@ $(BUILD_TARGETS): build-%: $(ARTIFACTS_ROOT)
--build-arg PACKAGE_DIST="$(PACKAGE_DIST)" \
--build-arg PACKAGE_VERSION="$(PACKAGE_VERSION)" \
--build-arg VERSION="$(VERSION)" \
--build-arg GIT_COMMIT="$(GIT_COMMIT)" \
--build-arg GIT_BRANCH="$(GIT_BRANCH)" \
--build-arg SOURCE_DATE_EPOCH="$(SOURCE_DATE_EPOCH)" \
--build-arg CVE_UPDATES="$(CVE_UPDATES)" \
-f $(DOCKERFILE) \
$(CURDIR)

@@ -143,10 +146,7 @@ test-packaging:
@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/centos8/ppc64le" || echo "Missing centos8/ppc64le"
@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/centos8/x86_64" || echo "Missing centos8/x86_64"
@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/debian10/amd64" || echo "Missing debian10/amd64"
@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/debian9/amd64" || echo "Missing debian9/amd64"
@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/opensuse-leap15.1/x86_64" || echo "Missing opensuse-leap15.1/x86_64"
@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/ubuntu16.04/amd64" || echo "Missing ubuntu16.04/amd64"
@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/ubuntu16.04/ppc64le" || echo "Missing ubuntu16.04/ppc64le"
@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/ubuntu18.04/amd64" || echo "Missing ubuntu18.04/amd64"
@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/ubuntu18.04/arm64" || echo "Missing ubuntu18.04/arm64"
@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/ubuntu18.04/ppc64le" || echo "Missing ubuntu18.04/ppc64le"
cmd/nvidia-ctk/cdi/generate/common.go (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
/**
|
||||
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package generate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/lookup"
|
||||
"github.com/sirupsen/logrus"
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml"
|
||||
)
|
||||
|
||||
// NewCommonDiscoverer returns a discoverer for entities that are not associated with a specific CDI device.
|
||||
// This includes driver libraries and meta devices, for example.
|
||||
func NewCommonDiscoverer(logger *logrus.Logger, driverRoot string, nvidiaCTKPath string, nvmllib nvml.Interface) (discover.Discover, error) {
|
||||
metaDevices := discover.NewDeviceDiscoverer(
|
||||
logger,
|
||||
lookup.NewCharDeviceLocator(
|
||||
lookup.WithLogger(logger),
|
||||
lookup.WithRoot(driverRoot),
|
||||
),
|
||||
driverRoot,
|
||||
[]string{
|
||||
"/dev/nvidia-modeset",
|
||||
"/dev/nvidia-uvm-tools",
|
||||
"/dev/nvidia-uvm",
|
||||
"/dev/nvidiactl",
|
||||
},
|
||||
)
|
||||
|
||||
graphicsMounts, err := discover.NewGraphicsMountsDiscoverer(logger, driverRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error constructing discoverer for graphics mounts: %v", err)
|
||||
}
|
||||
|
||||
driverFiles, err := NewDriverDiscoverer(logger, driverRoot, nvidiaCTKPath, nvmllib)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create discoverer for driver files: %v", err)
|
||||
}
|
||||
|
||||
d := discover.Merge(
|
||||
metaDevices,
|
||||
graphicsMounts,
|
||||
driverFiles,
|
||||
)
|
||||
|
||||
return d, nil
|
||||
}
|
||||
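NewCommonDiscoverer only builds the discoverer; it is consumed by converting it into CDI container edits, which the updated generate command does further down in this diff via edits.FromDiscoverer. A minimal sketch of that wiring follows; the helper name, driver root, and nvidia-ctk path are illustrative, not part of the repository.

package generate

import (
	"fmt"

	"github.com/NVIDIA/nvidia-container-toolkit/internal/edits"
	"github.com/sirupsen/logrus"
	"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml"
)

// commonEditsExample is an illustrative helper (not upstream code) showing how
// the discoverer above is typically consumed: construct it, then convert it to
// CDI container edits, as the generate command does later in this diff.
func commonEditsExample(logger *logrus.Logger, driverRoot string, nvidiaCTKPath string) error {
	nvmllib := nvml.New()
	if r := nvmllib.Init(); r != nvml.SUCCESS {
		return fmt.Errorf("failed to initialize NVML: %v", r)
	}

	common, err := NewCommonDiscoverer(logger, driverRoot, nvidiaCTKPath, nvmllib)
	if err != nil {
		return fmt.Errorf("failed to create discoverer for common entities: %v", err)
	}

	commonEdits, err := edits.FromDiscoverer(common)
	if err != nil {
		return fmt.Errorf("failed to create container edits: %v", err)
	}

	// The caller appends these edits to the top-level ContainerEdits of the spec.
	_ = commonEdits
	return nil
}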
cmd/nvidia-ctk/cdi/generate/device-folder-permissions.go (new file, 105 lines)
@@ -0,0 +1,105 @@
|
||||
/**
|
||||
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package generate
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
|
||||
"github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type deviceFolderPermissions struct {
|
||||
logger *logrus.Logger
|
||||
driverRoot string
|
||||
nvidiaCTKPath string
|
||||
folders []string
|
||||
}
|
||||
|
||||
var _ discover.Discover = (*deviceFolderPermissions)(nil)
|
||||
|
||||
// NewDeviceFolderPermissionHookDiscoverer creates a discoverer that can be used to update the permissions for the parent folders of nested device nodes from the specified set of device specs.
|
||||
// This works around an issue with rootless podman when using crun as a low-level runtime.
|
||||
// See https://github.com/containers/crun/issues/1047
|
||||
// The nested devices that are applicable to the NVIDIA GPU devices are:
|
||||
// - DRM devices at /dev/dri/*
|
||||
// - NVIDIA Caps devices at /dev/nvidia-caps/*
|
||||
func NewDeviceFolderPermissionHookDiscoverer(logger *logrus.Logger, driverRoot string, nvidiaCTKPath string, deviceSpecs []specs.Device) (discover.Discover, error) {
|
||||
var folders []string
|
||||
seen := make(map[string]bool)
|
||||
for _, device := range deviceSpecs {
|
||||
for _, dn := range device.ContainerEdits.DeviceNodes {
|
||||
df := filepath.Dir(dn.Path)
|
||||
if seen[df] {
|
||||
continue
|
||||
}
|
||||
// We only consider the special case paths
|
||||
if df != "/dev/dri" && df != "/dev/nvidia-caps" {
|
||||
continue
|
||||
}
|
||||
folders = append(folders, df)
|
||||
seen[df] = true
|
||||
}
|
||||
if len(folders) == 2 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(folders) == 0 {
|
||||
return discover.None{}, nil
|
||||
}
|
||||
|
||||
d := &deviceFolderPermissions{
|
||||
logger: logger,
|
||||
driverRoot: driverRoot,
|
||||
nvidiaCTKPath: nvidiaCTKPath,
|
||||
folders: folders,
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Devices are empty for this discoverer
|
||||
func (d *deviceFolderPermissions) Devices() ([]discover.Device, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Hooks returns a set of hooks that sets the file mode to 755 of parent folders for nested device nodes.
|
||||
func (d *deviceFolderPermissions) Hooks() ([]discover.Hook, error) {
|
||||
if len(d.folders) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
args := []string{"--mode", "755"}
|
||||
for _, folder := range d.folders {
|
||||
args = append(args, "--path", folder)
|
||||
}
|
||||
|
||||
hook := discover.CreateNvidiaCTKHook(
|
||||
d.nvidiaCTKPath,
|
||||
"chmod",
|
||||
args...,
|
||||
)
|
||||
|
||||
return []discover.Hook{hook}, nil
|
||||
}
|
||||
|
||||
// Mounts are empty for this discoverer
|
||||
func (d *deviceFolderPermissions) Mounts() ([]discover.Mount, error) {
|
||||
return nil, nil
|
||||
}
|
||||
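The discoverer above only ever emits a single chmod hook: parent folders other than /dev/dri and /dev/nvidia-caps are ignored, and the arguments are assembled as --mode 755 followed by one --path per folder. Below is an illustrative caller with made-up device nodes that prints the resulting hook; it is a sketch, not upstream code.

package generate

import (
	"fmt"

	"github.com/container-orchestrated-devices/container-device-interface/specs-go"
	"github.com/sirupsen/logrus"
)

// deviceFolderHookExample is an illustrative caller (not upstream code). With the
// made-up device nodes below, the discoverer sees /dev/dri as the only special
// parent folder and emits one nvidia-ctk chmod hook with the arguments
// "--mode 755 --path /dev/dri".
func deviceFolderHookExample(logger *logrus.Logger) error {
	deviceSpecs := []specs.Device{
		{
			Name: "0",
			ContainerEdits: specs.ContainerEdits{
				DeviceNodes: []*specs.DeviceNode{
					{Path: "/dev/nvidia0"},        // parent /dev is not a special-case path
					{Path: "/dev/dri/card1"},      // parent /dev/dri is kept
					{Path: "/dev/dri/renderD128"}, // same parent, deduplicated
				},
			},
		},
	}

	d, err := NewDeviceFolderPermissionHookDiscoverer(logger, "/", "/usr/bin/nvidia-ctk", deviceSpecs)
	if err != nil {
		return err
	}

	hooks, err := d.Hooks()
	if err != nil {
		return err
	}
	for _, h := range hooks {
		fmt.Printf("%s: %s %v\n", h.Lifecycle, h.Path, h.Args)
	}
	return nil
}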
cmd/nvidia-ctk/cdi/generate/driver.go (new file, 156 lines)
@@ -0,0 +1,156 @@
|
||||
/**
|
||||
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package generate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/ldcache"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/lookup"
|
||||
"github.com/sirupsen/logrus"
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml"
|
||||
)
|
||||
|
||||
// NewDriverDiscoverer creates a discoverer for the libraries and binaries associated with a driver installation.
|
||||
// The supplied NVML Library is used to query the expected driver version.
|
||||
func NewDriverDiscoverer(logger *logrus.Logger, driverRoot string, nvidiaCTKPath string, nvmllib nvml.Interface) (discover.Discover, error) {
|
||||
version, r := nvmllib.SystemGetDriverVersion()
|
||||
if r != nvml.SUCCESS {
|
||||
return nil, fmt.Errorf("failed to determine driver version: %v", r)
|
||||
}
|
||||
|
||||
libraries, err := NewDriverLibraryDiscoverer(logger, driverRoot, nvidiaCTKPath, version)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create discoverer for driver libraries: %v", err)
|
||||
}
|
||||
|
||||
firmwares := NewDriverFirmwareDiscoverer(logger, driverRoot, version)
|
||||
|
||||
binaries := NewDriverBinariesDiscoverer(logger, driverRoot)
|
||||
|
||||
d := discover.Merge(
|
||||
libraries,
|
||||
firmwares,
|
||||
binaries,
|
||||
)
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// NewDriverLibraryDiscoverer creates a discoverer for the libraries associated with the specified driver version.
|
||||
func NewDriverLibraryDiscoverer(logger *logrus.Logger, driverRoot string, nvidiaCTKPath string, version string) (discover.Discover, error) {
|
||||
libraryPaths, err := getVersionLibs(logger, driverRoot, version)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get libraries for driver version: %v", err)
|
||||
}
|
||||
|
||||
libraries := discover.NewMounts(
|
||||
logger,
|
||||
lookup.NewFileLocator(
|
||||
lookup.WithLogger(logger),
|
||||
lookup.WithRoot(driverRoot),
|
||||
),
|
||||
driverRoot,
|
||||
libraryPaths,
|
||||
)
|
||||
|
||||
cfg := &discover.Config{
|
||||
DriverRoot: driverRoot,
|
||||
NvidiaCTKPath: nvidiaCTKPath,
|
||||
}
|
||||
hooks, _ := discover.NewLDCacheUpdateHook(logger, libraries, cfg)
|
||||
|
||||
d := discover.Merge(
|
||||
libraries,
|
||||
hooks,
|
||||
)
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// NewDriverFirmwareDiscoverer creates a discoverer for GSP firmware associated with the specified driver version.
|
||||
func NewDriverFirmwareDiscoverer(logger *logrus.Logger, driverRoot string, version string) discover.Discover {
|
||||
gspFirmwarePath := filepath.Join("/lib/firmware/nvidia", version, "gsp*.bin")
|
||||
return discover.NewMounts(
|
||||
logger,
|
||||
lookup.NewFileLocator(
|
||||
lookup.WithLogger(logger),
|
||||
lookup.WithRoot(driverRoot),
|
||||
),
|
||||
driverRoot,
|
||||
[]string{gspFirmwarePath},
|
||||
)
|
||||
}
|
||||
|
||||
// NewDriverBinariesDiscoverer creates a discoverer for GSP firmware associated with the GPU driver.
|
||||
func NewDriverBinariesDiscoverer(logger *logrus.Logger, driverRoot string) discover.Discover {
|
||||
return discover.NewMounts(
|
||||
logger,
|
||||
lookup.NewExecutableLocator(logger, driverRoot),
|
||||
driverRoot,
|
||||
[]string{
|
||||
"nvidia-smi", /* System management interface */
|
||||
"nvidia-debugdump", /* GPU coredump utility */
|
||||
"nvidia-persistenced", /* Persistence mode utility */
|
||||
"nvidia-cuda-mps-control", /* Multi process service CLI */
|
||||
"nvidia-cuda-mps-server", /* Multi process service server */
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// getVersionLibs checks the LDCache for libraries ending in the specified driver version.
|
||||
// Although the ldcache at the specified driverRoot is queried, the paths are returned relative to this driverRoot.
|
||||
// This allows the standard mount location logic to be used for resolving the mounts.
|
||||
func getVersionLibs(logger *logrus.Logger, driverRoot string, version string) ([]string, error) {
|
||||
logger.Infof("Using driver version %v", version)
|
||||
|
||||
cache, err := ldcache.New(logger, driverRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load ldcache: %v", err)
|
||||
}
|
||||
|
||||
libs32, libs64 := cache.List()
|
||||
|
||||
var libs []string
|
||||
for _, l := range libs64 {
|
||||
if strings.HasSuffix(l, version) {
|
||||
logger.Infof("found 64-bit driver lib: %v", l)
|
||||
libs = append(libs, l)
|
||||
}
|
||||
}
|
||||
|
||||
for _, l := range libs32 {
|
||||
if strings.HasSuffix(l, version) {
|
||||
logger.Infof("found 32-bit driver lib: %v", l)
|
||||
libs = append(libs, l)
|
||||
}
|
||||
}
|
||||
|
||||
if driverRoot == "/" || driverRoot == "" {
|
||||
return libs, nil
|
||||
}
|
||||
|
||||
var relative []string
|
||||
for _, l := range libs {
|
||||
relative = append(relative, strings.TrimPrefix(l, driverRoot))
|
||||
}
|
||||
|
||||
return relative, nil
|
||||
}
|
||||
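getVersionLibs keeps only ldcache entries whose names end in the driver version and, for a non-default driver root, strips that root prefix so the standard mount locator can resolve the paths again under the same root. A small self-contained sketch of that transformation, using a hypothetical driver root and driver version:

package main

import (
	"fmt"
	"strings"
)

func main() {
	driverRoot := "/run/nvidia/driver" // hypothetical chroot-style driver installation
	version := "525.85.12"             // hypothetical driver version

	// Entries as they might appear in the ldcache found under driverRoot.
	cached := []string{
		"/run/nvidia/driver/usr/lib/x86_64-linux-gnu/libcuda.so.525.85.12",
		"/run/nvidia/driver/usr/lib/x86_64-linux-gnu/libnvidia-ml.so.525.85.12",
		"/run/nvidia/driver/usr/lib/x86_64-linux-gnu/libc.so.6",
	}

	var relative []string
	for _, l := range cached {
		// Keep only libraries whose names end in the driver version...
		if !strings.HasSuffix(l, version) {
			continue
		}
		// ...and make them relative to driverRoot so the mount locator can
		// resolve them again under that root.
		relative = append(relative, strings.TrimPrefix(l, driverRoot))
	}

	fmt.Println(relative)
	// [/usr/lib/x86_64-linux-gnu/libcuda.so.525.85.12 /usr/lib/x86_64-linux-gnu/libnvidia-ml.so.525.85.12]
}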
cmd/nvidia-ctk/cdi/generate/full-gpu.go (new file, 160 lines)
@@ -0,0 +1,160 @@
|
||||
/**
|
||||
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package generate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/info/drm"
|
||||
"github.com/sirupsen/logrus"
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvlib/device"
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml"
|
||||
)
|
||||
|
||||
// byPathHookDiscoverer discovers the entities required for injecting by-path DRM device links
|
||||
type byPathHookDiscoverer struct {
|
||||
logger *logrus.Logger
|
||||
driverRoot string
|
||||
nvidiaCTKPath string
|
||||
pciBusID string
|
||||
}
|
||||
|
||||
var _ discover.Discover = (*byPathHookDiscoverer)(nil)
|
||||
|
||||
// NewFullGPUDiscoverer creates a discoverer for the full GPU defined by the specified device.
|
||||
func NewFullGPUDiscoverer(logger *logrus.Logger, driverRoot string, nvidiaCTKPath string, d device.Device) (discover.Discover, error) {
|
||||
// TODO: The functionality to get device paths should be integrated into the go-nvlib/pkg/device.Device interface.
|
||||
// This will allow reuse here and in other code where the paths are queried such as the NVIDIA device plugin.
|
||||
minor, ret := d.GetMinorNumber()
|
||||
if ret != nvml.SUCCESS {
|
||||
return nil, fmt.Errorf("error getting GPU device minor number: %v", ret)
|
||||
}
|
||||
path := fmt.Sprintf("/dev/nvidia%d", minor)
|
||||
|
||||
pciInfo, ret := d.GetPciInfo()
|
||||
if ret != nvml.SUCCESS {
|
||||
return nil, fmt.Errorf("error getting PCI info for device: %v", ret)
|
||||
}
|
||||
pciBusID := getBusID(pciInfo)
|
||||
|
||||
drmDeviceNodes, err := drm.GetDeviceNodesByBusID(pciBusID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to determine DRM devices for %v: %v", pciBusID, err)
|
||||
}
|
||||
|
||||
deviceNodePaths := append([]string{path}, drmDeviceNodes...)
|
||||
|
||||
deviceNodes := discover.NewCharDeviceDiscoverer(
|
||||
logger,
|
||||
deviceNodePaths,
|
||||
driverRoot,
|
||||
)
|
||||
|
||||
byPathHooks := &byPathHookDiscoverer{
|
||||
logger: logger,
|
||||
driverRoot: driverRoot,
|
||||
nvidiaCTKPath: nvidiaCTKPath,
|
||||
pciBusID: pciBusID,
|
||||
}
|
||||
|
||||
dd := discover.Merge(
|
||||
deviceNodes,
|
||||
byPathHooks,
|
||||
)
|
||||
|
||||
return dd, nil
|
||||
}
|
||||
|
||||
// Devices returns the empty list for the by-path hook discoverer
|
||||
func (d *byPathHookDiscoverer) Devices() ([]discover.Device, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Hooks returns the hooks for the GPU device.
|
||||
// The following hooks are detected:
|
||||
// 1. A hook to create /dev/dri/by-path symlinks
|
||||
func (d *byPathHookDiscoverer) Hooks() ([]discover.Hook, error) {
|
||||
links, err := d.deviceNodeLinks()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to discover DRA device links: %v", err)
|
||||
}
|
||||
if len(links) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var args []string
|
||||
for _, l := range links {
|
||||
args = append(args, "--link", l)
|
||||
}
|
||||
|
||||
hook := discover.CreateNvidiaCTKHook(
|
||||
d.nvidiaCTKPath,
|
||||
"create-symlinks",
|
||||
args...,
|
||||
)
|
||||
|
||||
return []discover.Hook{hook}, nil
|
||||
}
|
||||
|
||||
// Mounts returns an empty slice for a full GPU
|
||||
func (d *byPathHookDiscoverer) Mounts() ([]discover.Mount, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *byPathHookDiscoverer) deviceNodeLinks() ([]string, error) {
|
||||
candidates := []string{
|
||||
fmt.Sprintf("/dev/dri/by-path/pci-%s-card", d.pciBusID),
|
||||
fmt.Sprintf("/dev/dri/by-path/pci-%s-render", d.pciBusID),
|
||||
}
|
||||
|
||||
var links []string
|
||||
for _, c := range candidates {
|
||||
linkPath := filepath.Join(d.driverRoot, c)
|
||||
device, err := os.Readlink(linkPath)
|
||||
if err != nil {
|
||||
d.logger.Warningf("Failed to evaluate symlink %v; ignoring", linkPath)
|
||||
continue
|
||||
}
|
||||
|
||||
d.logger.Debugf("adding device symlink %v -> %v", linkPath, device)
|
||||
links = append(links, fmt.Sprintf("%v::%v", device, linkPath))
|
||||
}
|
||||
|
||||
return links, nil
|
||||
}
|
||||
|
||||
// getBusID provides a utility function that returns the string representation of the bus ID.
|
||||
func getBusID(p nvml.PciInfo) string {
|
||||
var bytes []byte
|
||||
for _, b := range p.BusId {
|
||||
if byte(b) == '\x00' {
|
||||
break
|
||||
}
|
||||
bytes = append(bytes, byte(b))
|
||||
}
|
||||
id := strings.ToLower(string(bytes))
|
||||
|
||||
if id != "0000" {
|
||||
id = strings.TrimPrefix(id, "0000")
|
||||
}
|
||||
|
||||
return id
|
||||
}
|
||||
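getBusID trims the NUL padding from NVML's fixed-size bus-ID buffer, lowercases it, and drops a leading "0000" so that, assuming NVML reports an 8-hex-digit PCI domain such as "00000000:41:00.0", the result matches the 4-digit-domain names used for /dev/dri/by-path symlinks. The standalone sketch below mirrors that logic on a plain byte slice; the bus ID value is a made-up example.

package main

import (
	"fmt"
	"strings"
)

// busIDFromBytes mimics getBusID above on a plain byte slice, assuming the
// driver reports the bus ID as a NUL-padded, 8-digit-domain string.
func busIDFromBytes(raw []byte) string {
	var trimmed []byte
	for _, b := range raw {
		if b == '\x00' {
			break
		}
		trimmed = append(trimmed, b)
	}
	id := strings.ToLower(string(trimmed))
	if id != "0000" {
		id = strings.TrimPrefix(id, "0000")
	}
	return id
}

func main() {
	raw := append([]byte("00000000:41:00.0"), make([]byte, 16)...) // NUL padding
	fmt.Println(busIDFromBytes(raw))                               // 0000:41:00.0
	// ...which is then used to build candidates such as
	// /dev/dri/by-path/pci-0000:41:00.0-card and -render.
}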
@@ -23,10 +23,8 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/config/image"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/ldcache"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/lookup"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/edits"
|
||||
"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
|
||||
specs "github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -37,8 +35,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
nvidiaCTKExecutable = "nvidia-ctk"
|
||||
nvidiaCTKDefaultFilePath = "/usr/bin/" + nvidiaCTKExecutable
|
||||
formatJSON = "json"
|
||||
formatYAML = "yaml"
|
||||
)
|
||||
|
||||
type command struct {
|
||||
@@ -46,8 +44,11 @@ type command struct {
|
||||
}
|
||||
|
||||
type config struct {
|
||||
output string
|
||||
jsonMode bool
|
||||
output string
|
||||
format string
|
||||
deviceNameStrategy string
|
||||
driverRoot string
|
||||
nvidiaCTKPath string
|
||||
}
|
||||
|
||||
// NewCommand constructs a generate-cdi command with the specified logger
|
||||
@@ -66,6 +67,9 @@ func (m command) build() *cli.Command {
|
||||
c := cli.Command{
|
||||
Name: "generate",
|
||||
Usage: "Generate CDI specifications for use with CDI-enabled runtimes",
|
||||
Before: func(c *cli.Context) error {
|
||||
return m.validateFlags(c, &cfg)
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
return m.run(c, &cfg)
|
||||
},
|
||||
@@ -74,29 +78,78 @@ func (m command) build() *cli.Command {
|
||||
c.Flags = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "output",
|
||||
Usage: "Specify the file to output the generated CDI specification to. If this is '-' or '' the specification is output to STDOUT",
|
||||
Usage: "Specify the file to output the generated CDI specification to. If this is '' the specification is output to STDOUT",
|
||||
Destination: &cfg.output,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "json",
|
||||
Usage: "Output the generated CDI spec in JSON mode instead of YAML",
|
||||
Destination: &cfg.jsonMode,
|
||||
&cli.StringFlag{
|
||||
Name: "format",
|
||||
Usage: "The output format for the generated spec [json | yaml]. This overrides the format defined by the output file extension (if specified).",
|
||||
Value: formatYAML,
|
||||
Destination: &cfg.format,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "device-name-strategy",
|
||||
Usage: "Specify the strategy for generating device names. One of [index | uuid | type-index]",
|
||||
Value: deviceNameStrategyIndex,
|
||||
Destination: &cfg.deviceNameStrategy,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "driver-root",
|
||||
Usage: "Specify the NVIDIA GPU driver root to use when discovering the entities that should be included in the CDI specification.",
|
||||
Destination: &cfg.driverRoot,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "nvidia-ctk-path",
|
||||
Usage: "Specify the path to use for the nvidia-ctk in the generated CDI specification. If this is left empty, the path will be searched.",
|
||||
Destination: &cfg.nvidiaCTKPath,
|
||||
},
|
||||
}
|
||||
|
||||
return &c
|
||||
}
|
||||
|
||||
func (m command) validateFlags(r *cli.Context, cfg *config) error {
|
||||
cfg.format = strings.ToLower(cfg.format)
|
||||
switch cfg.format {
|
||||
case formatJSON:
|
||||
case formatYAML:
|
||||
default:
|
||||
return fmt.Errorf("invalid output format: %v", cfg.format)
|
||||
}
|
||||
|
||||
_, err := newDeviceNamer(cfg.deviceNameStrategy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfg.nvidiaCTKPath = discover.FindNvidiaCTK(m.logger, cfg.nvidiaCTKPath)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m command) run(c *cli.Context, cfg *config) error {
|
||||
spec, err := m.generateSpec()
|
||||
deviceNamer, err := newDeviceNamer(cfg.deviceNameStrategy)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create device namer: %v", err)
|
||||
}
|
||||
|
||||
spec, err := m.generateSpec(
|
||||
cfg.driverRoot,
|
||||
discover.FindNvidiaCTK(m.logger, cfg.nvidiaCTKPath),
|
||||
deviceNamer,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate CDI spec: %v", err)
|
||||
}
|
||||
|
||||
var outputTo io.Writer
|
||||
if cfg.output == "" || cfg.output == "-" {
|
||||
if cfg.output == "" {
|
||||
outputTo = os.Stdout
|
||||
} else {
|
||||
err := createParentDirsIfRequired(cfg.output)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create parent folders for output file: %v", err)
|
||||
}
|
||||
outputFile, err := os.Create(cfg.output)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create output file: %v", err)
|
||||
@@ -105,10 +158,13 @@ func (m command) run(c *cli.Context, cfg *config) error {
|
||||
outputTo = outputFile
|
||||
}
|
||||
|
||||
if filepath.Ext(cfg.output) == ".json" {
|
||||
cfg.jsonMode = true
|
||||
} else if filepath.Ext(cfg.output) == ".yaml" || filepath.Ext(cfg.output) == ".yml" {
|
||||
cfg.jsonMode = false
|
||||
if outputFileFormat := formatFromFilename(cfg.output); outputFileFormat != "" {
|
||||
m.logger.Debugf("Inferred output format as %q from output file name", outputFileFormat)
|
||||
if !c.IsSet("format") {
|
||||
cfg.format = outputFileFormat
|
||||
} else if outputFileFormat != cfg.format {
|
||||
m.logger.Warningf("Requested output format %q does not match format implied by output file name: %q", cfg.format, outputFileFormat)
|
||||
}
|
||||
}
|
||||
|
||||
data, err := yaml.Marshal(spec)
|
||||
@@ -116,14 +172,14 @@ func (m command) run(c *cli.Context, cfg *config) error {
|
||||
return fmt.Errorf("failed to marshal CDI spec: %v", err)
|
||||
}
|
||||
|
||||
if cfg.jsonMode {
|
||||
if strings.ToLower(cfg.format) == formatJSON {
|
||||
data, err = yaml.YAMLToJSONStrict(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert CDI spec from YAML to JSON: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = writeToOutput(cfg.jsonMode, data, outputTo)
|
||||
err = writeToOutput(cfg.format, data, outputTo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write output: %v", err)
|
||||
}
|
||||
@@ -131,13 +187,26 @@ func (m command) run(c *cli.Context, cfg *config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeToOutput(jsonMode bool, data []byte, output io.Writer) error {
|
||||
if !jsonMode {
|
||||
func formatFromFilename(filename string) string {
|
||||
ext := filepath.Ext(filename)
|
||||
switch strings.ToLower(ext) {
|
||||
case ".json":
|
||||
return formatJSON
|
||||
case ".yaml":
|
||||
return formatYAML
|
||||
case ".yml":
|
||||
return formatYAML
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func writeToOutput(format string, data []byte, output io.Writer) error {
|
||||
if format == formatYAML {
|
||||
_, err := output.Write([]byte("---\n"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write YAML separator: %v", err)
|
||||
}
|
||||
|
||||
}
|
||||
_, err := output.Write(data)
|
||||
if err != nil {
|
||||
@@ -147,7 +216,7 @@ func writeToOutput(jsonMode bool, data []byte, output io.Writer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m command) generateSpec() (*specs.Spec, error) {
|
||||
func (m command) generateSpec(driverRoot string, nvidiaCTKPath string, namer deviceNamer) (*specs.Spec, error) {
|
||||
nvmllib := nvml.New()
|
||||
if r := nvmllib.Init(); r != nvml.SUCCESS {
|
||||
return nil, r
|
||||
@@ -156,404 +225,161 @@ func (m command) generateSpec() (*specs.Spec, error) {
|
||||
|
||||
devicelib := device.New(device.WithNvml(nvmllib))
|
||||
|
||||
spec := specs.Spec{
|
||||
Version: "0.4.0",
|
||||
Kind: "nvidia.com/gpu",
|
||||
ContainerEdits: specs.ContainerEdits{},
|
||||
deviceSpecs, err := m.generateDeviceSpecs(devicelib, driverRoot, nvidiaCTKPath, namer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create device CDI specs: %v", err)
|
||||
}
|
||||
|
||||
allDevice := createAllDevice(deviceSpecs)
|
||||
|
||||
deviceSpecs = append(deviceSpecs, allDevice)
|
||||
|
||||
allEdits := edits.NewContainerEdits()
|
||||
|
||||
ipcs, err := NewIPCDiscoverer(m.logger, driverRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create discoverer for IPC sockets: %v", err)
|
||||
}
|
||||
|
||||
ipcEdits, err := edits.FromDiscoverer(ipcs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create container edits for IPC sockets: %v", err)
|
||||
}
|
||||
// TODO: We should not have to update this after the fact
|
||||
for _, s := range ipcEdits.Mounts {
|
||||
s.Options = append(s.Options, "noexec")
|
||||
}
|
||||
|
||||
allEdits.Append(ipcEdits)
|
||||
|
||||
common, err := NewCommonDiscoverer(m.logger, driverRoot, nvidiaCTKPath, nvmllib)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create discoverer for common entities: %v", err)
|
||||
}
|
||||
|
||||
deviceFolderPermissionHooks, err := NewDeviceFolderPermissionHookDiscoverer(m.logger, driverRoot, nvidiaCTKPath, deviceSpecs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate permission hooks for device nodes: %v", err)
|
||||
}
|
||||
|
||||
commonEdits, err := edits.FromDiscoverer(discover.Merge(common, deviceFolderPermissionHooks))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create container edits for common entities: %v", err)
|
||||
}
|
||||
|
||||
allEdits.Append(commonEdits)
|
||||
|
||||
// We construct the spec and determine the minimum required version based on the specification.
|
||||
spec := specs.Spec{
|
||||
Version: "NOT_SET",
|
||||
Kind: "nvidia.com/gpu",
|
||||
Devices: deviceSpecs,
|
||||
ContainerEdits: *allEdits.ContainerEdits,
|
||||
}
|
||||
|
||||
minVersion, err := cdi.MinimumRequiredVersion(&spec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get minimum required CDI spec version: %v", err)
|
||||
}
|
||||
m.logger.Infof("Using minimum required CDI spec version: %s", minVersion)
|
||||
|
||||
spec.Version = minVersion
|
||||
|
||||
return &spec, nil
|
||||
}
|
||||
|
||||
func (m command) generateDeviceSpecs(devicelib device.Interface, driverRoot string, nvidiaCTKPath string, namer deviceNamer) ([]specs.Device, error) {
|
||||
var deviceSpecs []specs.Device
|
||||
|
||||
err := devicelib.VisitDevices(func(i int, d device.Device) error {
|
||||
isMig, err := d.IsMigEnabled()
|
||||
isMigEnabled, err := d.IsMigEnabled()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check whether device is MIG device: %v", err)
|
||||
}
|
||||
if isMig {
|
||||
if isMigEnabled {
|
||||
return nil
|
||||
}
|
||||
device, err := generateEditsForDevice(newGPUDevice(i, d))
|
||||
device, err := NewFullGPUDiscoverer(m.logger, driverRoot, nvidiaCTKPath, d)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate CDI spec for device %v: %v", i, err)
|
||||
return fmt.Errorf("failed to create device: %v", err)
|
||||
}
|
||||
|
||||
graphicsEdits, err := m.editsForGraphicsDevice(d)
|
||||
deviceEdits, err := edits.FromDiscoverer(device)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate CDI spec for DRM devices associated with device %v: %v", i, err)
|
||||
return fmt.Errorf("failed to create container edits for device: %v", err)
|
||||
}
|
||||
|
||||
// We add the device nodes and hooks edits for the DRM devices; Mounts are added globally
|
||||
for _, dn := range graphicsEdits.DeviceNodes {
|
||||
device.ContainerEdits.DeviceNodes = append(device.ContainerEdits.DeviceNodes, dn)
|
||||
deviceName, err := namer.GetDeviceName(i, d)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get device name: %v", err)
|
||||
}
|
||||
for _, h := range graphicsEdits.Hooks {
|
||||
device.ContainerEdits.Hooks = append(device.ContainerEdits.Hooks, h)
|
||||
deviceSpec := specs.Device{
|
||||
Name: deviceName,
|
||||
ContainerEdits: *deviceEdits.ContainerEdits,
|
||||
}
|
||||
|
||||
spec.Devices = append(spec.Devices, device)
|
||||
deviceSpecs = append(deviceSpecs, deviceSpec)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate CDI spec for GPU devices: %v", err)
|
||||
}
|
||||
|
||||
err = devicelib.VisitMigDevices(func(i int, d device.Device, j int, m device.MigDevice) error {
|
||||
device, err := generateEditsForDevice(newMigDevice(i, j, m))
|
||||
err = devicelib.VisitMigDevices(func(i int, d device.Device, j int, mig device.MigDevice) error {
|
||||
device, err := NewMigDeviceDiscoverer(m.logger, "", d, mig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate CDI spec for device %v: %v", i, err)
|
||||
return fmt.Errorf("failed to create MIG device: %v", err)
|
||||
}
|
||||
|
||||
spec.Devices = append(spec.Devices, device)
|
||||
deviceEdits, err := edits.FromDiscoverer(device)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create container edits for MIG device: %v", err)
|
||||
}
|
||||
|
||||
deviceName, err := namer.GetMigDeviceName(i, j, mig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get device name: %v", err)
|
||||
}
|
||||
deviceSpec := specs.Device{
|
||||
Name: deviceName,
|
||||
ContainerEdits: *deviceEdits.ContainerEdits,
|
||||
}
|
||||
|
||||
deviceSpecs = append(deviceSpecs, deviceSpec)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate CDI spec for MIG devices: %v", err)
|
||||
}
|
||||
|
||||
// We create an "all" device with all the discovered device nodes
|
||||
var allDeviceNodes []*specs.DeviceNode
|
||||
for _, d := range spec.Devices {
|
||||
for _, dn := range d.ContainerEdits.DeviceNodes {
|
||||
allDeviceNodes = append(allDeviceNodes, dn)
|
||||
return deviceSpecs, nil
|
||||
}
|
||||
|
||||
// createAllDevice creates an 'all' device which combines the edits from the previous devices
|
||||
func createAllDevice(deviceSpecs []specs.Device) specs.Device {
|
||||
edits := edits.NewContainerEdits()
|
||||
|
||||
for _, d := range deviceSpecs {
|
||||
edit := cdi.ContainerEdits{
|
||||
ContainerEdits: &d.ContainerEdits,
|
||||
}
|
||||
edits.Append(&edit)
|
||||
}
|
||||
|
||||
all := specs.Device{
|
||||
Name: "all",
|
||||
ContainerEdits: specs.ContainerEdits{
|
||||
DeviceNodes: allDeviceNodes,
|
||||
},
|
||||
Name: "all",
|
||||
ContainerEdits: *edits.ContainerEdits,
|
||||
}
|
||||
|
||||
spec.Devices = append(spec.Devices, all)
|
||||
spec.ContainerEdits.DeviceNodes = m.getExistingMetaDeviceNodes()
|
||||
|
||||
libraries, err := m.findLibs(nvmllib)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to locate driver libraries: %v", err)
|
||||
}
|
||||
|
||||
binaries, err := m.findBinaries()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to locate driver binaries: %v", err)
|
||||
}
|
||||
|
||||
ipcs, err := m.findIPC()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to locate driver IPC sockets: %v", err)
|
||||
}
|
||||
|
||||
graphicsEdits, err := m.editsForGraphicsDevice(nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generated edits for graphics libraries")
|
||||
}
|
||||
|
||||
libOptions := []string{
|
||||
"ro",
|
||||
"nosuid",
|
||||
"nodev",
|
||||
"bind",
|
||||
}
|
||||
ipcOptions := append(libOptions, "noexec")
|
||||
|
||||
spec.ContainerEdits.Mounts = append(
|
||||
generateMountsForPaths(libOptions, libraries, binaries),
|
||||
generateMountsForPaths(ipcOptions, ipcs)...,
|
||||
)
|
||||
|
||||
spec.ContainerEdits.Mounts = append(spec.ContainerEdits.Mounts, graphicsEdits.Mounts...)
|
||||
|
||||
ldcacheUpdateHook := m.generateUpdateLdCacheHook(libraries)
|
||||
|
||||
deviceFolderPermissionHooks, err := m.generateDeviceFolderPermissionHooks(ldcacheUpdateHook.Path, allDeviceNodes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generated permission hooks for device nodes: %v", err)
|
||||
}
|
||||
|
||||
spec.ContainerEdits.Hooks = append([]*specs.Hook{ldcacheUpdateHook}, deviceFolderPermissionHooks...)
|
||||
|
||||
return &spec, nil
|
||||
return all
|
||||
}
|
||||
|
||||
func generateEditsForDevice(name string, d deviceInfo) (specs.Device, error) {
|
||||
deviceNodePaths, err := d.GetDeviceNodes()
|
||||
if err != nil {
|
||||
return specs.Device{}, fmt.Errorf("failed to get paths for device: %v", err)
|
||||
// createParentDirsIfRequired creates the parent folders of the specified path if required.
|
||||
// Note that MkdirAll does not specifically check whether the specified path is non-empty and raises an error if it is.
|
||||
// The path will be empty if filename in the current folder is specified, for example
|
||||
func createParentDirsIfRequired(filename string) error {
|
||||
dir := filepath.Dir(filename)
|
||||
if dir == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
deviceNodes := getDeviceNodesFromPaths(deviceNodePaths)
|
||||
|
||||
device := specs.Device{
|
||||
Name: name,
|
||||
ContainerEdits: specs.ContainerEdits{
|
||||
DeviceNodes: deviceNodes,
|
||||
},
|
||||
}
|
||||
|
||||
return device, nil
|
||||
}

func (m command) editsForGraphicsDevice(device device.Device) (*specs.ContainerEdits, error) {
    selectedDevice := image.NewVisibleDevices("none")
    if device != nil {
        uuid, ret := device.GetUUID()
        if ret != nvml.SUCCESS {
            return nil, fmt.Errorf("error getting device UUID: %v", ret)
        }
        selectedDevice = image.NewVisibleDevices(uuid)
    }
    cfg := discover.Config{
        Root:                                    "",
        NVIDIAContainerToolkitCLIExecutablePath: "nvidia-ctk",
    }
    // Create a discoverer for the single device:
    d, err := discover.NewGraphicsDiscoverer(m.logger, selectedDevice, &cfg)
    if err != nil {
        return nil, fmt.Errorf("error constructing discoverer: %v", err)
    }

    devices, err := d.Devices()
    if err != nil {
        return nil, fmt.Errorf("error getting DRM devices: %v", err)
    }

    var deviceNodes []*specs.DeviceNode
    for _, d := range devices {
        dn := specs.DeviceNode{
            Path:     d.Path,
            HostPath: d.HostPath,
        }
        deviceNodes = append(deviceNodes, &dn)
    }

    hooks, err := d.Hooks()
    if err != nil {
        return nil, fmt.Errorf("error getting hooks: %v", err)
    }

    var cdiHooks []*specs.Hook
    for _, h := range hooks {
        cdiHook := specs.Hook{
            HookName: h.Lifecycle,
            Path:     h.Path,
            Args:     h.Args,
        }
        cdiHooks = append(cdiHooks, &cdiHook)
    }

    mounts, err := d.Mounts()
    if err != nil {
        return nil, fmt.Errorf("error getting mounts: %v", err)
    }

    var cdiMounts []*specs.Mount
    for _, m := range mounts {
        cdiMount := specs.Mount{
            ContainerPath: m.Path,
            HostPath:      m.HostPath,
            Options: []string{
                "ro",
                "nosuid",
                "nodev",
                "bind",
            },
            Type: "bind",
        }
        cdiMounts = append(cdiMounts, &cdiMount)
    }

    edits := specs.ContainerEdits{
        DeviceNodes: deviceNodes,
        Hooks:       cdiHooks,
        Mounts:      cdiMounts,
    }

    return &edits, nil
}

func (m command) getExistingMetaDeviceNodes() []*specs.DeviceNode {
    metaDeviceNodePaths := []string{
        "/dev/nvidia-modeset",
        "/dev/nvidia-uvm-tools",
        "/dev/nvidia-uvm",
        "/dev/nvidiactl",
    }

    var existingDeviceNodePaths []string
    for _, p := range metaDeviceNodePaths {
        if _, err := os.Stat(p); err != nil {
            m.logger.Infof("Ignoring missing meta device %v", p)
            continue
        }
        existingDeviceNodePaths = append(existingDeviceNodePaths, p)
    }

    return getDeviceNodesFromPaths(existingDeviceNodePaths)
}

func getDeviceNodesFromPaths(deviceNodePaths []string) []*specs.DeviceNode {
    var deviceNodes []*specs.DeviceNode
    for _, p := range deviceNodePaths {
        deviceNode := specs.DeviceNode{
            Path: p,
        }
        deviceNodes = append(deviceNodes, &deviceNode)
    }

    return deviceNodes
}

func (m command) findLibs(nvmllib nvml.Interface) ([]string, error) {
    version, r := nvmllib.SystemGetDriverVersion()
    if r != nvml.SUCCESS {
        return nil, fmt.Errorf("failed to determine driver version: %v", r)
    }
    m.logger.Infof("Using driver version %v", version)

    cache, err := ldcache.New(m.logger, "")
    if err != nil {
        return nil, fmt.Errorf("failed to load ldcache: %v", err)
    }

    libs32, libs64 := cache.List()

    var libs []string
    for _, l := range libs64 {
        if strings.HasSuffix(l, version) {
            m.logger.Infof("found 64-bit driver lib: %v", l)
            libs = append(libs, l)
        }
    }

    for _, l := range libs32 {
        if strings.HasSuffix(l, version) {
            m.logger.Infof("found 32-bit driver lib: %v", l)
            libs = append(libs, l)
        }
    }

    return libs, nil
}
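The suffix filter above relies on driver libraries being named with the full driver version. A small hedged illustration, with a made-up version string and paths:

    // e.g. with version = "525.85.12", an ldcache entry such as
    // /usr/lib/x86_64-linux-gnu/libcuda.so.525.85.12 ends with the version and is kept,
    // while a plain SONAME link such as libcuda.so.1 is skipped.
    version := "525.85.12" // illustrative only
    for _, l := range []string{
        "/usr/lib/x86_64-linux-gnu/libcuda.so.525.85.12",
        "/usr/lib/x86_64-linux-gnu/libcuda.so.1",
    } {
        fmt.Println(l, strings.HasSuffix(l, version))
    }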

func (m command) findBinaries() ([]string, error) {
    candidates := []string{
        "nvidia-smi",              /* System management interface */
        "nvidia-debugdump",        /* GPU coredump utility */
        "nvidia-persistenced",     /* Persistence mode utility */
        "nvidia-cuda-mps-control", /* Multi process service CLI */
        "nvidia-cuda-mps-server",  /* Multi process service server */
    }

    locator := lookup.NewExecutableLocator(m.logger, "")

    var binaries []string
    for _, c := range candidates {
        targets, err := locator.Locate(c)
        if err != nil {
            m.logger.Warningf("skipping %v: %v", c, err)
            continue
        }

        binaries = append(binaries, targets[0])
    }
    return binaries, nil
}

func (m command) findIPC() ([]string, error) {
    candidates := []string{
        "/var/run/nvidia-persistenced/socket",
        "/var/run/nvidia-fabricmanager/socket",
        // TODO: This can be controlled by the NV_MPS_PIPE_DIR envvar
        "/tmp/nvidia-mps",
    }

    locator := lookup.NewFileLocator(m.logger, "")

    var ipcs []string
    for _, c := range candidates {
        targets, err := locator.Locate(c)
        if err != nil {
            m.logger.Warningf("skipping %v: %v", c, err)
            continue
        }

        ipcs = append(ipcs, targets[0])
    }
    return ipcs, nil
}

func generateMountsForPaths(options []string, pathSets ...[]string) []*specs.Mount {
    var mounts []*specs.Mount
    for _, paths := range pathSets {
        for _, p := range paths {
            mount := specs.Mount{
                HostPath: p,
                // We may want to adjust the container path
                ContainerPath: p,
                Type:          "bind",
                Options:       options,
            }
            mounts = append(mounts, &mount)
        }
    }
    return mounts
}

func (m command) generateUpdateLdCacheHook(libraries []string) *specs.Hook {
    locator := lookup.NewExecutableLocator(m.logger, "")

    hook := discover.CreateLDCacheUpdateHook(
        m.logger,
        locator,
        nvidiaCTKExecutable,
        nvidiaCTKDefaultFilePath,
        libraries,
    )
    return &specs.Hook{
        HookName: hook.Lifecycle,
        Path:     hook.Path,
        Args:     hook.Args,
    }
}

func (m command) generateDeviceFolderPermissionHooks(nvidiaCTKPath string, deviceNodes []*specs.DeviceNode) ([]*specs.Hook, error) {
    var deviceFolders []string
    seen := make(map[string]bool)

    for _, dn := range deviceNodes {
        if !strings.HasPrefix(dn.Path, "/dev") {
            m.logger.Warningf("Skipping unexpected device folder path for device %v", dn.Path)
            continue
        }
        for df := filepath.Dir(dn.Path); df != "/dev"; df = filepath.Dir(df) {
            if seen[df] {
                continue
            }
            deviceFolders = append(deviceFolders, df)
            seen[df] = true
        }
    }

    foldersByMode := make(map[string][]string)
    for _, p := range deviceFolders {
        info, err := os.Stat(p)
        if err != nil {
            return nil, fmt.Errorf("failed to get info for path %v: %v", p, err)
        }
        mode := fmt.Sprintf("%o", info.Mode().Perm())
        foldersByMode[mode] = append(foldersByMode[mode], p)
    }

    var hooks []*specs.Hook
    for mode, folders := range foldersByMode {
        args := []string{filepath.Base(nvidiaCTKPath), "hook", "chmod", "--mode", mode}
        for _, folder := range folders {
            args = append(args, "--path", folder)
        }
        hook := specs.Hook{
            HookName: cdi.CreateContainerHook,
            Path:     nvidiaCTKPath,
            Args:     args,
        }

        hooks = append(hooks, &hook)
    }

    return hooks, nil
    return os.MkdirAll(dir, 0755)
}
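A short sketch of how a helper such as createParentDirsIfRequired would typically be used before writing the generated spec to disk. This is not taken from the diff; the output path is an assumption.

    // Hypothetical save step: make sure the output directory exists first.
    outputFile := "/etc/cdi/nvidia.yaml" // assumed output location
    if err := createParentDirsIfRequired(outputFile); err != nil {
        return fmt.Errorf("failed to create parent folders for %v: %v", outputFile, err)
    }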

42	cmd/nvidia-ctk/cdi/generate/ipc.go	Normal file
@@ -0,0 +1,42 @@
/**
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
**/

package generate

import (
    "github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
    "github.com/NVIDIA/nvidia-container-toolkit/internal/lookup"
    "github.com/sirupsen/logrus"
)

// NewIPCDiscoverer creates a discoverer for NVIDIA IPC sockets.
func NewIPCDiscoverer(logger *logrus.Logger, driverRoot string) (discover.Discover, error) {
    d := discover.NewMounts(
        logger,
        lookup.NewFileLocator(
            lookup.WithLogger(logger),
            lookup.WithRoot(driverRoot),
        ),
        driverRoot,
        []string{
            "/var/run/nvidia-persistenced/socket",
            "/var/run/nvidia-fabricmanager/socket",
            "/tmp/nvidia-mps",
        },
    )

    return d, nil
}
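A hedged usage sketch for NewIPCDiscoverer, as it might be exercised from within the same package; only the Mounts method of the returned discoverer is used here, and the logger and driver-root values are placeholders.

    d, err := NewIPCDiscoverer(logrus.StandardLogger(), "/")
    if err != nil {
        return fmt.Errorf("failed to create IPC discoverer: %v", err)
    }
    mounts, err := d.Mounts()
    if err != nil {
        return fmt.Errorf("failed to discover IPC mounts: %v", err)
    }
    for _, mnt := range mounts {
        fmt.Printf("IPC socket: %v -> %v\n", mnt.HostPath, mnt.Path)
    }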

75	cmd/nvidia-ctk/cdi/generate/mig-device.go	Normal file
@@ -0,0 +1,75 @@
/**
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
**/

package generate

import (
    "fmt"

    "github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
    "github.com/NVIDIA/nvidia-container-toolkit/internal/nvcaps"
    "github.com/sirupsen/logrus"
    "gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvlib/device"
    "gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml"
)

// NewMigDeviceDiscoverer creates a discoverer for the specified mig device and its parent.
func NewMigDeviceDiscoverer(logger *logrus.Logger, driverRoot string, parent device.Device, d device.MigDevice) (discover.Discover, error) {
    minor, ret := parent.GetMinorNumber()
    if ret != nvml.SUCCESS {
        return nil, fmt.Errorf("error getting GPU device minor number: %v", ret)
    }
    parentPath := fmt.Sprintf("/dev/nvidia%d", minor)

    migCaps, err := nvcaps.NewMigCaps()
    if err != nil {
        return nil, fmt.Errorf("error getting MIG capability device paths: %v", err)
    }

    gi, ret := d.GetGpuInstanceId()
    if ret != nvml.SUCCESS {
        return nil, fmt.Errorf("error getting GPU Instance ID: %v", ret)
    }

    ci, ret := d.GetComputeInstanceId()
    if ret != nvml.SUCCESS {
        return nil, fmt.Errorf("error getting Compute Instance ID: %v", ret)
    }

    giCap := nvcaps.NewGPUInstanceCap(minor, gi)
    giCapDevicePath, err := migCaps.GetCapDevicePath(giCap)
    if err != nil {
        return nil, fmt.Errorf("failed to get GI cap device path: %v", err)
    }

    ciCap := nvcaps.NewComputeInstanceCap(minor, gi, ci)
    ciCapDevicePath, err := migCaps.GetCapDevicePath(ciCap)
    if err != nil {
        return nil, fmt.Errorf("failed to get CI cap device path: %v", err)
    }

    deviceNodes := discover.NewCharDeviceDiscoverer(
        logger,
        []string{
            parentPath,
            giCapDevicePath,
            ciCapDevicePath,
        },
        driverRoot,
    )

    return deviceNodes, nil
}
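A minimal, hypothetical call site for the discoverer above. The parent and mig handles are assumed to have been obtained elsewhere via go-nvlib; none of this appears in the diff itself.

    // parent is the device.Device for the full GPU; mig is one of its device.MigDevice instances.
    d, err := NewMigDeviceDiscoverer(logrus.StandardLogger(), "/", parent, mig)
    if err != nil {
        return fmt.Errorf("failed to create MIG device discoverer: %v", err)
    }
    deviceNodes, err := d.Devices()
    if err != nil {
        return fmt.Errorf("failed to discover MIG device nodes: %v", err)
    }
    // Typically this yields the parent node (e.g. /dev/nvidia0) plus the GPU-instance and
    // compute-instance capability nodes under /dev/nvidia-caps/; exact paths are system-specific.
    _ = deviceNodes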

84	cmd/nvidia-ctk/cdi/generate/namer.go	Normal file
@@ -0,0 +1,84 @@
/**
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
**/

package generate

import (
    "fmt"

    "gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvlib/device"
    "gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml"
)

type deviceNamer interface {
    GetDeviceName(int, device.Device) (string, error)
    GetMigDeviceName(int, int, device.MigDevice) (string, error)
}

const (
    deviceNameStrategyIndex     = "index"
    deviceNameStrategyTypeIndex = "type-index"
    deviceNameStrategyUUID      = "uuid"
)

type deviceNameIndex struct {
    gpuPrefix string
    migPrefix string
}
type deviceNameUUID struct{}

// newDeviceNamer creates a Device Namer based on the supplied strategy.
// This namer can be used to construct the names for MIG and GPU devices when generating the CDI spec.
func newDeviceNamer(strategy string) (deviceNamer, error) {
    switch strategy {
    case deviceNameStrategyIndex:
        return deviceNameIndex{}, nil
    case deviceNameStrategyTypeIndex:
        return deviceNameIndex{gpuPrefix: "gpu", migPrefix: "mig"}, nil
    case deviceNameStrategyUUID:
        return deviceNameUUID{}, nil
    }

    return nil, fmt.Errorf("invalid device name strategy: %v", strategy)
}

// GetDeviceName returns the name for the specified device based on the naming strategy
func (s deviceNameIndex) GetDeviceName(i int, d device.Device) (string, error) {
    return fmt.Sprintf("%s%d", s.gpuPrefix, i), nil
}

// GetMigDeviceName returns the name for the specified device based on the naming strategy
func (s deviceNameIndex) GetMigDeviceName(i int, j int, d device.MigDevice) (string, error) {
    return fmt.Sprintf("%s%d:%d", s.migPrefix, i, j), nil
}

// GetDeviceName returns the name for the specified device based on the naming strategy
func (s deviceNameUUID) GetDeviceName(i int, d device.Device) (string, error) {
    uuid, ret := d.GetUUID()
    if ret != nvml.SUCCESS {
        return "", fmt.Errorf("failed to get device UUID: %v", ret)
    }
    return uuid, nil
}

// GetMigDeviceName returns the name for the specified device based on the naming strategy
func (s deviceNameUUID) GetMigDeviceName(i int, j int, d device.MigDevice) (string, error) {
    uuid, ret := d.GetUUID()
    if ret != nvml.SUCCESS {
        return "", fmt.Errorf("failed to get device UUID: %v", ret)
    }
    return uuid, nil
}
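For orientation, a hedged sketch of how the three naming strategies differ in practice; the gpu and mig values are assumed to be existing device.Device and device.MigDevice handles, and the indices are made up.

    namer, err := newDeviceNamer(deviceNameStrategyTypeIndex)
    if err != nil {
        return err
    }
    name, _ := namer.GetDeviceName(0, gpu)          // "gpu0" for the type-index strategy
    migName, _ := namer.GetMigDeviceName(0, 1, mig) // "mig0:1"
    // With deviceNameStrategyIndex the prefixes are empty ("0" and "0:1"), and with
    // deviceNameStrategyUUID the device UUID is returned instead.
    _ = name
    _ = migName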
|
||||
@@ -1,123 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY Type, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package generate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/nvcaps"
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvlib/device"
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml"
|
||||
)
|
||||
|
||||
// nvmlDevice wraps an nvml.Device with more functions.
|
||||
type nvmlDevice struct {
|
||||
nvml.Device
|
||||
}
|
||||
|
||||
// nvmlMigDevice allows for specific functions of nvmlDevice to be overridden.
|
||||
type nvmlMigDevice nvmlDevice
|
||||
|
||||
// deviceInfo defines the information the required to construct a Device
|
||||
type deviceInfo interface {
|
||||
GetUUID() (string, error)
|
||||
GetDeviceNodes() ([]string, error)
|
||||
}
|
||||
|
||||
var _ deviceInfo = (*nvmlDevice)(nil)
|
||||
var _ deviceInfo = (*nvmlMigDevice)(nil)
|
||||
|
||||
func newGPUDevice(i int, gpu device.Device) (string, nvmlDevice) {
|
||||
return fmt.Sprintf("gpu%v", i), nvmlDevice{gpu}
|
||||
}
|
||||
|
||||
func newMigDevice(i int, j int, mig device.MigDevice) (string, nvmlMigDevice) {
|
||||
return fmt.Sprintf("mig%v:%v", i, j), nvmlMigDevice{mig}
|
||||
}
|
||||
|
||||
// GetUUID returns the UUID of the device
|
||||
func (d nvmlDevice) GetUUID() (string, error) {
|
||||
uuid, ret := d.Device.GetUUID()
|
||||
if ret != nvml.SUCCESS {
|
||||
return "", ret
|
||||
}
|
||||
return uuid, nil
|
||||
}
|
||||
|
||||
// GetUUID returns the UUID of the device
|
||||
func (d nvmlMigDevice) GetUUID() (string, error) {
|
||||
return nvmlDevice(d).GetUUID()
|
||||
}
|
||||
|
||||
// GetDeviceNodes returns the device node paths for a GPU device
|
||||
func (d nvmlDevice) GetDeviceNodes() ([]string, error) {
|
||||
minor, ret := d.GetMinorNumber()
|
||||
if ret != nvml.SUCCESS {
|
||||
return nil, fmt.Errorf("error getting GPU device minor number: %v", ret)
|
||||
}
|
||||
path := fmt.Sprintf("/dev/nvidia%d", minor)
|
||||
|
||||
return []string{path}, nil
|
||||
}
|
||||
|
||||
// GetDeviceNodes returns the device node paths for a MIG device
|
||||
func (d nvmlMigDevice) GetDeviceNodes() ([]string, error) {
|
||||
parent, ret := d.GetDeviceHandleFromMigDeviceHandle()
|
||||
if ret != nvml.SUCCESS {
|
||||
return nil, fmt.Errorf("error getting parent device: %v", ret)
|
||||
}
|
||||
minor, ret := parent.GetMinorNumber()
|
||||
if ret != nvml.SUCCESS {
|
||||
return nil, fmt.Errorf("error getting GPU device minor number: %v", ret)
|
||||
}
|
||||
parentPath := fmt.Sprintf("/dev/nvidia%d", minor)
|
||||
|
||||
migCaps, err := nvcaps.NewMigCaps()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting MIG capability device paths: %v", err)
|
||||
}
|
||||
|
||||
gi, ret := d.GetGpuInstanceId()
|
||||
if ret != nvml.SUCCESS {
|
||||
return nil, fmt.Errorf("error getting GPU Instance ID: %v", ret)
|
||||
}
|
||||
|
||||
ci, ret := d.GetComputeInstanceId()
|
||||
if ret != nvml.SUCCESS {
|
||||
return nil, fmt.Errorf("error getting Compute Instance ID: %v", ret)
|
||||
}
|
||||
|
||||
giCap := nvcaps.NewGPUInstanceCap(minor, gi)
|
||||
giCapDevicePath, err := migCaps.GetCapDevicePath(giCap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get GI cap device path: %v", err)
|
||||
}
|
||||
|
||||
ciCap := nvcaps.NewComputeInstanceCap(minor, gi, ci)
|
||||
ciCapDevicePath, err := migCaps.GetCapDevicePath(ciCap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get CI cap device path: %v", err)
|
||||
}
|
||||
|
||||
devicePaths := []string{
|
||||
parentPath,
|
||||
giCapDevicePath,
|
||||
ciCapDevicePath,
|
||||
}
|
||||
|
||||
return devicePaths, nil
|
||||
}

@@ -18,6 +18,7 @@ package chmod

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
    "syscall"
@@ -133,7 +134,12 @@ func (m command) run(c *cli.Context, cfg *config) error {
func (m command) getPaths(root string, paths []string) []string {
    var pathsInRoot []string
    for _, f := range paths {
        pathsInRoot = append(pathsInRoot, filepath.Join(root, f))
        path := filepath.Join(root, f)
        if _, err := os.Stat(path); err != nil {
            m.logger.Debugf("Skipping path %q: %v", path, err)
            continue
        }
        pathsInRoot = append(pathsInRoot, path)
    }

    return pathsInRoot
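The effect of the getPaths change is easiest to see with a small hedged example; the root and paths below are illustrative only.

    // With root = "/container-rootfs" and paths = []string{"dev/nvidia0", "dev/nvidia1"},
    // a missing /container-rootfs/dev/nvidia1 is now logged at debug level and skipped
    // rather than being passed on to the chmod step.
    pathsInRoot := m.getPaths("/container-rootfs", []string{"dev/nvidia0", "dev/nvidia1"})
    fmt.Printf("chmod will be applied to: %v\n", pathsInRoot)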
@@ -18,6 +18,7 @@ package hook

import (
    chmod "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/hook/chmod"
    symlinks "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/hook/create-symlinks"
    ldcache "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/hook/update-ldcache"
    "github.com/sirupsen/logrus"

@@ -23,6 +23,7 @@ import (
    "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/hook"
    infoCLI "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/info"
    "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/runtime"
    "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system"
    "github.com/NVIDIA/nvidia-container-toolkit/internal/info"

    log "github.com/sirupsen/logrus"
@@ -77,6 +78,7 @@ func main() {
        runtime.NewCommand(logger),
        infoCLI.NewCommand(logger),
        cdi.NewCommand(logger),
        system.NewCommand(logger),
    }

    // Run the CLI
175
cmd/nvidia-ctk/system/create-dev-char-symlinks/all.go
Normal file
175
cmd/nvidia-ctk/system/create-dev-char-symlinks/all.go
Normal file
@@ -0,0 +1,175 @@
|
||||
/**
|
||||
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package devchar
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/info/proc/devices"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/nvcaps"
|
||||
"github.com/sirupsen/logrus"
|
||||
"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvpci"
|
||||
)
|
||||
|
||||
type allPossible struct {
|
||||
logger *logrus.Logger
|
||||
driverRoot string
|
||||
deviceMajors devices.Devices
|
||||
migCaps nvcaps.MigCaps
|
||||
}
|
||||
|
||||
// newAllPossible returns a new allPossible device node lister.
|
||||
// This lister lists all possible device nodes for NVIDIA GPUs, control devices, and capability devices.
|
||||
func newAllPossible(logger *logrus.Logger, driverRoot string) (nodeLister, error) {
|
||||
deviceMajors, err := devices.GetNVIDIADevices()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed reading device majors: %v", err)
|
||||
}
|
||||
migCaps, err := nvcaps.NewMigCaps()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read MIG caps: %v", err)
|
||||
}
|
||||
if migCaps == nil {
|
||||
migCaps = make(nvcaps.MigCaps)
|
||||
}
|
||||
|
||||
l := allPossible{
|
||||
logger: logger,
|
||||
driverRoot: driverRoot,
|
||||
deviceMajors: deviceMajors,
|
||||
migCaps: migCaps,
|
||||
}
|
||||
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// DeviceNodes returns a list of all possible device nodes for NVIDIA GPUs, control devices, and capability devices.
|
||||
func (m allPossible) DeviceNodes() ([]deviceNode, error) {
|
||||
gpus, err := nvpci.NewFrom(
|
||||
filepath.Join(m.driverRoot, nvpci.PCIDevicesRoot),
|
||||
).GetGPUs()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get GPU information: %v", err)
|
||||
}
|
||||
|
||||
count := len(gpus)
|
||||
if count == 0 {
|
||||
m.logger.Infof("No NVIDIA devices found in %s", m.driverRoot)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
deviceNodes, err := m.getControlDeviceNodes()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get control device nodes: %v", err)
|
||||
}
|
||||
|
||||
for gpu := 0; gpu < count; gpu++ {
|
||||
deviceNodes = append(deviceNodes, m.getGPUDeviceNodes(gpu)...)
|
||||
deviceNodes = append(deviceNodes, m.getNVCapDeviceNodes(gpu)...)
|
||||
}
|
||||
|
||||
return deviceNodes, nil
|
||||
}
|
||||
|
||||
// getControlDeviceNodes generates a list of control devices
|
||||
func (m allPossible) getControlDeviceNodes() ([]deviceNode, error) {
|
||||
var deviceNodes []deviceNode
|
||||
|
||||
// Define the control devices for standard GPUs.
|
||||
controlDevices := []deviceNode{
|
||||
m.newDeviceNode(devices.NVIDIAGPU, "/dev/nvidia-modeset", devices.NVIDIAModesetMinor),
|
||||
m.newDeviceNode(devices.NVIDIAGPU, "/dev/nvidiactl", devices.NVIDIACTLMinor),
|
||||
m.newDeviceNode(devices.NVIDIAUVM, "/dev/nvidia-uvm", devices.NVIDIAUVMMinor),
|
||||
m.newDeviceNode(devices.NVIDIAUVM, "/dev/nvidia-uvm-tools", devices.NVIDIAUVMToolsMinor),
|
||||
}
|
||||
|
||||
deviceNodes = append(deviceNodes, controlDevices...)
|
||||
|
||||
for _, migControlDevice := range []nvcaps.MigCap{"config", "monitor"} {
|
||||
migControlMinor, exist := m.migCaps[migControlDevice]
|
||||
if !exist {
|
||||
continue
|
||||
}
|
||||
|
||||
d := m.newDeviceNode(
|
||||
devices.NVIDIACaps,
|
||||
migControlMinor.DevicePath(),
|
||||
int(migControlMinor),
|
||||
)
|
||||
|
||||
deviceNodes = append(deviceNodes, d)
|
||||
}
|
||||
|
||||
return deviceNodes, nil
|
||||
}
|
||||
|
||||
// getGPUDeviceNodes generates a list of device nodes for a given GPU.
|
||||
func (m allPossible) getGPUDeviceNodes(gpu int) []deviceNode {
|
||||
d := m.newDeviceNode(
|
||||
devices.NVIDIAGPU,
|
||||
fmt.Sprintf("/dev/nvidia%d", gpu),
|
||||
gpu,
|
||||
)
|
||||
|
||||
return []deviceNode{d}
|
||||
}
|
||||
|
||||
// getNVCapDeviceNodes generates a list of cap device nodes for a given GPU.
|
||||
func (m allPossible) getNVCapDeviceNodes(gpu int) []deviceNode {
|
||||
var selectedCapMinors []nvcaps.MigMinor
|
||||
for gi := 0; ; gi++ {
|
||||
giCap := nvcaps.NewGPUInstanceCap(gpu, gi)
|
||||
giMinor, exist := m.migCaps[giCap]
|
||||
if !exist {
|
||||
break
|
||||
}
|
||||
selectedCapMinors = append(selectedCapMinors, giMinor)
|
||||
for ci := 0; ; ci++ {
|
||||
ciCap := nvcaps.NewComputeInstanceCap(gpu, gi, ci)
|
||||
ciMinor, exist := m.migCaps[ciCap]
|
||||
if !exist {
|
||||
break
|
||||
}
|
||||
selectedCapMinors = append(selectedCapMinors, ciMinor)
|
||||
}
|
||||
}
|
||||
|
||||
var deviceNodes []deviceNode
|
||||
for _, capMinor := range selectedCapMinors {
|
||||
d := m.newDeviceNode(
|
||||
devices.NVIDIACaps,
|
||||
capMinor.DevicePath(),
|
||||
int(capMinor),
|
||||
)
|
||||
deviceNodes = append(deviceNodes, d)
|
||||
}
|
||||
|
||||
return deviceNodes
|
||||
}
|
||||
|
||||
// newDeviceNode creates a new device node with the specified path and major/minor numbers.
|
||||
// The path is adjusted for the specified driver root.
|
||||
func (m allPossible) newDeviceNode(deviceName devices.Name, path string, minor int) deviceNode {
|
||||
major, _ := m.deviceMajors.Get(deviceName)
|
||||
|
||||
return deviceNode{
|
||||
path: filepath.Join(m.driverRoot, path),
|
||||
major: uint32(major),
|
||||
minor: uint32(minor),
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,332 @@
|
||||
/**
|
||||
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package devchar
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultDevCharPath = "/dev/char"
|
||||
)
|
||||
|
||||
type command struct {
|
||||
logger *logrus.Logger
|
||||
}
|
||||
|
||||
type config struct {
|
||||
devCharPath string
|
||||
driverRoot string
|
||||
dryRun bool
|
||||
watch bool
|
||||
createAll bool
|
||||
}
|
||||
|
||||
// NewCommand constructs a command sub-command with the specified logger
|
||||
func NewCommand(logger *logrus.Logger) *cli.Command {
|
||||
c := command{
|
||||
logger: logger,
|
||||
}
|
||||
return c.build()
|
||||
}
|
||||
|
||||
// build
|
||||
func (m command) build() *cli.Command {
|
||||
cfg := config{}
|
||||
|
||||
// Create the 'create-dev-char-symlinks' command
|
||||
c := cli.Command{
|
||||
Name: "create-dev-char-symlinks",
|
||||
Usage: "A utility to create symlinks to possible /dev/nv* devices in /dev/char",
|
||||
Before: func(c *cli.Context) error {
|
||||
return m.validateFlags(c, &cfg)
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
return m.run(c, &cfg)
|
||||
},
|
||||
}
|
||||
|
||||
c.Flags = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "dev-char-path",
|
||||
Usage: "The path at which the symlinks will be created. Symlinks will be created as `DEV_CHAR`/MAJOR:MINOR where MAJOR and MINOR are the major and minor numbers of a corresponding device node.",
|
||||
Value: defaultDevCharPath,
|
||||
Destination: &cfg.devCharPath,
|
||||
EnvVars: []string{"DEV_CHAR_PATH"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "driver-root",
|
||||
Usage: "The path to the driver root. `DRIVER_ROOT`/dev is searched for NVIDIA device nodes.",
|
||||
Value: "/",
|
||||
Destination: &cfg.driverRoot,
|
||||
EnvVars: []string{"DRIVER_ROOT"},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "watch",
|
||||
Usage: "If set, the command will watch for changes to the driver root and recreate the symlinks when changes are detected.",
|
||||
Value: false,
|
||||
Destination: &cfg.watch,
|
||||
EnvVars: []string{"WATCH"},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "create-all",
|
||||
Usage: "Create all possible /dev/char symlinks instead of limiting these to existing device nodes.",
|
||||
Destination: &cfg.createAll,
|
||||
EnvVars: []string{"CREATE_ALL"},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "dry-run",
|
||||
Usage: "If set, the command will not create any symlinks.",
|
||||
Value: false,
|
||||
Destination: &cfg.dryRun,
|
||||
EnvVars: []string{"DRY_RUN"},
|
||||
},
|
||||
}
|
||||
|
||||
return &c
|
||||
}
|
||||
|
||||
func (m command) validateFlags(r *cli.Context, cfg *config) error {
|
||||
if cfg.createAll && cfg.watch {
|
||||
return fmt.Errorf("create-all and watch are mutually exclusive")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m command) run(c *cli.Context, cfg *config) error {
|
||||
var watcher *fsnotify.Watcher
|
||||
var sigs chan os.Signal
|
||||
|
||||
if cfg.watch {
|
||||
watcher, err := newFSWatcher(filepath.Join(cfg.driverRoot, "dev"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create FS watcher: %v", err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
|
||||
sigs = newOSWatcher(syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
|
||||
}
|
||||
|
||||
l, err := NewSymlinkCreator(
|
||||
WithLogger(m.logger),
|
||||
WithDevCharPath(cfg.devCharPath),
|
||||
WithDriverRoot(cfg.driverRoot),
|
||||
WithDryRun(cfg.dryRun),
|
||||
WithCreateAll(cfg.createAll),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create symlink creator: %v", err)
|
||||
}
|
||||
|
||||
create:
|
||||
err = l.CreateLinks()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create links: %v", err)
|
||||
}
|
||||
if !cfg.watch {
|
||||
return nil
|
||||
}
|
||||
for {
|
||||
select {
|
||||
|
||||
case event := <-watcher.Events:
|
||||
deviceNode := filepath.Base(event.Name)
|
||||
if !strings.HasPrefix(deviceNode, "nvidia") {
|
||||
continue
|
||||
}
|
||||
if event.Op&fsnotify.Create == fsnotify.Create {
|
||||
m.logger.Infof("%s created, restarting.", event.Name)
|
||||
goto create
|
||||
}
|
||||
if event.Op&fsnotify.Create == fsnotify.Remove {
|
||||
m.logger.Infof("%s removed. Ignoring", event.Name)
|
||||
|
||||
}
|
||||
|
||||
// Watch for any other fs errors and log them.
|
||||
case err := <-watcher.Errors:
|
||||
m.logger.Errorf("inotify: %s", err)
|
||||
|
||||
// React to signals
|
||||
case s := <-sigs:
|
||||
switch s {
|
||||
case syscall.SIGHUP:
|
||||
m.logger.Infof("Received SIGHUP, recreating symlinks.")
|
||||
goto create
|
||||
default:
|
||||
m.logger.Infof("Received signal %q, shutting down.", s)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type linkCreator struct {
|
||||
logger *logrus.Logger
|
||||
lister nodeLister
|
||||
driverRoot string
|
||||
devCharPath string
|
||||
dryRun bool
|
||||
createAll bool
|
||||
}
|
||||
|
||||
// Creator is an interface for creating symlinks to /dev/nv* devices in /dev/char.
|
||||
type Creator interface {
|
||||
CreateLinks() error
|
||||
}
|
||||
|
||||
// Option is a functional option for configuring the linkCreator.
|
||||
type Option func(*linkCreator)
|
||||
|
||||
// NewSymlinkCreator creates a new linkCreator.
|
||||
func NewSymlinkCreator(opts ...Option) (Creator, error) {
|
||||
c := linkCreator{}
|
||||
for _, opt := range opts {
|
||||
opt(&c)
|
||||
}
|
||||
if c.logger == nil {
|
||||
c.logger = logrus.StandardLogger()
|
||||
}
|
||||
if c.driverRoot == "" {
|
||||
c.driverRoot = "/"
|
||||
}
|
||||
if c.devCharPath == "" {
|
||||
c.devCharPath = defaultDevCharPath
|
||||
}
|
||||
|
||||
if c.createAll {
|
||||
lister, err := newAllPossible(c.logger, c.driverRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create all possible device lister: %v", err)
|
||||
}
|
||||
c.lister = lister
|
||||
} else {
|
||||
c.lister = existing{c.logger, c.driverRoot}
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// WithDriverRoot sets the driver root path.
|
||||
func WithDriverRoot(root string) Option {
|
||||
return func(c *linkCreator) {
|
||||
c.driverRoot = root
|
||||
}
|
||||
}
|
||||
|
||||
// WithDevCharPath sets the path at which the symlinks will be created.
|
||||
func WithDevCharPath(path string) Option {
|
||||
return func(c *linkCreator) {
|
||||
c.devCharPath = path
|
||||
}
|
||||
}
|
||||
|
||||
// WithDryRun sets the dry run flag.
|
||||
func WithDryRun(dryRun bool) Option {
|
||||
return func(c *linkCreator) {
|
||||
c.dryRun = dryRun
|
||||
}
|
||||
}
|
||||
|
||||
// WithLogger sets the logger.
|
||||
func WithLogger(logger *logrus.Logger) Option {
|
||||
return func(c *linkCreator) {
|
||||
c.logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
// WithCreateAll sets the createAll flag for the linkCreator.
|
||||
func WithCreateAll(createAll bool) Option {
|
||||
return func(lc *linkCreator) {
|
||||
lc.createAll = createAll
|
||||
}
|
||||
}
|
||||
|
||||
// CreateLinks creates symlinks for all NVIDIA device nodes found in the driver root.
|
||||
func (m linkCreator) CreateLinks() error {
|
||||
deviceNodes, err := m.lister.DeviceNodes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get device nodes: %v", err)
|
||||
}
|
||||
|
||||
if len(deviceNodes) != 0 && !m.dryRun {
|
||||
err := os.MkdirAll(m.devCharPath, 0755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %v", m.devCharPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, deviceNode := range deviceNodes {
|
||||
target := deviceNode.path
|
||||
linkPath := filepath.Join(m.devCharPath, deviceNode.devCharName())
|
||||
|
||||
m.logger.Infof("Creating link %s => %s", linkPath, target)
|
||||
if m.dryRun {
|
||||
continue
|
||||
}
|
||||
|
||||
err = os.Symlink(target, linkPath)
|
||||
if err != nil {
|
||||
m.logger.Warnf("Could not create symlink: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type deviceNode struct {
|
||||
path string
|
||||
major uint32
|
||||
minor uint32
|
||||
}
|
||||
|
||||
func (d deviceNode) devCharName() string {
|
||||
return fmt.Sprintf("%d:%d", d.major, d.minor)
|
||||
}
|
||||
|
||||
func newFSWatcher(files ...string) (*fsnotify.Watcher, error) {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
err = watcher.Add(f)
|
||||
if err != nil {
|
||||
watcher.Close()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return watcher, nil
|
||||
}
|
||||
|
||||
func newOSWatcher(sigs ...os.Signal) chan os.Signal {
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, sigs...)
|
||||
|
||||
return sigChan
|
||||
}
|
||||
95	cmd/nvidia-ctk/system/create-dev-char-symlinks/existing.go	Normal file
@@ -0,0 +1,95 @@
/**
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
**/

package devchar

import (
    "path/filepath"
    "strings"

    "github.com/NVIDIA/nvidia-container-toolkit/internal/lookup"
    "github.com/sirupsen/logrus"
    "golang.org/x/sys/unix"
)

type nodeLister interface {
    DeviceNodes() ([]deviceNode, error)
}

type existing struct {
    logger     *logrus.Logger
    driverRoot string
}

// DeviceNodes returns a list of NVIDIA device nodes in the specified root.
// The nvidia-nvswitch* and nvidia-nvlink devices are excluded.
func (m existing) DeviceNodes() ([]deviceNode, error) {
    locator := lookup.NewCharDeviceLocator(
        lookup.WithLogger(m.logger),
        lookup.WithRoot(m.driverRoot),
        lookup.WithOptional(true),
    )

    devices, err := locator.Locate("/dev/nvidia*")
    if err != nil {
        m.logger.Warnf("Error while locating device: %v", err)
    }

    capDevices, err := locator.Locate("/dev/nvidia-caps/nvidia-*")
    if err != nil {
        m.logger.Warnf("Error while locating caps device: %v", err)
    }

    if len(devices) == 0 && len(capDevices) == 0 {
        m.logger.Infof("No NVIDIA devices found in %s", m.driverRoot)
        return nil, nil
    }

    var deviceNodes []deviceNode
    for _, d := range append(devices, capDevices...) {
        if m.nodeIsBlocked(d) {
            continue
        }

        var stat unix.Stat_t
        err := unix.Stat(d, &stat)
        if err != nil {
            m.logger.Warnf("Could not stat device: %v", err)
            continue
        }
        deviceNode := deviceNode{
            path:  d,
            major: unix.Major(uint64(stat.Rdev)),
            minor: unix.Minor(uint64(stat.Rdev)),
        }

        deviceNodes = append(deviceNodes, deviceNode)
    }

    return deviceNodes, nil
}

// nodeIsBlocked returns true if the specified device node should be ignored.
func (m existing) nodeIsBlocked(path string) bool {
    blockedPrefixes := []string{"nvidia-fs", "nvidia-nvswitch", "nvidia-nvlink"}
    nodeName := filepath.Base(path)
    for _, prefix := range blockedPrefixes {
        if strings.HasPrefix(nodeName, prefix) {
            return true
        }
    }
    return false
}
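As a hedged illustration of the stat-based major/minor extraction, the symlink name produced for a typical GPU node would be derived roughly as follows; 195 is the usual NVIDIA GPU character-device major, but the concrete numbers depend on the host and are only illustrative.

    var stat unix.Stat_t
    if err := unix.Stat("/dev/nvidia0", &stat); err == nil {
        dn := deviceNode{
            path:  "/dev/nvidia0",
            major: unix.Major(uint64(stat.Rdev)),
            minor: unix.Minor(uint64(stat.Rdev)),
        }
        fmt.Println(dn.devCharName()) // e.g. "195:0", the name of the /dev/char symlink
    }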

49	cmd/nvidia-ctk/system/system.go	Normal file
@@ -0,0 +1,49 @@
/**
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
**/

package system

import (
    devchar "github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-ctk/system/create-dev-char-symlinks"
    "github.com/sirupsen/logrus"
    "github.com/urfave/cli/v2"
)

type command struct {
    logger *logrus.Logger
}

// NewCommand constructs a runtime command with the specified logger
func NewCommand(logger *logrus.Logger) *cli.Command {
    c := command{
        logger: logger,
    }
    return c.build()
}

func (m command) build() *cli.Command {
    // Create the 'system' command
    system := cli.Command{
        Name:  "system",
        Usage: "A collection of system-related utilities for the NVIDIA Container Toolkit",
    }

    system.Subcommands = []*cli.Command{
        devchar.NewCommand(m.logger),
    }

    return &system
}
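A minimal sketch of how this command group plugs into a urfave/cli application; it mirrors the main.go hunk earlier in the diff, with the surrounding nvidia-ctk app definition abbreviated and other commands omitted.

    logger := logrus.New()
    app := &cli.App{
        Name:  "nvidia-ctk",
        Usage: "NVIDIA Container Toolkit CLI",
        Commands: []*cli.Command{
            system.NewCommand(logger), // exposes "system create-dev-char-symlinks"
        },
    }
    if err := app.Run(os.Args); err != nil {
        logger.Fatal(err)
    }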

@@ -14,8 +14,8 @@
ARG GOLANG_VERSION=x.x.x
FROM golang:${GOLANG_VERSION}

RUN go install golang.org/x/lint/golint@latest
RUN go install golang.org/x/lint/golint@6edffad5e6160f5949cdefc81710b2706fbcd4f6
RUN go install github.com/matryer/moq@latest
RUN go install github.com/gordonklaus/ineffassign@latest
RUN go install github.com/gordonklaus/ineffassign@d2c82e48359b033cde9cf1307f6d5550b8d61321
RUN go install github.com/client9/misspell/cmd/misspell@latest
RUN go install github.com/google/go-licenses@latest
|
||||
|
||||
19
go.mod
19
go.mod
@@ -5,37 +5,32 @@ go 1.18
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.0.0
|
||||
github.com/NVIDIA/go-nvml v0.11.6-0.0.20220823120812-7e2082095e82
|
||||
github.com/container-orchestrated-devices/container-device-interface v0.5.2
|
||||
github.com/opencontainers/runc v1.1.4
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab
|
||||
github.com/container-orchestrated-devices/container-device-interface v0.5.4-0.20230111111500-5b3b5d81179a
|
||||
github.com/fsnotify/fsnotify v1.5.4
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb
|
||||
github.com/pelletier/go-toml v1.9.4
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/urfave/cli/v2 v2.3.0
|
||||
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20220922133427-1049a7fa76a9
|
||||
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230119114711-6fe07bb33342
|
||||
golang.org/x/mod v0.5.0
|
||||
golang.org/x/sys v0.0.0-20220927170352-d9d178bc13c6
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f // indirect
|
||||
github.com/opencontainers/runc v1.1.4 // indirect
|
||||
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 // indirect
|
||||
github.com/opencontainers/selinux v1.10.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
28
go.sum
28
go.sum
@@ -3,13 +3,12 @@ github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU
|
||||
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/NVIDIA/go-nvml v0.11.6-0.0.20220823120812-7e2082095e82 h1:x751Xx1tdxkiA/sdkv2J769n21UbYKzVOpe9S/h1M3k=
|
||||
github.com/NVIDIA/go-nvml v0.11.6-0.0.20220823120812-7e2082095e82/go.mod h1:hy7HYeQy335x6nEss0Ne3PYqleRa6Ct+VKD9RQ4nyFs=
|
||||
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
|
||||
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
|
||||
github.com/container-orchestrated-devices/container-device-interface v0.5.2 h1:Bf/Zq8UBhbSBtB+pFBVIQ2Rh7sNK/x2ZEr6uW5YjNv8=
|
||||
github.com/container-orchestrated-devices/container-device-interface v0.5.2/go.mod h1:ZToWfSyUH5l9Rk7/bjkUUkNLz4b1mE+CVUVafuikDPY=
|
||||
github.com/container-orchestrated-devices/container-device-interface v0.5.4-0.20230111111500-5b3b5d81179a h1:sP3PcgyIkRlHqfF3Jfpe/7G8kf/qpzG4C8r94y9hLbE=
|
||||
github.com/container-orchestrated-devices/container-device-interface v0.5.4-0.20230111111500-5b3b5d81179a/go.mod h1:xMRa4fJgXzSDFUCURSimOUgoSc+odohvO3uXT9xjqH0=
|
||||
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
@@ -46,20 +45,17 @@ github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdx
|
||||
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
|
||||
github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg=
|
||||
github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20201121164853-7413a7f753e1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab h1:YQZXa3elcHgKXAa2GjVFC9M3JeP7ZPyFD1YByDx/dgQ=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20211214071223-8958f93039ab/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f h1:MMcsVl0FAVEahmXTy+uXoDTw3yJq7nGrK8ITs/kkreo=
|
||||
github.com/opencontainers/runtime-tools v0.9.1-0.20220110225228-7e2d60f1e41f/go.mod h1:/tgP02fPXGHkU3/qKK1Y0Db4yqNyGm03vLq/mzHzcS4=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb h1:1xSVPOd7/UA+39/hXEGnBJ13p6JFB0E1EvQFlrRDOXI=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 h1:DmNGcqH3WDbV5k8OJ+esPWbqUOX5rMLR2PMvziDMJi0=
|
||||
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
|
||||
github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
||||
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
||||
github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w=
|
||||
github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
||||
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
|
||||
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -90,8 +86,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20220922133427-1049a7fa76a9 h1:3C1cK0/n1HGfH0MPCCPRntK8xPNlrVq9OWR4RA72jW0=
|
||||
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20220922133427-1049a7fa76a9/go.mod h1:GStidGxhaqJhYFW1YpOnLvYCbL2EsM0od7IW4u7+JgU=
|
||||
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230119114711-6fe07bb33342 h1:083n9fJt2dWOpJd/X/q9Xgl5XtQLL22uSFYbzVqJssg=
|
||||
gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230119114711-6fe07bb33342/go.mod h1:GStidGxhaqJhYFW1YpOnLvYCbL2EsM0od7IW4u7+JgU=
|
||||
golang.org/x/mod v0.5.0 h1:UG21uOlmZabA4fW5i7ZX6bjw1xELEGg/ZLgZq9auk/Q=
|
||||
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
@@ -121,7 +117,7 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
|
||||
@@ -28,7 +28,10 @@ var _ Discover = (*charDevices)(nil)
|
||||
|
||||
// NewCharDeviceDiscoverer creates a discoverer which locates the specified set of device nodes.
|
||||
func NewCharDeviceDiscoverer(logger *logrus.Logger, devices []string, root string) Discover {
|
||||
locator := lookup.NewCharDeviceLocator(logger, root)
|
||||
locator := lookup.NewCharDeviceLocator(
|
||||
lookup.WithLogger(logger),
|
||||
lookup.WithRoot(root),
|
||||
)
|
||||
|
||||
return NewDeviceDiscoverer(logger, locator, root, devices)
|
||||
}
|
||||
|
||||
@@ -27,16 +27,16 @@ import (
|
||||
// NewFromCSVFiles creates a discoverer for the specified CSV files. A logger is also supplied.
|
||||
// The constructed discoverer is comprised of a list, with each element in the list being associated with a
|
||||
// single CSV files.
|
||||
func NewFromCSVFiles(logger *logrus.Logger, files []string, root string) (Discover, error) {
|
||||
func NewFromCSVFiles(logger *logrus.Logger, files []string, driverRoot string) (Discover, error) {
|
||||
if len(files) == 0 {
|
||||
logger.Warnf("No CSV files specified")
|
||||
return None{}, nil
|
||||
}
|
||||
|
||||
symlinkLocator := lookup.NewSymlinkLocator(logger, root)
|
||||
symlinkLocator := lookup.NewSymlinkLocator(logger, driverRoot)
|
||||
locators := map[csv.MountSpecType]lookup.Locator{
|
||||
csv.MountSpecDev: lookup.NewCharDeviceLocator(logger, root),
|
||||
csv.MountSpecDir: lookup.NewDirectoryLocator(logger, root),
|
||||
csv.MountSpecDev: lookup.NewCharDeviceLocator(lookup.WithLogger(logger), lookup.WithRoot(driverRoot)),
|
||||
csv.MountSpecDir: lookup.NewDirectoryLocator(logger, driverRoot),
|
||||
// Libraries and symlinks are handled in the same way
|
||||
csv.MountSpecLib: symlinkLocator,
|
||||
csv.MountSpecSym: symlinkLocator,
|
||||
@@ -52,7 +52,7 @@ func NewFromCSVFiles(logger *logrus.Logger, files []string, root string) (Discov
|
||||
mountSpecs = append(mountSpecs, targets...)
|
||||
}
|
||||
|
||||
return newFromMountSpecs(logger, locators, root, mountSpecs)
|
||||
return newFromMountSpecs(logger, locators, driverRoot, mountSpecs)
|
||||
}
|
||||
|
||||
// loadCSVFile loads the specified CSV file and returns the list of mount specs
|
||||
@@ -71,7 +71,7 @@ func loadCSVFile(logger *logrus.Logger, filename string) ([]*csv.MountSpec, erro
|
||||
|
||||
// newFromMountSpecs creates a discoverer for the CSV file. A logger is also supplied.
|
||||
// A list of csvDiscoverers is returned, with each being associated with a single MountSpecType.
|
||||
func newFromMountSpecs(logger *logrus.Logger, locators map[csv.MountSpecType]lookup.Locator, root string, targets []*csv.MountSpec) (Discover, error) {
|
||||
func newFromMountSpecs(logger *logrus.Logger, locators map[csv.MountSpecType]lookup.Locator, driverRoot string, targets []*csv.MountSpec) (Discover, error) {
|
||||
if len(targets) == 0 {
|
||||
return &None{}, nil
|
||||
}
|
||||
@@ -95,9 +95,9 @@ func newFromMountSpecs(logger *logrus.Logger, locators map[csv.MountSpecType]loo
|
||||
var m Discover
|
||||
switch t {
|
||||
case csv.MountSpecDev:
|
||||
m = NewDeviceDiscoverer(logger, locator, root, candidatesByType[t])
|
||||
m = NewDeviceDiscoverer(logger, locator, driverRoot, candidatesByType[t])
|
||||
default:
|
||||
m = NewMounts(logger, locator, root, candidatesByType[t])
|
||||
m = NewMounts(logger, locator, driverRoot, candidatesByType[t])
|
||||
}
|
||||
discoverers = append(discoverers, m)
|
||||
|
||||
|
||||
@@ -18,8 +18,8 @@ package discover
|
||||
|
||||
// Config represents the configuration options for discovery
|
||||
type Config struct {
|
||||
Root string
|
||||
NVIDIAContainerToolkitCLIExecutablePath string
|
||||
DriverRoot string
|
||||
NvidiaCTKPath string
|
||||
}
|
||||
|
||||
// Device represents a discovered character device.
|
||||
@@ -41,8 +41,9 @@ type Hook struct {
|
||||
Args []string
|
||||
}
|
||||
|
||||
//go:generate moq -stub -out discover_mock.go . Discover
|
||||
// Discover defines an interface for discovering the devices, mounts, and hooks available on a system
|
||||
//
|
||||
//go:generate moq -stub -out discover_mock.go . Discover
|
||||
type Discover interface {
|
||||
Devices() ([]Device, error)
|
||||
Mounts() ([]Mount, error)
|
||||
|
||||
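Call sites that populated the old field names move to the renamed ones; a short sketch of the updated construction, with illustrative values:

    cfg := &discover.Config{
        // Root under which driver files are resolved (previously Root).
        DriverRoot: "/",
        // Path to the nvidia-ctk binary used when constructing hooks
        // (previously NVIDIAContainerToolkitCLIExecutablePath).
        NvidiaCTKPath: "/usr/bin/nvidia-ctk",
    }
    _ = cfg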
@@ -45,7 +45,10 @@ func NewGDSDiscoverer(logger *logrus.Logger, root string) (Discover, error) {
|
||||
|
||||
cufile := NewMounts(
|
||||
logger,
|
||||
lookup.NewFileLocator(logger, root),
|
||||
lookup.NewFileLocator(
|
||||
lookup.WithLogger(logger),
|
||||
lookup.WithRoot(root),
|
||||
),
|
||||
root,
|
||||
[]string{"/etc/cufile.json"},
|
||||
)
|
||||
|
||||
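For context, a brief sketch of the GDS (GPUDirect Storage) discoverer whose cufile lookup is switched to the option-based locator here, assuming a logger in scope and a root of "/":

    gds, err := discover.NewGDSDiscoverer(logger, "/")
    if err != nil {
        return fmt.Errorf("failed to create GPUDirect Storage discoverer: %v", err)
    }
    // /etc/cufile.json is reported as a mount when it exists under the root.
    gdsMounts, err := gds.Mounts()
    if err != nil {
        return fmt.Errorf("failed to discover GDS mounts: %v", err)
    }
    _ = gdsMounts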
@@ -25,44 +25,19 @@ import (
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/info/drm"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/info/proc"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/lookup"
|
||||
"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NewGraphicsDiscoverer returns the discoverer for graphics tools such as Vulkan.
|
||||
func NewGraphicsDiscoverer(logger *logrus.Logger, devices image.VisibleDevices, cfg *Config) (Discover, error) {
|
||||
root := cfg.Root
|
||||
driverRoot := cfg.DriverRoot
|
||||
|
||||
locator, err := lookup.NewLibraryLocator(logger, root)
|
||||
mounts, err := NewGraphicsMountsDiscoverer(logger, driverRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to construct library locator: %v", err)
|
||||
return nil, fmt.Errorf("failed to create mounts discoverer: %v", err)
|
||||
}
|
||||
libraries := NewMounts(
|
||||
logger,
|
||||
locator,
|
||||
root,
|
||||
[]string{
|
||||
"libnvidia-egl-gbm.so",
|
||||
},
|
||||
)
|
||||
|
||||
jsonMounts := NewMounts(
|
||||
logger,
|
||||
lookup.NewFileLocator(logger, root),
|
||||
root,
|
||||
[]string{
|
||||
// TODO: We should handle this more cleanly
|
||||
"/etc/glvnd/egl_vendor.d/10_nvidia.json",
|
||||
"/etc/vulkan/icd.d/nvidia_icd.json",
|
||||
"/etc/vulkan/implicit_layer.d/nvidia_layers.json",
|
||||
"/usr/share/glvnd/egl_vendor.d/10_nvidia.json",
|
||||
"/usr/share/vulkan/icd.d/nvidia_icd.json",
|
||||
"/usr/share/vulkan/implicit_layer.d/nvidia_layers.json",
|
||||
"/usr/share/egl/egl_external_platform.d/15_nvidia_gbm.json",
|
||||
},
|
||||
)
|
||||
|
||||
drmDeviceNodes, err := newDRMDeviceDiscoverer(logger, devices, root)
|
||||
drmDeviceNodes, err := newDRMDeviceDiscoverer(logger, devices, driverRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create DRM device discoverer: %v", err)
|
||||
}
|
||||
@@ -71,6 +46,45 @@ func NewGraphicsDiscoverer(logger *logrus.Logger, devices image.VisibleDevices,
|
||||
|
||||
discover := Merge(
|
||||
Merge(drmDeviceNodes, drmByPathSymlinks),
|
||||
mounts,
|
||||
)
|
||||
|
||||
return discover, nil
|
||||
}
|
||||
|
||||
// NewGraphicsMountsDiscoverer creates a discoverer for the mounts required by graphics tools such as Vulkan.
|
||||
func NewGraphicsMountsDiscoverer(logger *logrus.Logger, driverRoot string) (Discover, error) {
|
||||
locator, err := lookup.NewLibraryLocator(logger, driverRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to construct library locator: %v", err)
|
||||
}
|
||||
libraries := NewMounts(
|
||||
logger,
|
||||
locator,
|
||||
driverRoot,
|
||||
[]string{
|
||||
"libnvidia-egl-gbm.so",
|
||||
},
|
||||
)
|
||||
|
||||
jsonMounts := NewMounts(
|
||||
logger,
|
||||
lookup.NewFileLocator(
|
||||
lookup.WithLogger(logger),
|
||||
lookup.WithRoot(driverRoot),
|
||||
lookup.WithSearchPaths("/etc", "/usr/share"),
|
||||
),
|
||||
driverRoot,
|
||||
[]string{
|
||||
"glvnd/egl_vendor.d/10_nvidia.json",
|
||||
"vulkan/icd.d/nvidia_icd.json",
|
||||
"vulkan/implicit_layer.d/nvidia_layers.json",
|
||||
"egl/egl_external_platform.d/15_nvidia_gbm.json",
|
||||
"egl/egl_external_platform.d/10_nvidia_wayland.json",
|
||||
},
|
||||
)
|
||||
|
||||
discover := Merge(
|
||||
libraries,
|
||||
jsonMounts,
|
||||
)
|
||||
@@ -80,21 +94,19 @@ func NewGraphicsDiscoverer(logger *logrus.Logger, devices image.VisibleDevices,
|
||||
|
||||
type drmDevicesByPath struct {
|
||||
None
|
||||
logger *logrus.Logger
|
||||
lookup lookup.Locator
|
||||
nvidiaCTKExecutablePath string
|
||||
root string
|
||||
devicesFrom Discover
|
||||
logger *logrus.Logger
|
||||
nvidiaCTKPath string
|
||||
driverRoot string
|
||||
devicesFrom Discover
|
||||
}
|
||||
|
||||
// newCreateDRMByPathSymlinks creates a discoverer for a hook to create the by-path symlinks for DRM devices discovered by the specified devices discoverer
|
||||
func newCreateDRMByPathSymlinks(logger *logrus.Logger, devices Discover, cfg *Config) Discover {
|
||||
d := drmDevicesByPath{
|
||||
logger: logger,
|
||||
lookup: lookup.NewExecutableLocator(logger, cfg.Root),
|
||||
nvidiaCTKExecutablePath: cfg.NVIDIAContainerToolkitCLIExecutablePath,
|
||||
root: cfg.Root,
|
||||
devicesFrom: devices,
|
||||
logger: logger,
|
||||
nvidiaCTKPath: FindNvidiaCTK(logger, cfg.NvidiaCTKPath),
|
||||
driverRoot: cfg.DriverRoot,
|
||||
devicesFrom: devices,
|
||||
}
|
||||
|
||||
return &d
|
||||
@@ -109,35 +121,26 @@ func (d drmDevicesByPath) Hooks() ([]Hook, error) {
|
||||
if len(devices) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
hookPath := nvidiaCTKDefaultFilePath
|
||||
targets, err := d.lookup.Locate(d.nvidiaCTKExecutablePath)
|
||||
if err != nil {
|
||||
d.logger.Warnf("Failed to locate %v: %v", d.nvidiaCTKExecutablePath, err)
|
||||
} else if len(targets) == 0 {
|
||||
d.logger.Warnf("%v not found", d.nvidiaCTKExecutablePath)
|
||||
} else {
|
||||
d.logger.Debugf("Found %v candidates: %v", d.nvidiaCTKExecutablePath, targets)
|
||||
hookPath = targets[0]
|
||||
}
|
||||
d.logger.Debugf("Using NVIDIA Container Toolkit CLI path %v", hookPath)
|
||||
|
||||
args := []string{hookPath, "hook", "create-symlinks"}
|
||||
links, err := d.getSpecificLinkArgs(devices)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to determine specific links: %v", err)
|
||||
}
|
||||
if len(links) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var args []string
|
||||
for _, l := range links {
|
||||
args = append(args, "--link", l)
|
||||
}
|
||||
|
||||
h := Hook{
|
||||
Lifecycle: cdi.CreateContainerHook,
|
||||
Path: hookPath,
|
||||
Args: args,
|
||||
}
|
||||
hook := CreateNvidiaCTKHook(
|
||||
d.nvidiaCTKPath,
|
||||
"create-symlinks",
|
||||
args...,
|
||||
)
|
||||
|
||||
return []Hook{h}, nil
|
||||
return []Hook{hook}, nil
|
||||
}
|
||||
|
||||
// getSpecificLinkArgs returns the required specific links that need to be created
|
||||
@@ -147,10 +150,14 @@ func (d drmDevicesByPath) getSpecificLinkArgs(devices []Device) ([]string, error
|
||||
selectedDevices[filepath.Base(d.HostPath)] = true
|
||||
}
|
||||
|
||||
linkLocator := lookup.NewFileLocator(d.logger, d.root)
|
||||
linkLocator := lookup.NewFileLocator(
|
||||
lookup.WithLogger(d.logger),
|
||||
lookup.WithRoot(d.driverRoot),
|
||||
)
|
||||
candidates, err := linkLocator.Locate("/dev/dri/by-path/pci-*-*")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to locate devices by path: %v", err)
|
||||
d.logger.Warningf("Failed to locate by-path links: %v; ignoring", err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var links []string
|
||||
@@ -171,18 +178,21 @@ func (d drmDevicesByPath) getSpecificLinkArgs(devices []Device) ([]string, error
|
||||
}
|
||||
|
||||
// newDRMDeviceDiscoverer creates a discoverer for the DRM devices associated with the requested devices.
|
||||
func newDRMDeviceDiscoverer(logger *logrus.Logger, devices image.VisibleDevices, root string) (Discover, error) {
|
||||
func newDRMDeviceDiscoverer(logger *logrus.Logger, devices image.VisibleDevices, driverRoot string) (Discover, error) {
|
||||
allDevices := NewDeviceDiscoverer(
|
||||
logger,
|
||||
lookup.NewCharDeviceLocator(logger, root),
|
||||
root,
|
||||
lookup.NewCharDeviceLocator(
|
||||
lookup.WithLogger(logger),
|
||||
lookup.WithRoot(driverRoot),
|
||||
),
|
||||
driverRoot,
|
||||
[]string{
|
||||
"/dev/dri/card*",
|
||||
"/dev/dri/renderD*",
|
||||
},
|
||||
)
|
||||
|
||||
filter, err := newDRMDeviceFilter(logger, devices, root)
|
||||
filter, err := newDRMDeviceFilter(logger, devices, driverRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to construct DRM device filter: %v", err)
|
||||
}
|
||||
@@ -198,8 +208,8 @@ func newDRMDeviceDiscoverer(logger *logrus.Logger, devices image.VisibleDevices,
|
||||
}
|
||||
|
||||
// newDRMDeviceFilter creates a filter that matches DRM devices nodes for the visible devices.
|
||||
func newDRMDeviceFilter(logger *logrus.Logger, devices image.VisibleDevices, root string) (Filter, error) {
|
||||
gpuInformationPaths, err := proc.GetInformationFilePaths(root)
|
||||
func newDRMDeviceFilter(logger *logrus.Logger, devices image.VisibleDevices, driverRoot string) (Filter, error) {
|
||||
gpuInformationPaths, err := proc.GetInformationFilePaths(driverRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read GPU information: %v", err)
|
||||
}
|
||||
|
||||
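A sketch of how the newly extracted constructor might be called on its own, assuming a logger in scope and a driver root of "/"; CDI spec generation can reuse it without the device discovery above:

    graphics, err := discover.NewGraphicsMountsDiscoverer(logger, "/")
    if err != nil {
        return fmt.Errorf("failed to create graphics mounts discoverer: %v", err)
    }
    // Returns the EGL/Vulkan JSON configs and libraries such as
    // libnvidia-egl-gbm.so as mounts relative to the driver root.
    graphicsMounts, err := graphics.Mounts()
    if err != nil {
        return fmt.Errorf("failed to discover graphics mounts: %v", err)
    }
    _ = graphicsMounts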
internal/discover/hooks.go (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
/**
|
||||
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
package discover
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/lookup"
|
||||
"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
nvidiaCTKExecutable = "nvidia-ctk"
|
||||
nvidiaCTKDefaultFilePath = "/usr/bin/nvidia-ctk"
|
||||
)
|
||||
|
||||
// CreateNvidiaCTKHook creates a hook which invokes the NVIDIA Container CLI hook subcommand.
|
||||
func CreateNvidiaCTKHook(executable string, hookName string, additionalArgs ...string) Hook {
|
||||
return Hook{
|
||||
Lifecycle: cdi.CreateContainerHook,
|
||||
Path: executable,
|
||||
Args: append([]string{filepath.Base(executable), "hook", hookName}, additionalArgs...),
|
||||
}
|
||||
}
|
||||
|
||||
// FindNvidiaCTK locates the nvidia-ctk executable to be used in hooks.
|
||||
// If an nvidia-ctk path is specified as an absolute path, it is used directly
|
||||
// without checking for existence of an executable at that path.
|
||||
func FindNvidiaCTK(logger *logrus.Logger, nvidiaCTKPath string) string {
|
||||
if filepath.IsAbs(nvidiaCTKPath) {
|
||||
logger.Debugf("Using specified NVIDIA Container Toolkit CLI path %v", nvidiaCTKPath)
|
||||
return nvidiaCTKPath
|
||||
}
|
||||
|
||||
if nvidiaCTKPath == "" {
|
||||
nvidiaCTKPath = nvidiaCTKExecutable
|
||||
}
|
||||
logger.Debugf("Locating NVIDIA Container Toolkit CLI as %v", nvidiaCTKPath)
|
||||
lookup := lookup.NewExecutableLocator(logger, "")
|
||||
hookPath := nvidiaCTKDefaultFilePath
|
||||
targets, err := lookup.Locate(nvidiaCTKPath)
|
||||
if err != nil {
|
||||
logger.Warnf("Failed to locate %v: %v", nvidiaCTKPath, err)
|
||||
} else if len(targets) == 0 {
|
||||
logger.Warnf("%v not found", nvidiaCTKPath)
|
||||
} else {
|
||||
logger.Debugf("Found %v candidates: %v", nvidiaCTKPath, targets)
|
||||
hookPath = targets[0]
|
||||
}
|
||||
logger.Debugf("Using NVIDIA Container Toolkit CLI path %v", hookPath)
|
||||
|
||||
return hookPath
|
||||
}
|
||||
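The two helpers are designed to be used together: resolve the nvidia-ctk path once, then stamp out hooks for the individual subcommands. A sketch, assuming cfg.NvidiaCTKPath may be empty, relative, or absolute, and an illustrative library folder:

    // Resolve the CLI path; an absolute path is used as-is, otherwise the
    // executable locator is consulted and /usr/bin/nvidia-ctk is the fallback.
    ctkPath := discover.FindNvidiaCTK(logger, cfg.NvidiaCTKPath)

    // Build a createContainer hook equivalent to running
    // `nvidia-ctk hook update-ldcache --folder /usr/lib/x86_64-linux-gnu`.
    hook := discover.CreateNvidiaCTKHook(ctkPath, "update-ldcache", "--folder", "/usr/lib/x86_64-linux-gnu")
    logger.Debugf("created hook %v with args %v", hook.Path, hook.Args)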
@@ -21,33 +21,25 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/lookup"
|
||||
"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NewLDCacheUpdateHook creates a discoverer that updates the ldcache for the specified mounts. A logger can also be specified
|
||||
func NewLDCacheUpdateHook(logger *logrus.Logger, mounts Discover, cfg *Config) (Discover, error) {
|
||||
d := ldconfig{
|
||||
logger: logger,
|
||||
mountsFrom: mounts,
|
||||
lookup: lookup.NewExecutableLocator(logger, cfg.Root),
|
||||
nvidiaCTKExecutablePath: cfg.NVIDIAContainerToolkitCLIExecutablePath,
|
||||
logger: logger,
|
||||
nvidiaCTKPath: FindNvidiaCTK(logger, cfg.NvidiaCTKPath),
|
||||
mountsFrom: mounts,
|
||||
}
|
||||
|
||||
return &d, nil
|
||||
}
|
||||
|
||||
const (
|
||||
nvidiaCTKDefaultFilePath = "/usr/bin/nvidia-ctk"
|
||||
)
|
||||
|
||||
type ldconfig struct {
|
||||
None
|
||||
logger *logrus.Logger
|
||||
mountsFrom Discover
|
||||
lookup lookup.Locator
|
||||
nvidiaCTKExecutablePath string
|
||||
logger *logrus.Logger
|
||||
nvidiaCTKPath string
|
||||
mountsFrom Discover
|
||||
}
|
||||
|
||||
// Hooks checks the required mounts for libraries and returns a hook to update the LDcache for the discovered paths.
|
||||
@@ -57,38 +49,27 @@ func (d ldconfig) Hooks() ([]Hook, error) {
|
||||
return nil, fmt.Errorf("failed to discover mounts for ldcache update: %v", err)
|
||||
}
|
||||
h := CreateLDCacheUpdateHook(
|
||||
d.logger,
|
||||
d.lookup,
|
||||
d.nvidiaCTKExecutablePath,
|
||||
nvidiaCTKDefaultFilePath,
|
||||
d.nvidiaCTKPath,
|
||||
getLibraryPaths(mounts),
|
||||
)
|
||||
return []Hook{h}, nil
|
||||
}
|
||||
|
||||
// CreateLDCacheUpdateHook locates the NVIDIA Container Toolkit CLI and creates a hook for updating the LD Cache
|
||||
func CreateLDCacheUpdateHook(logger *logrus.Logger, lookup lookup.Locator, execuable string, defaultPath string, libraries []string) Hook {
|
||||
hookPath := defaultPath
|
||||
targets, err := lookup.Locate(execuable)
|
||||
if err != nil {
|
||||
logger.Warnf("Failed to locate %v: %v", execuable, err)
|
||||
} else if len(targets) == 0 {
|
||||
logger.Warnf("%v not found", execuable)
|
||||
} else {
|
||||
logger.Debugf("Found %v candidates: %v", execuable, targets)
|
||||
hookPath = targets[0]
|
||||
}
|
||||
logger.Debugf("Using NVIDIA Container Toolkit CLI path %v", hookPath)
|
||||
|
||||
args := []string{filepath.Base(hookPath), "hook", "update-ldcache"}
|
||||
func CreateLDCacheUpdateHook(executable string, libraries []string) Hook {
|
||||
var args []string
|
||||
for _, f := range uniqueFolders(libraries) {
|
||||
args = append(args, "--folder", f)
|
||||
}
|
||||
return Hook{
|
||||
Lifecycle: cdi.CreateContainerHook,
|
||||
Path: hookPath,
|
||||
Args: args,
|
||||
}
|
||||
|
||||
hook := CreateNvidiaCTKHook(
|
||||
executable,
|
||||
"update-ldcache",
|
||||
args...,
|
||||
)
|
||||
|
||||
return hook
|
||||
|
||||
}
|
||||
|
||||
// getLibraryPaths extracts the library dirs from the specified mounts
|
||||
|
||||
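With the lookup removed, the hook constructor only formats arguments. A sketch of the resulting hook, using an illustrative library directory:

    hook := discover.CreateLDCacheUpdateHook(
        "/usr/bin/nvidia-ctk",
        []string{"/usr/lib/x86_64-linux-gnu"},
    )
    // hook.Lifecycle is createContainer and hook.Args takes the form
    // []string{"nvidia-ctk", "hook", "update-ldcache", "--folder", <folder>, ...},
    // with one --folder argument per unique library folder.
    _ = hook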
@@ -24,12 +24,16 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
testNvidiaCTKPath = "/foo/bar/nvidia-ctk"
|
||||
)
|
||||
|
||||
func TestLDCacheUpdateHook(t *testing.T) {
|
||||
logger, _ := testlog.NewNullLogger()
|
||||
|
||||
cfg := Config{
|
||||
Root: "/",
|
||||
NVIDIAContainerToolkitCLIExecutablePath: "/foo/bar/nvidia-ctk",
|
||||
DriverRoot: "/",
|
||||
NvidiaCTKPath: testNvidiaCTKPath,
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
@@ -86,7 +90,7 @@ func TestLDCacheUpdateHook(t *testing.T) {
|
||||
},
|
||||
}
|
||||
expectedHook := Hook{
|
||||
Path: "/usr/bin/nvidia-ctk",
|
||||
Path: testNvidiaCTKPath,
|
||||
Args: tc.expectedArgs,
|
||||
Lifecycle: "createContainer",
|
||||
}
|
||||
|
||||
@@ -21,28 +21,24 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/lookup"
|
||||
"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type symlinks struct {
|
||||
None
|
||||
logger *logrus.Logger
|
||||
lookup lookup.Locator
|
||||
nvidiaCTKExecutablePath string
|
||||
csvFiles []string
|
||||
mountsFrom Discover
|
||||
logger *logrus.Logger
|
||||
nvidiaCTKPath string
|
||||
csvFiles []string
|
||||
mountsFrom Discover
|
||||
}
|
||||
|
||||
// NewCreateSymlinksHook creates a discoverer for a hook that creates required symlinks in the container
|
||||
func NewCreateSymlinksHook(logger *logrus.Logger, csvFiles []string, mounts Discover, cfg *Config) (Discover, error) {
|
||||
d := symlinks{
|
||||
logger: logger,
|
||||
lookup: lookup.NewExecutableLocator(logger, cfg.Root),
|
||||
nvidiaCTKExecutablePath: cfg.NVIDIAContainerToolkitCLIExecutablePath,
|
||||
csvFiles: csvFiles,
|
||||
mountsFrom: mounts,
|
||||
logger: logger,
|
||||
nvidiaCTKPath: FindNvidiaCTK(logger, cfg.NvidiaCTKPath),
|
||||
csvFiles: csvFiles,
|
||||
mountsFrom: mounts,
|
||||
}
|
||||
|
||||
return &d, nil
|
||||
@@ -50,19 +46,7 @@ func NewCreateSymlinksHook(logger *logrus.Logger, csvFiles []string, mounts Disc
|
||||
|
||||
// Hooks returns a hook to create the symlinks from the required CSV files
|
||||
func (d symlinks) Hooks() ([]Hook, error) {
|
||||
hookPath := nvidiaCTKDefaultFilePath
|
||||
targets, err := d.lookup.Locate(d.nvidiaCTKExecutablePath)
|
||||
if err != nil {
|
||||
d.logger.Warnf("Failed to locate %v: %v", d.nvidiaCTKExecutablePath, err)
|
||||
} else if len(targets) == 0 {
|
||||
d.logger.Warnf("%v not found", d.nvidiaCTKExecutablePath)
|
||||
} else {
|
||||
d.logger.Debugf("Found %v candidates: %v", d.nvidiaCTKExecutablePath, targets)
|
||||
hookPath = targets[0]
|
||||
}
|
||||
d.logger.Debugf("Using NVIDIA Container Toolkit CLI path %v", hookPath)
|
||||
|
||||
args := []string{hookPath, "hook", "create-symlinks"}
|
||||
var args []string
|
||||
for _, f := range d.csvFiles {
|
||||
args = append(args, "--csv-filename", f)
|
||||
}
|
||||
@@ -73,13 +57,13 @@ func (d symlinks) Hooks() ([]Hook, error) {
|
||||
}
|
||||
args = append(args, links...)
|
||||
|
||||
h := Hook{
|
||||
Lifecycle: cdi.CreateContainerHook,
|
||||
Path: hookPath,
|
||||
Args: args,
|
||||
}
|
||||
hook := CreateNvidiaCTKHook(
|
||||
d.nvidiaCTKPath,
|
||||
"create-symlinks",
|
||||
args...,
|
||||
)
|
||||
|
||||
return []Hook{h}, nil
|
||||
return []Hook{hook}, nil
|
||||
}
|
||||
|
||||
// getSpecificLinkArgs returns the required specific links that need to be created
|
||||
|
||||
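The symlinks discoverer now builds its hook through the same helper; the resulting hook has this shape (the CSV filename below is purely illustrative):

    hook := discover.CreateNvidiaCTKHook(
        "/usr/bin/nvidia-ctk",
        "create-symlinks",
        "--csv-filename", "/etc/nvidia-container-runtime/host-files-for-container.d/l4t.csv",
    )
    // hook.Args == []string{"nvidia-ctk", "hook", "create-symlinks",
    //     "--csv-filename", "/etc/nvidia-container-runtime/host-files-for-container.d/l4t.csv"}
    _ = hook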
@@ -17,13 +17,9 @@
|
||||
package edits
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
|
||||
"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
|
||||
"github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/devices"
|
||||
)
|
||||
|
||||
type device discover.Device
|
||||
@@ -46,21 +42,18 @@ func (d device) toEdits() (*cdi.ContainerEdits, error) {
|
||||
// toSpec converts a discovered Device to a CDI Spec Device. Note
|
||||
// that missing info is filled in when edits are applied by querying the Device node.
|
||||
func (d device) toSpec() (*specs.DeviceNode, error) {
|
||||
// NOTE: This mirrors what cri-o does.
|
||||
// https://github.com/cri-o/cri-o/blob/ca3bb80a3dda0440659fcf8da8ed6f23211de94e/internal/config/device/device.go#L93
|
||||
// This can be removed once https://github.com/container-orchestrated-devices/container-device-interface/issues/72 is addressed
|
||||
dev, err := devices.DeviceFromPath(d.HostPath, "rwm")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query device node %v: %v", d.HostPath, err)
|
||||
// The HostPath field was added in the v0.5.0 CDI specification.
|
||||
// The cdi package uses strict unmarshalling when loading specs from file causing failures for
|
||||
// unexpected fields.
|
||||
// Since the behaviour for HostPath == "" and HostPath == Path are equivalent, we clear HostPath
|
||||
// if it is equal to Path to ensure compatibility with the widest range of specs.
|
||||
hostPath := d.HostPath
|
||||
if hostPath == d.Path {
|
||||
hostPath = ""
|
||||
}
|
||||
s := specs.DeviceNode{
|
||||
HostPath: hostPath,
|
||||
Path: d.Path,
|
||||
Type: string(dev.Type),
|
||||
Major: dev.Major,
|
||||
Minor: dev.Minor,
|
||||
FileMode: &dev.FileMode,
|
||||
UID: &dev.Uid,
|
||||
GID: &dev.Gid,
|
||||
}
|
||||
|
||||
return &s, nil
|
||||
|
||||
internal/edits/device_test.go (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
/**
|
||||
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package edits
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
|
||||
"github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDeviceToSpec(t *testing.T) {
|
||||
testCases := []struct {
|
||||
device discover.Device
|
||||
expected *specs.DeviceNode
|
||||
}{
|
||||
{
|
||||
device: discover.Device{
|
||||
Path: "/foo",
|
||||
},
|
||||
expected: &specs.DeviceNode{
|
||||
Path: "/foo",
|
||||
},
|
||||
},
|
||||
{
|
||||
device: discover.Device{
|
||||
Path: "/foo",
|
||||
HostPath: "/foo",
|
||||
},
|
||||
expected: &specs.DeviceNode{
|
||||
Path: "/foo",
|
||||
},
|
||||
},
|
||||
{
|
||||
device: discover.Device{
|
||||
Path: "/foo",
|
||||
HostPath: "/not/foo",
|
||||
},
|
||||
expected: &specs.DeviceNode{
|
||||
Path: "/foo",
|
||||
HostPath: "/not/foo",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
spec, err := device(tc.device).toSpec()
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, tc.expected, spec)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/oci"
|
||||
"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
|
||||
"github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
ociSpecs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -34,6 +35,20 @@ type edits struct {
|
||||
// NewSpecEdits creates a SpecModifier that defines the required OCI spec edits (as CDI ContainerEdits) from the specified
|
||||
// discoverer.
|
||||
func NewSpecEdits(logger *logrus.Logger, d discover.Discover) (oci.SpecModifier, error) {
|
||||
c, err := FromDiscoverer(d)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error constructing container edits: %v", err)
|
||||
}
|
||||
e := edits{
|
||||
ContainerEdits: *c,
|
||||
logger: logger,
|
||||
}
|
||||
|
||||
return &e, nil
|
||||
}
|
||||
|
||||
// FromDiscoverer creates CDI container edits for the specified discoverer.
|
||||
func FromDiscoverer(d discover.Discover) (*cdi.ContainerEdits, error) {
|
||||
devices, err := d.Devices()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to discover devices: %v", err)
|
||||
@@ -49,7 +64,7 @@ func NewSpecEdits(logger *logrus.Logger, d discover.Discover) (oci.SpecModifier,
|
||||
return nil, fmt.Errorf("failed to discover hooks: %v", err)
|
||||
}
|
||||
|
||||
c := cdi.ContainerEdits{}
|
||||
c := NewContainerEdits()
|
||||
for _, d := range devices {
|
||||
edits, err := device(d).toEdits()
|
||||
if err != nil {
|
||||
@@ -66,12 +81,15 @@ func NewSpecEdits(logger *logrus.Logger, d discover.Discover) (oci.SpecModifier,
|
||||
c.Append(hook(h).toEdits())
|
||||
}
|
||||
|
||||
e := edits{
|
||||
ContainerEdits: c,
|
||||
logger: logger,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
return &e, nil
|
||||
// NewContainerEdits is a utility function to create a CDI ContainerEdits struct.
|
||||
func NewContainerEdits() *cdi.ContainerEdits {
|
||||
c := cdi.ContainerEdits{
|
||||
ContainerEdits: &specs.ContainerEdits{},
|
||||
}
|
||||
return &c
|
||||
}
|
||||
|
||||
// Modify applies the defined edits to the incoming OCI spec
|
||||
@@ -90,7 +108,7 @@ func (e *edits) Modify(spec *ociSpecs.Spec) error {
|
||||
}
|
||||
e.logger.Infof("Hooks:")
|
||||
for _, hook := range e.Hooks {
|
||||
e.logger.Infof("Injecting %v", hook.Args)
|
||||
e.logger.Infof("Injecting %v %v", hook.Path, hook.Args)
|
||||
}
|
||||
|
||||
return e.Apply(spec)
|
||||
|
||||
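Splitting FromDiscoverer out of NewSpecEdits lets callers obtain the CDI container edits without wrapping them in an OCI spec modifier. A sketch, assuming d is any discover.Discover implementation, a logger is in scope, and the caller is outside the edits package:

    c, err := edits.FromDiscoverer(d)
    if err != nil {
        return fmt.Errorf("failed to construct container edits: %v", err)
    }
    // NewContainerEdits pre-populates the embedded CDI spec, so fields such as
    // c.Mounts can be ranged over even when nothing was discovered.
    for _, m := range c.Mounts {
        logger.Infof("discovered mount: %+v", m)
    }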
internal/edits/edits_test.go (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
/**
|
||||
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package edits
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/NVIDIA/nvidia-container-toolkit/internal/discover"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFromDiscovererAllowsMountsToIterate(t *testing.T) {
|
||||
edits, err := FromDiscoverer(discover.None{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Empty(t, edits.Mounts)
|
||||
}
|
||||
internal/info/proc/devices/devices.go (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
/*
|
||||
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
*/
|
||||
|
||||
package devices
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Device major numbers and device names for NVIDIA devices
|
||||
const (
|
||||
NVIDIAUVMMinor = 0
|
||||
NVIDIAUVMToolsMinor = 1
|
||||
NVIDIACTLMinor = 255
|
||||
NVIDIAModesetMinor = 254
|
||||
|
||||
NVIDIAFrontend = Name("nvidia-frontend")
|
||||
NVIDIAGPU = NVIDIAFrontend
|
||||
NVIDIACaps = Name("nvidia-caps")
|
||||
NVIDIAUVM = Name("nvidia-uvm")
|
||||
|
||||
procDevicesPath = "/proc/devices"
|
||||
nvidiaDevicePrefix = "nvidia"
|
||||
)
|
||||
|
||||
// Name represents the name of a device as specified under /proc/devices
|
||||
type Name string
|
||||
|
||||
// Major represents a device major as specified under /proc/devices
|
||||
type Major int
|
||||
|
||||
// Devices represents the set of devices under /proc/devices
|
||||
//
|
||||
//go:generate moq -stub -out devices_mock.go . Devices
|
||||
type Devices interface {
|
||||
Exists(Name) bool
|
||||
Get(Name) (Major, bool)
|
||||
}
|
||||
|
||||
type devices map[Name]Major
|
||||
|
||||
var _ Devices = devices(nil)
|
||||
|
||||
// Exists checks if a Device with a given name exists or not
|
||||
func (d devices) Exists(name Name) bool {
|
||||
_, exists := d[name]
|
||||
return exists
|
||||
}
|
||||
|
||||
// Get a Device from Devices
|
||||
func (d devices) Get(name Name) (Major, bool) {
|
||||
device, exists := d[name]
|
||||
return device, exists
|
||||
}
|
||||
|
||||
// GetNVIDIADevices returns the set of NVIDIA Devices on the machine
|
||||
func GetNVIDIADevices() (Devices, error) {
|
||||
devicesFile, err := os.Open(procDevicesPath)
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening devices file: %v", err)
|
||||
}
|
||||
defer devicesFile.Close()
|
||||
|
||||
return nvidiaDeviceFrom(devicesFile), nil
|
||||
}
|
||||
|
||||
func nvidiaDeviceFrom(reader io.Reader) devices {
|
||||
allDevices := devicesFrom(reader)
|
||||
nvidiaDevices := make(devices)
|
||||
for n, d := range allDevices {
|
||||
if !strings.HasPrefix(string(n), nvidiaDevicePrefix) {
|
||||
continue
|
||||
}
|
||||
nvidiaDevices[n] = d
|
||||
}
|
||||
|
||||
return nvidiaDevices
|
||||
}
|
||||
|
||||
func devicesFrom(reader io.Reader) devices {
|
||||
allDevices := make(devices)
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
device, major, err := processProcDeviceLine(scanner.Text())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
allDevices[device] = major
|
||||
}
|
||||
return allDevices
|
||||
}
|
||||
|
||||
func processProcDeviceLine(line string) (Name, Major, error) {
|
||||
trimmed := strings.TrimSpace(line)
|
||||
|
||||
var name Name
|
||||
var major Major
|
||||
|
||||
n, _ := fmt.Sscanf(trimmed, "%d %s", &major, &name)
|
||||
if n == 2 {
|
||||
return name, major, nil
|
||||
}
|
||||
|
||||
return "", 0, fmt.Errorf("unparsable line: %v", line)
|
||||
}
|
||||
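A short consumption sketch for the new package, looking up the major number registered for nvidia-uvm; it assumes a logger in scope and a function returning an error:

    devs, err := devices.GetNVIDIADevices()
    if err != nil {
        return fmt.Errorf("failed to read /proc/devices: %v", err)
    }
    if devs == nil {
        // /proc/devices does not exist on this system.
        return nil
    }
    if major, ok := devs.Get(devices.NVIDIAUVM); ok {
        logger.Infof("nvidia-uvm is registered with major number %d", major)
    }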
internal/info/proc/devices/devices_mock.go (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
// Code generated by moq; DO NOT EDIT.
|
||||
// github.com/matryer/moq
|
||||
|
||||
package devices
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Ensure, that DevicesMock does implement Devices.
|
||||
// If this is not the case, regenerate this file with moq.
|
||||
var _ Devices = &DevicesMock{}
|
||||
|
||||
// DevicesMock is a mock implementation of Devices.
|
||||
//
|
||||
// func TestSomethingThatUsesDevices(t *testing.T) {
|
||||
//
|
||||
// // make and configure a mocked Devices
|
||||
// mockedDevices := &DevicesMock{
|
||||
// ExistsFunc: func(name Name) bool {
|
||||
// panic("mock out the Exists method")
|
||||
// },
|
||||
// GetFunc: func(name Name) (Major, bool) {
|
||||
// panic("mock out the Get method")
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// // use mockedDevices in code that requires Devices
|
||||
// // and then make assertions.
|
||||
//
|
||||
// }
|
||||
type DevicesMock struct {
|
||||
// ExistsFunc mocks the Exists method.
|
||||
ExistsFunc func(name Name) bool
|
||||
|
||||
// GetFunc mocks the Get method.
|
||||
GetFunc func(name Name) (Major, bool)
|
||||
|
||||
// calls tracks calls to the methods.
|
||||
calls struct {
|
||||
// Exists holds details about calls to the Exists method.
|
||||
Exists []struct {
|
||||
// Name is the name argument value.
|
||||
Name Name
|
||||
}
|
||||
// Get holds details about calls to the Get method.
|
||||
Get []struct {
|
||||
// Name is the name argument value.
|
||||
Name Name
|
||||
}
|
||||
}
|
||||
lockExists sync.RWMutex
|
||||
lockGet sync.RWMutex
|
||||
}
|
||||
|
||||
// Exists calls ExistsFunc.
|
||||
func (mock *DevicesMock) Exists(name Name) bool {
|
||||
callInfo := struct {
|
||||
Name Name
|
||||
}{
|
||||
Name: name,
|
||||
}
|
||||
mock.lockExists.Lock()
|
||||
mock.calls.Exists = append(mock.calls.Exists, callInfo)
|
||||
mock.lockExists.Unlock()
|
||||
if mock.ExistsFunc == nil {
|
||||
var (
|
||||
bOut bool
|
||||
)
|
||||
return bOut
|
||||
}
|
||||
return mock.ExistsFunc(name)
|
||||
}
|
||||
|
||||
// ExistsCalls gets all the calls that were made to Exists.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedDevices.ExistsCalls())
|
||||
func (mock *DevicesMock) ExistsCalls() []struct {
|
||||
Name Name
|
||||
} {
|
||||
var calls []struct {
|
||||
Name Name
|
||||
}
|
||||
mock.lockExists.RLock()
|
||||
calls = mock.calls.Exists
|
||||
mock.lockExists.RUnlock()
|
||||
return calls
|
||||
}
|
||||
|
||||
// Get calls GetFunc.
|
||||
func (mock *DevicesMock) Get(name Name) (Major, bool) {
|
||||
callInfo := struct {
|
||||
Name Name
|
||||
}{
|
||||
Name: name,
|
||||
}
|
||||
mock.lockGet.Lock()
|
||||
mock.calls.Get = append(mock.calls.Get, callInfo)
|
||||
mock.lockGet.Unlock()
|
||||
if mock.GetFunc == nil {
|
||||
var (
|
||||
majorOut Major
|
||||
bOut bool
|
||||
)
|
||||
return majorOut, bOut
|
||||
}
|
||||
return mock.GetFunc(name)
|
||||
}
|
||||
|
||||
// GetCalls gets all the calls that were made to Get.
|
||||
// Check the length with:
|
||||
//
|
||||
// len(mockedDevices.GetCalls())
|
||||
func (mock *DevicesMock) GetCalls() []struct {
|
||||
Name Name
|
||||
} {
|
||||
var calls []struct {
|
||||
Name Name
|
||||
}
|
||||
mock.lockGet.RLock()
|
||||
calls = mock.calls.Get
|
||||
mock.lockGet.RUnlock()
|
||||
return calls
|
||||
}
|
||||
internal/info/proc/devices/devices_test.go (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
/*
|
||||
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
*/
|
||||
|
||||
package devices
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNvidiaDevices(t *testing.T) {
|
||||
devices := map[Name]Major{
|
||||
"nvidia-frontend": 195,
|
||||
"nvidia-nvlink": 234,
|
||||
"nvidia-caps": 235,
|
||||
"nvidia-uvm": 510,
|
||||
"nvidia-nvswitch": 511,
|
||||
}
|
||||
|
||||
nvidiaDevices := testDevices(devices)
|
||||
for name, major := range devices {
|
||||
device, exists := nvidiaDevices.Get(name)
|
||||
require.True(t, exists, "Unexpected missing device")
|
||||
require.Equal(t, device, major, "Unexpected device major")
|
||||
}
|
||||
_, exists := nvidiaDevices.Get("bogus")
|
||||
require.False(t, exists, "Unexpected 'bogus' device found")
|
||||
}
|
||||
|
||||
func TestProcessDeviceFile(t *testing.T) {
|
||||
testCases := []struct {
|
||||
lines []string
|
||||
expected devices
|
||||
}{
|
||||
{[]string{}, make(devices)},
|
||||
{[]string{"Not a valid line:"}, make(devices)},
|
||||
{[]string{"195 nvidia-frontend"}, devices{"nvidia-frontend": 195}},
|
||||
{[]string{"195 nvidia-frontend", "235 nvidia-caps"}, devices{"nvidia-frontend": 195, "nvidia-caps": 235}},
|
||||
{[]string{" 195 nvidia-frontend"}, devices{"nvidia-frontend": 195}},
|
||||
{[]string{"Not a valid line:", "", "195 nvidia-frontend"}, devices{"nvidia-frontend": 195}},
|
||||
{[]string{"195 not-nvidia-frontend"}, make(devices)},
|
||||
}
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("testcase %d", i), func(t *testing.T) {
|
||||
contents := strings.NewReader(strings.Join(tc.lines, "\n"))
|
||||
d := nvidiaDeviceFrom(contents)
|
||||
|
||||
require.EqualValues(t, tc.expected, d)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessDeviceFileLine(t *testing.T) {
|
||||
testCases := []struct {
|
||||
line string
|
||||
name Name
|
||||
major Major
|
||||
err bool
|
||||
}{
|
||||
{"", "", 0, true},
|
||||
{"0", "", 0, true},
|
||||
{"notint nvidia-frontend", "", 0, true},
|
||||
{"195 nvidia-frontend", "nvidia-frontend", 195, false},
|
||||
{" 195 nvidia-frontend", "nvidia-frontend", 195, false},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("testcase %d", i), func(t *testing.T) {
|
||||
name, major, err := processProcDeviceLine(tc.line)
|
||||
|
||||
require.Equal(t, tc.name, name)
|
||||
require.Equal(t, tc.major, major)
|
||||
if tc.err {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testDevices creates a set of test NVIDIA devices
|
||||
func testDevices(d map[Name]Major) Devices {
|
||||
return devices(d)
|
||||
}
|
||||
@@ -22,8 +22,10 @@ import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
@@ -177,13 +179,15 @@ func (c *ldcache) parse() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// List creates a list of libraries in the ldcache.
|
||||
// The 32-bit and 64-bit libraries are returned separately.
|
||||
func (c *ldcache) List() ([]string, []string) {
|
||||
paths := make(map[int][]string)
|
||||
|
||||
processed := make(map[string]bool)
|
||||
type entry struct {
|
||||
libname string
|
||||
bits int
|
||||
value string
|
||||
}
|
||||
|
||||
// getEntries returns the entries of the ldcache in a go-friendly struct.
|
||||
func (c *ldcache) getEntries(selected func(string) bool) []entry {
|
||||
var entries []entry
|
||||
for _, e := range c.entries {
|
||||
bits := 0
|
||||
if ((e.Flags & flagTypeMask) & flagTypeELF) == 0 {
|
||||
@@ -204,89 +208,119 @@ func (c *ldcache) List() ([]string, []string) {
|
||||
if e.Key > uint32(len(c.libs)) || e.Value > uint32(len(c.libs)) {
|
||||
continue
|
||||
}
|
||||
value := c.libs[e.Value:]
|
||||
|
||||
n := bytes.IndexByte(value, 0)
|
||||
if n < 0 {
|
||||
break
|
||||
lib := bytesToString(c.libs[e.Key:])
|
||||
if lib == "" {
|
||||
c.logger.Debugf("Skipping invalid lib")
|
||||
continue
|
||||
}
|
||||
if !selected(lib) {
|
||||
continue
|
||||
}
|
||||
value := bytesToString(c.libs[e.Value:])
|
||||
if value == "" {
|
||||
c.logger.Debugf("Skipping invalid value for lib %v", lib)
|
||||
continue
|
||||
}
|
||||
e := entry{
|
||||
libname: lib,
|
||||
bits: bits,
|
||||
value: value,
|
||||
}
|
||||
|
||||
name := filepath.Join(c.root, strn(value, n))
|
||||
c.logger.Debugf("checking %v", string(name))
|
||||
entries = append(entries, e)
|
||||
}
|
||||
|
||||
path, err := filepath.EvalSymlinks(name)
|
||||
return entries
|
||||
}
|
||||
|
||||
// List creates a list of libraries in the ldcache.
|
||||
// The 32-bit and 64-bit libraries are returned separately.
|
||||
func (c *ldcache) List() ([]string, []string) {
|
||||
all := func(s string) bool { return true }
|
||||
|
||||
return c.resolveSelected(all)
|
||||
}
|
||||
|
||||
// Lookup searches the ldcache for the specified prefixes.
|
||||
// The 32-bit and 64-bit libraries matching the prefixes are returned.
|
||||
func (c *ldcache) Lookup(libPrefixes ...string) ([]string, []string) {
|
||||
c.logger.Debugf("Looking up %v in cache", libPrefixes)
|
||||
|
||||
// We define a functor to check whether a given library name matches any of the prefixes
|
||||
matchesAnyPrefix := func(s string) bool {
|
||||
for _, p := range libPrefixes {
|
||||
if strings.HasPrefix(s, p) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
return c.resolveSelected(matchesAnyPrefix)
|
||||
}
|
||||
|
||||
// resolveSelected processes the entries in the ldcache based on the supplied filter and returns the resolved paths.
// The resolved paths are returned separately for 32-bit and 64-bit libraries.
|
||||
func (c *ldcache) resolveSelected(selected func(string) bool) ([]string, []string) {
|
||||
paths := make(map[int][]string)
|
||||
processed := make(map[string]bool)
|
||||
|
||||
for _, e := range c.getEntries(selected) {
|
||||
path, err := c.resolve(e.value)
|
||||
if err != nil {
|
||||
c.logger.Debugf("could not resolve symlink for %v", name)
|
||||
break
|
||||
c.logger.Debugf("Could not resolve entry: %v", err)
|
||||
continue
|
||||
}
|
||||
if processed[path] {
|
||||
continue
|
||||
}
|
||||
paths[bits] = append(paths[bits], path)
|
||||
paths[e.bits] = append(paths[e.bits], path)
|
||||
processed[path] = true
|
||||
}
|
||||
|
||||
return paths[32], paths[64]
|
||||
}
|
||||
|
||||
// Lookup searches the ldcache for the specified prefixes.
|
||||
// The 32-bit and 64-bit libraries matching the prefixes are returned.
|
||||
func (c *ldcache) Lookup(libs ...string) (paths32, paths64 []string) {
|
||||
c.logger.Debugf("Looking up %v in cache", libs)
|
||||
type void struct{}
|
||||
var paths *[]string
|
||||
// resolve resolves the specified ldcache entry based on the value being processed.
|
||||
// The input is the name of the entry in the cache.
|
||||
func (c *ldcache) resolve(target string) (string, error) {
|
||||
name := filepath.Join(c.root, target)
|
||||
|
||||
set := make(map[string]void)
|
||||
prefix := make([][]byte, len(libs))
|
||||
c.logger.Debugf("checking %v", string(name))
|
||||
|
||||
for i := range libs {
|
||||
prefix[i] = []byte(libs[i])
|
||||
info, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get file info: %v", info)
|
||||
}
|
||||
for _, e := range c.entries {
|
||||
if ((e.Flags & flagTypeMask) & flagTypeELF) == 0 {
|
||||
continue
|
||||
}
|
||||
switch e.Flags & flagArchMask {
|
||||
case flagArchX8664:
|
||||
fallthrough
|
||||
case flagArchPpc64le:
|
||||
paths = &paths64
|
||||
case flagArchX32:
|
||||
fallthrough
|
||||
case flagArchI386:
|
||||
paths = &paths32
|
||||
default:
|
||||
continue
|
||||
}
|
||||
if e.Key > uint32(len(c.libs)) || e.Value > uint32(len(c.libs)) {
|
||||
continue
|
||||
}
|
||||
lib := c.libs[e.Key:]
|
||||
value := c.libs[e.Value:]
|
||||
|
||||
for _, p := range prefix {
|
||||
if bytes.HasPrefix(lib, p) {
|
||||
n := bytes.IndexByte(value, 0)
|
||||
if n < 0 {
|
||||
break
|
||||
}
|
||||
|
||||
name := filepath.Join(c.root, strn(value, n))
|
||||
c.logger.Debugf("checking %v", string(name))
|
||||
|
||||
path, err := filepath.EvalSymlinks(name)
|
||||
if err != nil {
|
||||
c.logger.Debugf("could not resolve symlink for %v", name)
|
||||
break
|
||||
}
|
||||
if _, ok := set[path]; ok {
|
||||
break
|
||||
}
|
||||
set[path] = void{}
|
||||
*paths = append(*paths, path)
|
||||
break
|
||||
}
|
||||
}
|
||||
if info.Mode()&os.ModeSymlink == 0 {
|
||||
c.logger.Debugf("Resolved regular file: %v", name)
|
||||
return name, nil
|
||||
}
|
||||
return
|
||||
|
||||
link, err := os.Readlink(name)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to resolve symlink: %v", err)
|
||||
}
|
||||
|
||||
// We return absolute paths for all targets
|
||||
if !filepath.IsAbs(link) || strings.HasPrefix(link, ".") {
|
||||
link = filepath.Join(filepath.Dir(target), link)
|
||||
}
|
||||
|
||||
// Ensure that the returned path is relative to the root.
|
||||
link = filepath.Join(c.root, link)
|
||||
|
||||
c.logger.Debugf("Resolved link: '%v' => '%v'", name, link)
|
||||
return link, nil
|
||||
}
|
||||
|
||||
// bytesToString converts a byte slice to a string.
|
||||
// This assumes that the byte slice is null-terminated
|
||||
func bytesToString(value []byte) string {
|
||||
n := bytes.IndexByte(value, 0)
|
||||
if n < 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strn(value, n)
|
||||
}
|
||||
|
||||
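List and Lookup keep their public behaviour but now share resolveSelected. A usage sketch, assuming cache is an ldcache handle obtained from this package's constructor (not part of this diff) and a logger is in scope:

    // Resolve the 32-bit and 64-bit entries whose library names start with the given prefixes.
    paths32, paths64 := cache.Lookup("libcuda.so", "libnvidia-ml.so")
    for _, p := range paths64 {
        logger.Debugf("64-bit library: %v", p)
    }
    _ = paths32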
@@ -19,9 +19,6 @@ package lookup
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -30,14 +27,14 @@ const (
|
||||
|
||||
// NewCharDeviceLocator creates a Locator that can be used to find char devices at the specified root. A logger is
|
||||
// also specified.
|
||||
func NewCharDeviceLocator(logger *logrus.Logger, root string) Locator {
|
||||
l := file{
|
||||
logger: logger,
|
||||
prefixes: []string{root, filepath.Join(root, devRoot)},
|
||||
filter: assertCharDevice,
|
||||
}
|
||||
|
||||
return &l
|
||||
func NewCharDeviceLocator(opts ...Option) Locator {
|
||||
opts = append(opts,
|
||||
WithSearchPaths("", devRoot),
|
||||
WithFilter(assertCharDevice),
|
||||
)
|
||||
return NewFileLocator(
|
||||
opts...,
|
||||
)
|
||||
}
|
||||
|
||||
// assertCharDevice checks whether the specified path is a char device and returns an error if this is not the case.
|
||||
|
||||
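The migration pattern used throughout this change set: positional (logger, root) constructors are replaced by option lists. For the char-device locator the two calls are equivalent:

    // Before:
    // locator := lookup.NewCharDeviceLocator(logger, driverRoot)

    // After:
    locator := lookup.NewCharDeviceLocator(
        lookup.WithLogger(logger),
        lookup.WithRoot(driverRoot),
    )
    _ = locator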
internal/lookup/device_test.go (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
/**
|
||||
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package lookup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
testlog "github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCharDeviceLocator(t *testing.T) {
|
||||
logger, _ := testlog.NewNullLogger()
|
||||
|
||||
testCases := []struct {
|
||||
root string
|
||||
expectedPrefixes []string
|
||||
}{
|
||||
{
|
||||
root: "",
|
||||
expectedPrefixes: []string{"", "/dev"},
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
expectedPrefixes: []string{"/", "/dev"},
|
||||
},
|
||||
{
|
||||
root: "/some/root",
|
||||
expectedPrefixes: []string{"/some/root", "/some/root/dev"},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
f := NewCharDeviceLocator(
|
||||
WithLogger(logger),
|
||||
WithRoot(tc.root),
|
||||
).(*file)
|
||||
|
||||
require.EqualValues(t, tc.expectedPrefixes, f.prefixes)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -26,13 +26,11 @@ import (
|
||||
// NewDirectoryLocator creates a Locator that can be used to find directories at the specified root. A logger
|
||||
// is also specified.
|
||||
func NewDirectoryLocator(logger *log.Logger, root string) Locator {
|
||||
l := file{
|
||||
logger: logger,
|
||||
prefixes: []string{root},
|
||||
filter: assertDirectory,
|
||||
}
|
||||
|
||||
return &l
|
||||
return NewFileLocator(
|
||||
WithLogger(logger),
|
||||
WithRoot(root),
|
||||
WithFilter(assertDirectory),
|
||||
)
|
||||
}
|
||||
|
||||
// assertDirectory checks whether the specified path is a directory.
|
||||
|
||||
@@ -19,7 +19,6 @@ package lookup
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -33,17 +32,22 @@ type executable struct {
|
||||
func NewExecutableLocator(logger *log.Logger, root string) Locator {
|
||||
paths := GetPaths(root)
|
||||
|
||||
var prefixes []string
|
||||
for _, dir := range paths {
|
||||
prefixes = append(prefixes, filepath.Join(root, dir))
|
||||
}
|
||||
return newExecutableLocator(logger, root, paths...)
|
||||
}
|
||||
|
||||
func newExecutableLocator(logger *log.Logger, root string, paths ...string) *executable {
|
||||
f := newFileLocator(
|
||||
WithLogger(logger),
|
||||
WithRoot(root),
|
||||
WithSearchPaths(paths...),
|
||||
WithFilter(assertExecutable),
|
||||
WithCount(1),
|
||||
)
|
||||
|
||||
l := executable{
|
||||
file: file{
|
||||
logger: logger,
|
||||
prefixes: prefixes,
|
||||
filter: assertExecutable,
|
||||
},
|
||||
file: *f,
|
||||
}
|
||||
|
||||
return &l
|
||||
}
|
||||
|
||||
|
||||
internal/lookup/executable_test.go (new file, 77 lines)
@@ -0,0 +1,77 @@
|
||||
/**
|
||||
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package lookup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
testlog "github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestExecutableLocator(t *testing.T) {
|
||||
logger, _ := testlog.NewNullLogger()
|
||||
|
||||
testCases := []struct {
|
||||
root string
|
||||
paths []string
|
||||
expectedPrefixes []string
|
||||
}{
|
||||
{
|
||||
root: "",
|
||||
expectedPrefixes: []string{""},
|
||||
},
|
||||
{
|
||||
root: "",
|
||||
paths: []string{"/"},
|
||||
expectedPrefixes: []string{"/"},
|
||||
},
|
||||
{
|
||||
root: "",
|
||||
paths: []string{"/", "/bin"},
|
||||
expectedPrefixes: []string{"/", "/bin"},
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
expectedPrefixes: []string{"/"},
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
paths: []string{"/"},
|
||||
expectedPrefixes: []string{"/"},
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
paths: []string{"/", "/bin"},
|
||||
expectedPrefixes: []string{"/", "/bin"},
|
||||
},
|
||||
{
|
||||
root: "/some/path",
|
||||
paths: []string{"/", "/bin"},
|
||||
expectedPrefixes: []string{"/some/path", "/some/path/bin"},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
e := newExecutableLocator(logger, tc.root, tc.paths...)
|
||||
|
||||
require.EqualValues(t, tc.expectedPrefixes, e.prefixes)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -27,33 +27,124 @@ import (
|
||||
// file can be used to locate files (or file-like elements) at a specified set of
|
||||
// prefixes. The validity of a file is determined by a filter function.
|
||||
type file struct {
|
||||
logger *log.Logger
|
||||
prefixes []string
|
||||
filter func(string) error
|
||||
logger *log.Logger
|
||||
root string
|
||||
prefixes []string
|
||||
filter func(string) error
|
||||
count int
|
||||
isOptional bool
|
||||
}
|
||||
|
||||
// NewFileLocator creates a Locator that can be used to find files at the specified root. A logger
|
||||
// can also be specified.
|
||||
func NewFileLocator(logger *log.Logger, root string) Locator {
|
||||
l := newFileLocator(logger, root)
|
||||
// Option defines a function for passing options to the NewFileLocator() call
|
||||
type Option func(*file)
|
||||
|
||||
return &l
|
||||
}
|
||||
|
||||
func newFileLocator(logger *log.Logger, root string) file {
|
||||
return file{
|
||||
logger: logger,
|
||||
prefixes: []string{root},
|
||||
filter: assertFile,
|
||||
// WithRoot sets the root for the file locator
|
||||
func WithRoot(root string) Option {
|
||||
return func(f *file) {
|
||||
f.root = root
|
||||
}
|
||||
}
|
||||
|
||||
// WithLogger sets the logger for the file locator
|
||||
func WithLogger(logger *log.Logger) Option {
|
||||
return func(f *file) {
|
||||
f.logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
// WithSearchPaths sets the search paths for the file locator.
|
||||
func WithSearchPaths(paths ...string) Option {
|
||||
return func(f *file) {
|
||||
f.prefixes = paths
|
||||
}
|
||||
}
|
||||
|
||||
// WithFilter sets the filter for the file locator
|
||||
// The filter is called for each candidate file; candidates for which the filter returns nil are considered.
|
||||
func WithFilter(assert func(string) error) Option {
|
||||
return func(f *file) {
|
||||
f.filter = assert
|
||||
}
|
||||
}
|
||||
|
||||
// WithCount sets the maximum number of candidates to discover
|
||||
func WithCount(count int) Option {
|
||||
return func(f *file) {
|
||||
f.count = count
|
||||
}
|
||||
}
|
||||
|
||||
// WithOptional sets the optional flag for the file locator
|
||||
// If the optional flag is set, the locator will not return an error if the file is not found.
|
||||
func WithOptional(optional bool) Option {
|
||||
return func(f *file) {
|
||||
f.isOptional = optional
|
||||
}
|
||||
}
|
||||
|
||||
// NewFileLocator creates a Locator that can be used to find files with the specified options.
|
||||
func NewFileLocator(opts ...Option) Locator {
|
||||
return newFileLocator(opts...)
|
||||
}
|
||||
|
||||
func newFileLocator(opts ...Option) *file {
|
||||
f := &file{}
|
||||
for _, opt := range opts {
|
||||
opt(f)
|
||||
}
|
||||
if f.logger == nil {
|
||||
f.logger = log.StandardLogger()
|
||||
}
|
||||
if f.filter == nil {
|
||||
f.filter = assertFile
|
||||
}
|
||||
// Since the `Locate` implementations rely on the root already being specified we update
|
||||
// the prefixes to include the root.
|
||||
f.prefixes = getSearchPrefixes(f.root, f.prefixes...)
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// getSearchPrefixes generates a list of unique paths to be searched by a file locator.
|
||||
//
|
||||
// For each of the unique prefixes <p> specified, the path <root><p> is searched, where <root> is the
|
||||
// specified root. If no prefixes are specified, <root> is returned as the only search prefix.
|
||||
//
|
||||
// Note that an empty root is equivalent to searching relative to the current working directory, and
|
||||
// if the root filesystem should be searched instead, root should be specified as "/" explicitly.
|
||||
//
|
||||
// Also, a prefix of "" forces the root to be included in returned set of paths. This means that if
|
||||
// the root in addition to another prefix must be searched the function should be called with:
|
||||
//
|
||||
// getSearchPrefixes("/root", "", "another/path")
|
||||
//
|
||||
// and will result in the search paths []{"/root", "/root/another/path"} being returned.
|
||||
func getSearchPrefixes(root string, prefixes ...string) []string {
|
||||
seen := make(map[string]bool)
|
||||
var uniquePrefixes []string
|
||||
for _, p := range prefixes {
|
||||
if seen[p] {
|
||||
continue
|
||||
}
|
||||
seen[p] = true
|
||||
uniquePrefixes = append(uniquePrefixes, filepath.Join(root, p))
|
||||
}
|
||||
|
||||
if len(uniquePrefixes) == 0 {
|
||||
uniquePrefixes = append(uniquePrefixes, root)
|
||||
}
|
||||
|
||||
return uniquePrefixes
|
||||
}
|
||||
|
||||
var _ Locator = (*file)(nil)
|
||||
|
||||
// Locate attempts to find files with names matching the specified pattern.
|
||||
// All prefixes are searched and any matching candidates are returned. If no matches are found, an error is returned.
|
||||
func (p file) Locate(pattern string) ([]string, error) {
|
||||
var filenames []string
|
||||
|
||||
visit:
|
||||
for _, prefix := range p.prefixes {
|
||||
pathPattern := filepath.Join(prefix, pattern)
|
||||
candidates, err := filepath.Glob(pathPattern)
|
||||
@@ -69,9 +160,14 @@ func (p file) Locate(pattern string) ([]string, error) {
|
||||
continue
|
||||
}
|
||||
filenames = append(filenames, candidate)
|
||||
if p.count > 0 && len(filenames) == p.count {
|
||||
p.logger.Debugf("Found %d candidates; ignoring further candidates", len(filenames))
|
||||
break visit
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(filenames) == 0 {
|
||||
|
||||
if !p.isOptional && len(filenames) == 0 {
|
||||
return nil, fmt.Errorf("pattern %v not found", pattern)
|
||||
}
|
||||
return filenames, nil
|
||||
|
||||
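Putting the options together, the Vulkan/EGL JSON lookup used by the graphics discoverer earlier in this diff can be expressed as follows; the driver root is illustrative and a logger is assumed to be in scope:

    locator := lookup.NewFileLocator(
        lookup.WithLogger(logger),
        lookup.WithRoot("/run/nvidia/driver"),
        lookup.WithSearchPaths("/etc", "/usr/share"),
    )
    // Patterns are resolved against /run/nvidia/driver/etc and /run/nvidia/driver/usr/share.
    candidates, err := locator.Locate("vulkan/icd.d/nvidia_icd.json")
    if err != nil {
        return fmt.Errorf("failed to locate Vulkan ICD file: %v", err)
    }
    logger.Debugf("found candidates: %v", candidates)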
internal/lookup/file_test.go (new file, 82 lines)
@@ -0,0 +1,82 @@
|
||||
/**
|
||||
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package lookup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetSearchPrefixes(t *testing.T) {
|
||||
testCases := []struct {
|
||||
root string
|
||||
prefixes []string
|
||||
expectedPrefixes []string
|
||||
}{
|
||||
{
|
||||
root: "",
|
||||
expectedPrefixes: []string{""},
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
expectedPrefixes: []string{"/"},
|
||||
},
|
||||
{
|
||||
root: "/some/root",
|
||||
expectedPrefixes: []string{"/some/root"},
|
||||
},
|
||||
{
|
||||
root: "",
|
||||
prefixes: []string{"foo", "bar"},
|
||||
expectedPrefixes: []string{"foo", "bar"},
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
prefixes: []string{"foo", "bar"},
|
||||
expectedPrefixes: []string{"/foo", "/bar"},
|
||||
},
|
||||
{
|
||||
root: "/",
|
||||
prefixes: []string{"/foo", "/bar"},
|
||||
expectedPrefixes: []string{"/foo", "/bar"},
|
||||
},
|
||||
{
|
||||
root: "/some/root",
|
||||
prefixes: []string{"foo", "bar"},
|
||||
expectedPrefixes: []string{"/some/root/foo", "/some/root/bar"},
|
||||
},
|
||||
{
|
||||
root: "",
|
||||
prefixes: []string{"foo", "bar", "bar", "foo"},
|
||||
expectedPrefixes: []string{"foo", "bar"},
|
||||
},
|
||||
{
|
||||
root: "/some/root",
|
||||
prefixes: []string{"foo", "bar", "foo", "bar"},
|
||||
expectedPrefixes: []string{"/some/root/foo", "/some/root/bar"},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
prefixes := getSearchPrefixes(tc.root, tc.prefixes...)
|
||||
require.EqualValues(t, tc.expectedPrefixes, prefixes)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -35,8 +35,9 @@ type symlink struct {
// NewSymlinkChainLocator creates a locator that can be used for locating files through symlinks.
// A logger can also be specified.
func NewSymlinkChainLocator(logger *logrus.Logger, root string) Locator {
	f := newFileLocator(WithLogger(logger), WithRoot(root))
	l := symlinkChain{
		file: newFileLocator(logger, root),
		file: *f,
	}

	return &l
@@ -45,8 +46,9 @@ func NewSymlinkChainLocator(logger *logrus.Logger, root string) Locator {
// NewSymlinkLocator creates a locator that can be used for locating files through symlinks.
// A logger can also be specified.
func NewSymlinkLocator(logger *logrus.Logger, root string) Locator {
	f := newFileLocator(WithLogger(logger), WithRoot(root))
	l := symlink{
		file: newFileLocator(logger, root),
		file: *f,
	}

	return &l
@@ -62,8 +62,8 @@ func NewCSVModifier(logger *logrus.Logger, cfg *config.Config, ociSpec oci.Spec)
	logger.Infof("Constructing modifier from config: %+v", *cfg)

	config := &discover.Config{
		Root:                                    cfg.NVIDIAContainerCLIConfig.Root,
		NVIDIAContainerToolkitCLIExecutablePath: cfg.NVIDIACTKConfig.Path,
		DriverRoot:    cfg.NVIDIAContainerCLIConfig.Root,
		NvidiaCTKPath: cfg.NVIDIACTKConfig.Path,
	}

	if err := checkRequirements(logger, image); err != nil {
@@ -79,7 +79,7 @@ func NewCSVModifier(logger *logrus.Logger, cfg *config.Config, ociSpec oci.Spec)
		csvFiles = csv.BaseFilesOnly(csvFiles)
	}

	csvDiscoverer, err := discover.NewFromCSVFiles(logger, csvFiles, config.Root)
	csvDiscoverer, err := discover.NewFromCSVFiles(logger, csvFiles, config.DriverRoot)
	if err != nil {
		return nil, fmt.Errorf("failed to create CSV discoverer: %v", err)
	}
@@ -37,8 +37,10 @@ func TestDiscoverModifier(t *testing.T) {
		expectedSpec *specs.Spec
	}{
		{
			description:  "empty discoverer does not modify spec",
			discover:     &discover.DiscoverMock{},
			description:  "empty discoverer does not modify spec",
			spec:         &specs.Spec{},
			discover:     &discover.DiscoverMock{},
			expectedSpec: &specs.Spec{},
		},
		{
			description: "failed hooks discoverer returns error",
@@ -45,8 +45,8 @@ func NewGraphicsModifier(logger *logrus.Logger, cfg *config.Config, ociSpec oci.
	}

	config := &discover.Config{
		Root:                                    cfg.NVIDIAContainerCLIConfig.Root,
		NVIDIAContainerToolkitCLIExecutablePath: cfg.NVIDIACTKConfig.Path,
		DriverRoot:    cfg.NVIDIAContainerCLIConfig.Root,
		NvidiaCTKPath: cfg.NVIDIACTKConfig.Path,
	}
	d, err := discover.NewGraphicsDiscoverer(
		logger,
@@ -33,7 +33,7 @@ func NewTegraPlatformFiles(logger *logrus.Logger) (oci.SpecModifier, error) {

	tegraSystemMounts := discover.NewMounts(
		logger,
		lookup.NewFileLocator(logger, ""),
		lookup.NewFileLocator(lookup.WithLogger(logger)),
		"",
		[]string{
			"/etc/nv_tegra_release",
@@ -52,11 +52,22 @@ install -m 755 -t %{buildroot}/usr/libexec/oci/hooks.d oci-nvidia-hook
mkdir -p %{buildroot}/usr/share/containers/oci/hooks.d
install -m 644 -t %{buildroot}/usr/share/containers/oci/hooks.d oci-nvidia-hook.json

%post
mkdir -p %{_localstatedir}/lib/rpm-state/nvidia-container-toolkit
cp -af %{_bindir}/nvidia-container-runtime-hook %{_localstatedir}/lib/rpm-state/nvidia-container-toolkit

%posttrans
if [ ! -e %{_bindir}/nvidia-container-runtime-hook ]; then
  # repairing lost file nvidia-container-runtime-hook
  cp -avf %{_localstatedir}/lib/rpm-state/nvidia-container-toolkit/nvidia-container-runtime-hook %{_bindir}
fi
rm -rf %{_localstatedir}/lib/rpm-state/nvidia-container-toolkit
ln -sf %{_bindir}/nvidia-container-runtime-hook %{_bindir}/nvidia-container-toolkit

%postun
if [ -L %{_bindir}/nvidia-container-toolkit ] then; rm -f %{_bindir}/nvidia-container-toolkit; fi
if [ "$1" = 0 ]; then # package is uninstalled, not upgraded
  if [ -L %{_bindir}/nvidia-container-toolkit ]; then rm -f %{_bindir}/nvidia-container-toolkit; fi
fi

%files
%license LICENSE
@@ -29,18 +29,12 @@ SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
all=(
	amazonlinux2-aarch64
	amazonlinux2-x86_64
	centos7-ppc64le
	centos7-x86_64
	centos8-aarch64
	centos8-ppc64le
	centos8-x86_64
	debian10-amd64
	debian9-amd64
	fedora35-aarch64
	fedora35-x86_64
	opensuse-leap15.1-x86_64
	ubuntu16.04-amd64
	ubuntu16.04-ppc64le
	ubuntu18.04-amd64
	ubuntu18.04-arm64
	ubuntu18.04-ppc64le
@@ -52,14 +46,6 @@ else
	targets=${all[@]}
fi

echo "Updating components"
"${SCRIPTS_DIR}/update-components.sh"
if [[ -n $(git status -s third_party) && ${ALLOW_LOCAL_COMPONENT_CHANGES} != "true" ]]; then
	echo "ERROR: Building with local component changes."
	echo "Commit pending changes or rerun with ALLOW_LOCAL_COMPONENT_CHANGES='true'"
	exit 1
fi

eval $(${SCRIPTS_DIR}/get-component-versions.sh)


@@ -85,6 +71,6 @@ export LIBNVIDIA_CONTAINER_TAG
export NVIDIA_CONTAINER_RUNTIME_VERSION
export NVIDIA_DOCKER_VERSION

for target in "${targets[@]}"; do
for target in ${targets[@]}; do
	"${SCRIPTS_DIR}/build-all-components.sh" "${target}"
done
@@ -16,30 +16,22 @@
|
||||
|
||||
function assert_usage() {
|
||||
echo "Incorrect arguments: $*" >&2
|
||||
echo "$(basename "${BASH_SOURCE[0]}") PACKAGE_IMAGE_NAME:PACKAGE_IMAGE_TAG DIST-ARCH" >&2
|
||||
echo "$(basename "${BASH_SOURCE[0]}") PACKAGE_IMAGE_NAME:PACKAGE_IMAGE_TAG" >&2
|
||||
echo -e "\\tPACKAGE_IMAGE: container image holding packages [e.g. registry.gitlab.com/nvidia/container-toolkit/container-toolkit/staging/container-toolkit]" >&2
|
||||
echo -e "\\tPACKAGE_TAG: tag for container image holding packages. [e.g. 1a2b3c4-packaging]" >&2
|
||||
echo -e "\\tDIST: The distribution." >&2
|
||||
echo -e "\\tARCH: The architecture." >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
set -e
|
||||
|
||||
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
|
||||
PROJECT_ROOT="$( cd "${SCRIPTS_DIR}/.." && pwd )"
|
||||
|
||||
if [[ $# -ne 2 ]]; then
|
||||
if [[ $# -ne 1 ]]; then
|
||||
assert_usage "$@"
|
||||
fi
|
||||
|
||||
PACKAGE_IMAGE=$1
|
||||
DISTARCH=$2
|
||||
DIST=${DISTARCH%-*}
|
||||
ARCH=${DISTARCH##*-}
|
||||
|
||||
if [[ -z "${DIST}" || -z "${ARCH}" ]]; then
|
||||
echo "ERROR: Distro and Architecture must be specified." >&2
|
||||
assert_usage "$@"
|
||||
fi
|
||||
|
||||
# TODO: accept ARTIFACTS_DIR as a command-line argument
|
||||
: "${ARTIFACTS_DIR="${PROJECT_ROOT}/artifacts"}"
|
||||
@@ -85,30 +77,30 @@ function copy-file() {
|
||||
|
||||
eval $(${SCRIPTS_DIR}/get-component-versions.sh)
|
||||
|
||||
# extract-all extracts all packages for the specified dist-arch combination from the package image.
|
||||
# extract-all extracts all packages for the specified distribution from the package image.
|
||||
# The manifest.txt file in the image is used to determine the applicable files for the combination.
|
||||
# Files are extracted to ${ARTIFACTS_DIR}/artifacts/packages/${dist}/${arch}
|
||||
function extract-all() {
|
||||
local dist=$1
|
||||
local arch=$2
|
||||
|
||||
echo "Extracting packages for ${dist}-${arch} from ${PACKAGE_IMAGE}"
|
||||
|
||||
mkdir -p "${ARTIFACTS_DIR}"
|
||||
copy-file "${PACKAGE_IMAGE}" "/artifacts/manifest.txt" "${ARTIFACTS_DIR}/manifest.txt"
|
||||
echo "Extracting packages for ${dist} from ${PACKAGE_IMAGE}"
|
||||
|
||||
# Extract every file for the specified dist-arch combination in MANIFEST.txt
|
||||
grep "/${dist}/${arch}/" "${ARTIFACTS_DIR}/manifest.txt" | while read -r f ; do
|
||||
grep "/${dist}/" "${ARTIFACTS_DIR}/manifest.txt" | while read -r f ; do
|
||||
package_name="$(basename "$f")"
|
||||
# For release-candidates, we skip certain packages
|
||||
if skip-for-release-candidate "${package_name}"; then
|
||||
echo "Skipping $f for release-candidate ${VERSION}"
|
||||
continue
|
||||
fi
|
||||
target="${ARTIFACTS_DIR}/packages/${dist}/${arch}/${package_name}"
|
||||
target="${ARTIFACTS_DIR}/${f##/artifacts/}"
|
||||
mkdir -p "$(dirname "$target")"
|
||||
copy-file "${PACKAGE_IMAGE}" "${f}" "${target}"
|
||||
done
|
||||
}
|
||||
|
||||
extract-all "${DIST}" "${ARCH}"
|
||||
mkdir -p "${ARTIFACTS_DIR}"
|
||||
copy-file "${PACKAGE_IMAGE}" "/artifacts/manifest.txt" "${ARTIFACTS_DIR}/manifest.txt"
|
||||
|
||||
extract-all ubuntu18.04
|
||||
extract-all centos8
|
||||
|
||||
@@ -92,6 +92,6 @@ function sign() {
	cd -
}

for target in "${TARGETS[@]}"; do
for target in ${TARGETS[@]}; do
	sign "${target}" "$(pwd)"
done
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
|
||||
# Copyright (c) NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -17,10 +17,8 @@
|
||||
function assert_usage() {
|
||||
cat >&2 << EOF
|
||||
Incorrect arguments: $*
|
||||
$(basename "${BASH_SOURCE[0]}") DIST-ARCH ARTIFACTORY_URL
|
||||
DIST: The distribution.
|
||||
ARCH: The architecture.
|
||||
ARTIFACTORY_URL must contain repo path for package, including hostname.
|
||||
$(basename "${BASH_SOURCE[0]}") KITMAKER_ARTIFACTORY_REPO
|
||||
KITMAKER_ARTIFACTORY_REPO must contain repo path for package, including hostname.
|
||||
|
||||
Environment Variables
|
||||
ARTIFACTORY_TOKEN: must contain an auth token. [required]
|
||||
@@ -32,26 +30,20 @@ set -e
|
||||
|
||||
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
|
||||
PROJECT_ROOT="$( cd "${SCRIPTS_DIR}/.." && pwd )"
|
||||
COMPONENT_NAME="nvidia-container-toolkit"
|
||||
|
||||
if [[ $# -ne 2 ]]; then
|
||||
if [[ $# -ne 1 ]]; then
|
||||
assert_usage "$@"
|
||||
fi
|
||||
|
||||
source "${SCRIPTS_DIR}"/utils.sh
|
||||
|
||||
DISTARCH=$1
|
||||
DIST=${DISTARCH%-*}
|
||||
ARCH=${DISTARCH##*-}
|
||||
ARTIFACTORY_URL=$2
|
||||
# KITMAKER_ARTIFACTORY_REPO=https://urm.nvidia.com/artifactory/sw-gpu-cloudnative-generic-local/testing
|
||||
KITMAKER_ARTIFACTORY_REPO=$1
|
||||
|
||||
CURL=${CURL:-curl}
|
||||
|
||||
if [[ -z "${DIST}" || -z "${ARCH}" ]]; then
|
||||
echo "ERROR: Distro and Architecture must be specified." >&2
|
||||
assert_usage "$@"
|
||||
fi
|
||||
: ${CURL:=curl}
|
||||
|
||||
# ARTIFACTS_DIR represents the root of the artifacts (deb and rpm packages)
|
||||
# extracted from the packaging image.
|
||||
# TODO: accept ARTIFACTS_DIR as a command-line argument
|
||||
: "${ARTIFACTS_DIR="${PROJECT_ROOT}/artifacts"}"
|
||||
|
||||
@@ -60,142 +52,69 @@ if [[ ! -d "${ARTIFACTS_DIR}" ]]; then
|
||||
assert_usage "$@"
|
||||
fi
|
||||
|
||||
if [[ ! -f "${ARTIFACTS_DIR}/manifest.txt" ]]; then
|
||||
echo "ERROR: Manifest file not found." >&2
|
||||
assert_usage "$@"
|
||||
fi
|
||||
|
||||
if [[ -z "${ARTIFACTORY_TOKEN}" ]]; then
|
||||
echo "ERROR: ARTIFACTORY_TOKEN must be defined." >&2
|
||||
assert_usage "$@"
|
||||
fi
|
||||
|
||||
# TODO: accept KITMACKER_DIR as a command-line argument
|
||||
# TODO: accept KITMAKER_DIR as a command-line argument
|
||||
: "${KITMAKER_DIR="${PROJECT_ROOT}/artifacts/kitmaker"}"
|
||||
|
||||
eval $(${SCRIPTS_DIR}/get-component-versions.sh)
|
||||
KITMAKER_SCRATCH="${KITMAKER_DIR}/.scratch"
|
||||
|
||||
# Returns the key=value property if the value isn't empty
|
||||
# Prepends with ";" if needed
|
||||
set_prop_value() {
|
||||
local key=$1
|
||||
local value=$2
|
||||
if [ -n "${value}" ]; then
|
||||
if [ -z "${PROPS}" ]; then
|
||||
echo "${key}=${value}"
|
||||
else
|
||||
echo ";${key}=${value}"
|
||||
fi
|
||||
fi
|
||||
# extract_info extracts the value of the specified variable from the manifest.txt file.
|
||||
function extract_info() {
|
||||
local variable=$1
|
||||
local value=$(cat "${ARTIFACTS_DIR}/manifest.txt" | grep "#${variable}" | sed -e "s/#${variable}=//" | tr -d '\r')
|
||||
echo $value
|
||||
}
|
||||
|
||||
process_props() {
|
||||
local dist=$1
|
||||
local arch=$2
|
||||
IMAGE_EPOCH=$(extract_info "IMAGE_EPOCH")
|
||||
# Note we use the main branch for the kitmaker archive.
|
||||
GIT_BRANCH=main
|
||||
GIT_COMMIT=$(extract_info "GIT_COMMIT")
|
||||
VERSION=$(extract_info "PACKAGE_VERSION")
|
||||
|
||||
PROPS+=$(set_prop_value "component_name" "${COMPONENT_NAME}")
|
||||
PROPS+=$(set_prop_value "version" "${VERSION}")
|
||||
PROPS+=$(set_prop_value "os" "${dist}")
|
||||
PROPS+=$(set_prop_value "arch" "${arch}")
|
||||
PROPS+=$(set_prop_value "platform" "${dist}-${arch}")
|
||||
# TODO: Use `git describe` to get this information if it's not available.
|
||||
PROPS+=$(set_prop_value "changelist" "${CI_COMMIT_SHA}")
|
||||
PROPS+=$(set_prop_value "branch" "${CI_COMMIT_REF_NAME}")
|
||||
|
||||
# Gitlab variables to expose
|
||||
for var in CI_PROJECT_ID CI_PIPELINE_ID CI_JOB_ID CI_JOB_URL CI_PROJECT_PATH; do
|
||||
if [ -n "${!var}" ]; then
|
||||
PROPS+=$(set_prop_value "${var}" "${!var}")
|
||||
fi
|
||||
done
|
||||
# add_distro adds the specified component, os, and arch to the .package folder from which a kitmaker archive is generated.
|
||||
function add_distro() {
|
||||
local component=$1
|
||||
local os=$2
|
||||
local arch=$3
|
||||
|
||||
echo "Applying properties: ${PROPS}"
|
||||
}
|
||||
local package_dist=$4
|
||||
local package_arch=$5
|
||||
|
||||
## NOT USED:
|
||||
## can substitute this function place of upload_file to modify properties of
|
||||
## existing file instead of uploading files.
|
||||
# Sets the properties on a path
|
||||
# Relies on global variables: ARTIFACTORY_TOKEN, ARTIFACTORY_URL
|
||||
set_props() {
|
||||
local dist="$1"
|
||||
local arch="$2"
|
||||
local kitmakerfilename="$3"
|
||||
local name="${component}-${os}-${arch}"
|
||||
|
||||
# extract the Artifactory hostname
|
||||
artifactory_host=$(echo "${ARTIFACTORY_URL##https://}" | awk -F'/' '{print $1}')
|
||||
local image_path="${ARTIFACTORY_URL#https://${artifactory_host}/}/${dist}/${arch}/${kitmakerfilename}"
|
||||
|
||||
local PROPS
|
||||
process_props "${DIST}" "${ARCH}"
|
||||
|
||||
echo "Setting ${image_path} with properties: ${PROPS}"
|
||||
if ! ${CURL} -fs -H "X-JFrog-Art-Api: ${ARTIFACTORY_TOKEN}" \
|
||||
-X PUT \
|
||||
"https://${artifactory_host}/artifactory/api/storage/${image_path}?properties=${PROPS}&recursive=0" ; then
|
||||
echo "ERROR: set props failed: ${image_path}"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Uploads file to ARTIFACTS_DIR/<os>/<arch>/<filename>
|
||||
# Relies on global variables: DIST, ARCH, ARTIFACTORY_TOKEN, ARTIFACTORY_URL
|
||||
upload_file() {
|
||||
local dist=$1
|
||||
local arch=$2
|
||||
local file=$3
|
||||
|
||||
# extract the Artifactory hostname
|
||||
artifactory_host=$(echo "${ARTIFACTORY_URL##https://}" | awk -F'/' '{print $1}')
|
||||
# get base part of the ARTIFACTORY_URL without hostname
|
||||
local image_path="${ARTIFACTORY_URL#https://${artifactory_host}/}/${dist}/${arch}/$(basename ${file})"
|
||||
|
||||
local PROPS
|
||||
process_props "${dist}" "${arch}"
|
||||
|
||||
if [ ! -r "${file}" ]; then
|
||||
echo "ERROR: File not found or not readable: ${file}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Collect sum
|
||||
SHA1_SUM=$(sha1sum -b "${file}" | awk '{ print $1 }')
|
||||
|
||||
echo "Uploading ${image_path} from ${file}"
|
||||
if ! ${CURL} -f \
|
||||
-H "X-JFrog-Art-Api: ${ARTIFACTORY_TOKEN}" \
|
||||
-H "X-Checksum-Sha1: ${SHA1_SUM}" \
|
||||
${file:+-T ${file}} -X PUT \
|
||||
"https://${artifactory_host}/${image_path};${PROPS}" ;
|
||||
then
|
||||
echo "ERROR: upload file failed: ${file}"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function push-kitmaker-artifactory() {
|
||||
local dist=$1
|
||||
local arch=$2
|
||||
local archive=$3
|
||||
|
||||
upload_file "${dist}" "${arch}" "${archive}"
|
||||
}
|
||||
|
||||
# kitmakerize-distro creates a tar.gz archive for the specified dist-arch combination.
|
||||
# The archive is created at ${KITMAKER_DIR}/${name}.tar.gz (where ${name} is the third positional argument)
|
||||
function kitmakerize-distro() {
|
||||
local dist="$1"
|
||||
local arch="$2"
|
||||
local archive="$3"
|
||||
|
||||
local name=$(basename "${archive%%.tar.gz}")
|
||||
## Copy packages into directory layout for .tar.gz
|
||||
# TODO: make scratch_dir configurable
|
||||
local scratch_dir="$(dirname ${archive})/.scratch/${name}"
|
||||
local packages_dir="${scratch_dir}/.packages/"
|
||||
local scratch_dir="${KITMAKER_SCRATCH}/${name}"
|
||||
local packages_dir="${scratch_dir}/.packages"
|
||||
|
||||
mkdir -p "${packages_dir}"
|
||||
|
||||
# Copy the extracted files to the .packages directory so that a kitmaker file can be created.
|
||||
source="${ARTIFACTS_DIR}/packages/${dist}/${arch}"
|
||||
source="${ARTIFACTS_DIR}/packages/${package_dist}/${package_arch}"
|
||||
cp -r "${source}/"* "${packages_dir}/"
|
||||
}
|
||||
|
||||
# create_archive creates a kitmaker archive for the specified component, os, and arch.
|
||||
function create_archive() {
|
||||
local component=$1
|
||||
local os=$2
|
||||
local arch=$3
|
||||
local version=$4
|
||||
|
||||
local name="${component}-${os}-${arch}"
|
||||
local archive="${KITMAKER_DIR}/${name}-${version}.tar.gz"
|
||||
|
||||
local scratch_dir="${KITMAKER_SCRATCH}/${name}"
|
||||
local packages_dir="${scratch_dir}/.packages/"
|
||||
|
||||
## Tar up the directory structure created above
|
||||
tar zcvf "${archive}" -C "${scratch_dir}/.." "${name}"
|
||||
echo "Created: ${archive}"
|
||||
ls -l "${archive}"
|
||||
@@ -209,8 +128,99 @@ function kitmakerize-distro() {
|
||||
rmdir "${scratch_dir}"
|
||||
}
|
||||
|
||||
: "${VERSION=$({NVIDIA_CONTAINER_TOOLKIT_PACKAGE_VERSION})}"
|
||||
kitmaker_name="${COMPONENT_NAME//-/_}-${DIST}-${ARCH}-${VERSION}"
|
||||
kitmaker_archive="${KITMAKER_DIR}/${kitmaker_name}.tar.gz"
|
||||
kitmakerize-distro "${DIST}" "${ARCH}" "${kitmaker_archive}"
|
||||
push-kitmaker-artifactory "${DIST}" "${ARCH}" "${kitmaker_archive}"
|
||||
function join_by { local IFS="$1"; shift; echo "$*"; }
|
||||
|
||||
function optionally_add_property() {
|
||||
local property=$1
|
||||
local value=$2
|
||||
if [[ -n "${value}" ]]; then
|
||||
props+=("${property}=${value}")
|
||||
fi
|
||||
}
|
||||
|
||||
function upload_archive() {
|
||||
local component=$1
|
||||
local os=$2
|
||||
local arch=$3
|
||||
local version=$4
|
||||
|
||||
local package_builds=$(join_by , ${@:5})
|
||||
|
||||
local name="${component}-${os}-${arch}"
|
||||
local archive="${KITMAKER_DIR}/${name}-${version}.tar.gz"
|
||||
|
||||
if [ ! -r "${archive}" ]; then
|
||||
echo "ERROR: File not found or not readable: ${archive}"
|
||||
exit 1
|
||||
fi
|
||||
local sha1_checksum=$(sha1sum -b "${archive}" | awk '{ print $1 }')
|
||||
|
||||
local upload_url="${KITMAKER_ARTIFACTORY_REPO}/${component}-${GIT_BRANCH}/default/$(basename ${archive})"
|
||||
|
||||
local props=()
|
||||
# Required KITMAKER properties:
|
||||
props+=("component_name=${component}")
|
||||
props+=("version=${version}")
|
||||
props+=("os=${os}")
|
||||
props+=("arch=${arch}")
|
||||
props+=("platform=${os}-${arch}")
|
||||
# TODO: extract the GIT commit from the packaging image
|
||||
props+=("changelist=${GIT_COMMIT}")
|
||||
props+=("branch=${GIT_BRANCH}")
|
||||
# Package properties:
|
||||
props+=("package.epoch=${IMAGE_EPOCH}")
|
||||
props+=("package.version=${VERSION}")
|
||||
optionally_add_property "package.builds" "${package_builds}"
|
||||
|
||||
for var in "CI_PROJECT_ID" "CI_PIPELINE_ID" "CI_JOB_ID" "CI_JOB_URL" "CI_PROJECT_PATH"; do
|
||||
if [ -n "${!var}" ]; then
|
||||
optionally_add_property "${var}" "${!var}"
|
||||
fi
|
||||
done
|
||||
local PROPS=$(join_by ";" "${props[@]}")
|
||||
|
||||
echo "Uploading ${upload_url} from ${file}"
|
||||
if ! ${CURL} -f \
|
||||
-H "X-JFrog-Art-Api: ${ARTIFACTORY_TOKEN}" \
|
||||
-H "X-Checksum-Sha1: ${sha1_checksum}" \
|
||||
${archive:+-T ${archive}} -X PUT \
|
||||
"${upload_url};${PROPS}" ;
|
||||
then
|
||||
echo "ERROR: upload file failed: ${archive}"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
component="nvidia_container_toolkit"
|
||||
version="${VERSION%-rc.*}"
|
||||
version_suffix=$(date -r "${IMAGE_EPOCH}" '+%Y.%m.%d.%s' || date -d @"${IMAGE_EPOCH}" '+%Y.%m.%d.%s')
|
||||
kitmaker_version="${VERSION%-rc.*}.${version_suffix}"
|
||||
kitmaker_os="linux"
|
||||
|
||||
# create_and_upload creates a kitmaker archive for the specified component, os, and arch and uploads it.
|
||||
function create_and_upload() {
|
||||
local kitmaker_arch=$1
|
||||
local builds=${@:2}
|
||||
|
||||
for build in ${builds}; do
|
||||
local package_dist=$(echo ${build} | cut -d- -f1)
|
||||
local package_arch=$(echo ${build} | cut -d- -f2)
|
||||
|
||||
add_distro "${component}" "${kitmaker_os}" "${kitmaker_arch}" "${package_dist}" "${package_arch}"
|
||||
done
|
||||
|
||||
create_archive "${component}" "${kitmaker_os}" "${kitmaker_arch}" "${kitmaker_version}"
|
||||
upload_archive "${component}" "${kitmaker_os}" "${kitmaker_arch}" "${kitmaker_version}" ${builds}
|
||||
}
|
||||
|
||||
# Create archive for x86_64 linux distributions
|
||||
create_and_upload "x86_64" "ubuntu18.04-amd64" "centos8-x86_64"
|
||||
|
||||
# Create archive for sbsa linux distributions
|
||||
create_and_upload "sbsa" "ubuntu18.04-arm64" "centos8-aarch64"
|
||||
# Create archive for aarch64 linux distributions
|
||||
# NOTE: From the perspective of the NVIDIA Container Toolkit aarch64 is just a duplicate of sbsa
|
||||
create_and_upload "aarch64" "ubuntu18.04-arm64" "centos8-aarch64"
|
||||
|
||||
# Create archive for ppc64le linux distributions
|
||||
create_and_upload "ppc64le" "ubuntu18.04-ppc64le" "centos8-ppc64le"
|
||||
|
||||
@@ -152,18 +152,12 @@ function sync() {
all=(
	amazonlinux2-aarch64
	amazonlinux2-x86_64
	centos7-ppc64le
	centos7-x86_64
	centos8-aarch64
	centos8-ppc64le
	centos8-x86_64
	debian10-amd64
	debian9-amd64
	fedora35-aarch64
	fedora35-x86_64
	opensuse-leap15.1-x86_64
	ubuntu16.04-amd64
	ubuntu16.04-ppc64le
	ubuntu18.04-amd64
	ubuntu18.04-arm64
	ubuntu18.04-ppc64le
@@ -61,6 +61,7 @@ testing::toolkit::install() {
	grep -q -E "^\s*ldconfig = \"@${nvidia_run_dir}/driver/sbin/ldconfig(.real)?\"" "${shared_dir}/usr/local/nvidia/toolkit/.config/nvidia-container-runtime/config.toml"
	grep -q -E "^\s*root = \"${nvidia_run_dir}/driver\"" "${shared_dir}/usr/local/nvidia/toolkit/.config/nvidia-container-runtime/config.toml"
	grep -q -E "^\s*path = \"/usr/local/nvidia/toolkit/nvidia-container-cli\"" "${shared_dir}/usr/local/nvidia/toolkit/.config/nvidia-container-runtime/config.toml"
	grep -q -E "^\s*path = \"/usr/local/nvidia/toolkit/nvidia-ctk\"" "${shared_dir}/usr/local/nvidia/toolkit/.config/nvidia-container-runtime/config.toml"
}

testing::toolkit::delete() {
@@ -14,7 +14,7 @@

WORKFLOW ?= nvidia-docker

DISTRIBUTIONS := ubuntu18.04 centos8 fedora35
DISTRIBUTIONS := ubuntu20.04 ubuntu18.04 centos8 fedora35

IMAGE_TARGETS := $(patsubst %,image-%, $(DISTRIBUTIONS))
RUN_TARGETS := $(patsubst %,run-%, $(DISTRIBUTIONS))
@@ -33,6 +33,7 @@ $(IMAGE_TARGETS): image-%: $(DOCKERFILE)
	$(shell dirname $(DOCKERFILE))


%-ubuntu20.04: ARCH ?= amd64
%-ubuntu18.04: ARCH ?= amd64
%-centos8: ARCH ?= x86_64
%-fedora35: ARCH ?= x86_64
2	third_party/libnvidia-container	vendored
Submodule third_party/libnvidia-container updated: 505dedfee8...7440a1ead8
@@ -204,7 +204,12 @@ func Install(cli *cli.Context, opts *options) error {
		return fmt.Errorf("error installing NVIDIA container runtime hook: %v", err)
	}

	err = installToolkitConfig(toolkitConfigPath, nvidiaContainerCliExecutable, opts)
	nvidiaCTKPath, err := installContainerToolkitCLI(opts.toolkitRoot)
	if err != nil {
		return fmt.Errorf("error installing NVIDIA Container Toolkit CLI: %v", err)
	}

	err = installToolkitConfig(toolkitConfigPath, nvidiaContainerCliExecutable, nvidiaCTKPath, opts)
	if err != nil {
		return fmt.Errorf("error installing NVIDIA container toolkit config: %v", err)
	}
@@ -262,7 +267,7 @@ func installLibrary(libName string, toolkitRoot string) error {

// installToolkitConfig installs the config file for the NVIDIA container toolkit ensuring
// that the settings are updated to match the desired install and nvidia driver directories.
func installToolkitConfig(toolkitConfigPath string, nvidiaContainerCliExecutablePath string, opts *options) error {
func installToolkitConfig(toolkitConfigPath string, nvidiaContainerCliExecutablePath string, nvidiaCTKPath string, opts *options) error {
	log.Infof("Installing NVIDIA container toolkit config '%v'", toolkitConfigPath)

	config, err := toml.LoadFile(nvidiaContainerToolkitConfigSource)
@@ -311,6 +316,9 @@ func installToolkitConfig(toolkitConfigPath string, nvidiaContainerCliExecutable
		config.Set(key, value)
	}

	// Set nvidia-ctk options
	config.Set("nvidia-ctk.path", nvidiaCTKPath)

	_, err = config.WriteTo(targetConfig)
	if err != nil {
		return fmt.Errorf("error writing config: %v", err)
@@ -322,6 +330,19 @@ func installToolkitConfig(toolkitConfigPath string, nvidiaContainerCliExecutable
	return nil
}

// installContainerToolkitCLI installs the nvidia-ctk CLI executable and wrapper.
func installContainerToolkitCLI(toolkitDir string) (string, error) {
	e := executable{
		source: "/usr/bin/nvidia-ctk",
		target: executableTarget{
			dotfileName: "nvidia-ctk.real",
			wrapperName: "nvidia-ctk",
		},
	}

	return e.install(toolkitDir)
}

// installContainerCLI sets up the NVIDIA container CLI executable, copying the executable
// and implementing the required wrapper
func installContainerCLI(toolkitRoot string) (string, error) {
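A minimal sketch of the config-rewrite step shown in the hunk above. The calls LoadFile, Set, and WriteTo are the ones used in the diff and match the github.com/pelletier/go-toml API; the source and target paths below are placeholders chosen for illustration, not values taken from this changeset:

```go
package main

import (
	"log"
	"os"

	toml "github.com/pelletier/go-toml"
)

func main() {
	// Load the packaged default config (placeholder path).
	config, err := toml.LoadFile("/etc/nvidia-container-runtime/config.toml")
	if err != nil {
		log.Fatalf("error loading config: %v", err)
	}

	// Point the runtime at the installed nvidia-ctk wrapper, mirroring what
	// installToolkitConfig does with the path returned by installContainerToolkitCLI.
	config.Set("nvidia-ctk.path", "/usr/local/nvidia/toolkit/nvidia-ctk")

	// Write the updated config to the target location (placeholder path).
	target, err := os.Create("/tmp/config.toml")
	if err != nil {
		log.Fatalf("error creating target config: %v", err)
	}
	defer target.Close()

	if _, err := config.WriteTo(target); err != nil {
		log.Fatalf("error writing config: %v", err)
	}
}
```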
21
vendor/github.com/blang/semver/.travis.yml
generated
vendored
21
vendor/github.com/blang/semver/.travis.yml
generated
vendored
@@ -1,21 +0,0 @@
|
||||
language: go
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.4.3
|
||||
- go: 1.5.4
|
||||
- go: 1.6.3
|
||||
- go: 1.7
|
||||
- go: tip
|
||||
allow_failures:
|
||||
- go: tip
|
||||
install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
script:
|
||||
- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
|
||||
-repotoken $COVERALLS_TOKEN
|
||||
- echo "Build examples" ; cd examples && go build
|
||||
- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
|
||||
env:
|
||||
global:
|
||||
secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=
|
||||
22
vendor/github.com/blang/semver/LICENSE
generated
vendored
22
vendor/github.com/blang/semver/LICENSE
generated
vendored
@@ -1,22 +0,0 @@
|
||||
The MIT License
|
||||
|
||||
Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
194
vendor/github.com/blang/semver/README.md
generated
vendored
194
vendor/github.com/blang/semver/README.md
generated
vendored
@@ -1,194 +0,0 @@
|
||||
semver for golang [](https://travis-ci.org/blang/semver) [](https://godoc.org/github.com/blang/semver) [](https://coveralls.io/r/blang/semver?branch=master)
|
||||
======
|
||||
|
||||
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
|
||||
|
||||
Usage
|
||||
-----
|
||||
```bash
|
||||
$ go get github.com/blang/semver
|
||||
```
|
||||
Note: Always vendor your dependencies or fix on a specific version tag.
|
||||
|
||||
```go
|
||||
import github.com/blang/semver
|
||||
v1, err := semver.Make("1.0.0-beta")
|
||||
v2, err := semver.Make("2.0.0-beta")
|
||||
v1.Compare(v2)
|
||||
```
|
||||
|
||||
Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
|
||||
|
||||
Why should I use this lib?
|
||||
-----
|
||||
|
||||
- Fully spec compatible
|
||||
- No reflection
|
||||
- No regex
|
||||
- Fully tested (Coverage >99%)
|
||||
- Readable parsing/validation errors
|
||||
- Fast (See [Benchmarks](#benchmarks))
|
||||
- Only Stdlib
|
||||
- Uses values instead of pointers
|
||||
- Many features, see below
|
||||
|
||||
|
||||
Features
|
||||
-----
|
||||
|
||||
- Parsing and validation at all levels
|
||||
- Comparator-like comparisons
|
||||
- Compare Helper Methods
|
||||
- InPlace manipulation
|
||||
- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
|
||||
- Wildcards `>=1.x`, `<=2.5.x`
|
||||
- Sortable (implements sort.Interface)
|
||||
- database/sql compatible (sql.Scanner/Valuer)
|
||||
- encoding/json compatible (json.Marshaler/Unmarshaler)
|
||||
|
||||
Ranges
|
||||
------
|
||||
|
||||
A `Range` is a set of conditions which specify which versions satisfy the range.
|
||||
|
||||
A condition is composed of an operator and a version. The supported operators are:
|
||||
|
||||
- `<1.0.0` Less than `1.0.0`
|
||||
- `<=1.0.0` Less than or equal to `1.0.0`
|
||||
- `>1.0.0` Greater than `1.0.0`
|
||||
- `>=1.0.0` Greater than or equal to `1.0.0`
|
||||
- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
|
||||
- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
|
||||
|
||||
Note that spaces between the operator and the version will be gracefully tolerated.
|
||||
|
||||
A `Range` can link multiple `Ranges` separated by space:
|
||||
|
||||
Ranges can be linked by logical AND:
|
||||
|
||||
- `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
|
||||
- `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
|
||||
|
||||
Ranges can also be linked by logical OR:
|
||||
|
||||
- `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
|
||||
|
||||
AND has a higher precedence than OR. It's not possible to use brackets.
|
||||
|
||||
Ranges can be combined by both AND and OR
|
||||
|
||||
- `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
|
||||
|
||||
Range usage:
|
||||
|
||||
```
|
||||
v, err := semver.Parse("1.2.3")
|
||||
range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
|
||||
if range(v) {
|
||||
//valid
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
Example
|
||||
-----
|
||||
|
||||
Have a look at full examples in [examples/main.go](examples/main.go)
|
||||
|
||||
```go
|
||||
import github.com/blang/semver
|
||||
|
||||
v, err := semver.Make("0.0.1-alpha.preview+123.github")
|
||||
fmt.Printf("Major: %d\n", v.Major)
|
||||
fmt.Printf("Minor: %d\n", v.Minor)
|
||||
fmt.Printf("Patch: %d\n", v.Patch)
|
||||
fmt.Printf("Pre: %s\n", v.Pre)
|
||||
fmt.Printf("Build: %s\n", v.Build)
|
||||
|
||||
// Prerelease versions array
|
||||
if len(v.Pre) > 0 {
|
||||
fmt.Println("Prerelease versions:")
|
||||
for i, pre := range v.Pre {
|
||||
fmt.Printf("%d: %q\n", i, pre)
|
||||
}
|
||||
}
|
||||
|
||||
// Build meta data array
|
||||
if len(v.Build) > 0 {
|
||||
fmt.Println("Build meta data:")
|
||||
for i, build := range v.Build {
|
||||
fmt.Printf("%d: %q\n", i, build)
|
||||
}
|
||||
}
|
||||
|
||||
v001, err := semver.Make("0.0.1")
|
||||
// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
|
||||
v001.GT(v) == true
|
||||
v.LT(v001) == true
|
||||
v.GTE(v) == true
|
||||
v.LTE(v) == true
|
||||
|
||||
// Or use v.Compare(v2) for comparisons (-1, 0, 1):
|
||||
v001.Compare(v) == 1
|
||||
v.Compare(v001) == -1
|
||||
v.Compare(v) == 0
|
||||
|
||||
// Manipulate Version in place:
|
||||
v.Pre[0], err = semver.NewPRVersion("beta")
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing pre release version: %q", err)
|
||||
}
|
||||
|
||||
fmt.Println("\nValidate versions:")
|
||||
v.Build[0] = "?"
|
||||
|
||||
err = v.Validate()
|
||||
if err != nil {
|
||||
fmt.Printf("Validation failed: %s\n", err)
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
Benchmarks
|
||||
-----
|
||||
|
||||
BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op
|
||||
BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op
|
||||
BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op
|
||||
BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op
|
||||
BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op
|
||||
BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op
|
||||
BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op
|
||||
BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op
|
||||
BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op
|
||||
BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op
|
||||
BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op
|
||||
BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op
|
||||
|
||||
See benchmark cases at [semver_test.go](semver_test.go)
|
||||
|
||||
|
||||
Motivation
|
||||
-----
|
||||
|
||||
I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like.
|
||||
|
||||
|
||||
Contribution
|
||||
-----
|
||||
|
||||
Feel free to make a pull request. For bigger changes create a issue first to discuss about it.
|
||||
|
||||
|
||||
License
|
||||
-----
|
||||
|
||||
See [LICENSE](LICENSE) file.
|
||||
23
vendor/github.com/blang/semver/json.go
generated
vendored
23
vendor/github.com/blang/semver/json.go
generated
vendored
@@ -1,23 +0,0 @@
|
||||
package semver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// MarshalJSON implements the encoding/json.Marshaler interface.
|
||||
func (v Version) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(v.String())
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
|
||||
func (v *Version) UnmarshalJSON(data []byte) (err error) {
|
||||
var versionString string
|
||||
|
||||
if err = json.Unmarshal(data, &versionString); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
*v, err = Parse(versionString)
|
||||
|
||||
return
|
||||
}
|
||||
17
vendor/github.com/blang/semver/package.json
generated
vendored
17
vendor/github.com/blang/semver/package.json
generated
vendored
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"author": "blang",
|
||||
"bugs": {
|
||||
"URL": "https://github.com/blang/semver/issues",
|
||||
"url": "https://github.com/blang/semver/issues"
|
||||
},
|
||||
"gx": {
|
||||
"dvcsimport": "github.com/blang/semver"
|
||||
},
|
||||
"gxVersion": "0.10.0",
|
||||
"language": "go",
|
||||
"license": "MIT",
|
||||
"name": "semver",
|
||||
"releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
|
||||
"version": "3.5.1"
|
||||
}
|
||||
|
||||
416
vendor/github.com/blang/semver/range.go
generated
vendored
416
vendor/github.com/blang/semver/range.go
generated
vendored
@@ -1,416 +0,0 @@
|
||||
package semver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type wildcardType int
|
||||
|
||||
const (
|
||||
noneWildcard wildcardType = iota
|
||||
majorWildcard wildcardType = 1
|
||||
minorWildcard wildcardType = 2
|
||||
patchWildcard wildcardType = 3
|
||||
)
|
||||
|
||||
func wildcardTypefromInt(i int) wildcardType {
|
||||
switch i {
|
||||
case 1:
|
||||
return majorWildcard
|
||||
case 2:
|
||||
return minorWildcard
|
||||
case 3:
|
||||
return patchWildcard
|
||||
default:
|
||||
return noneWildcard
|
||||
}
|
||||
}
|
||||
|
||||
type comparator func(Version, Version) bool
|
||||
|
||||
var (
|
||||
compEQ comparator = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) == 0
|
||||
}
|
||||
compNE = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) != 0
|
||||
}
|
||||
compGT = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) == 1
|
||||
}
|
||||
compGE = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) >= 0
|
||||
}
|
||||
compLT = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) == -1
|
||||
}
|
||||
compLE = func(v1 Version, v2 Version) bool {
|
||||
return v1.Compare(v2) <= 0
|
||||
}
|
||||
)
|
||||
|
||||
type versionRange struct {
|
||||
v Version
|
||||
c comparator
|
||||
}
|
||||
|
||||
// rangeFunc creates a Range from the given versionRange.
|
||||
func (vr *versionRange) rangeFunc() Range {
|
||||
return Range(func(v Version) bool {
|
||||
return vr.c(v, vr.v)
|
||||
})
|
||||
}
|
||||
|
||||
// Range represents a range of versions.
|
||||
// A Range can be used to check if a Version satisfies it:
|
||||
//
|
||||
// range, err := semver.ParseRange(">1.0.0 <2.0.0")
|
||||
// range(semver.MustParse("1.1.1") // returns true
|
||||
type Range func(Version) bool
|
||||
|
||||
// OR combines the existing Range with another Range using logical OR.
|
||||
func (rf Range) OR(f Range) Range {
|
||||
return Range(func(v Version) bool {
|
||||
return rf(v) || f(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AND combines the existing Range with another Range using logical AND.
|
||||
func (rf Range) AND(f Range) Range {
|
||||
return Range(func(v Version) bool {
|
||||
return rf(v) && f(v)
|
||||
})
|
||||
}
|
||||
|
||||
// ParseRange parses a range and returns a Range.
|
||||
// If the range could not be parsed an error is returned.
|
||||
//
|
||||
// Valid ranges are:
|
||||
// - "<1.0.0"
|
||||
// - "<=1.0.0"
|
||||
// - ">1.0.0"
|
||||
// - ">=1.0.0"
|
||||
// - "1.0.0", "=1.0.0", "==1.0.0"
|
||||
// - "!1.0.0", "!=1.0.0"
|
||||
//
|
||||
// A Range can consist of multiple ranges separated by space:
|
||||
// Ranges can be linked by logical AND:
|
||||
// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
|
||||
// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
|
||||
//
|
||||
// Ranges can also be linked by logical OR:
|
||||
// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
|
||||
//
|
||||
// AND has a higher precedence than OR. It's not possible to use brackets.
|
||||
//
|
||||
// Ranges can be combined by both AND and OR
|
||||
//
|
||||
// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
|
||||
func ParseRange(s string) (Range, error) {
|
||||
parts := splitAndTrim(s)
|
||||
orParts, err := splitORParts(parts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
expandedParts, err := expandWildcardVersion(orParts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var orFn Range
|
||||
for _, p := range expandedParts {
|
||||
var andFn Range
|
||||
for _, ap := range p {
|
||||
opStr, vStr, err := splitComparatorVersion(ap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vr, err := buildVersionRange(opStr, vStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
|
||||
}
|
||||
rf := vr.rangeFunc()
|
||||
|
||||
// Set function
|
||||
if andFn == nil {
|
||||
andFn = rf
|
||||
} else { // Combine with existing function
|
||||
andFn = andFn.AND(rf)
|
||||
}
|
||||
}
|
||||
if orFn == nil {
|
||||
orFn = andFn
|
||||
} else {
|
||||
orFn = orFn.OR(andFn)
|
||||
}
|
||||
|
||||
}
|
||||
return orFn, nil
|
||||
}
|
||||
|
||||
// splitORParts splits the already cleaned parts by '||'.
|
||||
// Checks for invalid positions of the operator and returns an
|
||||
// error if found.
|
||||
func splitORParts(parts []string) ([][]string, error) {
|
||||
var ORparts [][]string
|
||||
last := 0
|
||||
for i, p := range parts {
|
||||
if p == "||" {
|
||||
if i == 0 {
|
||||
return nil, fmt.Errorf("First element in range is '||'")
|
||||
}
|
||||
ORparts = append(ORparts, parts[last:i])
|
||||
last = i + 1
|
||||
}
|
||||
}
|
||||
if last == len(parts) {
|
||||
return nil, fmt.Errorf("Last element in range is '||'")
|
||||
}
|
||||
ORparts = append(ORparts, parts[last:])
|
||||
return ORparts, nil
|
||||
}
|
||||
|
||||
// buildVersionRange takes a slice of 2: operator and version
|
||||
// and builds a versionRange, otherwise an error.
|
||||
func buildVersionRange(opStr, vStr string) (*versionRange, error) {
|
||||
c := parseComparator(opStr)
|
||||
if c == nil {
|
||||
return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
|
||||
}
|
||||
v, err := Parse(vStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
|
||||
}
|
||||
|
||||
return &versionRange{
|
||||
v: v,
|
||||
c: c,
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
// inArray checks if a byte is contained in an array of bytes
|
||||
func inArray(s byte, list []byte) bool {
|
||||
for _, el := range list {
|
||||
if el == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// splitAndTrim splits a range string by spaces and cleans whitespaces
|
||||
func splitAndTrim(s string) (result []string) {
|
||||
last := 0
|
||||
var lastChar byte
|
||||
excludeFromSplit := []byte{'>', '<', '='}
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
|
||||
if last < i-1 {
|
||||
result = append(result, s[last:i])
|
||||
}
|
||||
last = i + 1
|
||||
} else if s[i] != ' ' {
|
||||
lastChar = s[i]
|
||||
}
|
||||
}
|
||||
if last < len(s)-1 {
|
||||
result = append(result, s[last:])
|
||||
}
|
||||
|
||||
for i, v := range result {
|
||||
result[i] = strings.Replace(v, " ", "", -1)
|
||||
}
|
||||
|
||||
// parts := strings.Split(s, " ")
|
||||
// for _, x := range parts {
|
||||
// if s := strings.TrimSpace(x); len(s) != 0 {
|
||||
// result = append(result, s)
|
||||
// }
|
||||
// }
|
||||
return
|
||||
}
|
||||
|
||||
// splitComparatorVersion splits the comparator from the version.
|
||||
// Input must be free of leading or trailing spaces.
|
||||
func splitComparatorVersion(s string) (string, string, error) {
|
||||
i := strings.IndexFunc(s, unicode.IsDigit)
|
||||
if i == -1 {
|
||||
return "", "", fmt.Errorf("Could not get version from string: %q", s)
|
||||
}
|
||||
return strings.TrimSpace(s[0:i]), s[i:], nil
|
||||
}
|
||||
|
||||
// getWildcardType will return the type of wildcard that the
|
||||
// passed version contains
|
||||
func getWildcardType(vStr string) wildcardType {
|
||||
parts := strings.Split(vStr, ".")
|
||||
nparts := len(parts)
|
||||
wildcard := parts[nparts-1]
|
||||
|
||||
possibleWildcardType := wildcardTypefromInt(nparts)
|
||||
if wildcard == "x" {
|
||||
return possibleWildcardType
|
||||
}
|
||||
|
||||
return noneWildcard
|
||||
}
|
||||
|
||||
// createVersionFromWildcard will convert a wildcard version
|
||||
// into a regular version, replacing 'x's with '0's, handling
|
||||
// special cases like '1.x.x' and '1.x'
|
||||
func createVersionFromWildcard(vStr string) string {
|
||||
// handle 1.x.x
|
||||
vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
|
||||
vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
|
||||
parts := strings.Split(vStr2, ".")
|
||||
|
||||
// handle 1.x
|
||||
if len(parts) == 2 {
|
||||
return vStr2 + ".0"
|
||||
}
|
||||
|
||||
return vStr2
|
||||
}
|
||||
|
||||
// incrementMajorVersion will increment the major version
|
||||
// of the passed version
|
||||
func incrementMajorVersion(vStr string) (string, error) {
|
||||
parts := strings.Split(vStr, ".")
|
||||
i, err := strconv.Atoi(parts[0])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
parts[0] = strconv.Itoa(i + 1)
|
||||
|
||||
return strings.Join(parts, "."), nil
|
||||
}
|
||||
|
||||
// incrementMajorVersion will increment the minor version
|
||||
// of the passed version
|
||||
func incrementMinorVersion(vStr string) (string, error) {
|
||||
parts := strings.Split(vStr, ".")
|
||||
i, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
parts[1] = strconv.Itoa(i + 1)
|
||||
|
||||
return strings.Join(parts, "."), nil
|
||||
}
|
||||
|
||||
// expandWildcardVersion will expand wildcards inside versions
|
||||
// following these rules:
|
||||
//
|
||||
// * when dealing with patch wildcards:
|
||||
// >= 1.2.x will become >= 1.2.0
|
||||
// <= 1.2.x will become < 1.3.0
|
||||
// > 1.2.x will become >= 1.3.0
|
||||
// < 1.2.x will become < 1.2.0
|
||||
// != 1.2.x will become < 1.2.0 >= 1.3.0
|
||||
//
|
||||
// * when dealing with minor wildcards:
|
||||
// >= 1.x will become >= 1.0.0
|
||||
// <= 1.x will become < 2.0.0
|
||||
// > 1.x will become >= 2.0.0
|
||||
// < 1.0 will become < 1.0.0
|
||||
// != 1.x will become < 1.0.0 >= 2.0.0
|
||||
//
|
||||
// * when dealing with wildcards without
|
||||
// version operator:
|
||||
// 1.2.x will become >= 1.2.0 < 1.3.0
|
||||
// 1.x will become >= 1.0.0 < 2.0.0
|
||||
func expandWildcardVersion(parts [][]string) ([][]string, error) {
|
||||
var expandedParts [][]string
|
||||
for _, p := range parts {
|
||||
var newParts []string
|
||||
for _, ap := range p {
|
||||
if strings.Index(ap, "x") != -1 {
|
||||
opStr, vStr, err := splitComparatorVersion(ap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
versionWildcardType := getWildcardType(vStr)
|
||||
flatVersion := createVersionFromWildcard(vStr)
|
||||
|
||||
var resultOperator string
|
||||
var shouldIncrementVersion bool
|
||||
switch opStr {
|
||||
case ">":
|
||||
resultOperator = ">="
|
||||
shouldIncrementVersion = true
|
||||
case ">=":
|
||||
resultOperator = ">="
|
||||
case "<":
|
||||
resultOperator = "<"
|
||||
case "<=":
|
||||
resultOperator = "<"
|
||||
shouldIncrementVersion = true
|
||||
case "", "=", "==":
|
||||
newParts = append(newParts, ">="+flatVersion)
|
||||
resultOperator = "<"
|
||||
shouldIncrementVersion = true
|
||||
case "!=", "!":
|
||||
newParts = append(newParts, "<"+flatVersion)
|
||||
resultOperator = ">="
|
||||
shouldIncrementVersion = true
|
||||
}
|
||||
|
||||
var resultVersion string
|
||||
if shouldIncrementVersion {
|
||||
switch versionWildcardType {
|
||||
case patchWildcard:
|
||||
resultVersion, _ = incrementMinorVersion(flatVersion)
|
||||
case minorWildcard:
|
||||
resultVersion, _ = incrementMajorVersion(flatVersion)
|
||||
}
|
||||
} else {
|
||||
resultVersion = flatVersion
|
||||
}
|
||||
|
||||
ap = resultOperator + resultVersion
|
||||
}
|
||||
newParts = append(newParts, ap)
|
||||
}
|
||||
expandedParts = append(expandedParts, newParts)
|
||||
}
|
||||
|
||||
return expandedParts, nil
|
||||
}
|
||||
|
||||
func parseComparator(s string) comparator {
|
||||
switch s {
|
||||
case "==":
|
||||
fallthrough
|
||||
case "":
|
||||
fallthrough
|
||||
case "=":
|
||||
return compEQ
|
||||
case ">":
|
||||
return compGT
|
||||
case ">=":
|
||||
return compGE
|
||||
case "<":
|
||||
return compLT
|
||||
case "<=":
|
||||
return compLE
|
||||
case "!":
|
||||
fallthrough
|
||||
case "!=":
|
||||
return compNE
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MustParseRange is like ParseRange but panics if the range cannot be parsed.
|
||||
func MustParseRange(s string) Range {
|
||||
r, err := ParseRange(s)
|
||||
if err != nil {
|
||||
panic(`semver: ParseRange(` + s + `): ` + err.Error())
|
||||
}
|
||||
return r
|
||||
}
|
||||
418
vendor/github.com/blang/semver/semver.go
generated
vendored
418
vendor/github.com/blang/semver/semver.go
generated
vendored
@@ -1,418 +0,0 @@
|
||||
package semver
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
numbers string = "0123456789"
|
||||
alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
|
||||
alphanum = alphas + numbers
|
||||
)
|
||||
|
||||
// SpecVersion is the latest fully supported spec version of semver
|
||||
var SpecVersion = Version{
|
||||
Major: 2,
|
||||
Minor: 0,
|
||||
Patch: 0,
|
||||
}
|
||||
|
||||
// Version represents a semver compatible version
|
||||
type Version struct {
|
||||
Major uint64
|
||||
Minor uint64
|
||||
Patch uint64
|
||||
Pre []PRVersion
|
||||
Build []string //No Precendence
|
||||
}
|
||||
|
||||
// Version to string
|
||||
func (v Version) String() string {
|
||||
b := make([]byte, 0, 5)
|
||||
b = strconv.AppendUint(b, v.Major, 10)
|
||||
b = append(b, '.')
|
||||
b = strconv.AppendUint(b, v.Minor, 10)
|
||||
b = append(b, '.')
|
||||
b = strconv.AppendUint(b, v.Patch, 10)
|
||||
|
||||
if len(v.Pre) > 0 {
|
||||
b = append(b, '-')
|
||||
b = append(b, v.Pre[0].String()...)
|
||||
|
||||
for _, pre := range v.Pre[1:] {
|
||||
b = append(b, '.')
|
||||
b = append(b, pre.String()...)
|
||||
}
|
||||
}
|
||||
|
||||
if len(v.Build) > 0 {
|
||||
b = append(b, '+')
|
||||
b = append(b, v.Build[0]...)
|
||||
|
||||
for _, build := range v.Build[1:] {
|
||||
b = append(b, '.')
|
||||
b = append(b, build...)
|
||||
}
|
||||
}
|
||||
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// Equals checks if v is equal to o.
|
||||
func (v Version) Equals(o Version) bool {
|
||||
return (v.Compare(o) == 0)
|
||||
}
|
||||
|
||||
// EQ checks if v is equal to o.
|
||||
func (v Version) EQ(o Version) bool {
|
||||
return (v.Compare(o) == 0)
|
||||
}
|
||||
|
||||
// NE checks if v is not equal to o.
|
||||
func (v Version) NE(o Version) bool {
|
||||
return (v.Compare(o) != 0)
|
||||
}
|
||||
|
||||
// GT checks if v is greater than o.
|
||||
func (v Version) GT(o Version) bool {
|
||||
return (v.Compare(o) == 1)
|
||||
}
|
||||
|
||||
// GTE checks if v is greater than or equal to o.
|
||||
func (v Version) GTE(o Version) bool {
|
||||
return (v.Compare(o) >= 0)
|
||||
}
|
||||
|
||||
// GE checks if v is greater than or equal to o.
|
||||
func (v Version) GE(o Version) bool {
|
||||
return (v.Compare(o) >= 0)
|
||||
}
|
||||
|
||||
// LT checks if v is less than o.
|
||||
func (v Version) LT(o Version) bool {
|
||||
return (v.Compare(o) == -1)
|
||||
}
|
||||
|
||||
// LTE checks if v is less than or equal to o.
|
||||
func (v Version) LTE(o Version) bool {
|
||||
return (v.Compare(o) <= 0)
|
||||
}
|
||||
|
||||
// LE checks if v is less than or equal to o.
|
||||
func (v Version) LE(o Version) bool {
|
||||
return (v.Compare(o) <= 0)
|
||||
}
|
||||
|
||||
// Compare compares Versions v to o:
|
||||
// -1 == v is less than o
|
||||
// 0 == v is equal to o
|
||||
// 1 == v is greater than o
|
||||
func (v Version) Compare(o Version) int {
|
||||
if v.Major != o.Major {
|
||||
if v.Major > o.Major {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
if v.Minor != o.Minor {
|
||||
if v.Minor > o.Minor {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
if v.Patch != o.Patch {
|
||||
if v.Patch > o.Patch {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// Quick comparison if a version has no prerelease versions
|
||||
if len(v.Pre) == 0 && len(o.Pre) == 0 {
|
||||
return 0
|
||||
} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
|
||||
return 1
|
||||
} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
i := 0
|
||||
for ; i < len(v.Pre) && i < len(o.Pre); i++ {
|
||||
if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
|
||||
continue
|
||||
} else if comp == 1 {
|
||||
return 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// If all pr versions are the equal but one has further prversion, this one greater
|
||||
if i == len(v.Pre) && i == len(o.Pre) {
|
||||
return 0
|
||||
} else if i == len(v.Pre) && i < len(o.Pre) {
|
||||
return -1
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Validate validates v and returns error in case
|
||||
func (v Version) Validate() error {
|
||||
// Major, Minor, Patch already validated using uint64
|
||||
|
||||
for _, pre := range v.Pre {
|
||||
if !pre.IsNum { //Numeric prerelease versions already uint64
|
||||
if len(pre.VersionStr) == 0 {
|
||||
return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
|
||||
}
|
||||
if !containsOnly(pre.VersionStr, alphanum) {
|
||||
return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, build := range v.Build {
|
||||
if len(build) == 0 {
|
||||
return fmt.Errorf("Build meta data can not be empty %q", build)
|
||||
}
|
||||
if !containsOnly(build, alphanum) {
|
||||
return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
|
||||
func New(s string) (vp *Version, err error) {
|
||||
v, err := Parse(s)
|
||||
vp = &v
|
||||
return
|
||||
}
|
||||
|
||||
// Make is an alias for Parse, parses version string and returns a validated Version or error
|
||||
func Make(s string) (Version, error) {
|
||||
return Parse(s)
|
||||
}
|
||||
|
||||
// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
|
||||
// specs to be parsed by this library. It does so by normalizing versions before passing them to
|
||||
// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
|
||||
// with only major and minor components specified
|
||||
func ParseTolerant(s string) (Version, error) {
|
||||
s = strings.TrimSpace(s)
|
||||
s = strings.TrimPrefix(s, "v")
|
||||
|
||||
// Split into major.minor.(patch+pr+meta)
|
||||
parts := strings.SplitN(s, ".", 3)
|
||||
if len(parts) < 3 {
|
||||
if strings.ContainsAny(parts[len(parts)-1], "+-") {
|
||||
return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
|
||||
}
|
||||
for len(parts) < 3 {
|
||||
parts = append(parts, "0")
|
||||
}
|
||||
s = strings.Join(parts, ".")
|
||||
}
|
||||
|
||||
return Parse(s)
|
||||
}
|
||||
|
||||
// Parse parses version string and returns a validated Version or error
|
||||
func Parse(s string) (Version, error) {
|
||||
if len(s) == 0 {
|
||||
return Version{}, errors.New("Version string empty")
|
||||
}
|
||||
|
||||
// Split into major.minor.(patch+pr+meta)
|
||||
parts := strings.SplitN(s, ".", 3)
|
||||
if len(parts) != 3 {
|
||||
return Version{}, errors.New("No Major.Minor.Patch elements found")
|
||||
}
|
||||
|
||||
// Major
|
||||
if !containsOnly(parts[0], numbers) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
|
||||
}
|
||||
if hasLeadingZeroes(parts[0]) {
|
||||
return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
|
||||
}
|
||||
major, err := strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
|
||||
// Minor
|
||||
if !containsOnly(parts[1], numbers) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
|
||||
}
|
||||
if hasLeadingZeroes(parts[1]) {
|
||||
return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
|
||||
}
|
||||
minor, err := strconv.ParseUint(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
|
||||
v := Version{}
|
||||
v.Major = major
|
||||
v.Minor = minor
|
||||
|
||||
var build, prerelease []string
|
||||
patchStr := parts[2]
|
||||
|
||||
if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
|
||||
build = strings.Split(patchStr[buildIndex+1:], ".")
|
||||
patchStr = patchStr[:buildIndex]
|
||||
}
|
||||
|
||||
if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
|
||||
prerelease = strings.Split(patchStr[preIndex+1:], ".")
|
||||
patchStr = patchStr[:preIndex]
|
||||
}
|
||||
|
||||
if !containsOnly(patchStr, numbers) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
|
||||
}
|
||||
if hasLeadingZeroes(patchStr) {
|
||||
return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
|
||||
}
|
||||
patch, err := strconv.ParseUint(patchStr, 10, 64)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
|
||||
v.Patch = patch
|
||||
|
||||
// Prerelease
|
||||
for _, prstr := range prerelease {
|
||||
parsedPR, err := NewPRVersion(prstr)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
v.Pre = append(v.Pre, parsedPR)
|
||||
}
|
||||
|
||||
// Build meta data
|
||||
for _, str := range build {
|
||||
if len(str) == 0 {
|
||||
return Version{}, errors.New("Build meta data is empty")
|
||||
}
|
||||
if !containsOnly(str, alphanum) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
|
||||
}
|
||||
v.Build = append(v.Build, str)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// MustParse is like Parse but panics if the version cannot be parsed.
|
||||
func MustParse(s string) Version {
|
||||
v, err := Parse(s)
|
||||
if err != nil {
|
||||
panic(`semver: Parse(` + s + `): ` + err.Error())
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// PRVersion represents a PreRelease Version
|
||||
type PRVersion struct {
|
||||
VersionStr string
|
||||
VersionNum uint64
|
||||
IsNum bool
|
||||
}
|
||||
|
||||
// NewPRVersion creates a new valid prerelease version
|
||||
func NewPRVersion(s string) (PRVersion, error) {
|
||||
if len(s) == 0 {
|
||||
return PRVersion{}, errors.New("Prerelease is empty")
|
||||
}
|
||||
v := PRVersion{}
|
||||
if containsOnly(s, numbers) {
|
||||
if hasLeadingZeroes(s) {
|
||||
return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
|
||||
}
|
||||
num, err := strconv.ParseUint(s, 10, 64)
|
||||
|
||||
// Might never be hit, but just in case
|
||||
if err != nil {
|
||||
return PRVersion{}, err
|
||||
}
|
||||
v.VersionNum = num
|
||||
v.IsNum = true
|
||||
} else if containsOnly(s, alphanum) {
|
||||
v.VersionStr = s
|
||||
v.IsNum = false
|
||||
} else {
|
||||
return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// IsNumeric checks if prerelease-version is numeric
|
||||
func (v PRVersion) IsNumeric() bool {
|
||||
return v.IsNum
|
||||
}
|
||||
|
||||
// Compare compares two PreRelease Versions v and o:
|
||||
// -1 == v is less than o
|
||||
// 0 == v is equal to o
|
||||
// 1 == v is greater than o
|
||||
func (v PRVersion) Compare(o PRVersion) int {
|
||||
if v.IsNum && !o.IsNum {
|
||||
return -1
|
||||
} else if !v.IsNum && o.IsNum {
|
||||
return 1
|
||||
} else if v.IsNum && o.IsNum {
|
||||
if v.VersionNum == o.VersionNum {
|
||||
return 0
|
||||
} else if v.VersionNum > o.VersionNum {
|
||||
return 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
} else { // both are Alphas
|
||||
if v.VersionStr == o.VersionStr {
|
||||
return 0
|
||||
} else if v.VersionStr > o.VersionStr {
|
||||
return 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PreRelease version to string
|
||||
func (v PRVersion) String() string {
|
||||
if v.IsNum {
|
||||
return strconv.FormatUint(v.VersionNum, 10)
|
||||
}
|
||||
return v.VersionStr
|
||||
}
|
||||
|
||||
func containsOnly(s string, set string) bool {
	return strings.IndexFunc(s, func(r rune) bool {
		return !strings.ContainsRune(set, r)
	}) == -1
}

func hasLeadingZeroes(s string) bool {
	return len(s) > 1 && s[0] == '0'
}

// NewBuildVersion creates a new valid build version
func NewBuildVersion(s string) (string, error) {
	if len(s) == 0 {
		return "", errors.New("Buildversion is empty")
	}
	if !containsOnly(s, alphanum) {
		return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
	}
	return s, nil
}
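The blang/semver files above are removed from vendor/ by this change. For reference, a minimal sketch of the Parse/Compare API they provided (version strings are illustrative):

package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Parse requires a full Major.Minor.Patch version; ParseTolerant also
	// accepts forms such as "v1.12" and pads the missing patch component.
	a := semver.MustParse("1.12.1-rc.1")
	b, err := semver.ParseTolerant("v1.12.1")
	if err != nil {
		panic(err)
	}

	// Compare returns -1, 0 or 1; EQ/GT/GTE/LT/LTE wrap it.
	fmt.Println(a.Compare(b)) // -1: the rc pre-release sorts before the final release
	fmt.Println(b.GTE(a))     // true
}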
28 vendor/github.com/blang/semver/sort.go (generated, vendored)
@@ -1,28 +0,0 @@
package semver

import (
	"sort"
)

// Versions represents multiple versions.
type Versions []Version

// Len returns length of version collection
func (s Versions) Len() int {
	return len(s)
}

// Swap swaps two versions inside the collection by its indices
func (s Versions) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less checks if version at index i is less than version at index j
func (s Versions) Less(i, j int) bool {
	return s[i].LT(s[j])
}

// Sort sorts a slice of versions
func Sort(versions []Version) {
	sort.Sort(Versions(versions))
}
30 vendor/github.com/blang/semver/sql.go (generated, vendored)
@@ -1,30 +0,0 @@
package semver

import (
	"database/sql/driver"
	"fmt"
)

// Scan implements the database/sql.Scanner interface.
func (v *Version) Scan(src interface{}) (err error) {
	var str string
	switch src := src.(type) {
	case string:
		str = src
	case []byte:
		str = string(src)
	default:
		return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
	}

	if t, err := Parse(str); err == nil {
		*v = t
	}

	return
}

// Value implements the database/sql/driver.Valuer interface.
func (v Version) Value() (driver.Value, error) {
	return v.String(), nil
}
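sql.go, also deleted here, let a Version be stored and loaded through database/sql by implementing the Scanner and Valuer interfaces. A short sketch of that behaviour, without a real database (values illustrative):

package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Scan accepts string or []byte column values and parses them in place.
	var v semver.Version
	if err := v.Scan([]byte("1.12.1")); err != nil {
		panic(err)
	}

	// Value renders the version back to its canonical string form for storage.
	val, _ := v.Value()
	fmt.Println(val) // 1.12.1
}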
82 vendor/github.com/container-orchestrated-devices/container-device-interface/internal/multierror/multierror.go (generated, vendored, new file)
@@ -0,0 +1,82 @@
/*
   Copyright © 2022 The CDI Authors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package multierror

import (
	"strings"
)

// New combines several errors into a single error. Parameters that are nil are
// ignored. If no errors are passed in or all parameters are nil, then the
// result is also nil.
func New(errors ...error) error {
	// Filter out nil entries.
	numErrors := 0
	for _, err := range errors {
		if err != nil {
			errors[numErrors] = err
			numErrors++
		}
	}
	if numErrors == 0 {
		return nil
	}
	return multiError(errors[0:numErrors])
}

// multiError is the underlying implementation used by New.
//
// Beware that a null multiError is not the same as a nil error.
type multiError []error

// multiError returns all individual error strings concatenated with "\n"
func (e multiError) Error() string {
	var builder strings.Builder
	for i, err := range e {
		if i > 0 {
			_, _ = builder.WriteString("\n")
		}
		_, _ = builder.WriteString(err.Error())
	}
	return builder.String()
}

// Append returns a new multi error all errors concatenated. Errors that are
// multi errors get flattened, nil is ignored.
func Append(err error, errors ...error) error {
	var result multiError
	if m, ok := err.(multiError); ok {
		result = m
	} else if err != nil {
		result = append(result, err)
	}

	for _, e := range errors {
		if e == nil {
			continue
		}
		if m, ok := e.(multiError); ok {
			result = append(result, m...)
		} else {
			result = append(result, e)
		}
	}
	if len(result) == 0 {
		return nil
	}
	return result
}
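The new internal multierror package replaces the hashicorp/go-multierror dependency in the CDI code (see the import change in cache.go below). Because it lives under internal/, it is only importable from within the container-device-interface module; the fragment below, assumed to run inside that module, just illustrates the aggregation semantics:

// New drops nil entries and returns nil when nothing is left;
// the combined error prints one underlying error per line.
combined := multierror.New(nil, errors.New("spec A failed"), errors.New("spec B failed"))

// Append flattens nested multi errors and ignores nil, so it can be used
// incrementally, e.g. result = multierror.Append(result, errs...).
result := multierror.Append(combined, nil, errors.New("watcher failed"))
fmt.Println(result) // three lines, one per collected error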
@@ -17,9 +17,9 @@
|
||||
package cdi
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -34,14 +34,14 @@ const (
|
||||
func UpdateAnnotations(annotations map[string]string, plugin string, deviceID string, devices []string) (map[string]string, error) {
|
||||
key, err := AnnotationKey(plugin, deviceID)
|
||||
if err != nil {
|
||||
return annotations, errors.Wrap(err, "CDI annotation failed")
|
||||
return annotations, fmt.Errorf("CDI annotation failed: %w", err)
|
||||
}
|
||||
if _, ok := annotations[key]; ok {
|
||||
return annotations, errors.Errorf("CDI annotation failed, key %q used", key)
|
||||
return annotations, fmt.Errorf("CDI annotation failed, key %q used", key)
|
||||
}
|
||||
value, err := AnnotationValue(devices)
|
||||
if err != nil {
|
||||
return annotations, errors.Wrap(err, "CDI annotation failed")
|
||||
return annotations, fmt.Errorf("CDI annotation failed: %w", err)
|
||||
}
|
||||
|
||||
if annotations == nil {
|
||||
@@ -70,7 +70,7 @@ func ParseAnnotations(annotations map[string]string) ([]string, []string, error)
|
||||
}
|
||||
for _, d := range strings.Split(value, ",") {
|
||||
if !IsQualifiedName(d) {
|
||||
return nil, nil, errors.Errorf("invalid CDI device name %q", d)
|
||||
return nil, nil, fmt.Errorf("invalid CDI device name %q", d)
|
||||
}
|
||||
devices = append(devices, d)
|
||||
}
|
||||
@@ -98,11 +98,11 @@ func AnnotationKey(pluginName, deviceID string) (string, error) {
|
||||
name := pluginName + "_" + strings.ReplaceAll(deviceID, "/", "_")
|
||||
|
||||
if len(name) > maxNameLen {
|
||||
return "", errors.Errorf("invalid plugin+deviceID %q, too long", name)
|
||||
return "", fmt.Errorf("invalid plugin+deviceID %q, too long", name)
|
||||
}
|
||||
|
||||
if c := rune(name[0]); !isAlphaNumeric(c) {
|
||||
return "", errors.Errorf("invalid name %q, first '%c' should be alphanumeric",
|
||||
return "", fmt.Errorf("invalid name %q, first '%c' should be alphanumeric",
|
||||
name, c)
|
||||
}
|
||||
if len(name) > 2 {
|
||||
@@ -111,13 +111,13 @@ func AnnotationKey(pluginName, deviceID string) (string, error) {
|
||||
case isAlphaNumeric(c):
|
||||
case c == '_' || c == '-' || c == '.':
|
||||
default:
|
||||
return "", errors.Errorf("invalid name %q, invalid charcter '%c'",
|
||||
return "", fmt.Errorf("invalid name %q, invalid charcter '%c'",
|
||||
name, c)
|
||||
}
|
||||
}
|
||||
}
|
||||
if c := rune(name[len(name)-1]); !isAlphaNumeric(c) {
|
||||
return "", errors.Errorf("invalid name %q, last '%c' should be alphanumeric",
|
||||
return "", fmt.Errorf("invalid name %q, last '%c' should be alphanumeric",
|
||||
name, c)
|
||||
}
|
||||
|
||||
|
||||
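The annotation-handling changes above only swap github.com/pkg/errors wrapping for fmt.Errorf with %w; the UpdateAnnotations/ParseAnnotations API itself is unchanged. A hedged sketch of how a device plugin and a runtime might round-trip a CDI device request through annotations (the plugin and device names are made up for illustration):

package main

import (
	"fmt"

	"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
)

func main() {
	// The plugin side attaches a CDI device request to container/pod annotations.
	anns, err := cdi.UpdateAnnotations(map[string]string{},
		"nvidia-device-plugin", "gpu0", []string{"nvidia.com/gpu=gpu0"})
	if err != nil {
		panic(err)
	}

	// The runtime side parses the annotations back into annotation keys and
	// fully qualified CDI device names.
	keys, devices, err := cdi.ParseAnnotations(anns)
	if err != nil {
		panic(err)
	}
	fmt.Println(keys, devices)
}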
106 vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache.go (generated, vendored)
@@ -17,16 +17,19 @@
|
||||
package cdi
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/container-orchestrated-devices/container-device-interface/internal/multierror"
|
||||
cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
oci "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Option is an option to change some aspect of default CDI behavior.
|
||||
@@ -93,7 +96,7 @@ func (c *Cache) configure(options ...Option) error {
|
||||
|
||||
for _, o := range options {
|
||||
if err = o(c); err != nil {
|
||||
return errors.Wrapf(err, "failed to apply cache options")
|
||||
return fmt.Errorf("failed to apply cache options: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -123,8 +126,8 @@ func (c *Cache) Refresh() error {
|
||||
|
||||
// collect and return cached errors, much like refresh() does it
|
||||
var result error
|
||||
for _, err := range c.errors {
|
||||
result = multierror.Append(result, err...)
|
||||
for _, errors := range c.errors {
|
||||
result = multierror.Append(result, errors...)
|
||||
}
|
||||
return result
|
||||
}
|
||||
@@ -155,7 +158,7 @@ func (c *Cache) refresh() error {
|
||||
return false
|
||||
case devPrio == oldPrio:
|
||||
devPath, oldPath := devSpec.GetPath(), oldSpec.GetPath()
|
||||
collectError(errors.Errorf("conflicting device %q (specs %q, %q)",
|
||||
collectError(fmt.Errorf("conflicting device %q (specs %q, %q)",
|
||||
name, devPath, oldPath), devPath, oldPath)
|
||||
conflicts[name] = struct{}{}
|
||||
}
|
||||
@@ -165,7 +168,7 @@ func (c *Cache) refresh() error {
|
||||
_ = scanSpecDirs(c.specDirs, func(path string, priority int, spec *Spec, err error) error {
|
||||
path = filepath.Clean(path)
|
||||
if err != nil {
|
||||
collectError(errors.Wrapf(err, "failed to load CDI Spec"), path)
|
||||
collectError(fmt.Errorf("failed to load CDI Spec %w", err), path)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -194,11 +197,7 @@ func (c *Cache) refresh() error {
|
||||
c.devices = devices
|
||||
c.errors = specErrors
|
||||
|
||||
if len(result) > 0 {
|
||||
return multierror.Append(nil, result...)
|
||||
}
|
||||
|
||||
return nil
|
||||
return multierror.New(result...)
|
||||
}
|
||||
|
||||
// RefreshIfRequired triggers a refresh if necessary.
|
||||
@@ -219,7 +218,7 @@ func (c *Cache) InjectDevices(ociSpec *oci.Spec, devices ...string) ([]string, e
|
||||
var unresolved []string
|
||||
|
||||
if ociSpec == nil {
|
||||
return devices, errors.Errorf("can't inject devices, nil OCI Spec")
|
||||
return devices, fmt.Errorf("can't inject devices, nil OCI Spec")
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
@@ -244,22 +243,33 @@ func (c *Cache) InjectDevices(ociSpec *oci.Spec, devices ...string) ([]string, e
|
||||
}
|
||||
|
||||
if unresolved != nil {
|
||||
return unresolved, errors.Errorf("unresolvable CDI devices %s",
|
||||
return unresolved, fmt.Errorf("unresolvable CDI devices %s",
|
||||
strings.Join(devices, ", "))
|
||||
}
|
||||
|
||||
if err := edits.Apply(ociSpec); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to inject devices")
|
||||
return nil, fmt.Errorf("failed to inject devices: %w", err)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// WriteSpec writes a Spec file with the given content. Priority is used
|
||||
// as an index into the list of Spec directories to pick a directory for
|
||||
// the file, adjusting for any under- or overflows. If name has a "json"
|
||||
// or "yaml" extension it choses the encoding. Otherwise JSON encoding
|
||||
// is used with a "json" extension.
|
||||
// highestPrioritySpecDir returns the Spec directory with highest priority
|
||||
// and its priority.
|
||||
func (c *Cache) highestPrioritySpecDir() (string, int) {
|
||||
if len(c.specDirs) == 0 {
|
||||
return "", -1
|
||||
}
|
||||
|
||||
prio := len(c.specDirs) - 1
|
||||
dir := c.specDirs[prio]
|
||||
|
||||
return dir, prio
|
||||
}
|
||||
|
||||
// WriteSpec writes a Spec file with the given content into the highest
|
||||
// priority Spec directory. If name has a "json" or "yaml" extension it
|
||||
// chooses the encoding. Otherwise the default YAML encoding is used.
|
||||
func (c *Cache) WriteSpec(raw *cdi.Spec, name string) error {
|
||||
var (
|
||||
specDir string
|
||||
@@ -269,23 +279,51 @@ func (c *Cache) WriteSpec(raw *cdi.Spec, name string) error {
|
||||
err error
|
||||
)
|
||||
|
||||
if len(c.specDirs) == 0 {
|
||||
specDir, prio = c.highestPrioritySpecDir()
|
||||
if specDir == "" {
|
||||
return errors.New("no Spec directories to write to")
|
||||
}
|
||||
|
||||
prio = len(c.specDirs) - 1
|
||||
specDir = c.specDirs[prio]
|
||||
path = filepath.Join(specDir, name)
|
||||
if ext := filepath.Ext(path); ext != ".json" && ext != ".yaml" {
|
||||
path += ".json"
|
||||
path += defaultSpecExt
|
||||
}
|
||||
|
||||
spec, err = NewSpec(raw, path, prio)
|
||||
spec, err = newSpec(raw, path, prio)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return spec.Write(true)
|
||||
return spec.write(true)
|
||||
}
|
||||
|
||||
// RemoveSpec removes a Spec with the given name from the highest
|
||||
// priority Spec directory. This function can be used to remove a
|
||||
// Spec previously written by WriteSpec(). If the file exists and
|
||||
// its removal fails RemoveSpec returns an error.
|
||||
func (c *Cache) RemoveSpec(name string) error {
|
||||
var (
|
||||
specDir string
|
||||
path string
|
||||
err error
|
||||
)
|
||||
|
||||
specDir, _ = c.highestPrioritySpecDir()
|
||||
if specDir == "" {
|
||||
return errors.New("no Spec directories to remove from")
|
||||
}
|
||||
|
||||
path = filepath.Join(specDir, name)
|
||||
if ext := filepath.Ext(path); ext != ".json" && ext != ".yaml" {
|
||||
path += defaultSpecExt
|
||||
}
|
||||
|
||||
err = os.Remove(path)
|
||||
if err != nil && errors.Is(err, fs.ErrNotExist) {
|
||||
err = nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// GetDevice returns the cached device for the given qualified name.
|
||||
@@ -370,7 +408,17 @@ func (c *Cache) GetVendorSpecs(vendor string) []*Spec {
|
||||
// GetSpecErrors returns all errors encountered for the spec during the
|
||||
// last cache refresh.
|
||||
func (c *Cache) GetSpecErrors(spec *Spec) []error {
|
||||
return c.errors[spec.GetPath()]
|
||||
var errors []error
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
if errs, ok := c.errors[spec.GetPath()]; ok {
|
||||
errors = make([]error, len(errs))
|
||||
copy(errors, errs)
|
||||
}
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// GetErrors returns all errors encountered during the last
|
||||
@@ -436,7 +484,7 @@ func (w *watch) setup(dirs []string, dirErrors map[string]error) {
|
||||
w.watcher, err = fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
for _, dir := range dirs {
|
||||
dirErrors[dir] = errors.Wrap(err, "failed to create watcher")
|
||||
dirErrors[dir] = fmt.Errorf("failed to create watcher: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -519,7 +567,7 @@ func (w *watch) update(dirErrors map[string]error, removed ...string) bool {
|
||||
update = true
|
||||
} else {
|
||||
w.tracked[dir] = false
|
||||
dirErrors[dir] = errors.Wrap(err, "failed to monitor for changes")
|
||||
dirErrors[dir] = fmt.Errorf("failed to monitor for changes: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -17,13 +17,13 @@
|
||||
package cdi
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
oci "github.com/opencontainers/runtime-spec/specs-go"
|
||||
ocigen "github.com/opencontainers/runtime-tools/generate"
|
||||
@@ -140,7 +140,7 @@ func (e *ContainerEdits) Apply(spec *oci.Spec) error {
|
||||
ensureOCIHooks(spec)
|
||||
spec.Hooks.StartContainer = append(spec.Hooks.StartContainer, h.ToOCI())
|
||||
default:
|
||||
return errors.Errorf("unknown hook name %q", h.HookName)
|
||||
return fmt.Errorf("unknown hook name %q", h.HookName)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -154,7 +154,7 @@ func (e *ContainerEdits) Validate() error {
|
||||
}
|
||||
|
||||
if err := ValidateEnv(e.Env); err != nil {
|
||||
return errors.Wrap(err, "invalid container edits")
|
||||
return fmt.Errorf("invalid container edits: %w", err)
|
||||
}
|
||||
for _, d := range e.DeviceNodes {
|
||||
if err := (&DeviceNode{d}).Validate(); err != nil {
|
||||
@@ -209,7 +209,7 @@ func (e *ContainerEdits) isEmpty() bool {
|
||||
func ValidateEnv(env []string) error {
|
||||
for _, v := range env {
|
||||
if strings.IndexByte(v, byte('=')) <= 0 {
|
||||
return errors.Errorf("invalid environment variable %q", v)
|
||||
return fmt.Errorf("invalid environment variable %q", v)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -234,11 +234,11 @@ func (d *DeviceNode) Validate() error {
|
||||
return errors.New("invalid (empty) device path")
|
||||
}
|
||||
if _, ok := validTypes[d.Type]; !ok {
|
||||
return errors.Errorf("device %q: invalid type %q", d.Path, d.Type)
|
||||
return fmt.Errorf("device %q: invalid type %q", d.Path, d.Type)
|
||||
}
|
||||
for _, bit := range d.Permissions {
|
||||
if bit != 'r' && bit != 'w' && bit != 'm' {
|
||||
return errors.Errorf("device %q: invalid persmissions %q",
|
||||
return fmt.Errorf("device %q: invalid persmissions %q",
|
||||
d.Path, d.Permissions)
|
||||
}
|
||||
}
|
||||
@@ -253,13 +253,13 @@ type Hook struct {
|
||||
// Validate a hook.
|
||||
func (h *Hook) Validate() error {
|
||||
if _, ok := validHookNames[h.HookName]; !ok {
|
||||
return errors.Errorf("invalid hook name %q", h.HookName)
|
||||
return fmt.Errorf("invalid hook name %q", h.HookName)
|
||||
}
|
||||
if h.Path == "" {
|
||||
return errors.Errorf("invalid hook %q with empty path", h.HookName)
|
||||
return fmt.Errorf("invalid hook %q with empty path", h.HookName)
|
||||
}
|
||||
if err := ValidateEnv(h.Env); err != nil {
|
||||
return errors.Wrapf(err, "invalid hook %q", h.HookName)
|
||||
return fmt.Errorf("invalid hook %q: %w", h.HookName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -298,7 +298,8 @@ func sortMounts(specgen *ocigen.Generator) {
|
||||
// orderedMounts defines how to sort an OCI Spec Mount slice.
|
||||
// This is almost the same implementation as used by CRI-O and Docker,
|
||||
// with a minor tweak for stable sorting order (easier to test):
|
||||
// https://github.com/moby/moby/blob/17.05.x/daemon/volumes.go#L26
|
||||
//
|
||||
// https://github.com/moby/moby/blob/17.05.x/daemon/volumes.go#L26
|
||||
type orderedMounts []oci.Mount
|
||||
|
||||
// Len returns the number of mounts. Used in sorting.
|
||||
|
||||
@@ -20,8 +20,9 @@
|
||||
package cdi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
runc "github.com/opencontainers/runc/libcontainer/devices"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// fillMissingInfo fills in missing mandatory attributes from the host device.
|
||||
@@ -36,14 +37,14 @@ func (d *DeviceNode) fillMissingInfo() error {
|
||||
|
||||
hostDev, err := runc.DeviceFromPath(d.HostPath, "rwm")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to stat CDI host device %q", d.HostPath)
|
||||
return fmt.Errorf("failed to stat CDI host device %q: %w", d.HostPath, err)
|
||||
}
|
||||
|
||||
if d.Type == "" {
|
||||
d.Type = string(hostDev.Type)
|
||||
} else {
|
||||
if d.Type != string(hostDev.Type) {
|
||||
return errors.Errorf("CDI device (%q, %q), host type mismatch (%s, %s)",
|
||||
return fmt.Errorf("CDI device (%q, %q), host type mismatch (%s, %s)",
|
||||
d.Path, d.HostPath, d.Type, string(hostDev.Type))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,9 +17,10 @@
|
||||
package cdi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
oci "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Device represents a CDI device of a Spec.
|
||||
@@ -69,10 +70,10 @@ func (d *Device) validate() error {
|
||||
}
|
||||
edits := d.edits()
|
||||
if edits.isEmpty() {
|
||||
return errors.Errorf("invalid device, empty device edits")
|
||||
return fmt.Errorf("invalid device, empty device edits")
|
||||
}
|
||||
if err := edits.Validate(); err != nil {
|
||||
return errors.Wrapf(err, "invalid device %q", d.Name)
|
||||
return fmt.Errorf("invalid device %q: %w", d.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
125 vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/doc.go (generated, vendored)
@@ -46,7 +46,6 @@
|
||||
// "fmt"
|
||||
// "strings"
|
||||
//
|
||||
// "github.com/pkg/errors"
|
||||
// log "github.com/sirupsen/logrus"
|
||||
//
|
||||
// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
|
||||
@@ -58,7 +57,7 @@
|
||||
//
|
||||
// unresolved, err := cdi.GetRegistry().InjectDevices(spec, devices)
|
||||
// if err != nil {
|
||||
// return errors.Wrap(err, "CDI device injection failed")
|
||||
// return fmt.Errorf("CDI device injection failed: %w", err)
|
||||
// }
|
||||
//
|
||||
// log.Debug("CDI-updated OCI Spec: %s", dumpSpec(spec))
|
||||
@@ -90,7 +89,6 @@
|
||||
// "fmt"
|
||||
// "strings"
|
||||
//
|
||||
// "github.com/pkg/errors"
|
||||
// log "github.com/sirupsen/logrus"
|
||||
//
|
||||
// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
|
||||
@@ -115,7 +113,7 @@
|
||||
//
|
||||
// unresolved, err := registry.InjectDevices(spec, devices)
|
||||
// if err != nil {
|
||||
// return errors.Wrap(err, "CDI device injection failed")
|
||||
// return fmt.Errorf("CDI device injection failed: %w", err)
|
||||
// }
|
||||
//
|
||||
// log.Debug("CDI-updated OCI Spec: %s", dumpSpec(spec))
|
||||
@@ -124,10 +122,15 @@
|
||||
//
|
||||
// Generated Spec Files, Multiple Directories, Device Precedence
|
||||
//
|
||||
// There are systems where the set of available or usable CDI devices
|
||||
// changes dynamically and this needs to be reflected in the CDI Specs.
|
||||
// This is done by dynamically regenerating CDI Spec files which are
|
||||
// affected by these changes.
|
||||
// It is often necessary to generate Spec files dynamically. On some
|
||||
// systems the available or usable set of CDI devices might change
|
||||
// dynamically which then needs to be reflected in CDI Specs. For
|
||||
// some device classes it makes sense to enumerate the available
|
||||
// devices at every boot and generate Spec file entries for each
|
||||
// device found. Some CDI devices might need special client- or
|
||||
// request-specific configuration which can only be fulfilled by
|
||||
// dynamically generated client-specific entries in transient Spec
|
||||
// files.
|
||||
//
|
||||
// CDI can collect Spec files from multiple directories. Spec files are
|
||||
// automatically assigned priorities according to which directory they
|
||||
@@ -141,7 +144,111 @@
|
||||
// separating dynamically generated CDI Spec files from static ones.
|
||||
// The default directories are '/etc/cdi' and '/var/run/cdi'. By putting
|
||||
// dynamically generated Spec files under '/var/run/cdi', those take
|
||||
// precedence over static ones in '/etc/cdi'.
|
||||
// precedence over static ones in '/etc/cdi'. With this scheme, static
|
||||
// Spec files, typically installed by distro-specific packages, go into
|
||||
// '/etc/cdi' while all the dynamically generated Spec files, transient
|
||||
// or other, go into '/var/run/cdi'.
|
||||
//
|
||||
// Spec File Generation
|
||||
//
|
||||
// CDI offers two functions for writing and removing dynamically generated
|
||||
// Specs from CDI Spec directories. These functions, WriteSpec() and
|
||||
// RemoveSpec() implicitly follow the principle of separating dynamic Specs
|
||||
// from the rest and therefore always write to and remove Specs from the
|
||||
// last configured directory.
|
||||
//
|
||||
// Corresponding functions are also provided for generating names for Spec
|
||||
// files. These functions follow a simple naming convention to ensure that
|
||||
// multiple entities generating Spec files simultaneously on the same host
|
||||
// do not end up using conflicting Spec file names. GenerateSpecName(),
|
||||
// GenerateNameForSpec(), GenerateTransientSpecName(), and
|
||||
// GenerateTransientNameForSpec() all generate names which can be passed
|
||||
// as such to WriteSpec() and subsequently to RemoveSpec().
|
||||
//
|
||||
// Generating a Spec file for a vendor/device class can be done with a
|
||||
// code snippet similar to the following:
|
||||
//
|
||||
// import (
|
||||
// "fmt"
|
||||
// ...
|
||||
// "github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
|
||||
// )
|
||||
//
|
||||
// func generateDeviceSpecs() error {
|
||||
// registry := cdi.GetRegistry()
|
||||
// spec := &specs.Spec{
|
||||
// Version: specs.CurrentVersion,
|
||||
// Kind: vendor+"/"+class,
|
||||
// }
|
||||
//
|
||||
// for _, dev := range enumerateDevices() {
|
||||
// spec.Devices = append(spec.Devices, specs.Device{
|
||||
// Name: dev.Name,
|
||||
// ContainerEdits: getContainerEditsForDevice(dev),
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// specName, err := cdi.GenerateNameForSpec(spec)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to generate Spec name: %w", err)
|
||||
// }
|
||||
//
|
||||
// return registry.SpecDB().WriteSpec(spec, specName)
|
||||
// }
|
||||
//
|
||||
// Similarly, generating and later cleaning up transient Spec files can be
|
||||
// done with code fragments similar to the following. These transient Spec
|
||||
// files are temporary Spec files with container-specific parametrization.
|
||||
// They are typically created before the associated container is created
|
||||
// and removed once that container is removed.
|
||||
//
|
||||
// import (
|
||||
// "fmt"
|
||||
// ...
|
||||
// "github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
// "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
|
||||
// )
|
||||
//
|
||||
// func generateTransientSpec(ctr Container) error {
|
||||
// registry := cdi.GetRegistry()
|
||||
// devices := getContainerDevs(ctr, vendor, class)
|
||||
// spec := &specs.Spec{
|
||||
// Version: specs.CurrentVersion,
|
||||
// Kind: vendor+"/"+class,
|
||||
// }
|
||||
//
|
||||
// for _, dev := range devices {
|
||||
// spec.Devices = append(spec.Devices, specs.Device{
|
||||
// // the generated name needs to be unique within the
|
||||
// // vendor/class domain on the host/node.
|
||||
// Name: generateUniqueDevName(dev, ctr),
|
||||
// ContainerEdits: getEditsForContainer(dev),
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// // transientID is expected to guarantee that the Spec file name
|
||||
// // generated using <vendor, class, transientID> is unique within
|
||||
// // the host/node. If more than one device is allocated with the
|
||||
// // same vendor/class domain, either all generated Spec entries
|
||||
// // should go to a single Spec file (like in this sample snippet),
|
||||
// // or transientID should be unique for each generated Spec file.
|
||||
// transientID := getSomeSufficientlyUniqueIDForContainer(ctr)
|
||||
// specName, err := cdi.GenerateNameForTransientSpec(vendor, class, transientID)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to generate Spec name: %w", err)
|
||||
// }
|
||||
//
|
||||
// return registry.SpecDB().WriteSpec(spec, specName)
|
||||
// }
|
||||
//
|
||||
// func removeTransientSpec(ctr Container) error {
|
||||
// registry := cdi.GetRegistry()
|
||||
// transientID := getSomeSufficientlyUniqueIDForContainer(ctr)
|
||||
// specName := cdi.GenerateNameForTransientSpec(vendor, class, transientID)
|
||||
//
|
||||
// return registry.SpecDB().RemoveSpec(specName)
|
||||
// }
|
||||
//
|
||||
// CDI Spec Validation
|
||||
//
|
||||
|
||||
@@ -17,9 +17,8 @@
|
||||
package cdi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// QualifiedName returns the qualified name for a device.
|
||||
@@ -50,23 +49,23 @@ func ParseQualifiedName(device string) (string, string, string, error) {
|
||||
vendor, class, name := ParseDevice(device)
|
||||
|
||||
if vendor == "" {
|
||||
return "", "", device, errors.Errorf("unqualified device %q, missing vendor", device)
|
||||
return "", "", device, fmt.Errorf("unqualified device %q, missing vendor", device)
|
||||
}
|
||||
if class == "" {
|
||||
return "", "", device, errors.Errorf("unqualified device %q, missing class", device)
|
||||
return "", "", device, fmt.Errorf("unqualified device %q, missing class", device)
|
||||
}
|
||||
if name == "" {
|
||||
return "", "", device, errors.Errorf("unqualified device %q, missing device name", device)
|
||||
return "", "", device, fmt.Errorf("unqualified device %q, missing device name", device)
|
||||
}
|
||||
|
||||
if err := ValidateVendorName(vendor); err != nil {
|
||||
return "", "", device, errors.Wrapf(err, "invalid device %q", device)
|
||||
return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
|
||||
}
|
||||
if err := ValidateClassName(class); err != nil {
|
||||
return "", "", device, errors.Wrapf(err, "invalid device %q", device)
|
||||
return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
|
||||
}
|
||||
if err := ValidateDeviceName(name); err != nil {
|
||||
return "", "", device, errors.Wrapf(err, "invalid device %q", device)
|
||||
return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
|
||||
}
|
||||
|
||||
return vendor, class, name, nil
|
||||
@@ -115,22 +114,22 @@ func ParseQualifier(kind string) (string, string) {
|
||||
// - underscore, dash, and dot ('_', '-', and '.')
|
||||
func ValidateVendorName(vendor string) error {
|
||||
if vendor == "" {
|
||||
return errors.Errorf("invalid (empty) vendor name")
|
||||
return fmt.Errorf("invalid (empty) vendor name")
|
||||
}
|
||||
if !isLetter(rune(vendor[0])) {
|
||||
return errors.Errorf("invalid vendor %q, should start with letter", vendor)
|
||||
return fmt.Errorf("invalid vendor %q, should start with letter", vendor)
|
||||
}
|
||||
for _, c := range string(vendor[1 : len(vendor)-1]) {
|
||||
switch {
|
||||
case isAlphaNumeric(c):
|
||||
case c == '_' || c == '-' || c == '.':
|
||||
default:
|
||||
return errors.Errorf("invalid character '%c' in vendor name %q",
|
||||
return fmt.Errorf("invalid character '%c' in vendor name %q",
|
||||
c, vendor)
|
||||
}
|
||||
}
|
||||
if !isAlphaNumeric(rune(vendor[len(vendor)-1])) {
|
||||
return errors.Errorf("invalid vendor %q, should end with a letter or digit", vendor)
|
||||
return fmt.Errorf("invalid vendor %q, should end with a letter or digit", vendor)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -143,22 +142,22 @@ func ValidateVendorName(vendor string) error {
|
||||
// - underscore and dash ('_', '-')
|
||||
func ValidateClassName(class string) error {
|
||||
if class == "" {
|
||||
return errors.Errorf("invalid (empty) device class")
|
||||
return fmt.Errorf("invalid (empty) device class")
|
||||
}
|
||||
if !isLetter(rune(class[0])) {
|
||||
return errors.Errorf("invalid class %q, should start with letter", class)
|
||||
return fmt.Errorf("invalid class %q, should start with letter", class)
|
||||
}
|
||||
for _, c := range string(class[1 : len(class)-1]) {
|
||||
switch {
|
||||
case isAlphaNumeric(c):
|
||||
case c == '_' || c == '-':
|
||||
default:
|
||||
return errors.Errorf("invalid character '%c' in device class %q",
|
||||
return fmt.Errorf("invalid character '%c' in device class %q",
|
||||
c, class)
|
||||
}
|
||||
}
|
||||
if !isAlphaNumeric(rune(class[len(class)-1])) {
|
||||
return errors.Errorf("invalid class %q, should end with a letter or digit", class)
|
||||
return fmt.Errorf("invalid class %q, should end with a letter or digit", class)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -170,10 +169,10 @@ func ValidateClassName(class string) error {
|
||||
// - underscore, dash, dot, colon ('_', '-', '.', ':')
|
||||
func ValidateDeviceName(name string) error {
|
||||
if name == "" {
|
||||
return errors.Errorf("invalid (empty) device name")
|
||||
return fmt.Errorf("invalid (empty) device name")
|
||||
}
|
||||
if !isAlphaNumeric(rune(name[0])) {
|
||||
return errors.Errorf("invalid class %q, should start with a letter or digit", name)
|
||||
return fmt.Errorf("invalid class %q, should start with a letter or digit", name)
|
||||
}
|
||||
if len(name) == 1 {
|
||||
return nil
|
||||
@@ -183,12 +182,12 @@ func ValidateDeviceName(name string) error {
|
||||
case isAlphaNumeric(c):
|
||||
case c == '_' || c == '-' || c == '.' || c == ':':
|
||||
default:
|
||||
return errors.Errorf("invalid character '%c' in device name %q",
|
||||
return fmt.Errorf("invalid character '%c' in device name %q",
|
||||
c, name)
|
||||
}
|
||||
}
|
||||
if !isAlphaNumeric(rune(name[len(name)-1])) {
|
||||
return errors.Errorf("invalid name %q, should end with a letter or digit", name)
|
||||
return fmt.Errorf("invalid name %q, should end with a letter or digit", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -107,6 +107,7 @@ type RegistrySpecDB interface {
|
||||
GetVendorSpecs(vendor string) []*Spec
|
||||
GetSpecErrors(*Spec) []error
|
||||
WriteSpec(raw *cdi.Spec, name string) error
|
||||
RemoveSpec(name string) error
|
||||
}
|
||||
|
||||
type registry struct {
|
||||
|
||||
139 vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec.go (generated, vendored)
@@ -18,28 +18,25 @@ package cdi
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
oci "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
)
|
||||
|
||||
var (
|
||||
// Valid CDI Spec versions.
|
||||
validSpecVersions = map[string]struct{}{
|
||||
"0.1.0": {},
|
||||
"0.2.0": {},
|
||||
"0.3.0": {},
|
||||
"0.4.0": {},
|
||||
"0.5.0": {},
|
||||
}
|
||||
const (
|
||||
// defaultSpecExt is the file extension for the default encoding.
|
||||
defaultSpecExt = ".yaml"
|
||||
)
|
||||
|
||||
var (
|
||||
// Externally set CDI Spec validation function.
|
||||
specValidator func(*cdi.Spec) error
|
||||
validatorLock sync.RWMutex
|
||||
@@ -69,18 +66,18 @@ func ReadSpec(path string, priority int) (*Spec, error) {
|
||||
case os.IsNotExist(err):
|
||||
return nil, err
|
||||
case err != nil:
|
||||
return nil, errors.Wrapf(err, "failed to read CDI Spec %q", path)
|
||||
return nil, fmt.Errorf("failed to read CDI Spec %q: %w", path, err)
|
||||
}
|
||||
|
||||
raw, err := ParseSpec(data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse CDI Spec %q", path)
|
||||
return nil, fmt.Errorf("failed to parse CDI Spec %q: %w", path, err)
|
||||
}
|
||||
if raw == nil {
|
||||
return nil, errors.Errorf("failed to parse CDI Spec %q, no Spec data", path)
|
||||
return nil, fmt.Errorf("failed to parse CDI Spec %q, no Spec data", path)
|
||||
}
|
||||
|
||||
spec, err := NewSpec(raw, path, priority)
|
||||
spec, err := newSpec(raw, path, priority)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -88,11 +85,11 @@ func ReadSpec(path string, priority int) (*Spec, error) {
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
// NewSpec creates a new Spec from the given CDI Spec data. The
|
||||
// newSpec creates a new Spec from the given CDI Spec data. The
|
||||
// Spec is marked as loaded from the given path with the given
|
||||
// priority. If Spec data validation fails NewSpec returns a nil
|
||||
// priority. If Spec data validation fails newSpec returns a nil
|
||||
// Spec and an error.
|
||||
func NewSpec(raw *cdi.Spec, path string, priority int) (*Spec, error) {
|
||||
func newSpec(raw *cdi.Spec, path string, priority int) (*Spec, error) {
|
||||
err := validateSpec(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -104,18 +101,22 @@ func NewSpec(raw *cdi.Spec, path string, priority int) (*Spec, error) {
|
||||
priority: priority,
|
||||
}
|
||||
|
||||
if ext := filepath.Ext(spec.path); ext != ".yaml" && ext != ".json" {
|
||||
spec.path += defaultSpecExt
|
||||
}
|
||||
|
||||
spec.vendor, spec.class = ParseQualifier(spec.Kind)
|
||||
|
||||
if spec.devices, err = spec.validate(); err != nil {
|
||||
return nil, errors.Wrap(err, "invalid CDI Spec")
|
||||
return nil, fmt.Errorf("invalid CDI Spec: %w", err)
|
||||
}
|
||||
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
// Write the CDI Spec to the file associated with it during instantiation
|
||||
// by NewSpec() or ReadSpec().
|
||||
func (s *Spec) Write(overwrite bool) error {
|
||||
// by newSpec() or ReadSpec().
|
||||
func (s *Spec) write(overwrite bool) error {
|
||||
var (
|
||||
data []byte
|
||||
dir string
|
||||
@@ -134,30 +135,30 @@ func (s *Spec) Write(overwrite bool) error {
|
||||
data, err = json.Marshal(s.Spec)
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to marshal Spec file")
|
||||
return fmt.Errorf("failed to marshal Spec file: %w", err)
|
||||
}
|
||||
|
||||
dir = filepath.Dir(s.path)
|
||||
err = os.MkdirAll(dir, 0o755)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create Spec dir")
|
||||
return fmt.Errorf("failed to create Spec dir: %w", err)
|
||||
}
|
||||
|
||||
tmp, err = os.CreateTemp(dir, "spec.*.tmp")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create Spec file")
|
||||
return fmt.Errorf("failed to create Spec file: %w", err)
|
||||
}
|
||||
_, err = tmp.Write(data)
|
||||
tmp.Close()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to write Spec file")
|
||||
return fmt.Errorf("failed to write Spec file: %w", err)
|
||||
}
|
||||
|
||||
err = renameIn(dir, filepath.Base(tmp.Name()), filepath.Base(s.path), overwrite)
|
||||
|
||||
if err != nil {
|
||||
os.Remove(tmp.Name())
|
||||
err = errors.Wrap(err, "failed to write Spec file")
|
||||
err = fmt.Errorf("failed to write Spec file: %w", err)
|
||||
}
|
||||
|
||||
return err
|
||||
@@ -203,6 +204,15 @@ func (s *Spec) validate() (map[string]*Device, error) {
|
||||
if err := validateVersion(s.Version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
minVersion, err := MinimumRequiredVersion(s.Spec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not determine minumum required version: %v", err)
|
||||
}
|
||||
if newVersion(minVersion).IsGreaterThan(newVersion(s.Version)) {
|
||||
return nil, fmt.Errorf("the spec version must be at least v%v", minVersion)
|
||||
}
|
||||
|
||||
if err := ValidateVendorName(s.vendor); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -217,10 +227,10 @@ func (s *Spec) validate() (map[string]*Device, error) {
|
||||
for _, d := range s.Devices {
|
||||
dev, err := newDevice(s, d)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed add device %q", d.Name)
|
||||
return nil, fmt.Errorf("failed add device %q: %w", d.Name, err)
|
||||
}
|
||||
if _, conflict := devices[d.Name]; conflict {
|
||||
return nil, errors.Errorf("invalid spec, multiple device %q", d.Name)
|
||||
return nil, fmt.Errorf("invalid spec, multiple device %q", d.Name)
|
||||
}
|
||||
devices[d.Name] = dev
|
||||
}
|
||||
@@ -230,8 +240,8 @@ func (s *Spec) validate() (map[string]*Device, error) {
|
||||
|
||||
// validateVersion checks whether the specified spec version is supported.
|
||||
func validateVersion(version string) error {
|
||||
if _, ok := validSpecVersions[version]; !ok {
|
||||
return errors.Errorf("invalid version %q", version)
|
||||
if !validSpecVersions.isValidVersion(version) {
|
||||
return fmt.Errorf("invalid version %q", version)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -242,14 +252,14 @@ func ParseSpec(data []byte) (*cdi.Spec, error) {
|
||||
var raw *cdi.Spec
|
||||
err := yaml.UnmarshalStrict(data, &raw)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal CDI Spec")
|
||||
return nil, fmt.Errorf("failed to unmarshal CDI Spec: %w", err)
|
||||
}
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
// SetSpecValidator sets a CDI Spec validator function. This function
|
||||
// is used for extra CDI Spec content validation whenever a Spec file
|
||||
// loaded (using ReadSpec() or NewSpec()) or written (Spec.Write()).
|
||||
// loaded (using ReadSpec()) or written (using WriteSpec()).
|
||||
func SetSpecValidator(fn func(*cdi.Spec) error) {
|
||||
validatorLock.Lock()
|
||||
defer validatorLock.Unlock()
|
||||
@@ -266,7 +276,72 @@ func validateSpec(raw *cdi.Spec) error {
|
||||
}
|
||||
err := specValidator(raw)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Spec validation failed")
|
||||
return fmt.Errorf("Spec validation failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GenerateSpecName generates a vendor+class scoped Spec file name. The
|
||||
// name can be passed to WriteSpec() to write a Spec file to the file
|
||||
// system.
|
||||
//
|
||||
// vendor and class should match the vendor and class of the CDI Spec.
|
||||
// The file name is generated without a ".json" or ".yaml" extension.
|
||||
// The caller can append the desired extension to choose a particular
|
||||
// encoding. Otherwise WriteSpec() will use its default encoding.
|
||||
//
|
||||
// This function always returns the same name for the same vendor/class
|
||||
// combination. Therefore it cannot be used as such to generate multiple
|
||||
// Spec file names for a single vendor and class.
|
||||
func GenerateSpecName(vendor, class string) string {
|
||||
return vendor + "-" + class
|
||||
}
|
||||
|
||||
// GenerateTransientSpecName generates a vendor+class scoped transient
|
||||
// Spec file name. The name can be passed to WriteSpec() to write a Spec
|
||||
// file to the file system.
|
||||
//
|
||||
// Transient Specs are those whose lifecycle is tied to that of some
|
||||
// external entity, for instance a container. vendor and class should
|
||||
// match the vendor and class of the CDI Spec. transientID should be
|
||||
// unique among all CDI users on the same host that might generate
|
||||
// transient Spec files using the same vendor/class combination. If
|
||||
// the external entity to which the lifecycle of the transient Spec
|
||||
// is tied to has a unique ID of its own, then this is usually a
|
||||
// good choice for transientID.
|
||||
//
|
||||
// The file name is generated without a ".json" or ".yaml" extension.
|
||||
// The caller can append the desired extension to choose a particular
|
||||
// encoding. Otherwise WriteSpec() will use its default encoding.
|
||||
func GenerateTransientSpecName(vendor, class, transientID string) string {
|
||||
transientID = strings.ReplaceAll(transientID, "/", "_")
|
||||
return GenerateSpecName(vendor, class) + "_" + transientID
|
||||
}
|
||||
|
||||
// GenerateNameForSpec generates a name for the given Spec using
|
||||
// GenerateSpecName with the vendor and class taken from the Spec.
|
||||
// On success it returns the generated name and a nil error. If
|
||||
// the Spec does not contain a valid vendor or class, it returns
|
||||
// an empty name and a non-nil error.
|
||||
func GenerateNameForSpec(raw *cdi.Spec) (string, error) {
|
||||
vendor, class := ParseQualifier(raw.Kind)
|
||||
if vendor == "" {
|
||||
return "", fmt.Errorf("invalid vendor/class %q in Spec", raw.Kind)
|
||||
}
|
||||
|
||||
return GenerateSpecName(vendor, class), nil
|
||||
}
|
||||
|
||||
// GenerateNameForTransientSpec generates a name for the given transient
|
||||
// Spec using GenerateTransientSpecName with the vendor and class taken
|
||||
// from the Spec. On success it returns the generated name and a nil error.
|
||||
// If the Spec does not contain a valid vendor or class, it returns an
|
||||
// empty name and a non-nil error.
|
||||
func GenerateNameForTransientSpec(raw *cdi.Spec, transientID string) (string, error) {
|
||||
vendor, class := ParseQualifier(raw.Kind)
|
||||
if vendor == "" {
|
||||
return "", fmt.Errorf("invalid vendor/class %q in Spec", raw.Kind)
|
||||
}
|
||||
|
||||
return GenerateTransientSpecName(vendor, class, transientID), nil
|
||||
}
|
||||
|
||||
@@ -17,9 +17,9 @@
|
||||
package cdi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
@@ -30,7 +30,7 @@ func renameIn(dir, src, dst string, overwrite bool) error {
|
||||
|
||||
dirf, err := os.Open(dir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "rename failed")
|
||||
return fmt.Errorf("rename failed: %w", err)
|
||||
}
|
||||
defer dirf.Close()
|
||||
|
||||
@@ -41,7 +41,7 @@ func renameIn(dir, src, dst string, overwrite bool) error {
|
||||
dirFd := int(dirf.Fd())
|
||||
err = unix.Renameat2(dirFd, src, dirFd, dst, flags)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "rename failed")
|
||||
return fmt.Errorf("rename failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
160 vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/version.go (generated, vendored, new file)
@@ -0,0 +1,160 @@
|
||||
/*
|
||||
Copyright © The CDI Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cdi
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"golang.org/x/mod/semver"
|
||||
|
||||
cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go"
|
||||
)
|
||||
|
||||
const (
|
||||
// CurrentVersion is the current version of the CDI Spec.
|
||||
CurrentVersion = cdi.CurrentVersion
|
||||
|
||||
// vCurrent is the current version as a semver-comparable type
|
||||
vCurrent version = "v" + CurrentVersion
|
||||
|
||||
// These represent the released versions of the CDI specification
|
||||
v010 version = "v0.1.0"
|
||||
v020 version = "v0.2.0"
|
||||
v030 version = "v0.3.0"
|
||||
v040 version = "v0.4.0"
|
||||
v050 version = "v0.5.0"
|
||||
|
||||
// vEarliest is the earliest supported version of the CDI specification
|
||||
vEarliest version = v030
|
||||
)
|
||||
|
||||
// validSpecVersions stores a map of spec versions to functions to check the required versions.
|
||||
// Adding new fields / spec versions requires that a `requiredFunc` be implemented and
|
||||
// this map be updated.
|
||||
var validSpecVersions = requiredVersionMap{
|
||||
v010: nil,
|
||||
v020: nil,
|
||||
v030: nil,
|
||||
v040: requiresV040,
|
||||
v050: requiresV050,
|
||||
}
|
||||
|
||||
// MinimumRequiredVersion determines the minimum spec version for the input spec.
|
||||
func MinimumRequiredVersion(spec *cdi.Spec) (string, error) {
|
||||
minVersion := validSpecVersions.requiredVersion(spec)
|
||||
return minVersion.String(), nil
|
||||
}
|
||||
|
||||
// version represents a semantic version string
|
||||
type version string
|
||||
|
||||
// newVersion creates a version that can be used for semantic version comparisons.
|
||||
func newVersion(v string) version {
|
||||
return version("v" + strings.TrimPrefix(v, "v"))
|
||||
}
|
||||
|
||||
// String returns the string representation of the version.
|
||||
// This trims a leading v if present.
|
||||
func (v version) String() string {
|
||||
return strings.TrimPrefix(string(v), "v")
|
||||
}
|
||||
|
||||
// IsGreaterThan checks with a version is greater than the specified version.
|
||||
func (v version) IsGreaterThan(o version) bool {
|
||||
return semver.Compare(string(v), string(o)) > 0
|
||||
}
|
||||
|
||||
// IsLatest checks whether the version is the latest supported version
|
||||
func (v version) IsLatest() bool {
|
||||
return v == vCurrent
|
||||
}
|
||||
|
||||
type requiredFunc func(*cdi.Spec) bool
|
||||
|
||||
type requiredVersionMap map[version]requiredFunc
|
||||
|
||||
// isValidVersion checks whether the specified version is valid.
|
||||
// A version is valid if it is contained in the required version map.
|
||||
func (r requiredVersionMap) isValidVersion(specVersion string) bool {
|
||||
_, ok := validSpecVersions[newVersion(specVersion)]
|
||||
|
||||
return ok
|
||||
}
|
||||
|
||||
// requiredVersion returns the minimum version required for the given spec
|
||||
func (r requiredVersionMap) requiredVersion(spec *cdi.Spec) version {
|
||||
minVersion := vEarliest
|
||||
|
||||
for v, isRequired := range validSpecVersions {
|
||||
if isRequired == nil {
|
||||
continue
|
||||
}
|
||||
if isRequired(spec) && v.IsGreaterThan(minVersion) {
|
||||
minVersion = v
|
||||
}
|
||||
// If we have already detected the latest version then no later version could be detected
|
||||
if minVersion.IsLatest() {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return minVersion
|
||||
}
|
||||
|
||||
// requiresV050 returns true if the spec uses v0.5.0 features
|
||||
func requiresV050(spec *cdi.Spec) bool {
|
||||
var edits []*cdi.ContainerEdits
|
||||
|
||||
for _, d := range spec.Devices {
|
||||
// The v0.5.0 spec allowed device names to start with a digit instead of requiring a letter
|
||||
if len(d.Name) > 0 && !isLetter(rune(d.Name[0])) {
|
||||
return true
|
||||
}
|
||||
edits = append(edits, &d.ContainerEdits)
|
||||
}
|
||||
|
||||
edits = append(edits, &spec.ContainerEdits)
|
||||
for _, e := range edits {
|
||||
for _, dn := range e.DeviceNodes {
|
||||
// The HostPath field was added in v0.5.0
|
||||
if dn.HostPath != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// requiresV040 returns true if the spec uses v0.4.0 features
|
||||
func requiresV040(spec *cdi.Spec) bool {
|
||||
var edits []*cdi.ContainerEdits
|
||||
|
||||
for _, d := range spec.Devices {
|
||||
edits = append(edits, &d.ContainerEdits)
|
||||
}
|
||||
|
||||
edits = append(edits, &spec.ContainerEdits)
|
||||
for _, e := range edits {
|
||||
for _, m := range e.Mounts {
|
||||
// The Type field was added in v0.4.0
|
||||
if m.Type != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
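The file above implements spec-version detection for CDI specs: requiresV040 and requiresV050 look for fields introduced in those releases, and MinimumRequiredVersion reports the lowest spec version that still supports everything the spec uses. As a rough illustration (not part of the diff), the sketch below calls MinimumRequiredVersion against a spec whose device edits set DeviceNode.HostPath, which requiresV050 treats as a v0.5.0 feature. The specs-go field names are taken from the vendored package above; the import alias cdispec and the device names are only for this example.

```go
package main

import (
	"fmt"

	"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
	cdispec "github.com/container-orchestrated-devices/container-device-interface/specs-go"
)

func main() {
	// A device whose container edits set HostPath, a field added in v0.5.0.
	spec := &cdispec.Spec{
		Kind: "example.com/device",
		Devices: []cdispec.Device{{
			Name: "dev0",
			ContainerEdits: cdispec.ContainerEdits{
				DeviceNodes: []*cdispec.DeviceNode{
					{Path: "/dev/example0", HostPath: "/dev/example0"},
				},
			},
		}},
	}

	minVersion, err := cdi.MinimumRequiredVersion(spec)
	if err != nil {
		panic(err)
	}
	fmt.Println(minVersion) // expected: 0.5.0 with this vendored version
}
```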
354 vendor/github.com/hashicorp/errwrap/LICENSE (generated, vendored)
@@ -1,354 +0,0 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
|
||||
alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute, judicial
|
||||
order, or regulation then You must: (a) comply with the terms of this License
|
||||
to the maximum extent possible; and (b) describe the limitations and the code
|
||||
they affect. Such description must be placed in a text file included with all
|
||||
distributions of the Covered Software under this License. Except to the
|
||||
extent prohibited by statute or regulation, such description must be
|
||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||
understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||
if such Contributor fails to notify You of the non-compliance by some
|
||||
reasonable means prior to 60 days after You have come back into compliance.
|
||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||
some reasonable means, this is the first time You have received notice of
|
||||
non-compliance with this License from such Contributor, and You become
|
||||
compliant prior to 30 days after Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||
and cross-claims) alleging that a Contributor Version directly or
|
||||
indirectly infringes any patent, then the rights granted to You by any and
|
||||
all Contributors for the Covered Software under Section 2.1 of this License
|
||||
shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an “as is” basis, without
|
||||
warranty of any kind, either expressed, implied, or statutory, including,
|
||||
without limitation, warranties that the Covered Software is free of defects,
|
||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||
risk as to the quality and performance of the Covered Software is with You.
|
||||
Should any Covered Software prove defective in any respect, You (not any
|
||||
Contributor) assume the cost of any necessary servicing, repair, or
|
||||
correction. This disclaimer of warranty constitutes an essential part of this
|
||||
License. No use of any Covered Software is authorized under this License
|
||||
except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from such
|
||||
party’s negligence to the extent applicable law prohibits such limitation.
|
||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||
consequential damages, so this exclusion and limitation may not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts of
|
||||
a jurisdiction where the defendant maintains its principal place of business
|
||||
and such litigation shall be governed by laws of that jurisdiction, without
|
||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject matter
|
||||
hereof. If any provision of this License is held to be unenforceable, such
|
||||
provision shall be reformed only to the extent necessary to make it
|
||||
enforceable. Any law or regulation which provides that the language of a
|
||||
contract shall be construed against the drafter shall not be used to construe
|
||||
this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version of
|
||||
the License under which You originally received the Covered Software, or
|
||||
under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a modified
|
||||
version of this License if you rename the license and remove any
|
||||
references to the name of the license steward (except to note that such
|
||||
modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file, then
|
||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||
directory) where a recipient would be likely to look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||
|
||||
This Source Code Form is “Incompatible
|
||||
With Secondary Licenses”, as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
||||
89 vendor/github.com/hashicorp/errwrap/README.md (generated, vendored)
@@ -1,89 +0,0 @@
|
||||
# errwrap
|
||||
|
||||
`errwrap` is a package for Go that formalizes the pattern of wrapping errors
|
||||
and checking if an error contains another error.
|
||||
|
||||
There is a common pattern in Go of taking a returned `error` value and
|
||||
then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
|
||||
with this pattern is that you completely lose the original `error` structure.
|
||||
|
||||
Arguably the _correct_ approach is that you should make a custom structure
|
||||
implementing the `error` interface, and have the original error as a field
|
||||
on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
|
||||
This is a good approach, but you have to know the entire chain of possible
|
||||
rewrapping that happens, when you might just care about one.
|
||||
|
||||
`errwrap` formalizes this pattern (it doesn't matter what approach you use
|
||||
above) by giving a single interface for wrapping errors, checking if a specific
|
||||
error is wrapped, and extracting that error.
|
||||
|
||||
## Installation and Docs
|
||||
|
||||
Install using `go get github.com/hashicorp/errwrap`.
|
||||
|
||||
Full documentation is available at
|
||||
http://godoc.org/github.com/hashicorp/errwrap
|
||||
|
||||
## Usage
|
||||
|
||||
#### Basic Usage
|
||||
|
||||
Below is a very basic example of its usage:
|
||||
|
||||
```go
|
||||
// A function that always returns an error, but wraps it, like a real
|
||||
// function might.
|
||||
func tryOpen() error {
|
||||
_, err := os.Open("/i/dont/exist")
|
||||
if err != nil {
|
||||
return errwrap.Wrapf("Doesn't exist: {{err}}", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
err := tryOpen()
|
||||
|
||||
// We can use the Contains helpers to check if an error contains
|
||||
// another error. It is safe to do this with a nil error, or with
|
||||
// an error that doesn't even use the errwrap package.
|
||||
if errwrap.Contains(err, "does not exist") {
|
||||
// Do something
|
||||
}
|
||||
if errwrap.ContainsType(err, new(os.PathError)) {
|
||||
// Do something
|
||||
}
|
||||
|
||||
// Or we can use the associated `Get` functions to just extract
|
||||
// a specific error. This would return nil if that specific error doesn't
|
||||
// exist.
|
||||
perr := errwrap.GetType(err, new(os.PathError))
|
||||
}
|
||||
```
|
||||
|
||||
#### Custom Types
|
||||
|
||||
If you're already making custom types that properly wrap errors, then
|
||||
you can get all the functionality of `errwraps.Contains` and such by
|
||||
implementing the `Wrapper` interface with just one function. Example:
|
||||
|
||||
```go
|
||||
type AppError {
|
||||
Code ErrorCode
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *AppError) WrappedErrors() []error {
|
||||
return []error{e.Err}
|
||||
}
|
||||
```
|
||||
|
||||
Now this works:
|
||||
|
||||
```go
|
||||
err := &AppError{Err: fmt.Errorf("an error")}
|
||||
if errwrap.ContainsType(err, fmt.Errorf("")) {
|
||||
// This will work!
|
||||
}
|
||||
```
|
||||
178 vendor/github.com/hashicorp/errwrap/errwrap.go (generated, vendored)
@@ -1,178 +0,0 @@
|
||||
// Package errwrap implements methods to formalize error wrapping in Go.
|
||||
//
|
||||
// All of the top-level functions that take an `error` are built to be able
|
||||
// to take any error, not just wrapped errors. This allows you to use errwrap
|
||||
// without having to type-check and type-cast everywhere.
|
||||
package errwrap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// WalkFunc is the callback called for Walk.
|
||||
type WalkFunc func(error)
|
||||
|
||||
// Wrapper is an interface that can be implemented by custom types to
|
||||
// have all the Contains, Get, etc. functions in errwrap work.
|
||||
//
|
||||
// When Walk reaches a Wrapper, it will call the callback for every
|
||||
// wrapped error in addition to the wrapper itself. Since all the top-level
|
||||
// functions in errwrap use Walk, this means that all those functions work
|
||||
// with your custom type.
|
||||
type Wrapper interface {
|
||||
WrappedErrors() []error
|
||||
}
|
||||
|
||||
// Wrap defines that outer wraps inner, returning an error type that
|
||||
// can be cleanly used with the other methods in this package, such as
|
||||
// Contains, GetAll, etc.
|
||||
//
|
||||
// This function won't modify the error message at all (the outer message
|
||||
// will be used).
|
||||
func Wrap(outer, inner error) error {
|
||||
return &wrappedError{
|
||||
Outer: outer,
|
||||
Inner: inner,
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapf wraps an error with a formatting message. This is similar to using
|
||||
// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
|
||||
// errors, you should replace it with this.
|
||||
//
|
||||
// format is the format of the error message. The string '{{err}}' will
|
||||
// be replaced with the original error message.
|
||||
//
|
||||
// Deprecated: Use fmt.Errorf()
|
||||
func Wrapf(format string, err error) error {
|
||||
outerMsg := "<nil>"
|
||||
if err != nil {
|
||||
outerMsg = err.Error()
|
||||
}
|
||||
|
||||
outer := errors.New(strings.Replace(
|
||||
format, "{{err}}", outerMsg, -1))
|
||||
|
||||
return Wrap(outer, err)
|
||||
}
|
||||
|
||||
// Contains checks if the given error contains an error with the
|
||||
// message msg. If err is not a wrapped error, this will always return
|
||||
// false unless the error itself happens to match this msg.
|
||||
func Contains(err error, msg string) bool {
|
||||
return len(GetAll(err, msg)) > 0
|
||||
}
|
||||
|
||||
// ContainsType checks if the given error contains an error with
|
||||
// the same concrete type as v. If err is not a wrapped error, this will
|
||||
// check the err itself.
|
||||
func ContainsType(err error, v interface{}) bool {
|
||||
return len(GetAllType(err, v)) > 0
|
||||
}
|
||||
|
||||
// Get is the same as GetAll but returns the deepest matching error.
|
||||
func Get(err error, msg string) error {
|
||||
es := GetAll(err, msg)
|
||||
if len(es) > 0 {
|
||||
return es[len(es)-1]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetType is the same as GetAllType but returns the deepest matching error.
|
||||
func GetType(err error, v interface{}) error {
|
||||
es := GetAllType(err, v)
|
||||
if len(es) > 0 {
|
||||
return es[len(es)-1]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAll gets all the errors that might be wrapped in err with the
|
||||
// given message. The order of the errors is such that the outermost
|
||||
// matching error (the most recent wrap) is index zero, and so on.
|
||||
func GetAll(err error, msg string) []error {
|
||||
var result []error
|
||||
|
||||
Walk(err, func(err error) {
|
||||
if err.Error() == msg {
|
||||
result = append(result, err)
|
||||
}
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// GetAllType gets all the errors that are the same type as v.
|
||||
//
|
||||
// The order of the return value is the same as described in GetAll.
|
||||
func GetAllType(err error, v interface{}) []error {
|
||||
var result []error
|
||||
|
||||
var search string
|
||||
if v != nil {
|
||||
search = reflect.TypeOf(v).String()
|
||||
}
|
||||
Walk(err, func(err error) {
|
||||
var needle string
|
||||
if err != nil {
|
||||
needle = reflect.TypeOf(err).String()
|
||||
}
|
||||
|
||||
if needle == search {
|
||||
result = append(result, err)
|
||||
}
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Walk walks all the wrapped errors in err and calls the callback. If
|
||||
// err isn't a wrapped error, this will be called once for err. If err
|
||||
// is a wrapped error, the callback will be called for both the wrapper
|
||||
// that implements error as well as the wrapped error itself.
|
||||
func Walk(err error, cb WalkFunc) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch e := err.(type) {
|
||||
case *wrappedError:
|
||||
cb(e.Outer)
|
||||
Walk(e.Inner, cb)
|
||||
case Wrapper:
|
||||
cb(err)
|
||||
|
||||
for _, err := range e.WrappedErrors() {
|
||||
Walk(err, cb)
|
||||
}
|
||||
case interface{ Unwrap() error }:
|
||||
cb(err)
|
||||
Walk(e.Unwrap(), cb)
|
||||
default:
|
||||
cb(err)
|
||||
}
|
||||
}
|
||||
|
||||
// wrappedError is an implementation of error that has both the
|
||||
// outer and inner errors.
|
||||
type wrappedError struct {
|
||||
Outer error
|
||||
Inner error
|
||||
}
|
||||
|
||||
func (w *wrappedError) Error() string {
|
||||
return w.Outer.Error()
|
||||
}
|
||||
|
||||
func (w *wrappedError) WrappedErrors() []error {
|
||||
return []error{w.Outer, w.Inner}
|
||||
}
|
||||
|
||||
func (w *wrappedError) Unwrap() error {
|
||||
return w.Inner
|
||||
}
|
||||
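errwrap.go above defines the Wrapper interface plus the Wrap/Wrapf, Contains, GetAll, and Walk helpers that go-multierror builds on. The following is a minimal sketch (not part of the diff) of the wrap-then-inspect cycle, using only functions shown in the removed file; the messages are illustrative.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/errwrap"
)

func main() {
	base := errors.New("permission denied")
	wrapped := errwrap.Wrapf("opening config: {{err}}", base)

	// Contains matches on the exact message of any wrapped error.
	fmt.Println(errwrap.Contains(wrapped, "permission denied")) // true

	// Walk visits the outer error first and then the wrapped error.
	errwrap.Walk(wrapped, func(err error) {
		fmt.Println("visited:", err)
	})
}
```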
353 vendor/github.com/hashicorp/go-multierror/LICENSE (generated, vendored)
@@ -1,353 +0,0 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
|
||||
alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute, judicial
|
||||
order, or regulation then You must: (a) comply with the terms of this License
|
||||
to the maximum extent possible; and (b) describe the limitations and the code
|
||||
they affect. Such description must be placed in a text file included with all
|
||||
distributions of the Covered Software under this License. Except to the
|
||||
extent prohibited by statute or regulation, such description must be
|
||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||
understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||
if such Contributor fails to notify You of the non-compliance by some
|
||||
reasonable means prior to 60 days after You have come back into compliance.
|
||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||
some reasonable means, this is the first time You have received notice of
|
||||
non-compliance with this License from such Contributor, and You become
|
||||
compliant prior to 30 days after Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||
and cross-claims) alleging that a Contributor Version directly or
|
||||
indirectly infringes any patent, then the rights granted to You by any and
|
||||
all Contributors for the Covered Software under Section 2.1 of this License
|
||||
shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an “as is” basis, without
|
||||
warranty of any kind, either expressed, implied, or statutory, including,
|
||||
without limitation, warranties that the Covered Software is free of defects,
|
||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||
risk as to the quality and performance of the Covered Software is with You.
|
||||
Should any Covered Software prove defective in any respect, You (not any
|
||||
Contributor) assume the cost of any necessary servicing, repair, or
|
||||
correction. This disclaimer of warranty constitutes an essential part of this
|
||||
License. No use of any Covered Software is authorized under this License
|
||||
except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from such
|
||||
party’s negligence to the extent applicable law prohibits such limitation.
|
||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||
consequential damages, so this exclusion and limitation may not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts of
|
||||
a jurisdiction where the defendant maintains its principal place of business
|
||||
and such litigation shall be governed by laws of that jurisdiction, without
|
||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject matter
|
||||
hereof. If any provision of this License is held to be unenforceable, such
|
||||
provision shall be reformed only to the extent necessary to make it
|
||||
enforceable. Any law or regulation which provides that the language of a
|
||||
contract shall be construed against the drafter shall not be used to construe
|
||||
this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version of
|
||||
the License under which You originally received the Covered Software, or
|
||||
under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a modified
|
||||
version of this License if you rename the license and remove any
|
||||
references to the name of the license steward (except to note that such
|
||||
modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file, then
|
||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||
directory) where a recipient would be likely to look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||
|
||||
This Source Code Form is “Incompatible
|
||||
With Secondary Licenses”, as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
31 vendor/github.com/hashicorp/go-multierror/Makefile (generated, vendored)
@@ -1,31 +0,0 @@
TEST?=./...

default: test

# test runs the test suite and vets the code.
test: generate
    @echo "==> Running tests..."
    @go list $(TEST) \
        | grep -v "/vendor/" \
        | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}

# testrace runs the race checker
testrace: generate
    @echo "==> Running tests (race)..."
    @go list $(TEST) \
        | grep -v "/vendor/" \
        | xargs -n1 go test -timeout=60s -race ${TESTARGS}

# updatedeps installs all the dependencies needed to run and build.
updatedeps:
    @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"

# generate runs `go generate` to build the dynamically generated source files.
generate:
    @echo "==> Generating..."
    @find . -type f -name '.DS_Store' -delete
    @go list ./... \
        | grep -v "/vendor/" \
        | xargs -n1 go generate

.PHONY: default test testrace updatedeps generate
150 vendor/github.com/hashicorp/go-multierror/README.md (generated, vendored)
@@ -1,150 +0,0 @@
|
||||
# go-multierror
|
||||
|
||||
[](https://circleci.com/gh/hashicorp/go-multierror)
|
||||
[](https://pkg.go.dev/github.com/hashicorp/go-multierror)
|
||||

|
||||
|
||||
[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror
|
||||
[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror
|
||||
|
||||
`go-multierror` is a package for Go that provides a mechanism for
|
||||
representing a list of `error` values as a single `error`.
|
||||
|
||||
This allows a function in Go to return an `error` that might actually
|
||||
be a list of errors. If the caller knows this, they can unwrap the
|
||||
list and access the errors. If the caller doesn't know, the error
|
||||
formats to a nice human-readable format.
|
||||
|
||||
`go-multierror` is fully compatible with the Go standard library
|
||||
[errors](https://golang.org/pkg/errors/) package, including the
|
||||
functions `As`, `Is`, and `Unwrap`. This provides a standardized approach
|
||||
for introspecting on error values.
|
||||
|
||||
## Installation and Docs
|
||||
|
||||
Install using `go get github.com/hashicorp/go-multierror`.
|
||||
|
||||
Full documentation is available at
|
||||
https://pkg.go.dev/github.com/hashicorp/go-multierror
|
||||
|
||||
### Requires go version 1.13 or newer
|
||||
|
||||
`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced
|
||||
[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which
|
||||
this library takes advantage of.
|
||||
|
||||
If you need to use an earlier version of go, you can use the
|
||||
[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0)
|
||||
tag, which doesn't rely on features in go 1.13.
|
||||
|
||||
If you see compile errors that look like the below, it's likely that
|
||||
you're on an older version of go:
|
||||
|
||||
```
|
||||
/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As
|
||||
/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
go-multierror is easy to use and purposely built to be unobtrusive in
|
||||
existing Go applications/libraries that may not be aware of it.
|
||||
|
||||
**Building a list of errors**
|
||||
|
||||
The `Append` function is used to create a list of errors. This function
|
||||
behaves a lot like the Go built-in `append` function: it doesn't matter
|
||||
if the first argument is nil, a `multierror.Error`, or any other `error`,
|
||||
the function behaves as you would expect.
|
||||
|
||||
```go
|
||||
var result error
|
||||
|
||||
if err := step1(); err != nil {
|
||||
result = multierror.Append(result, err)
|
||||
}
|
||||
if err := step2(); err != nil {
|
||||
result = multierror.Append(result, err)
|
||||
}
|
||||
|
||||
return result
|
||||
```
|
||||
|
||||
**Customizing the formatting of the errors**
|
||||
|
||||
By specifying a custom `ErrorFormat`, you can customize the format
|
||||
of the `Error() string` function:
|
||||
|
||||
```go
|
||||
var result *multierror.Error
|
||||
|
||||
// ... accumulate errors here, maybe using Append
|
||||
|
||||
if result != nil {
|
||||
result.ErrorFormat = func([]error) string {
|
||||
return "errors!"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Accessing the list of errors**
|
||||
|
||||
`multierror.Error` implements `error` so if the caller doesn't know about
|
||||
multierror, it will work just fine. But if you're aware a multierror might
|
||||
be returned, you can use type switches to access the list of errors:
|
||||
|
||||
```go
|
||||
if err := something(); err != nil {
|
||||
if merr, ok := err.(*multierror.Error); ok {
|
||||
// Use merr.Errors
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap)
|
||||
function. This will continue to unwrap into subsequent errors until none exist.
|
||||
|
||||
**Extracting an error**
|
||||
|
||||
The standard library [`errors.As`](https://golang.org/pkg/errors/#As)
|
||||
function can be used directly with a multierror to extract a specific error:
|
||||
|
||||
```go
|
||||
// Assume err is a multierror value
|
||||
err := somefunc()
|
||||
|
||||
// We want to know if "err" has a "RichErrorType" in it and extract it.
|
||||
var errRich RichErrorType
|
||||
if errors.As(err, &errRich) {
|
||||
// It has it, and now errRich is populated.
|
||||
}
|
||||
```
|
||||
|
||||
**Checking for an exact error value**
|
||||
|
||||
Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables)
|
||||
error in the `os` package. You can check if this error is present by using
|
||||
the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function.
|
||||
|
||||
```go
|
||||
// Assume err is a multierror value
|
||||
err := somefunc()
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
// err contains os.ErrNotExist
|
||||
}
|
||||
```
|
||||
|
||||
**Returning a multierror only if there are errors**
|
||||
|
||||
If you build a `multierror.Error`, you can use the `ErrorOrNil` function
|
||||
to return an `error` implementation only if there are errors to return:
|
||||
|
||||
```go
|
||||
var result *multierror.Error
|
||||
|
||||
// ... accumulate errors here
|
||||
|
||||
// Return the `error` only if errors were added to the multierror, otherwise
|
||||
// return nil since there are no errors.
|
||||
return result.ErrorOrNil()
|
||||
```
|
||||
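The README above documents Append, ErrorOrNil, and the stdlib errors integration separately. The short sketch below (not part of the diff; the path and messages are purely illustrative) strings them together the way a caller typically would, accumulating problems and returning a single error only when something actually failed.

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/hashicorp/go-multierror"
)

// checkSetup accumulates problems instead of failing on the first one.
func checkSetup() error {
	var result *multierror.Error

	if _, err := os.Stat("/nonexistent/illustrative/path"); err != nil {
		result = multierror.Append(result, err)
	}
	result = multierror.Append(result, errors.New("illustrative failure"))

	// nil when nothing was appended, a *multierror.Error otherwise.
	return result.ErrorOrNil()
}

func main() {
	err := checkSetup()
	fmt.Println(err)
	// errors.Is unwraps through the multierror chain; expected true
	// on a system where the illustrative path does not exist.
	fmt.Println(errors.Is(err, os.ErrNotExist))
}
```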
43 vendor/github.com/hashicorp/go-multierror/append.go (generated, vendored)
@@ -1,43 +0,0 @@
package multierror

// Append is a helper function that will append more errors
// onto an Error in order to create a larger multi-error.
//
// If err is not a multierror.Error, then it will be turned into
// one. If any of the errs are multierr.Error, they will be flattened
// one level into err.
// Any nil errors within errs will be ignored. If err is nil, a new
// *Error will be returned.
func Append(err error, errs ...error) *Error {
    switch err := err.(type) {
    case *Error:
        // Typed nils can reach here, so initialize if we are nil
        if err == nil {
            err = new(Error)
        }

        // Go through each error and flatten
        for _, e := range errs {
            switch e := e.(type) {
            case *Error:
                if e != nil {
                    err.Errors = append(err.Errors, e.Errors...)
                }
            default:
                if e != nil {
                    err.Errors = append(err.Errors, e)
                }
            }
        }

        return err
    default:
        newErrs := make([]error, 0, len(errs)+1)
        if err != nil {
            newErrs = append(newErrs, err)
        }
        newErrs = append(newErrs, errs...)

        return Append(&Error{}, newErrs...)
    }
}
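Worth noting from Append above: *Error arguments are flattened one level, so appending one multierror to another does not produce a nested list. A small sketch (not part of the diff, error messages illustrative) of that behavior:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	inner := multierror.Append(nil, errors.New("inner failure"))
	outer := multierror.Append(nil, errors.New("outer failure"), inner)

	// inner's errors were flattened into outer rather than nested.
	fmt.Println(len(outer.Errors)) // 2
	for _, err := range outer.Errors {
		fmt.Println(" -", err)
	}
}
```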
26 vendor/github.com/hashicorp/go-multierror/flatten.go (generated, vendored)
@@ -1,26 +0,0 @@
package multierror

// Flatten flattens the given error, merging any *Errors together into
// a single *Error.
func Flatten(err error) error {
    // If it isn't an *Error, just return the error as-is
    if _, ok := err.(*Error); !ok {
        return err
    }

    // Otherwise, make the result and flatten away!
    flatErr := new(Error)
    flatten(err, flatErr)
    return flatErr
}

func flatten(err error, flatErr *Error) {
    switch err := err.(type) {
    case *Error:
        for _, e := range err.Errors {
            flatten(e, flatErr)
        }
    default:
        flatErr.Errors = append(flatErr.Errors, err)
    }
}
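Flatten complements Append by collapsing arbitrarily nested *Error trees into a single flat list. A brief sketch (not part of the diff), constructing the nesting directly so the effect is visible:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	// Build a nested tree by hand; Append alone would have flattened it.
	nested := &multierror.Error{Errors: []error{
		errors.New("top-level failure"),
		&multierror.Error{Errors: []error{errors.New("nested failure")}},
	}}

	flat := multierror.Flatten(nested)
	// The result is a single *Error with both leaf errors at one level.
	fmt.Println(len(flat.(*multierror.Error).Errors)) // 2
	fmt.Println(flat)
}
```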
27 vendor/github.com/hashicorp/go-multierror/format.go (generated, vendored)
@@ -1,27 +0,0 @@
package multierror

import (
    "fmt"
    "strings"
)

// ErrorFormatFunc is a function callback that is called by Error to
// turn the list of errors into a string.
type ErrorFormatFunc func([]error) string

// ListFormatFunc is a basic formatter that outputs the number of errors
// that occurred along with a bullet point list of the errors.
func ListFormatFunc(es []error) string {
    if len(es) == 1 {
        return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
    }

    points := make([]string, len(es))
    for i, err := range es {
        points[i] = fmt.Sprintf("* %s", err)
    }

    return fmt.Sprintf(
        "%d errors occurred:\n\t%s\n\n",
        len(es), strings.Join(points, "\n\t"))
}
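ListFormatFunc above is the default formatter used by Error(); setting the ErrorFormat field on an *Error overrides it per instance. A short sketch (not part of the diff, messages illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var result *multierror.Error
	result = multierror.Append(result, errors.New("no such device"))
	result = multierror.Append(result, errors.New("driver mismatch"))

	// Default ListFormatFunc: "2 errors occurred:" plus a bulleted list.
	fmt.Println(result)

	// Overriding ErrorFormat changes how Error() renders the list.
	result.ErrorFormat = func(errs []error) string {
		return fmt.Sprintf("%d problems detected", len(errs))
	}
	fmt.Println(result) // 2 problems detected
}
```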
38
vendor/github.com/hashicorp/go-multierror/group.go
generated
vendored
38
vendor/github.com/hashicorp/go-multierror/group.go
generated
vendored
@@ -1,38 +0,0 @@
package multierror

import "sync"

// Group is a collection of goroutines which return errors that need to be
// coalesced.
type Group struct {
    mutex sync.Mutex
    err   *Error
    wg    sync.WaitGroup
}

// Go calls the given function in a new goroutine.
//
// If the function returns an error it is added to the group multierror which
// is returned by Wait.
func (g *Group) Go(f func() error) {
    g.wg.Add(1)

    go func() {
        defer g.wg.Done()

        if err := f(); err != nil {
            g.mutex.Lock()
            g.err = Append(g.err, err)
            g.mutex.Unlock()
        }
    }()
}

// Wait blocks until all function calls from the Go method have returned, then
// returns the multierror.
func (g *Group) Wait() *Error {
    g.wg.Wait()
    g.mutex.Lock()
    defer g.mutex.Unlock()
    return g.err
}
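A short sketch of the Go/Wait pattern this type enables (upstream import path assumed; the task names are placeholders):

package main

import (
    "fmt"

    "github.com/hashicorp/go-multierror"
)

func main() {
    var g multierror.Group
    for _, name := range []string{"a", "b", "c"} {
        name := name // capture the loop variable for the goroutine
        g.Go(func() error {
            return fmt.Errorf("task %s failed", name)
        })
    }

    // Wait blocks for all goroutines, then the collected *Error is reduced
    // to a plain error (nil if every task succeeded).
    if err := g.Wait().ErrorOrNil(); err != nil {
        fmt.Println(err)
    }
}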
vendor/github.com/hashicorp/go-multierror/multierror.go (generated, vendored): 121 lines removed
@@ -1,121 +0,0 @@
package multierror

import (
    "errors"
    "fmt"
)

// Error is an error type to track multiple errors. This is used to
// accumulate errors in cases and return them as a single "error".
type Error struct {
    Errors      []error
    ErrorFormat ErrorFormatFunc
}

func (e *Error) Error() string {
    fn := e.ErrorFormat
    if fn == nil {
        fn = ListFormatFunc
    }

    return fn(e.Errors)
}

// ErrorOrNil returns an error interface if this Error represents
// a list of errors, or returns nil if the list of errors is empty. This
// function is useful at the end of accumulation to make sure that the value
// returned represents the existence of errors.
func (e *Error) ErrorOrNil() error {
    if e == nil {
        return nil
    }
    if len(e.Errors) == 0 {
        return nil
    }

    return e
}

func (e *Error) GoString() string {
    return fmt.Sprintf("*%#v", *e)
}

// WrappedErrors returns the list of errors that this Error is wrapping. It is
// an implementation of the errwrap.Wrapper interface so that multierror.Error
// can be used with that library.
//
// This method is not safe to be called concurrently. Unlike accessing the
// Errors field directly, this function also checks if the multierror is nil to
// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface.
func (e *Error) WrappedErrors() []error {
    if e == nil {
        return nil
    }
    return e.Errors
}

// Unwrap returns an error from Error (or nil if there are no errors).
// This error returned will further support Unwrap to get the next error,
// etc. The order will match the order of Errors in the multierror.Error
// at the time of calling.
//
// The resulting error supports errors.As/Is/Unwrap so you can continue
// to use the stdlib errors package to introspect further.
//
// This will perform a shallow copy of the errors slice. Any errors appended
// to this error after calling Unwrap will not be available until a new
// Unwrap is called on the multierror.Error.
func (e *Error) Unwrap() error {
    // If we have no errors then we do nothing
    if e == nil || len(e.Errors) == 0 {
        return nil
    }

    // If we have exactly one error, we can just return that directly.
    if len(e.Errors) == 1 {
        return e.Errors[0]
    }

    // Shallow copy the slice
    errs := make([]error, len(e.Errors))
    copy(errs, e.Errors)
    return chain(errs)
}

// chain implements the interfaces necessary for errors.Is/As/Unwrap to
// work in a deterministic way with multierror. A chain tracks a list of
// errors while accounting for the current represented error. This lets
// Is/As be meaningful.
//
// Unwrap returns the next error. In the cleanest form, Unwrap would return
// the wrapped error here but we can't do that if we want to properly
// get access to all the errors. Instead, users are recommended to use
// Is/As to get the correct error type out.
//
// Precondition: []error is non-empty (len > 0)
type chain []error

// Error implements the error interface
func (e chain) Error() string {
    return e[0].Error()
}

// Unwrap implements errors.Unwrap by returning the next error in the
// chain or nil if there are no more errors.
func (e chain) Unwrap() error {
    if len(e) == 1 {
        return nil
    }

    return e[1:]
}

// As implements errors.As by attempting to map to the current value.
func (e chain) As(target interface{}) bool {
    return errors.As(e[0], target)
}

// Is implements errors.Is by comparing the current value directly.
func (e chain) Is(target error) bool {
    return errors.Is(e[0], target)
}
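The chain returned by Unwrap is what keeps the standard errors helpers useful across every collected error; a hedged sketch (upstream import path assumed, the sentinel comparison is illustrative):

package main

import (
    "errors"
    "fmt"
    "os"

    "github.com/hashicorp/go-multierror"
)

func main() {
    result := multierror.Append(nil, errors.New("unrelated"), os.ErrNotExist)

    // errors.Is walks the chain produced by (*Error).Unwrap, so the sentinel
    // is found even though it is not the first collected error.
    fmt.Println(errors.Is(result, os.ErrNotExist)) // true
}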
vendor/github.com/hashicorp/go-multierror/prefix.go (generated, vendored): 37 lines removed
@@ -1,37 +0,0 @@
package multierror

import (
    "fmt"

    "github.com/hashicorp/errwrap"
)

// Prefix is a helper function that will prefix some text
// to the given error. If the error is a multierror.Error, then
// it will be prefixed to each wrapped error.
//
// This is useful to use when appending multiple multierrors
// together in order to give better scoping.
func Prefix(err error, prefix string) error {
    if err == nil {
        return nil
    }

    format := fmt.Sprintf("%s {{err}}", prefix)
    switch err := err.(type) {
    case *Error:
        // Typed nils can reach here, so initialize if we are nil
        if err == nil {
            err = new(Error)
        }

        // Wrap each of the errors
        for i, e := range err.Errors {
            err.Errors[i] = errwrap.Wrapf(format, e)
        }

        return err
    default:
        return errwrap.Wrapf(format, err)
    }
}
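A small sketch of the scoping Prefix gives each wrapped error (upstream import path assumed; the prefix and messages are invented):

package main

import (
    "errors"
    "fmt"

    "github.com/hashicorp/go-multierror"
)

func main() {
    result := multierror.Append(nil,
        errors.New("missing config"),
        errors.New("bad permissions"),
    )

    // Each collected error is rewrapped as "startup: <original message>".
    err := multierror.Prefix(result, "startup:")
    fmt.Println(err)
}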
vendor/github.com/hashicorp/go-multierror/sort.go (generated, vendored): 16 lines removed
@@ -1,16 +0,0 @@
package multierror

// Len implements sort.Interface function for length
func (err Error) Len() int {
    return len(err.Errors)
}

// Swap implements sort.Interface function for swapping elements
func (err Error) Swap(i, j int) {
    err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
}

// Less implements sort.Interface function for determining order
func (err Error) Less(i, j int) bool {
    return err.Errors[i].Error() < err.Errors[j].Error()
}
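Because Error satisfies sort.Interface, the collected errors can be ordered by message; a sketch under the same import-path assumption:

package main

import (
    "errors"
    "fmt"
    "sort"

    "github.com/hashicorp/go-multierror"
)

func main() {
    result := multierror.Append(nil, errors.New("zebra"), errors.New("apple"))

    // The value-receiver Len/Swap/Less methods are promoted to *Error, and
    // Swap mutates the shared Errors slice, so sorting in place works.
    sort.Sort(result)
    fmt.Println(result.Errors[0]) // apple
}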
vendor/github.com/opencontainers/runtime-spec/specs-go/config.go (generated, vendored): 46 changed lines
@@ -12,6 +12,8 @@ type Spec struct {
    Root *Root `json:"root,omitempty"`
    // Hostname configures the container's hostname.
    Hostname string `json:"hostname,omitempty"`
+    // Domainname configures the container's domainname.
+    Domainname string `json:"domainname,omitempty"`
    // Mounts configures additional mounts (on top of Root).
    Mounts []Mount `json:"mounts,omitempty"`
    // Hooks configures callbacks for container lifecycle events.
@@ -117,6 +119,11 @@ type Mount struct {
    Source string `json:"source,omitempty"`
    // Options are fstab style mount options.
    Options []string `json:"options,omitempty"`
+
+    // UID/GID mappings used for changing file owners w/o calling chown, fs should support it.
+    // Every mount point could have its own mapping.
+    UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty" platform:"linux"`
+    GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty" platform:"linux"`
}

// Hook specifies a command that is run at a particular event in the lifecycle of a container
@@ -252,8 +259,8 @@ type LinuxInterfacePriority struct {
    Priority uint32 `json:"priority"`
}

-// linuxBlockIODevice holds major:minor format supported in blkio cgroup
-type linuxBlockIODevice struct {
+// LinuxBlockIODevice holds major:minor format supported in blkio cgroup
+type LinuxBlockIODevice struct {
    // Major is the device's major number.
    Major int64 `json:"major"`
    // Minor is the device's minor number.
@@ -262,7 +269,7 @@ type linuxBlockIODevice struct {

// LinuxWeightDevice struct holds a `major:minor weight` pair for weightDevice
type LinuxWeightDevice struct {
-    linuxBlockIODevice
+    LinuxBlockIODevice
    // Weight is the bandwidth rate for the device.
    Weight *uint16 `json:"weight,omitempty"`
    // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only
@@ -271,7 +278,7 @@ type LinuxWeightDevice struct {

// LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair
type LinuxThrottleDevice struct {
-    linuxBlockIODevice
+    LinuxBlockIODevice
    // Rate is the IO rate limit per cgroup per device
    Rate uint64 `json:"rate"`
}
@@ -330,6 +337,8 @@ type LinuxCPU struct {
    Cpus string `json:"cpus,omitempty"`
    // List of memory nodes in the cpuset. Default is to use any available memory node.
    Mems string `json:"mems,omitempty"`
+    // cgroups are configured with minimum weight, 0: default behavior, 1: SCHED_IDLE.
+    Idle *int64 `json:"idle,omitempty"`
}

// LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3)
@@ -524,11 +533,21 @@ type WindowsMemoryResources struct {

// WindowsCPUResources contains CPU resource management settings.
type WindowsCPUResources struct {
-    // Number of CPUs available to the container.
+    // Count is the number of CPUs available to the container. It represents the
+    // fraction of the configured processor `count` in a container in relation
+    // to the processors available in the host. The fraction ultimately
+    // determines the portion of processor cycles that the threads in a
+    // container can use during each scheduling interval, as the number of
+    // cycles per 10,000 cycles.
    Count *uint64 `json:"count,omitempty"`
-    // CPU shares (relative weight to other containers with cpu shares).
+    // Shares limits the share of processor time given to the container relative
+    // to other workloads on the processor. The processor `shares` (`weight` at
+    // the platform level) is a value between 0 and 10000.
    Shares *uint16 `json:"shares,omitempty"`
-    // Specifies the portion of processor cycles that this container can use as a percentage times 100.
+    // Maximum determines the portion of processor cycles that the threads in a
+    // container can use during each scheduling interval, as the number of
+    // cycles per 10,000 cycles. Set processor `maximum` to a percentage times
+    // 100.
    Maximum *uint16 `json:"maximum,omitempty"`
}

@@ -615,6 +634,19 @@ type Arch string
// LinuxSeccompFlag is a flag to pass to seccomp(2).
type LinuxSeccompFlag string

+const (
+    // LinuxSeccompFlagLog is a seccomp flag to request all returned
+    // actions except SECCOMP_RET_ALLOW to be logged. An administrator may
+    // override this filter flag by preventing specific actions from being
+    // logged via the /proc/sys/kernel/seccomp/actions_logged file. (since
+    // Linux 4.14)
+    LinuxSeccompFlagLog LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_LOG"
+
+    // LinuxSeccompFlagSpecAllow can be used to disable Speculative Store
+    // Bypass mitigation. (since Linux 4.17)
+    LinuxSeccompFlagSpecAllow LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_SPEC_ALLOW"
+)
+
// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
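To show what the updated specs-go surface looks like from the Go side, a hedged sketch (field names outside the hunks above, such as Linux and LinuxResources, are assumed from the upstream runtime-spec package; all values are invented):

package main

import (
    "fmt"

    specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
    idle := int64(1)
    spec := specs.Spec{
        Hostname:   "gpu-test",
        Domainname: "example.internal", // new Domainname field
        Mounts: []specs.Mount{{
            Destination: "/data",
            Source:      "/srv/data",
            Options:     []string{"bind"},
            // new per-mount ID mappings (Linux only)
            UIDMappings: []specs.LinuxIDMapping{{ContainerID: 0, HostID: 1000, Size: 1}},
        }},
        Linux: &specs.Linux{
            Resources: &specs.LinuxResources{
                CPU: &specs.LinuxCPU{Idle: &idle}, // new Idle knob
            },
        },
    }
    fmt.Println(spec.Domainname, spec.Mounts[0].UIDMappings)
}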
Some files were not shown because too many files have changed in this diff.