Mirror of https://github.com/NVIDIA/nvidia-container-toolkit (synced 2025-06-26 18:18:24 +00:00)
Compare commits: v1.6.0 ... v1.8.0-rc. (63 commits)
| SHA1 |
|---|
| 4562cb559c |
| 72e17e8632 |
| 6898917f41 |
| 53c130fb3c |
| 45bd3002da |
| 58042d78df |
| aa52b12c09 |
| 47bc4f90ba |
| 41c1c2312a |
| 9d34134b3f |
| d931e861f3 |
| b1c9b8bb49 |
| 50fbcebe31 |
| 78f38455fd |
| f57e9b969c |
| a174aae7b5 |
| 6890cb2ed8 |
| 13603e9794 |
| afb260d82e |
| f0311bfe17 |
| 050c29b157 |
| de9afd4623 |
| b231d8f365 |
| ee2b84b228 |
| 0c24fa83ae |
| 79660d1e55 |
| 39d2ff06fa |
| 0ac288e6dd |
| b334f1977b |
| 2d07385e81 |
| fd5a1a72f0 |
| 738d28dac5 |
| e662e8197c |
| 2964f26533 |
| 629d575fad |
| 7fb04878c7 |
| f10f533fb2 |
| 9c2cdc2f81 |
| 5bbaf8af4b |
| c6ce5b5a29 |
| b9e752e24e |
| 94849fa822 |
| b0d6948d94 |
| 995bd0d34a |
| 27bb5cca0c |
| 72d1d90ce9 |
| 6a1f7d0228 |
| 094631329f |
| 6731f050da |
| 2ee6ec5d17 |
| 1c25b349b1 |
| d87bdf9ab6 |
| 472c89d051 |
| 3470f2ecb9 |
| 9c27e03c87 |
| 09c6995ff9 |
| e2ec381093 |
| 7a31ebadb1 |
| 7a34be62b2 |
| a4441b6545 |
| ab3ebe5e49 |
| ea0bf6fbf8 |
| 0a2db7c70e |
.common-ci.yml (268 changed lines)
@@ -34,86 +34,49 @@ stages:
|
||||
- release
|
||||
- build-all
|
||||
|
||||
build-dev-image:
|
||||
stage: image
|
||||
script:
|
||||
- apk --no-cache add make bash
|
||||
- make .build-image
|
||||
- docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
|
||||
- make .push-build-image
|
||||
|
||||
.requires-build-image:
|
||||
image: "${BUILDIMAGE}"
|
||||
|
||||
.go-check:
|
||||
extends:
|
||||
- .requires-build-image
|
||||
stage: go-checks
|
||||
|
||||
fmt:
|
||||
extends:
|
||||
- .go-check
|
||||
script:
|
||||
- make assert-fmt
|
||||
|
||||
vet:
|
||||
extends:
|
||||
- .go-check
|
||||
script:
|
||||
- make vet
|
||||
|
||||
lint:
|
||||
extends:
|
||||
- .go-check
|
||||
script:
|
||||
- make lint
|
||||
allow_failure: true
|
||||
|
||||
ineffassign:
|
||||
extends:
|
||||
- .go-check
|
||||
script:
|
||||
- make ineffassign
|
||||
allow_failure: true
|
||||
|
||||
misspell:
|
||||
extends:
|
||||
- .go-check
|
||||
script:
|
||||
- make misspell
|
||||
|
||||
go-build:
|
||||
extends:
|
||||
- .requires-build-image
|
||||
stage: go-build
|
||||
script:
|
||||
- make build
|
||||
|
||||
unit-tests:
|
||||
extends:
|
||||
- .requires-build-image
|
||||
stage: unit-tests
|
||||
script:
|
||||
- make coverage
|
||||
|
||||
|
||||
# Define the distribution targets
|
||||
.dist-amazonlinux2:
|
||||
variables:
|
||||
DIST: amazonlinux2
|
||||
|
||||
.dist-centos7:
|
||||
variables:
|
||||
DIST: centos7
|
||||
CVE_UPDATES: "nss"
|
||||
|
||||
.dist-centos8:
|
||||
variables:
|
||||
DIST: centos8
|
||||
|
||||
.dist-debian10:
|
||||
variables:
|
||||
DIST: debian10
|
||||
|
||||
.dist-debian9:
|
||||
variables:
|
||||
DIST: debian9
|
||||
|
||||
.dist-opensuse-leap15.1:
|
||||
variables:
|
||||
DIST: opensuse-leap15.1
|
||||
|
||||
.dist-ubi8:
|
||||
variables:
|
||||
DIST: ubi8
|
||||
|
||||
.dist-ubuntu16.04:
|
||||
variables:
|
||||
DIST: ubuntu16.04
|
||||
|
||||
.dist-ubuntu18.04:
|
||||
variables:
|
||||
DIST: ubuntu18.04
|
||||
|
||||
.dist-packaging:
|
||||
variables:
|
||||
DIST: packaging
|
||||
|
||||
# Define architecture targets
|
||||
.arch-aarch64:
|
||||
variables:
|
||||
ARCH: aarch64
|
||||
@@ -134,121 +97,6 @@ unit-tests:
|
||||
variables:
|
||||
ARCH: x86_64
|
||||
|
||||
# Define the package build helpers
|
||||
.multi-arch-build:
|
||||
before_script:
|
||||
- apk add --no-cache coreutils build-base sed git bash make
|
||||
- '[[ -n "${SKIP_QEMU_SETUP}" ]] || docker run --rm --privileged multiarch/qemu-user-static --reset -p yes -c yes'
|
||||
|
||||
.package-artifacts:
|
||||
variables:
|
||||
ARTIFACTS_NAME: "toolkit-container-${CI_PIPELINE_ID}"
|
||||
ARTIFACTS_ROOT: "toolkit-container-${CI_PIPELINE_ID}"
|
||||
DIST_DIR: ${CI_PROJECT_DIR}/${ARTIFACTS_ROOT}
|
||||
|
||||
.package-build:
|
||||
extends:
|
||||
- .multi-arch-build
|
||||
- .package-artifacts
|
||||
stage: package-build
|
||||
script:
|
||||
- ./scripts/release.sh ${DIST}-${ARCH}
|
||||
|
||||
artifacts:
|
||||
name: ${ARTIFACTS_NAME}
|
||||
paths:
|
||||
- ${ARTIFACTS_ROOT}
|
||||
|
||||
# Define the package build targets
|
||||
package-ubuntu18.04-amd64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-ubuntu18.04
|
||||
- .arch-amd64
|
||||
|
||||
package-ubuntu18.04-arm64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-ubuntu18.04
|
||||
- .arch-arm64
|
||||
|
||||
package-ubuntu18.04-ppc64le:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-ubuntu18.04
|
||||
- .arch-ppc64le
|
||||
|
||||
package-centos7-x86_64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-centos7
|
||||
- .arch-x86_64
|
||||
|
||||
package-centos8-x86_64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-centos8
|
||||
- .arch-x86_64
|
||||
|
||||
# Define the image build targets
|
||||
.image-build:
|
||||
stage: image-build
|
||||
variables:
|
||||
IMAGE_NAME: "${CI_REGISTRY_IMAGE}/container-toolkit"
|
||||
VERSION: "${CI_COMMIT_SHORT_SHA}"
|
||||
before_script:
|
||||
- apk add --no-cache bash make
|
||||
- 'echo "Logging in to CI registry ${CI_REGISTRY}"'
|
||||
- docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
|
||||
|
||||
image-centos7:
|
||||
extends:
|
||||
- .image-build
|
||||
- .package-artifacts
|
||||
- .dist-centos7
|
||||
needs:
|
||||
- package-centos7-x86_64
|
||||
script:
|
||||
- make -f build/container/Makefile build-${DIST}
|
||||
- make -f build/container/Makefile push-${DIST}
|
||||
|
||||
image-centos8:
|
||||
extends:
|
||||
- .image-build
|
||||
- .package-artifacts
|
||||
- .dist-centos8
|
||||
needs:
|
||||
- package-centos8-x86_64
|
||||
script:
|
||||
- make -f build/container/Makefile build-${DIST}
|
||||
- make -f build/container/Makefile push-${DIST}
|
||||
|
||||
image-ubi8:
|
||||
extends:
|
||||
- .image-build
|
||||
- .package-artifacts
|
||||
- .dist-ubi8
|
||||
needs:
|
||||
# Note: The ubi8 image currently uses the centos7 packages
|
||||
- package-centos7-x86_64
|
||||
script:
|
||||
- make -f build/container/Makefile build-${DIST}
|
||||
- make -f build/container/Makefile push-${DIST}
|
||||
|
||||
image-ubuntu18.04:
|
||||
extends:
|
||||
- .image-build
|
||||
- .package-artifacts
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- package-ubuntu18.04-amd64
|
||||
# TODO: These will be required once we generate multi-arch images
|
||||
# - package-ubuntu18.04-arm64
|
||||
# - package-ubuntu18.04-ppc64le
|
||||
script:
|
||||
- make -f build/container/Makefile build-${DIST}
|
||||
- make -f build/container/Makefile push-${DIST}
|
||||
|
||||
# Define test helpers
|
||||
.integration:
|
||||
stage: test
|
||||
@@ -262,62 +110,13 @@ image-ubuntu18.04:
|
||||
script:
|
||||
- make -f build/container/Makefile test-${DIST}
|
||||
|
||||
.test:toolkit:
|
||||
extends:
|
||||
- .integration
|
||||
variables:
|
||||
TEST_CASES: "toolkit"
|
||||
|
||||
.test:docker:
|
||||
extends:
|
||||
- .integration
|
||||
variables:
|
||||
TEST_CASES: "docker"
|
||||
|
||||
.test:containerd:
|
||||
# TODO: The containerd tests fail due to issues with SIGHUP.
|
||||
# Until this is resolved we retry up to twice and allow failure here.
|
||||
retry: 2
|
||||
allow_failure: true
|
||||
extends:
|
||||
- .integration
|
||||
variables:
|
||||
TEST_CASES: "containerd"
|
||||
|
||||
.test:crio:
|
||||
extends:
|
||||
- .integration
|
||||
variables:
|
||||
TEST_CASES: "crio"
|
||||
|
||||
# Define the test targets
|
||||
test-toolkit-ubuntu18.04:
|
||||
test-packaging:
|
||||
extends:
|
||||
- .test:toolkit
|
||||
- .dist-ubuntu18.04
|
||||
- .integration
|
||||
- .dist-packaging
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
|
||||
test-containerd-ubuntu18.04:
|
||||
extends:
|
||||
- .test:containerd
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
|
||||
test-crio-ubuntu18.04:
|
||||
extends:
|
||||
- .test:crio
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
|
||||
test-docker-ubuntu18.04:
|
||||
extends:
|
||||
- .test:docker
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
- image-packaging
|
||||
|
||||
# .release forms the base of the deployment jobs which push images to the CI registry.
|
||||
# This is extended with the version to be deployed (e.g. the SHA or TAG) and the
|
||||
@@ -407,3 +206,10 @@ release:staging-ubuntu18.04:
|
||||
- test-containerd-ubuntu18.04
|
||||
- test-crio-ubuntu18.04
|
||||
- test-docker-ubuntu18.04
|
||||
|
||||
release:staging-packaging:
|
||||
extends:
|
||||
- .release:staging
|
||||
- .dist-packaging
|
||||
needs:
|
||||
- test-packaging
|
||||
|
||||
.gitlab-ci.yml (312 changed lines)
@@ -15,6 +15,318 @@
|
||||
include:
|
||||
- .common-ci.yml
|
||||
|
||||
build-dev-image:
|
||||
stage: image
|
||||
script:
|
||||
- apk --no-cache add make bash
|
||||
- make .build-image
|
||||
- docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
|
||||
- make .push-build-image
|
||||
|
||||
.requires-build-image:
|
||||
image: "${BUILDIMAGE}"
|
||||
|
||||
.go-check:
|
||||
extends:
|
||||
- .requires-build-image
|
||||
stage: go-checks
|
||||
|
||||
fmt:
|
||||
extends:
|
||||
- .go-check
|
||||
script:
|
||||
- make assert-fmt
|
||||
|
||||
vet:
|
||||
extends:
|
||||
- .go-check
|
||||
script:
|
||||
- make vet
|
||||
|
||||
lint:
|
||||
extends:
|
||||
- .go-check
|
||||
script:
|
||||
- make lint
|
||||
allow_failure: true
|
||||
|
||||
ineffassign:
|
||||
extends:
|
||||
- .go-check
|
||||
script:
|
||||
- make ineffassign
|
||||
allow_failure: true
|
||||
|
||||
misspell:
|
||||
extends:
|
||||
- .go-check
|
||||
script:
|
||||
- make misspell
|
||||
|
||||
go-build:
|
||||
extends:
|
||||
- .requires-build-image
|
||||
stage: go-build
|
||||
script:
|
||||
- make build
|
||||
|
||||
unit-tests:
|
||||
extends:
|
||||
- .requires-build-image
|
||||
stage: unit-tests
|
||||
script:
|
||||
- make coverage
|
||||
|
||||
# Define the package build helpers
|
||||
.multi-arch-build:
|
||||
before_script:
|
||||
- apk add --no-cache coreutils build-base sed git bash make
|
||||
- '[[ -n "${SKIP_QEMU_SETUP}" ]] || docker run --rm --privileged multiarch/qemu-user-static --reset -p yes -c yes'
|
||||
|
||||
.package-artifacts:
|
||||
variables:
|
||||
ARTIFACTS_NAME: "toolkit-container-${CI_PIPELINE_ID}"
|
||||
ARTIFACTS_ROOT: "toolkit-container-${CI_PIPELINE_ID}"
|
||||
DIST_DIR: ${CI_PROJECT_DIR}/${ARTIFACTS_ROOT}
|
||||
|
||||
.package-build:
|
||||
extends:
|
||||
- .multi-arch-build
|
||||
- .package-artifacts
|
||||
stage: package-build
|
||||
script:
|
||||
- ./scripts/release.sh ${DIST}-${ARCH}
|
||||
|
||||
artifacts:
|
||||
name: ${ARTIFACTS_NAME}
|
||||
paths:
|
||||
- ${ARTIFACTS_ROOT}
|
||||
|
||||
# Define the package build targets
|
||||
package-amazonlinux2-aarch64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-amazonlinux2
|
||||
- .arch-aarch64
|
||||
|
||||
package-amazonlinux2-x86_64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-amazonlinux2
|
||||
- .arch-x86_64
|
||||
|
||||
package-centos7-ppc64le:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-centos7
|
||||
- .arch-ppc64le
|
||||
|
||||
package-centos7-x86_64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-centos7
|
||||
- .arch-x86_64
|
||||
|
||||
package-centos8-aarch64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-centos8
|
||||
- .arch-aarch64
|
||||
|
||||
package-centos8-ppc64le:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-centos8
|
||||
- .arch-ppc64le
|
||||
|
||||
package-centos8-x86_64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-centos8
|
||||
- .arch-x86_64
|
||||
|
||||
package-debian10-amd64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-debian10
|
||||
- .arch-amd64
|
||||
|
||||
package-debian9-amd64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-debian9
|
||||
- .arch-amd64
|
||||
|
||||
package-opensuse-leap15.1-x86_64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-opensuse-leap15.1
|
||||
- .arch-x86_64
|
||||
|
||||
package-ubuntu16.04-amd64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-ubuntu16.04
|
||||
- .arch-amd64
|
||||
|
||||
package-ubuntu16.04-ppc64le:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-ubuntu16.04
|
||||
- .arch-ppc64le
|
||||
|
||||
package-ubuntu18.04-amd64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-ubuntu18.04
|
||||
- .arch-amd64
|
||||
|
||||
package-ubuntu18.04-arm64:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-ubuntu18.04
|
||||
- .arch-arm64
|
||||
|
||||
package-ubuntu18.04-ppc64le:
|
||||
extends:
|
||||
- .package-build
|
||||
- .dist-ubuntu18.04
|
||||
- .arch-ppc64le
|
||||
|
||||
# Define the image build targets
|
||||
.image-build:
|
||||
stage: image-build
|
||||
variables:
|
||||
IMAGE_NAME: "${CI_REGISTRY_IMAGE}/container-toolkit"
|
||||
VERSION: "${CI_COMMIT_SHORT_SHA}"
|
||||
before_script:
|
||||
- apk add --no-cache bash make
|
||||
- 'echo "Logging in to CI registry ${CI_REGISTRY}"'
|
||||
- docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
|
||||
script:
|
||||
- make -f build/container/Makefile build-${DIST}
|
||||
- make -f build/container/Makefile push-${DIST}
|
||||
|
||||
image-centos7:
|
||||
extends:
|
||||
- .image-build
|
||||
- .package-artifacts
|
||||
- .dist-centos7
|
||||
needs:
|
||||
- package-centos7-ppc64le
|
||||
- package-centos7-x86_64
|
||||
|
||||
image-centos8:
|
||||
extends:
|
||||
- .image-build
|
||||
- .package-artifacts
|
||||
- .dist-centos8
|
||||
needs:
|
||||
- package-centos8-aarch64
|
||||
- package-centos8-x86_64
|
||||
- package-centos8-ppc64le
|
||||
|
||||
image-ubi8:
|
||||
extends:
|
||||
- .image-build
|
||||
- .package-artifacts
|
||||
- .dist-ubi8
|
||||
needs:
|
||||
# Note: The ubi8 image currently uses the centos7 packages
|
||||
- package-centos7-ppc64le
|
||||
- package-centos7-x86_64
|
||||
|
||||
image-ubuntu18.04:
|
||||
extends:
|
||||
- .image-build
|
||||
- .package-artifacts
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- package-ubuntu18.04-amd64
|
||||
- package-ubuntu18.04-arm64
|
||||
- package-ubuntu18.04-ppc64le
|
||||
|
||||
# The DIST=packaging target creates an image containing all built packages
|
||||
image-packaging:
|
||||
extends:
|
||||
- .image-build
|
||||
- .package-artifacts
|
||||
- .dist-packaging
|
||||
needs:
|
||||
- package-amazonlinux2-aarch64
|
||||
- package-amazonlinux2-x86_64
|
||||
- package-centos7-ppc64le
|
||||
- package-centos7-x86_64
|
||||
- package-centos8-aarch64
|
||||
- package-centos8-ppc64le
|
||||
- package-centos8-x86_64
|
||||
- package-debian10-amd64
|
||||
- package-debian9-amd64
|
||||
- package-opensuse-leap15.1-x86_64
|
||||
- package-ubuntu16.04-amd64
|
||||
- package-ubuntu16.04-ppc64le
|
||||
- package-ubuntu18.04-amd64
|
||||
- package-ubuntu18.04-arm64
|
||||
- package-ubuntu18.04-ppc64le
|
||||
|
||||
# Define publish test helpers
|
||||
.test:toolkit:
|
||||
extends:
|
||||
- .integration
|
||||
variables:
|
||||
TEST_CASES: "toolkit"
|
||||
|
||||
.test:docker:
|
||||
extends:
|
||||
- .integration
|
||||
variables:
|
||||
TEST_CASES: "docker"
|
||||
|
||||
.test:containerd:
|
||||
# TODO: The containerd tests fail due to issues with SIGHUP.
|
||||
# Until this is resolved we retry up to twice and allow failure here.
|
||||
retry: 2
|
||||
allow_failure: true
|
||||
extends:
|
||||
- .integration
|
||||
variables:
|
||||
TEST_CASES: "containerd"
|
||||
|
||||
.test:crio:
|
||||
extends:
|
||||
- .integration
|
||||
variables:
|
||||
TEST_CASES: "crio"
|
||||
|
||||
# Define the test targets
|
||||
test-toolkit-ubuntu18.04:
|
||||
extends:
|
||||
- .test:toolkit
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
|
||||
test-containerd-ubuntu18.04:
|
||||
extends:
|
||||
- .test:containerd
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
|
||||
test-crio-ubuntu18.04:
|
||||
extends:
|
||||
- .test:crio
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
|
||||
test-docker-ubuntu18.04:
|
||||
extends:
|
||||
- .test:docker
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
|
||||
# build-all jobs build packages for every OS / ARCH combination we support.
|
||||
#
|
||||
# They are run under two conditions:
|
||||
|
||||
@@ -32,6 +32,63 @@ variables:
|
||||
DEVEL_RELEASE_IMAGE_VERSION: "devel"
|
||||
# On the multi-arch builder we don't need the qemu setup.
|
||||
SKIP_QEMU_SETUP: "1"
|
||||
# Define the public staging registry
|
||||
STAGING_REGISTRY: registry.gitlab.com/nvidia/container-toolkit/container-toolkit/staging
|
||||
STAGING_VERSION: ${CI_COMMIT_SHORT_SHA}
|
||||
|
||||
.image-pull:
|
||||
stage: image-build
|
||||
variables:
|
||||
IN_REGISTRY: "${STAGING_REGISTRY}"
|
||||
IN_IMAGE_NAME: container-toolkit
|
||||
IN_VERSION: "${STAGING_VERSION}"
|
||||
OUT_REGISTRY_USER: "${CI_REGISTRY_USER}"
|
||||
OUT_REGISTRY_TOKEN: "${CI_REGISTRY_PASSWORD}"
|
||||
OUT_REGISTRY: "${CI_REGISTRY}"
|
||||
OUT_IMAGE_NAME: "${CI_REGISTRY_IMAGE}/container-toolkit"
|
||||
# We delay the job start to allow the public pipeline to generate the required images.
|
||||
when: delayed
|
||||
start_in: 30 minutes
|
||||
timeout: 30 minutes
|
||||
retry:
|
||||
max: 2
|
||||
when:
|
||||
- job_execution_timeout
|
||||
- stuck_or_timeout_failure
|
||||
before_script:
|
||||
- >
|
||||
docker pull ${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST} > /dev/null && echo "${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST}" || ( echo "${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST} does not exist" && sleep infinity )
|
||||
script:
|
||||
- docker pull ${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST}
|
||||
- docker tag ${IN_REGISTRY}/${IN_IMAGE_NAME}:${IN_VERSION}-${DIST} ${OUT_IMAGE_NAME}:${CI_COMMIT_SHORT_SHA}-${DIST}
|
||||
- docker login -u "${OUT_REGISTRY_USER}" -p "${OUT_REGISTRY_TOKEN}" "${OUT_REGISTRY}"
|
||||
- docker push ${OUT_IMAGE_NAME}:${CI_COMMIT_SHORT_SHA}-${DIST}
|
||||
|
||||
image-centos7:
|
||||
extends:
|
||||
- .image-pull
|
||||
- .dist-centos7
|
||||
|
||||
image-centos8:
|
||||
extends:
|
||||
- .image-pull
|
||||
- .dist-centos8
|
||||
|
||||
image-ubi8:
|
||||
extends:
|
||||
- .image-pull
|
||||
- .dist-ubi8
|
||||
|
||||
image-ubuntu18.04:
|
||||
extends:
|
||||
- .image-pull
|
||||
- .dist-ubuntu18.04
|
||||
|
||||
# The DIST=packaging target creates an image containing all built packages
|
||||
image-packaging:
|
||||
extends:
|
||||
- .image-pull
|
||||
- .dist-packaging
|
||||
|
||||
# We skip the integration tests for the internal CI:
|
||||
.integration:
|
||||
@@ -49,13 +106,9 @@ variables:
|
||||
variables:
|
||||
IMAGE: "${CI_REGISTRY_IMAGE}/container-toolkit:${CI_COMMIT_SHORT_SHA}-${DIST}"
|
||||
IMAGE_ARCHIVE: "container-toolkit.tar"
|
||||
rules:
|
||||
- if: $CI_COMMIT_MESSAGE =~ /\[skip[ _-]scans?\]/i
|
||||
when: never
|
||||
- if: $SKIP_SCANS
|
||||
when: never
|
||||
- if: $CI_COMMIT_TAG == null && $CI_COMMIT_BRANCH != $RELEASE_DEVEL_BRANCH
|
||||
allow_failure: true
|
||||
except:
|
||||
variables:
|
||||
- $SKIP_SCANS && $SKIP_SCANS == "yes"
|
||||
before_script:
|
||||
- docker login -u "${CI_REGISTRY_USER}" -p "${CI_REGISTRY_PASSWORD}" "${CI_REGISTRY}"
|
||||
# TODO: We should specify the architecture here and scan all architectures
|
||||
@@ -115,8 +168,6 @@ scan-ubi8:
|
||||
OUT_REGISTRY_TOKEN: "${NGC_REGISTRY_TOKEN}"
|
||||
OUT_REGISTRY: "${NGC_REGISTRY}"
|
||||
OUT_IMAGE_NAME: "${NGC_REGISTRY_IMAGE}"
|
||||
# TODO: For now we disable external releases
|
||||
DOCKER: echo
|
||||
|
||||
.release:dockerhub:
|
||||
extends:
|
||||
@@ -127,8 +178,12 @@ scan-ubi8:
|
||||
OUT_REGISTRY: "${DOCKERHUB_REGISTRY}"
|
||||
OUT_IMAGE_NAME: "${REGISTRY_IMAGE}"
|
||||
|
||||
# TODO: For now we disable external releases
|
||||
DOCKER: echo
|
||||
release:staging-ubuntu18.04:
|
||||
extends:
|
||||
- .release:staging
|
||||
- .dist-ubuntu18.04
|
||||
needs:
|
||||
- image-ubuntu18.04
|
||||
|
||||
# Define the external release targets
|
||||
# Release to NGC
|
||||
|
||||
Makefile (5 changed lines)
@@ -16,11 +16,8 @@ DOCKER ?= docker
|
||||
MKDIR ?= mkdir
|
||||
DIST_DIR ?= $(CURDIR)/dist
|
||||
|
||||
LIB_NAME := nvidia-container-toolkit
|
||||
LIB_VERSION := 1.6.0
|
||||
LIB_TAG ?=
|
||||
include $(CURDIR)/versions.mk
|
||||
|
||||
GOLANG_VERSION := 1.16.3
|
||||
MODULE := github.com/NVIDIA/nvidia-container-toolkit
|
||||
|
||||
# By default run all native docker-based targets
|
||||
|
||||
@@ -46,16 +46,18 @@ ENV NVIDIA_DISABLE_REQUIRE="true"
|
||||
ENV NVIDIA_VISIBLE_DEVICES=all
|
||||
ENV NVIDIA_DRIVER_CAPABILITIES=utility
|
||||
|
||||
ARG ARTIFACTS_ROOT
|
||||
ARG PACKAGE_DIST
|
||||
COPY ${ARTIFACTS_ROOT}/${PACKAGE_DIST} /artifacts/packages/${PACKAGE_DIST}
|
||||
|
||||
WORKDIR /artifacts/packages
|
||||
|
||||
ARG ARTIFACTS_DIR
|
||||
COPY ${ARTIFACTS_DIR}/* /artifacts/packages/
|
||||
|
||||
ARG PACKAGE_VERSION
|
||||
ARG PACKAGE_ARCH
|
||||
RUN yum localinstall -y \
|
||||
libnvidia-container1-${PACKAGE_VERSION}*.rpm \
|
||||
libnvidia-container-tools-${PACKAGE_VERSION}*.rpm \
|
||||
nvidia-container-toolkit-${PACKAGE_VERSION}*.rpm
|
||||
${PACKAGE_DIST}/${PACKAGE_ARCH}/libnvidia-container1-${PACKAGE_VERSION}*.rpm \
|
||||
${PACKAGE_DIST}/${PACKAGE_ARCH}/libnvidia-container-tools-${PACKAGE_VERSION}*.rpm \
|
||||
${PACKAGE_DIST}/${PACKAGE_ARCH}/nvidia-container-toolkit-${PACKAGE_VERSION}*.rpm
|
||||
|
||||
WORKDIR /work
|
||||
|
||||
@@ -73,4 +75,11 @@ LABEL description="See summary"
|
||||
|
||||
COPY ./LICENSE /licenses/LICENSE
|
||||
|
||||
ENTRYPOINT ["/work/nvidia-toolkit"]
|
||||
# Install / upgrade packages here that are required to resolve CVEs
|
||||
ARG CVE_UPDATES
|
||||
RUN if [ -n "${CVE_UPDATES}" ]; then \
|
||||
yum update -y ${CVE_UPDATES} && \
|
||||
rm -rf /var/cache/yum/*; \
|
||||
fi
|
||||
|
||||
ENTRYPOINT ["/work/nvidia-toolkit"]
|
||||
|
||||
build/container/Dockerfile.packaging (new file, 29 lines)
@@ -0,0 +1,29 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARG BASE_DIST
ARG CUDA_VERSION
ARG GOLANG_VERSION=x.x.x
ARG VERSION="N/A"

FROM nvidia/cuda:${CUDA_VERSION}-base-${BASE_DIST}

ENV NVIDIA_CONTAINER_TOOLKIT_VERSION="${VERSION}"

ARG ARTIFACTS_ROOT
COPY ${ARTIFACTS_ROOT} /artifacts/packages/

WORKDIR /artifacts/packages

COPY ./LICENSE /licenses/LICENSE

@@ -52,16 +52,18 @@ ENV NVIDIA_DISABLE_REQUIRE="true"
|
||||
ENV NVIDIA_VISIBLE_DEVICES=all
|
||||
ENV NVIDIA_DRIVER_CAPABILITIES=utility
|
||||
|
||||
ARG ARTIFACTS_ROOT
|
||||
ARG PACKAGE_DIST
|
||||
COPY ${ARTIFACTS_ROOT}/${PACKAGE_DIST} /artifacts/packages/${PACKAGE_DIST}
|
||||
|
||||
WORKDIR /artifacts/packages
|
||||
|
||||
ARG ARTIFACTS_DIR
|
||||
COPY ${ARTIFACTS_DIR}/* /artifacts/packages/
|
||||
|
||||
ARG PACKAGE_VERSION
|
||||
ARG PACKAGE_ARCH
|
||||
RUN dpkg -i \
|
||||
libnvidia-container1_${PACKAGE_VERSION}*.deb \
|
||||
libnvidia-container-tools_${PACKAGE_VERSION}*.deb \
|
||||
nvidia-container-toolkit_${PACKAGE_VERSION}*.deb
|
||||
${PACKAGE_DIST}/${PACKAGE_ARCH}/libnvidia-container1_${PACKAGE_VERSION}*.deb \
|
||||
${PACKAGE_DIST}/${PACKAGE_ARCH}/libnvidia-container-tools_${PACKAGE_VERSION}*.deb \
|
||||
${PACKAGE_DIST}/${PACKAGE_ARCH}/nvidia-container-toolkit_${PACKAGE_VERSION}*.deb
|
||||
|
||||
WORKDIR /work
|
||||
|
||||
|
||||
@@ -17,21 +17,15 @@ MKDIR ?= mkdir
|
||||
DIST_DIR ?= $(CURDIR)/dist
|
||||
|
||||
##### Global variables #####
|
||||
include $(CURDIR)/versions.mk
|
||||
|
||||
# TODO: These should be defined ONCE and currently duplicate the version in the
|
||||
# toolkit makefile.
|
||||
LIB_VERSION := 1.6.0
|
||||
LIB_TAG :=
|
||||
|
||||
VERSION ?= $(LIB_VERSION)$(if $(LIB_TAG),-$(LIB_TAG))
|
||||
|
||||
CUDA_VERSION ?= 11.4.2
|
||||
GOLANG_VERSION ?= 1.16.4
|
||||
ifeq ($(IMAGE_NAME),)
|
||||
REGISTRY ?= nvidia
|
||||
IMAGE_NAME := $(REGISTRY)/container-toolkit
|
||||
endif
|
||||
|
||||
VERSION ?= $(LIB_VERSION)$(if $(LIB_TAG),-$(LIB_TAG))
|
||||
|
||||
IMAGE_TAG ?= $(VERSION)-$(DIST)
|
||||
IMAGE = $(IMAGE_NAME):$(IMAGE_TAG)
|
||||
|
||||
@@ -39,12 +33,15 @@ IMAGE = $(IMAGE_NAME):$(IMAGE_TAG)
|
||||
DEFAULT_PUSH_TARGET := ubuntu18.04
|
||||
TARGETS := ubuntu20.04 ubuntu18.04 ubi8 centos7 centos8
|
||||
|
||||
BUILD_TARGETS := $(patsubst %, build-%, $(TARGETS))
|
||||
PUSH_TARGETS := $(patsubst %, push-%, $(TARGETS))
|
||||
TEST_TARGETS := $(patsubst %, test-%, $(TARGETS))
|
||||
META_TARGETS := packaging
|
||||
|
||||
BUILD_TARGETS := $(patsubst %,build-%,$(TARGETS) $(META_TARGETS))
|
||||
PUSH_TARGETS := $(patsubst %,push-%,$(TARGETS) $(META_TARGETS))
|
||||
TEST_TARGETS := $(patsubst %,test-%, $(TARGETS))
|
||||
|
||||
.PHONY: $(TARGETS) $(PUSH_TARGETS) $(BUILD_TARGETS) $(TEST_TARGETS)
|
||||
|
||||
push-%: DIST = $(*)
|
||||
$(PUSH_TARGETS): push-%:
|
||||
$(DOCKER) push "$(IMAGE_NAME):$(IMAGE_TAG)"
|
||||
|
||||
@@ -62,41 +59,51 @@ push-short:
|
||||
build-%: DIST = $(*)
|
||||
build-%: DOCKERFILE = $(CURDIR)/build/container/Dockerfile.$(DOCKERFILE_SUFFIX)
|
||||
|
||||
ARTIFACTS_ROOT ?= $(shell realpath --relative-to=$(CURDIR) $(DIST_DIR))
|
||||
|
||||
# Use a generic build target to build the relevant images
|
||||
$(BUILD_TARGETS): build-%: $(ARTIFACTS_DIR)
|
||||
$(DOCKER) build --pull \
|
||||
$(BUILD_TARGETS): build-%: $(ARTIFACTS_ROOT)
|
||||
DOCKER_BUILDKIT=1 \
|
||||
$(DOCKER) build --pull \
|
||||
--platform=linux/amd64 \
|
||||
--tag $(IMAGE) \
|
||||
--build-arg ARTIFACTS_DIR="$(ARTIFACTS_DIR)" \
|
||||
--build-arg ARTIFACTS_ROOT="$(ARTIFACTS_ROOT)" \
|
||||
--build-arg BASE_DIST="$(BASE_DIST)" \
|
||||
--build-arg CUDA_VERSION="$(CUDA_VERSION)" \
|
||||
--build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \
|
||||
--build-arg PACKAGE_DIST="$(PACKAGE_DIST)" \
|
||||
--build-arg PACKAGE_VERSION="$(PACKAGE_VERSION)" \
|
||||
--build-arg PACKAGE_ARCH="$(PACKAGE_ARCH)" \
|
||||
--build-arg VERSION="$(VERSION)" \
|
||||
--build-arg CVE_UPDATES="$(CVE_UPDATES)" \
|
||||
-f $(DOCKERFILE) \
|
||||
$(CURDIR)
|
||||
|
||||
|
||||
ARTIFACTS_ROOT ?= $(shell realpath --relative-to=$(CURDIR) $(DIST_DIR))
|
||||
|
||||
build-ubuntu%: BASE_DIST = $(*)
|
||||
build-ubuntu%: DOCKERFILE_SUFFIX := ubuntu
|
||||
build-ubuntu%: ARTIFACTS_DIR = $(ARTIFACTS_ROOT)/$(*)/amd64
|
||||
build-ubuntu%: PACKAGE_ARCH := amd64
|
||||
build-ubuntu%: PACKAGE_DIST = $(BASE_DIST)
|
||||
build-ubuntu%: PACKAGE_VERSION := $(LIB_VERSION)$(if $(LIB_TAG),~$(LIB_TAG))
|
||||
|
||||
build-ubuntu18.04: BASE_DIST := ubuntu18.04
|
||||
build-ubuntu20.04: BASE_DIST := ubuntu20.04
|
||||
|
||||
build-ubi8: DOCKERFILE_SUFFIX := centos
|
||||
# TODO: Update this to use the centos8 packages
|
||||
build-ubi8: ARTIFACTS_DIR = $(ARTIFACTS_ROOT)/centos7/x86_64
|
||||
build-ubi8: PACKAGE_VERSION := $(LIB_VERSION)-$(if $(LIB_TAG),0.1.$(LIB_TAG),1)
|
||||
build-ubi8: BASE_DIST := ubi8
|
||||
build-ubi8: DOCKERFILE_SUFFIX := centos
|
||||
build-ubi8: PACKAGE_ARCH := x86_64
|
||||
build-ubi8: PACKAGE_DIST = centos7
|
||||
build-ubi8: PACKAGE_VERSION := $(LIB_VERSION)-$(if $(LIB_TAG),0.1.$(LIB_TAG),1)
|
||||
|
||||
build-centos%: BASE_DIST = $(*)
|
||||
build-centos%: DOCKERFILE_SUFFIX := centos
|
||||
build-centos%: ARTIFACTS_DIR = $(ARTIFACTS_ROOT)/$(*)/x86_64
|
||||
build-centos%: PACKAGE_ARCH := x86_64
|
||||
build-centos%: PACKAGE_DIST = $(BASE_DIST)
|
||||
build-centos%: PACKAGE_VERSION := $(LIB_VERSION)-$(if $(LIB_TAG),0.1.$(LIB_TAG),1)
|
||||
|
||||
build-centos7: BASE_DIST := centos7
|
||||
build-centos8: BASE_DIST := centos8
|
||||
build-packaging: BASE_DIST := ubuntu20.04
|
||||
build-packaging: DOCKERFILE_SUFFIX := packaging
|
||||
build-packaging: PACKAGE_ARCH := amd64
|
||||
build-packaging: PACKAGE_DIST = all
|
||||
build-packaging: PACKAGE_VERSION := $(LIB_VERSION)$(if $(LIB_TAG),-$(LIB_TAG))
|
||||
|
||||
# Test targets
|
||||
test-%: DIST = $(*)
|
||||
@@ -108,3 +115,22 @@ $(TEST_TARGETS): test-%:
		$(IMAGE) \
		--no-cleanup-on-error

.PHONY: test-packaging
test-packaging: DIST = packaging
test-packaging:
	@echo "Testing package image contents"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/amazonlinux2/aarch64" || echo "Missing amazonlinux2/aarch64"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/amazonlinux2/x86_64" || echo "Missing amazonlinux2/x86_64"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/centos7/ppc64le" || echo "Missing centos7/ppc64le"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/centos7/x86_64" || echo "Missing centos7/x86_64"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/centos8/aarch64" || echo "Missing centos8/aarch64"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/centos8/ppc64le" || echo "Missing centos8/ppc64le"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/centos8/x86_64" || echo "Missing centos8/x86_64"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/debian10/amd64" || echo "Missing debian10/amd64"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/debian9/amd64" || echo "Missing debian9/amd64"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/opensuse-leap15.1/x86_64" || echo "Missing opensuse-leap15.1/x86_64"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/ubuntu16.04/amd64" || echo "Missing ubuntu16.04/amd64"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/ubuntu16.04/ppc64le" || echo "Missing ubuntu16.04/ppc64le"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/ubuntu18.04/amd64" || echo "Missing ubuntu18.04/amd64"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/ubuntu18.04/arm64" || echo "Missing ubuntu18.04/arm64"
	@$(DOCKER) run --rm $(IMAGE) test -d "/artifacts/packages/ubuntu18.04/ppc64le" || echo "Missing ubuntu18.04/ppc64le"

@@ -2,6 +2,15 @@ package main

import (
	"log"
	"strings"
)

const (
	allDriverCapabilities     = DriverCapabilities("compute,compat32,graphics,utility,video,display,ngx")
	defaultDriverCapabilities = DriverCapabilities("utility,compute")

	none = DriverCapabilities("")
	all  = DriverCapabilities("all")
)

func capabilityToCLI(cap string) string {
@@ -25,3 +34,50 @@ func capabilityToCLI(cap string) string {
	}
	return ""
}

// DriverCapabilities is used to process the NVIDIA_DRIVER_CAPABILITIES environment
// variable. Operations include default values, filtering, and handling meta values such as "all"
type DriverCapabilities string

// Intersection returns intersection between two sets of capabilities.
func (d DriverCapabilities) Intersection(capabilities DriverCapabilities) DriverCapabilities {
	if capabilities == all {
		return d
	}
	if d == all {
		return capabilities
	}

	lookup := make(map[string]bool)
	for _, c := range d.list() {
		lookup[c] = true
	}
	var found []string
	for _, c := range capabilities.list() {
		if lookup[c] {
			found = append(found, c)
		}
	}

	intersection := DriverCapabilities(strings.Join(found, ","))
	return intersection
}

// String returns the string representation of the driver capabilities
func (d DriverCapabilities) String() string {
	return string(d)
}

// list returns the driver capabilities as a list
func (d DriverCapabilities) list() []string {
	var caps []string
	for _, c := range strings.Split(string(d), ",") {
		trimmed := strings.TrimSpace(c)
		if len(trimmed) == 0 {
			continue
		}
		caps = append(caps, trimmed)
	}

	return caps
}

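As a quick orientation, the following minimal sketch shows how the two helpers compose. It is not part of the change: the wrapper function and capability strings are illustrative, and it assumes the package's fmt import.

func exampleIntersection() {
	supported := DriverCapabilities("compute,utility,video")
	requested := DriverCapabilities("video,display")

	// Only capabilities present in both sets survive.
	fmt.Println(supported.Intersection(requested)) // video

	// The meta value "all" defers to the other operand.
	fmt.Println(supported.Intersection(all)) // compute,utility,video
}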
cmd/nvidia-container-toolkit/capabilities_test.go (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
/**
|
||||
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
**/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDriverCapabilitiesIntersection(t *testing.T) {
|
||||
testCases := []struct {
|
||||
capabilities DriverCapabilities
|
||||
supportedCapabilities DriverCapabilities
|
||||
expectedIntersection DriverCapabilities
|
||||
}{
|
||||
{
|
||||
capabilities: none,
|
||||
supportedCapabilities: none,
|
||||
expectedIntersection: none,
|
||||
},
|
||||
{
|
||||
capabilities: all,
|
||||
supportedCapabilities: none,
|
||||
expectedIntersection: none,
|
||||
},
|
||||
{
|
||||
capabilities: all,
|
||||
supportedCapabilities: allDriverCapabilities,
|
||||
expectedIntersection: allDriverCapabilities,
|
||||
},
|
||||
{
|
||||
capabilities: allDriverCapabilities,
|
||||
supportedCapabilities: all,
|
||||
expectedIntersection: allDriverCapabilities,
|
||||
},
|
||||
{
|
||||
capabilities: none,
|
||||
supportedCapabilities: all,
|
||||
expectedIntersection: none,
|
||||
},
|
||||
{
|
||||
capabilities: none,
|
||||
supportedCapabilities: DriverCapabilities("cap1"),
|
||||
expectedIntersection: none,
|
||||
},
|
||||
{
|
||||
capabilities: DriverCapabilities("cap0,cap1"),
|
||||
supportedCapabilities: DriverCapabilities("cap1,cap0"),
|
||||
expectedIntersection: DriverCapabilities("cap0,cap1"),
|
||||
},
|
||||
{
|
||||
capabilities: defaultDriverCapabilities,
|
||||
supportedCapabilities: allDriverCapabilities,
|
||||
expectedIntersection: defaultDriverCapabilities,
|
||||
},
|
||||
{
|
||||
capabilities: DriverCapabilities("compute,compat32,graphics,utility,video,display"),
|
||||
supportedCapabilities: DriverCapabilities("compute,compat32,graphics,utility,video,display,ngx"),
|
||||
expectedIntersection: DriverCapabilities("compute,compat32,graphics,utility,video,display"),
|
||||
},
|
||||
{
|
||||
capabilities: DriverCapabilities("cap1"),
|
||||
supportedCapabilities: none,
|
||||
expectedIntersection: none,
|
||||
},
|
||||
{
|
||||
capabilities: DriverCapabilities("compute,compat32,graphics,utility,video,display,ngx"),
|
||||
supportedCapabilities: DriverCapabilities("compute,compat32,graphics,utility,video,display"),
|
||||
expectedIntersection: DriverCapabilities("compute,compat32,graphics,utility,video,display"),
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
|
||||
intersection := tc.supportedCapabilities.Intersection(tc.capabilities)
|
||||
require.EqualValues(t, tc.expectedIntersection, intersection)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDriverCapabilitiesList(t *testing.T) {
|
||||
testCases := []struct {
|
||||
capabilities DriverCapabilities
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
capabilities: DriverCapabilities(""),
|
||||
},
|
||||
{
|
||||
capabilities: DriverCapabilities(" "),
|
||||
},
|
||||
{
|
||||
capabilities: DriverCapabilities(","),
|
||||
},
|
||||
{
|
||||
capabilities: DriverCapabilities(",cap"),
|
||||
expected: []string{"cap"},
|
||||
},
|
||||
{
|
||||
capabilities: DriverCapabilities("cap,"),
|
||||
expected: []string{"cap"},
|
||||
},
|
||||
{
|
||||
capabilities: DriverCapabilities("cap0,,cap1"),
|
||||
expected: []string{"cap0", "cap1"},
|
||||
},
|
||||
{
|
||||
capabilities: DriverCapabilities("cap1,cap0,cap3"),
|
||||
expected: []string{"cap1", "cap0", "cap3"},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
|
||||
require.EqualValues(t, tc.expected, tc.capabilities.list())
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -26,11 +26,6 @@ const (
	envNVDriverCapabilities = "NVIDIA_DRIVER_CAPABILITIES"
)

const (
	allDriverCapabilities     = "compute,compat32,graphics,utility,video,display,ngx"
	defaultDriverCapabilities = "utility,compute"
)

const (
	capSysAdmin = "CAP_SYS_ADMIN"
)
@@ -316,33 +311,27 @@ func getMigMonitorDevices(env map[string]string) *string {
	return nil
}

func getDriverCapabilities(env map[string]string, legacyImage bool) *string {
	// Grab a reference to the capabilities from the envvar
	// if it actually exists in the environment.
	var capabilities *string
	if caps, ok := env[envNVDriverCapabilities]; ok {
		capabilities = &caps
func getDriverCapabilities(env map[string]string, supportedDriverCapabilities DriverCapabilities, legacyImage bool) DriverCapabilities {
	// We use the default driver capabilities by default. This is filtered to only include the
	// supported capabilities
	capabilities := supportedDriverCapabilities.Intersection(defaultDriverCapabilities)

	capsEnv, capsEnvSpecified := env[envNVDriverCapabilities]

	if !capsEnvSpecified && legacyImage {
		// Environment variable unset with legacy image: set all capabilities.
		return supportedDriverCapabilities
	}

	// Environment variable unset with legacy image: set all capabilities.
	if capabilities == nil && legacyImage {
		allCaps := allDriverCapabilities
		return &allCaps
	if capsEnvSpecified && len(capsEnv) > 0 {
		// If the environment variable is specified and is non-empty, use the capabilities value
		envCapabilities := DriverCapabilities(capsEnv)
		capabilities = supportedDriverCapabilities.Intersection(envCapabilities)
		if envCapabilities != all && capabilities != envCapabilities {
			log.Panicln(fmt.Errorf("unsupported capabilities found in '%v' (allowed '%v')", envCapabilities, capabilities))
		}
	}

	// Environment variable unset or set but empty: set default capabilities.
	if capabilities == nil || len(*capabilities) == 0 {
		defaultCaps := defaultDriverCapabilities
		return &defaultCaps
	}

	// Environment variable set to "all": set all capabilities.
	if *capabilities == "all" {
		allCaps := allDriverCapabilities
		return &allCaps
	}

	// Any other value
	return capabilities
}

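To summarise the new resolution order, here is a hedged usage sketch. The wrapper function is hypothetical, not part of the change, and it assumes the package's existing fmt import and the constants defined above: the filtered default applies when the envvar is unset, a legacy image with no envvar gets every supported capability, and an explicit request is filtered and validated against the supported set.

func exampleCapabilityResolution() {
	supported := DriverCapabilities("compute,utility,display,video")

	// Envvar unset, modern image: the default set, filtered by the supported set.
	fmt.Println(getDriverCapabilities(map[string]string{}, supported, false)) // utility,compute

	// Envvar unset, legacy image: every supported capability.
	fmt.Println(getDriverCapabilities(map[string]string{}, supported, true)) // compute,utility,display,video

	// Explicit request: filtered by the supported set; unsupported entries cause a panic.
	env := map[string]string{envNVDriverCapabilities: "video,display"}
	fmt.Println(getDriverCapabilities(env, supported, false)) // video,display
}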
@@ -389,10 +378,7 @@ func getNvidiaConfig(hookConfig *HookConfig, env map[string]string, mounts []Mou
|
||||
log.Panicln("cannot set MIG_MONITOR_DEVICES in non privileged container")
|
||||
}
|
||||
|
||||
var driverCapabilities string
|
||||
if c := getDriverCapabilities(env, legacyImage); c != nil {
|
||||
driverCapabilities = *c
|
||||
}
|
||||
driverCapabilities := getDriverCapabilities(env, hookConfig.SupportedDriverCapabilities, legacyImage).String()
|
||||
|
||||
requirements := getRequirements(env, legacyImage)
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
description string
|
||||
env map[string]string
|
||||
privileged bool
|
||||
hookConfig *HookConfig
|
||||
expectedConfig *nvidiaConfig
|
||||
expectedPanic bool
|
||||
}{
|
||||
@@ -35,7 +36,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "all",
|
||||
DriverCapabilities: allDriverCapabilities,
|
||||
DriverCapabilities: allDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -49,7 +50,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "all",
|
||||
DriverCapabilities: allDriverCapabilities,
|
||||
DriverCapabilities: allDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -81,7 +82,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "",
|
||||
DriverCapabilities: allDriverCapabilities,
|
||||
DriverCapabilities: allDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -95,7 +96,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: allDriverCapabilities,
|
||||
DriverCapabilities: allDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -110,7 +111,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: defaultDriverCapabilities,
|
||||
DriverCapabilities: defaultDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -125,7 +126,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: allDriverCapabilities,
|
||||
DriverCapabilities: allDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -135,12 +136,12 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
env: map[string]string{
|
||||
envCUDAVersion: "9.0",
|
||||
envNVVisibleDevices: "gpu0,gpu1",
|
||||
envNVDriverCapabilities: "cap0,cap1",
|
||||
envNVDriverCapabilities: "video,display",
|
||||
},
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: "cap0,cap1",
|
||||
DriverCapabilities: "video,display",
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -150,14 +151,14 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
env: map[string]string{
|
||||
envCUDAVersion: "9.0",
|
||||
envNVVisibleDevices: "gpu0,gpu1",
|
||||
envNVDriverCapabilities: "cap0,cap1",
|
||||
envNVDriverCapabilities: "video,display",
|
||||
envNVRequirePrefix + "REQ0": "req0=true",
|
||||
envNVRequirePrefix + "REQ1": "req1=false",
|
||||
},
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: "cap0,cap1",
|
||||
DriverCapabilities: "video,display",
|
||||
Requirements: []string{"cuda>=9.0", "req0=true", "req1=false"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -167,7 +168,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
env: map[string]string{
|
||||
envCUDAVersion: "9.0",
|
||||
envNVVisibleDevices: "gpu0,gpu1",
|
||||
envNVDriverCapabilities: "cap0,cap1",
|
||||
envNVDriverCapabilities: "video,display",
|
||||
envNVRequirePrefix + "REQ0": "req0=true",
|
||||
envNVRequirePrefix + "REQ1": "req1=false",
|
||||
envNVDisableRequire: "true",
|
||||
@@ -175,7 +176,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: "cap0,cap1",
|
||||
DriverCapabilities: "video,display",
|
||||
Requirements: []string{"cuda>=9.0", "req0=true", "req1=false"},
|
||||
DisableRequire: true,
|
||||
},
|
||||
@@ -206,7 +207,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "all",
|
||||
DriverCapabilities: defaultDriverCapabilities,
|
||||
DriverCapabilities: defaultDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -238,7 +239,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "",
|
||||
DriverCapabilities: defaultDriverCapabilities,
|
||||
DriverCapabilities: defaultDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -252,7 +253,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: defaultDriverCapabilities,
|
||||
DriverCapabilities: defaultDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -267,7 +268,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: defaultDriverCapabilities,
|
||||
DriverCapabilities: defaultDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -282,7 +283,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: allDriverCapabilities,
|
||||
DriverCapabilities: allDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -292,12 +293,12 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
env: map[string]string{
|
||||
envNVRequireCUDA: "cuda>=9.0",
|
||||
envNVVisibleDevices: "gpu0,gpu1",
|
||||
envNVDriverCapabilities: "cap0,cap1",
|
||||
envNVDriverCapabilities: "video,display",
|
||||
},
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: "cap0,cap1",
|
||||
DriverCapabilities: "video,display",
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -307,14 +308,14 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
env: map[string]string{
|
||||
envNVRequireCUDA: "cuda>=9.0",
|
||||
envNVVisibleDevices: "gpu0,gpu1",
|
||||
envNVDriverCapabilities: "cap0,cap1",
|
||||
envNVDriverCapabilities: "video,display",
|
||||
envNVRequirePrefix + "REQ0": "req0=true",
|
||||
envNVRequirePrefix + "REQ1": "req1=false",
|
||||
},
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: "cap0,cap1",
|
||||
DriverCapabilities: "video,display",
|
||||
Requirements: []string{"cuda>=9.0", "req0=true", "req1=false"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -324,7 +325,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
env: map[string]string{
|
||||
envNVRequireCUDA: "cuda>=9.0",
|
||||
envNVVisibleDevices: "gpu0,gpu1",
|
||||
envNVDriverCapabilities: "cap0,cap1",
|
||||
envNVDriverCapabilities: "video,display",
|
||||
envNVRequirePrefix + "REQ0": "req0=true",
|
||||
envNVRequirePrefix + "REQ1": "req1=false",
|
||||
envNVDisableRequire: "true",
|
||||
@@ -332,7 +333,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "gpu0,gpu1",
|
||||
DriverCapabilities: "cap0,cap1",
|
||||
DriverCapabilities: "video,display",
|
||||
Requirements: []string{"cuda>=9.0", "req0=true", "req1=false"},
|
||||
DisableRequire: true,
|
||||
},
|
||||
@@ -346,7 +347,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "all",
|
||||
DriverCapabilities: defaultDriverCapabilities,
|
||||
DriverCapabilities: defaultDriverCapabilities.String(),
|
||||
Requirements: []string{},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -362,7 +363,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "all",
|
||||
MigConfigDevices: "mig0,mig1",
|
||||
DriverCapabilities: defaultDriverCapabilities,
|
||||
DriverCapabilities: defaultDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -388,7 +389,7 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "all",
|
||||
MigMonitorDevices: "mig0,mig1",
|
||||
DriverCapabilities: defaultDriverCapabilities,
|
||||
DriverCapabilities: defaultDriverCapabilities.String(),
|
||||
Requirements: []string{"cuda>=9.0"},
|
||||
DisableRequire: false,
|
||||
},
|
||||
@@ -403,14 +404,62 @@ func TestGetNvidiaConfig(t *testing.T) {
|
||||
privileged: false,
|
||||
expectedPanic: true,
|
||||
},
|
||||
{
|
||||
description: "Hook config set as driver-capabilities-all",
|
||||
env: map[string]string{
|
||||
envNVVisibleDevices: "all",
|
||||
envNVDriverCapabilities: "all",
|
||||
},
|
||||
privileged: true,
|
||||
hookConfig: &HookConfig{
|
||||
SupportedDriverCapabilities: "video,display",
|
||||
},
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "all",
|
||||
DriverCapabilities: "video,display",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Hook config set, envvar sets driver-capabilities",
|
||||
env: map[string]string{
|
||||
envNVVisibleDevices: "all",
|
||||
envNVDriverCapabilities: "video,display",
|
||||
},
|
||||
privileged: true,
|
||||
hookConfig: &HookConfig{
|
||||
SupportedDriverCapabilities: "video,display,compute,utility",
|
||||
},
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "all",
|
||||
DriverCapabilities: "video,display",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Hook config set, envvar unset sets default driver-capabilities",
|
||||
env: map[string]string{
|
||||
envNVVisibleDevices: "all",
|
||||
},
|
||||
privileged: true,
|
||||
hookConfig: &HookConfig{
|
||||
SupportedDriverCapabilities: "video,display,utility,compute",
|
||||
},
|
||||
expectedConfig: &nvidiaConfig{
|
||||
Devices: "all",
|
||||
DriverCapabilities: defaultDriverCapabilities.String(),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
// Wrap the call to getNvidiaConfig() in a closure.
|
||||
var config *nvidiaConfig
|
||||
getConfig := func() {
|
||||
hookConfig := getDefaultHookConfig()
|
||||
config = getNvidiaConfig(&hookConfig, tc.env, nil, tc.privileged)
|
||||
hookConfig := tc.hookConfig
|
||||
if hookConfig == nil {
|
||||
defaultConfig := getDefaultHookConfig()
|
||||
hookConfig = &defaultConfig
|
||||
}
|
||||
config = getNvidiaConfig(hookConfig, tc.env, nil, tc.privileged)
|
||||
}
|
||||
|
||||
// For any tests that are expected to panic, make sure they do.
|
||||
@@ -822,3 +871,119 @@ func TestGetDevicesFromEnvvar(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDriverCapabilities(t *testing.T) {
|
||||
|
||||
supportedCapabilities := "compute,utility,display,video"
|
||||
|
||||
testCases := []struct {
|
||||
description string
|
||||
env map[string]string
|
||||
legacyImage bool
|
||||
supportedCapabilities string
|
||||
expectedPanic bool
|
||||
expectedCapabilities string
|
||||
}{
|
||||
{
|
||||
description: "Env is set for legacy image",
|
||||
env: map[string]string{
|
||||
envNVDriverCapabilities: "display,video",
|
||||
},
|
||||
legacyImage: true,
|
||||
supportedCapabilities: supportedCapabilities,
|
||||
expectedCapabilities: "display,video",
|
||||
},
|
||||
{
|
||||
description: "Env is all for legacy image",
|
||||
env: map[string]string{
|
||||
envNVDriverCapabilities: "all",
|
||||
},
|
||||
legacyImage: true,
|
||||
supportedCapabilities: supportedCapabilities,
|
||||
expectedCapabilities: supportedCapabilities,
|
||||
},
|
||||
{
|
||||
description: "Env is empty for legacy image",
|
||||
env: map[string]string{
|
||||
envNVDriverCapabilities: "",
|
||||
},
|
||||
legacyImage: true,
|
||||
supportedCapabilities: supportedCapabilities,
|
||||
expectedCapabilities: defaultDriverCapabilities.String(),
|
||||
},
|
||||
{
|
||||
description: "Env unset for legacy image is 'all'",
|
||||
env: map[string]string{},
|
||||
legacyImage: true,
|
||||
supportedCapabilities: supportedCapabilities,
|
||||
expectedCapabilities: supportedCapabilities,
|
||||
},
|
||||
{
|
||||
description: "Env is set for modern image",
|
||||
env: map[string]string{
|
||||
envNVDriverCapabilities: "display,video",
|
||||
},
|
||||
legacyImage: false,
|
||||
supportedCapabilities: supportedCapabilities,
|
||||
expectedCapabilities: "display,video",
|
||||
},
|
||||
{
|
||||
description: "Env unset for modern image is default",
|
||||
env: map[string]string{},
|
||||
legacyImage: false,
|
||||
supportedCapabilities: supportedCapabilities,
|
||||
expectedCapabilities: defaultDriverCapabilities.String(),
|
||||
},
|
||||
{
|
||||
description: "Env is all for modern image",
|
||||
env: map[string]string{
|
||||
envNVDriverCapabilities: "all",
|
||||
},
|
||||
legacyImage: false,
|
||||
supportedCapabilities: supportedCapabilities,
|
||||
expectedCapabilities: supportedCapabilities,
|
||||
},
|
||||
{
|
||||
description: "Env is empty for modern image",
|
||||
env: map[string]string{
|
||||
envNVDriverCapabilities: "",
|
||||
},
|
||||
legacyImage: false,
|
||||
supportedCapabilities: supportedCapabilities,
|
||||
expectedCapabilities: defaultDriverCapabilities.String(),
|
||||
},
|
||||
{
|
||||
description: "Invalid capabilities panic",
|
||||
env: map[string]string{
|
||||
envNVDriverCapabilities: "compute,utility",
|
||||
},
|
||||
supportedCapabilities: "not-compute,not-utility",
|
||||
expectedPanic: true,
|
||||
},
|
||||
{
|
||||
description: "Default is restricted for modern image",
|
||||
legacyImage: false,
|
||||
supportedCapabilities: "compute",
|
||||
expectedCapabilities: "compute",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
var capabilites DriverCapabilities
|
||||
|
||||
getDriverCapabilities := func() {
|
||||
supportedCapabilities := DriverCapabilities(tc.supportedCapabilities)
|
||||
capabilites = getDriverCapabilities(tc.env, supportedCapabilities, tc.legacyImage)
|
||||
}
|
||||
|
||||
if tc.expectedPanic {
|
||||
require.Panics(t, getDriverCapabilities)
|
||||
return
|
||||
}
|
||||
|
||||
getDriverCapabilities()
|
||||
require.EqualValues(t, tc.expectedCapabilities, capabilites)
|
||||
})
|
||||
}
|
||||
}
@@ -35,10 +35,11 @@ type CLIConfig struct {

// HookConfig : options for the nvidia-container-toolkit.
type HookConfig struct {
	DisableRequire bool `toml:"disable-require"`
	SwarmResource *string `toml:"swarm-resource"`
	AcceptEnvvarUnprivileged bool `toml:"accept-nvidia-visible-devices-envvar-when-unprivileged"`
	AcceptDeviceListAsVolumeMounts bool `toml:"accept-nvidia-visible-devices-as-volume-mounts"`
	DisableRequire bool `toml:"disable-require"`
	SwarmResource *string `toml:"swarm-resource"`
	AcceptEnvvarUnprivileged bool `toml:"accept-nvidia-visible-devices-envvar-when-unprivileged"`
	AcceptDeviceListAsVolumeMounts bool `toml:"accept-nvidia-visible-devices-as-volume-mounts"`
	SupportedDriverCapabilities DriverCapabilities `toml:"supported-driver-capabilities"`

	NvidiaContainerCLI CLIConfig `toml:"nvidia-container-cli"`
}
@@ -49,6 +50,7 @@ func getDefaultHookConfig() (config HookConfig) {
		SwarmResource: nil,
		AcceptEnvvarUnprivileged: true,
		AcceptDeviceListAsVolumeMounts: false,
		SupportedDriverCapabilities: allDriverCapabilities,
		NvidiaContainerCLI: CLIConfig{
			Root: nil,
			Path: nil,
@@ -85,6 +87,15 @@ func getHookConfig() (config HookConfig) {
		}
	}

	if config.SupportedDriverCapabilities == all {
		config.SupportedDriverCapabilities = allDriverCapabilities
	}
	// We ensure that the supported-driver-capabilities option is a subset of allDriverCapabilities
	if intersection := allDriverCapabilities.Intersection(config.SupportedDriverCapabilities); intersection != config.SupportedDriverCapabilities {
		configName := config.getConfigOption("SupportedDriverCapabilities")
		log.Panicf("Invalid value for config option '%v'; %v (supported: %v)\n", configName, config.SupportedDriverCapabilities, allDriverCapabilities)
	}

	return config
}
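The subset check above relies on DriverCapabilities.Intersection. As a standalone sketch of how such an intersection over comma-separated capability lists could be written (the real type and method live in this package; the definition below is an assumption, not the repository's code):

package main

import "strings"

// DriverCapabilities mirrors the string-backed type used above (assumption).
type DriverCapabilities string

// Intersection returns the capabilities of other that are also present in d.
// Iterating over the argument preserves its order, so a valid subset compares
// equal to itself in the check performed by getHookConfig.
func (d DriverCapabilities) Intersection(other DriverCapabilities) DriverCapabilities {
	have := make(map[string]bool)
	for _, c := range strings.Split(string(d), ",") {
		have[c] = true
	}
	var common []string
	for _, c := range strings.Split(string(other), ",") {
		if have[c] {
			common = append(common, c)
		}
	}
	return DriverCapabilities(strings.Join(common, ","))
}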

cmd/nvidia-container-toolkit/hook_config_test.go (new file, 105 lines)
@@ -0,0 +1,105 @@
/**
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
**/

package main

import (
	"fmt"
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestGetHookConfig(t *testing.T) {
	testCases := []struct {
		lines                      []string
		expectedPanic              bool
		expectedDriverCapabilities DriverCapabilities
	}{
		{
			expectedDriverCapabilities: allDriverCapabilities,
		},
		{
			lines: []string{
				"supported-driver-capabilities = \"all\"",
			},
			expectedDriverCapabilities: allDriverCapabilities,
		},
		{
			lines: []string{
				"supported-driver-capabilities = \"compute,utility,not-compute\"",
			},
			expectedPanic: true,
		},
		{
			lines: []string{},
			expectedDriverCapabilities: allDriverCapabilities,
		},
		{
			lines: []string{
				"supported-driver-capabilities = \"\"",
			},
			expectedDriverCapabilities: none,
		},
		{
			lines: []string{
				"supported-driver-capabilities = \"utility,compute\"",
			},
			expectedDriverCapabilities: DriverCapabilities("utility,compute"),
		},
	}

	for i, tc := range testCases {
		t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) {
			var filename string
			defer func() {
				if len(filename) > 0 {
					os.Remove(filename)
				}
				configflag = nil
			}()

			if tc.lines != nil {
				configFile, err := os.CreateTemp("", "*.toml")
				require.NoError(t, err)
				defer configFile.Close()

				filename = configFile.Name()
				configflag = &filename

				for _, line := range tc.lines {
					_, err := configFile.WriteString(fmt.Sprintf("%s\n", line))
					require.NoError(t, err)
				}
			}

			var config HookConfig
			getHookConfig := func() {
				config = getHookConfig()
			}

			if tc.expectedPanic {
				require.Panics(t, getHookConfig)
				return
			}

			getHookConfig()

			require.EqualValues(t, tc.expectedDriverCapabilities, config.SupportedDriverCapabilities)
		})
	}
}
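The test above drives getHookConfig through the package-level configflag variable. A minimal sketch of the path selection that behaviour implies, assuming configflag is the *string flag value set by the CLI; both the helper name and the fallback constant below are assumptions, not the repository's code:

func configPath() string {
	if configflag != nil {
		return *configflag
	}
	// Conventional default location for the hook configuration (assumption).
	return "/etc/nvidia-container-runtime/config.toml"
}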

config/config.toml.ubuntu+jetpack (new file, 19 lines)
@@ -0,0 +1,19 @@
disable-require = false
supported-driver-capabilities = "compute,compat32,graphics,utility,video,display"
#swarm-resource = "DOCKER_RESOURCE_GPU"
#accept-nvidia-visible-devices-envvar-when-unprivileged = true
#accept-nvidia-visible-devices-as-volume-mounts = false

[nvidia-container-cli]
#root = "/run/nvidia/driver"
#path = "/usr/bin/nvidia-container-cli"
environment = []
#debug = "/var/log/nvidia-container-toolkit.log"
#ldcache = "/etc/ld.so.cache"
load-kmods = true
#no-cgroups = false
#user = "root:video"
ldconfig = "@/sbin/ldconfig.real"

[nvidia-container-runtime]
#debug = "/var/log/nvidia-container-runtime.log"
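The jetpack variant above is consumed by the same getHookConfig path shown earlier, with supported-driver-capabilities restricted to the capabilities on that line. As a minimal sketch of how such a file could be decoded into HookConfig, assuming the BurntSushi/toml module already listed in go.mod and that the helper lives alongside hook_config.go in package main (loadHookConfig itself is illustrative, not part of the repository):

import toml "github.com/BurntSushi/toml"

// loadHookConfig decodes a config.toml such as the ubuntu+jetpack variant above.
// Starting from getDefaultHookConfig means commented-out keys keep their defaults,
// while keys such as supported-driver-capabilities override them.
func loadHookConfig(path string) (HookConfig, error) {
	config := getDefaultHookConfig()
	if _, err := toml.DecodeFile(path, &config); err != nil {
		return config, err
	}
	return config, nil
}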
@@ -42,7 +42,9 @@ COPY . .

RUN make PREFIX=${DIST_DIR} cmds

COPY config/config.toml.amzn $DIST_DIR/config.toml
ARG CONFIG_TOML_SUFFIX
ENV CONFIG_TOML_SUFFIX ${CONFIG_TOML_SUFFIX}
COPY config/config.toml.${CONFIG_TOML_SUFFIX} $DIST_DIR/config.toml

# Hook for Project Atomic's fork of Docker: https://github.com/projectatomic/docker/tree/docker-1.13.1-rhel#add-dockerhooks-exec-custom-hooks-for-prestartpoststop-containerspatch
# This might not be useful on Amazon Linux, but it's simpler to keep the RHEL

@@ -42,7 +42,9 @@ COPY . .

RUN make PREFIX=${DIST_DIR} cmds

COPY config/config.toml.centos $DIST_DIR/config.toml
ARG CONFIG_TOML_SUFFIX
ENV CONFIG_TOML_SUFFIX ${CONFIG_TOML_SUFFIX}
COPY config/config.toml.${CONFIG_TOML_SUFFIX} $DIST_DIR/config.toml

# Hook for Project Atomic's fork of Docker: https://github.com/projectatomic/docker/tree/docker-1.13.1-rhel#add-dockerhooks-exec-custom-hooks-for-prestartpoststop-containerspatch
COPY oci-nvidia-hook $DIST_DIR/oci-nvidia-hook

@@ -50,7 +50,9 @@ COPY . .

RUN make PREFIX=${DIST_DIR} cmds

COPY config/config.toml.debian $DIST_DIR/config.toml
ARG CONFIG_TOML_SUFFIX
ENV CONFIG_TOML_SUFFIX ${CONFIG_TOML_SUFFIX}
COPY config/config.toml.${CONFIG_TOML_SUFFIX} $DIST_DIR/config.toml

# Debian Jessie still had ldconfig.real
RUN if [ "$(lsb_release -cs)" = "jessie" ]; then \

@@ -47,7 +47,9 @@ COPY oci-nvidia-hook $DIST_DIR/oci-nvidia-hook
# Hook for libpod/CRI-O: https://github.com/containers/libpod/blob/v0.8.5/pkg/hooks/docs/oci-hooks.5.md
COPY oci-nvidia-hook.json $DIST_DIR/oci-nvidia-hook.json

COPY config/config.toml.opensuse-leap $DIST_DIR/config.toml
ARG CONFIG_TOML_SUFFIX
ENV CONFIG_TOML_SUFFIX ${CONFIG_TOML_SUFFIX}
COPY config/config.toml.${CONFIG_TOML_SUFFIX} $DIST_DIR/config.toml

WORKDIR $DIST_DIR/..
COPY packaging/rpm .

@@ -48,7 +48,9 @@ COPY . .

RUN make PREFIX=${DIST_DIR} cmds

COPY config/config.toml.ubuntu $DIST_DIR/config.toml
ARG CONFIG_TOML_SUFFIX
ENV CONFIG_TOML_SUFFIX ${CONFIG_TOML_SUFFIX}
COPY config/config.toml.${CONFIG_TOML_SUFFIX} $DIST_DIR/config.toml

WORKDIR $DIST_DIR
COPY packaging/debian ./debian

@@ -14,7 +14,7 @@

# Supported OSs by architecture
AMD64_TARGETS := ubuntu20.04 ubuntu18.04 ubuntu16.04 debian10 debian9
X86_64_TARGETS := centos7 centos8 rhel7 rhel8 amazonlinux1 amazonlinux2 opensuse-leap15.1
X86_64_TARGETS := centos7 centos8 rhel7 rhel8 amazonlinux2 opensuse-leap15.1
PPC64LE_TARGETS := ubuntu18.04 ubuntu16.04 centos7 centos8 rhel7 rhel8
ARM64_TARGETS := ubuntu20.04 ubuntu18.04
AARCH64_TARGETS := centos8 rhel8 amazonlinux2
@@ -114,6 +114,9 @@ docker-all: $(AMD64_TARGETS) $(X86_64_TARGETS) \
--rhel%: VERSION = $(patsubst rhel%-$(ARCH),%,$(TARGET_PLATFORM))
--rhel%: ARTIFACTS_DIR = $(DIST_DIR)/rhel$(VERSION)/$(ARCH)

# We allow the CONFIG_TOML_SUFFIX to be overridden.
CONFIG_TOML_SUFFIX ?= $(OS)

docker-build-%:
	@echo "Building for $(TARGET_PLATFORM)"
	docker pull --platform=linux/$(ARCH) $(BASEIMAGE)
@@ -124,6 +127,7 @@ docker-build-%:
		--build-arg GOLANG_VERSION="$(GOLANG_VERSION)" \
		--build-arg PKG_VERS="$(LIB_VERSION)" \
		--build-arg PKG_REV="$(PKG_REV)" \
		--build-arg CONFIG_TOML_SUFFIX="$(CONFIG_TOML_SUFFIX)" \
		--tag $(BUILDIMAGE) \
		--file $(DOCKERFILE) .
	$(DOCKER) run \
|
||||
|
||||
go.mod (1 line changed)
@@ -4,7 +4,6 @@ go 1.14

require (
	github.com/BurntSushi/toml v0.3.1
	github.com/containerd/containerd v1.5.7
	github.com/containers/podman/v2 v2.2.1
	github.com/opencontainers/runtime-spec v1.0.3-0.20211101234015-a3c33d663ebc
	github.com/pelletier/go-toml v1.9.3

@@ -1,3 +1,30 @@
|
||||
nvidia-container-toolkit (1.8.0~rc.2-1) UNRELEASED; urgency=medium
|
||||
|
||||
* Remove support for building amazonlinux1 packages
|
||||
|
||||
-- NVIDIA CORPORATION <cudatools@nvidia.com> Thu, 20 Jan 2022 13:38:48 +0100
|
||||
|
||||
nvidia-container-toolkit (1.8.0~rc.1-1) UNRELEASED; urgency=medium
|
||||
|
||||
* [libnvidia-container] Add support for cgroupv2
|
||||
* Release toolkit-container images from nvidia-container-toolkit repository
|
||||
|
||||
-- NVIDIA CORPORATION <cudatools@nvidia.com> Wed, 08 Dec 2021 12:56:49 +0100
|
||||
|
||||
nvidia-container-toolkit (1.7.0-1) UNRELEASED; urgency=medium
|
||||
|
||||
* Promote 1.7.0~rc.1-1 to 1.7.0-1
|
||||
* Bump Golang version to 1.16.4
|
||||
|
||||
-- NVIDIA CORPORATION <cudatools@nvidia.com> Tue, 30 Nov 2021 14:11:55 +0100
|
||||
|
||||
nvidia-container-toolkit (1.7.0~rc.1-1) experimental; urgency=medium
|
||||
|
||||
* Specify containerd runtime type as string in config tools to remove dependency on containerd package
|
||||
* Add supported-driver-capabilities config option to allow for a subset of all driver capabilities to be specified
|
||||
|
||||
-- NVIDIA CORPORATION <cudatools@nvidia.com> Thu, 25 Nov 2021 11:36:29 +0100
|
||||
|
||||
nvidia-container-toolkit (1.6.0-1) UNRELEASED; urgency=medium
|
||||
|
||||
* Promote 1.6.0~rc.3-1 to 1.6.0-1
|
||||
@@ -7,6 +34,7 @@ nvidia-container-toolkit (1.6.0-1) UNRELEASED; urgency=medium
|
||||
|
||||
nvidia-container-toolkit (1.6.0~rc.3-1) experimental; urgency=medium
|
||||
|
||||
* Add supported-driver-capabilities config option to the nvidia-container-toolkit
|
||||
* Move OCI and command line checks for runtime to internal oci package
|
||||
|
||||
-- NVIDIA CORPORATION <cudatools@nvidia.com> Mon, 15 Nov 2021 13:02:23 +0100
|
||||
|
||||
@@ -64,12 +64,28 @@ rm -f %{_bindir}/nvidia-container-runtime-hook
|
||||
/usr/share/containers/oci/hooks.d/oci-nvidia-hook.json
|
||||
|
||||
%changelog
|
||||
* Wed Nov 17 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 3.6.0-1
|
||||
* Thu Jan 20 2022 NVIDIA CORPORATION <cudatools@nvidia.com> 1.8.0-0.1.rc.2
|
||||
- Remove support for building amazonlinux1 packages
|
||||
|
||||
* Wed Dec 08 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.8.0-0.1.rc.1
|
||||
- [libnvidia-container] Add support for cgroupv2
|
||||
- Release toolkit-container images from nvidia-container-toolkit repository
|
||||
|
||||
* Tue Nov 30 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.7.0-1
|
||||
- Promote 1.7.0~rc.1-1 to 1.7.0-1
|
||||
- Bump Golang version to 1.16.4
|
||||
|
||||
* Thu Nov 25 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.7.0-0.1.rc.1
|
||||
- Specify containerd runtime type as string in config tools to remove dependency on containerd package
|
||||
- Add supported-driver-capabilities config option to allow for a subset of all driver capabilities to be specified
|
||||
|
||||
* Wed Nov 17 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.6.0-1
|
||||
- Promote 1.6.0-0.1.rc.3 to 1.6.0-1
|
||||
- Fix unnecessary logging to stderr instead of configured nvidia-container-runtime log file
|
||||
|
||||
* Mon Nov 15 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.6.0-0.1.rc.3
|
||||
|
||||
- Add supported-driver-capabilities config option to the nvidia-container-toolkit
|
||||
- Move OCI and command line checks for runtime to internal oci package
|
||||
|
||||
* Fri Nov 05 2021 NVIDIA CORPORATION <cudatools@nvidia.com> 1.6.0-0.1.rc.2
|
||||
|
||||
@@ -59,15 +59,20 @@ if [[ -z ${NVIDIA_CONTAINER_TOOLKIT_VERSION} ]]; then
|
||||
eval $(${SCRIPTS_DIR}/get-component-versions.sh)
|
||||
fi
|
||||
|
||||
# We set the TOOLKIT_VERSION for the nvidia-container-runtime and nvidia-docker targets
|
||||
# We set the TOOLKIT_VERSION, TOOLKIT_TAG for the nvidia-container-runtime and nvidia-docker targets
|
||||
# The LIB_TAG is also overridden to match the TOOLKIT_TAG.
|
||||
# Build nvidia-container-runtime
|
||||
make -C ${NVIDIA_CONTAINER_RUNTIME_ROOT} \
|
||||
LIB_VERSION="${NVIDIA_CONTAINER_RUNTIME_VERSION}" \
|
||||
LIB_TAG="${NVIDIA_CONTAINER_TOOLKIT_TAG}" \
|
||||
TOOLKIT_VERSION="${NVIDIA_CONTAINER_TOOLKIT_VERSION}" \
|
||||
TOOLKIT_TAG="${NVIDIA_CONTAINER_TOOLKIT_TAG}" \
|
||||
${TARGET}
|
||||
|
||||
# Build nvidia-docker2
|
||||
make -C ${NVIDIA_DOCKER_ROOT} \
|
||||
LIB_VERSION="${NVIDIA_DOCKER_VERSION}" \
|
||||
LIB_TAG="${NVIDIA_CONTAINER_TOOLKIT_TAG}" \
|
||||
TOOLKIT_VERSION="${NVIDIA_CONTAINER_TOOLKIT_VERSION}" \
|
||||
TOOLKIT_TAG="${NVIDIA_CONTAINER_TOOLKIT_TAG}" \
|
||||
${TARGET}
|
||||
|
||||
@@ -28,31 +28,31 @@ set -e
|
||||
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
|
||||
PROJECT_ROOT="$( cd ${SCRIPTS_DIR}/.. && pwd )"
|
||||
|
||||
: ${LIBNVIDIA_CONTAINER_ROOT:=${PROJECT_ROOT}/third_party/libnvidia-container}
|
||||
: ${NVIDIA_CONTAINER_TOOLKIT_ROOT:=${PROJECT_ROOT}}
|
||||
: ${NVIDIA_CONTAINER_RUNTIME_ROOT:=${PROJECT_ROOT}/third_party/nvidia-container-runtime}
|
||||
: ${NVIDIA_DOCKER_ROOT:=${PROJECT_ROOT}/third_party/nvidia-docker}
|
||||
LIBNVIDIA_CONTAINER_ROOT=${PROJECT_ROOT}/third_party/libnvidia-container
|
||||
NVIDIA_CONTAINER_TOOLKIT_ROOT=${PROJECT_ROOT}
|
||||
NVIDIA_CONTAINER_RUNTIME_ROOT=${PROJECT_ROOT}/third_party/nvidia-container-runtime
|
||||
NVIDIA_DOCKER_ROOT=${PROJECT_ROOT}/third_party/nvidia-docker
|
||||
|
||||
# Get version for libnvidia-container
|
||||
libnvidia_container_version_tag=$(grep "#define NVC_VERSION" ${LIBNVIDIA_CONTAINER_ROOT}/src/nvc.h \
|
||||
| sed -e 's/#define NVC_VERSION[[:space:]]"\(.*\)"/\1/')
|
||||
|
||||
versions_makefile=${NVIDIA_CONTAINER_TOOLKIT_ROOT}/versions.mk
|
||||
# Get version for nvidia-container-toolkit
|
||||
nvidia_container_toolkit_version=$(grep -m 1 "^LIB_VERSION := " ${NVIDIA_CONTAINER_TOOLKIT_ROOT}/Makefile | sed -e 's/LIB_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
|
||||
nvidia_container_toolkit_tag=$(grep -m 1 "^LIB_TAG .= " ${NVIDIA_CONTAINER_TOOLKIT_ROOT}/Makefile | sed -e 's/LIB_TAG .=[[:space:]]\(.*\)[[:space:]]*/\1/')
|
||||
nvidia_container_toolkit_version=$(grep -m 1 "^LIB_VERSION := " ${versions_makefile} | sed -e 's/LIB_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
|
||||
nvidia_container_toolkit_tag=$(grep -m 1 "^LIB_TAG .= " ${versions_makefile} | sed -e 's/LIB_TAG .=[[:space:]]\(.*\)[[:space:]]*/\1/')
|
||||
nvidia_container_toolkit_version_tag="${nvidia_container_toolkit_version}${nvidia_container_toolkit_tag:+~${nvidia_container_toolkit_tag}}"
|
||||
|
||||
# Get version for nvidia-container-runtime
|
||||
nvidia_container_runtime_version=$(grep -m 1 "^LIB_VERSION := " ${NVIDIA_CONTAINER_RUNTIME_ROOT}/Makefile | sed -e 's/LIB_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
|
||||
nvidia_container_runtime_tag=$(grep -m 1 "^LIB_TAG .= " ${NVIDIA_CONTAINER_RUNTIME_ROOT}/Makefile | sed -e 's/LIB_TAG .=[[:space:]]\(.*\)[[:space:]]*/\1/')
|
||||
nvidia_container_runtime_version=$(grep -m 1 "^NVIDIA_CONTAINER_RUNTIME_VERSION := " ${versions_makefile} | sed -e 's/NVIDIA_CONTAINER_RUNTIME_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
|
||||
nvidia_container_runtime_tag=${nvidia_container_toolkit_tag}
|
||||
nvidia_container_runtime_version_tag="${nvidia_container_runtime_version}${nvidia_container_runtime_tag:+~${nvidia_container_runtime_tag}}"
|
||||
|
||||
# Get version for nvidia-docker
|
||||
nvidia_docker_version=$(grep -m 1 "^LIB_VERSION := " ${NVIDIA_DOCKER_ROOT}/Makefile | sed -e 's/LIB_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
|
||||
nvidia_docker_tag=$(grep -m 1 "^LIB_TAG .= " ${NVIDIA_DOCKER_ROOT}/Makefile | sed -e 's/LIB_TAG .=[[:space:]]\(.*\)[[:space:]]*/\1/')
|
||||
nvidia_docker_version=$(grep -m 1 "^NVIDIA_DOCKER_VERSION := " ${versions_makefile} | sed -e 's/NVIDIA_DOCKER_VERSION :=[[:space:]]\(.*\)[[:space:]]*/\1/')
|
||||
nvidia_docker_tag=${nvidia_container_toolkit_tag}
|
||||
nvidia_docker_version_tag="${nvidia_docker_version}${nvidia_docker_tag:+~${nvidia_docker_tag}}"
|
||||
|
||||
|
||||
echo "LIBNVIDIA_CONTAINER_VERSION=${libnvidia_container_version_tag}"
|
||||
echo "NVIDIA_CONTAINER_TOOLKIT_VERSION=${nvidia_container_toolkit_version}"
|
||||
echo "NVIDIA_CONTAINER_TOOLKIT_TAG=${nvidia_container_toolkit_tag}"
|
||||
|
||||
scripts/pull-packages.sh (new executable file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
function assert_usage() {
|
||||
echo "Incorrect arguments: $*"
|
||||
echo "$(basename ${BASH_SOURCE[0]}) IMAGE DIST_DIR"
|
||||
exit 1
|
||||
}
|
||||
|
||||
set -e -x
|
||||
|
||||
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
|
||||
PROJECT_ROOT="$( cd ${SCRIPTS_DIR}/.. && pwd )"
|
||||
|
||||
if [[ $# -ne 2 ]]; then
|
||||
assert_usage $*
|
||||
fi
|
||||
|
||||
IMAGE=$1
|
||||
DIST_DIR=$2
|
||||
|
||||
if [[ -z ${IMAGE} ]]; then
|
||||
echo "ERROR: IMAGE must be non-empty"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z ${DIST_DIR} ]]; then
|
||||
echo "ERROR: DIST_DIR must be non-empty"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -e ${DIST_DIR} ]]; then
|
||||
echo "ERROR: The specified DIST_DIR ${DIST_DIR} exists."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Copying package files from ${IMAGE} to ${DIST_DIR}"
|
||||
mkdir -p ${DIST_DIR}
|
||||
docker run --rm \
|
||||
-v $(pwd):$(pwd) \
|
||||
-w $(pwd) \
|
||||
-u $(id -u):$(id -g) \
|
||||
--entrypoint="bash" \
|
||||
${IMAGE} \
|
||||
-c "cp -R /artifacts/packages/* ${DIST_DIR}"
|
||||
@@ -28,7 +28,6 @@ PROJECT_ROOT="$( cd ${SCRIPTS_DIR}/.. && pwd )"
|
||||
# to the relevant repositories. This targets forwarded to the build-all-components script
|
||||
# can be overridden by specifying command line arguments.
|
||||
all=(
|
||||
amazonlinux1-x86_64
|
||||
amazonlinux2-aarch64
|
||||
amazonlinux2-x86_64
|
||||
centos7-ppc64le
|
||||
@@ -52,20 +51,30 @@ else
|
||||
targets=${all[@]}
|
||||
fi
|
||||
|
||||
eval $(${SCRIPTS_DIR}/get-component-versions.sh)
|
||||
export NVIDIA_CONTAINER_TOOLKIT_VERSION
|
||||
export NVIDIA_CONTAINER_TOOLKIT_TAG
|
||||
|
||||
if [[ "${NVIDIA_CONTAINER_TOOLKIT_TAG}" != "${NVIDIA_CONTAINER_RUNTIME_TAG}" ]]; then
|
||||
echo "ERROR: The nvidia-container-runtime and nvidia-container-toolkit version tags do not match"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${NVIDIA_CONTAINER_TOOLKIT_TAG}" != "${NVIDIA_DOCKER_TAG}" ]]; then
|
||||
echo "ERROR: The nvidia-docker and nvidia-container-toolkit version tags do not match"
|
||||
echo "Updating components"
|
||||
${SCRIPTS_DIR}/update-components.sh
|
||||
if [[ -n $(git status -s third_party) && ${ALLOW_LOCAL_COMPONENT_CHANGES} != "true" ]]; then
|
||||
echo "ERROR: Building with local component changes."
|
||||
echo "Commit pending changes or rerun with ALLOW_LOCAL_COMPONENT_CHANGES='true'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
eval $(${SCRIPTS_DIR}/get-component-versions.sh)
|
||||
|
||||
if [[ "${NVIDIA_CONTAINER_TOOLKIT_VERSION}${NVIDIA_CONTAINER_TOOLKIT_TAG:+~${NVIDIA_CONTAINER_TOOLKIT_TAG}}" != "${LIBNVIDIA_CONTAINER_VERSION}" ]]; then
|
||||
set +x
|
||||
echo "The libnvidia-container and nvidia-container-toolkit versions do not match."
|
||||
echo "lib: '${LIBNVIDIA_CONTAINER_VERSION}'"
|
||||
echo "toolkit: '${NVIDIA_CONTAINER_TOOLKIT_VERSION}${NVIDIA_CONTAINER_TOOLKIT_TAG:+~${NVIDIA_CONTAINER_TOOLKIT_TAG}}'"
|
||||
set -x
|
||||
[[ ${ALLOW_VERSION_MISMATCH} == "true" ]] || exit 1
|
||||
fi
|
||||
|
||||
export NVIDIA_CONTAINER_TOOLKIT_VERSION
|
||||
export NVIDIA_CONTAINER_TOOLKIT_TAG
|
||||
export NVIDIA_CONTAINER_RUNTIME_VERSION
|
||||
export NVIDIA_DOCKER_VERSION
|
||||
|
||||
for target in ${targets[@]}; do
|
||||
${SCRIPTS_DIR}/build-all-components.sh ${target}
|
||||
done
|
||||
|
||||
scripts/update-components.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -e -x
|
||||
|
||||
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
|
||||
PROJECT_ROOT="$( cd ${SCRIPTS_DIR}/.. && pwd )"
|
||||
|
||||
git submodule update --init
|
||||
|
||||
echo "Component status before update"
|
||||
git submodule status
|
||||
|
||||
# We update all submodules from their respective remotes
|
||||
# NOTE: Appending `-- [PATH]` will limit the update to a specific component
|
||||
git submodule update --remote
|
||||
|
||||
if [[ -z $(git status -s third_party) ]]; then
|
||||
echo "Components already up to date"
|
||||
else
|
||||
echo "Components updated"
|
||||
git submodule status
|
||||
fi
|
||||
@@ -31,6 +31,8 @@ testing::toolkit::install() {
|
||||
|
||||
test -L "${shared_dir}/usr/local/nvidia/toolkit/libnvidia-container.so.1"
|
||||
test -e "$(${READLINK} -f "${shared_dir}/usr/local/nvidia/toolkit/libnvidia-container.so.1")"
|
||||
test -L "${shared_dir}/usr/local/nvidia/toolkit/libnvidia-container-go.so.1"
|
||||
test -e "$(${READLINK} -f "${shared_dir}/usr/local/nvidia/toolkit/libnvidia-container-go.so.1")"
|
||||
|
||||
test -e "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-cli"
|
||||
test -e "${shared_dir}/usr/local/nvidia/toolkit/nvidia-container-toolkit"
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
# limitations under the License.
|
||||
|
||||
WORKFLOW ?= nvidia-docker
|
||||
TEST_REPO ?= elezar.github.io
|
||||
|
||||
DISTRIBUTIONS := ubuntu18.04 centos8
|
||||
|
||||
@@ -22,22 +21,23 @@ RUN_TARGETS := $(patsubst %,run-%, $(DISTRIBUTIONS))
|
||||
RELEASE_TARGETS := $(patsubst %,release-%, $(DISTRIBUTIONS))
|
||||
LOCAL_TARGETS := $(patsubst %,local-%, $(DISTRIBUTIONS))
|
||||
|
||||
.PHONY: $(IMAGE_TARGETS)
|
||||
.PHONY: $(IMAGE_TARGETS) $(RUN_TARGETS)
|
||||
|
||||
image-%: DOCKERFILE = docker/$(*)/Dockerfile
|
||||
|
||||
images: $(IMAGE_TARGETS)
|
||||
$(IMAGE_TARGETS): image-%:
|
||||
docker build \
|
||||
$(IMAGE_TARGETS): image-%: $(DOCKERFILE)
|
||||
docker build ${PLATFORM_ARGS} \
|
||||
--build-arg WORKFLOW="$(WORKFLOW)" \
|
||||
--build-arg TEST_REPO="$(TEST_REPO)" \
|
||||
-t nvidia-container-toolkit-repo-test:$(*) \
|
||||
-f $(DOCKERFILE) \
|
||||
$(shell dirname $(DOCKERFILE))
|
||||
|
||||
|
||||
%-ubuntu18.04: ARCH = amd64
|
||||
%-centos8: ARCH = x86_64
|
||||
%-ubuntu18.04: ARCH ?= amd64
|
||||
%-centos8: ARCH ?= x86_64
|
||||
|
||||
PLATFORM_ARGS = --platform=linux/${ARCH}
|
||||
|
||||
RELEASE_TEST_DIR := $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
|
||||
PROJECT_ROOT := $(RELEASE_TEST_DIR)/../..
|
||||
@@ -46,11 +46,11 @@ LOCAL_PACKAGE_ROOT := $(PROJECT_ROOT)/dist
|
||||
|
||||
local-%: DIST = $(*)
|
||||
local-%: LOCAL_REPO_ARGS = -v $(LOCAL_PACKAGE_ROOT)/$(DIST)/$(ARCH):/local-repository
|
||||
$(LOCAL_TARGETS): local-%: release-% run-% | release-%
|
||||
$(LOCAL_TARGETS): local-%: run-%
|
||||
|
||||
run-%: DIST = $(*)
|
||||
$(RUN_TARGETS): run-%:
|
||||
docker run --rm -ti \
|
||||
$(RUN_TARGETS): run-%: image-%
|
||||
docker run ${PLATFORM_ARGS} --rm -ti \
|
||||
$(LOCAL_REPO_ARGS) \
|
||||
nvidia-container-toolkit-repo-test:$(*)
|
||||
|
||||
|
||||
@@ -25,11 +25,10 @@ RUN fpm -s empty \
|
||||
|
||||
|
||||
ARG WORKFLOW=nvidia-docker
|
||||
ARG TEST_REPO=nvidia.github.io
|
||||
ENV TEST_REPO ${TEST_REPO}
|
||||
RUN curl -s -L https://nvidia.github.io/${WORKFLOW}/centos8/nvidia-docker.repo \
|
||||
| tee /etc/yum.repos.d/nvidia-docker.repo
|
||||
|
||||
COPY entrypoint.sh /
|
||||
COPY install_repo.sh /
|
||||
|
||||
ENTRYPOINT [ "/entrypoint.sh" ]
|
||||
@@ -33,10 +33,10 @@ gpgcheck=0
|
||||
protect=1
|
||||
EOL
|
||||
yum-config-manager --enable local-repository
|
||||
elif [[ -n ${TEST_REPO} ]]; then
|
||||
./install_repo.sh ${TEST_REPO}
|
||||
else
|
||||
echo "Setting up TEST repo: ${TEST_REPO}"
|
||||
sed -i -e 's#nvidia\.github\.io/libnvidia-container#${TEST_REPO}/libnvidia-container#g' /etc/yum.repos.d/nvidia-docker.repo
|
||||
yum-config-manager --enable libnvidia-container-experimental
|
||||
echo "Skipping repo setup"
|
||||
fi
|
||||
|
||||
exec bash $@
|
||||
|
||||
test/release/docker/centos8/install_repo.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script is used to build the packages for the components of the NVIDIA
|
||||
# Container Stack. These include the nvidia-container-toolkit in this repository
|
||||
# as well as the components included in the third_party folder.
|
||||
# All required packages are generated in the specified dist folder.
|
||||
|
||||
test_repo=$1
|
||||
echo "Setting up TEST repo: ${test_repo}"
|
||||
sed -i -e "s#nvidia\.github\.io/libnvidia-container#${test_repo}/libnvidia-container#g" /etc/yum.repos.d/nvidia-docker.repo
|
||||
yum-config-manager --enable libnvidia-container-experimental
|
||||
Submodule third_party/libnvidia-container updated: dd2c49d669...d48f9b0d50
Submodule third_party/nvidia-container-runtime updated: 38ff520daa...3588013b7d
Submodule third_party/nvidia-docker updated: fd3233aa5f...51d3c9e22b
@@ -25,7 +25,6 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/plugin"
|
||||
toml "github.com/pelletier/go-toml"
|
||||
log "github.com/sirupsen/logrus"
|
||||
cli "github.com/urfave/cli/v2"
|
||||
@@ -44,7 +43,7 @@ const (
|
||||
defaultConfig = "/etc/containerd/config.toml"
|
||||
defaultSocket = "/run/containerd/containerd.sock"
|
||||
defaultRuntimeClass = "nvidia"
|
||||
defaultRuntmeType = plugin.RuntimeRuncV2
|
||||
defaultRuntmeType = "io.containerd.runc.v2"
|
||||
defaultSetAsDefault = true
|
||||
defaultRestartMode = restartModeSignal
|
||||
defaultHostRootMount = "/host"
|
||||
|
||||
@@ -152,7 +152,7 @@ func Install(cli *cli.Context) error {
|
||||
return fmt.Errorf("could not create required directories: %v", err)
|
||||
}
|
||||
|
||||
err = installContainerLibrary(toolkitDirArg)
|
||||
err = installContainerLibraries(toolkitDirArg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error installing NVIDIA container library: %v", err)
|
||||
}
|
||||
@@ -180,14 +180,31 @@ func Install(cli *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// installContainerLibrary locates and installs the libnvidia-container.so.1 library.
|
||||
// installContainerLibraries locates and installs the libraries that are part of
|
||||
// the nvidia-container-toolkit.
|
||||
// A predefined set of library candidates are considered, with the first one
|
||||
// resulting in success being installed to the toolkit folder. The install process
|
||||
// resolves the symlink for the library and copies the versioned library itself.
|
||||
func installContainerLibrary(toolkitDir string) error {
|
||||
func installContainerLibraries(toolkitDir string) error {
|
||||
log.Infof("Installing NVIDIA container library to '%v'", toolkitDir)
|
||||
|
||||
const libName = "libnvidia-container.so.1"
|
||||
libs := []string{
|
||||
"libnvidia-container.so.1",
|
||||
"libnvidia-container-go.so.1",
|
||||
}
|
||||
|
||||
for _, l := range libs {
|
||||
err := installLibrary(l, toolkitDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to install %s: %v", l, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// installLibrary installs the specified library to the toolkit directory.
|
||||
func installLibrary(libName string, toolkitDir string) error {
|
||||
libraryPath, err := findLibrary("", libName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error locating NVIDIA container library: %v", err)
|
||||
|
||||
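Per the doc comment above, each library candidate is installed by resolving its .so.1 symlink and copying the versioned file it points at into the toolkit directory. A rough standalone sketch of that copy step, assuming it receives the path returned by findLibrary; installResolvedLibrary and its details are illustrative, not the repository's code:

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// installResolvedLibrary resolves the symlink for a library such as
// libnvidia-container.so.1 and copies the versioned library behind it
// into toolkitDir, returning the installed path.
func installResolvedLibrary(libraryPath string, toolkitDir string) (string, error) {
	resolved, err := filepath.EvalSymlinks(libraryPath)
	if err != nil {
		return "", fmt.Errorf("error resolving symlink for %v: %v", libraryPath, err)
	}

	src, err := os.Open(resolved)
	if err != nil {
		return "", err
	}
	defer src.Close()

	dest := filepath.Join(toolkitDir, filepath.Base(resolved))
	dst, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
	if err != nil {
		return "", err
	}
	defer dst.Close()

	if _, err := io.Copy(dst, src); err != nil {
		return "", err
	}
	return dest, nil
}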
vendor/github.com/containerd/containerd/LICENSE (generated, vendored; 191 lines removed)
@@ -1,191 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright The containerd Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
vendor/github.com/containerd/containerd/NOTICE (generated, vendored; 16 lines removed)
@@ -1,16 +0,0 @@
|
||||
Docker
|
||||
Copyright 2012-2015 Docker, Inc.
|
||||
|
||||
This product includes software developed at Docker, Inc. (https://www.docker.com).
|
||||
|
||||
The following is courtesy of our legal counsel:
|
||||
|
||||
|
||||
Use and transfer of Docker may be subject to certain restrictions by the
|
||||
United States and other governments.
|
||||
It is your responsibility to ensure that your use and/or transfer does not
|
||||
violate applicable laws.
|
||||
|
||||
For more information, please see https://www.bis.doc.gov
|
||||
|
||||
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
|
||||
vendor/github.com/containerd/containerd/errdefs/errors.go (generated, vendored; 93 lines removed)
@@ -1,93 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package errdefs defines the common errors used throughout containerd
|
||||
// packages.
|
||||
//
|
||||
// Use with errors.Wrap and error.Wrapf to add context to an error.
|
||||
//
|
||||
// To detect an error class, use the IsXXX functions to tell whether an error
|
||||
// is of a certain type.
|
||||
//
|
||||
// The functions ToGRPC and FromGRPC can be used to map server-side and
|
||||
// client-side errors to the correct types.
|
||||
package errdefs
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Definitions of common error types used throughout containerd. All containerd
|
||||
// errors returned by most packages will map into one of these errors classes.
|
||||
// Packages should return errors of these types when they want to instruct a
|
||||
// client to take a particular action.
|
||||
//
|
||||
// For the most part, we just try to provide local grpc errors. Most conditions
|
||||
// map very well to those defined by grpc.
|
||||
var (
|
||||
ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
|
||||
ErrInvalidArgument = errors.New("invalid argument")
|
||||
ErrNotFound = errors.New("not found")
|
||||
ErrAlreadyExists = errors.New("already exists")
|
||||
ErrFailedPrecondition = errors.New("failed precondition")
|
||||
ErrUnavailable = errors.New("unavailable")
|
||||
ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
|
||||
)
|
||||
|
||||
// IsInvalidArgument returns true if the error is due to an invalid argument
|
||||
func IsInvalidArgument(err error) bool {
|
||||
return errors.Is(err, ErrInvalidArgument)
|
||||
}
|
||||
|
||||
// IsNotFound returns true if the error is due to a missing object
|
||||
func IsNotFound(err error) bool {
|
||||
return errors.Is(err, ErrNotFound)
|
||||
}
|
||||
|
||||
// IsAlreadyExists returns true if the error is due to an already existing
|
||||
// metadata item
|
||||
func IsAlreadyExists(err error) bool {
|
||||
return errors.Is(err, ErrAlreadyExists)
|
||||
}
|
||||
|
||||
// IsFailedPrecondition returns true if an operation could not proceed to the
|
||||
// lack of a particular condition
|
||||
func IsFailedPrecondition(err error) bool {
|
||||
return errors.Is(err, ErrFailedPrecondition)
|
||||
}
|
||||
|
||||
// IsUnavailable returns true if the error is due to a resource being unavailable
|
||||
func IsUnavailable(err error) bool {
|
||||
return errors.Is(err, ErrUnavailable)
|
||||
}
|
||||
|
||||
// IsNotImplemented returns true if the error is due to not being implemented
|
||||
func IsNotImplemented(err error) bool {
|
||||
return errors.Is(err, ErrNotImplemented)
|
||||
}
|
||||
|
||||
// IsCanceled returns true if the error is due to `context.Canceled`.
|
||||
func IsCanceled(err error) bool {
|
||||
return errors.Is(err, context.Canceled)
|
||||
}
|
||||
|
||||
// IsDeadlineExceeded returns true if the error is due to
|
||||
// `context.DeadlineExceeded`.
|
||||
func IsDeadlineExceeded(err error) bool {
|
||||
return errors.Is(err, context.DeadlineExceeded)
|
||||
}
|
||||
vendor/github.com/containerd/containerd/errdefs/grpc.go (generated, vendored; 147 lines removed)
@@ -1,147 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package errdefs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ToGRPC will attempt to map the backend containerd error into a grpc error,
|
||||
// using the original error message as a description.
|
||||
//
|
||||
// Further information may be extracted from certain errors depending on their
|
||||
// type.
|
||||
//
|
||||
// If the error is unmapped, the original error will be returned to be handled
|
||||
// by the regular grpc error handling stack.
|
||||
func ToGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if isGRPCError(err) {
|
||||
// error has already been mapped to grpc
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case IsInvalidArgument(err):
|
||||
return status.Errorf(codes.InvalidArgument, err.Error())
|
||||
case IsNotFound(err):
|
||||
return status.Errorf(codes.NotFound, err.Error())
|
||||
case IsAlreadyExists(err):
|
||||
return status.Errorf(codes.AlreadyExists, err.Error())
|
||||
case IsFailedPrecondition(err):
|
||||
return status.Errorf(codes.FailedPrecondition, err.Error())
|
||||
case IsUnavailable(err):
|
||||
return status.Errorf(codes.Unavailable, err.Error())
|
||||
case IsNotImplemented(err):
|
||||
return status.Errorf(codes.Unimplemented, err.Error())
|
||||
case IsCanceled(err):
|
||||
return status.Errorf(codes.Canceled, err.Error())
|
||||
case IsDeadlineExceeded(err):
|
||||
return status.Errorf(codes.DeadlineExceeded, err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
|
||||
// and combining it with the target error string.
|
||||
//
|
||||
// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
|
||||
func ToGRPCf(err error, format string, args ...interface{}) error {
|
||||
return ToGRPC(errors.Wrapf(err, format, args...))
|
||||
}
|
||||
|
||||
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
|
||||
func FromGRPC(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var cls error // divide these into error classes, becomes the cause
|
||||
|
||||
switch code(err) {
|
||||
case codes.InvalidArgument:
|
||||
cls = ErrInvalidArgument
|
||||
case codes.AlreadyExists:
|
||||
cls = ErrAlreadyExists
|
||||
case codes.NotFound:
|
||||
cls = ErrNotFound
|
||||
case codes.Unavailable:
|
||||
cls = ErrUnavailable
|
||||
case codes.FailedPrecondition:
|
||||
cls = ErrFailedPrecondition
|
||||
case codes.Unimplemented:
|
||||
cls = ErrNotImplemented
|
||||
case codes.Canceled:
|
||||
cls = context.Canceled
|
||||
case codes.DeadlineExceeded:
|
||||
cls = context.DeadlineExceeded
|
||||
default:
|
||||
cls = ErrUnknown
|
||||
}
|
||||
|
||||
msg := rebaseMessage(cls, err)
|
||||
if msg != "" {
|
||||
err = errors.Wrap(cls, msg)
|
||||
} else {
|
||||
err = errors.WithStack(cls)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// rebaseMessage removes the repeats for an error at the end of an error
|
||||
// string. This will happen when taking an error over grpc then remapping it.
|
||||
//
|
||||
// Effectively, we just remove the string of cls from the end of err if it
|
||||
// appears there.
|
||||
func rebaseMessage(cls error, err error) string {
|
||||
desc := errDesc(err)
|
||||
clss := cls.Error()
|
||||
if desc == clss {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(desc, ": "+clss)
|
||||
}
|
||||
|
||||
func isGRPCError(err error) bool {
|
||||
_, ok := status.FromError(err)
|
||||
return ok
|
||||
}
|
||||
|
||||
func code(err error) codes.Code {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Code()
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
||||
func errDesc(err error) string {
|
||||
if s, ok := status.FromError(err); ok {
|
||||
return s.Message()
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
vendor/github.com/containerd/containerd/events/events.go (generated, vendored; 81 lines removed)
@@ -1,81 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package events
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/typeurl"
|
||||
"github.com/gogo/protobuf/types"
|
||||
)
|
||||
|
||||
// Envelope provides the packaging for an event.
|
||||
type Envelope struct {
|
||||
Timestamp time.Time
|
||||
Namespace string
|
||||
Topic string
|
||||
Event *types.Any
|
||||
}
|
||||
|
||||
// Field returns the value for the given fieldpath as a string, if defined.
|
||||
// If the value is not defined, the second value will be false.
|
||||
func (e *Envelope) Field(fieldpath []string) (string, bool) {
|
||||
if len(fieldpath) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
switch fieldpath[0] {
|
||||
// unhandled: timestamp
|
||||
case "namespace":
|
||||
return e.Namespace, len(e.Namespace) > 0
|
||||
case "topic":
|
||||
return e.Topic, len(e.Topic) > 0
|
||||
case "event":
|
||||
decoded, err := typeurl.UnmarshalAny(e.Event)
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
|
||||
adaptor, ok := decoded.(interface {
|
||||
Field([]string) (string, bool)
|
||||
})
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
return adaptor.Field(fieldpath[1:])
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// Event is a generic interface for any type of event
|
||||
type Event interface{}
|
||||
|
||||
// Publisher posts the event.
|
||||
type Publisher interface {
|
||||
Publish(ctx context.Context, topic string, event Event) error
|
||||
}
|
||||
|
||||
// Forwarder forwards an event to the underlying event bus
|
||||
type Forwarder interface {
|
||||
Forward(ctx context.Context, envelope *Envelope) error
|
||||
}
|
||||
|
||||
// Subscriber allows callers to subscribe to events
|
||||
type Subscriber interface {
|
||||
Subscribe(ctx context.Context, filters ...string) (ch <-chan *Envelope, errs <-chan error)
|
||||
}
|
||||
vendor/github.com/containerd/containerd/events/exchange/exchange.go (generated, vendored; 251 lines removed)
@@ -1,251 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package exchange
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/events"
|
||||
"github.com/containerd/containerd/filters"
|
||||
"github.com/containerd/containerd/identifiers"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/typeurl"
|
||||
goevents "github.com/docker/go-events"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Exchange broadcasts events
|
||||
type Exchange struct {
|
||||
broadcaster *goevents.Broadcaster
|
||||
}
|
||||
|
||||
// NewExchange returns a new event Exchange
|
||||
func NewExchange() *Exchange {
|
||||
return &Exchange{
|
||||
broadcaster: goevents.NewBroadcaster(),
|
||||
}
|
||||
}
|
||||
|
||||
var _ events.Publisher = &Exchange{}
|
||||
var _ events.Forwarder = &Exchange{}
|
||||
var _ events.Subscriber = &Exchange{}
|
||||
|
||||
// Forward accepts an envelope to be directly distributed on the exchange.
|
||||
//
|
||||
// This is useful when an event is forwarded on behalf of another namespace or
|
||||
// when the event is propagated on behalf of another publisher.
|
||||
func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) {
|
||||
if err := validateEnvelope(envelope); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
logger := log.G(ctx).WithFields(logrus.Fields{
|
||||
"topic": envelope.Topic,
|
||||
"ns": envelope.Namespace,
|
||||
"type": envelope.Event.TypeUrl,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("error forwarding event")
|
||||
} else {
|
||||
logger.Debug("event forwarded")
|
||||
}
|
||||
}()
|
||||
|
||||
return e.broadcaster.Write(envelope)
|
||||
}
|
||||
|
||||
// Publish packages and sends an event. The caller will be considered the
|
||||
// initial publisher of the event. This means the timestamp will be calculated
|
||||
// at this point and this method may read from the calling context.
|
||||
func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event) (err error) {
|
||||
var (
|
||||
namespace string
|
||||
encoded *types.Any
|
||||
envelope events.Envelope
|
||||
)
|
||||
|
||||
namespace, err = namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed publishing event")
|
||||
}
|
||||
if err := validateTopic(topic); err != nil {
|
||||
return errors.Wrapf(err, "envelope topic %q", topic)
|
||||
}
|
||||
|
||||
encoded, err = typeurl.MarshalAny(event)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
envelope.Timestamp = time.Now().UTC()
|
||||
envelope.Namespace = namespace
|
||||
envelope.Topic = topic
|
||||
envelope.Event = encoded
|
||||
|
||||
defer func() {
|
||||
logger := log.G(ctx).WithFields(logrus.Fields{
|
||||
"topic": envelope.Topic,
|
||||
"ns": envelope.Namespace,
|
||||
"type": envelope.Event.TypeUrl,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("error publishing event")
|
||||
} else {
|
||||
logger.Debug("event published")
|
||||
}
|
||||
}()
|
||||
|
||||
return e.broadcaster.Write(&envelope)
|
||||
}
|
||||
|
||||
// Subscribe to events on the exchange. Events are sent through the returned
|
||||
// channel ch. If an error is encountered, it will be sent on channel errs and
|
||||
// errs will be closed. To end the subscription, cancel the provided context.
|
||||
//
|
||||
// Zero or more filters may be provided as strings. Only events that match
|
||||
// *any* of the provided filters will be sent on the channel. The filters use
|
||||
// the standard containerd filters package syntax.
|
||||
func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) {
|
||||
var (
|
||||
evch = make(chan *events.Envelope)
|
||||
errq = make(chan error, 1)
|
||||
channel = goevents.NewChannel(0)
|
||||
queue = goevents.NewQueue(channel)
|
||||
dst goevents.Sink = queue
|
||||
)
|
||||
|
||||
closeAll := func() {
|
||||
channel.Close()
|
||||
queue.Close()
|
||||
e.broadcaster.Remove(dst)
|
||||
close(errq)
|
||||
}
|
||||
|
||||
ch = evch
|
||||
errs = errq
|
||||
|
||||
if len(fs) > 0 {
|
||||
filter, err := filters.ParseAll(fs...)
|
||||
if err != nil {
|
||||
errq <- errors.Wrapf(err, "failed parsing subscription filters")
|
||||
closeAll()
|
||||
return
|
||||
}
|
||||
|
||||
dst = goevents.NewFilter(queue, goevents.MatcherFunc(func(gev goevents.Event) bool {
|
||||
return filter.Match(adapt(gev))
|
||||
}))
|
||||
}
|
||||
|
||||
e.broadcaster.Add(dst)
|
||||
|
||||
go func() {
|
||||
defer closeAll()
|
||||
|
||||
var err error
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case ev := <-channel.C:
|
||||
env, ok := ev.(*events.Envelope)
|
||||
if !ok {
|
||||
// TODO(stevvooe): For the most part, we are well protected
|
||||
// from this condition. Both Forward and Publish protect
|
||||
// from this.
|
||||
err = errors.Errorf("invalid envelope encountered %#v; please file a bug", ev)
|
||||
break
|
||||
}
|
||||
|
||||
select {
|
||||
case evch <- env:
|
||||
case <-ctx.Done():
|
||||
break loop
|
||||
}
|
||||
case <-ctx.Done():
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if cerr := ctx.Err(); cerr != context.Canceled {
|
||||
err = cerr
|
||||
}
|
||||
}
|
||||
|
||||
errq <- err
|
||||
}()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func validateTopic(topic string) error {
|
||||
if topic == "" {
|
||||
return errors.Wrap(errdefs.ErrInvalidArgument, "must not be empty")
|
||||
}
|
||||
|
||||
if topic[0] != '/' {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "must start with '/'")
|
||||
}
|
||||
|
||||
if len(topic) == 1 {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "must have at least one component")
|
||||
}
|
||||
|
||||
components := strings.Split(topic[1:], "/")
|
||||
for _, component := range components {
|
||||
if err := identifiers.Validate(component); err != nil {
|
||||
return errors.Wrapf(err, "failed validation on component %q", component)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateEnvelope(envelope *events.Envelope) error {
|
||||
if err := identifiers.Validate(envelope.Namespace); err != nil {
|
||||
return errors.Wrapf(err, "event envelope has invalid namespace")
|
||||
}
|
||||
|
||||
if err := validateTopic(envelope.Topic); err != nil {
|
||||
return errors.Wrapf(err, "envelope topic %q", envelope.Topic)
|
||||
}
|
||||
|
||||
if envelope.Timestamp.IsZero() {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "timestamp must be set on forwarded event")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func adapt(ev interface{}) filters.Adaptor {
|
||||
if adaptor, ok := ev.(filters.Adaptor); ok {
|
||||
return adaptor
|
||||
}
|
||||
|
||||
return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
|
||||
return "", false
|
||||
})
|
||||
}
|
||||
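The removed Exchange was typically driven roughly as follows. This is a hedged sketch, not code from the repository: it assumes the upstream events/exchange and namespaces packages, the Envelope adaptor exposing a "topic" field, and watchTasks is a made-up helper name.

// Assumed imports: context, fmt,
// github.com/containerd/containerd/events/exchange,
// github.com/containerd/containerd/namespaces.
func watchTasks(ctx context.Context) {
	// Publish requires a namespace on the context (see the namespaces package below).
	ctx = namespaces.WithNamespace(ctx, "default")
	ex := exchange.NewExchange()

	// Only envelopes whose topic starts with /tasks/ reach the channel.
	ch, errs := ex.Subscribe(ctx, `topic~="^/tasks/"`)
	for {
		select {
		case env := <-ch:
			fmt.Println(env.Namespace, env.Topic)
		case err := <-errs:
			if err != nil {
				fmt.Println("subscription ended:", err)
			}
			return
		}
	}
}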
33  vendor/github.com/containerd/containerd/filters/adaptor.go  (generated, vendored)
@@ -1,33 +0,0 @@
Entire vendored file removed. It declared the Adaptor interface, which maps protobuf-style field paths to (value, present) pairs, and the AdapterFunc convenience type that implements it.
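Callers expose their own types to the filter machinery through that Adaptor interface. A small illustrative sketch, assuming the upstream filters package; job is a made-up type.

// Assumed imports: strings, github.com/containerd/containerd/filters.
type job struct {
	Name   string
	Labels map[string]string
}

// adaptor maps filter field paths such as "name" or labels."ci.stage" onto
// the job's fields.
func (j job) adaptor() filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}
		switch fieldpath[0] {
		case "name":
			return j.Name, len(j.Name) > 0
		case "labels":
			if len(fieldpath) < 2 {
				return "", false
			}
			v, ok := j.Labels[strings.Join(fieldpath[1:], ".")]
			return v, ok
		}
		return "", false
	})
}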
179  vendor/github.com/containerd/containerd/filters/filter.go  (generated, vendored)
@@ -1,179 +0,0 @@
Entire vendored file removed. Its package documentation described the selector syntax (<fieldpath>[<operator><value>], for example name==foo, labels.foo, labels."very complex label"==something, the != and ~= operators, and comma-joined selectors that must all match), and it defined Filter, FilterFunc, Always, the Any and All combinators, and the selector type whose Match applies ==, != or a compiled regular expression to an adaptor field.
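Tying that documented syntax to the Adaptor sketch above: a selector string parses into a Filter whose Match runs against any adaptor (Parse itself lives in parser.go, removed just below). Again an illustrative sketch only; matchJob is a made-up name.

// Assumed imports: fmt, github.com/containerd/containerd/filters.
func matchJob() {
	f, err := filters.Parse(`name==build,labels."ci.stage"==test`)
	if err != nil {
		panic(err)
	}
	j := job{Name: "build", Labels: map[string]string{"ci.stage": "test"}}
	fmt.Println(f.Match(j.adaptor())) // true: both selectors match
}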
292  vendor/github.com/containerd/containerd/filters/parser.go  (generated, vendored)
@@ -1,292 +0,0 @@
Entire vendored file removed. It provided Parse and ParseAll plus the recursive-descent parser for the selector grammar (selectors := selector ("," selector)*, selector := fieldpath (operator value), with Go-style quoting), returning an All filter for a single filter string and an Any filter across several.
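ParseAll is what Exchange.Subscribe feeds its variadic filter strings to; unlike a single comma-joined selector, the resulting filter matches when any one string matches. A sketch under the same assumptions as above; subscribeFilter is a made-up name.

// Assumed imports: github.com/containerd/containerd/filters.
func subscribeFilter() (filters.Filter, error) {
	// Matches envelopes from namespace k8s.io OR with the /images/create
	// topic. Topics contain '/', so that value must be quoted.
	return filters.ParseAll(`topic=="/images/create"`, `namespace==k8s.io`)
}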
253  vendor/github.com/containerd/containerd/filters/quote.go  (generated, vendored)
@@ -1,253 +0,0 @@
Entire vendored file removed. It carried the unquote/unquoteChar helpers copied from the Go strconv package and modified to accept '/' and '|' as additional quote characters for regular-expression values.
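The point of carrying that modified strconv code was to let regular-expression values be quoted with '/' or '|', so patterns that themselves contain '/' need no escaping. Illustrative sketch, same assumptions; taskTopicFilter is a made-up name.

// Assumed imports: github.com/containerd/containerd/filters.
func taskTopicFilter() (filters.Filter, error) {
	// '|' quotes the regexp, so the '/' characters inside it stay literal.
	return filters.Parse(`topic~=|^/tasks/|`)
}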
297  vendor/github.com/containerd/containerd/filters/scanner.go  (generated, vendored)
@@ -1,297 +0,0 @@
Entire vendored file removed. It implemented the token scanner behind the parser: field, operator (runs of '=', '!', '~'), separator (',' and '.'), quoted and plain value tokens, including escape-sequence handling inside quoted literals.
73  vendor/github.com/containerd/containerd/identifiers/validate.go  (generated, vendored)
@@ -1,73 +0,0 @@
Entire vendored file removed. It validated containerd identifiers: non-empty, at most 76 characters, alphanumeric components optionally separated by '.', '_' or '-', so that identifiers remain safe as filesystem path components.
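What the removed validation accepts and rejects, as a quick sketch against the upstream identifiers package; validateExamples is a made-up name.

// Assumed imports: fmt, strings,
// github.com/containerd/containerd/identifiers.
func validateExamples() {
	fmt.Println(identifiers.Validate("nvidia-container-runtime")) // nil
	fmt.Println(identifiers.Validate("bad/name"))                 // error: '/' is not an allowed character
	fmt.Println(identifiers.Validate(strings.Repeat("a", 100)))   // error: longer than 76 characters
}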
68  vendor/github.com/containerd/containerd/log/context.go  (generated, vendored)
@@ -1,68 +0,0 @@
Entire vendored file removed. It defined the context-scoped logging helpers: the G/GetLogger alias, the default entry L, WithLogger, and the RFC3339NanoFixed timestamp plus text and JSON format constants.
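A sketch of those context-scoped logger helpers, assuming the upstream log package and logrus; logExample is a made-up name.

// Assumed imports: context, github.com/containerd/containerd/log,
// github.com/sirupsen/logrus.
func logExample() {
	ctx := log.WithLogger(context.Background(), logrus.WithField("module", "nvidia"))
	log.G(ctx).Info("configured")          // tagged with module=nvidia
	log.G(context.Background()).Info("hi") // falls back to the default entry log.L
}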
78  vendor/github.com/containerd/containerd/namespaces/context.go  (generated, vendored)
@@ -1,78 +0,0 @@
Entire vendored file removed. It provided WithNamespace (which also sets the gRPC and ttrpc headers), NamespaceFromEnv (CONTAINERD_NAMESPACE or "default"), Namespace, and NamespaceRequired, which validates the namespace with the identifiers package.
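A sketch of the namespace plumbing that Exchange.Publish relies on, assuming the upstream namespaces package; namespaceExample is a made-up name.

// Assumed imports: context, fmt,
// github.com/containerd/containerd/namespaces.
func namespaceExample() {
	ctx := namespaces.WithNamespace(context.Background(), "k8s.io")
	ns, err := namespaces.NamespaceRequired(ctx)
	fmt.Println(ns, err) // k8s.io <nil>

	// With no namespace on the context, NamespaceRequired returns a
	// failed-precondition error instead of an empty string.
	_, err = namespaces.NamespaceRequired(context.Background())
	fmt.Println(err != nil) // true
}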
61  vendor/github.com/containerd/containerd/namespaces/grpc.go  (generated, vendored)
@@ -1,61 +0,0 @@
Entire vendored file removed. It defined the GRPCHeader constant ("containerd-namespace") and the helpers that write the namespace into outgoing gRPC metadata and read it back from incoming metadata.
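That header plumbing means a gRPC server can resolve the namespace straight from incoming metadata, without WithNamespace being called on its side. Sketch, assuming google.golang.org/grpc/metadata and the upstream namespaces package; namespaceFromHeader is a made-up name.

// Assumed imports: context, fmt, google.golang.org/grpc/metadata,
// github.com/containerd/containerd/namespaces.
func namespaceFromHeader() {
	md := metadata.Pairs(namespaces.GRPCHeader, "default")
	ctx := metadata.NewIncomingContext(context.Background(), md)
	ns, ok := namespaces.Namespace(ctx)
	fmt.Println(ns, ok) // default true
}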
46  vendor/github.com/containerd/containerd/namespaces/store.go  (generated, vendored)
@@ -1,46 +0,0 @@
Entire vendored file removed. It declared the namespace Store interface (Create, Labels, SetLabel, List, Delete with DeleteOpts) and the DeleteInfo type.
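The Store interface is small; a throwaway in-memory implementation shows its shape. Purely illustrative (memStore is a made-up type), assuming the upstream namespaces and identifiers packages.

// Assumed imports: context, github.com/containerd/containerd/identifiers,
// github.com/containerd/containerd/namespaces.
type memStore struct{ labels map[string]map[string]string }

func (m *memStore) Create(ctx context.Context, ns string, labels map[string]string) error {
	if err := identifiers.Validate(ns); err != nil {
		return err
	}
	m.labels[ns] = labels
	return nil
}

func (m *memStore) Labels(ctx context.Context, ns string) (map[string]string, error) {
	return m.labels[ns], nil
}

func (m *memStore) SetLabel(ctx context.Context, ns, key, value string) error {
	// Assumes Create was already called for ns.
	m.labels[ns][key] = value
	return nil
}

func (m *memStore) List(ctx context.Context) ([]string, error) {
	names := make([]string, 0, len(m.labels))
	for ns := range m.labels {
		names = append(names, ns)
	}
	return names, nil
}

func (m *memStore) Delete(ctx context.Context, ns string, opts ...namespaces.DeleteOpts) error {
	delete(m.labels, ns)
	return nil
}

// Compile-time check against the Store interface above.
var _ namespaces.Store = (*memStore)(nil)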
51  vendor/github.com/containerd/containerd/namespaces/ttrpc.go  (generated, vendored)
@@ -1,51 +0,0 @@
Entire vendored file removed. It mirrored the gRPC helpers for ttrpc, carrying the namespace in the TTRPCHeader constant ("containerd-namespace-ttrpc") on ttrpc metadata.
146  vendor/github.com/containerd/containerd/plugin/context.go  (generated, vendored)
@@ -1,146 +0,0 @@
Entire vendored file removed. It defined InitContext (root and state paths, config, addresses, event exchange and metadata handed to a plugin's InitFn), Meta (platforms, exports, capabilities), Plugin (registration, config, instance and init error) and the ordered, type-and-ID indexed plugin Set with Add, Get, GetAll and GetByType.
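InitContext and Meta are what a plugin's InitFn receives. A hedged sketch of a registration follows; it assumes the upstream plugin package, including plugin.Register, which is not among the lines reproduced here, and exampleService is a made-up type.

// Assumed imports: github.com/containerd/containerd/plugin.
func init() {
	plugin.Register(&plugin.Registration{
		Type:     plugin.GRPCPlugin,
		ID:       "example",
		Requires: []plugin.Type{plugin.ServicePlugin},
		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
			// Meta.Exports is pre-initialised by NewContext above.
			ic.Meta.Exports["example.enabled"] = "true"
			return &exampleService{root: ic.Root}, nil
		},
	})
}

type exampleService struct{ root string }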
239  vendor/github.com/containerd/containerd/plugin/plugin.go  (generated, vendored)
@@ -1,239 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/ttrpc"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoType is returned when no type is specified
|
||||
ErrNoType = errors.New("plugin: no type")
|
||||
// ErrNoPluginID is returned when no id is specified
|
||||
ErrNoPluginID = errors.New("plugin: no id")
|
||||
// ErrIDRegistered is returned when a duplicate id is already registered
|
||||
ErrIDRegistered = errors.New("plugin: id already registered")
|
||||
// ErrSkipPlugin is used when a plugin is not initialized and should not be loaded,
|
||||
// this allows the plugin loader differentiate between a plugin which is configured
|
||||
// not to load and one that fails to load.
|
||||
ErrSkipPlugin = errors.New("skip plugin")
|
||||
|
||||
// ErrInvalidRequires will be thrown if the requirements for a plugin are
|
||||
// defined in an invalid manner.
|
||||
ErrInvalidRequires = errors.New("invalid requires")
|
||||
)
|
||||
|
||||
// IsSkipPlugin returns true if the error is skipping the plugin
|
||||
func IsSkipPlugin(err error) bool {
|
||||
return errors.Is(err, ErrSkipPlugin)
|
||||
}
|
||||
|
||||
// Type is the type of the plugin
|
||||
type Type string
|
||||
|
||||
func (t Type) String() string { return string(t) }
|
||||
|
||||
const (
|
||||
// InternalPlugin implements an internal plugin to containerd
|
||||
InternalPlugin Type = "io.containerd.internal.v1"
|
||||
// RuntimePlugin implements a runtime
|
||||
RuntimePlugin Type = "io.containerd.runtime.v1"
|
||||
// RuntimePluginV2 implements a runtime v2
|
||||
RuntimePluginV2 Type = "io.containerd.runtime.v2"
|
||||
// ServicePlugin implements an internal service
|
||||
ServicePlugin Type = "io.containerd.service.v1"
|
||||
// GRPCPlugin implements a grpc service
|
||||
GRPCPlugin Type = "io.containerd.grpc.v1"
|
||||
// SnapshotPlugin implements a snapshotter
|
||||
SnapshotPlugin Type = "io.containerd.snapshotter.v1"
|
||||
// TaskMonitorPlugin implements a task monitor
|
||||
TaskMonitorPlugin Type = "io.containerd.monitor.v1"
|
||||
// DiffPlugin implements a differ
|
||||
DiffPlugin Type = "io.containerd.differ.v1"
|
||||
// MetadataPlugin implements a metadata store
|
||||
MetadataPlugin Type = "io.containerd.metadata.v1"
|
||||
// ContentPlugin implements a content store
|
||||
ContentPlugin Type = "io.containerd.content.v1"
|
||||
// GCPlugin implements garbage collection policy
|
||||
GCPlugin Type = "io.containerd.gc.v1"
|
||||
)
|
||||
|
||||
const (
|
||||
// RuntimeLinuxV1 is the legacy linux runtime
|
||||
RuntimeLinuxV1 = "io.containerd.runtime.v1.linux"
|
||||
// RuntimeRuncV1 is the runc runtime that supports a single container
|
||||
RuntimeRuncV1 = "io.containerd.runc.v1"
|
||||
// RuntimeRuncV2 is the runc runtime that supports multiple containers per shim
|
||||
RuntimeRuncV2 = "io.containerd.runc.v2"
|
||||
)
|
||||
|
||||
// Registration contains information for registering a plugin
|
||||
type Registration struct {
|
||||
// Type of the plugin
|
||||
Type Type
|
||||
// ID of the plugin
|
||||
ID string
|
||||
// Config specific to the plugin
|
||||
Config interface{}
|
||||
// Requires is a list of plugins that the registered plugin requires to be available
|
||||
Requires []Type
|
||||
|
||||
// InitFn is called when initializing a plugin. The registration and
|
||||
// context are passed in. The init function may modify the registration to
|
||||
// add exports, capabilities and platform support declarations.
|
||||
InitFn func(*InitContext) (interface{}, error)
|
||||
// Disable the plugin from loading
|
||||
Disable bool
|
||||
}
|
||||
|
||||
// Init the registered plugin
|
||||
func (r *Registration) Init(ic *InitContext) *Plugin {
|
||||
p, err := r.InitFn(ic)
|
||||
return &Plugin{
|
||||
Registration: r,
|
||||
Config: ic.Config,
|
||||
Meta: ic.Meta,
|
||||
instance: p,
|
||||
err: err,
|
||||
}
|
||||
}
|
||||
|
||||
// URI returns the full plugin URI
|
||||
func (r *Registration) URI() string {
|
||||
return fmt.Sprintf("%s.%s", r.Type, r.ID)
|
||||
}
|
||||
|
||||
// Service allows GRPC services to be registered with the underlying server
|
||||
type Service interface {
|
||||
Register(*grpc.Server) error
|
||||
}
|
||||
|
||||
// TTRPCService allows TTRPC services to be registered with the underlying server
|
||||
type TTRPCService interface {
|
||||
RegisterTTRPC(*ttrpc.Server) error
|
||||
}
|
||||
|
||||
// TCPService allows GRPC services to be registered with the underlying tcp server
|
||||
type TCPService interface {
|
||||
RegisterTCP(*grpc.Server) error
|
||||
}
|
||||
|
||||
var register = struct {
|
||||
sync.RWMutex
|
||||
r []*Registration
|
||||
}{}
|
||||
|
||||
// Load loads all plugins at the provided path into containerd
|
||||
func Load(path string) (err error) {
|
||||
defer func() {
|
||||
if v := recover(); v != nil {
|
||||
rerr, ok := v.(error)
|
||||
if !ok {
|
||||
rerr = fmt.Errorf("%s", v)
|
||||
}
|
||||
err = rerr
|
||||
}
|
||||
}()
|
||||
return loadPlugins(path)
|
||||
}
|
||||
|
||||
// Register allows plugins to register
|
||||
func Register(r *Registration) {
|
||||
register.Lock()
|
||||
defer register.Unlock()
|
||||
|
||||
if r.Type == "" {
|
||||
panic(ErrNoType)
|
||||
}
|
||||
if r.ID == "" {
|
||||
panic(ErrNoPluginID)
|
||||
}
|
||||
if err := checkUnique(r); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
var last bool
|
||||
for _, requires := range r.Requires {
|
||||
if requires == "*" {
|
||||
last = true
|
||||
}
|
||||
}
|
||||
if last && len(r.Requires) != 1 {
|
||||
panic(ErrInvalidRequires)
|
||||
}
|
||||
|
||||
register.r = append(register.r, r)
|
||||
}
|
||||
|
||||
func checkUnique(r *Registration) error {
|
||||
for _, registered := range register.r {
|
||||
if r.URI() == registered.URI() {
|
||||
return errors.Wrap(ErrIDRegistered, r.URI())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DisableFilter filters out disabled plugins
|
||||
type DisableFilter func(r *Registration) bool
|
||||
|
||||
// Graph returns an ordered list of registered plugins for initialization.
|
||||
// Plugins disabled by the provided filter will be excluded from the result.
|
||||
func Graph(filter DisableFilter) (ordered []*Registration) {
|
||||
register.RLock()
|
||||
defer register.RUnlock()
|
||||
|
||||
for _, r := range register.r {
|
||||
if filter(r) {
|
||||
r.Disable = true
|
||||
}
|
||||
}
|
||||
|
||||
added := map[*Registration]bool{}
|
||||
for _, r := range register.r {
|
||||
if r.Disable {
|
||||
continue
|
||||
}
|
||||
children(r, added, &ordered)
|
||||
if !added[r] {
|
||||
ordered = append(ordered, r)
|
||||
added[r] = true
|
||||
}
|
||||
}
|
||||
return ordered
|
||||
}
|
||||
|
||||
func children(reg *Registration, added map[*Registration]bool, ordered *[]*Registration) {
|
||||
for _, t := range reg.Requires {
|
||||
for _, r := range register.r {
|
||||
if !r.Disable &&
|
||||
r.URI() != reg.URI() &&
|
||||
(t == "*" || r.Type == t) {
|
||||
children(r, added, ordered)
|
||||
if !added[r] {
|
||||
*ordered = append(*ordered, r)
|
||||
added[r] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
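The Registration and Register pieces of the removed plugin.go are easier to read next to a usage sketch. The following is illustrative only: the plugin type choice, the ID, the config struct, and the exporter type are hypothetical, but the Registration fields and the Register call correspond to the code shown above.

```go
package example

import (
	"github.com/containerd/containerd/plugin"
)

// exporterConfig is a hypothetical plugin configuration struct.
type exporterConfig struct {
	Endpoint string `toml:"endpoint"`
}

// exporter is a hypothetical plugin instance type.
type exporter struct {
	endpoint string
}

func init() {
	// Register panics on a missing Type/ID or on a duplicate URI, exactly as
	// the Register implementation above shows.
	plugin.Register(&plugin.Registration{
		Type:   plugin.InternalPlugin,
		ID:     "example-exporter",
		Config: &exporterConfig{Endpoint: "unix:///run/example.sock"},
		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
			cfg := ic.Config.(*exporterConfig)
			// The returned value becomes the plugin instance that other
			// plugins can look up via the InitContext accessors (GetByType,
			// GetAll) shown earlier in this diff.
			return &exporter{endpoint: cfg.Endpoint}, nil
		},
	})
}
```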
vendor/github.com/containerd/containerd/plugin/plugin_go18.go (generated, vendored)
@@ -1,62 +0,0 @@
|
||||
// +build go1.8,!windows,amd64,!static_build,!gccgo
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"plugin"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// loadPlugins loads all plugins for the OS and Arch
|
||||
// that containerd is built for inside the provided path
|
||||
func loadPlugins(path string) error {
|
||||
abs, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pattern := filepath.Join(abs, fmt.Sprintf(
|
||||
"*-%s-%s.%s",
|
||||
runtime.GOOS,
|
||||
runtime.GOARCH,
|
||||
getLibExt(),
|
||||
))
|
||||
libs, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, lib := range libs {
|
||||
if _, err := plugin.Open(lib); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getLibExt returns a platform specific lib extension for
|
||||
// the platform that containerd is running on
|
||||
func getLibExt() string {
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
return "dll"
|
||||
default:
|
||||
return "so"
|
||||
}
|
||||
}
|
||||
vendor/github.com/containerd/containerd/plugin/plugin_other.go (generated, vendored)
@@ -1,24 +0,0 @@
// +build !go1.8 windows !amd64 static_build gccgo

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package plugin

func loadPlugins(path string) error {
	// plugins not supported until 1.8
	return nil
}
vendor/github.com/containerd/ttrpc/.gitignore (generated, vendored)
@@ -1,14 +0,0 @@
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
vendor/github.com/containerd/ttrpc/.travis.yml (generated, vendored)
@@ -1,24 +0,0 @@
dist: bionic
language: go

go:
  - "1.13.x"
  - "1.15.x"

install:
  # Don't change local go.{mod, sum} by go get tools.
  #
  # ref: https://github.com/golang/go/issues/27643
  - pushd ..; go get -u github.com/vbatts/git-validation; popd
  - pushd ..; go get -u github.com/kunalkushwaha/ltag; popd

before_script:
  - pushd ..; git clone https://github.com/containerd/project; popd

script:
  - DCO_VERBOSITY=-q ../project/script/validate/dco
  - ../project/script/validate/fileheader ../project/
  - go test -v -race -covermode=atomic -coverprofile=coverage.txt ./...

after_success:
  - bash <(curl -s https://codecov.io/bash)
vendor/github.com/containerd/ttrpc/LICENSE (generated, vendored)
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
vendor/github.com/containerd/ttrpc/README.md (generated, vendored)
@@ -1,62 +0,0 @@
# ttrpc

[Build Status](https://travis-ci.org/containerd/ttrpc)

GRPC for low-memory environments.

The existing grpc-go project requires a lot of memory overhead for importing
packages and at runtime. While this is great for many services with low density
requirements, this can be a problem when running a large number of services on
a single machine or on a machine with a small amount of memory.

Using the same GRPC definitions, this project reduces the binary size and
protocol overhead required. We do this by eliding the `net/http`, `net/http2`
and `grpc` packages used by grpc, replacing them with a lightweight framing
protocol. The result is smaller binaries that use less resident memory with
the same ease of use as GRPC.

Please note that while this project supports generating either end of the
protocol, the generated service definitions will be incompatible with regular
GRPC services, as they do not speak the same protocol.

# Usage

Create a gogo vanity binary (see
[`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an
example with the ttrpc plugin enabled).

It's recommended to use [`protobuild`](https://github.com//stevvooe/protobuild)
to build the protobufs for this project, but this will work with protoc
directly, if required.

# Differences from GRPC

- The protocol stack has been replaced with a lighter protocol that doesn't
  require http, http2 and tls.
- The client and server interfaces are identical, whereas in GRPC the client
  and server interfaces differ.
- The Go stdlib context package is used instead.
- No support for streams yet.

# Status

Very new. YMMV.

TODO:

- [X] Plumb error codes and GRPC status
- [X] Remove use of any type and dependency on typeurl package
- [X] Ensure that protocol can support streaming in the future
- [ ] Document protocol layout
- [ ] Add testing under concurrent load to ensure
- [ ] Verify connection error handling

# Project details

ttrpc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.
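To make the usage description above concrete, here is a minimal sketch of a unary call against a ttrpc server, using only the client API that appears in the vendored `client.go` removed later in this diff (`NewClient`, `WithOnClose`, `Call`, `Close`). The socket path, the service and method names, and the use of gogo's `types.Empty` as a placeholder message are assumptions made for illustration, not part of the vendored code.

```go
package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/containerd/ttrpc"
	"github.com/gogo/protobuf/types"
)

func main() {
	// Assumption: a ttrpc server is already listening on this socket.
	conn, err := net.DialTimeout("unix", "/run/example/example.sock", time.Second)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}

	client := ttrpc.NewClient(conn, ttrpc.WithOnClose(func() {
		log.Println("ttrpc connection closed")
	}))
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Call marshals req, sends a request frame and waits for the matching
	// response frame; the service and method names here are hypothetical.
	req, resp := &types.Empty{}, &types.Empty{}
	if err := client.Call(ctx, "example.v1.Example", "Ping", req, resp); err != nil {
		log.Fatalf("call failed: %v", err)
	}
}
```

If the context carries a deadline, it is propagated to the server as `TimeoutNano` on the request, as the `Call` implementation further down in this diff shows.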
vendor/github.com/containerd/ttrpc/channel.go (generated, vendored)
@@ -1,153 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ttrpc
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
messageHeaderLength = 10
|
||||
messageLengthMax = 4 << 20
|
||||
)
|
||||
|
||||
type messageType uint8
|
||||
|
||||
const (
|
||||
messageTypeRequest messageType = 0x1
|
||||
messageTypeResponse messageType = 0x2
|
||||
)
|
||||
|
||||
// messageHeader represents the fixed-length message header of 10 bytes sent
|
||||
// with every request.
|
||||
type messageHeader struct {
|
||||
Length uint32 // length excluding this header. b[:4]
|
||||
StreamID uint32 // identifies which request stream message is a part of. b[4:8]
|
||||
Type messageType // message type b[8]
|
||||
Flags uint8 // reserved b[9]
|
||||
}
|
||||
|
||||
func readMessageHeader(p []byte, r io.Reader) (messageHeader, error) {
|
||||
_, err := io.ReadFull(r, p[:messageHeaderLength])
|
||||
if err != nil {
|
||||
return messageHeader{}, err
|
||||
}
|
||||
|
||||
return messageHeader{
|
||||
Length: binary.BigEndian.Uint32(p[:4]),
|
||||
StreamID: binary.BigEndian.Uint32(p[4:8]),
|
||||
Type: messageType(p[8]),
|
||||
Flags: p[9],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func writeMessageHeader(w io.Writer, p []byte, mh messageHeader) error {
|
||||
binary.BigEndian.PutUint32(p[:4], mh.Length)
|
||||
binary.BigEndian.PutUint32(p[4:8], mh.StreamID)
|
||||
p[8] = byte(mh.Type)
|
||||
p[9] = mh.Flags
|
||||
|
||||
_, err := w.Write(p[:])
|
||||
return err
|
||||
}
|
||||
|
||||
var buffers sync.Pool
|
||||
|
||||
type channel struct {
|
||||
conn net.Conn
|
||||
bw *bufio.Writer
|
||||
br *bufio.Reader
|
||||
hrbuf [messageHeaderLength]byte // avoid alloc when reading header
|
||||
hwbuf [messageHeaderLength]byte
|
||||
}
|
||||
|
||||
func newChannel(conn net.Conn) *channel {
|
||||
return &channel{
|
||||
conn: conn,
|
||||
bw: bufio.NewWriter(conn),
|
||||
br: bufio.NewReader(conn),
|
||||
}
|
||||
}
|
||||
|
||||
// recv a message from the channel. The returned buffer contains the message.
|
||||
//
|
||||
// If a valid grpc status is returned, the message header
|
||||
// returned will be valid and caller should send that along to
|
||||
// the correct consumer. The bytes on the underlying channel
|
||||
// will be discarded.
|
||||
func (ch *channel) recv() (messageHeader, []byte, error) {
|
||||
mh, err := readMessageHeader(ch.hrbuf[:], ch.br)
|
||||
if err != nil {
|
||||
return messageHeader{}, nil, err
|
||||
}
|
||||
|
||||
if mh.Length > uint32(messageLengthMax) {
|
||||
if _, err := ch.br.Discard(int(mh.Length)); err != nil {
|
||||
return mh, nil, errors.Wrapf(err, "failed to discard after receiving oversized message")
|
||||
}
|
||||
|
||||
return mh, nil, status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", mh.Length, messageLengthMax)
|
||||
}
|
||||
|
||||
p := ch.getmbuf(int(mh.Length))
|
||||
if _, err := io.ReadFull(ch.br, p); err != nil {
|
||||
return messageHeader{}, nil, errors.Wrapf(err, "failed reading message")
|
||||
}
|
||||
|
||||
return mh, p, nil
|
||||
}
|
||||
|
||||
func (ch *channel) send(streamID uint32, t messageType, p []byte) error {
|
||||
if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err := ch.bw.Write(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ch.bw.Flush()
|
||||
}
|
||||
|
||||
func (ch *channel) getmbuf(size int) []byte {
|
||||
// we can't use the standard New method on pool because we want to allocate
|
||||
// based on size.
|
||||
b, ok := buffers.Get().(*[]byte)
|
||||
if !ok || cap(*b) < size {
|
||||
// TODO(stevvooe): It may be better to allocate these in fixed length
|
||||
// buckets to reduce fragmentation, but it's not clear that would help
|
||||
// with performance. An ilogb approach or similar would work well.
|
||||
bb := make([]byte, size)
|
||||
b = &bb
|
||||
} else {
|
||||
*b = (*b)[:size]
|
||||
}
|
||||
return *b
|
||||
}
|
||||
|
||||
func (ch *channel) putmbuf(p []byte) {
|
||||
buffers.Put(&p)
|
||||
}
|
||||
vendor/github.com/containerd/ttrpc/client.go (generated, vendored)
@@ -1,368 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ttrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// ErrClosed is returned by client methods when the underlying connection is
|
||||
// closed.
|
||||
var ErrClosed = errors.New("ttrpc: closed")
|
||||
|
||||
// Client for a ttrpc server
|
||||
type Client struct {
|
||||
codec codec
|
||||
conn net.Conn
|
||||
channel *channel
|
||||
calls chan *callRequest
|
||||
|
||||
ctx context.Context
|
||||
closed func()
|
||||
|
||||
closeOnce sync.Once
|
||||
userCloseFunc func()
|
||||
userCloseWaitCh chan struct{}
|
||||
|
||||
errOnce sync.Once
|
||||
err error
|
||||
interceptor UnaryClientInterceptor
|
||||
}
|
||||
|
||||
// ClientOpts configures a client
|
||||
type ClientOpts func(c *Client)
|
||||
|
||||
// WithOnClose sets the close func whenever the client's Close() method is called
|
||||
func WithOnClose(onClose func()) ClientOpts {
|
||||
return func(c *Client) {
|
||||
c.userCloseFunc = onClose
|
||||
}
|
||||
}
|
||||
|
||||
// WithUnaryClientInterceptor sets the provided client interceptor
|
||||
func WithUnaryClientInterceptor(i UnaryClientInterceptor) ClientOpts {
|
||||
return func(c *Client) {
|
||||
c.interceptor = i
|
||||
}
|
||||
}
|
||||
|
||||
func NewClient(conn net.Conn, opts ...ClientOpts) *Client {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
c := &Client{
|
||||
codec: codec{},
|
||||
conn: conn,
|
||||
channel: newChannel(conn),
|
||||
calls: make(chan *callRequest),
|
||||
closed: cancel,
|
||||
ctx: ctx,
|
||||
userCloseFunc: func() {},
|
||||
userCloseWaitCh: make(chan struct{}),
|
||||
interceptor: defaultClientInterceptor,
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
o(c)
|
||||
}
|
||||
|
||||
go c.run()
|
||||
return c
|
||||
}
|
||||
|
||||
type callRequest struct {
|
||||
ctx context.Context
|
||||
req *Request
|
||||
resp *Response // response will be written back here
|
||||
errs chan error // error written here on completion
|
||||
}
|
||||
|
||||
func (c *Client) Call(ctx context.Context, service, method string, req, resp interface{}) error {
|
||||
payload, err := c.codec.Marshal(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
creq = &Request{
|
||||
Service: service,
|
||||
Method: method,
|
||||
Payload: payload,
|
||||
}
|
||||
|
||||
cresp = &Response{}
|
||||
)
|
||||
|
||||
if metadata, ok := GetMetadata(ctx); ok {
|
||||
metadata.setRequest(creq)
|
||||
}
|
||||
|
||||
if dl, ok := ctx.Deadline(); ok {
|
||||
creq.TimeoutNano = dl.Sub(time.Now()).Nanoseconds()
|
||||
}
|
||||
|
||||
info := &UnaryClientInfo{
|
||||
FullMethod: fullPath(service, method),
|
||||
}
|
||||
if err := c.interceptor(ctx, creq, cresp, info, c.dispatch); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.codec.Unmarshal(cresp.Payload, resp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cresp.Status != nil && cresp.Status.Code != int32(codes.OK) {
|
||||
return status.ErrorProto(cresp.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error {
|
||||
errs := make(chan error, 1)
|
||||
call := &callRequest{
|
||||
ctx: ctx,
|
||||
req: req,
|
||||
resp: resp,
|
||||
errs: errs,
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case c.calls <- call:
|
||||
case <-c.ctx.Done():
|
||||
return c.error()
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case err := <-errs:
|
||||
return filterCloseErr(err)
|
||||
case <-c.ctx.Done():
|
||||
return c.error()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) Close() error {
|
||||
c.closeOnce.Do(func() {
|
||||
c.closed()
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// UserOnCloseWait blocks until the user's on-close callback
|
||||
// finishes.
|
||||
func (c *Client) UserOnCloseWait(ctx context.Context) error {
|
||||
select {
|
||||
case <-c.userCloseWaitCh:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
type message struct {
|
||||
messageHeader
|
||||
p []byte
|
||||
err error
|
||||
}
|
||||
|
||||
type receiver struct {
|
||||
wg *sync.WaitGroup
|
||||
messages chan *message
|
||||
err error
|
||||
}
|
||||
|
||||
func (r *receiver) run(ctx context.Context, c *channel) {
|
||||
defer r.wg.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
r.err = ctx.Err()
|
||||
return
|
||||
default:
|
||||
mh, p, err := c.recv()
|
||||
if err != nil {
|
||||
_, ok := status.FromError(err)
|
||||
if !ok {
|
||||
// treat all errors that are not an rpc status as terminal.
|
||||
// all others poison the connection.
|
||||
r.err = filterCloseErr(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
select {
|
||||
case r.messages <- &message{
|
||||
messageHeader: mh,
|
||||
p: p[:mh.Length],
|
||||
err: err,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
r.err = ctx.Err()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) run() {
|
||||
var (
|
||||
streamID uint32 = 1
|
||||
waiters = make(map[uint32]*callRequest)
|
||||
calls = c.calls
|
||||
incoming = make(chan *message)
|
||||
receiversDone = make(chan struct{})
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
|
||||
// broadcast the shutdown error to the remaining waiters.
|
||||
abortWaiters := func(wErr error) {
|
||||
for _, waiter := range waiters {
|
||||
waiter.errs <- wErr
|
||||
}
|
||||
}
|
||||
recv := &receiver{
|
||||
wg: &wg,
|
||||
messages: incoming,
|
||||
}
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(receiversDone)
|
||||
}()
|
||||
go recv.run(c.ctx, c.channel)
|
||||
|
||||
defer func() {
|
||||
c.conn.Close()
|
||||
c.userCloseFunc()
|
||||
close(c.userCloseWaitCh)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case call := <-calls:
|
||||
if err := c.send(streamID, messageTypeRequest, call.req); err != nil {
|
||||
call.errs <- err
|
||||
continue
|
||||
}
|
||||
|
||||
waiters[streamID] = call
|
||||
streamID += 2 // enforce odd client initiated request ids
|
||||
case msg := <-incoming:
|
||||
call, ok := waiters[msg.StreamID]
|
||||
if !ok {
|
||||
logrus.Errorf("ttrpc: received message for unknown channel %v", msg.StreamID)
|
||||
continue
|
||||
}
|
||||
|
||||
call.errs <- c.recv(call.resp, msg)
|
||||
delete(waiters, msg.StreamID)
|
||||
case <-receiversDone:
|
||||
// all the receivers have exited
|
||||
if recv.err != nil {
|
||||
c.setError(recv.err)
|
||||
}
|
||||
// don't return out, let the close of the context trigger the abort of waiters
|
||||
c.Close()
|
||||
case <-c.ctx.Done():
|
||||
abortWaiters(c.error())
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) error() error {
|
||||
c.errOnce.Do(func() {
|
||||
if c.err == nil {
|
||||
c.err = ErrClosed
|
||||
}
|
||||
})
|
||||
return c.err
|
||||
}
|
||||
|
||||
func (c *Client) setError(err error) {
|
||||
c.errOnce.Do(func() {
|
||||
c.err = err
|
||||
})
|
||||
}
|
||||
|
||||
func (c *Client) send(streamID uint32, mtype messageType, msg interface{}) error {
|
||||
p, err := c.codec.Marshal(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.channel.send(streamID, mtype, p)
|
||||
}
|
||||
|
||||
func (c *Client) recv(resp *Response, msg *message) error {
|
||||
if msg.err != nil {
|
||||
return msg.err
|
||||
}
|
||||
|
||||
if msg.Type != messageTypeResponse {
|
||||
return errors.New("unknown message type received")
|
||||
}
|
||||
|
||||
defer c.channel.putmbuf(msg.p)
|
||||
return proto.Unmarshal(msg.p, resp)
|
||||
}
|
||||
|
||||
// filterCloseErr rewrites EOF and EPIPE errors to ErrClosed. Use when
|
||||
// returning from call or handling errors from main read loop.
|
||||
//
|
||||
// This purposely ignores errors with a wrapped cause.
|
||||
func filterCloseErr(err error) error {
|
||||
switch {
|
||||
case err == nil:
|
||||
return nil
|
||||
case err == io.EOF:
|
||||
return ErrClosed
|
||||
case errors.Cause(err) == io.EOF:
|
||||
return ErrClosed
|
||||
case strings.Contains(err.Error(), "use of closed network connection"):
|
||||
return ErrClosed
|
||||
default:
|
||||
// if we have an EPIPE on a write or an ECONNRESET on a read, we cast to ErrClosed
|
||||
var oerr *net.OpError
|
||||
if errors.As(err, &oerr) && (oerr.Op == "write" || oerr.Op == "read") {
|
||||
serr, sok := oerr.Err.(*os.SyscallError)
|
||||
if sok && ((serr.Err == syscall.EPIPE && oerr.Op == "write") ||
|
||||
(serr.Err == syscall.ECONNRESET && oerr.Op == "read")) {
|
||||
|
||||
return ErrClosed
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
vendor/github.com/containerd/ttrpc/codec.go (generated, vendored)
@@ -1,42 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package ttrpc

import (
	"github.com/gogo/protobuf/proto"
	"github.com/pkg/errors"
)

type codec struct{}

func (c codec) Marshal(msg interface{}) ([]byte, error) {
	switch v := msg.(type) {
	case proto.Message:
		return proto.Marshal(v)
	default:
		return nil, errors.Errorf("ttrpc: cannot marshal unknown type: %T", msg)
	}
}

func (c codec) Unmarshal(p []byte, msg interface{}) error {
	switch v := msg.(type) {
	case proto.Message:
		return proto.Unmarshal(p, v)
	default:
		return errors.Errorf("ttrpc: cannot unmarshal into unknown type: %T", msg)
	}
}
vendor/github.com/containerd/ttrpc/config.go (generated, vendored)
@@ -1,52 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ttrpc
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
type serverConfig struct {
|
||||
handshaker Handshaker
|
||||
interceptor UnaryServerInterceptor
|
||||
}
|
||||
|
||||
// ServerOpt for configuring a ttrpc server
|
||||
type ServerOpt func(*serverConfig) error
|
||||
|
||||
// WithServerHandshaker can be passed to NewServer to ensure that the
|
||||
// handshaker is called before every connection attempt.
|
||||
//
|
||||
// Only one handshaker is allowed per server.
|
||||
func WithServerHandshaker(handshaker Handshaker) ServerOpt {
|
||||
return func(c *serverConfig) error {
|
||||
if c.handshaker != nil {
|
||||
return errors.New("only one handshaker allowed per server")
|
||||
}
|
||||
c.handshaker = handshaker
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithUnaryServerInterceptor sets the provided interceptor on the server
|
||||
func WithUnaryServerInterceptor(i UnaryServerInterceptor) ServerOpt {
|
||||
return func(c *serverConfig) error {
|
||||
if c.interceptor != nil {
|
||||
return errors.New("only one interceptor allowed per server")
|
||||
}
|
||||
c.interceptor = i
|
||||
return nil
|
||||
}
|
||||
}
|
||||
vendor/github.com/containerd/ttrpc/go.mod (generated, vendored)
@@ -1,14 +0,0 @@
module github.com/containerd/ttrpc

go 1.13

require (
	github.com/gogo/protobuf v1.3.1
	github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
	github.com/pkg/errors v0.9.1
	github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1
	github.com/sirupsen/logrus v1.4.2
	golang.org/x/sys v0.0.0-20200120151820-655fe14d7479
	google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24
	google.golang.org/grpc v1.26.0
)
vendor/github.com/containerd/ttrpc/go.sum (generated, vendored)
@@ -1,85 +0,0 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1 h1:Lo6mRUjdS99f3zxYOUalftWHUoOGaDRqFk1+j0Q57/I=
|
||||
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5 h1:f005F/Jl5JLP036x7QIvUVhNTqxvSYwFIiyOh2q12iU=
|
||||
golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479 h1:LhLiKguPgZL+Tglay4GhVtfF0kb8cvOJ0dHTCBO8YNI=
|
||||
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69 h1:4rNOqY4ULrKzS6twXa619uQgI7h9PaVd4ZhjFQ7C5zs=
|
||||
google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24 h1:wDju+RU97qa0FZT0QnZDg9Uc2dH0Ql513kFvHocz+WM=
|
||||
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
vendor/github.com/containerd/ttrpc/handshake.go (generated, vendored)
@@ -1,50 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ttrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
)
|
||||
|
||||
// Handshaker defines the interface for connection handshakes performed on the
|
||||
// server or client when first connecting.
|
||||
type Handshaker interface {
|
||||
// Handshake should confirm or decorate a connection that may be incoming
|
||||
// to a server or outgoing from a client.
|
||||
//
|
||||
// If this returns without an error, the caller should use the connection
|
||||
// in place of the original connection.
|
||||
//
|
||||
// The second return value can contain credential specific data, such as
|
||||
// unix socket credentials or TLS information.
|
||||
//
|
||||
// While we currently only have implementations on the server-side, this
|
||||
// interface should be sufficient to implement similar handshakes on the
|
||||
// client-side.
|
||||
Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
|
||||
}
|
||||
|
||||
type handshakerFunc func(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
|
||||
|
||||
func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
|
||||
return fn(ctx, conn)
|
||||
}
|
||||
|
||||
func noopHandshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
|
||||
return conn, nil, nil
|
||||
}
|
||||
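The `Handshaker` interface in the `handshake.go` file above takes an accepted connection and may replace or decorate it before the server serves requests on it. As a rough sketch only: a custom handshaker that logs the peer and passes the connection through unchanged, wired up via the `WithServerHandshaker` option from `config.go` earlier in this diff. `ttrpc.NewServer` is not among the files shown here and is assumed to accept `ServerOpt` values.

```go
package main

import (
	"context"
	"log"
	"net"

	"github.com/containerd/ttrpc"
)

// loggingHandshaker satisfies the Handshaker interface from handshake.go:
// it inspects the incoming connection and returns it unchanged.
type loggingHandshaker struct{}

func (loggingHandshaker) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
	log.Printf("accepted ttrpc connection from %v", conn.RemoteAddr())
	// No credential data is attached in this sketch; a real handshaker could
	// return unix socket credentials or TLS state as the second value.
	return conn, nil, nil
}

func main() {
	// Assumption: ttrpc.NewServer accepts ServerOpt values such as the one
	// produced by WithServerHandshaker shown in config.go above.
	srv, err := ttrpc.NewServer(ttrpc.WithServerHandshaker(loggingHandshaker{}))
	if err != nil {
		log.Fatalf("new server: %v", err)
	}
	_ = srv // service registration and serving are outside this sketch
}
```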
vendor/github.com/containerd/ttrpc/interceptor.go (generated, vendored)
@@ -1,50 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ttrpc
|
||||
|
||||
import "context"
|
||||
|
||||
// UnaryServerInfo provides information about the server request
|
||||
type UnaryServerInfo struct {
|
||||
FullMethod string
|
||||
}
|
||||
|
||||
// UnaryClientInfo provides information about the client request
|
||||
type UnaryClientInfo struct {
|
||||
FullMethod string
|
||||
}
|
||||
|
||||
// Unmarshaler contains the server request data and allows it to be unmarshaled
|
||||
// into a concrete type
|
||||
type Unmarshaler func(interface{}) error
|
||||
|
||||
// Invoker invokes the client's request and response from the ttrpc server
|
||||
type Invoker func(context.Context, *Request, *Response) error
|
||||
|
||||
// UnaryServerInterceptor specifies the interceptor function for server request/response
|
||||
type UnaryServerInterceptor func(context.Context, Unmarshaler, *UnaryServerInfo, Method) (interface{}, error)
|
||||
|
||||
// UnaryClientInterceptor specifies the interceptor function for client request/response
|
||||
type UnaryClientInterceptor func(context.Context, *Request, *Response, *UnaryClientInfo, Invoker) error
|
||||
|
||||
func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, info *UnaryServerInfo, method Method) (interface{}, error) {
|
||||
return method(ctx, unmarshal)
|
||||
}
|
||||
|
||||
func defaultClientInterceptor(ctx context.Context, req *Request, resp *Response, _ *UnaryClientInfo, invoker Invoker) error {
|
||||
return invoker(ctx, req, resp)
|
||||
}
|
||||
vendor/github.com/containerd/ttrpc/metadata.go (107 lines changed, generated, vendored)
@@ -1,107 +0,0 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ttrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MD is the user type for ttrpc metadata
|
||||
type MD map[string][]string
|
||||
|
||||
// Get returns the metadata for a given key when they exist.
|
||||
// If there is no metadata, a nil slice and false are returned.
|
||||
func (m MD) Get(key string) ([]string, bool) {
|
||||
key = strings.ToLower(key)
|
||||
list, ok := m[key]
|
||||
if !ok || len(list) == 0 {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return list, true
|
||||
}
|
||||
|
||||
// Set sets the provided values for a given key.
|
||||
// The values will overwrite any existing values.
|
||||
// If no values provided, a key will be deleted.
|
||||
func (m MD) Set(key string, values ...string) {
|
||||
key = strings.ToLower(key)
|
||||
if len(values) == 0 {
|
||||
delete(m, key)
|
||||
return
|
||||
}
|
||||
m[key] = values
|
||||
}
|
||||
|
||||
// Append appends additional values to the given key.
|
||||
func (m MD) Append(key string, values ...string) {
|
||||
key = strings.ToLower(key)
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
current, ok := m[key]
|
||||
if ok {
|
||||
m.Set(key, append(current, values...)...)
|
||||
} else {
|
||||
m.Set(key, values...)
|
||||
}
|
||||
}
|
||||
|
||||
func (m MD) setRequest(r *Request) {
|
||||
for k, values := range m {
|
||||
for _, v := range values {
|
||||
r.Metadata = append(r.Metadata, &KeyValue{
|
||||
Key: k,
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m MD) fromRequest(r *Request) {
|
||||
for _, kv := range r.Metadata {
|
||||
m[kv.Key] = append(m[kv.Key], kv.Value)
|
||||
}
|
||||
}
|
||||
|
||||
type metadataKey struct{}
|
||||
|
||||
// GetMetadata retrieves metadata from context.Context (previously attached with WithMetadata)
|
||||
func GetMetadata(ctx context.Context) (MD, bool) {
|
||||
metadata, ok := ctx.Value(metadataKey{}).(MD)
|
||||
return metadata, ok
|
||||
}
|
||||
|
||||
// GetMetadataValue gets a specific metadata value by name from context.Context
|
||||
func GetMetadataValue(ctx context.Context, name string) (string, bool) {
|
||||
metadata, ok := GetMetadata(ctx)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
|
||||
if list, ok := metadata.Get(name); ok {
|
||||
return list[0], true
|
||||
}
|
||||
|
||||
return "", false
|
||||
}
|
||||
|
||||
// WithMetadata attaches metadata map to a context.Context
|
||||
func WithMetadata(ctx context.Context, md MD) context.Context {
|
||||
return context.WithValue(ctx, metadataKey{}, md)
|
||||
}
|
||||
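
For reference, a minimal sketch (not part of this changeset) of how the ttrpc metadata helpers removed above are typically used, assuming the vendored github.com/containerd/ttrpc package; the metadata key shown is hypothetical and only for illustration.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/ttrpc"
)

func main() {
	// Attach request metadata before making a ttrpc call.
	md := ttrpc.MD{}
	md.Set("nvidia-visible-devices", "all") // hypothetical key, illustration only

	ctx := ttrpc.WithMetadata(context.Background(), md)

	// On the receiving side (inside a Method or interceptor), read it back.
	if v, ok := ttrpc.GetMetadataValue(ctx, "nvidia-visible-devices"); ok {
		fmt.Println("metadata:", v)
	}
}
```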
vendor/github.com/containerd/ttrpc/server.go (500 lines changed, generated, vendored)
@@ -1,500 +0,0 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ttrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrServerClosed = errors.New("ttrpc: server closed")
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
config *serverConfig
|
||||
services *serviceSet
|
||||
codec codec
|
||||
|
||||
mu sync.Mutex
|
||||
listeners map[net.Listener]struct{}
|
||||
connections map[*serverConn]struct{} // all connections to current state
|
||||
done chan struct{} // marks point at which we stop serving requests
|
||||
}
|
||||
|
||||
func NewServer(opts ...ServerOpt) (*Server, error) {
|
||||
config := &serverConfig{}
|
||||
for _, opt := range opts {
|
||||
if err := opt(config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if config.interceptor == nil {
|
||||
config.interceptor = defaultServerInterceptor
|
||||
}
|
||||
|
||||
return &Server{
|
||||
config: config,
|
||||
services: newServiceSet(config.interceptor),
|
||||
done: make(chan struct{}),
|
||||
listeners: make(map[net.Listener]struct{}),
|
||||
connections: make(map[*serverConn]struct{}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Server) Register(name string, methods map[string]Method) {
|
||||
s.services.register(name, methods)
|
||||
}
|
||||
|
||||
func (s *Server) Serve(ctx context.Context, l net.Listener) error {
|
||||
s.addListener(l)
|
||||
defer s.closeListener(l)
|
||||
|
||||
var (
|
||||
backoff time.Duration
|
||||
handshaker = s.config.handshaker
|
||||
)
|
||||
|
||||
if handshaker == nil {
|
||||
handshaker = handshakerFunc(noopHandshake)
|
||||
}
|
||||
|
||||
for {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
select {
|
||||
case <-s.done:
|
||||
return ErrServerClosed
|
||||
default:
|
||||
}
|
||||
|
||||
if terr, ok := err.(interface {
|
||||
Temporary() bool
|
||||
}); ok && terr.Temporary() {
|
||||
if backoff == 0 {
|
||||
backoff = time.Millisecond
|
||||
} else {
|
||||
backoff *= 2
|
||||
}
|
||||
|
||||
if max := time.Second; backoff > max {
|
||||
backoff = max
|
||||
}
|
||||
|
||||
sleep := time.Duration(rand.Int63n(int64(backoff)))
|
||||
logrus.WithError(err).Errorf("ttrpc: failed accept; backoff %v", sleep)
|
||||
time.Sleep(sleep)
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
backoff = 0
|
||||
|
||||
approved, handshake, err := handshaker.Handshake(ctx, conn)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Errorf("ttrpc: refusing connection after handshake")
|
||||
conn.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
sc := s.newConn(approved, handshake)
|
||||
go sc.run(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) Shutdown(ctx context.Context) error {
|
||||
s.mu.Lock()
|
||||
select {
|
||||
case <-s.done:
|
||||
default:
|
||||
// protected by mutex
|
||||
close(s.done)
|
||||
}
|
||||
lnerr := s.closeListeners()
|
||||
s.mu.Unlock()
|
||||
|
||||
ticker := time.NewTicker(200 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
if s.closeIdleConns() {
|
||||
return lnerr
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-ticker.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close the server without waiting for active connections.
|
||||
func (s *Server) Close() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
select {
|
||||
case <-s.done:
|
||||
default:
|
||||
// protected by mutex
|
||||
close(s.done)
|
||||
}
|
||||
|
||||
err := s.closeListeners()
|
||||
for c := range s.connections {
|
||||
c.close()
|
||||
delete(s.connections, c)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Server) addListener(l net.Listener) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.listeners[l] = struct{}{}
|
||||
}
|
||||
|
||||
func (s *Server) closeListener(l net.Listener) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
return s.closeListenerLocked(l)
|
||||
}
|
||||
|
||||
func (s *Server) closeListenerLocked(l net.Listener) error {
|
||||
defer delete(s.listeners, l)
|
||||
return l.Close()
|
||||
}
|
||||
|
||||
func (s *Server) closeListeners() error {
|
||||
var err error
|
||||
for l := range s.listeners {
|
||||
if cerr := s.closeListenerLocked(l); cerr != nil && err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Server) addConnection(c *serverConn) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.connections[c] = struct{}{}
|
||||
}
|
||||
|
||||
func (s *Server) delConnection(c *serverConn) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
delete(s.connections, c)
|
||||
}
|
||||
|
||||
func (s *Server) countConnection() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
return len(s.connections)
|
||||
}
|
||||
|
||||
func (s *Server) closeIdleConns() bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
quiescent := true
|
||||
for c := range s.connections {
|
||||
st, ok := c.getState()
|
||||
if !ok || st != connStateIdle {
|
||||
quiescent = false
|
||||
continue
|
||||
}
|
||||
c.close()
|
||||
delete(s.connections, c)
|
||||
}
|
||||
return quiescent
|
||||
}
|
||||
|
||||
type connState int
|
||||
|
||||
const (
|
||||
connStateActive = iota + 1 // outstanding requests
|
||||
connStateIdle // no requests
|
||||
connStateClosed // closed connection
|
||||
)
|
||||
|
||||
func (cs connState) String() string {
|
||||
switch cs {
|
||||
case connStateActive:
|
||||
return "active"
|
||||
case connStateIdle:
|
||||
return "idle"
|
||||
case connStateClosed:
|
||||
return "closed"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn {
|
||||
c := &serverConn{
|
||||
server: s,
|
||||
conn: conn,
|
||||
handshake: handshake,
|
||||
shutdown: make(chan struct{}),
|
||||
}
|
||||
c.setState(connStateIdle)
|
||||
s.addConnection(c)
|
||||
return c
|
||||
}
|
||||
|
||||
type serverConn struct {
|
||||
server *Server
|
||||
conn net.Conn
|
||||
handshake interface{} // data from handshake, not used for now
|
||||
state atomic.Value
|
||||
|
||||
shutdownOnce sync.Once
|
||||
shutdown chan struct{} // forced shutdown, used by close
|
||||
}
|
||||
|
||||
func (c *serverConn) getState() (connState, bool) {
|
||||
cs, ok := c.state.Load().(connState)
|
||||
return cs, ok
|
||||
}
|
||||
|
||||
func (c *serverConn) setState(newstate connState) {
|
||||
c.state.Store(newstate)
|
||||
}
|
||||
|
||||
func (c *serverConn) close() error {
|
||||
c.shutdownOnce.Do(func() {
|
||||
close(c.shutdown)
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *serverConn) run(sctx context.Context) {
|
||||
type (
|
||||
request struct {
|
||||
id uint32
|
||||
req *Request
|
||||
}
|
||||
|
||||
response struct {
|
||||
id uint32
|
||||
resp *Response
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
ch = newChannel(c.conn)
|
||||
ctx, cancel = context.WithCancel(sctx)
|
||||
active int
|
||||
state connState = connStateIdle
|
||||
responses = make(chan response)
|
||||
requests = make(chan request)
|
||||
recvErr = make(chan error, 1)
|
||||
shutdown = c.shutdown
|
||||
done = make(chan struct{})
|
||||
)
|
||||
|
||||
defer c.conn.Close()
|
||||
defer cancel()
|
||||
defer close(done)
|
||||
defer c.server.delConnection(c)
|
||||
|
||||
go func(recvErr chan error) {
|
||||
defer close(recvErr)
|
||||
sendImmediate := func(id uint32, st *status.Status) bool {
|
||||
select {
|
||||
case responses <- response{
|
||||
// even though we've had an invalid stream id, we send it
|
||||
// back on the same stream id so the client knows which
|
||||
// stream id was bad.
|
||||
id: id,
|
||||
resp: &Response{
|
||||
Status: st.Proto(),
|
||||
},
|
||||
}:
|
||||
return true
|
||||
case <-c.shutdown:
|
||||
return false
|
||||
case <-done:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-c.shutdown:
|
||||
return
|
||||
case <-done:
|
||||
return
|
||||
default: // proceed
|
||||
}
|
||||
|
||||
mh, p, err := ch.recv()
|
||||
if err != nil {
|
||||
status, ok := status.FromError(err)
|
||||
if !ok {
|
||||
recvErr <- err
|
||||
return
|
||||
}
|
||||
|
||||
// in this case, we send an error for that particular message
|
||||
// when the status is defined.
|
||||
if !sendImmediate(mh.StreamID, status) {
|
||||
return
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if mh.Type != messageTypeRequest {
|
||||
// we must ignore this for future compat.
|
||||
continue
|
||||
}
|
||||
|
||||
var req Request
|
||||
if err := c.server.codec.Unmarshal(p, &req); err != nil {
|
||||
ch.putmbuf(p)
|
||||
if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
ch.putmbuf(p)
|
||||
|
||||
if mh.StreamID%2 != 1 {
|
||||
// enforce odd client initiated identifiers.
|
||||
if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Forward the request to the main loop. We don't wait on s.done
|
||||
// because we have already accepted the client request.
|
||||
select {
|
||||
case requests <- request{
|
||||
id: mh.StreamID,
|
||||
req: &req,
|
||||
}:
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}(recvErr)
|
||||
|
||||
for {
|
||||
newstate := state
|
||||
switch {
|
||||
case active > 0:
|
||||
newstate = connStateActive
|
||||
shutdown = nil
|
||||
case active == 0:
|
||||
newstate = connStateIdle
|
||||
shutdown = c.shutdown // only enable this branch in idle mode
|
||||
}
|
||||
|
||||
if newstate != state {
|
||||
c.setState(newstate)
|
||||
state = newstate
|
||||
}
|
||||
|
||||
select {
|
||||
case request := <-requests:
|
||||
active++
|
||||
go func(id uint32) {
|
||||
ctx, cancel := getRequestContext(ctx, request.req)
|
||||
defer cancel()
|
||||
|
||||
p, status := c.server.services.call(ctx, request.req.Service, request.req.Method, request.req.Payload)
|
||||
resp := &Response{
|
||||
Status: status.Proto(),
|
||||
Payload: p,
|
||||
}
|
||||
|
||||
select {
|
||||
case responses <- response{
|
||||
id: id,
|
||||
resp: resp,
|
||||
}:
|
||||
case <-done:
|
||||
}
|
||||
}(request.id)
|
||||
case response := <-responses:
|
||||
p, err := c.server.codec.Marshal(response.resp)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("failed marshaling response")
|
||||
return
|
||||
}
|
||||
|
||||
if err := ch.send(response.id, messageTypeResponse, p); err != nil {
|
||||
logrus.WithError(err).Error("failed sending message on channel")
|
||||
return
|
||||
}
|
||||
|
||||
active--
|
||||
case err := <-recvErr:
|
||||
// TODO(stevvooe): Not wildly clear what we should do in this
|
||||
// branch. Basically, it means that we are no longer receiving
|
||||
// requests due to a terminal error.
|
||||
recvErr = nil // connection is now "closing"
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
// The client went away and we should stop processing
|
||||
// requests, so that the client connection is closed
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("error receiving message")
|
||||
}
|
||||
case <-shutdown:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var noopFunc = func() {}
|
||||
|
||||
func getRequestContext(ctx context.Context, req *Request) (retCtx context.Context, cancel func()) {
|
||||
if len(req.Metadata) > 0 {
|
||||
md := MD{}
|
||||
md.fromRequest(req)
|
||||
ctx = WithMetadata(ctx, md)
|
||||
}
|
||||
|
||||
cancel = noopFunc
|
||||
if req.TimeoutNano == 0 {
|
||||
return ctx, cancel
|
||||
}
|
||||
|
||||
ctx, cancel = context.WithTimeout(ctx, time.Duration(req.TimeoutNano))
|
||||
return ctx, cancel
|
||||
}
|
||||
vendor/github.com/containerd/ttrpc/services.go (165 lines changed, generated, vendored)
@@ -1,165 +0,0 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ttrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"unsafe"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type Method func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error)
|
||||
|
||||
type ServiceDesc struct {
|
||||
Methods map[string]Method
|
||||
|
||||
// TODO(stevvooe): Add stream support.
|
||||
}
|
||||
|
||||
type serviceSet struct {
|
||||
services map[string]ServiceDesc
|
||||
interceptor UnaryServerInterceptor
|
||||
}
|
||||
|
||||
func newServiceSet(interceptor UnaryServerInterceptor) *serviceSet {
|
||||
return &serviceSet{
|
||||
services: make(map[string]ServiceDesc),
|
||||
interceptor: interceptor,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *serviceSet) register(name string, methods map[string]Method) {
|
||||
if _, ok := s.services[name]; ok {
|
||||
panic(errors.Errorf("duplicate service %v registered", name))
|
||||
}
|
||||
|
||||
s.services[name] = ServiceDesc{
|
||||
Methods: methods,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *serviceSet) call(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, *status.Status) {
|
||||
p, err := s.dispatch(ctx, serviceName, methodName, p)
|
||||
st, ok := status.FromError(err)
|
||||
if !ok {
|
||||
st = status.New(convertCode(err), err.Error())
|
||||
}
|
||||
|
||||
return p, st
|
||||
}
|
||||
|
||||
func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, error) {
|
||||
method, err := s.resolve(serviceName, methodName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
unmarshal := func(obj interface{}) error {
|
||||
switch v := obj.(type) {
|
||||
case proto.Message:
|
||||
if err := proto.Unmarshal(p, v); err != nil {
|
||||
return status.Errorf(codes.Internal, "ttrpc: error unmarshalling payload: %v", err.Error())
|
||||
}
|
||||
default:
|
||||
return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
info := &UnaryServerInfo{
|
||||
FullMethod: fullPath(serviceName, methodName),
|
||||
}
|
||||
|
||||
resp, err := s.interceptor(ctx, unmarshal, info, method)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if isNil(resp) {
|
||||
return nil, errors.New("ttrpc: marshal called with nil")
|
||||
}
|
||||
|
||||
switch v := resp.(type) {
|
||||
case proto.Message:
|
||||
r, err := proto.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "ttrpc: error marshaling payload: %v", err.Error())
|
||||
}
|
||||
|
||||
return r, nil
|
||||
default:
|
||||
return nil, status.Errorf(codes.Internal, "ttrpc: error unsupported response type: %T", v)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *serviceSet) resolve(service, method string) (Method, error) {
|
||||
srv, ok := s.services[service]
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.NotFound, "service %v", service)
|
||||
}
|
||||
|
||||
mthd, ok := srv.Methods[method]
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.NotFound, "method %v", method)
|
||||
}
|
||||
|
||||
return mthd, nil
|
||||
}
|
||||
|
||||
// convertCode maps stdlib go errors into grpc space.
|
||||
//
|
||||
// This is ripped from the grpc-go code base.
|
||||
func convertCode(err error) codes.Code {
|
||||
switch err {
|
||||
case nil:
|
||||
return codes.OK
|
||||
case io.EOF:
|
||||
return codes.OutOfRange
|
||||
case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
|
||||
return codes.FailedPrecondition
|
||||
case os.ErrInvalid:
|
||||
return codes.InvalidArgument
|
||||
case context.Canceled:
|
||||
return codes.Canceled
|
||||
case context.DeadlineExceeded:
|
||||
return codes.DeadlineExceeded
|
||||
}
|
||||
switch {
|
||||
case os.IsExist(err):
|
||||
return codes.AlreadyExists
|
||||
case os.IsNotExist(err):
|
||||
return codes.NotFound
|
||||
case os.IsPermission(err):
|
||||
return codes.PermissionDenied
|
||||
}
|
||||
return codes.Unknown
|
||||
}
|
||||
|
||||
func fullPath(service, method string) string {
|
||||
return "/" + path.Join(service, method)
|
||||
}
|
||||
|
||||
func isNil(resp interface{}) bool {
|
||||
return (*[2]uintptr)(unsafe.Pointer(&resp))[1] == 0
|
||||
}
|
||||
vendor/github.com/containerd/ttrpc/types.go (63 lines changed, generated, vendored)
@@ -1,63 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package ttrpc

import (
    "fmt"

    spb "google.golang.org/genproto/googleapis/rpc/status"
)

type Request struct {
    Service     string      `protobuf:"bytes,1,opt,name=service,proto3"`
    Method      string      `protobuf:"bytes,2,opt,name=method,proto3"`
    Payload     []byte      `protobuf:"bytes,3,opt,name=payload,proto3"`
    TimeoutNano int64       `protobuf:"varint,4,opt,name=timeout_nano,proto3"`
    Metadata    []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3"`
}

func (r *Request) Reset()         { *r = Request{} }
func (r *Request) String() string { return fmt.Sprintf("%+#v", r) }
func (r *Request) ProtoMessage()  {}

type Response struct {
    Status  *spb.Status `protobuf:"bytes,1,opt,name=status,proto3"`
    Payload []byte      `protobuf:"bytes,2,opt,name=payload,proto3"`
}

func (r *Response) Reset()         { *r = Response{} }
func (r *Response) String() string { return fmt.Sprintf("%+#v", r) }
func (r *Response) ProtoMessage()  {}

type StringList struct {
    List []string `protobuf:"bytes,1,rep,name=list,proto3"`
}

func (r *StringList) Reset()         { *r = StringList{} }
func (r *StringList) String() string { return fmt.Sprintf("%+#v", r) }
func (r *StringList) ProtoMessage()  {}

func makeStringList(item ...string) StringList { return StringList{List: item} }

type KeyValue struct {
    Key   string `protobuf:"bytes,1,opt,name=key,proto3"`
    Value string `protobuf:"bytes,2,opt,name=value,proto3"`
}

func (m *KeyValue) Reset()         { *m = KeyValue{} }
func (*KeyValue) ProtoMessage()    {}
func (m *KeyValue) String() string { return fmt.Sprintf("%+#v", m) }
vendor/github.com/containerd/ttrpc/unixcreds_linux.go (108 lines changed, generated, vendored)
@@ -1,108 +0,0 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ttrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type UnixCredentialsFunc func(*unix.Ucred) error
|
||||
|
||||
func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
|
||||
uc, err := requireUnixSocket(conn)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: require unix socket")
|
||||
}
|
||||
|
||||
rs, err := uc.SyscallConn()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: (net.UnixConn).SyscallConn failed")
|
||||
}
|
||||
var (
|
||||
ucred *unix.Ucred
|
||||
ucredErr error
|
||||
)
|
||||
if err := rs.Control(func(fd uintptr) {
|
||||
ucred, ucredErr = unix.GetsockoptUcred(int(fd), unix.SOL_SOCKET, unix.SO_PEERCRED)
|
||||
}); err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: (*syscall.RawConn).Control failed")
|
||||
}
|
||||
|
||||
if ucredErr != nil {
|
||||
return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials")
|
||||
}
|
||||
|
||||
if err := fn(ucred); err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: credential check failed")
|
||||
}
|
||||
|
||||
return uc, ucred, nil
|
||||
}
|
||||
|
||||
// UnixSocketRequireUidGid requires specific *effective* UID/GID, rather than the real UID/GID.
|
||||
//
|
||||
// For example, if a daemon binary is owned by the root (UID 0) with SUID bit but running as an
|
||||
// unprivileged user (UID 1001), the effective UID becomes 0, and the real UID becomes 1001.
|
||||
// So calling this function with uid=0 allows a connection from effective UID 0 but rejects
|
||||
// a connection from effective UID 1001.
|
||||
//
|
||||
// See socket(7), SO_PEERCRED: "The returned credentials are those that were in effect at the time of the call to connect(2) or socketpair(2)."
|
||||
func UnixSocketRequireUidGid(uid, gid int) UnixCredentialsFunc {
|
||||
return func(ucred *unix.Ucred) error {
|
||||
return requireUidGid(ucred, uid, gid)
|
||||
}
|
||||
}
|
||||
|
||||
func UnixSocketRequireRoot() UnixCredentialsFunc {
|
||||
return UnixSocketRequireUidGid(0, 0)
|
||||
}
|
||||
|
||||
// UnixSocketRequireSameUser resolves the current effective unix user and returns a
|
||||
// UnixCredentialsFunc that will validate incoming unix connections against the
|
||||
// current credentials.
|
||||
//
|
||||
// This is useful when using abstract sockets that are accessible by all users.
|
||||
func UnixSocketRequireSameUser() UnixCredentialsFunc {
|
||||
euid, egid := os.Geteuid(), os.Getegid()
|
||||
return UnixSocketRequireUidGid(euid, egid)
|
||||
}
|
||||
|
||||
func requireRoot(ucred *unix.Ucred) error {
|
||||
return requireUidGid(ucred, 0, 0)
|
||||
}
|
||||
|
||||
func requireUidGid(ucred *unix.Ucred, uid, gid int) error {
|
||||
if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) {
|
||||
return errors.Wrap(syscall.EPERM, "ttrpc: invalid credentials")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func requireUnixSocket(conn net.Conn) (*net.UnixConn, error) {
|
||||
uc, ok := conn.(*net.UnixConn)
|
||||
if !ok {
|
||||
return nil, errors.New("a unix socket connection is required")
|
||||
}
|
||||
|
||||
return uc, nil
|
||||
}
|
||||
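
For orientation, a minimal sketch (not part of this changeset) of standing up a ttrpc server with the APIs removed above (NewServer, Register, Serve and the Method type from services.go). The service name, method name, and socket path are hypothetical, and the use of gogo's types.Empty as a placeholder message is an assumption.

```go
package main

import (
	"context"
	"log"
	"net"

	"github.com/containerd/ttrpc"
	"github.com/gogo/protobuf/types"
)

func main() {
	srv, err := ttrpc.NewServer()
	if err != nil {
		log.Fatal(err)
	}

	// Register a single method; the handler unmarshals its payload and returns
	// a proto message, mirroring the dispatch path in services.go above.
	srv.Register("example.v1.Example", map[string]ttrpc.Method{
		"Ping": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
			var req types.Empty
			if err := unmarshal(&req); err != nil {
				return nil, err
			}
			return &types.Empty{}, nil
		},
	})

	l, err := net.Listen("unix", "/tmp/example.sock") // hypothetical socket path
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(context.Background(), l))
}
```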
vendor/github.com/containerd/typeurl/.gitignore (2 lines changed, generated, vendored)
@@ -1,2 +0,0 @@
*.test
coverage.txt
vendor/github.com/containerd/typeurl/LICENSE (191 lines changed, generated, vendored)
@@ -1,191 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright The containerd Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
vendor/github.com/containerd/typeurl/README.md (20 lines changed, generated, vendored)
@@ -1,20 +0,0 @@
# typeurl

[](https://pkg.go.dev/github.com/containerd/typeurl)
[](https://github.com/containerd/typeurl/actions?query=workflow%3ACI)
[](https://codecov.io/gh/containerd/typeurl)
[](https://goreportcard.com/report/github.com/containerd/typeurl)

A Go package for managing the registration, marshaling, and unmarshaling of encoded types.

This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](https://github.com/gogo/protobuf/blob/master/protobuf/google/protobuf/any.proto).

## Project details

**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
 * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
 * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
 * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.
vendor/github.com/containerd/typeurl/doc.go (83 lines changed, generated, vendored)
@@ -1,83 +0,0 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package typeurl
|
||||
|
||||
// Package typeurl assists with managing the registration, marshaling, and
|
||||
// unmarshaling of types encoded as protobuf.Any.
|
||||
//
|
||||
// A protobuf.Any is a proto message that can contain any arbitrary data. It
|
||||
// consists of two components, a TypeUrl and a Value, and its proto definition
|
||||
// looks like this:
|
||||
//
|
||||
// message Any {
|
||||
// string type_url = 1;
|
||||
// bytes value = 2;
|
||||
// }
|
||||
//
|
||||
// The TypeUrl is used to distinguish the contents from other proto.Any
|
||||
// messages. This typeurl library manages these URLs to enable automagic
|
||||
// marshaling and unmarshaling of the contents.
|
||||
//
|
||||
// For example, consider this go struct:
|
||||
//
|
||||
// type Foo struct {
|
||||
// Field1 string
|
||||
// Field2 string
|
||||
// }
|
||||
//
|
||||
// To use typeurl, types must first be registered. This is typically done in
|
||||
// the init function
|
||||
//
|
||||
// func init() {
|
||||
// typeurl.Register(&Foo{}, "Foo")
|
||||
// }
|
||||
//
|
||||
// This will register the type Foo with the url path "Foo". The arguments to
|
||||
// Register are variadic, and are used to construct a url path. Consider this
|
||||
// example, from the github.com/containerd/containerd/client package:
|
||||
//
|
||||
// func init() {
|
||||
// const prefix = "types.containerd.io"
|
||||
// // register TypeUrls for commonly marshaled external types
|
||||
// major := strconv.Itoa(specs.VersionMajor)
|
||||
// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
|
||||
// // this function has more Register calls, which are elided.
|
||||
// }
|
||||
//
|
||||
// This registers several types under a more complex url, which ends up mapping
|
||||
// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other
|
||||
// value for major).
|
||||
//
|
||||
// Once a type is registered, it can be marshaled to a proto.Any message simply
|
||||
// by calling `MarshalAny`, like this:
|
||||
//
|
||||
// foo := &Foo{Field1: "value1", Field2: "value2"}
|
||||
// anyFoo, err := typeurl.MarshalAny(foo)
|
||||
//
|
||||
// MarshalAny will resolve the correct URL for the type. If the type in
|
||||
// question implements the proto.Message interface, then it will be marshaled
|
||||
// as a proto message. Otherwise, it will be marshaled as json. This means that
|
||||
// typeurl will work on any arbitrary data, whether or not it has a proto
|
||||
// definition, as long as it can be serialized to json.
|
||||
//
|
||||
// To unmarshal, the process is simply inverse:
|
||||
//
|
||||
// iface, err := typeurl.UnmarshalAny(anyFoo)
|
||||
// foo := iface.(*Foo)
|
||||
//
|
||||
// The correct type is automatically chosen from the type registry, and the
|
||||
// returned interface can be cast straight to that type.
|
||||
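
For reference, a minimal sketch (not part of this changeset) of the register/marshal/unmarshal flow that the doc.go comment above describes, assuming the vendored github.com/containerd/typeurl package; the Foo type and its URL components are hypothetical.

```go
package main

import (
	"fmt"
	"log"

	"github.com/containerd/typeurl"
)

type Foo struct {
	Field1 string
	Field2 string
}

func init() {
	// Register Foo under a URL path; non-proto types are marshaled as JSON.
	typeurl.Register(&Foo{}, "example.com", "Foo") // hypothetical URL prefix
}

func main() {
	anyFoo, err := typeurl.MarshalAny(&Foo{Field1: "value1", Field2: "value2"})
	if err != nil {
		log.Fatal(err)
	}

	iface, err := typeurl.UnmarshalAny(anyFoo)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", iface.(*Foo))
}
```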
vendor/github.com/containerd/typeurl/go.mod (8 lines changed, generated, vendored)
@@ -1,8 +0,0 @@
module github.com/containerd/typeurl

go 1.13

require (
    github.com/gogo/protobuf v1.3.2
    github.com/pkg/errors v0.9.1
)
vendor/github.com/containerd/typeurl/go.sum (33 lines changed, generated, vendored)
@@ -1,33 +0,0 @@
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
vendor/github.com/containerd/typeurl/types.go (214 lines changed, generated, vendored)
@@ -1,214 +0,0 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package typeurl
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
mu sync.RWMutex
|
||||
registry = make(map[reflect.Type]string)
|
||||
)
|
||||
|
||||
// Definitions of common error types used throughout typeurl.
|
||||
//
|
||||
// These error types are used with errors.Wrap and errors.Wrapf to add context
|
||||
// to an error.
|
||||
//
|
||||
// To detect an error class, use errors.Is() functions to tell whether an
|
||||
// error is of this type.
|
||||
var (
|
||||
ErrNotFound = errors.New("not found")
|
||||
)
|
||||
|
||||
// Register a type with a base URL for JSON marshaling. When the MarshalAny and
|
||||
// UnmarshalAny functions are called they will treat the Any type value as JSON.
|
||||
// To use protocol buffers for handling the Any value the proto.Register
|
||||
// function should be used instead of this function.
|
||||
func Register(v interface{}, args ...string) {
|
||||
var (
|
||||
t = tryDereference(v)
|
||||
p = path.Join(args...)
|
||||
)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if et, ok := registry[t]; ok {
|
||||
if et != p {
|
||||
panic(errors.Errorf("type registered with alternate path %q != %q", et, p))
|
||||
}
|
||||
return
|
||||
}
|
||||
registry[t] = p
|
||||
}
|
||||
|
||||
// TypeURL returns the type url for a registered type.
|
||||
func TypeURL(v interface{}) (string, error) {
|
||||
mu.RLock()
|
||||
u, ok := registry[tryDereference(v)]
|
||||
mu.RUnlock()
|
||||
if !ok {
|
||||
// fallback to the proto registry if it is a proto message
|
||||
pb, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
return "", errors.Wrapf(ErrNotFound, "type %s", reflect.TypeOf(v))
|
||||
}
|
||||
return proto.MessageName(pb), nil
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
// Is returns true if the type of the Any is the same as v.
func Is(any *types.Any, v interface{}) bool {
	// call to check that v is a pointer
	tryDereference(v)
	url, err := TypeURL(v)
	if err != nil {
		return false
	}
	return any.TypeUrl == url
}

// MarshalAny marshals the value v into an any with the correct TypeUrl.
// If the provided object is already a proto.Any message, then it will be
// returned verbatim. If it is of type proto.Message, it will be marshaled as a
// protocol buffer. Otherwise, the object will be marshaled to json.
func MarshalAny(v interface{}) (*types.Any, error) {
	var marshal func(v interface{}) ([]byte, error)
	switch t := v.(type) {
	case *types.Any:
		// avoid reserializing the type if we have an any.
		return t, nil
	case proto.Message:
		marshal = func(v interface{}) ([]byte, error) {
			return proto.Marshal(t)
		}
	default:
		marshal = json.Marshal
	}

	url, err := TypeURL(v)
	if err != nil {
		return nil, err
	}

	data, err := marshal(v)
	if err != nil {
		return nil, err
	}
	return &types.Any{
		TypeUrl: url,
		Value:   data,
	}, nil
}

// UnmarshalAny unmarshals the any type into a concrete type.
func UnmarshalAny(any *types.Any) (interface{}, error) {
	return UnmarshalByTypeURL(any.TypeUrl, any.Value)
}

// UnmarshalByTypeURL unmarshals the given type and value into a concrete type.
func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) {
	return unmarshal(typeURL, value, nil)
}

// UnmarshalTo unmarshals the any type into a concrete type passed in the out
// argument. It is identical to UnmarshalAny, but lets clients provide a
// destination type through the out argument.
func UnmarshalTo(any *types.Any, out interface{}) error {
	return UnmarshalToByTypeURL(any.TypeUrl, any.Value, out)
}

// UnmarshalToByTypeURL unmarshals the given type and value into a concrete type
// passed in the out argument. It is identical to UnmarshalByTypeURL, but lets
// clients provide a destination type through the out argument.
func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error {
	_, err := unmarshal(typeURL, value, out)
	return err
}

func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
	t, err := getTypeByUrl(typeURL)
	if err != nil {
		return nil, err
	}

	if v == nil {
		v = reflect.New(t.t).Interface()
	} else {
		// Validate interface type provided by client
		vURL, err := TypeURL(v)
		if err != nil {
			return nil, err
		}
		if typeURL != vURL {
			return nil, errors.Errorf("can't unmarshal type %q to output %q", typeURL, vURL)
		}
	}

	if t.isProto {
		err = proto.Unmarshal(value, v.(proto.Message))
	} else {
		err = json.Unmarshal(value, v)
	}

	return v, err
}

type urlType struct {
	t       reflect.Type
	isProto bool
}

func getTypeByUrl(url string) (urlType, error) {
	mu.RLock()
	for t, u := range registry {
		if u == url {
			mu.RUnlock()
			return urlType{
				t: t,
			}, nil
		}
	}
	mu.RUnlock()
	// fallback to proto registry
	t := proto.MessageType(url)
	if t != nil {
		return urlType{
			// get the underlying Elem because proto returns a pointer to the type
			t:       t.Elem(),
			isProto: true,
		}, nil
	}
	return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url)
}

func tryDereference(v interface{}) reflect.Type {
	t := reflect.TypeOf(v)
	if t.Kind() == reflect.Ptr {
		// require check of pointer but dereference to register
		return t.Elem()
	}
	panic("v is not a pointer to a type")
}
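Taken together, these helpers are normally used as a register/marshal/unmarshal round trip. The sketch below is illustrative only: it assumes the vendored package is imported as `github.com/containerd/typeurl`, and the `Config` type and URL components are made-up names, not anything defined in this repository.

```go
package main

import (
	"fmt"

	"github.com/containerd/typeurl"
)

// Config is a hypothetical application type used only for illustration.
type Config struct {
	Name  string `json:"name"`
	Count int    `json:"count"`
}

func main() {
	// Non-proto types must be registered so TypeURL can resolve them;
	// the URL components here are arbitrary examples.
	typeurl.Register(&Config{}, "example.com", "Config")

	packed, err := typeurl.MarshalAny(&Config{Name: "demo", Count: 3})
	if err != nil {
		panic(err)
	}

	// Round trip: the registry maps the TypeUrl back to *Config.
	v, err := typeurl.UnmarshalAny(packed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %+v\n", packed.TypeUrl, v.(*Config))
}
```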
vendor/github.com/docker/go-events/.gitignore (24 lines, generated, vendored)
@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
vendor/github.com/docker/go-events/CONTRIBUTING.md (70 lines, generated, vendored)
@@ -1,70 +0,0 @@
|
||||
# Contributing to Docker open source projects
|
||||
|
||||
Want to hack on go-events? Awesome! Here are instructions to get you started.
|
||||
|
||||
go-events is part of the [Docker](https://www.docker.com) project, and
|
||||
follows the same rules and principles. If you're already familiar with the way
|
||||
Docker does things, you'll feel right at home.
|
||||
|
||||
Otherwise, go read Docker's
|
||||
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
|
||||
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
|
||||
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
|
||||
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
|
||||
|
||||
For an in-depth description of our contribution process, visit the
|
||||
contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)
|
||||
|
||||
### Sign your work
|
||||
|
||||
The sign-off is a simple line at the end of the explanation for the patch. Your
|
||||
signature certifies that you wrote the patch or otherwise have the right to pass
|
||||
it on as an open-source patch. The rules are pretty simple: if you can certify
|
||||
the below (from [developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
Then you just add a line to every git commit message:
|
||||
|
||||
Signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
|
||||
Use your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||
|
||||
If you set your `user.name` and `user.email` git configs, you can sign your
|
||||
commit automatically with `git commit -s`.
|
vendor/github.com/docker/go-events/LICENSE (201 lines, generated, vendored)
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2016 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
vendor/github.com/docker/go-events/MAINTAINERS (46 lines, generated, vendored)
@@ -1,46 +0,0 @@
|
||||
# go-events maintainers file
|
||||
#
|
||||
# This file describes who runs the docker/go-events project and how.
|
||||
# This is a living document - if you see something out of date or missing, speak up!
|
||||
#
|
||||
# It is structured to be consumable by both humans and programs.
|
||||
# To extract its contents programmatically, use any TOML-compliant parser.
|
||||
#
|
||||
# This file is compiled into the MAINTAINERS file in docker/opensource.
|
||||
#
|
||||
[Org]
|
||||
[Org."Core maintainers"]
|
||||
people = [
|
||||
"aaronlehmann",
|
||||
"aluzzardi",
|
||||
"lk4d4",
|
||||
"stevvooe",
|
||||
]
|
||||
|
||||
[people]
|
||||
|
||||
# A reference list of all people associated with the project.
|
||||
# All other sections should refer to people by their canonical key
|
||||
# in the people section.
|
||||
|
||||
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
|
||||
|
||||
[people.aaronlehmann]
|
||||
Name = "Aaron Lehmann"
|
||||
Email = "aaron.lehmann@docker.com"
|
||||
GitHub = "aaronlehmann"
|
||||
|
||||
[people.aluzzardi]
|
||||
Name = "Andrea Luzzardi"
|
||||
Email = "al@docker.com"
|
||||
GitHub = "aluzzardi"
|
||||
|
||||
[people.lk4d4]
|
||||
Name = "Alexander Morozov"
|
||||
Email = "lk4d4@docker.com"
|
||||
GitHub = "lk4d4"
|
||||
|
||||
[people.stevvooe]
|
||||
Name = "Stephen Day"
|
||||
Email = "stephen.day@docker.com"
|
||||
GitHub = "stevvooe"
|
vendor/github.com/docker/go-events/README.md (117 lines, generated, vendored)
@@ -1,117 +0,0 @@
# Docker Events Package

[GoDoc](https://godoc.org/github.com/docker/go-events)
[Circle CI](https://circleci.com/gh/docker/go-events)

The Docker `events` package implements composable event distribution for Go.

Originally created to implement the [notifications in Docker Registry 2](https://github.com/docker/distribution/blob/master/docs/notifications.md),
we've found the pattern to be useful in other applications. This package is
most of the same code with slightly updated interfaces. Much of the internals
have been made available.

## Usage

The `events` package centers around a `Sink` type. Events are written with
calls to `Sink.Write(event Event)`. Sinks can be wired up in various
configurations to achieve interesting behavior.

The canonical example is that employed by the
[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications)
package. Let's say we have a type `httpSink` where we'd like to queue
notifications. As a rule, it should send a single http request and return an
error if it fails:

```go
func (h *httpSink) Write(event Event) error {
	p, err := json.Marshal(event)
	if err != nil {
		return err
	}
	body := bytes.NewReader(p)
	resp, err := h.client.Post(h.url, "application/json", body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return errors.New("unexpected status")
	}

	return nil
}

// implement (*httpSink).Close()
```

With just that, we can start using components from this package. One can call
`(*httpSink).Write` to send events as the body of a post request to a
configured URL.

### Retries

HTTP can be unreliable. The first feature we'd like is to have some retry:

```go
hs := newHTTPSink(/*...*/)
retry := NewRetryingSink(hs, NewBreaker(5, time.Second))
```

We now have a sink that will retry events against the `httpSink` until they
succeed. The retry will back off for one second after 5 consecutive failures
using the breaker strategy.

### Queues

This isn't quite enough. We want a sink that doesn't block while we are
waiting for events to be sent. Let's add a `Queue`:

```go
queue := NewQueue(retry)
```

Now, we have an unbounded queue that will work through all events sent with
`(*Queue).Write`. Events can be added asynchronously to the queue without
blocking the current execution path. This is ideal for use in an http request.

### Broadcast

It usually turns out that you want to send to more than one listener. We can
use `Broadcaster` to support this:

```go
var broadcast = NewBroadcaster() // make it available somewhere in your application.
broadcast.Add(queue)  // add your queue!
broadcast.Add(queue2) // and another!
```

With the above, we can now call `broadcast.Write` in our http handlers and have
all the events distributed to each queue. Because the events are queued, no
listener blocks another.

### Extending

For the most part, the above is sufficient for a lot of applications. However,
extending the above functionality can be done by implementing your own `Sink`.
The behavior and semantics of the sink can be completely dependent on the
application requirements. The interface is provided below for reference:

```go
type Sink interface {
	Write(Event) error
	Close() error
}
```

Application behavior can be controlled by how `Write` behaves. The examples
above are designed to queue the message and return as quickly as possible.
Other implementations may block until the event is committed to durable
storage.

## Copyright and license

Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License,
Version 2.0. See [LICENSE](LICENSE) for the full license text.
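The `Extending` section above only lists the interface. As a rough sketch of what a custom sink might look like when composed with the queue and retry pieces described earlier, consider the following; `logSink` and the payload string are invented for illustration and are not part of this repository:

```go
package main

import (
	"log"
	"time"

	events "github.com/docker/go-events"
)

// logSink is a hypothetical Sink that just logs each event.
type logSink struct{}

func (s *logSink) Write(event events.Event) error {
	log.Printf("event: %v", event)
	return nil
}

func (s *logSink) Close() error { return nil }

func main() {
	// Compose the pieces described above: retry around the sink,
	// then a queue so writers never block.
	retry := events.NewRetryingSink(&logSink{}, events.NewBreaker(5, time.Second))
	queue := events.NewQueue(retry)
	defer queue.Close()

	queue.Write("something happened") // any value satisfies Event
}
```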
vendor/github.com/docker/go-events/broadcast.go (178 lines, generated, vendored)
@@ -1,178 +0,0 @@
|
||||
package events
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Broadcaster sends events to multiple, reliable Sinks. The goal of this
|
||||
// component is to dispatch events to configured endpoints. Reliability can be
|
||||
// provided by wrapping incoming sinks.
|
||||
type Broadcaster struct {
|
||||
sinks []Sink
|
||||
events chan Event
|
||||
adds chan configureRequest
|
||||
removes chan configureRequest
|
||||
|
||||
shutdown chan struct{}
|
||||
closed chan struct{}
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// NewBroadcaster appends one or more sinks to the list of sinks. The
|
||||
// broadcaster behavior will be affected by the properties of the sink.
|
||||
// Generally, the sink should accept all messages and deal with reliability on
|
||||
// its own. Use of EventQueue and RetryingSink should be used here.
|
||||
func NewBroadcaster(sinks ...Sink) *Broadcaster {
|
||||
b := Broadcaster{
|
||||
sinks: sinks,
|
||||
events: make(chan Event),
|
||||
adds: make(chan configureRequest),
|
||||
removes: make(chan configureRequest),
|
||||
shutdown: make(chan struct{}),
|
||||
closed: make(chan struct{}),
|
||||
}
|
||||
|
||||
// Start the broadcaster
|
||||
go b.run()
|
||||
|
||||
return &b
|
||||
}
|
||||
|
||||
// Write accepts an event to be dispatched to all sinks. This method will never
|
||||
// fail and should never block (hopefully!). The caller cedes the memory to the
|
||||
// broadcaster and should not modify it after calling write.
|
||||
func (b *Broadcaster) Write(event Event) error {
|
||||
select {
|
||||
case b.events <- event:
|
||||
case <-b.closed:
|
||||
return ErrSinkClosed
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add the sink to the broadcaster.
|
||||
//
|
||||
// The provided sink must be comparable with equality. Typically, this just
|
||||
// works with a regular pointer type.
|
||||
func (b *Broadcaster) Add(sink Sink) error {
|
||||
return b.configure(b.adds, sink)
|
||||
}
|
||||
|
||||
// Remove the provided sink.
|
||||
func (b *Broadcaster) Remove(sink Sink) error {
|
||||
return b.configure(b.removes, sink)
|
||||
}
|
||||
|
||||
type configureRequest struct {
|
||||
sink Sink
|
||||
response chan error
|
||||
}
|
||||
|
||||
func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error {
|
||||
response := make(chan error, 1)
|
||||
|
||||
for {
|
||||
select {
|
||||
case ch <- configureRequest{
|
||||
sink: sink,
|
||||
response: response}:
|
||||
ch = nil
|
||||
case err := <-response:
|
||||
return err
|
||||
case <-b.closed:
|
||||
return ErrSinkClosed
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close the broadcaster, ensuring that all messages are flushed to the
|
||||
// underlying sink before returning.
|
||||
func (b *Broadcaster) Close() error {
|
||||
b.once.Do(func() {
|
||||
close(b.shutdown)
|
||||
})
|
||||
|
||||
<-b.closed
|
||||
return nil
|
||||
}
|
||||
|
||||
// run is the main broadcast loop, started when the broadcaster is created.
|
||||
// Under normal conditions, it waits for events on the event channel. After
|
||||
// Close is called, this goroutine will exit.
|
||||
func (b *Broadcaster) run() {
|
||||
defer close(b.closed)
|
||||
remove := func(target Sink) {
|
||||
for i, sink := range b.sinks {
|
||||
if sink == target {
|
||||
b.sinks = append(b.sinks[:i], b.sinks[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-b.events:
|
||||
for _, sink := range b.sinks {
|
||||
if err := sink.Write(event); err != nil {
|
||||
if err == ErrSinkClosed {
|
||||
// remove closed sinks
|
||||
remove(sink)
|
||||
continue
|
||||
}
|
||||
logrus.WithField("event", event).WithField("events.sink", sink).WithError(err).
|
||||
Errorf("broadcaster: dropping event")
|
||||
}
|
||||
}
|
||||
case request := <-b.adds:
|
||||
// while we have to iterate for add/remove, common iteration for
|
||||
// send is faster against slice.
|
||||
|
||||
var found bool
|
||||
for _, sink := range b.sinks {
|
||||
if request.sink == sink {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
b.sinks = append(b.sinks, request.sink)
|
||||
}
|
||||
// b.sinks[request.sink] = struct{}{}
|
||||
request.response <- nil
|
||||
case request := <-b.removes:
|
||||
remove(request.sink)
|
||||
request.response <- nil
|
||||
case <-b.shutdown:
|
||||
// close all the underlying sinks
|
||||
for _, sink := range b.sinks {
|
||||
if err := sink.Close(); err != nil && err != ErrSinkClosed {
|
||||
logrus.WithField("events.sink", sink).WithError(err).
|
||||
Errorf("broadcaster: closing sink failed")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Broadcaster) String() string {
|
||||
// Serialize copy of this broadcaster without the sync.Once, to avoid
|
||||
// a data race.
|
||||
|
||||
b2 := map[string]interface{}{
|
||||
"sinks": b.sinks,
|
||||
"events": b.events,
|
||||
"adds": b.adds,
|
||||
"removes": b.removes,
|
||||
|
||||
"shutdown": b.shutdown,
|
||||
"closed": b.closed,
|
||||
}
|
||||
|
||||
return fmt.Sprint(b2)
|
||||
}
|
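The `broadcast.go` file deleted above implements the fan-out `Broadcaster` referenced in the README. A minimal, assumed usage sketch (the buffer sizes and event payload are illustrative only):

```go
package main

import events "github.com/docker/go-events"

func main() {
	// Fan a single Write out to two channel sinks via a Broadcaster.
	b := events.NewBroadcaster()
	ch1, ch2 := events.NewChannel(10), events.NewChannel(10)
	b.Add(ch1)
	b.Add(ch2)

	b.Write("deploy-finished") // delivered to every registered sink
	<-ch1.C                    // each channel receives its own copy
	<-ch2.C

	b.Remove(ch2) // sinks can be detached at runtime
	b.Close()     // flushes and closes the underlying sinks
}
```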
vendor/github.com/docker/go-events/channel.go (61 lines, generated, vendored)
@@ -1,61 +0,0 @@
package events

import (
	"fmt"
	"sync"
)

// Channel provides a sink that can be listened on. The writer and channel
// listener must operate in separate goroutines.
//
// Consumers should listen on Channel.C until Closed is closed.
type Channel struct {
	C chan Event

	closed chan struct{}
	once   sync.Once
}

// NewChannel returns a channel. If buffer is zero, the channel is
// unbuffered.
func NewChannel(buffer int) *Channel {
	return &Channel{
		C:      make(chan Event, buffer),
		closed: make(chan struct{}),
	}
}

// Done returns a channel that will always proceed once the sink is closed.
func (ch *Channel) Done() chan struct{} {
	return ch.closed
}

// Write the event to the channel. Must be called in a separate goroutine from
// the listener.
func (ch *Channel) Write(event Event) error {
	select {
	case ch.C <- event:
		return nil
	case <-ch.closed:
		return ErrSinkClosed
	}
}

// Close the channel sink.
func (ch *Channel) Close() error {
	ch.once.Do(func() {
		close(ch.closed)
	})

	return nil
}

func (ch *Channel) String() string {
	// Serialize a copy of the Channel that doesn't contain the sync.Once,
	// to avoid a data race.
	ch2 := map[string]interface{}{
		"C":      ch.C,
		"closed": ch.closed,
	}
	return fmt.Sprint(ch2)
}
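Per the doc comment on `Channel`, the writer and the listener must run in separate goroutines, and consumers should listen on `Channel.C` until the sink is closed. A small sketch of that pattern, with the payload string invented for illustration:

```go
package main

import (
	"fmt"

	events "github.com/docker/go-events"
)

func main() {
	sink := events.NewChannel(0) // unbuffered: Write blocks until the listener receives

	done := make(chan struct{})
	go func() {
		defer close(done)
		// Listen until the sink is closed, as the Channel docs describe.
		for {
			select {
			case e := <-sink.C:
				fmt.Println("got event:", e)
			case <-sink.Done():
				return
			}
		}
	}()

	sink.Write("hello") // runs in a separate goroutine from the listener
	sink.Close()
	<-done
}
```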
vendor/github.com/docker/go-events/errors.go (10 lines, generated, vendored)
@@ -1,10 +0,0 @@
package events

import "fmt"

var (
	// ErrSinkClosed is returned if a write is issued to a sink that has been
	// closed. If encountered, the error should be considered terminal and
	// retries will not be successful.
	ErrSinkClosed = fmt.Errorf("events: sink closed")
)
vendor/github.com/docker/go-events/event.go (15 lines, generated, vendored)
@@ -1,15 +0,0 @@
package events

// Event marks items that can be sent as events.
type Event interface{}

// Sink accepts and sends events.
type Sink interface {
	// Write an event to the Sink. If no error is returned, the caller will
	// assume that all events have been committed to the sink. If an error is
	// received, the caller may retry sending the event.
	Write(event Event) error

	// Close the sink, possibly waiting for pending events to flush.
	Close() error
}
vendor/github.com/docker/go-events/filter.go (52 lines, generated, vendored)
@@ -1,52 +0,0 @@
package events

// Matcher matches events.
type Matcher interface {
	Match(event Event) bool
}

// MatcherFunc implements matcher with just a function.
type MatcherFunc func(event Event) bool

// Match calls the wrapped function.
func (fn MatcherFunc) Match(event Event) bool {
	return fn(event)
}

// Filter provides an event sink that sends only events that are accepted by a
// Matcher. No methods on filter are goroutine safe.
type Filter struct {
	dst     Sink
	matcher Matcher
	closed  bool
}

// NewFilter returns a new filter that will send events to dst that return
// true for Matcher.
func NewFilter(dst Sink, matcher Matcher) Sink {
	return &Filter{dst: dst, matcher: matcher}
}

// Write an event to the filter.
func (f *Filter) Write(event Event) error {
	if f.closed {
		return ErrSinkClosed
	}

	if f.matcher.Match(event) {
		return f.dst.Write(event)
	}

	return nil
}

// Close the filter and allow no more events to pass through.
func (f *Filter) Close() error {
	// TODO(stevvooe): Not all sinks should have Close.
	if f.closed {
		return nil
	}

	f.closed = true
	return f.dst.Close()
}
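`MatcherFunc` and `NewFilter` above combine naturally: wrap a predicate, then place the filter in front of any sink. A hedged sketch, with the `error:` prefix convention invented purely for illustration:

```go
package main

import (
	"fmt"
	"strings"

	events "github.com/docker/go-events"
)

func main() {
	dst := events.NewChannel(10)

	// Only forward string events that look like errors; everything else is dropped.
	onlyErrors := events.MatcherFunc(func(e events.Event) bool {
		s, ok := e.(string)
		return ok && strings.HasPrefix(s, "error:")
	})
	filtered := events.NewFilter(dst, onlyErrors)

	filtered.Write("info: started")    // silently dropped by the matcher
	filtered.Write("error: disk full") // forwarded to dst
	fmt.Println(<-dst.C)
}
```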
vendor/github.com/docker/go-events/queue.go (111 lines, generated, vendored)
@@ -1,111 +0,0 @@
|
||||
package events
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"sync"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Queue accepts all messages into a queue for asynchronous consumption
|
||||
// by a sink. It is unbounded and thread safe but the sink must be reliable or
|
||||
// events will be dropped.
|
||||
type Queue struct {
|
||||
dst Sink
|
||||
events *list.List
|
||||
cond *sync.Cond
|
||||
mu sync.Mutex
|
||||
closed bool
|
||||
}
|
||||
|
||||
// NewQueue returns a queue to the provided Sink dst.
|
||||
func NewQueue(dst Sink) *Queue {
|
||||
eq := Queue{
|
||||
dst: dst,
|
||||
events: list.New(),
|
||||
}
|
||||
|
||||
eq.cond = sync.NewCond(&eq.mu)
|
||||
go eq.run()
|
||||
return &eq
|
||||
}
|
||||
|
||||
// Write accepts the events into the queue, only failing if the queue has
|
||||
// been closed.
|
||||
func (eq *Queue) Write(event Event) error {
|
||||
eq.mu.Lock()
|
||||
defer eq.mu.Unlock()
|
||||
|
||||
if eq.closed {
|
||||
return ErrSinkClosed
|
||||
}
|
||||
|
||||
eq.events.PushBack(event)
|
||||
eq.cond.Signal() // signal waiters
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close shutsdown the event queue, flushing
|
||||
func (eq *Queue) Close() error {
|
||||
eq.mu.Lock()
|
||||
defer eq.mu.Unlock()
|
||||
|
||||
if eq.closed {
|
||||
return nil
|
||||
}
|
||||
|
||||
// set closed flag
|
||||
eq.closed = true
|
||||
eq.cond.Signal() // signal flushes queue
|
||||
eq.cond.Wait() // wait for signal from last flush
|
||||
return eq.dst.Close()
|
||||
}
|
||||
|
||||
// run is the main goroutine to flush events to the target sink.
|
||||
func (eq *Queue) run() {
|
||||
for {
|
||||
event := eq.next()
|
||||
|
||||
if event == nil {
|
||||
return // nil block means event queue is closed.
|
||||
}
|
||||
|
||||
if err := eq.dst.Write(event); err != nil {
|
||||
// TODO(aaronl): Dropping events could be bad depending
|
||||
// on the application. We should have a way of
|
||||
// communicating this condition. However, logging
|
||||
// at a log level above debug may not be appropriate.
|
||||
// Eventually, go-events should not use logrus at all,
|
||||
// and should bubble up conditions like this through
|
||||
// error values.
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"event": event,
|
||||
"sink": eq.dst,
|
||||
}).WithError(err).Debug("eventqueue: dropped event")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// next encompasses the critical section of the run loop. When the queue is
|
||||
// empty, it will block on the condition. If new data arrives, it will wake
|
||||
// and return a block. When closed, a nil slice will be returned.
|
||||
func (eq *Queue) next() Event {
|
||||
eq.mu.Lock()
|
||||
defer eq.mu.Unlock()
|
||||
|
||||
for eq.events.Len() < 1 {
|
||||
if eq.closed {
|
||||
eq.cond.Broadcast()
|
||||
return nil
|
||||
}
|
||||
|
||||
eq.cond.Wait()
|
||||
}
|
||||
|
||||
front := eq.events.Front()
|
||||
block := front.Value.(Event)
|
||||
eq.events.Remove(front)
|
||||
|
||||
return block
|
||||
}
|
vendor/github.com/docker/go-events/retry.go (260 lines, generated, vendored)
@@ -1,260 +0,0 @@
|
||||
package events
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RetryingSink retries the write until success or an ErrSinkClosed is
|
||||
// returned. Underlying sink must have p > 0 of succeeding or the sink will
|
||||
// block. Retry is configured with a RetryStrategy. Concurrent calls to a
|
||||
// retrying sink are serialized through the sink, meaning that if one is
|
||||
// in-flight, another will not proceed.
|
||||
type RetryingSink struct {
|
||||
sink Sink
|
||||
strategy RetryStrategy
|
||||
closed chan struct{}
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// NewRetryingSink returns a sink that will retry writes to a sink, backing
|
||||
// off on failure. Parameters threshold and backoff adjust the behavior of the
|
||||
// circuit breaker.
|
||||
func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink {
|
||||
rs := &RetryingSink{
|
||||
sink: sink,
|
||||
strategy: strategy,
|
||||
closed: make(chan struct{}),
|
||||
}
|
||||
|
||||
return rs
|
||||
}
|
||||
|
||||
// Write attempts to flush the events to the downstream sink until it succeeds
|
||||
// or the sink is closed.
|
||||
func (rs *RetryingSink) Write(event Event) error {
|
||||
logger := logrus.WithField("event", event)
|
||||
|
||||
retry:
|
||||
select {
|
||||
case <-rs.closed:
|
||||
return ErrSinkClosed
|
||||
default:
|
||||
}
|
||||
|
||||
if backoff := rs.strategy.Proceed(event); backoff > 0 {
|
||||
select {
|
||||
case <-time.After(backoff):
|
||||
// TODO(stevvooe): This branch holds up the next try. Before, we
|
||||
// would simply break to the "retry" label and then possibly wait
|
||||
// again. However, this requires all retry strategies to have a
|
||||
// large probability of probing the sync for success, rather than
|
||||
// just backing off and sending the request.
|
||||
case <-rs.closed:
|
||||
return ErrSinkClosed
|
||||
}
|
||||
}
|
||||
|
||||
if err := rs.sink.Write(event); err != nil {
|
||||
if err == ErrSinkClosed {
|
||||
// terminal!
|
||||
return err
|
||||
}
|
||||
|
||||
logger := logger.WithError(err) // shadow!!
|
||||
|
||||
if rs.strategy.Failure(event, err) {
|
||||
logger.Errorf("retryingsink: dropped event")
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Errorf("retryingsink: error writing event, retrying")
|
||||
goto retry
|
||||
}
|
||||
|
||||
rs.strategy.Success(event)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the sink and the underlying sink.
|
||||
func (rs *RetryingSink) Close() error {
|
||||
rs.once.Do(func() {
|
||||
close(rs.closed)
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rs *RetryingSink) String() string {
|
||||
// Serialize a copy of the RetryingSink without the sync.Once, to avoid
|
||||
// a data race.
|
||||
rs2 := map[string]interface{}{
|
||||
"sink": rs.sink,
|
||||
"strategy": rs.strategy,
|
||||
"closed": rs.closed,
|
||||
}
|
||||
return fmt.Sprint(rs2)
|
||||
}
|
||||
|
||||
// RetryStrategy defines a strategy for retrying event sink writes.
|
||||
//
|
||||
// All methods should be goroutine safe.
|
||||
type RetryStrategy interface {
|
||||
// Proceed is called before every event send. If proceed returns a
|
||||
// positive, non-zero integer, the retryer will back off by the provided
|
||||
// duration.
|
||||
//
|
||||
// An event is provided, by may be ignored.
|
||||
Proceed(event Event) time.Duration
|
||||
|
||||
// Failure reports a failure to the strategy. If this method returns true,
|
||||
// the event should be dropped.
|
||||
Failure(event Event, err error) bool
|
||||
|
||||
// Success should be called when an event is sent successfully.
|
||||
Success(event Event)
|
||||
}
|
||||
|
||||
// Breaker implements a circuit breaker retry strategy.
|
||||
//
|
||||
// The current implementation never drops events.
|
||||
type Breaker struct {
|
||||
threshold int
|
||||
recent int
|
||||
last time.Time
|
||||
backoff time.Duration // time after which we retry after failure.
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
var _ RetryStrategy = &Breaker{}
|
||||
|
||||
// NewBreaker returns a breaker that will backoff after the threshold has been
|
||||
// tripped. A Breaker is thread safe and may be shared by many goroutines.
|
||||
func NewBreaker(threshold int, backoff time.Duration) *Breaker {
|
||||
return &Breaker{
|
||||
threshold: threshold,
|
||||
backoff: backoff,
|
||||
}
|
||||
}
|
||||
|
||||
// Proceed checks the failures against the threshold.
|
||||
func (b *Breaker) Proceed(event Event) time.Duration {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if b.recent < b.threshold {
|
||||
return 0
|
||||
}
|
||||
|
||||
return b.last.Add(b.backoff).Sub(time.Now())
|
||||
}
|
||||
|
||||
// Success resets the breaker.
|
||||
func (b *Breaker) Success(event Event) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
b.recent = 0
|
||||
b.last = time.Time{}
|
||||
}
|
||||
|
||||
// Failure records the failure and latest failure time.
|
||||
func (b *Breaker) Failure(event Event, err error) bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
b.recent++
|
||||
b.last = time.Now().UTC()
|
||||
return false // never drop events.
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultExponentialBackoffConfig provides a default configuration for
|
||||
// exponential backoff.
|
||||
DefaultExponentialBackoffConfig = ExponentialBackoffConfig{
|
||||
Base: time.Second,
|
||||
Factor: time.Second,
|
||||
Max: 20 * time.Second,
|
||||
}
|
||||
)
|
||||
|
||||
// ExponentialBackoffConfig configures backoff parameters.
|
||||
//
|
||||
// Note that these parameters operate on the upper bound for choosing a random
|
||||
// value. For example, at Base=1s, a random value in [0,1s) will be chosen for
|
||||
// the backoff value.
|
||||
type ExponentialBackoffConfig struct {
|
||||
// Base is the minimum bound for backing off after failure.
|
||||
Base time.Duration
|
||||
|
||||
// Factor sets the amount of time by which the backoff grows with each
|
||||
// failure.
|
||||
Factor time.Duration
|
||||
|
||||
// Max is the absolute maxiumum bound for a single backoff.
|
||||
Max time.Duration
|
||||
}
|
||||
|
||||
// ExponentialBackoff implements random backoff with exponentially increasing
|
||||
// bounds as the number consecutive failures increase.
|
||||
type ExponentialBackoff struct {
|
||||
failures uint64 // consecutive failure counter (needs to be 64-bit aligned)
|
||||
config ExponentialBackoffConfig
|
||||
}
|
||||
|
||||
// NewExponentialBackoff returns an exponential backoff strategy with the
|
||||
// desired config. If config is nil, the default is returned.
|
||||
func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff {
|
||||
return &ExponentialBackoff{
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
// Proceed returns the next randomly bound exponential backoff time.
|
||||
func (b *ExponentialBackoff) Proceed(event Event) time.Duration {
|
||||
return b.backoff(atomic.LoadUint64(&b.failures))
|
||||
}
|
||||
|
||||
// Success resets the failures counter.
|
||||
func (b *ExponentialBackoff) Success(event Event) {
|
||||
atomic.StoreUint64(&b.failures, 0)
|
||||
}
|
||||
|
||||
// Failure increments the failure counter.
|
||||
func (b *ExponentialBackoff) Failure(event Event, err error) bool {
|
||||
atomic.AddUint64(&b.failures, 1)
|
||||
return false
|
||||
}
|
||||
|
||||
// backoff calculates the amount of time to wait based on the number of
|
||||
// consecutive failures.
|
||||
func (b *ExponentialBackoff) backoff(failures uint64) time.Duration {
|
||||
if failures <= 0 {
|
||||
// proceed normally when there are no failures.
|
||||
return 0
|
||||
}
|
||||
|
||||
factor := b.config.Factor
|
||||
if factor <= 0 {
|
||||
factor = DefaultExponentialBackoffConfig.Factor
|
||||
}
|
||||
|
||||
backoff := b.config.Base + factor*time.Duration(1<<(failures-1))
|
||||
|
||||
max := b.config.Max
|
||||
if max <= 0 {
|
||||
max = DefaultExponentialBackoffConfig.Max
|
||||
}
|
||||
|
||||
if backoff > max || backoff < 0 {
|
||||
backoff = max
|
||||
}
|
||||
|
||||
// Choose a uniformly distributed value from [0, backoff).
|
||||
return time.Duration(rand.Int63n(int64(backoff)))
|
||||
}
|
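The deleted `retry.go` above defines `RetryStrategy` together with the `Breaker` and `ExponentialBackoff` implementations. A sketch of pairing a strategy with a sink follows; the thresholds and durations are illustrative, not recommendations from this repository:

```go
package main

import (
	"time"

	events "github.com/docker/go-events"
)

func main() {
	dst := events.NewChannel(100)

	// Either strategy satisfies RetryStrategy; both are shown for comparison.
	breaker := events.NewBreaker(5, time.Second) // back off 1s after 5 straight failures
	_ = events.NewExponentialBackoff(events.ExponentialBackoffConfig{
		Base:   100 * time.Millisecond,
		Factor: 100 * time.Millisecond,
		Max:    10 * time.Second,
	})

	rs := events.NewRetryingSink(dst, breaker)
	rs.Write("payload") // retried until the underlying sink accepts it
	rs.Close()
}
```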
vendor/github.com/gogo/protobuf/AUTHORS (15 lines, generated, vendored)
@@ -1,15 +0,0 @@
# This is the official list of GoGo authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS file, which
# lists people. For example, employees are listed in CONTRIBUTORS,
# but not in AUTHORS, because the employer holds the copyright.

# Names should be added to this file as one of
#     Organization's name
#     Individual's name <submission email address>
#     Individual's name <submission email address> <email2> <emailN>

# Please keep the list sorted.

Sendgrid, Inc
Vastech SA (PTY) LTD
Walter Schulze <awalterschulze@gmail.com>
vendor/github.com/gogo/protobuf/CONTRIBUTORS (23 lines, generated, vendored)
@@ -1,23 +0,0 @@
Anton Povarov <anton.povarov@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Clayton Coleman <ccoleman@redhat.com>
Denis Smirnov <denis.smirnov.91@gmail.com>
DongYun Kang <ceram1000@gmail.com>
Dwayne Schultz <dschultz@pivotal.io>
Georg Apitz <gapitz@pivotal.io>
Gustav Paul <gustav.paul@gmail.com>
Johan Brandhorst <johan.brandhorst@gmail.com>
John Shahid <jvshahid@gmail.com>
John Tuley <john@tuley.org>
Laurent <laurent@adyoulike.com>
Patrick Lee <patrick@dropbox.com>
Peter Edge <peter.edge@gmail.com>
Roger Johansson <rogeralsing@gmail.com>
Sam Nguyen <sam.nguyen@sendgrid.com>
Sergio Arbeo <serabe@gmail.com>
Stephen J Day <stephen.day@docker.com>
Tamir Duberstein <tamird@gmail.com>
Todd Eisenberger <teisenberger@dropbox.com>
Tormod Erevik Lea <tormodlea@gmail.com>
Vyacheslav Kim <kane@sendgrid.com>
Walter Schulze <awalterschulze@gmail.com>
Some files were not shown because too many files have changed in this diff.