diff --git a/Makefile b/Makefile index fb5f48a..cd54b22 100644 --- a/Makefile +++ b/Makefile @@ -25,7 +25,7 @@ endif IMAGE_TAG ?= $(GOLANG_VERSION) BUILDIMAGE ?= $(IMAGE):$(IMAGE_TAG)-devel -TARGETS := binary build all check fmt assert-fmt lint vet test +TARGETS := binary build all check fmt assert-fmt generate lint vet test DOCKER_TARGETS := $(patsubst %, docker-%, $(TARGETS)) .PHONY: $(TARGETS) $(DOCKER_TARGETS) @@ -54,6 +54,9 @@ assert-fmt: rm fmt.out; \ fi +generate: + go generate $(MODULE)/... + lint: # We use `go list -f '{{.Dir}}' $(MODULE)/...` to skip the `vendor` folder. go list -f '{{.Dir}}' $(MODULE)/... | xargs golint -set_exit_status diff --git a/docker/Dockerfile.devel b/docker/Dockerfile.devel index 953ca35..bd93c72 100644 --- a/docker/Dockerfile.devel +++ b/docker/Dockerfile.devel @@ -15,3 +15,4 @@ ARG GOLANG_VERSION=1.16 FROM golang:${GOLANG_VERSION} RUN go get -u golang.org/x/lint/golint +RUN go install github.com/matryer/moq@latest diff --git a/pkg/nvml/ci.go b/pkg/nvml/ci.go new file mode 100644 index 0000000..6a9d798 --- /dev/null +++ b/pkg/nvml/ci.go @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package nvml + +import ( + "github.com/NVIDIA/go-nvml/pkg/nvml" +) + +type nvmlComputeInstance nvml.ComputeInstance + +var _ ComputeInstance = (*nvmlComputeInstance)(nil) + +// GetInfo() returns info about a Compute Instance +func (ci nvmlComputeInstance) GetInfo() (ComputeInstanceInfo, Return) { + i, r := nvml.ComputeInstance(ci).GetInfo() + info := ComputeInstanceInfo{ + Device: nvmlDevice(i.Device), + GpuInstance: nvmlGpuInstance(i.GpuInstance), + Id: i.Id, + ProfileId: i.ProfileId, + Placement: ComputeInstancePlacement(i.Placement), + } + return info, Return(r) +} + +// Destroy() destroys a Compute Instance +func (ci nvmlComputeInstance) Destroy() Return { + r := nvml.ComputeInstance(ci).Destroy() + return Return(r) +} diff --git a/pkg/nvml/ci_mock.go b/pkg/nvml/ci_mock.go new file mode 100644 index 0000000..af323b3 --- /dev/null +++ b/pkg/nvml/ci_mock.go @@ -0,0 +1,102 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package nvml + +import ( + "sync" +) + +// Ensure, that ComputeInstanceMock does implement ComputeInstance. +// If this is not the case, regenerate this file with moq. +var _ ComputeInstance = &ComputeInstanceMock{} + +// ComputeInstanceMock is a mock implementation of ComputeInstance. +// +// func TestSomethingThatUsesComputeInstance(t *testing.T) { +// +// // make and configure a mocked ComputeInstance +// mockedComputeInstance := &ComputeInstanceMock{ +// DestroyFunc: func() Return { +// panic("mock out the Destroy method") +// }, +// GetInfoFunc: func() (ComputeInstanceInfo, Return) { +// panic("mock out the GetInfo method") +// }, +// } +// +// // use mockedComputeInstance in code that requires ComputeInstance +// // and then make assertions. 
+// +// } +type ComputeInstanceMock struct { + // DestroyFunc mocks the Destroy method. + DestroyFunc func() Return + + // GetInfoFunc mocks the GetInfo method. + GetInfoFunc func() (ComputeInstanceInfo, Return) + + // calls tracks calls to the methods. + calls struct { + // Destroy holds details about calls to the Destroy method. + Destroy []struct { + } + // GetInfo holds details about calls to the GetInfo method. + GetInfo []struct { + } + } + lockDestroy sync.RWMutex + lockGetInfo sync.RWMutex +} + +// Destroy calls DestroyFunc. +func (mock *ComputeInstanceMock) Destroy() Return { + if mock.DestroyFunc == nil { + panic("ComputeInstanceMock.DestroyFunc: method is nil but ComputeInstance.Destroy was just called") + } + callInfo := struct { + }{} + mock.lockDestroy.Lock() + mock.calls.Destroy = append(mock.calls.Destroy, callInfo) + mock.lockDestroy.Unlock() + return mock.DestroyFunc() +} + +// DestroyCalls gets all the calls that were made to Destroy. +// Check the length with: +// len(mockedComputeInstance.DestroyCalls()) +func (mock *ComputeInstanceMock) DestroyCalls() []struct { +} { + var calls []struct { + } + mock.lockDestroy.RLock() + calls = mock.calls.Destroy + mock.lockDestroy.RUnlock() + return calls +} + +// GetInfo calls GetInfoFunc. +func (mock *ComputeInstanceMock) GetInfo() (ComputeInstanceInfo, Return) { + if mock.GetInfoFunc == nil { + panic("ComputeInstanceMock.GetInfoFunc: method is nil but ComputeInstance.GetInfo was just called") + } + callInfo := struct { + }{} + mock.lockGetInfo.Lock() + mock.calls.GetInfo = append(mock.calls.GetInfo, callInfo) + mock.lockGetInfo.Unlock() + return mock.GetInfoFunc() +} + +// GetInfoCalls gets all the calls that were made to GetInfo. +// Check the length with: +// len(mockedComputeInstance.GetInfoCalls()) +func (mock *ComputeInstanceMock) GetInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetInfo.RLock() + calls = mock.calls.GetInfo + mock.lockGetInfo.RUnlock() + return calls +} diff --git a/pkg/nvml/consts.go b/pkg/nvml/consts.go new file mode 100644 index 0000000..2ded48b --- /dev/null +++ b/pkg/nvml/consts.go @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package nvml + +import ( + "github.com/NVIDIA/go-nvml/pkg/nvml" +) + +// Return constants +const ( + SUCCESS = Return(nvml.SUCCESS) + ERROR_UNINITIALIZED = Return(nvml.ERROR_UNINITIALIZED) + ERROR_INVALID_ARGUMENT = Return(nvml.ERROR_INVALID_ARGUMENT) + ERROR_NOT_SUPPORTED = Return(nvml.ERROR_NOT_SUPPORTED) + ERROR_NO_PERMISSION = Return(nvml.ERROR_NO_PERMISSION) + ERROR_ALREADY_INITIALIZED = Return(nvml.ERROR_ALREADY_INITIALIZED) + ERROR_NOT_FOUND = Return(nvml.ERROR_NOT_FOUND) + ERROR_INSUFFICIENT_SIZE = Return(nvml.ERROR_INSUFFICIENT_SIZE) + ERROR_INSUFFICIENT_POWER = Return(nvml.ERROR_INSUFFICIENT_POWER) + ERROR_DRIVER_NOT_LOADED = Return(nvml.ERROR_DRIVER_NOT_LOADED) + ERROR_TIMEOUT = Return(nvml.ERROR_TIMEOUT) + ERROR_IRQ_ISSUE = Return(nvml.ERROR_IRQ_ISSUE) + ERROR_LIBRARY_NOT_FOUND = Return(nvml.ERROR_LIBRARY_NOT_FOUND) + ERROR_FUNCTION_NOT_FOUND = Return(nvml.ERROR_FUNCTION_NOT_FOUND) + ERROR_CORRUPTED_INFOROM = Return(nvml.ERROR_CORRUPTED_INFOROM) + ERROR_GPU_IS_LOST = Return(nvml.ERROR_GPU_IS_LOST) + ERROR_RESET_REQUIRED = Return(nvml.ERROR_RESET_REQUIRED) + ERROR_OPERATING_SYSTEM = Return(nvml.ERROR_OPERATING_SYSTEM) + ERROR_LIB_RM_VERSION_MISMATCH = Return(nvml.ERROR_LIB_RM_VERSION_MISMATCH) + ERROR_IN_USE = Return(nvml.ERROR_IN_USE) + ERROR_MEMORY = Return(nvml.ERROR_MEMORY) + ERROR_NO_DATA = Return(nvml.ERROR_NO_DATA) + ERROR_VGPU_ECC_NOT_SUPPORTED = Return(nvml.ERROR_VGPU_ECC_NOT_SUPPORTED) + ERROR_INSUFFICIENT_RESOURCES = Return(nvml.ERROR_INSUFFICIENT_RESOURCES) + ERROR_UNKNOWN = Return(nvml.ERROR_UNKNOWN) +) + +// MIG Mode constants +const ( + DEVICE_MIG_ENABLE = nvml.DEVICE_MIG_ENABLE + DEVICE_MIG_DISABLE = nvml.DEVICE_MIG_DISABLE +) + +// GPU Instance Profiles +const ( + GPU_INSTANCE_PROFILE_1_SLICE = nvml.GPU_INSTANCE_PROFILE_1_SLICE + GPU_INSTANCE_PROFILE_2_SLICE = nvml.GPU_INSTANCE_PROFILE_2_SLICE + GPU_INSTANCE_PROFILE_3_SLICE = nvml.GPU_INSTANCE_PROFILE_3_SLICE + GPU_INSTANCE_PROFILE_4_SLICE = nvml.GPU_INSTANCE_PROFILE_4_SLICE + GPU_INSTANCE_PROFILE_6_SLICE = nvml.GPU_INSTANCE_PROFILE_6_SLICE + GPU_INSTANCE_PROFILE_7_SLICE = nvml.GPU_INSTANCE_PROFILE_7_SLICE + GPU_INSTANCE_PROFILE_8_SLICE = nvml.GPU_INSTANCE_PROFILE_8_SLICE + GPU_INSTANCE_PROFILE_1_SLICE_REV1 = nvml.GPU_INSTANCE_PROFILE_1_SLICE_REV1 + GPU_INSTANCE_PROFILE_COUNT = nvml.GPU_INSTANCE_PROFILE_COUNT +) + +// Compute Instance Profiles +const ( + COMPUTE_INSTANCE_PROFILE_1_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_1_SLICE + COMPUTE_INSTANCE_PROFILE_2_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_2_SLICE + COMPUTE_INSTANCE_PROFILE_3_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_3_SLICE + COMPUTE_INSTANCE_PROFILE_4_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_4_SLICE + COMPUTE_INSTANCE_PROFILE_6_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_6_SLICE + COMPUTE_INSTANCE_PROFILE_7_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_7_SLICE + COMPUTE_INSTANCE_PROFILE_8_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_8_SLICE + COMPUTE_INSTANCE_PROFILE_COUNT = nvml.COMPUTE_INSTANCE_PROFILE_COUNT +) + +// Compute Instance Engine Profiles +const ( + COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED = nvml.COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED + COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT = nvml.COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT +) diff --git a/pkg/nvml/device.go b/pkg/nvml/device.go new file mode 100644 index 0000000..9b0caf8 --- /dev/null +++ b/pkg/nvml/device.go @@ -0,0 +1,117 @@ +/** +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package nvml + +import "github.com/NVIDIA/go-nvml/pkg/nvml" + +type nvmlDevice nvml.Device + +var _ Device = (*nvmlDevice)(nil) + +// GetIndex returns the index of a Device +func (d nvmlDevice) GetIndex() (int, Return) { + i, r := nvml.Device(d).GetIndex() + return i, Return(r) +} + +// GetPciInfo returns the PCI info of a Device +func (d nvmlDevice) GetPciInfo() (PciInfo, Return) { + p, r := nvml.Device(d).GetPciInfo() + return PciInfo(p), Return(r) +} + +// GetMemoryInfo returns the memory info of a Device +func (d nvmlDevice) GetMemoryInfo() (Memory, Return) { + p, r := nvml.Device(d).GetMemoryInfo() + return Memory(p), Return(r) +} + +// GetUUID returns the UUID of a Device +func (d nvmlDevice) GetUUID() (string, Return) { + u, r := nvml.Device(d).GetUUID() + return u, Return(r) +} + +// GetMinorNumber returns the minor number of a Device +func (d nvmlDevice) GetMinorNumber() (int, Return) { + m, r := nvml.Device(d).GetMinorNumber() + return m, Return(r) +} + +// IsMigDeviceHandle returns whether a Device is a MIG device or not +func (d nvmlDevice) IsMigDeviceHandle() (bool, Return) { + b, r := nvml.Device(d).IsMigDeviceHandle() + return b, Return(r) +} + +// GetDeviceHandleFromMigDeviceHandle returns the parent Device of a MIG device +func (d nvmlDevice) GetDeviceHandleFromMigDeviceHandle() (Device, Return) { + p, r := nvml.Device(d).GetDeviceHandleFromMigDeviceHandle() + return nvmlDevice(p), Return(r) +} + +// SetMigMode sets the MIG mode of a Device +func (d nvmlDevice) SetMigMode(mode int) (Return, Return) { + r1, r2 := nvml.Device(d).SetMigMode(mode) + return Return(r1), Return(r2) +} + +// GetMigMode returns the MIG mode of a Device +func (d nvmlDevice) GetMigMode() (int, int, Return) { + s1, s2, r := nvml.Device(d).GetMigMode() + return s1, s2, Return(r) +} + +// GetGpuInstanceProfileInfo returns the profile info of a GPU Instance +func (d nvmlDevice) GetGpuInstanceProfileInfo(profile int) (GpuInstanceProfileInfo, Return) { + p, r := nvml.Device(d).GetGpuInstanceProfileInfo(profile) + return GpuInstanceProfileInfo(p), Return(r) +} + +// GetGpuInstances returns the set of GPU Instances associated with a Device +func (d nvmlDevice) GetGpuInstances(info *GpuInstanceProfileInfo) ([]GpuInstance, Return) { + nvmlGis, r := nvml.Device(d).GetGpuInstances((*nvml.GpuInstanceProfileInfo)(info)) + var gis []GpuInstance + for _, gi := range nvmlGis { + gis = append(gis, nvmlGpuInstance(gi)) + } + return gis, Return(r) +} + +// GetMaxMigDeviceCount returns the maximum number of MIG devices that can be created on a Device +func (d nvmlDevice) GetMaxMigDeviceCount() (int, Return) { + m, r := nvml.Device(d).GetMaxMigDeviceCount() + return m, Return(r) +} + +// GetMigDeviceHandleByIndex returns the handle to a MIG device given its index +func (d nvmlDevice) GetMigDeviceHandleByIndex(Index int) (Device, Return) { + h, r := nvml.Device(d).GetMigDeviceHandleByIndex(Index) + return nvmlDevice(h), Return(r) +} + +// GetGpuInstanceId returns the GPU 
Instance ID of a MIG device +func (d nvmlDevice) GetGpuInstanceId() (int, Return) { + gi, r := nvml.Device(d).GetGpuInstanceId() + return gi, Return(r) +} + +// GetComputeInstanceId returns the Compute Instance ID of a MIG device +func (d nvmlDevice) GetComputeInstanceId() (int, Return) { + ci, r := nvml.Device(d).GetComputeInstanceId() + return ci, Return(r) +} diff --git a/pkg/nvml/device_mock.go b/pkg/nvml/device_mock.go new file mode 100644 index 0000000..a25898e --- /dev/null +++ b/pkg/nvml/device_mock.go @@ -0,0 +1,598 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package nvml + +import ( + "sync" +) + +// Ensure, that DeviceMock does implement Device. +// If this is not the case, regenerate this file with moq. +var _ Device = &DeviceMock{} + +// DeviceMock is a mock implementation of Device. +// +// func TestSomethingThatUsesDevice(t *testing.T) { +// +// // make and configure a mocked Device +// mockedDevice := &DeviceMock{ +// GetComputeInstanceIdFunc: func() (int, Return) { +// panic("mock out the GetComputeInstanceId method") +// }, +// GetDeviceHandleFromMigDeviceHandleFunc: func() (Device, Return) { +// panic("mock out the GetDeviceHandleFromMigDeviceHandle method") +// }, +// GetGpuInstanceIdFunc: func() (int, Return) { +// panic("mock out the GetGpuInstanceId method") +// }, +// GetGpuInstanceProfileInfoFunc: func(Profile int) (GpuInstanceProfileInfo, Return) { +// panic("mock out the GetGpuInstanceProfileInfo method") +// }, +// GetGpuInstancesFunc: func(Info *GpuInstanceProfileInfo) ([]GpuInstance, Return) { +// panic("mock out the GetGpuInstances method") +// }, +// GetIndexFunc: func() (int, Return) { +// panic("mock out the GetIndex method") +// }, +// GetMaxMigDeviceCountFunc: func() (int, Return) { +// panic("mock out the GetMaxMigDeviceCount method") +// }, +// GetMemoryInfoFunc: func() (Memory, Return) { +// panic("mock out the GetMemoryInfo method") +// }, +// GetMigDeviceHandleByIndexFunc: func(Index int) (Device, Return) { +// panic("mock out the GetMigDeviceHandleByIndex method") +// }, +// GetMigModeFunc: func() (int, int, Return) { +// panic("mock out the GetMigMode method") +// }, +// GetMinorNumberFunc: func() (int, Return) { +// panic("mock out the GetMinorNumber method") +// }, +// GetPciInfoFunc: func() (PciInfo, Return) { +// panic("mock out the GetPciInfo method") +// }, +// GetUUIDFunc: func() (string, Return) { +// panic("mock out the GetUUID method") +// }, +// IsMigDeviceHandleFunc: func() (bool, Return) { +// panic("mock out the IsMigDeviceHandle method") +// }, +// SetMigModeFunc: func(Mode int) (Return, Return) { +// panic("mock out the SetMigMode method") +// }, +// } +// +// // use mockedDevice in code that requires Device +// // and then make assertions. +// +// } +type DeviceMock struct { + // GetComputeInstanceIdFunc mocks the GetComputeInstanceId method. + GetComputeInstanceIdFunc func() (int, Return) + + // GetDeviceHandleFromMigDeviceHandleFunc mocks the GetDeviceHandleFromMigDeviceHandle method. + GetDeviceHandleFromMigDeviceHandleFunc func() (Device, Return) + + // GetGpuInstanceIdFunc mocks the GetGpuInstanceId method. + GetGpuInstanceIdFunc func() (int, Return) + + // GetGpuInstanceProfileInfoFunc mocks the GetGpuInstanceProfileInfo method. + GetGpuInstanceProfileInfoFunc func(Profile int) (GpuInstanceProfileInfo, Return) + + // GetGpuInstancesFunc mocks the GetGpuInstances method. + GetGpuInstancesFunc func(Info *GpuInstanceProfileInfo) ([]GpuInstance, Return) + + // GetIndexFunc mocks the GetIndex method. 
+ GetIndexFunc func() (int, Return) + + // GetMaxMigDeviceCountFunc mocks the GetMaxMigDeviceCount method. + GetMaxMigDeviceCountFunc func() (int, Return) + + // GetMemoryInfoFunc mocks the GetMemoryInfo method. + GetMemoryInfoFunc func() (Memory, Return) + + // GetMigDeviceHandleByIndexFunc mocks the GetMigDeviceHandleByIndex method. + GetMigDeviceHandleByIndexFunc func(Index int) (Device, Return) + + // GetMigModeFunc mocks the GetMigMode method. + GetMigModeFunc func() (int, int, Return) + + // GetMinorNumberFunc mocks the GetMinorNumber method. + GetMinorNumberFunc func() (int, Return) + + // GetPciInfoFunc mocks the GetPciInfo method. + GetPciInfoFunc func() (PciInfo, Return) + + // GetUUIDFunc mocks the GetUUID method. + GetUUIDFunc func() (string, Return) + + // IsMigDeviceHandleFunc mocks the IsMigDeviceHandle method. + IsMigDeviceHandleFunc func() (bool, Return) + + // SetMigModeFunc mocks the SetMigMode method. + SetMigModeFunc func(Mode int) (Return, Return) + + // calls tracks calls to the methods. + calls struct { + // GetComputeInstanceId holds details about calls to the GetComputeInstanceId method. + GetComputeInstanceId []struct { + } + // GetDeviceHandleFromMigDeviceHandle holds details about calls to the GetDeviceHandleFromMigDeviceHandle method. + GetDeviceHandleFromMigDeviceHandle []struct { + } + // GetGpuInstanceId holds details about calls to the GetGpuInstanceId method. + GetGpuInstanceId []struct { + } + // GetGpuInstanceProfileInfo holds details about calls to the GetGpuInstanceProfileInfo method. + GetGpuInstanceProfileInfo []struct { + // Profile is the Profile argument value. + Profile int + } + // GetGpuInstances holds details about calls to the GetGpuInstances method. + GetGpuInstances []struct { + // Info is the Info argument value. + Info *GpuInstanceProfileInfo + } + // GetIndex holds details about calls to the GetIndex method. + GetIndex []struct { + } + // GetMaxMigDeviceCount holds details about calls to the GetMaxMigDeviceCount method. + GetMaxMigDeviceCount []struct { + } + // GetMemoryInfo holds details about calls to the GetMemoryInfo method. + GetMemoryInfo []struct { + } + // GetMigDeviceHandleByIndex holds details about calls to the GetMigDeviceHandleByIndex method. + GetMigDeviceHandleByIndex []struct { + // Index is the Index argument value. + Index int + } + // GetMigMode holds details about calls to the GetMigMode method. + GetMigMode []struct { + } + // GetMinorNumber holds details about calls to the GetMinorNumber method. + GetMinorNumber []struct { + } + // GetPciInfo holds details about calls to the GetPciInfo method. + GetPciInfo []struct { + } + // GetUUID holds details about calls to the GetUUID method. + GetUUID []struct { + } + // IsMigDeviceHandle holds details about calls to the IsMigDeviceHandle method. + IsMigDeviceHandle []struct { + } + // SetMigMode holds details about calls to the SetMigMode method. + SetMigMode []struct { + // Mode is the Mode argument value. 
+ Mode int + } + } + lockGetComputeInstanceId sync.RWMutex + lockGetDeviceHandleFromMigDeviceHandle sync.RWMutex + lockGetGpuInstanceId sync.RWMutex + lockGetGpuInstanceProfileInfo sync.RWMutex + lockGetGpuInstances sync.RWMutex + lockGetIndex sync.RWMutex + lockGetMaxMigDeviceCount sync.RWMutex + lockGetMemoryInfo sync.RWMutex + lockGetMigDeviceHandleByIndex sync.RWMutex + lockGetMigMode sync.RWMutex + lockGetMinorNumber sync.RWMutex + lockGetPciInfo sync.RWMutex + lockGetUUID sync.RWMutex + lockIsMigDeviceHandle sync.RWMutex + lockSetMigMode sync.RWMutex +} + +// GetComputeInstanceId calls GetComputeInstanceIdFunc. +func (mock *DeviceMock) GetComputeInstanceId() (int, Return) { + if mock.GetComputeInstanceIdFunc == nil { + panic("DeviceMock.GetComputeInstanceIdFunc: method is nil but Device.GetComputeInstanceId was just called") + } + callInfo := struct { + }{} + mock.lockGetComputeInstanceId.Lock() + mock.calls.GetComputeInstanceId = append(mock.calls.GetComputeInstanceId, callInfo) + mock.lockGetComputeInstanceId.Unlock() + return mock.GetComputeInstanceIdFunc() +} + +// GetComputeInstanceIdCalls gets all the calls that were made to GetComputeInstanceId. +// Check the length with: +// len(mockedDevice.GetComputeInstanceIdCalls()) +func (mock *DeviceMock) GetComputeInstanceIdCalls() []struct { +} { + var calls []struct { + } + mock.lockGetComputeInstanceId.RLock() + calls = mock.calls.GetComputeInstanceId + mock.lockGetComputeInstanceId.RUnlock() + return calls +} + +// GetDeviceHandleFromMigDeviceHandle calls GetDeviceHandleFromMigDeviceHandleFunc. +func (mock *DeviceMock) GetDeviceHandleFromMigDeviceHandle() (Device, Return) { + if mock.GetDeviceHandleFromMigDeviceHandleFunc == nil { + panic("DeviceMock.GetDeviceHandleFromMigDeviceHandleFunc: method is nil but Device.GetDeviceHandleFromMigDeviceHandle was just called") + } + callInfo := struct { + }{} + mock.lockGetDeviceHandleFromMigDeviceHandle.Lock() + mock.calls.GetDeviceHandleFromMigDeviceHandle = append(mock.calls.GetDeviceHandleFromMigDeviceHandle, callInfo) + mock.lockGetDeviceHandleFromMigDeviceHandle.Unlock() + return mock.GetDeviceHandleFromMigDeviceHandleFunc() +} + +// GetDeviceHandleFromMigDeviceHandleCalls gets all the calls that were made to GetDeviceHandleFromMigDeviceHandle. +// Check the length with: +// len(mockedDevice.GetDeviceHandleFromMigDeviceHandleCalls()) +func (mock *DeviceMock) GetDeviceHandleFromMigDeviceHandleCalls() []struct { +} { + var calls []struct { + } + mock.lockGetDeviceHandleFromMigDeviceHandle.RLock() + calls = mock.calls.GetDeviceHandleFromMigDeviceHandle + mock.lockGetDeviceHandleFromMigDeviceHandle.RUnlock() + return calls +} + +// GetGpuInstanceId calls GetGpuInstanceIdFunc. +func (mock *DeviceMock) GetGpuInstanceId() (int, Return) { + if mock.GetGpuInstanceIdFunc == nil { + panic("DeviceMock.GetGpuInstanceIdFunc: method is nil but Device.GetGpuInstanceId was just called") + } + callInfo := struct { + }{} + mock.lockGetGpuInstanceId.Lock() + mock.calls.GetGpuInstanceId = append(mock.calls.GetGpuInstanceId, callInfo) + mock.lockGetGpuInstanceId.Unlock() + return mock.GetGpuInstanceIdFunc() +} + +// GetGpuInstanceIdCalls gets all the calls that were made to GetGpuInstanceId. 
+// Check the length with: +// len(mockedDevice.GetGpuInstanceIdCalls()) +func (mock *DeviceMock) GetGpuInstanceIdCalls() []struct { +} { + var calls []struct { + } + mock.lockGetGpuInstanceId.RLock() + calls = mock.calls.GetGpuInstanceId + mock.lockGetGpuInstanceId.RUnlock() + return calls +} + +// GetGpuInstanceProfileInfo calls GetGpuInstanceProfileInfoFunc. +func (mock *DeviceMock) GetGpuInstanceProfileInfo(Profile int) (GpuInstanceProfileInfo, Return) { + if mock.GetGpuInstanceProfileInfoFunc == nil { + panic("DeviceMock.GetGpuInstanceProfileInfoFunc: method is nil but Device.GetGpuInstanceProfileInfo was just called") + } + callInfo := struct { + Profile int + }{ + Profile: Profile, + } + mock.lockGetGpuInstanceProfileInfo.Lock() + mock.calls.GetGpuInstanceProfileInfo = append(mock.calls.GetGpuInstanceProfileInfo, callInfo) + mock.lockGetGpuInstanceProfileInfo.Unlock() + return mock.GetGpuInstanceProfileInfoFunc(Profile) +} + +// GetGpuInstanceProfileInfoCalls gets all the calls that were made to GetGpuInstanceProfileInfo. +// Check the length with: +// len(mockedDevice.GetGpuInstanceProfileInfoCalls()) +func (mock *DeviceMock) GetGpuInstanceProfileInfoCalls() []struct { + Profile int +} { + var calls []struct { + Profile int + } + mock.lockGetGpuInstanceProfileInfo.RLock() + calls = mock.calls.GetGpuInstanceProfileInfo + mock.lockGetGpuInstanceProfileInfo.RUnlock() + return calls +} + +// GetGpuInstances calls GetGpuInstancesFunc. +func (mock *DeviceMock) GetGpuInstances(Info *GpuInstanceProfileInfo) ([]GpuInstance, Return) { + if mock.GetGpuInstancesFunc == nil { + panic("DeviceMock.GetGpuInstancesFunc: method is nil but Device.GetGpuInstances was just called") + } + callInfo := struct { + Info *GpuInstanceProfileInfo + }{ + Info: Info, + } + mock.lockGetGpuInstances.Lock() + mock.calls.GetGpuInstances = append(mock.calls.GetGpuInstances, callInfo) + mock.lockGetGpuInstances.Unlock() + return mock.GetGpuInstancesFunc(Info) +} + +// GetGpuInstancesCalls gets all the calls that were made to GetGpuInstances. +// Check the length with: +// len(mockedDevice.GetGpuInstancesCalls()) +func (mock *DeviceMock) GetGpuInstancesCalls() []struct { + Info *GpuInstanceProfileInfo +} { + var calls []struct { + Info *GpuInstanceProfileInfo + } + mock.lockGetGpuInstances.RLock() + calls = mock.calls.GetGpuInstances + mock.lockGetGpuInstances.RUnlock() + return calls +} + +// GetIndex calls GetIndexFunc. +func (mock *DeviceMock) GetIndex() (int, Return) { + if mock.GetIndexFunc == nil { + panic("DeviceMock.GetIndexFunc: method is nil but Device.GetIndex was just called") + } + callInfo := struct { + }{} + mock.lockGetIndex.Lock() + mock.calls.GetIndex = append(mock.calls.GetIndex, callInfo) + mock.lockGetIndex.Unlock() + return mock.GetIndexFunc() +} + +// GetIndexCalls gets all the calls that were made to GetIndex. +// Check the length with: +// len(mockedDevice.GetIndexCalls()) +func (mock *DeviceMock) GetIndexCalls() []struct { +} { + var calls []struct { + } + mock.lockGetIndex.RLock() + calls = mock.calls.GetIndex + mock.lockGetIndex.RUnlock() + return calls +} + +// GetMaxMigDeviceCount calls GetMaxMigDeviceCountFunc. 
+func (mock *DeviceMock) GetMaxMigDeviceCount() (int, Return) { + if mock.GetMaxMigDeviceCountFunc == nil { + panic("DeviceMock.GetMaxMigDeviceCountFunc: method is nil but Device.GetMaxMigDeviceCount was just called") + } + callInfo := struct { + }{} + mock.lockGetMaxMigDeviceCount.Lock() + mock.calls.GetMaxMigDeviceCount = append(mock.calls.GetMaxMigDeviceCount, callInfo) + mock.lockGetMaxMigDeviceCount.Unlock() + return mock.GetMaxMigDeviceCountFunc() +} + +// GetMaxMigDeviceCountCalls gets all the calls that were made to GetMaxMigDeviceCount. +// Check the length with: +// len(mockedDevice.GetMaxMigDeviceCountCalls()) +func (mock *DeviceMock) GetMaxMigDeviceCountCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMaxMigDeviceCount.RLock() + calls = mock.calls.GetMaxMigDeviceCount + mock.lockGetMaxMigDeviceCount.RUnlock() + return calls +} + +// GetMemoryInfo calls GetMemoryInfoFunc. +func (mock *DeviceMock) GetMemoryInfo() (Memory, Return) { + if mock.GetMemoryInfoFunc == nil { + panic("DeviceMock.GetMemoryInfoFunc: method is nil but Device.GetMemoryInfo was just called") + } + callInfo := struct { + }{} + mock.lockGetMemoryInfo.Lock() + mock.calls.GetMemoryInfo = append(mock.calls.GetMemoryInfo, callInfo) + mock.lockGetMemoryInfo.Unlock() + return mock.GetMemoryInfoFunc() +} + +// GetMemoryInfoCalls gets all the calls that were made to GetMemoryInfo. +// Check the length with: +// len(mockedDevice.GetMemoryInfoCalls()) +func (mock *DeviceMock) GetMemoryInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMemoryInfo.RLock() + calls = mock.calls.GetMemoryInfo + mock.lockGetMemoryInfo.RUnlock() + return calls +} + +// GetMigDeviceHandleByIndex calls GetMigDeviceHandleByIndexFunc. +func (mock *DeviceMock) GetMigDeviceHandleByIndex(Index int) (Device, Return) { + if mock.GetMigDeviceHandleByIndexFunc == nil { + panic("DeviceMock.GetMigDeviceHandleByIndexFunc: method is nil but Device.GetMigDeviceHandleByIndex was just called") + } + callInfo := struct { + Index int + }{ + Index: Index, + } + mock.lockGetMigDeviceHandleByIndex.Lock() + mock.calls.GetMigDeviceHandleByIndex = append(mock.calls.GetMigDeviceHandleByIndex, callInfo) + mock.lockGetMigDeviceHandleByIndex.Unlock() + return mock.GetMigDeviceHandleByIndexFunc(Index) +} + +// GetMigDeviceHandleByIndexCalls gets all the calls that were made to GetMigDeviceHandleByIndex. +// Check the length with: +// len(mockedDevice.GetMigDeviceHandleByIndexCalls()) +func (mock *DeviceMock) GetMigDeviceHandleByIndexCalls() []struct { + Index int +} { + var calls []struct { + Index int + } + mock.lockGetMigDeviceHandleByIndex.RLock() + calls = mock.calls.GetMigDeviceHandleByIndex + mock.lockGetMigDeviceHandleByIndex.RUnlock() + return calls +} + +// GetMigMode calls GetMigModeFunc. +func (mock *DeviceMock) GetMigMode() (int, int, Return) { + if mock.GetMigModeFunc == nil { + panic("DeviceMock.GetMigModeFunc: method is nil but Device.GetMigMode was just called") + } + callInfo := struct { + }{} + mock.lockGetMigMode.Lock() + mock.calls.GetMigMode = append(mock.calls.GetMigMode, callInfo) + mock.lockGetMigMode.Unlock() + return mock.GetMigModeFunc() +} + +// GetMigModeCalls gets all the calls that were made to GetMigMode. 
+// Check the length with: +// len(mockedDevice.GetMigModeCalls()) +func (mock *DeviceMock) GetMigModeCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMigMode.RLock() + calls = mock.calls.GetMigMode + mock.lockGetMigMode.RUnlock() + return calls +} + +// GetMinorNumber calls GetMinorNumberFunc. +func (mock *DeviceMock) GetMinorNumber() (int, Return) { + if mock.GetMinorNumberFunc == nil { + panic("DeviceMock.GetMinorNumberFunc: method is nil but Device.GetMinorNumber was just called") + } + callInfo := struct { + }{} + mock.lockGetMinorNumber.Lock() + mock.calls.GetMinorNumber = append(mock.calls.GetMinorNumber, callInfo) + mock.lockGetMinorNumber.Unlock() + return mock.GetMinorNumberFunc() +} + +// GetMinorNumberCalls gets all the calls that were made to GetMinorNumber. +// Check the length with: +// len(mockedDevice.GetMinorNumberCalls()) +func (mock *DeviceMock) GetMinorNumberCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMinorNumber.RLock() + calls = mock.calls.GetMinorNumber + mock.lockGetMinorNumber.RUnlock() + return calls +} + +// GetPciInfo calls GetPciInfoFunc. +func (mock *DeviceMock) GetPciInfo() (PciInfo, Return) { + if mock.GetPciInfoFunc == nil { + panic("DeviceMock.GetPciInfoFunc: method is nil but Device.GetPciInfo was just called") + } + callInfo := struct { + }{} + mock.lockGetPciInfo.Lock() + mock.calls.GetPciInfo = append(mock.calls.GetPciInfo, callInfo) + mock.lockGetPciInfo.Unlock() + return mock.GetPciInfoFunc() +} + +// GetPciInfoCalls gets all the calls that were made to GetPciInfo. +// Check the length with: +// len(mockedDevice.GetPciInfoCalls()) +func (mock *DeviceMock) GetPciInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetPciInfo.RLock() + calls = mock.calls.GetPciInfo + mock.lockGetPciInfo.RUnlock() + return calls +} + +// GetUUID calls GetUUIDFunc. +func (mock *DeviceMock) GetUUID() (string, Return) { + if mock.GetUUIDFunc == nil { + panic("DeviceMock.GetUUIDFunc: method is nil but Device.GetUUID was just called") + } + callInfo := struct { + }{} + mock.lockGetUUID.Lock() + mock.calls.GetUUID = append(mock.calls.GetUUID, callInfo) + mock.lockGetUUID.Unlock() + return mock.GetUUIDFunc() +} + +// GetUUIDCalls gets all the calls that were made to GetUUID. +// Check the length with: +// len(mockedDevice.GetUUIDCalls()) +func (mock *DeviceMock) GetUUIDCalls() []struct { +} { + var calls []struct { + } + mock.lockGetUUID.RLock() + calls = mock.calls.GetUUID + mock.lockGetUUID.RUnlock() + return calls +} + +// IsMigDeviceHandle calls IsMigDeviceHandleFunc. +func (mock *DeviceMock) IsMigDeviceHandle() (bool, Return) { + if mock.IsMigDeviceHandleFunc == nil { + panic("DeviceMock.IsMigDeviceHandleFunc: method is nil but Device.IsMigDeviceHandle was just called") + } + callInfo := struct { + }{} + mock.lockIsMigDeviceHandle.Lock() + mock.calls.IsMigDeviceHandle = append(mock.calls.IsMigDeviceHandle, callInfo) + mock.lockIsMigDeviceHandle.Unlock() + return mock.IsMigDeviceHandleFunc() +} + +// IsMigDeviceHandleCalls gets all the calls that were made to IsMigDeviceHandle. +// Check the length with: +// len(mockedDevice.IsMigDeviceHandleCalls()) +func (mock *DeviceMock) IsMigDeviceHandleCalls() []struct { +} { + var calls []struct { + } + mock.lockIsMigDeviceHandle.RLock() + calls = mock.calls.IsMigDeviceHandle + mock.lockIsMigDeviceHandle.RUnlock() + return calls +} + +// SetMigMode calls SetMigModeFunc. 
+func (mock *DeviceMock) SetMigMode(Mode int) (Return, Return) {
+	if mock.SetMigModeFunc == nil {
+		panic("DeviceMock.SetMigModeFunc: method is nil but Device.SetMigMode was just called")
+	}
+	callInfo := struct {
+		Mode int
+	}{
+		Mode: Mode,
+	}
+	mock.lockSetMigMode.Lock()
+	mock.calls.SetMigMode = append(mock.calls.SetMigMode, callInfo)
+	mock.lockSetMigMode.Unlock()
+	return mock.SetMigModeFunc(Mode)
+}
+
+// SetMigModeCalls gets all the calls that were made to SetMigMode.
+// Check the length with:
+//     len(mockedDevice.SetMigModeCalls())
+func (mock *DeviceMock) SetMigModeCalls() []struct {
+	Mode int
+} {
+	var calls []struct {
+		Mode int
+	}
+	mock.lockSetMigMode.RLock()
+	calls = mock.calls.SetMigMode
+	mock.lockSetMigMode.RUnlock()
+	return calls
+}
diff --git a/pkg/nvml/gi.go b/pkg/nvml/gi.go
new file mode 100644
index 0000000..cd775bc
--- /dev/null
+++ b/pkg/nvml/gi.go
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package nvml
+
+import (
+	"github.com/NVIDIA/go-nvml/pkg/nvml"
+)
+
+type nvmlGpuInstance nvml.GpuInstance
+
+var _ GpuInstance = (*nvmlGpuInstance)(nil)
+
+// GetInfo returns info about a GPU Instance
+func (gi nvmlGpuInstance) GetInfo() (GpuInstanceInfo, Return) {
+	i, r := nvml.GpuInstance(gi).GetInfo()
+	info := GpuInstanceInfo{
+		Device:    nvmlDevice(i.Device),
+		Id:        i.Id,
+		ProfileId: i.ProfileId,
+		Placement: GpuInstancePlacement(i.Placement),
+	}
+	return info, Return(r)
+}
+
+// GetComputeInstanceProfileInfo returns info about a given Compute Instance profile
+func (gi nvmlGpuInstance) GetComputeInstanceProfileInfo(profile int, engProfile int) (ComputeInstanceProfileInfo, Return) {
+	p, r := nvml.GpuInstance(gi).GetComputeInstanceProfileInfo(profile, engProfile)
+	return ComputeInstanceProfileInfo(p), Return(r)
+}
+
+// CreateComputeInstance creates a Compute Instance within the GPU Instance
+func (gi nvmlGpuInstance) CreateComputeInstance(info *ComputeInstanceProfileInfo) (ComputeInstance, Return) {
+	ci, r := nvml.GpuInstance(gi).CreateComputeInstance((*nvml.ComputeInstanceProfileInfo)(info))
+	return nvmlComputeInstance(ci), Return(r)
+}
+
+// GetComputeInstances returns the set of Compute Instances associated with a GPU Instance
+func (gi nvmlGpuInstance) GetComputeInstances(info *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) {
+	nvmlCis, r := nvml.GpuInstance(gi).GetComputeInstances((*nvml.ComputeInstanceProfileInfo)(info))
+	var cis []ComputeInstance
+	for _, ci := range nvmlCis {
+		cis = append(cis, nvmlComputeInstance(ci))
+	}
+	return cis, Return(r)
+}
+
+// Destroy destroys a GPU Instance
+func (gi nvmlGpuInstance) Destroy() Return {
+	r := nvml.GpuInstance(gi).Destroy()
+	return Return(r)
+}
diff --git a/pkg/nvml/gi_mock.go b/pkg/nvml/gi_mock.go
new file mode 100644
index 0000000..7393a04
--- /dev/null
+++ b/pkg/nvml/gi_mock.go
@@ -0,0 +1,237 @@
+// Code generated by moq; DO NOT EDIT.
+// github.com/matryer/moq + +package nvml + +import ( + "sync" +) + +// Ensure, that GpuInstanceMock does implement GpuInstance. +// If this is not the case, regenerate this file with moq. +var _ GpuInstance = &GpuInstanceMock{} + +// GpuInstanceMock is a mock implementation of GpuInstance. +// +// func TestSomethingThatUsesGpuInstance(t *testing.T) { +// +// // make and configure a mocked GpuInstance +// mockedGpuInstance := &GpuInstanceMock{ +// CreateComputeInstanceFunc: func(Info *ComputeInstanceProfileInfo) (ComputeInstance, Return) { +// panic("mock out the CreateComputeInstance method") +// }, +// DestroyFunc: func() Return { +// panic("mock out the Destroy method") +// }, +// GetComputeInstanceProfileInfoFunc: func(Profile int, EngProfile int) (ComputeInstanceProfileInfo, Return) { +// panic("mock out the GetComputeInstanceProfileInfo method") +// }, +// GetComputeInstancesFunc: func(Info *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) { +// panic("mock out the GetComputeInstances method") +// }, +// GetInfoFunc: func() (GpuInstanceInfo, Return) { +// panic("mock out the GetInfo method") +// }, +// } +// +// // use mockedGpuInstance in code that requires GpuInstance +// // and then make assertions. +// +// } +type GpuInstanceMock struct { + // CreateComputeInstanceFunc mocks the CreateComputeInstance method. + CreateComputeInstanceFunc func(Info *ComputeInstanceProfileInfo) (ComputeInstance, Return) + + // DestroyFunc mocks the Destroy method. + DestroyFunc func() Return + + // GetComputeInstanceProfileInfoFunc mocks the GetComputeInstanceProfileInfo method. + GetComputeInstanceProfileInfoFunc func(Profile int, EngProfile int) (ComputeInstanceProfileInfo, Return) + + // GetComputeInstancesFunc mocks the GetComputeInstances method. + GetComputeInstancesFunc func(Info *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) + + // GetInfoFunc mocks the GetInfo method. + GetInfoFunc func() (GpuInstanceInfo, Return) + + // calls tracks calls to the methods. + calls struct { + // CreateComputeInstance holds details about calls to the CreateComputeInstance method. + CreateComputeInstance []struct { + // Info is the Info argument value. + Info *ComputeInstanceProfileInfo + } + // Destroy holds details about calls to the Destroy method. + Destroy []struct { + } + // GetComputeInstanceProfileInfo holds details about calls to the GetComputeInstanceProfileInfo method. + GetComputeInstanceProfileInfo []struct { + // Profile is the Profile argument value. + Profile int + // EngProfile is the EngProfile argument value. + EngProfile int + } + // GetComputeInstances holds details about calls to the GetComputeInstances method. + GetComputeInstances []struct { + // Info is the Info argument value. + Info *ComputeInstanceProfileInfo + } + // GetInfo holds details about calls to the GetInfo method. + GetInfo []struct { + } + } + lockCreateComputeInstance sync.RWMutex + lockDestroy sync.RWMutex + lockGetComputeInstanceProfileInfo sync.RWMutex + lockGetComputeInstances sync.RWMutex + lockGetInfo sync.RWMutex +} + +// CreateComputeInstance calls CreateComputeInstanceFunc. 
+func (mock *GpuInstanceMock) CreateComputeInstance(Info *ComputeInstanceProfileInfo) (ComputeInstance, Return) { + if mock.CreateComputeInstanceFunc == nil { + panic("GpuInstanceMock.CreateComputeInstanceFunc: method is nil but GpuInstance.CreateComputeInstance was just called") + } + callInfo := struct { + Info *ComputeInstanceProfileInfo + }{ + Info: Info, + } + mock.lockCreateComputeInstance.Lock() + mock.calls.CreateComputeInstance = append(mock.calls.CreateComputeInstance, callInfo) + mock.lockCreateComputeInstance.Unlock() + return mock.CreateComputeInstanceFunc(Info) +} + +// CreateComputeInstanceCalls gets all the calls that were made to CreateComputeInstance. +// Check the length with: +// len(mockedGpuInstance.CreateComputeInstanceCalls()) +func (mock *GpuInstanceMock) CreateComputeInstanceCalls() []struct { + Info *ComputeInstanceProfileInfo +} { + var calls []struct { + Info *ComputeInstanceProfileInfo + } + mock.lockCreateComputeInstance.RLock() + calls = mock.calls.CreateComputeInstance + mock.lockCreateComputeInstance.RUnlock() + return calls +} + +// Destroy calls DestroyFunc. +func (mock *GpuInstanceMock) Destroy() Return { + if mock.DestroyFunc == nil { + panic("GpuInstanceMock.DestroyFunc: method is nil but GpuInstance.Destroy was just called") + } + callInfo := struct { + }{} + mock.lockDestroy.Lock() + mock.calls.Destroy = append(mock.calls.Destroy, callInfo) + mock.lockDestroy.Unlock() + return mock.DestroyFunc() +} + +// DestroyCalls gets all the calls that were made to Destroy. +// Check the length with: +// len(mockedGpuInstance.DestroyCalls()) +func (mock *GpuInstanceMock) DestroyCalls() []struct { +} { + var calls []struct { + } + mock.lockDestroy.RLock() + calls = mock.calls.Destroy + mock.lockDestroy.RUnlock() + return calls +} + +// GetComputeInstanceProfileInfo calls GetComputeInstanceProfileInfoFunc. +func (mock *GpuInstanceMock) GetComputeInstanceProfileInfo(Profile int, EngProfile int) (ComputeInstanceProfileInfo, Return) { + if mock.GetComputeInstanceProfileInfoFunc == nil { + panic("GpuInstanceMock.GetComputeInstanceProfileInfoFunc: method is nil but GpuInstance.GetComputeInstanceProfileInfo was just called") + } + callInfo := struct { + Profile int + EngProfile int + }{ + Profile: Profile, + EngProfile: EngProfile, + } + mock.lockGetComputeInstanceProfileInfo.Lock() + mock.calls.GetComputeInstanceProfileInfo = append(mock.calls.GetComputeInstanceProfileInfo, callInfo) + mock.lockGetComputeInstanceProfileInfo.Unlock() + return mock.GetComputeInstanceProfileInfoFunc(Profile, EngProfile) +} + +// GetComputeInstanceProfileInfoCalls gets all the calls that were made to GetComputeInstanceProfileInfo. +// Check the length with: +// len(mockedGpuInstance.GetComputeInstanceProfileInfoCalls()) +func (mock *GpuInstanceMock) GetComputeInstanceProfileInfoCalls() []struct { + Profile int + EngProfile int +} { + var calls []struct { + Profile int + EngProfile int + } + mock.lockGetComputeInstanceProfileInfo.RLock() + calls = mock.calls.GetComputeInstanceProfileInfo + mock.lockGetComputeInstanceProfileInfo.RUnlock() + return calls +} + +// GetComputeInstances calls GetComputeInstancesFunc. 
+func (mock *GpuInstanceMock) GetComputeInstances(Info *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) { + if mock.GetComputeInstancesFunc == nil { + panic("GpuInstanceMock.GetComputeInstancesFunc: method is nil but GpuInstance.GetComputeInstances was just called") + } + callInfo := struct { + Info *ComputeInstanceProfileInfo + }{ + Info: Info, + } + mock.lockGetComputeInstances.Lock() + mock.calls.GetComputeInstances = append(mock.calls.GetComputeInstances, callInfo) + mock.lockGetComputeInstances.Unlock() + return mock.GetComputeInstancesFunc(Info) +} + +// GetComputeInstancesCalls gets all the calls that were made to GetComputeInstances. +// Check the length with: +// len(mockedGpuInstance.GetComputeInstancesCalls()) +func (mock *GpuInstanceMock) GetComputeInstancesCalls() []struct { + Info *ComputeInstanceProfileInfo +} { + var calls []struct { + Info *ComputeInstanceProfileInfo + } + mock.lockGetComputeInstances.RLock() + calls = mock.calls.GetComputeInstances + mock.lockGetComputeInstances.RUnlock() + return calls +} + +// GetInfo calls GetInfoFunc. +func (mock *GpuInstanceMock) GetInfo() (GpuInstanceInfo, Return) { + if mock.GetInfoFunc == nil { + panic("GpuInstanceMock.GetInfoFunc: method is nil but GpuInstance.GetInfo was just called") + } + callInfo := struct { + }{} + mock.lockGetInfo.Lock() + mock.calls.GetInfo = append(mock.calls.GetInfo, callInfo) + mock.lockGetInfo.Unlock() + return mock.GetInfoFunc() +} + +// GetInfoCalls gets all the calls that were made to GetInfo. +// Check the length with: +// len(mockedGpuInstance.GetInfoCalls()) +func (mock *GpuInstanceMock) GetInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetInfo.RLock() + calls = mock.calls.GetInfo + mock.lockGetInfo.RUnlock() + return calls +} diff --git a/pkg/nvml/nvml.go b/pkg/nvml/nvml.go new file mode 100644 index 0000000..67c1b23 --- /dev/null +++ b/pkg/nvml/nvml.go @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package nvml + +import ( + "sync" + + "github.com/NVIDIA/go-nvml/pkg/nvml" +) + +type nvmlLib struct { + sync.Mutex + refcount int +} + +var _ Interface = (*nvmlLib)(nil) + +// New creates a new instance of the NVML Interface +func New() Interface { + return &nvmlLib{} +} + +// Init initializes an NVML Interface +func (n *nvmlLib) Init() Return { + ret := nvml.Init() + if ret != nvml.SUCCESS { + return Return(ret) + } + + n.Lock() + defer n.Unlock() + if n.refcount == 0 { + errorStringFunc = nvml.ErrorString + } + n.refcount += 1 + + return SUCCESS +} + +// Shutdown shuts down an NVML Interface +func (n *nvmlLib) Shutdown() Return { + ret := nvml.Shutdown() + if ret != nvml.SUCCESS { + return Return(ret) + } + + n.Lock() + defer n.Unlock() + n.refcount -= 1 + if n.refcount == 0 { + errorStringFunc = defaultErrorStringFunc + } + + return SUCCESS +} + +// DeviceGetCount returns the total number of GPU Devices +func (n *nvmlLib) DeviceGetCount() (int, Return) { + c, r := nvml.DeviceGetCount() + return c, Return(r) +} + +// DeviceGetHandleByIndex returns a Device handle given its index +func (n *nvmlLib) DeviceGetHandleByIndex(index int) (Device, Return) { + d, r := nvml.DeviceGetHandleByIndex(index) + return nvmlDevice(d), Return(r) +} + +// DeviceGetHandleByUUID returns a Device handle given its UUID +func (n *nvmlLib) DeviceGetHandleByUUID(uuid string) (Device, Return) { + d, r := nvml.DeviceGetHandleByUUID(uuid) + return nvmlDevice(d), Return(r) +} + +// SystemGetDriverVersion returns the version of the installed NVIDIA driver +func (n *nvmlLib) SystemGetDriverVersion() (string, Return) { + v, r := nvml.SystemGetDriverVersion() + return v, Return(r) +} + +// ErrorString returns the error string associated with a given return value +func (n *nvmlLib) ErrorString(ret Return) string { + return nvml.ErrorString(nvml.Return(ret)) +} diff --git a/pkg/nvml/nvml_mock.go b/pkg/nvml/nvml_mock.go new file mode 100644 index 0000000..0efaf1d --- /dev/null +++ b/pkg/nvml/nvml_mock.go @@ -0,0 +1,303 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package nvml + +import ( + "sync" +) + +// Ensure, that InterfaceMock does implement Interface. +// If this is not the case, regenerate this file with moq. +var _ Interface = &InterfaceMock{} + +// InterfaceMock is a mock implementation of Interface. +// +// func TestSomethingThatUsesInterface(t *testing.T) { +// +// // make and configure a mocked Interface +// mockedInterface := &InterfaceMock{ +// DeviceGetCountFunc: func() (int, Return) { +// panic("mock out the DeviceGetCount method") +// }, +// DeviceGetHandleByIndexFunc: func(Index int) (Device, Return) { +// panic("mock out the DeviceGetHandleByIndex method") +// }, +// DeviceGetHandleByUUIDFunc: func(UUID string) (Device, Return) { +// panic("mock out the DeviceGetHandleByUUID method") +// }, +// ErrorStringFunc: func(r Return) string { +// panic("mock out the ErrorString method") +// }, +// InitFunc: func() Return { +// panic("mock out the Init method") +// }, +// ShutdownFunc: func() Return { +// panic("mock out the Shutdown method") +// }, +// SystemGetDriverVersionFunc: func() (string, Return) { +// panic("mock out the SystemGetDriverVersion method") +// }, +// } +// +// // use mockedInterface in code that requires Interface +// // and then make assertions. +// +// } +type InterfaceMock struct { + // DeviceGetCountFunc mocks the DeviceGetCount method. + DeviceGetCountFunc func() (int, Return) + + // DeviceGetHandleByIndexFunc mocks the DeviceGetHandleByIndex method. 
+ DeviceGetHandleByIndexFunc func(Index int) (Device, Return) + + // DeviceGetHandleByUUIDFunc mocks the DeviceGetHandleByUUID method. + DeviceGetHandleByUUIDFunc func(UUID string) (Device, Return) + + // ErrorStringFunc mocks the ErrorString method. + ErrorStringFunc func(r Return) string + + // InitFunc mocks the Init method. + InitFunc func() Return + + // ShutdownFunc mocks the Shutdown method. + ShutdownFunc func() Return + + // SystemGetDriverVersionFunc mocks the SystemGetDriverVersion method. + SystemGetDriverVersionFunc func() (string, Return) + + // calls tracks calls to the methods. + calls struct { + // DeviceGetCount holds details about calls to the DeviceGetCount method. + DeviceGetCount []struct { + } + // DeviceGetHandleByIndex holds details about calls to the DeviceGetHandleByIndex method. + DeviceGetHandleByIndex []struct { + // Index is the Index argument value. + Index int + } + // DeviceGetHandleByUUID holds details about calls to the DeviceGetHandleByUUID method. + DeviceGetHandleByUUID []struct { + // UUID is the UUID argument value. + UUID string + } + // ErrorString holds details about calls to the ErrorString method. + ErrorString []struct { + // R is the r argument value. + R Return + } + // Init holds details about calls to the Init method. + Init []struct { + } + // Shutdown holds details about calls to the Shutdown method. + Shutdown []struct { + } + // SystemGetDriverVersion holds details about calls to the SystemGetDriverVersion method. + SystemGetDriverVersion []struct { + } + } + lockDeviceGetCount sync.RWMutex + lockDeviceGetHandleByIndex sync.RWMutex + lockDeviceGetHandleByUUID sync.RWMutex + lockErrorString sync.RWMutex + lockInit sync.RWMutex + lockShutdown sync.RWMutex + lockSystemGetDriverVersion sync.RWMutex +} + +// DeviceGetCount calls DeviceGetCountFunc. +func (mock *InterfaceMock) DeviceGetCount() (int, Return) { + if mock.DeviceGetCountFunc == nil { + panic("InterfaceMock.DeviceGetCountFunc: method is nil but Interface.DeviceGetCount was just called") + } + callInfo := struct { + }{} + mock.lockDeviceGetCount.Lock() + mock.calls.DeviceGetCount = append(mock.calls.DeviceGetCount, callInfo) + mock.lockDeviceGetCount.Unlock() + return mock.DeviceGetCountFunc() +} + +// DeviceGetCountCalls gets all the calls that were made to DeviceGetCount. +// Check the length with: +// len(mockedInterface.DeviceGetCountCalls()) +func (mock *InterfaceMock) DeviceGetCountCalls() []struct { +} { + var calls []struct { + } + mock.lockDeviceGetCount.RLock() + calls = mock.calls.DeviceGetCount + mock.lockDeviceGetCount.RUnlock() + return calls +} + +// DeviceGetHandleByIndex calls DeviceGetHandleByIndexFunc. +func (mock *InterfaceMock) DeviceGetHandleByIndex(Index int) (Device, Return) { + if mock.DeviceGetHandleByIndexFunc == nil { + panic("InterfaceMock.DeviceGetHandleByIndexFunc: method is nil but Interface.DeviceGetHandleByIndex was just called") + } + callInfo := struct { + Index int + }{ + Index: Index, + } + mock.lockDeviceGetHandleByIndex.Lock() + mock.calls.DeviceGetHandleByIndex = append(mock.calls.DeviceGetHandleByIndex, callInfo) + mock.lockDeviceGetHandleByIndex.Unlock() + return mock.DeviceGetHandleByIndexFunc(Index) +} + +// DeviceGetHandleByIndexCalls gets all the calls that were made to DeviceGetHandleByIndex. 
+// Check the length with: +// len(mockedInterface.DeviceGetHandleByIndexCalls()) +func (mock *InterfaceMock) DeviceGetHandleByIndexCalls() []struct { + Index int +} { + var calls []struct { + Index int + } + mock.lockDeviceGetHandleByIndex.RLock() + calls = mock.calls.DeviceGetHandleByIndex + mock.lockDeviceGetHandleByIndex.RUnlock() + return calls +} + +// DeviceGetHandleByUUID calls DeviceGetHandleByUUIDFunc. +func (mock *InterfaceMock) DeviceGetHandleByUUID(UUID string) (Device, Return) { + if mock.DeviceGetHandleByUUIDFunc == nil { + panic("InterfaceMock.DeviceGetHandleByUUIDFunc: method is nil but Interface.DeviceGetHandleByUUID was just called") + } + callInfo := struct { + UUID string + }{ + UUID: UUID, + } + mock.lockDeviceGetHandleByUUID.Lock() + mock.calls.DeviceGetHandleByUUID = append(mock.calls.DeviceGetHandleByUUID, callInfo) + mock.lockDeviceGetHandleByUUID.Unlock() + return mock.DeviceGetHandleByUUIDFunc(UUID) +} + +// DeviceGetHandleByUUIDCalls gets all the calls that were made to DeviceGetHandleByUUID. +// Check the length with: +// len(mockedInterface.DeviceGetHandleByUUIDCalls()) +func (mock *InterfaceMock) DeviceGetHandleByUUIDCalls() []struct { + UUID string +} { + var calls []struct { + UUID string + } + mock.lockDeviceGetHandleByUUID.RLock() + calls = mock.calls.DeviceGetHandleByUUID + mock.lockDeviceGetHandleByUUID.RUnlock() + return calls +} + +// ErrorString calls ErrorStringFunc. +func (mock *InterfaceMock) ErrorString(r Return) string { + if mock.ErrorStringFunc == nil { + panic("InterfaceMock.ErrorStringFunc: method is nil but Interface.ErrorString was just called") + } + callInfo := struct { + R Return + }{ + R: r, + } + mock.lockErrorString.Lock() + mock.calls.ErrorString = append(mock.calls.ErrorString, callInfo) + mock.lockErrorString.Unlock() + return mock.ErrorStringFunc(r) +} + +// ErrorStringCalls gets all the calls that were made to ErrorString. +// Check the length with: +// len(mockedInterface.ErrorStringCalls()) +func (mock *InterfaceMock) ErrorStringCalls() []struct { + R Return +} { + var calls []struct { + R Return + } + mock.lockErrorString.RLock() + calls = mock.calls.ErrorString + mock.lockErrorString.RUnlock() + return calls +} + +// Init calls InitFunc. +func (mock *InterfaceMock) Init() Return { + if mock.InitFunc == nil { + panic("InterfaceMock.InitFunc: method is nil but Interface.Init was just called") + } + callInfo := struct { + }{} + mock.lockInit.Lock() + mock.calls.Init = append(mock.calls.Init, callInfo) + mock.lockInit.Unlock() + return mock.InitFunc() +} + +// InitCalls gets all the calls that were made to Init. +// Check the length with: +// len(mockedInterface.InitCalls()) +func (mock *InterfaceMock) InitCalls() []struct { +} { + var calls []struct { + } + mock.lockInit.RLock() + calls = mock.calls.Init + mock.lockInit.RUnlock() + return calls +} + +// Shutdown calls ShutdownFunc. +func (mock *InterfaceMock) Shutdown() Return { + if mock.ShutdownFunc == nil { + panic("InterfaceMock.ShutdownFunc: method is nil but Interface.Shutdown was just called") + } + callInfo := struct { + }{} + mock.lockShutdown.Lock() + mock.calls.Shutdown = append(mock.calls.Shutdown, callInfo) + mock.lockShutdown.Unlock() + return mock.ShutdownFunc() +} + +// ShutdownCalls gets all the calls that were made to Shutdown. 
+// Check the length with: +// len(mockedInterface.ShutdownCalls()) +func (mock *InterfaceMock) ShutdownCalls() []struct { +} { + var calls []struct { + } + mock.lockShutdown.RLock() + calls = mock.calls.Shutdown + mock.lockShutdown.RUnlock() + return calls +} + +// SystemGetDriverVersion calls SystemGetDriverVersionFunc. +func (mock *InterfaceMock) SystemGetDriverVersion() (string, Return) { + if mock.SystemGetDriverVersionFunc == nil { + panic("InterfaceMock.SystemGetDriverVersionFunc: method is nil but Interface.SystemGetDriverVersion was just called") + } + callInfo := struct { + }{} + mock.lockSystemGetDriverVersion.Lock() + mock.calls.SystemGetDriverVersion = append(mock.calls.SystemGetDriverVersion, callInfo) + mock.lockSystemGetDriverVersion.Unlock() + return mock.SystemGetDriverVersionFunc() +} + +// SystemGetDriverVersionCalls gets all the calls that were made to SystemGetDriverVersion. +// Check the length with: +// len(mockedInterface.SystemGetDriverVersionCalls()) +func (mock *InterfaceMock) SystemGetDriverVersionCalls() []struct { +} { + var calls []struct { + } + mock.lockSystemGetDriverVersion.RLock() + calls = mock.calls.SystemGetDriverVersion + mock.lockSystemGetDriverVersion.RUnlock() + return calls +} diff --git a/pkg/nvml/return.go b/pkg/nvml/return.go new file mode 100644 index 0000000..64cc1e1 --- /dev/null +++ b/pkg/nvml/return.go @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package nvml + +import ( + "fmt" + + "github.com/NVIDIA/go-nvml/pkg/nvml" +) + +// String returns the string representation of a Return +func (r Return) String() string { + return errorStringFunc(nvml.Return(r)) +} + +// Error returns the string representation of a Return +func (r Return) Error() string { + return errorStringFunc(nvml.Return(r)) +} + +// Assigned to nvml.ErrorString if the system nvml library is in use +var errorStringFunc = defaultErrorStringFunc + +var defaultErrorStringFunc = func(r nvml.Return) string { + switch Return(r) { + case SUCCESS: + return "SUCCESS" + case ERROR_UNINITIALIZED: + return "ERROR_UNINITIALIZED" + case ERROR_INVALID_ARGUMENT: + return "ERROR_INVALID_ARGUMENT" + case ERROR_NOT_SUPPORTED: + return "ERROR_NOT_SUPPORTED" + case ERROR_NO_PERMISSION: + return "ERROR_NO_PERMISSION" + case ERROR_ALREADY_INITIALIZED: + return "ERROR_ALREADY_INITIALIZED" + case ERROR_NOT_FOUND: + return "ERROR_NOT_FOUND" + case ERROR_INSUFFICIENT_SIZE: + return "ERROR_INSUFFICIENT_SIZE" + case ERROR_INSUFFICIENT_POWER: + return "ERROR_INSUFFICIENT_POWER" + case ERROR_DRIVER_NOT_LOADED: + return "ERROR_DRIVER_NOT_LOADED" + case ERROR_TIMEOUT: + return "ERROR_TIMEOUT" + case ERROR_IRQ_ISSUE: + return "ERROR_IRQ_ISSUE" + case ERROR_LIBRARY_NOT_FOUND: + return "ERROR_LIBRARY_NOT_FOUND" + case ERROR_FUNCTION_NOT_FOUND: + return "ERROR_FUNCTION_NOT_FOUND" + case ERROR_CORRUPTED_INFOROM: + return "ERROR_CORRUPTED_INFOROM" + case ERROR_GPU_IS_LOST: + return "ERROR_GPU_IS_LOST" + case ERROR_RESET_REQUIRED: + return "ERROR_RESET_REQUIRED" + case ERROR_OPERATING_SYSTEM: + return "ERROR_OPERATING_SYSTEM" + case ERROR_LIB_RM_VERSION_MISMATCH: + return "ERROR_LIB_RM_VERSION_MISMATCH" + case ERROR_IN_USE: + return "ERROR_IN_USE" + case ERROR_MEMORY: + return "ERROR_MEMORY" + case ERROR_NO_DATA: + return "ERROR_NO_DATA" + case ERROR_VGPU_ECC_NOT_SUPPORTED: + return "ERROR_VGPU_ECC_NOT_SUPPORTED" + case ERROR_INSUFFICIENT_RESOURCES: + return "ERROR_INSUFFICIENT_RESOURCES" + case ERROR_UNKNOWN: + return "ERROR_UNKNOWN" + default: + return fmt.Sprintf("Unknown return value: %d", r) + } +} diff --git a/pkg/nvml/return_test.go b/pkg/nvml/return_test.go new file mode 100644 index 0000000..d12d796 --- /dev/null +++ b/pkg/nvml/return_test.go @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package nvml + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestReturnErrorString(t *testing.T) { + testCases := []struct { + in Return + out string + }{ + { + SUCCESS, + "SUCCESS", + }, + { + Return(1024), + "Unknown return value: 1024", + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%d", tc.in), func(t *testing.T) { + out := tc.in.Error() + require.Equal(t, out, tc.out) + }) + } +} diff --git a/pkg/nvml/types.go b/pkg/nvml/types.go new file mode 100644 index 0000000..31ee7dd --- /dev/null +++ b/pkg/nvml/types.go @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package nvml + +import ( + "github.com/NVIDIA/go-nvml/pkg/nvml" +) + +//go:generate moq -out nvml_mock.go . Interface +// Interface defines the functions implemented by an NVML library +type Interface interface { + Init() Return + Shutdown() Return + DeviceGetCount() (int, Return) + DeviceGetHandleByIndex(Index int) (Device, Return) + DeviceGetHandleByUUID(UUID string) (Device, Return) + SystemGetDriverVersion() (string, Return) + ErrorString(r Return) string +} + +//go:generate moq -out device_mock.go . Device +// Device defines the functions implemented by an NVML device +type Device interface { + GetIndex() (int, Return) + GetPciInfo() (PciInfo, Return) + GetMemoryInfo() (Memory, Return) + GetUUID() (string, Return) + GetMinorNumber() (int, Return) + IsMigDeviceHandle() (bool, Return) + GetDeviceHandleFromMigDeviceHandle() (Device, Return) + SetMigMode(Mode int) (Return, Return) + GetMigMode() (int, int, Return) + GetGpuInstanceProfileInfo(Profile int) (GpuInstanceProfileInfo, Return) + GetGpuInstances(Info *GpuInstanceProfileInfo) ([]GpuInstance, Return) + GetMaxMigDeviceCount() (int, Return) + GetMigDeviceHandleByIndex(Index int) (Device, Return) + GetGpuInstanceId() (int, Return) + GetComputeInstanceId() (int, Return) +} + +//go:generate moq -out gi_mock.go . GpuInstance +// GpuInstance defines the functions implemented by a GpuInstance +type GpuInstance interface { + GetInfo() (GpuInstanceInfo, Return) + GetComputeInstanceProfileInfo(Profile int, EngProfile int) (ComputeInstanceProfileInfo, Return) + CreateComputeInstance(Info *ComputeInstanceProfileInfo) (ComputeInstance, Return) + GetComputeInstances(Info *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) + Destroy() Return +} + +//go:generate moq -out ci_mock.go . 
ComputeInstance
+// ComputeInstance defines the functions implemented by a ComputeInstance
+type ComputeInstance interface {
+	GetInfo() (ComputeInstanceInfo, Return)
+	Destroy() Return
+}
+
+// GpuInstanceInfo holds info about a GPU Instance
+type GpuInstanceInfo struct {
+	Device    Device
+	Id        uint32
+	ProfileId uint32
+	Placement GpuInstancePlacement
+}
+
+// ComputeInstanceInfo holds info about a Compute Instance
+type ComputeInstanceInfo struct {
+	Device      Device
+	GpuInstance GpuInstance
+	Id          uint32
+	ProfileId   uint32
+	Placement   ComputeInstancePlacement
+}
+
+// Return defines an NVML return type
+type Return nvml.Return
+
+// Memory holds info about GPU device memory
+type Memory nvml.Memory
+
+// PciInfo holds info about the PCI connections of a GPU device
+type PciInfo nvml.PciInfo
+
+// GpuInstanceProfileInfo holds info about a GPU Instance Profile
+type GpuInstanceProfileInfo nvml.GpuInstanceProfileInfo
+
+// GpuInstancePlacement holds placement info about a GPU Instance
+type GpuInstancePlacement nvml.GpuInstancePlacement
+
+// ComputeInstanceProfileInfo holds info about a Compute Instance Profile
+type ComputeInstanceProfileInfo nvml.ComputeInstanceProfileInfo
+
+// ComputeInstancePlacement holds placement info about a Compute Instance
+type ComputeInstancePlacement nvml.ComputeInstancePlacement
diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.go
new file mode 100644
index 0000000..b04f366
--- /dev/null
+++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nvml
+
+import (
+	"unsafe"
+)
+
+import "C"
+
+var cgoAllocsUnknown = new(struct{})
+
+type stringHeader struct {
+	Data unsafe.Pointer
+	Len  int
+}
+
+func clen(n []byte) int {
+	for i := 0; i < len(n); i++ {
+		if n[i] == 0 {
+			return i
+		}
+	}
+	return len(n)
+}
+
+func uint32SliceToIntSlice(s []uint32) []int {
+	ret := make([]int, len(s))
+	for i := range s {
+		ret[i] = int(s[i])
+	}
+	return ret
+}
+
+// packPCharString creates a Go string backed by *C.char and avoids copying.
+func packPCharString(p *C.char) (raw string) {
+	if p != nil && *p != 0 {
+		h := (*stringHeader)(unsafe.Pointer(&raw))
+		h.Data = unsafe.Pointer(p)
+		for *p != 0 {
+			p = (*C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1)) // p++
+		}
+		h.Len = int(uintptr(unsafe.Pointer(p)) - uintptr(h.Data))
+	}
+	return
+}
+
+// unpackPCharString represents the data from Go string as *C.char and avoids copying.
+func unpackPCharString(str string) (*C.char, *struct{}) {
+	h := (*stringHeader)(unsafe.Pointer(&str))
+	return (*C.char)(h.Data), cgoAllocsUnknown
+}
diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.h b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.h
new file mode 100644
index 0000000..b25c5e5
--- /dev/null
+++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.h
@@ -0,0 +1,23 @@
+// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED.
+// Code generated by https://git.io/c-for-go. DO NOT EDIT.
+
+#include "nvml.h"
+#include <stdlib.h>
+#pragma once
+
+#define __CGOGEN 1
+
diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go
new file mode 100644
index 0000000..420f0ae
--- /dev/null
+++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go
@@ -0,0 +1,1139 @@
+// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED.
+// Code generated by https://git.io/c-for-go. DO NOT EDIT.
+ +package nvml + +/* +#cgo LDFLAGS: -Wl,--unresolved-symbols=ignore-in-object-files +#cgo CFLAGS: -DNVML_NO_UNVERSIONED_FUNC_DEFS=1 +#include "nvml.h" +#include +#include "cgo_helpers.h" +*/ +import "C" + +const ( + // NO_UNVERSIONED_FUNC_DEFS as defined in go-nvml/:24 + NO_UNVERSIONED_FUNC_DEFS = 1 + // API_VERSION as defined in nvml/nvml.h + API_VERSION = 11 + // API_VERSION_STR as defined in nvml/nvml.h + API_VERSION_STR = "11" + // VALUE_NOT_AVAILABLE as defined in nvml/nvml.h + VALUE_NOT_AVAILABLE = -1 + // DEVICE_PCI_BUS_ID_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_BUFFER_SIZE = 32 + // DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE = 16 + // DEVICE_PCI_BUS_ID_LEGACY_FMT as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_LEGACY_FMT = "%04X:%02X:%02X.0" + // DEVICE_PCI_BUS_ID_FMT as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_FMT = "%08X:%02X:%02X.0" + // NVLINK_MAX_LINKS as defined in nvml/nvml.h + NVLINK_MAX_LINKS = 12 + // TOPOLOGY_CPU as defined in nvml/nvml.h + TOPOLOGY_CPU = 0 + // MAX_PHYSICAL_BRIDGE as defined in nvml/nvml.h + MAX_PHYSICAL_BRIDGE = 128 + // FlagDefault as defined in nvml/nvml.h + FlagDefault = 0 + // FlagForce as defined in nvml/nvml.h + FlagForce = 1 + // SINGLE_BIT_ECC as defined in nvml/nvml.h + SINGLE_BIT_ECC = 0 + // DOUBLE_BIT_ECC as defined in nvml/nvml.h + DOUBLE_BIT_ECC = 0 + // GRID_LICENSE_EXPIRY_NOT_AVAILABLE as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_NOT_AVAILABLE = 0 + // GRID_LICENSE_EXPIRY_INVALID as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_INVALID = 1 + // GRID_LICENSE_EXPIRY_VALID as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_VALID = 2 + // GRID_LICENSE_EXPIRY_NOT_APPLICABLE as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_NOT_APPLICABLE = 3 + // GRID_LICENSE_EXPIRY_PERMANENT as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_PERMANENT = 4 + // GRID_LICENSE_BUFFER_SIZE as defined in nvml/nvml.h + GRID_LICENSE_BUFFER_SIZE = 128 + // VGPU_NAME_BUFFER_SIZE as defined in nvml/nvml.h + VGPU_NAME_BUFFER_SIZE = 64 + // GRID_LICENSE_FEATURE_MAX_COUNT as defined in nvml/nvml.h + GRID_LICENSE_FEATURE_MAX_COUNT = 3 + // GRID_LICENSE_STATE_UNKNOWN as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNKNOWN = 0 + // GRID_LICENSE_STATE_UNINITIALIZED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNINITIALIZED = 1 + // GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED = 2 + // GRID_LICENSE_STATE_UNLICENSED_RESTRICTED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED_RESTRICTED = 3 + // GRID_LICENSE_STATE_UNLICENSED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED = 4 + // GRID_LICENSE_STATE_LICENSED as defined in nvml/nvml.h + GRID_LICENSE_STATE_LICENSED = 5 + // DEVICE_ARCH_KEPLER as defined in nvml/nvml.h + DEVICE_ARCH_KEPLER = 2 + // DEVICE_ARCH_MAXWELL as defined in nvml/nvml.h + DEVICE_ARCH_MAXWELL = 3 + // DEVICE_ARCH_PASCAL as defined in nvml/nvml.h + DEVICE_ARCH_PASCAL = 4 + // DEVICE_ARCH_VOLTA as defined in nvml/nvml.h + DEVICE_ARCH_VOLTA = 5 + // DEVICE_ARCH_TURING as defined in nvml/nvml.h + DEVICE_ARCH_TURING = 6 + // DEVICE_ARCH_AMPERE as defined in nvml/nvml.h + DEVICE_ARCH_AMPERE = 7 + // DEVICE_ARCH_UNKNOWN as defined in nvml/nvml.h + DEVICE_ARCH_UNKNOWN = 4294967295 + // BUS_TYPE_UNKNOWN as defined in nvml/nvml.h + BUS_TYPE_UNKNOWN = 0 + // BUS_TYPE_PCI as defined in nvml/nvml.h + BUS_TYPE_PCI = 1 + // BUS_TYPE_PCIE as defined in nvml/nvml.h + BUS_TYPE_PCIE = 2 + // BUS_TYPE_FPCI as defined in 
nvml/nvml.h + BUS_TYPE_FPCI = 3 + // BUS_TYPE_AGP as defined in nvml/nvml.h + BUS_TYPE_AGP = 4 + // POWER_SOURCE_AC as defined in nvml/nvml.h + POWER_SOURCE_AC = 0 + // POWER_SOURCE_BATTERY as defined in nvml/nvml.h + POWER_SOURCE_BATTERY = 1 + // PCIE_LINK_MAX_SPEED_INVALID as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_INVALID = 0 + // PCIE_LINK_MAX_SPEED_2500MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_2500MBPS = 1 + // PCIE_LINK_MAX_SPEED_5000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_5000MBPS = 2 + // PCIE_LINK_MAX_SPEED_8000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_8000MBPS = 3 + // PCIE_LINK_MAX_SPEED_16000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_16000MBPS = 4 + // PCIE_LINK_MAX_SPEED_32000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_32000MBPS = 5 + // ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED as defined in nvml/nvml.h + ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED = 0 + // ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED as defined in nvml/nvml.h + ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED = 1 + // FI_DEV_ECC_CURRENT as defined in nvml/nvml.h + FI_DEV_ECC_CURRENT = 1 + // FI_DEV_ECC_PENDING as defined in nvml/nvml.h + FI_DEV_ECC_PENDING = 2 + // FI_DEV_ECC_SBE_VOL_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_TOTAL = 3 + // FI_DEV_ECC_DBE_VOL_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_TOTAL = 4 + // FI_DEV_ECC_SBE_AGG_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_TOTAL = 5 + // FI_DEV_ECC_DBE_AGG_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_TOTAL = 6 + // FI_DEV_ECC_SBE_VOL_L1 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_L1 = 7 + // FI_DEV_ECC_DBE_VOL_L1 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_L1 = 8 + // FI_DEV_ECC_SBE_VOL_L2 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_L2 = 9 + // FI_DEV_ECC_DBE_VOL_L2 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_L2 = 10 + // FI_DEV_ECC_SBE_VOL_DEV as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_DEV = 11 + // FI_DEV_ECC_DBE_VOL_DEV as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_DEV = 12 + // FI_DEV_ECC_SBE_VOL_REG as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_REG = 13 + // FI_DEV_ECC_DBE_VOL_REG as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_REG = 14 + // FI_DEV_ECC_SBE_VOL_TEX as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_TEX = 15 + // FI_DEV_ECC_DBE_VOL_TEX as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_TEX = 16 + // FI_DEV_ECC_DBE_VOL_CBU as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_CBU = 17 + // FI_DEV_ECC_SBE_AGG_L1 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_L1 = 18 + // FI_DEV_ECC_DBE_AGG_L1 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_L1 = 19 + // FI_DEV_ECC_SBE_AGG_L2 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_L2 = 20 + // FI_DEV_ECC_DBE_AGG_L2 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_L2 = 21 + // FI_DEV_ECC_SBE_AGG_DEV as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_DEV = 22 + // FI_DEV_ECC_DBE_AGG_DEV as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_DEV = 23 + // FI_DEV_ECC_SBE_AGG_REG as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_REG = 24 + // FI_DEV_ECC_DBE_AGG_REG as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_REG = 25 + // FI_DEV_ECC_SBE_AGG_TEX as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_TEX = 26 + // FI_DEV_ECC_DBE_AGG_TEX as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_TEX = 27 + // FI_DEV_ECC_DBE_AGG_CBU as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_CBU = 28 + // FI_DEV_RETIRED_SBE as defined in nvml/nvml.h + FI_DEV_RETIRED_SBE = 29 + // FI_DEV_RETIRED_DBE as defined in nvml/nvml.h + FI_DEV_RETIRED_DBE = 30 + // FI_DEV_RETIRED_PENDING as 
defined in nvml/nvml.h + FI_DEV_RETIRED_PENDING = 31 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 = 32 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 = 33 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 = 34 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 = 35 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 = 36 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 = 37 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL = 38 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 = 39 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 = 40 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 = 41 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 = 42 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 = 43 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 = 44 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL = 45 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 = 46 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 = 47 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 = 48 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 = 49 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 = 50 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 = 51 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL = 52 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 = 53 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 = 54 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 = 55 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 = 56 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 = 57 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 = 58 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL = 59 + // FI_DEV_NVLINK_BANDWIDTH_C0_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L0 = 60 + // FI_DEV_NVLINK_BANDWIDTH_C0_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L1 = 61 + // FI_DEV_NVLINK_BANDWIDTH_C0_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L2 = 62 + // 
FI_DEV_NVLINK_BANDWIDTH_C0_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L3 = 63 + // FI_DEV_NVLINK_BANDWIDTH_C0_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L4 = 64 + // FI_DEV_NVLINK_BANDWIDTH_C0_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L5 = 65 + // FI_DEV_NVLINK_BANDWIDTH_C0_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_TOTAL = 66 + // FI_DEV_NVLINK_BANDWIDTH_C1_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L0 = 67 + // FI_DEV_NVLINK_BANDWIDTH_C1_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L1 = 68 + // FI_DEV_NVLINK_BANDWIDTH_C1_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L2 = 69 + // FI_DEV_NVLINK_BANDWIDTH_C1_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L3 = 70 + // FI_DEV_NVLINK_BANDWIDTH_C1_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L4 = 71 + // FI_DEV_NVLINK_BANDWIDTH_C1_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L5 = 72 + // FI_DEV_NVLINK_BANDWIDTH_C1_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_TOTAL = 73 + // FI_DEV_PERF_POLICY_POWER as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_POWER = 74 + // FI_DEV_PERF_POLICY_THERMAL as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_THERMAL = 75 + // FI_DEV_PERF_POLICY_SYNC_BOOST as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_SYNC_BOOST = 76 + // FI_DEV_PERF_POLICY_BOARD_LIMIT as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_BOARD_LIMIT = 77 + // FI_DEV_PERF_POLICY_LOW_UTILIZATION as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_LOW_UTILIZATION = 78 + // FI_DEV_PERF_POLICY_RELIABILITY as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_RELIABILITY = 79 + // FI_DEV_PERF_POLICY_TOTAL_APP_CLOCKS as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_TOTAL_APP_CLOCKS = 80 + // FI_DEV_PERF_POLICY_TOTAL_BASE_CLOCKS as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_TOTAL_BASE_CLOCKS = 81 + // FI_DEV_MEMORY_TEMP as defined in nvml/nvml.h + FI_DEV_MEMORY_TEMP = 82 + // FI_DEV_TOTAL_ENERGY_CONSUMPTION as defined in nvml/nvml.h + FI_DEV_TOTAL_ENERGY_CONSUMPTION = 83 + // FI_DEV_NVLINK_SPEED_MBPS_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L0 = 84 + // FI_DEV_NVLINK_SPEED_MBPS_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L1 = 85 + // FI_DEV_NVLINK_SPEED_MBPS_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L2 = 86 + // FI_DEV_NVLINK_SPEED_MBPS_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L3 = 87 + // FI_DEV_NVLINK_SPEED_MBPS_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L4 = 88 + // FI_DEV_NVLINK_SPEED_MBPS_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L5 = 89 + // FI_DEV_NVLINK_SPEED_MBPS_COMMON as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_COMMON = 90 + // FI_DEV_NVLINK_LINK_COUNT as defined in nvml/nvml.h + FI_DEV_NVLINK_LINK_COUNT = 91 + // FI_DEV_RETIRED_PENDING_SBE as defined in nvml/nvml.h + FI_DEV_RETIRED_PENDING_SBE = 92 + // FI_DEV_RETIRED_PENDING_DBE as defined in nvml/nvml.h + FI_DEV_RETIRED_PENDING_DBE = 93 + // FI_DEV_PCIE_REPLAY_COUNTER as defined in nvml/nvml.h + FI_DEV_PCIE_REPLAY_COUNTER = 94 + // FI_DEV_PCIE_REPLAY_ROLLOVER_COUNTER as defined in nvml/nvml.h + FI_DEV_PCIE_REPLAY_ROLLOVER_COUNTER = 95 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 = 96 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 = 97 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 = 98 + 
// FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 = 99 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 = 100 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 = 101 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 = 102 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 = 103 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 = 104 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 = 105 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 = 106 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 = 107 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 = 108 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 = 109 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 = 110 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 = 111 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 = 112 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 = 113 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 = 114 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 = 115 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 = 116 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 = 117 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 = 118 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 = 119 + // FI_DEV_NVLINK_BANDWIDTH_C0_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L6 = 120 + // FI_DEV_NVLINK_BANDWIDTH_C0_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L7 = 121 + // FI_DEV_NVLINK_BANDWIDTH_C0_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L8 = 122 + // FI_DEV_NVLINK_BANDWIDTH_C0_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L9 = 123 + // FI_DEV_NVLINK_BANDWIDTH_C0_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L10 = 124 + // FI_DEV_NVLINK_BANDWIDTH_C0_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L11 = 125 + // FI_DEV_NVLINK_BANDWIDTH_C1_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L6 = 126 + // FI_DEV_NVLINK_BANDWIDTH_C1_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L7 = 127 + // FI_DEV_NVLINK_BANDWIDTH_C1_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L8 = 128 + // FI_DEV_NVLINK_BANDWIDTH_C1_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L9 = 129 + // FI_DEV_NVLINK_BANDWIDTH_C1_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L10 = 130 + // FI_DEV_NVLINK_BANDWIDTH_C1_L11 as defined in 
nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L11 = 131 + // FI_DEV_NVLINK_SPEED_MBPS_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L6 = 132 + // FI_DEV_NVLINK_SPEED_MBPS_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L7 = 133 + // FI_DEV_NVLINK_SPEED_MBPS_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L8 = 134 + // FI_DEV_NVLINK_SPEED_MBPS_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L9 = 135 + // FI_DEV_NVLINK_SPEED_MBPS_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L10 = 136 + // FI_DEV_NVLINK_SPEED_MBPS_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L11 = 137 + // FI_DEV_NVLINK_THROUGHPUT_DATA_TX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_DATA_TX = 138 + // FI_DEV_NVLINK_THROUGHPUT_DATA_RX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_DATA_RX = 139 + // FI_DEV_NVLINK_THROUGHPUT_RAW_TX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_RAW_TX = 140 + // FI_DEV_NVLINK_THROUGHPUT_RAW_RX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_RAW_RX = 141 + // FI_DEV_REMAPPED_COR as defined in nvml/nvml.h + FI_DEV_REMAPPED_COR = 142 + // FI_DEV_REMAPPED_UNC as defined in nvml/nvml.h + FI_DEV_REMAPPED_UNC = 143 + // FI_DEV_REMAPPED_PENDING as defined in nvml/nvml.h + FI_DEV_REMAPPED_PENDING = 144 + // FI_DEV_REMAPPED_FAILURE as defined in nvml/nvml.h + FI_DEV_REMAPPED_FAILURE = 145 + // FI_DEV_NVLINK_REMOTE_NVLINK_ID as defined in nvml/nvml.h + FI_DEV_NVLINK_REMOTE_NVLINK_ID = 146 + // FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT as defined in nvml/nvml.h + FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT = 147 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 = 148 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 = 149 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 = 150 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 = 151 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 = 152 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 = 153 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 = 154 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 = 155 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 = 156 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 = 157 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 = 158 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 = 159 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL = 160 + // FI_MAX as defined in nvml/nvml.h + FI_MAX = 161 + // EventTypeSingleBitEccError as defined in nvml/nvml.h + EventTypeSingleBitEccError = 1 + // EventTypeDoubleBitEccError as defined in nvml/nvml.h + EventTypeDoubleBitEccError = 2 + // EventTypePState as defined in nvml/nvml.h + EventTypePState = 4 + // EventTypeXidCriticalError as defined in nvml/nvml.h + EventTypeXidCriticalError = 8 + // EventTypeClock as defined in nvml/nvml.h + EventTypeClock = 16 + 
// EventTypePowerSourceChange as defined in nvml/nvml.h + EventTypePowerSourceChange = 128 + // EventMigConfigChange as defined in nvml/nvml.h + EventMigConfigChange = 256 + // EventTypeNone as defined in nvml/nvml.h + EventTypeNone = 0 + // EventTypeAll as defined in nvml/nvml.h + EventTypeAll = 415 + // ClocksThrottleReasonGpuIdle as defined in nvml/nvml.h + ClocksThrottleReasonGpuIdle = 1 + // ClocksThrottleReasonApplicationsClocksSetting as defined in nvml/nvml.h + ClocksThrottleReasonApplicationsClocksSetting = 2 + // ClocksThrottleReasonUserDefinedClocks as defined in nvml/nvml.h + ClocksThrottleReasonUserDefinedClocks = 2 + // ClocksThrottleReasonSwPowerCap as defined in nvml/nvml.h + ClocksThrottleReasonSwPowerCap = 4 + // ClocksThrottleReasonHwSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonHwSlowdown = 8 + // ClocksThrottleReasonSyncBoost as defined in nvml/nvml.h + ClocksThrottleReasonSyncBoost = 16 + // ClocksThrottleReasonSwThermalSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonSwThermalSlowdown = 32 + // ClocksThrottleReasonHwThermalSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonHwThermalSlowdown = 64 + // ClocksThrottleReasonHwPowerBrakeSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonHwPowerBrakeSlowdown = 128 + // ClocksThrottleReasonDisplayClockSetting as defined in nvml/nvml.h + ClocksThrottleReasonDisplayClockSetting = 256 + // ClocksThrottleReasonNone as defined in nvml/nvml.h + ClocksThrottleReasonNone = 0 + // ClocksThrottleReasonAll as defined in nvml/nvml.h + ClocksThrottleReasonAll = 511 + // NVFBC_SESSION_FLAG_DIFFMAP_ENABLED as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_DIFFMAP_ENABLED = 1 + // NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED = 2 + // NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT = 4 + // NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE = 8 + // NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT = 16 + // INIT_FLAG_NO_GPUS as defined in nvml/nvml.h + INIT_FLAG_NO_GPUS = 1 + // INIT_FLAG_NO_ATTACH as defined in nvml/nvml.h + INIT_FLAG_NO_ATTACH = 2 + // DEVICE_INFOROM_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_INFOROM_VERSION_BUFFER_SIZE = 16 + // DEVICE_UUID_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_UUID_BUFFER_SIZE = 80 + // DEVICE_UUID_V2_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_UUID_V2_BUFFER_SIZE = 96 + // DEVICE_PART_NUMBER_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_PART_NUMBER_BUFFER_SIZE = 80 + // SYSTEM_DRIVER_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + SYSTEM_DRIVER_VERSION_BUFFER_SIZE = 80 + // SYSTEM_NVML_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + SYSTEM_NVML_VERSION_BUFFER_SIZE = 80 + // DEVICE_NAME_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_NAME_BUFFER_SIZE = 64 + // DEVICE_NAME_V2_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_NAME_V2_BUFFER_SIZE = 96 + // DEVICE_SERIAL_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_SERIAL_BUFFER_SIZE = 30 + // DEVICE_VBIOS_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_VBIOS_VERSION_BUFFER_SIZE = 32 + // AFFINITY_SCOPE_NODE as defined in nvml/nvml.h + AFFINITY_SCOPE_NODE = 0 + // AFFINITY_SCOPE_SOCKET as defined in nvml/nvml.h + AFFINITY_SCOPE_SOCKET = 1 + // DEVICE_MIG_DISABLE as defined in nvml/nvml.h + DEVICE_MIG_DISABLE = 0 + // 
DEVICE_MIG_ENABLE as defined in nvml/nvml.h + DEVICE_MIG_ENABLE = 1 + // GPU_INSTANCE_PROFILE_1_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_1_SLICE = 0 + // GPU_INSTANCE_PROFILE_2_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_2_SLICE = 1 + // GPU_INSTANCE_PROFILE_3_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_3_SLICE = 2 + // GPU_INSTANCE_PROFILE_4_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_4_SLICE = 3 + // GPU_INSTANCE_PROFILE_7_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_7_SLICE = 4 + // GPU_INSTANCE_PROFILE_8_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_8_SLICE = 5 + // GPU_INSTANCE_PROFILE_6_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_6_SLICE = 6 + // GPU_INSTANCE_PROFILE_1_SLICE_REV1 as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_1_SLICE_REV1 = 7 + // GPU_INSTANCE_PROFILE_COUNT as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_COUNT = 8 + // COMPUTE_INSTANCE_PROFILE_1_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_1_SLICE = 0 + // COMPUTE_INSTANCE_PROFILE_2_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_2_SLICE = 1 + // COMPUTE_INSTANCE_PROFILE_3_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_3_SLICE = 2 + // COMPUTE_INSTANCE_PROFILE_4_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_4_SLICE = 3 + // COMPUTE_INSTANCE_PROFILE_7_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_7_SLICE = 4 + // COMPUTE_INSTANCE_PROFILE_8_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_8_SLICE = 5 + // COMPUTE_INSTANCE_PROFILE_6_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_6_SLICE = 6 + // COMPUTE_INSTANCE_PROFILE_COUNT as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_COUNT = 7 + // COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED as defined in nvml/nvml.h + COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED = 0 + // COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT as defined in nvml/nvml.h + COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT = 1 +) + +// BridgeChipType as declared in nvml/nvml.h +type BridgeChipType int32 + +// BridgeChipType enumeration from nvml/nvml.h +const ( + BRIDGE_CHIP_PLX BridgeChipType = iota + BRIDGE_CHIP_BRO4 BridgeChipType = 1 +) + +// NvLinkUtilizationCountUnits as declared in nvml/nvml.h +type NvLinkUtilizationCountUnits int32 + +// NvLinkUtilizationCountUnits enumeration from nvml/nvml.h +const ( + NVLINK_COUNTER_UNIT_CYCLES NvLinkUtilizationCountUnits = iota + NVLINK_COUNTER_UNIT_PACKETS NvLinkUtilizationCountUnits = 1 + NVLINK_COUNTER_UNIT_BYTES NvLinkUtilizationCountUnits = 2 + NVLINK_COUNTER_UNIT_RESERVED NvLinkUtilizationCountUnits = 3 + NVLINK_COUNTER_UNIT_COUNT NvLinkUtilizationCountUnits = 4 +) + +// NvLinkUtilizationCountPktTypes as declared in nvml/nvml.h +type NvLinkUtilizationCountPktTypes int32 + +// NvLinkUtilizationCountPktTypes enumeration from nvml/nvml.h +const ( + NVLINK_COUNTER_PKTFILTER_NOP NvLinkUtilizationCountPktTypes = 1 + NVLINK_COUNTER_PKTFILTER_READ NvLinkUtilizationCountPktTypes = 2 + NVLINK_COUNTER_PKTFILTER_WRITE NvLinkUtilizationCountPktTypes = 4 + NVLINK_COUNTER_PKTFILTER_RATOM NvLinkUtilizationCountPktTypes = 8 + NVLINK_COUNTER_PKTFILTER_NRATOM NvLinkUtilizationCountPktTypes = 16 + NVLINK_COUNTER_PKTFILTER_FLUSH NvLinkUtilizationCountPktTypes = 32 + NVLINK_COUNTER_PKTFILTER_RESPDATA NvLinkUtilizationCountPktTypes = 64 + NVLINK_COUNTER_PKTFILTER_RESPNODATA NvLinkUtilizationCountPktTypes = 128 + NVLINK_COUNTER_PKTFILTER_ALL NvLinkUtilizationCountPktTypes = 255 +) + +// NvLinkCapability as declared in nvml/nvml.h +type 
NvLinkCapability int32 + +// NvLinkCapability enumeration from nvml/nvml.h +const ( + NVLINK_CAP_P2P_SUPPORTED NvLinkCapability = iota + NVLINK_CAP_SYSMEM_ACCESS NvLinkCapability = 1 + NVLINK_CAP_P2P_ATOMICS NvLinkCapability = 2 + NVLINK_CAP_SYSMEM_ATOMICS NvLinkCapability = 3 + NVLINK_CAP_SLI_BRIDGE NvLinkCapability = 4 + NVLINK_CAP_VALID NvLinkCapability = 5 + NVLINK_CAP_COUNT NvLinkCapability = 6 +) + +// NvLinkErrorCounter as declared in nvml/nvml.h +type NvLinkErrorCounter int32 + +// NvLinkErrorCounter enumeration from nvml/nvml.h +const ( + NVLINK_ERROR_DL_REPLAY NvLinkErrorCounter = iota + NVLINK_ERROR_DL_RECOVERY NvLinkErrorCounter = 1 + NVLINK_ERROR_DL_CRC_FLIT NvLinkErrorCounter = 2 + NVLINK_ERROR_DL_CRC_DATA NvLinkErrorCounter = 3 + NVLINK_ERROR_DL_ECC_DATA NvLinkErrorCounter = 4 + NVLINK_ERROR_COUNT NvLinkErrorCounter = 5 +) + +// IntNvLinkDeviceType as declared in nvml/nvml.h +type IntNvLinkDeviceType int32 + +// IntNvLinkDeviceType enumeration from nvml/nvml.h +const ( + NVLINK_DEVICE_TYPE_GPU IntNvLinkDeviceType = iota + NVLINK_DEVICE_TYPE_IBMNPU IntNvLinkDeviceType = 1 + NVLINK_DEVICE_TYPE_SWITCH IntNvLinkDeviceType = 2 + NVLINK_DEVICE_TYPE_UNKNOWN IntNvLinkDeviceType = 255 +) + +// GpuTopologyLevel as declared in nvml/nvml.h +type GpuTopologyLevel int32 + +// GpuTopologyLevel enumeration from nvml/nvml.h +const ( + TOPOLOGY_INTERNAL GpuTopologyLevel = iota + TOPOLOGY_SINGLE GpuTopologyLevel = 10 + TOPOLOGY_MULTIPLE GpuTopologyLevel = 20 + TOPOLOGY_HOSTBRIDGE GpuTopologyLevel = 30 + TOPOLOGY_NODE GpuTopologyLevel = 40 + TOPOLOGY_SYSTEM GpuTopologyLevel = 50 +) + +// GpuP2PStatus as declared in nvml/nvml.h +type GpuP2PStatus int32 + +// GpuP2PStatus enumeration from nvml/nvml.h +const ( + P2P_STATUS_OK GpuP2PStatus = iota + P2P_STATUS_CHIPSET_NOT_SUPPORED GpuP2PStatus = 1 + P2P_STATUS_GPU_NOT_SUPPORTED GpuP2PStatus = 2 + P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED GpuP2PStatus = 3 + P2P_STATUS_DISABLED_BY_REGKEY GpuP2PStatus = 4 + P2P_STATUS_NOT_SUPPORTED GpuP2PStatus = 5 + P2P_STATUS_UNKNOWN GpuP2PStatus = 6 +) + +// GpuP2PCapsIndex as declared in nvml/nvml.h +type GpuP2PCapsIndex int32 + +// GpuP2PCapsIndex enumeration from nvml/nvml.h +const ( + P2P_CAPS_INDEX_READ GpuP2PCapsIndex = iota + P2P_CAPS_INDEX_WRITE GpuP2PCapsIndex = 1 + P2P_CAPS_INDEX_NVLINK GpuP2PCapsIndex = 2 + P2P_CAPS_INDEX_ATOMICS GpuP2PCapsIndex = 3 + P2P_CAPS_INDEX_PROP GpuP2PCapsIndex = 4 + P2P_CAPS_INDEX_UNKNOWN GpuP2PCapsIndex = 5 +) + +// SamplingType as declared in nvml/nvml.h +type SamplingType int32 + +// SamplingType enumeration from nvml/nvml.h +const ( + TOTAL_POWER_SAMPLES SamplingType = iota + GPU_UTILIZATION_SAMPLES SamplingType = 1 + MEMORY_UTILIZATION_SAMPLES SamplingType = 2 + ENC_UTILIZATION_SAMPLES SamplingType = 3 + DEC_UTILIZATION_SAMPLES SamplingType = 4 + PROCESSOR_CLK_SAMPLES SamplingType = 5 + MEMORY_CLK_SAMPLES SamplingType = 6 + SAMPLINGTYPE_COUNT SamplingType = 7 +) + +// PcieUtilCounter as declared in nvml/nvml.h +type PcieUtilCounter int32 + +// PcieUtilCounter enumeration from nvml/nvml.h +const ( + PCIE_UTIL_TX_BYTES PcieUtilCounter = iota + PCIE_UTIL_RX_BYTES PcieUtilCounter = 1 + PCIE_UTIL_COUNT PcieUtilCounter = 2 +) + +// ValueType as declared in nvml/nvml.h +type ValueType int32 + +// ValueType enumeration from nvml/nvml.h +const ( + VALUE_TYPE_DOUBLE ValueType = iota + VALUE_TYPE_UNSIGNED_INT ValueType = 1 + VALUE_TYPE_UNSIGNED_LONG ValueType = 2 + VALUE_TYPE_UNSIGNED_LONG_LONG ValueType = 3 + VALUE_TYPE_SIGNED_LONG_LONG ValueType = 4 + VALUE_TYPE_COUNT ValueType = 5 +) + 
+// PerfPolicyType as declared in nvml/nvml.h +type PerfPolicyType int32 + +// PerfPolicyType enumeration from nvml/nvml.h +const ( + PERF_POLICY_POWER PerfPolicyType = iota + PERF_POLICY_THERMAL PerfPolicyType = 1 + PERF_POLICY_SYNC_BOOST PerfPolicyType = 2 + PERF_POLICY_BOARD_LIMIT PerfPolicyType = 3 + PERF_POLICY_LOW_UTILIZATION PerfPolicyType = 4 + PERF_POLICY_RELIABILITY PerfPolicyType = 5 + PERF_POLICY_TOTAL_APP_CLOCKS PerfPolicyType = 10 + PERF_POLICY_TOTAL_BASE_CLOCKS PerfPolicyType = 11 + PERF_POLICY_COUNT PerfPolicyType = 12 +) + +// EnableState as declared in nvml/nvml.h +type EnableState int32 + +// EnableState enumeration from nvml/nvml.h +const ( + FEATURE_DISABLED EnableState = iota + FEATURE_ENABLED EnableState = 1 +) + +// BrandType as declared in nvml/nvml.h +type BrandType int32 + +// BrandType enumeration from nvml/nvml.h +const ( + BRAND_UNKNOWN BrandType = iota + BRAND_QUADRO BrandType = 1 + BRAND_TESLA BrandType = 2 + BRAND_NVS BrandType = 3 + BRAND_GRID BrandType = 4 + BRAND_GEFORCE BrandType = 5 + BRAND_TITAN BrandType = 6 + BRAND_NVIDIA_VAPPS BrandType = 7 + BRAND_NVIDIA_VPC BrandType = 8 + BRAND_NVIDIA_VCS BrandType = 9 + BRAND_NVIDIA_VWS BrandType = 10 + BRAND_NVIDIA_CLOUD_GAMING BrandType = 11 + BRAND_NVIDIA_VGAMING BrandType = 11 + BRAND_QUADRO_RTX BrandType = 12 + BRAND_NVIDIA_RTX BrandType = 13 + BRAND_NVIDIA BrandType = 14 + BRAND_GEFORCE_RTX BrandType = 15 + BRAND_TITAN_RTX BrandType = 16 + BRAND_COUNT BrandType = 17 +) + +// TemperatureThresholds as declared in nvml/nvml.h +type TemperatureThresholds int32 + +// TemperatureThresholds enumeration from nvml/nvml.h +const ( + TEMPERATURE_THRESHOLD_SHUTDOWN TemperatureThresholds = iota + TEMPERATURE_THRESHOLD_SLOWDOWN TemperatureThresholds = 1 + TEMPERATURE_THRESHOLD_MEM_MAX TemperatureThresholds = 2 + TEMPERATURE_THRESHOLD_GPU_MAX TemperatureThresholds = 3 + TEMPERATURE_THRESHOLD_ACOUSTIC_MIN TemperatureThresholds = 4 + TEMPERATURE_THRESHOLD_ACOUSTIC_CURR TemperatureThresholds = 5 + TEMPERATURE_THRESHOLD_ACOUSTIC_MAX TemperatureThresholds = 6 + TEMPERATURE_THRESHOLD_COUNT TemperatureThresholds = 7 +) + +// TemperatureSensors as declared in nvml/nvml.h +type TemperatureSensors int32 + +// TemperatureSensors enumeration from nvml/nvml.h +const ( + TEMPERATURE_GPU TemperatureSensors = iota + TEMPERATURE_COUNT TemperatureSensors = 1 +) + +// ComputeMode as declared in nvml/nvml.h +type ComputeMode int32 + +// ComputeMode enumeration from nvml/nvml.h +const ( + COMPUTEMODE_DEFAULT ComputeMode = iota + COMPUTEMODE_EXCLUSIVE_THREAD ComputeMode = 1 + COMPUTEMODE_PROHIBITED ComputeMode = 2 + COMPUTEMODE_EXCLUSIVE_PROCESS ComputeMode = 3 + COMPUTEMODE_COUNT ComputeMode = 4 +) + +// MemoryErrorType as declared in nvml/nvml.h +type MemoryErrorType int32 + +// MemoryErrorType enumeration from nvml/nvml.h +const ( + MEMORY_ERROR_TYPE_CORRECTED MemoryErrorType = iota + MEMORY_ERROR_TYPE_UNCORRECTED MemoryErrorType = 1 + MEMORY_ERROR_TYPE_COUNT MemoryErrorType = 2 +) + +// EccCounterType as declared in nvml/nvml.h +type EccCounterType int32 + +// EccCounterType enumeration from nvml/nvml.h +const ( + VOLATILE_ECC EccCounterType = iota + AGGREGATE_ECC EccCounterType = 1 + ECC_COUNTER_TYPE_COUNT EccCounterType = 2 +) + +// ClockType as declared in nvml/nvml.h +type ClockType int32 + +// ClockType enumeration from nvml/nvml.h +const ( + CLOCK_GRAPHICS ClockType = iota + CLOCK_SM ClockType = 1 + CLOCK_MEM ClockType = 2 + CLOCK_VIDEO ClockType = 3 + CLOCK_COUNT ClockType = 4 +) + +// ClockId as declared in nvml/nvml.h +type 
ClockId int32 + +// ClockId enumeration from nvml/nvml.h +const ( + CLOCK_ID_CURRENT ClockId = iota + CLOCK_ID_APP_CLOCK_TARGET ClockId = 1 + CLOCK_ID_APP_CLOCK_DEFAULT ClockId = 2 + CLOCK_ID_CUSTOMER_BOOST_MAX ClockId = 3 + CLOCK_ID_COUNT ClockId = 4 +) + +// DriverModel as declared in nvml/nvml.h +type DriverModel int32 + +// DriverModel enumeration from nvml/nvml.h +const ( + DRIVER_WDDM DriverModel = iota + DRIVER_WDM DriverModel = 1 +) + +// Pstates as declared in nvml/nvml.h +type Pstates int32 + +// Pstates enumeration from nvml/nvml.h +const ( + PSTATE_0 Pstates = iota + PSTATE_1 Pstates = 1 + PSTATE_2 Pstates = 2 + PSTATE_3 Pstates = 3 + PSTATE_4 Pstates = 4 + PSTATE_5 Pstates = 5 + PSTATE_6 Pstates = 6 + PSTATE_7 Pstates = 7 + PSTATE_8 Pstates = 8 + PSTATE_9 Pstates = 9 + PSTATE_10 Pstates = 10 + PSTATE_11 Pstates = 11 + PSTATE_12 Pstates = 12 + PSTATE_13 Pstates = 13 + PSTATE_14 Pstates = 14 + PSTATE_15 Pstates = 15 + PSTATE_UNKNOWN Pstates = 32 +) + +// GpuOperationMode as declared in nvml/nvml.h +type GpuOperationMode int32 + +// GpuOperationMode enumeration from nvml/nvml.h +const ( + GOM_ALL_ON GpuOperationMode = iota + GOM_COMPUTE GpuOperationMode = 1 + GOM_LOW_DP GpuOperationMode = 2 +) + +// InforomObject as declared in nvml/nvml.h +type InforomObject int32 + +// InforomObject enumeration from nvml/nvml.h +const ( + INFOROM_OEM InforomObject = iota + INFOROM_ECC InforomObject = 1 + INFOROM_POWER InforomObject = 2 + INFOROM_COUNT InforomObject = 3 +) + +// Return as declared in nvml/nvml.h +type Return int32 + +// Return enumeration from nvml/nvml.h +const ( + SUCCESS Return = iota + ERROR_UNINITIALIZED Return = 1 + ERROR_INVALID_ARGUMENT Return = 2 + ERROR_NOT_SUPPORTED Return = 3 + ERROR_NO_PERMISSION Return = 4 + ERROR_ALREADY_INITIALIZED Return = 5 + ERROR_NOT_FOUND Return = 6 + ERROR_INSUFFICIENT_SIZE Return = 7 + ERROR_INSUFFICIENT_POWER Return = 8 + ERROR_DRIVER_NOT_LOADED Return = 9 + ERROR_TIMEOUT Return = 10 + ERROR_IRQ_ISSUE Return = 11 + ERROR_LIBRARY_NOT_FOUND Return = 12 + ERROR_FUNCTION_NOT_FOUND Return = 13 + ERROR_CORRUPTED_INFOROM Return = 14 + ERROR_GPU_IS_LOST Return = 15 + ERROR_RESET_REQUIRED Return = 16 + ERROR_OPERATING_SYSTEM Return = 17 + ERROR_LIB_RM_VERSION_MISMATCH Return = 18 + ERROR_IN_USE Return = 19 + ERROR_MEMORY Return = 20 + ERROR_NO_DATA Return = 21 + ERROR_VGPU_ECC_NOT_SUPPORTED Return = 22 + ERROR_INSUFFICIENT_RESOURCES Return = 23 + ERROR_FREQ_NOT_SUPPORTED Return = 24 + ERROR_UNKNOWN Return = 999 +) + +// MemoryLocation as declared in nvml/nvml.h +type MemoryLocation int32 + +// MemoryLocation enumeration from nvml/nvml.h +const ( + MEMORY_LOCATION_L1_CACHE MemoryLocation = iota + MEMORY_LOCATION_L2_CACHE MemoryLocation = 1 + MEMORY_LOCATION_DRAM MemoryLocation = 2 + MEMORY_LOCATION_DEVICE_MEMORY MemoryLocation = 2 + MEMORY_LOCATION_REGISTER_FILE MemoryLocation = 3 + MEMORY_LOCATION_TEXTURE_MEMORY MemoryLocation = 4 + MEMORY_LOCATION_TEXTURE_SHM MemoryLocation = 5 + MEMORY_LOCATION_CBU MemoryLocation = 6 + MEMORY_LOCATION_SRAM MemoryLocation = 7 + MEMORY_LOCATION_COUNT MemoryLocation = 8 +) + +// PageRetirementCause as declared in nvml/nvml.h +type PageRetirementCause int32 + +// PageRetirementCause enumeration from nvml/nvml.h +const ( + PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS PageRetirementCause = iota + PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR PageRetirementCause = 1 + PAGE_RETIREMENT_CAUSE_COUNT PageRetirementCause = 2 +) + +// RestrictedAPI as declared in nvml/nvml.h +type RestrictedAPI int32 + +// RestrictedAPI 
enumeration from nvml/nvml.h +const ( + RESTRICTED_API_SET_APPLICATION_CLOCKS RestrictedAPI = iota + RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS RestrictedAPI = 1 + RESTRICTED_API_COUNT RestrictedAPI = 2 +) + +// NvLinkEccLaneErrorCounter as declared in nvml/nvml.h +type NvLinkEccLaneErrorCounter int32 + +// NvLinkEccLaneErrorCounter enumeration from nvml/nvml.h +const ( + NVLINK_ERROR_DL_ECC_LANE0 NvLinkEccLaneErrorCounter = iota + NVLINK_ERROR_DL_ECC_LANE1 NvLinkEccLaneErrorCounter = 1 + NVLINK_ERROR_DL_ECC_LANE2 NvLinkEccLaneErrorCounter = 2 + NVLINK_ERROR_DL_ECC_LANE3 NvLinkEccLaneErrorCounter = 3 + NVLINK_ERROR_DL_ECC_COUNT NvLinkEccLaneErrorCounter = 4 +) + +// GpuVirtualizationMode as declared in nvml/nvml.h +type GpuVirtualizationMode int32 + +// GpuVirtualizationMode enumeration from nvml/nvml.h +const ( + GPU_VIRTUALIZATION_MODE_NONE GpuVirtualizationMode = iota + GPU_VIRTUALIZATION_MODE_PASSTHROUGH GpuVirtualizationMode = 1 + GPU_VIRTUALIZATION_MODE_VGPU GpuVirtualizationMode = 2 + GPU_VIRTUALIZATION_MODE_HOST_VGPU GpuVirtualizationMode = 3 + GPU_VIRTUALIZATION_MODE_HOST_VSGA GpuVirtualizationMode = 4 +) + +// HostVgpuMode as declared in nvml/nvml.h +type HostVgpuMode int32 + +// HostVgpuMode enumeration from nvml/nvml.h +const ( + HOST_VGPU_MODE_NON_SRIOV HostVgpuMode = iota + HOST_VGPU_MODE_SRIOV HostVgpuMode = 1 +) + +// VgpuVmIdType as declared in nvml/nvml.h +type VgpuVmIdType int32 + +// VgpuVmIdType enumeration from nvml/nvml.h +const ( + VGPU_VM_ID_DOMAIN_ID VgpuVmIdType = iota + VGPU_VM_ID_UUID VgpuVmIdType = 1 +) + +// VgpuGuestInfoState as declared in nvml/nvml.h +type VgpuGuestInfoState int32 + +// VgpuGuestInfoState enumeration from nvml/nvml.h +const ( + VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED VgpuGuestInfoState = iota + VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED VgpuGuestInfoState = 1 +) + +// FanState as declared in nvml/nvml.h +type FanState int32 + +// FanState enumeration from nvml/nvml.h +const ( + FAN_NORMAL FanState = iota + FAN_FAILED FanState = 1 +) + +// LedColor as declared in nvml/nvml.h +type LedColor int32 + +// LedColor enumeration from nvml/nvml.h +const ( + LED_COLOR_GREEN LedColor = iota + LED_COLOR_AMBER LedColor = 1 +) + +// EncoderType as declared in nvml/nvml.h +type EncoderType int32 + +// EncoderType enumeration from nvml/nvml.h +const ( + ENCODER_QUERY_H264 EncoderType = iota + ENCODER_QUERY_HEVC EncoderType = 1 +) + +// FBCSessionType as declared in nvml/nvml.h +type FBCSessionType int32 + +// FBCSessionType enumeration from nvml/nvml.h +const ( + FBC_SESSION_TYPE_UNKNOWN FBCSessionType = iota + FBC_SESSION_TYPE_TOSYS FBCSessionType = 1 + FBC_SESSION_TYPE_CUDA FBCSessionType = 2 + FBC_SESSION_TYPE_VID FBCSessionType = 3 + FBC_SESSION_TYPE_HWENC FBCSessionType = 4 +) + +// DetachGpuState as declared in nvml/nvml.h +type DetachGpuState int32 + +// DetachGpuState enumeration from nvml/nvml.h +const ( + DETACH_GPU_KEEP DetachGpuState = iota + DETACH_GPU_REMOVE DetachGpuState = 1 +) + +// PcieLinkState as declared in nvml/nvml.h +type PcieLinkState int32 + +// PcieLinkState enumeration from nvml/nvml.h +const ( + PCIE_LINK_KEEP PcieLinkState = iota + PCIE_LINK_SHUT_DOWN PcieLinkState = 1 +) + +// ClockLimitId as declared in nvml/nvml.h +type ClockLimitId int32 + +// ClockLimitId enumeration from nvml/nvml.h +const ( + CLOCK_LIMIT_ID_RANGE_START ClockLimitId = -256 + CLOCK_LIMIT_ID_TDP ClockLimitId = -255 + CLOCK_LIMIT_ID_UNLIMITED ClockLimitId = -254 +) + +// VgpuVmCompatibility as declared in nvml/nvml.h +type VgpuVmCompatibility int32 + +// 
VgpuVmCompatibility enumeration from nvml/nvml.h +const ( + VGPU_VM_COMPATIBILITY_NONE VgpuVmCompatibility = iota + VGPU_VM_COMPATIBILITY_COLD VgpuVmCompatibility = 1 + VGPU_VM_COMPATIBILITY_HIBERNATE VgpuVmCompatibility = 2 + VGPU_VM_COMPATIBILITY_SLEEP VgpuVmCompatibility = 4 + VGPU_VM_COMPATIBILITY_LIVE VgpuVmCompatibility = 8 +) + +// VgpuPgpuCompatibilityLimitCode as declared in nvml/nvml.h +type VgpuPgpuCompatibilityLimitCode int32 + +// VgpuPgpuCompatibilityLimitCode enumeration from nvml/nvml.h +const ( + VGPU_COMPATIBILITY_LIMIT_NONE VgpuPgpuCompatibilityLimitCode = iota + VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER VgpuPgpuCompatibilityLimitCode = 1 + VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER VgpuPgpuCompatibilityLimitCode = 2 + VGPU_COMPATIBILITY_LIMIT_GPU VgpuPgpuCompatibilityLimitCode = 4 + VGPU_COMPATIBILITY_LIMIT_OTHER VgpuPgpuCompatibilityLimitCode = -2147483648 +) + +// GridLicenseFeatureCode as declared in nvml/nvml.h +type GridLicenseFeatureCode int32 + +// GridLicenseFeatureCode enumeration from nvml/nvml.h +const ( + GRID_LICENSE_FEATURE_CODE_UNKNOWN GridLicenseFeatureCode = iota + GRID_LICENSE_FEATURE_CODE_VGPU GridLicenseFeatureCode = 1 + GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX GridLicenseFeatureCode = 2 + GRID_LICENSE_FEATURE_CODE_VWORKSTATION GridLicenseFeatureCode = 2 + GRID_LICENSE_FEATURE_CODE_GAMING GridLicenseFeatureCode = 3 + GRID_LICENSE_FEATURE_CODE_COMPUTE GridLicenseFeatureCode = 4 +) diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const_gen.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const_gen.go new file mode 100644 index 0000000..9038b31 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const_gen.go @@ -0,0 +1,27 @@ +// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +import ( + "reflect" +) + +const ( + SYSTEM_PROCESS_NAME_BUFFER_SIZE = 256 +) + +func STRUCT_VERSION(data interface{}, version uint32) uint32 { + return uint32(uint32(reflect.Indirect(reflect.ValueOf(data)).Type().Size()) | (version << uint32(24))) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go new file mode 100644 index 0000000..546261c --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go @@ -0,0 +1,2288 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nvml + +import ( + "unsafe" +) + +// EccBitType +type EccBitType = MemoryErrorType + +// nvml.DeviceGetCount() +func DeviceGetCount() (int, Return) { + var DeviceCount uint32 + ret := nvmlDeviceGetCount(&DeviceCount) + return int(DeviceCount), ret +} + +// nvml.DeviceGetHandleByIndex() +func DeviceGetHandleByIndex(Index int) (Device, Return) { + var Device Device + ret := nvmlDeviceGetHandleByIndex(uint32(Index), &Device) + return Device, ret +} + +// nvml.DeviceGetHandleBySerial() +func DeviceGetHandleBySerial(Serial string) (Device, Return) { + var Device Device + ret := nvmlDeviceGetHandleBySerial(Serial, &Device) + return Device, ret +} + +// nvml.DeviceGetHandleByUUID() +func DeviceGetHandleByUUID(Uuid string) (Device, Return) { + var Device Device + ret := nvmlDeviceGetHandleByUUID(Uuid, &Device) + return Device, ret +} + +// nvml.DeviceGetHandleByPciBusId() +func DeviceGetHandleByPciBusId(PciBusId string) (Device, Return) { + var Device Device + ret := nvmlDeviceGetHandleByPciBusId(PciBusId, &Device) + return Device, ret +} + +// nvml.DeviceGetName() +func DeviceGetName(Device Device) (string, Return) { + Name := make([]byte, DEVICE_NAME_V2_BUFFER_SIZE) + ret := nvmlDeviceGetName(Device, &Name[0], DEVICE_NAME_V2_BUFFER_SIZE) + return string(Name[:clen(Name)]), ret +} + +func (Device Device) GetName() (string, Return) { + return DeviceGetName(Device) +} + +// nvml.DeviceGetBrand() +func DeviceGetBrand(Device Device) (BrandType, Return) { + var _type BrandType + ret := nvmlDeviceGetBrand(Device, &_type) + return _type, ret +} + +func (Device Device) GetBrand() (BrandType, Return) { + return DeviceGetBrand(Device) +} + +// nvml.DeviceGetIndex() +func DeviceGetIndex(Device Device) (int, Return) { + var Index uint32 + ret := nvmlDeviceGetIndex(Device, &Index) + return int(Index), ret +} + +func (Device Device) GetIndex() (int, Return) { + return DeviceGetIndex(Device) +} + +// nvml.DeviceGetSerial() +func DeviceGetSerial(Device Device) (string, Return) { + Serial := make([]byte, DEVICE_SERIAL_BUFFER_SIZE) + ret := nvmlDeviceGetSerial(Device, &Serial[0], DEVICE_SERIAL_BUFFER_SIZE) + return string(Serial[:clen(Serial)]), ret +} + +func (Device Device) GetSerial() (string, Return) { + return DeviceGetSerial(Device) +} + +// nvml.DeviceGetCpuAffinity() +func DeviceGetCpuAffinity(Device Device, NumCPUs int) ([]uint, Return) { + CpuSetSize := uint32((NumCPUs-1)/int(unsafe.Sizeof(uint(0))) + 1) + CpuSet := make([]uint, CpuSetSize) + ret := nvmlDeviceGetCpuAffinity(Device, CpuSetSize, &CpuSet[0]) + return CpuSet, ret +} + +func (Device Device) GetCpuAffinity(NumCPUs int) ([]uint, Return) { + return DeviceGetCpuAffinity(Device, NumCPUs) +} + +// nvml.DeviceSetCpuAffinity() +func DeviceSetCpuAffinity(Device Device) Return { + return nvmlDeviceSetCpuAffinity(Device) +} + +func (Device Device) SetCpuAffinity() Return { + return DeviceSetCpuAffinity(Device) +} + +// nvml.DeviceClearCpuAffinity() +func DeviceClearCpuAffinity(Device Device) Return { + return nvmlDeviceClearCpuAffinity(Device) +} + +func (Device Device) ClearCpuAffinity() Return { + return DeviceClearCpuAffinity(Device) +} + +// nvml.DeviceGetMemoryAffinity() +func DeviceGetMemoryAffinity(Device Device, NumNodes int, Scope AffinityScope) ([]uint, Return) { + NodeSetSize := uint32((NumNodes-1)/int(unsafe.Sizeof(uint(0))) + 1) + NodeSet := make([]uint, NodeSetSize) + ret := nvmlDeviceGetMemoryAffinity(Device, NodeSetSize, &NodeSet[0], Scope) + return NodeSet, ret +} + +func (Device Device) GetMemoryAffinity(NumNodes int, Scope 
AffinityScope) ([]uint, Return) { + return DeviceGetMemoryAffinity(Device, NumNodes, Scope) +} + +// nvml.DeviceGetCpuAffinityWithinScope() +func DeviceGetCpuAffinityWithinScope(Device Device, NumCPUs int, Scope AffinityScope) ([]uint, Return) { + CpuSetSize := uint32((NumCPUs-1)/int(unsafe.Sizeof(uint(0))) + 1) + CpuSet := make([]uint, CpuSetSize) + ret := nvmlDeviceGetCpuAffinityWithinScope(Device, CpuSetSize, &CpuSet[0], Scope) + return CpuSet, ret +} + +func (Device Device) GetCpuAffinityWithinScope(NumCPUs int, Scope AffinityScope) ([]uint, Return) { + return DeviceGetCpuAffinityWithinScope(Device, NumCPUs, Scope) +} + +// nvml.DeviceGetTopologyCommonAncestor() +func DeviceGetTopologyCommonAncestor(Device1 Device, Device2 Device) (GpuTopologyLevel, Return) { + var PathInfo GpuTopologyLevel + ret := nvmlDeviceGetTopologyCommonAncestor(Device1, Device2, &PathInfo) + return PathInfo, ret +} + +func (Device1 Device) GetTopologyCommonAncestor(Device2 Device) (GpuTopologyLevel, Return) { + return DeviceGetTopologyCommonAncestor(Device1, Device2) +} + +// nvml.DeviceGetTopologyNearestGpus() +func DeviceGetTopologyNearestGpus(device Device, Level GpuTopologyLevel) ([]Device, Return) { + var Count uint32 + ret := nvmlDeviceGetTopologyNearestGpus(device, Level, &Count, nil) + if ret != SUCCESS { + return nil, ret + } + if Count == 0 { + return []Device{}, ret + } + DeviceArray := make([]Device, Count) + ret = nvmlDeviceGetTopologyNearestGpus(device, Level, &Count, &DeviceArray[0]) + return DeviceArray, ret +} + +func (Device Device) GetTopologyNearestGpus(Level GpuTopologyLevel) ([]Device, Return) { + return DeviceGetTopologyNearestGpus(Device, Level) +} + +// nvml.DeviceGetP2PStatus() +func DeviceGetP2PStatus(Device1 Device, Device2 Device, P2pIndex GpuP2PCapsIndex) (GpuP2PStatus, Return) { + var P2pStatus GpuP2PStatus + ret := nvmlDeviceGetP2PStatus(Device1, Device2, P2pIndex, &P2pStatus) + return P2pStatus, ret +} + +func (Device1 Device) GetP2PStatus(Device2 Device, P2pIndex GpuP2PCapsIndex) (GpuP2PStatus, Return) { + return DeviceGetP2PStatus(Device1, Device2, P2pIndex) +} + +// nvml.DeviceGetUUID() +func DeviceGetUUID(Device Device) (string, Return) { + Uuid := make([]byte, DEVICE_UUID_V2_BUFFER_SIZE) + ret := nvmlDeviceGetUUID(Device, &Uuid[0], DEVICE_UUID_V2_BUFFER_SIZE) + return string(Uuid[:clen(Uuid)]), ret +} + +func (Device Device) GetUUID() (string, Return) { + return DeviceGetUUID(Device) +} + +// nvml.DeviceGetMinorNumber() +func DeviceGetMinorNumber(Device Device) (int, Return) { + var MinorNumber uint32 + ret := nvmlDeviceGetMinorNumber(Device, &MinorNumber) + return int(MinorNumber), ret +} + +func (Device Device) GetMinorNumber() (int, Return) { + return DeviceGetMinorNumber(Device) +} + +// nvml.DeviceGetBoardPartNumber() +func DeviceGetBoardPartNumber(Device Device) (string, Return) { + PartNumber := make([]byte, DEVICE_PART_NUMBER_BUFFER_SIZE) + ret := nvmlDeviceGetBoardPartNumber(Device, &PartNumber[0], DEVICE_PART_NUMBER_BUFFER_SIZE) + return string(PartNumber[:clen(PartNumber)]), ret +} + +func (Device Device) GetBoardPartNumber() (string, Return) { + return DeviceGetBoardPartNumber(Device) +} + +// nvml.DeviceGetInforomVersion() +func DeviceGetInforomVersion(Device Device, Object InforomObject) (string, Return) { + Version := make([]byte, DEVICE_INFOROM_VERSION_BUFFER_SIZE) + ret := nvmlDeviceGetInforomVersion(Device, Object, &Version[0], DEVICE_INFOROM_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +func (Device Device) 
GetInforomVersion(Object InforomObject) (string, Return) { + return DeviceGetInforomVersion(Device, Object) +} + +// nvml.DeviceGetInforomImageVersion() +func DeviceGetInforomImageVersion(Device Device) (string, Return) { + Version := make([]byte, DEVICE_INFOROM_VERSION_BUFFER_SIZE) + ret := nvmlDeviceGetInforomImageVersion(Device, &Version[0], DEVICE_INFOROM_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +func (Device Device) GetInforomImageVersion() (string, Return) { + return DeviceGetInforomImageVersion(Device) +} + +// nvml.DeviceGetInforomConfigurationChecksum() +func DeviceGetInforomConfigurationChecksum(Device Device) (uint32, Return) { + var Checksum uint32 + ret := nvmlDeviceGetInforomConfigurationChecksum(Device, &Checksum) + return Checksum, ret +} + +func (Device Device) GetInforomConfigurationChecksum() (uint32, Return) { + return DeviceGetInforomConfigurationChecksum(Device) +} + +// nvml.DeviceValidateInforom() +func DeviceValidateInforom(Device Device) Return { + return nvmlDeviceValidateInforom(Device) +} + +func (Device Device) ValidateInforom() Return { + return DeviceValidateInforom(Device) +} + +// nvml.DeviceGetDisplayMode() +func DeviceGetDisplayMode(Device Device) (EnableState, Return) { + var Display EnableState + ret := nvmlDeviceGetDisplayMode(Device, &Display) + return Display, ret +} + +func (Device Device) GetDisplayMode() (EnableState, Return) { + return DeviceGetDisplayMode(Device) +} + +// nvml.DeviceGetDisplayActive() +func DeviceGetDisplayActive(Device Device) (EnableState, Return) { + var IsActive EnableState + ret := nvmlDeviceGetDisplayActive(Device, &IsActive) + return IsActive, ret +} + +func (Device Device) GetDisplayActive() (EnableState, Return) { + return DeviceGetDisplayActive(Device) +} + +// nvml.DeviceGetPersistenceMode() +func DeviceGetPersistenceMode(Device Device) (EnableState, Return) { + var Mode EnableState + ret := nvmlDeviceGetPersistenceMode(Device, &Mode) + return Mode, ret +} + +func (Device Device) GetPersistenceMode() (EnableState, Return) { + return DeviceGetPersistenceMode(Device) +} + +// nvml.DeviceGetPciInfo() +func DeviceGetPciInfo(Device Device) (PciInfo, Return) { + var Pci PciInfo + ret := nvmlDeviceGetPciInfo(Device, &Pci) + return Pci, ret +} + +func (Device Device) GetPciInfo() (PciInfo, Return) { + return DeviceGetPciInfo(Device) +} + +// nvml.DeviceGetMaxPcieLinkGeneration() +func DeviceGetMaxPcieLinkGeneration(Device Device) (int, Return) { + var MaxLinkGen uint32 + ret := nvmlDeviceGetMaxPcieLinkGeneration(Device, &MaxLinkGen) + return int(MaxLinkGen), ret +} + +func (Device Device) GetMaxPcieLinkGeneration() (int, Return) { + return DeviceGetMaxPcieLinkGeneration(Device) +} + +// nvml.DeviceGetMaxPcieLinkWidth() +func DeviceGetMaxPcieLinkWidth(Device Device) (int, Return) { + var MaxLinkWidth uint32 + ret := nvmlDeviceGetMaxPcieLinkWidth(Device, &MaxLinkWidth) + return int(MaxLinkWidth), ret +} + +func (Device Device) GetMaxPcieLinkWidth() (int, Return) { + return DeviceGetMaxPcieLinkWidth(Device) +} + +// nvml.DeviceGetCurrPcieLinkGeneration() +func DeviceGetCurrPcieLinkGeneration(Device Device) (int, Return) { + var CurrLinkGen uint32 + ret := nvmlDeviceGetCurrPcieLinkGeneration(Device, &CurrLinkGen) + return int(CurrLinkGen), ret +} + +func (Device Device) GetCurrPcieLinkGeneration() (int, Return) { + return DeviceGetCurrPcieLinkGeneration(Device) +} + +// nvml.DeviceGetCurrPcieLinkWidth() +func DeviceGetCurrPcieLinkWidth(Device Device) (int, Return) { + var CurrLinkWidth uint32 + ret 
:= nvmlDeviceGetCurrPcieLinkWidth(Device, &CurrLinkWidth) + return int(CurrLinkWidth), ret +} + +func (Device Device) GetCurrPcieLinkWidth() (int, Return) { + return DeviceGetCurrPcieLinkWidth(Device) +} + +// nvml.DeviceGetPcieThroughput() +func DeviceGetPcieThroughput(Device Device, Counter PcieUtilCounter) (uint32, Return) { + var Value uint32 + ret := nvmlDeviceGetPcieThroughput(Device, Counter, &Value) + return Value, ret +} + +func (Device Device) GetPcieThroughput(Counter PcieUtilCounter) (uint32, Return) { + return DeviceGetPcieThroughput(Device, Counter) +} + +// nvml.DeviceGetPcieReplayCounter() +func DeviceGetPcieReplayCounter(Device Device) (int, Return) { + var Value uint32 + ret := nvmlDeviceGetPcieReplayCounter(Device, &Value) + return int(Value), ret +} + +func (Device Device) GetPcieReplayCounter() (int, Return) { + return DeviceGetPcieReplayCounter(Device) +} + +// nvml.nvmlDeviceGetClockInfo() +func DeviceGetClockInfo(Device Device, _type ClockType) (uint32, Return) { + var Clock uint32 + ret := nvmlDeviceGetClockInfo(Device, _type, &Clock) + return Clock, ret +} + +func (Device Device) GetClockInfo(_type ClockType) (uint32, Return) { + return DeviceGetClockInfo(Device, _type) +} + +// nvml.DeviceGetMaxClockInfo() +func DeviceGetMaxClockInfo(Device Device, _type ClockType) (uint32, Return) { + var Clock uint32 + ret := nvmlDeviceGetMaxClockInfo(Device, _type, &Clock) + return Clock, ret +} + +func (Device Device) GetMaxClockInfo(_type ClockType) (uint32, Return) { + return DeviceGetMaxClockInfo(Device, _type) +} + +// nvml.DeviceGetApplicationsClock() +func DeviceGetApplicationsClock(Device Device, ClockType ClockType) (uint32, Return) { + var ClockMHz uint32 + ret := nvmlDeviceGetApplicationsClock(Device, ClockType, &ClockMHz) + return ClockMHz, ret +} + +func (Device Device) GetApplicationsClock(ClockType ClockType) (uint32, Return) { + return DeviceGetApplicationsClock(Device, ClockType) +} + +// nvml.DeviceGetDefaultApplicationsClock() +func DeviceGetDefaultApplicationsClock(Device Device, ClockType ClockType) (uint32, Return) { + var ClockMHz uint32 + ret := nvmlDeviceGetDefaultApplicationsClock(Device, ClockType, &ClockMHz) + return ClockMHz, ret +} + +func (Device Device) GetDefaultApplicationsClock(ClockType ClockType) (uint32, Return) { + return DeviceGetDefaultApplicationsClock(Device, ClockType) +} + +// nvml.DeviceResetApplicationsClocks() +func DeviceResetApplicationsClocks(Device Device) Return { + return nvmlDeviceResetApplicationsClocks(Device) +} + +func (Device Device) ResetApplicationsClocks() Return { + return DeviceResetApplicationsClocks(Device) +} + +// nvml.DeviceGetClock() +func DeviceGetClock(Device Device, ClockType ClockType, ClockId ClockId) (uint32, Return) { + var ClockMHz uint32 + ret := nvmlDeviceGetClock(Device, ClockType, ClockId, &ClockMHz) + return ClockMHz, ret +} + +func (Device Device) GetClock(ClockType ClockType, ClockId ClockId) (uint32, Return) { + return DeviceGetClock(Device, ClockType, ClockId) +} + +// nvml.DeviceGetMaxCustomerBoostClock() +func DeviceGetMaxCustomerBoostClock(Device Device, ClockType ClockType) (uint32, Return) { + var ClockMHz uint32 + ret := nvmlDeviceGetMaxCustomerBoostClock(Device, ClockType, &ClockMHz) + return ClockMHz, ret +} + +func (Device Device) GetMaxCustomerBoostClock(ClockType ClockType) (uint32, Return) { + return DeviceGetMaxCustomerBoostClock(Device, ClockType) +} + +// nvml.DeviceGetSupportedMemoryClocks() +func DeviceGetSupportedMemoryClocks(Device Device) (int, uint32, Return) { + var 
Count, ClocksMHz uint32 + ret := nvmlDeviceGetSupportedMemoryClocks(Device, &Count, &ClocksMHz) + return int(Count), ClocksMHz, ret +} + +func (Device Device) GetSupportedMemoryClocks() (int, uint32, Return) { + return DeviceGetSupportedMemoryClocks(Device) +} + +// nvml.DeviceGetSupportedGraphicsClocks() +func DeviceGetSupportedGraphicsClocks(Device Device, MemoryClockMHz int) (int, uint32, Return) { + var Count, ClocksMHz uint32 + ret := nvmlDeviceGetSupportedGraphicsClocks(Device, uint32(MemoryClockMHz), &Count, &ClocksMHz) + return int(Count), ClocksMHz, ret +} + +func (Device Device) GetSupportedGraphicsClocks(MemoryClockMHz int) (int, uint32, Return) { + return DeviceGetSupportedGraphicsClocks(Device, MemoryClockMHz) +} + +// nvml.DeviceGetAutoBoostedClocksEnabled() +func DeviceGetAutoBoostedClocksEnabled(Device Device) (EnableState, EnableState, Return) { + var IsEnabled, DefaultIsEnabled EnableState + ret := nvmlDeviceGetAutoBoostedClocksEnabled(Device, &IsEnabled, &DefaultIsEnabled) + return IsEnabled, DefaultIsEnabled, ret +} + +func (Device Device) GetAutoBoostedClocksEnabled() (EnableState, EnableState, Return) { + return DeviceGetAutoBoostedClocksEnabled(Device) +} + +// nvml.DeviceSetAutoBoostedClocksEnabled() +func DeviceSetAutoBoostedClocksEnabled(Device Device, Enabled EnableState) Return { + return nvmlDeviceSetAutoBoostedClocksEnabled(Device, Enabled) +} + +func (Device Device) SetAutoBoostedClocksEnabled(Enabled EnableState) Return { + return DeviceSetAutoBoostedClocksEnabled(Device, Enabled) +} + +// nvml.DeviceSetDefaultAutoBoostedClocksEnabled() +func DeviceSetDefaultAutoBoostedClocksEnabled(Device Device, Enabled EnableState, Flags uint32) Return { + return nvmlDeviceSetDefaultAutoBoostedClocksEnabled(Device, Enabled, Flags) +} + +func (Device Device) SetDefaultAutoBoostedClocksEnabled(Enabled EnableState, Flags uint32) Return { + return DeviceSetDefaultAutoBoostedClocksEnabled(Device, Enabled, Flags) +} + +// nvml.DeviceGetFanSpeed() +func DeviceGetFanSpeed(Device Device) (uint32, Return) { + var Speed uint32 + ret := nvmlDeviceGetFanSpeed(Device, &Speed) + return Speed, ret +} + +func (Device Device) GetFanSpeed() (uint32, Return) { + return DeviceGetFanSpeed(Device) +} + +// nvml.DeviceGetFanSpeed_v2() +func DeviceGetFanSpeed_v2(Device Device, Fan int) (uint32, Return) { + var Speed uint32 + ret := nvmlDeviceGetFanSpeed_v2(Device, uint32(Fan), &Speed) + return Speed, ret +} + +func (Device Device) GetFanSpeed_v2(Fan int) (uint32, Return) { + return DeviceGetFanSpeed_v2(Device, Fan) +} + +// nvml.DeviceGetNumFans() +func DeviceGetNumFans(Device Device) (int, Return) { + var NumFans uint32 + ret := nvmlDeviceGetNumFans(Device, &NumFans) + return int(NumFans), ret +} + +func (Device Device) GetNumFans() (int, Return) { + return DeviceGetNumFans(Device) +} + +// nvml.DeviceGetTemperature() +func DeviceGetTemperature(Device Device, SensorType TemperatureSensors) (uint32, Return) { + var Temp uint32 + ret := nvmlDeviceGetTemperature(Device, SensorType, &Temp) + return Temp, ret +} + +func (Device Device) GetTemperature(SensorType TemperatureSensors) (uint32, Return) { + return DeviceGetTemperature(Device, SensorType) +} + +// nvml.DeviceGetTemperatureThreshold() +func DeviceGetTemperatureThreshold(Device Device, ThresholdType TemperatureThresholds) (uint32, Return) { + var Temp uint32 + ret := nvmlDeviceGetTemperatureThreshold(Device, ThresholdType, &Temp) + return Temp, ret +} + +func (Device Device) GetTemperatureThreshold(ThresholdType TemperatureThresholds) 
(uint32, Return) { + return DeviceGetTemperatureThreshold(Device, ThresholdType) +} + +// nvml.DeviceSetTemperatureThreshold() +func DeviceSetTemperatureThreshold(Device Device, ThresholdType TemperatureThresholds, Temp int) Return { + t := int32(Temp) + ret := nvmlDeviceSetTemperatureThreshold(Device, ThresholdType, &t) + return ret +} + +func (Device Device) SetTemperatureThreshold(ThresholdType TemperatureThresholds, Temp int) Return { + return DeviceSetTemperatureThreshold(Device, ThresholdType, Temp) +} + +// nvml.DeviceGetPerformanceState() +func DeviceGetPerformanceState(Device Device) (Pstates, Return) { + var PState Pstates + ret := nvmlDeviceGetPerformanceState(Device, &PState) + return PState, ret +} + +func (Device Device) GetPerformanceState() (Pstates, Return) { + return DeviceGetPerformanceState(Device) +} + +// nvml.DeviceGetCurrentClocksThrottleReasons() +func DeviceGetCurrentClocksThrottleReasons(Device Device) (uint64, Return) { + var ClocksThrottleReasons uint64 + ret := nvmlDeviceGetCurrentClocksThrottleReasons(Device, &ClocksThrottleReasons) + return ClocksThrottleReasons, ret +} + +func (Device Device) GetCurrentClocksThrottleReasons() (uint64, Return) { + return DeviceGetCurrentClocksThrottleReasons(Device) +} + +// nvml.DeviceGetSupportedClocksThrottleReasons() +func DeviceGetSupportedClocksThrottleReasons(Device Device) (uint64, Return) { + var SupportedClocksThrottleReasons uint64 + ret := nvmlDeviceGetSupportedClocksThrottleReasons(Device, &SupportedClocksThrottleReasons) + return SupportedClocksThrottleReasons, ret +} + +func (Device Device) GetSupportedClocksThrottleReasons() (uint64, Return) { + return DeviceGetSupportedClocksThrottleReasons(Device) +} + +// nvml.DeviceGetPowerState() +func DeviceGetPowerState(Device Device) (Pstates, Return) { + var PState Pstates + ret := nvmlDeviceGetPowerState(Device, &PState) + return PState, ret +} + +func (Device Device) GetPowerState() (Pstates, Return) { + return DeviceGetPowerState(Device) +} + +// nvml.DeviceGetPowerManagementMode() +func DeviceGetPowerManagementMode(Device Device) (EnableState, Return) { + var Mode EnableState + ret := nvmlDeviceGetPowerManagementMode(Device, &Mode) + return Mode, ret +} + +func (Device Device) GetPowerManagementMode() (EnableState, Return) { + return DeviceGetPowerManagementMode(Device) +} + +// nvml.DeviceGetPowerManagementLimit() +func DeviceGetPowerManagementLimit(Device Device) (uint32, Return) { + var Limit uint32 + ret := nvmlDeviceGetPowerManagementLimit(Device, &Limit) + return Limit, ret +} + +func (Device Device) GetPowerManagementLimit() (uint32, Return) { + return DeviceGetPowerManagementLimit(Device) +} + +// nvml.DeviceGetPowerManagementLimitConstraints() +func DeviceGetPowerManagementLimitConstraints(Device Device) (uint32, uint32, Return) { + var MinLimit, MaxLimit uint32 + ret := nvmlDeviceGetPowerManagementLimitConstraints(Device, &MinLimit, &MaxLimit) + return MinLimit, MaxLimit, ret +} + +func (Device Device) GetPowerManagementLimitConstraints() (uint32, uint32, Return) { + return DeviceGetPowerManagementLimitConstraints(Device) +} + +// nvml.DeviceGetPowerManagementDefaultLimit() +func DeviceGetPowerManagementDefaultLimit(Device Device) (uint32, Return) { + var DefaultLimit uint32 + ret := nvmlDeviceGetPowerManagementDefaultLimit(Device, &DefaultLimit) + return DefaultLimit, ret +} + +func (Device Device) GetPowerManagementDefaultLimit() (uint32, Return) { + return DeviceGetPowerManagementDefaultLimit(Device) +} + +// nvml.DeviceGetPowerUsage() +func 
DeviceGetPowerUsage(Device Device) (uint32, Return) { + var Power uint32 + ret := nvmlDeviceGetPowerUsage(Device, &Power) + return Power, ret +} + +func (Device Device) GetPowerUsage() (uint32, Return) { + return DeviceGetPowerUsage(Device) +} + +// nvml.DeviceGetTotalEnergyConsumption() +func DeviceGetTotalEnergyConsumption(Device Device) (uint64, Return) { + var Energy uint64 + ret := nvmlDeviceGetTotalEnergyConsumption(Device, &Energy) + return Energy, ret +} + +func (Device Device) GetTotalEnergyConsumption() (uint64, Return) { + return DeviceGetTotalEnergyConsumption(Device) +} + +// nvml.DeviceGetEnforcedPowerLimit() +func DeviceGetEnforcedPowerLimit(Device Device) (uint32, Return) { + var Limit uint32 + ret := nvmlDeviceGetEnforcedPowerLimit(Device, &Limit) + return Limit, ret +} + +func (Device Device) GetEnforcedPowerLimit() (uint32, Return) { + return DeviceGetEnforcedPowerLimit(Device) +} + +// nvml.DeviceGetGpuOperationMode() +func DeviceGetGpuOperationMode(Device Device) (GpuOperationMode, GpuOperationMode, Return) { + var Current, Pending GpuOperationMode + ret := nvmlDeviceGetGpuOperationMode(Device, &Current, &Pending) + return Current, Pending, ret +} + +func (Device Device) GetGpuOperationMode() (GpuOperationMode, GpuOperationMode, Return) { + return DeviceGetGpuOperationMode(Device) +} + +// nvml.DeviceGetMemoryInfo() +func DeviceGetMemoryInfo(Device Device) (Memory, Return) { + var Memory Memory + ret := nvmlDeviceGetMemoryInfo(Device, &Memory) + return Memory, ret +} + +func (Device Device) GetMemoryInfo() (Memory, Return) { + return DeviceGetMemoryInfo(Device) +} + +// nvml.DeviceGetMemoryInfo_v2() +func DeviceGetMemoryInfo_v2(Device Device) (Memory_v2, Return) { + var Memory Memory_v2 + Memory.Version = STRUCT_VERSION(Memory, 2) + ret := nvmlDeviceGetMemoryInfo_v2(Device, &Memory) + return Memory, ret +} + +func (Device Device) GetMemoryInfo_v2() (Memory_v2, Return) { + return DeviceGetMemoryInfo_v2(Device) +} + +// nvml.DeviceGetComputeMode() +func DeviceGetComputeMode(Device Device) (ComputeMode, Return) { + var Mode ComputeMode + ret := nvmlDeviceGetComputeMode(Device, &Mode) + return Mode, ret +} + +func (Device Device) GetComputeMode() (ComputeMode, Return) { + return DeviceGetComputeMode(Device) +} + +// nvml.DeviceGetCudaComputeCapability() +func DeviceGetCudaComputeCapability(Device Device) (int, int, Return) { + var Major, Minor int32 + ret := nvmlDeviceGetCudaComputeCapability(Device, &Major, &Minor) + return int(Major), int(Minor), ret +} + +func (Device Device) GetCudaComputeCapability() (int, int, Return) { + return DeviceGetCudaComputeCapability(Device) +} + +// nvml.DeviceGetEccMode() +func DeviceGetEccMode(Device Device) (EnableState, EnableState, Return) { + var Current, Pending EnableState + ret := nvmlDeviceGetEccMode(Device, &Current, &Pending) + return Current, Pending, ret +} + +func (Device Device) GetEccMode() (EnableState, EnableState, Return) { + return DeviceGetEccMode(Device) +} + +// nvml.DeviceGetBoardId() +func DeviceGetBoardId(Device Device) (uint32, Return) { + var BoardId uint32 + ret := nvmlDeviceGetBoardId(Device, &BoardId) + return BoardId, ret +} + +func (Device Device) GetBoardId() (uint32, Return) { + return DeviceGetBoardId(Device) +} + +// nvml.DeviceGetMultiGpuBoard() +func DeviceGetMultiGpuBoard(Device Device) (int, Return) { + var MultiGpuBool uint32 + ret := nvmlDeviceGetMultiGpuBoard(Device, &MultiGpuBool) + return int(MultiGpuBool), ret +} + +func (Device Device) GetMultiGpuBoard() (int, Return) { + return 
DeviceGetMultiGpuBoard(Device) +} + +// nvml.DeviceGetTotalEccErrors() +func DeviceGetTotalEccErrors(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType) (uint64, Return) { + var EccCounts uint64 + ret := nvmlDeviceGetTotalEccErrors(Device, ErrorType, CounterType, &EccCounts) + return EccCounts, ret +} + +func (Device Device) GetTotalEccErrors(ErrorType MemoryErrorType, CounterType EccCounterType) (uint64, Return) { + return DeviceGetTotalEccErrors(Device, ErrorType, CounterType) +} + +// nvml.DeviceGetDetailedEccErrors() +func DeviceGetDetailedEccErrors(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType) (EccErrorCounts, Return) { + var EccCounts EccErrorCounts + ret := nvmlDeviceGetDetailedEccErrors(Device, ErrorType, CounterType, &EccCounts) + return EccCounts, ret +} + +func (Device Device) GetDetailedEccErrors(ErrorType MemoryErrorType, CounterType EccCounterType) (EccErrorCounts, Return) { + return DeviceGetDetailedEccErrors(Device, ErrorType, CounterType) +} + +// nvml.DeviceGetMemoryErrorCounter() +func DeviceGetMemoryErrorCounter(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType, LocationType MemoryLocation) (uint64, Return) { + var Count uint64 + ret := nvmlDeviceGetMemoryErrorCounter(Device, ErrorType, CounterType, LocationType, &Count) + return Count, ret +} + +func (Device Device) GetMemoryErrorCounter(ErrorType MemoryErrorType, CounterType EccCounterType, LocationType MemoryLocation) (uint64, Return) { + return DeviceGetMemoryErrorCounter(Device, ErrorType, CounterType, LocationType) +} + +// nvml.DeviceGetUtilizationRates() +func DeviceGetUtilizationRates(Device Device) (Utilization, Return) { + var Utilization Utilization + ret := nvmlDeviceGetUtilizationRates(Device, &Utilization) + return Utilization, ret +} + +func (Device Device) GetUtilizationRates() (Utilization, Return) { + return DeviceGetUtilizationRates(Device) +} + +// nvml.DeviceGetEncoderUtilization() +func DeviceGetEncoderUtilization(Device Device) (uint32, uint32, Return) { + var Utilization, SamplingPeriodUs uint32 + ret := nvmlDeviceGetEncoderUtilization(Device, &Utilization, &SamplingPeriodUs) + return Utilization, SamplingPeriodUs, ret +} + +func (Device Device) GetEncoderUtilization() (uint32, uint32, Return) { + return DeviceGetEncoderUtilization(Device) +} + +// nvml.DeviceGetEncoderCapacity() +func DeviceGetEncoderCapacity(Device Device, EncoderQueryType EncoderType) (int, Return) { + var EncoderCapacity uint32 + ret := nvmlDeviceGetEncoderCapacity(Device, EncoderQueryType, &EncoderCapacity) + return int(EncoderCapacity), ret +} + +func (Device Device) GetEncoderCapacity(EncoderQueryType EncoderType) (int, Return) { + return DeviceGetEncoderCapacity(Device, EncoderQueryType) +} + +// nvml.DeviceGetEncoderStats() +func DeviceGetEncoderStats(Device Device) (int, uint32, uint32, Return) { + var SessionCount, AverageFps, AverageLatency uint32 + ret := nvmlDeviceGetEncoderStats(Device, &SessionCount, &AverageFps, &AverageLatency) + return int(SessionCount), AverageFps, AverageLatency, ret +} + +func (Device Device) GetEncoderStats() (int, uint32, uint32, Return) { + return DeviceGetEncoderStats(Device) +} + +// nvml.DeviceGetEncoderSessions() +func DeviceGetEncoderSessions(Device Device) ([]EncoderSessionInfo, Return) { + var SessionCount uint32 = 1 // Will be reduced upon returning + for { + SessionInfos := make([]EncoderSessionInfo, SessionCount) + ret := nvmlDeviceGetEncoderSessions(Device, &SessionCount, &SessionInfos[0]) + if ret == SUCCESS { + 
return SessionInfos[:SessionCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + SessionCount *= 2 + } +} + +func (Device Device) GetEncoderSessions() ([]EncoderSessionInfo, Return) { + return DeviceGetEncoderSessions(Device) +} + +// nvml.DeviceGetDecoderUtilization() +func DeviceGetDecoderUtilization(Device Device) (uint32, uint32, Return) { + var Utilization, SamplingPeriodUs uint32 + ret := nvmlDeviceGetDecoderUtilization(Device, &Utilization, &SamplingPeriodUs) + return Utilization, SamplingPeriodUs, ret +} + +func (Device Device) GetDecoderUtilization() (uint32, uint32, Return) { + return DeviceGetDecoderUtilization(Device) +} + +// nvml.DeviceGetFBCStats() +func DeviceGetFBCStats(Device Device) (FBCStats, Return) { + var FbcStats FBCStats + ret := nvmlDeviceGetFBCStats(Device, &FbcStats) + return FbcStats, ret +} + +func (Device Device) GetFBCStats() (FBCStats, Return) { + return DeviceGetFBCStats(Device) +} + +// nvml.DeviceGetFBCSessions() +func DeviceGetFBCSessions(Device Device) ([]FBCSessionInfo, Return) { + var SessionCount uint32 = 1 // Will be reduced upon returning + for { + SessionInfo := make([]FBCSessionInfo, SessionCount) + ret := nvmlDeviceGetFBCSessions(Device, &SessionCount, &SessionInfo[0]) + if ret == SUCCESS { + return SessionInfo[:SessionCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + SessionCount *= 2 + } +} + +func (Device Device) GetFBCSessions() ([]FBCSessionInfo, Return) { + return DeviceGetFBCSessions(Device) +} + +// nvml.DeviceGetDriverModel() +func DeviceGetDriverModel(Device Device) (DriverModel, DriverModel, Return) { + var Current, Pending DriverModel + ret := nvmlDeviceGetDriverModel(Device, &Current, &Pending) + return Current, Pending, ret +} + +func (Device Device) GetDriverModel() (DriverModel, DriverModel, Return) { + return DeviceGetDriverModel(Device) +} + +// nvml.DeviceGetVbiosVersion() +func DeviceGetVbiosVersion(Device Device) (string, Return) { + Version := make([]byte, DEVICE_VBIOS_VERSION_BUFFER_SIZE) + ret := nvmlDeviceGetVbiosVersion(Device, &Version[0], DEVICE_VBIOS_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +func (Device Device) GetVbiosVersion() (string, Return) { + return DeviceGetVbiosVersion(Device) +} + +// nvml.DeviceGetBridgeChipInfo() +func DeviceGetBridgeChipInfo(Device Device) (BridgeChipHierarchy, Return) { + var BridgeHierarchy BridgeChipHierarchy + ret := nvmlDeviceGetBridgeChipInfo(Device, &BridgeHierarchy) + return BridgeHierarchy, ret +} + +func (Device Device) GetBridgeChipInfo() (BridgeChipHierarchy, Return) { + return DeviceGetBridgeChipInfo(Device) +} + +// nvml.DeviceGetComputeRunningProcesses() +func deviceGetComputeRunningProcesses_v1(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v1, InfoCount) + ret := nvmlDeviceGetComputeRunningProcesses_v1(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v1Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetComputeRunningProcesses_v2(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v2, InfoCount) + ret := nvmlDeviceGetComputeRunningProcesses_v2(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v2Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret + } + if ret 
!= ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetComputeRunningProcesses_v3(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo, InfoCount) + ret := nvmlDeviceGetComputeRunningProcesses_v3(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return Infos[:InfoCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func (Device Device) GetComputeRunningProcesses() ([]ProcessInfo, Return) { + return DeviceGetComputeRunningProcesses(Device) +} + +// nvml.DeviceGetGraphicsRunningProcesses() +func deviceGetGraphicsRunningProcesses_v1(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v1, InfoCount) + ret := nvmlDeviceGetGraphicsRunningProcesses_v1(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v1Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetGraphicsRunningProcesses_v2(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v2, InfoCount) + ret := nvmlDeviceGetGraphicsRunningProcesses_v2(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v2Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetGraphicsRunningProcesses_v3(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo, InfoCount) + ret := nvmlDeviceGetGraphicsRunningProcesses_v3(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return Infos[:InfoCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func (Device Device) GetGraphicsRunningProcesses() ([]ProcessInfo, Return) { + return DeviceGetGraphicsRunningProcesses(Device) +} + +// nvml.DeviceGetMPSComputeRunningProcesses() +func deviceGetMPSComputeRunningProcesses_v1(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v1, InfoCount) + ret := nvmlDeviceGetMPSComputeRunningProcesses_v1(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v1Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetMPSComputeRunningProcesses_v2(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v2, InfoCount) + ret := nvmlDeviceGetMPSComputeRunningProcesses_v2(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v2Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetMPSComputeRunningProcesses_v3(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo, InfoCount) + ret := nvmlDeviceGetMPSComputeRunningProcesses_v3(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return Infos[:InfoCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 
2 + } +} + +func (Device Device) GetMPSComputeRunningProcesses() ([]ProcessInfo, Return) { + return DeviceGetMPSComputeRunningProcesses(Device) +} + +// nvml.DeviceOnSameBoard() +func DeviceOnSameBoard(Device1 Device, Device2 Device) (int, Return) { + var OnSameBoard int32 + ret := nvmlDeviceOnSameBoard(Device1, Device2, &OnSameBoard) + return int(OnSameBoard), ret +} + +func (Device1 Device) OnSameBoard(Device2 Device) (int, Return) { + return DeviceOnSameBoard(Device1, Device2) +} + +// nvml.DeviceGetAPIRestriction() +func DeviceGetAPIRestriction(Device Device, ApiType RestrictedAPI) (EnableState, Return) { + var IsRestricted EnableState + ret := nvmlDeviceGetAPIRestriction(Device, ApiType, &IsRestricted) + return IsRestricted, ret +} + +func (Device Device) GetAPIRestriction(ApiType RestrictedAPI) (EnableState, Return) { + return DeviceGetAPIRestriction(Device, ApiType) +} + +// nvml.DeviceGetSamples() +func DeviceGetSamples(Device Device, _type SamplingType, LastSeenTimeStamp uint64) (ValueType, []Sample, Return) { + var SampleValType ValueType + var SampleCount uint32 + ret := nvmlDeviceGetSamples(Device, _type, LastSeenTimeStamp, &SampleValType, &SampleCount, nil) + if ret != SUCCESS { + return SampleValType, nil, ret + } + if SampleCount == 0 { + return SampleValType, []Sample{}, ret + } + Samples := make([]Sample, SampleCount) + ret = nvmlDeviceGetSamples(Device, _type, LastSeenTimeStamp, &SampleValType, &SampleCount, &Samples[0]) + return SampleValType, Samples, ret +} + +func (Device Device) GetSamples(_type SamplingType, LastSeenTimeStamp uint64) (ValueType, []Sample, Return) { + return DeviceGetSamples(Device, _type, LastSeenTimeStamp) +} + +// nvml.DeviceGetBAR1MemoryInfo() +func DeviceGetBAR1MemoryInfo(Device Device) (BAR1Memory, Return) { + var Bar1Memory BAR1Memory + ret := nvmlDeviceGetBAR1MemoryInfo(Device, &Bar1Memory) + return Bar1Memory, ret +} + +func (Device Device) GetBAR1MemoryInfo() (BAR1Memory, Return) { + return DeviceGetBAR1MemoryInfo(Device) +} + +// nvml.DeviceGetViolationStatus() +func DeviceGetViolationStatus(Device Device, PerfPolicyType PerfPolicyType) (ViolationTime, Return) { + var ViolTime ViolationTime + ret := nvmlDeviceGetViolationStatus(Device, PerfPolicyType, &ViolTime) + return ViolTime, ret +} + +func (Device Device) GetViolationStatus(PerfPolicyType PerfPolicyType) (ViolationTime, Return) { + return DeviceGetViolationStatus(Device, PerfPolicyType) +} + +// nvml.DeviceGetIrqNum() +func DeviceGetIrqNum(Device Device) (int, Return) { + var IrqNum uint32 + ret := nvmlDeviceGetIrqNum(Device, &IrqNum) + return int(IrqNum), ret +} + +func (Device Device) GetIrqNum() (int, Return) { + return DeviceGetIrqNum(Device) +} + +// nvml.DeviceGetNumGpuCores() +func DeviceGetNumGpuCores(Device Device) (int, Return) { + var NumCores uint32 + ret := nvmlDeviceGetNumGpuCores(Device, &NumCores) + return int(NumCores), ret +} + +func (Device Device) GetNumGpuCores() (int, Return) { + return DeviceGetNumGpuCores(Device) +} + +// nvml.DeviceGetPowerSource() +func DeviceGetPowerSource(Device Device) (PowerSource, Return) { + var PowerSource PowerSource + ret := nvmlDeviceGetPowerSource(Device, &PowerSource) + return PowerSource, ret +} + +func (Device Device) GetPowerSource() (PowerSource, Return) { + return DeviceGetPowerSource(Device) +} + +// nvml.DeviceGetMemoryBusWidth() +func DeviceGetMemoryBusWidth(Device Device) (uint32, Return) { + var BusWidth uint32 + ret := nvmlDeviceGetMemoryBusWidth(Device, &BusWidth) + return BusWidth, ret +} + +func (Device Device) 
GetMemoryBusWidth() (uint32, Return) { + return DeviceGetMemoryBusWidth(Device) +} + +// nvml.DeviceGetPcieLinkMaxSpeed() +func DeviceGetPcieLinkMaxSpeed(Device Device) (uint32, Return) { + var MaxSpeed uint32 + ret := nvmlDeviceGetPcieLinkMaxSpeed(Device, &MaxSpeed) + return MaxSpeed, ret +} + +func (Device Device) GetPcieLinkMaxSpeed() (uint32, Return) { + return DeviceGetPcieLinkMaxSpeed(Device) +} + +// nvml.DeviceGetAdaptiveClockInfoStatus() +func DeviceGetAdaptiveClockInfoStatus(Device Device) (uint32, Return) { + var AdaptiveClockStatus uint32 + ret := nvmlDeviceGetAdaptiveClockInfoStatus(Device, &AdaptiveClockStatus) + return AdaptiveClockStatus, ret +} + +func (Device Device) GetAdaptiveClockInfoStatus() (uint32, Return) { + return DeviceGetAdaptiveClockInfoStatus(Device) +} + +// nvml.DeviceGetAccountingMode() +func DeviceGetAccountingMode(Device Device) (EnableState, Return) { + var Mode EnableState + ret := nvmlDeviceGetAccountingMode(Device, &Mode) + return Mode, ret +} + +func (Device Device) GetAccountingMode() (EnableState, Return) { + return DeviceGetAccountingMode(Device) +} + +// nvml.DeviceGetAccountingStats() +func DeviceGetAccountingStats(Device Device, Pid uint32) (AccountingStats, Return) { + var Stats AccountingStats + ret := nvmlDeviceGetAccountingStats(Device, Pid, &Stats) + return Stats, ret +} + +func (Device Device) GetAccountingStats(Pid uint32) (AccountingStats, Return) { + return DeviceGetAccountingStats(Device, Pid) +} + +// nvml.DeviceGetAccountingPids() +func DeviceGetAccountingPids(Device Device) ([]int, Return) { + var Count uint32 = 1 // Will be reduced upon returning + for { + Pids := make([]uint32, Count) + ret := nvmlDeviceGetAccountingPids(Device, &Count, &Pids[0]) + if ret == SUCCESS { + return uint32SliceToIntSlice(Pids[:Count]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + Count *= 2 + } +} + +func (Device Device) GetAccountingPids() ([]int, Return) { + return DeviceGetAccountingPids(Device) +} + +// nvml.DeviceGetAccountingBufferSize() +func DeviceGetAccountingBufferSize(Device Device) (int, Return) { + var BufferSize uint32 + ret := nvmlDeviceGetAccountingBufferSize(Device, &BufferSize) + return int(BufferSize), ret +} + +func (Device Device) GetAccountingBufferSize() (int, Return) { + return DeviceGetAccountingBufferSize(Device) +} + +// nvml.DeviceGetRetiredPages() +func DeviceGetRetiredPages(Device Device, Cause PageRetirementCause) ([]uint64, Return) { + var PageCount uint32 = 1 // Will be reduced upon returning + for { + Addresses := make([]uint64, PageCount) + ret := nvmlDeviceGetRetiredPages(Device, Cause, &PageCount, &Addresses[0]) + if ret == SUCCESS { + return Addresses[:PageCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + PageCount *= 2 + } +} + +func (Device Device) GetRetiredPages(Cause PageRetirementCause) ([]uint64, Return) { + return DeviceGetRetiredPages(Device, Cause) +} + +// nvml.DeviceGetRetiredPages_v2() +func DeviceGetRetiredPages_v2(Device Device, Cause PageRetirementCause) ([]uint64, []uint64, Return) { + var PageCount uint32 = 1 // Will be reduced upon returning + for { + Addresses := make([]uint64, PageCount) + Timestamps := make([]uint64, PageCount) + ret := nvmlDeviceGetRetiredPages_v2(Device, Cause, &PageCount, &Addresses[0], &Timestamps[0]) + if ret == SUCCESS { + return Addresses[:PageCount], Timestamps[:PageCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, nil, ret + } + PageCount *= 2 + } +} + +func (Device Device) 
GetRetiredPages_v2(Cause PageRetirementCause) ([]uint64, []uint64, Return) { + return DeviceGetRetiredPages_v2(Device, Cause) +} + +// nvml.DeviceGetRetiredPagesPendingStatus() +func DeviceGetRetiredPagesPendingStatus(Device Device) (EnableState, Return) { + var IsPending EnableState + ret := nvmlDeviceGetRetiredPagesPendingStatus(Device, &IsPending) + return IsPending, ret +} + +func (Device Device) GetRetiredPagesPendingStatus() (EnableState, Return) { + return DeviceGetRetiredPagesPendingStatus(Device) +} + +// nvml.DeviceSetPersistenceMode() +func DeviceSetPersistenceMode(Device Device, Mode EnableState) Return { + return nvmlDeviceSetPersistenceMode(Device, Mode) +} + +func (Device Device) SetPersistenceMode(Mode EnableState) Return { + return DeviceSetPersistenceMode(Device, Mode) +} + +// nvml.DeviceSetComputeMode() +func DeviceSetComputeMode(Device Device, Mode ComputeMode) Return { + return nvmlDeviceSetComputeMode(Device, Mode) +} + +func (Device Device) SetComputeMode(Mode ComputeMode) Return { + return DeviceSetComputeMode(Device, Mode) +} + +// nvml.DeviceSetEccMode() +func DeviceSetEccMode(Device Device, Ecc EnableState) Return { + return nvmlDeviceSetEccMode(Device, Ecc) +} + +func (Device Device) SetEccMode(Ecc EnableState) Return { + return DeviceSetEccMode(Device, Ecc) +} + +// nvml.DeviceClearEccErrorCounts() +func DeviceClearEccErrorCounts(Device Device, CounterType EccCounterType) Return { + return nvmlDeviceClearEccErrorCounts(Device, CounterType) +} + +func (Device Device) ClearEccErrorCounts(CounterType EccCounterType) Return { + return DeviceClearEccErrorCounts(Device, CounterType) +} + +// nvml.DeviceSetDriverModel() +func DeviceSetDriverModel(Device Device, DriverModel DriverModel, Flags uint32) Return { + return nvmlDeviceSetDriverModel(Device, DriverModel, Flags) +} + +func (Device Device) SetDriverModel(DriverModel DriverModel, Flags uint32) Return { + return DeviceSetDriverModel(Device, DriverModel, Flags) +} + +// nvml.DeviceSetGpuLockedClocks() +func DeviceSetGpuLockedClocks(Device Device, MinGpuClockMHz uint32, MaxGpuClockMHz uint32) Return { + return nvmlDeviceSetGpuLockedClocks(Device, MinGpuClockMHz, MaxGpuClockMHz) +} + +func (Device Device) SetGpuLockedClocks(MinGpuClockMHz uint32, MaxGpuClockMHz uint32) Return { + return DeviceSetGpuLockedClocks(Device, MinGpuClockMHz, MaxGpuClockMHz) +} + +// nvml.DeviceResetGpuLockedClocks() +func DeviceResetGpuLockedClocks(Device Device) Return { + return nvmlDeviceResetGpuLockedClocks(Device) +} + +func (Device Device) ResetGpuLockedClocks() Return { + return DeviceResetGpuLockedClocks(Device) +} + +// nvmlDeviceSetMemoryLockedClocks() +func DeviceSetMemoryLockedClocks(Device Device, MinMemClockMHz uint32, MaxMemClockMHz uint32) Return { + return nvmlDeviceSetMemoryLockedClocks(Device, MinMemClockMHz, MaxMemClockMHz) +} + +func (Device Device) SetMemoryLockedClocks(MinMemClockMHz uint32, MaxMemClockMHz uint32) Return { + return DeviceSetMemoryLockedClocks(Device, MinMemClockMHz, MaxMemClockMHz) +} + +// nvmlDeviceResetMemoryLockedClocks() +func DeviceResetMemoryLockedClocks(Device Device) Return { + return nvmlDeviceResetMemoryLockedClocks(Device) +} + +func (Device Device) ResetMemoryLockedClocks() Return { + return DeviceResetMemoryLockedClocks(Device) +} + +// nvml.DeviceGetClkMonStatus() +func DeviceGetClkMonStatus(Device Device) (ClkMonStatus, Return) { + var Status ClkMonStatus + ret := nvmlDeviceGetClkMonStatus(Device, &Status) + return Status, ret +} + +func (Device Device) GetClkMonStatus() 
(ClkMonStatus, Return) { + return DeviceGetClkMonStatus(Device) +} + +// nvml.DeviceSetApplicationsClocks() +func DeviceSetApplicationsClocks(Device Device, MemClockMHz uint32, GraphicsClockMHz uint32) Return { + return nvmlDeviceSetApplicationsClocks(Device, MemClockMHz, GraphicsClockMHz) +} + +func (Device Device) SetApplicationsClocks(MemClockMHz uint32, GraphicsClockMHz uint32) Return { + return DeviceSetApplicationsClocks(Device, MemClockMHz, GraphicsClockMHz) +} + +// nvml.DeviceSetPowerManagementLimit() +func DeviceSetPowerManagementLimit(Device Device, Limit uint32) Return { + return nvmlDeviceSetPowerManagementLimit(Device, Limit) +} + +func (Device Device) SetPowerManagementLimit(Limit uint32) Return { + return DeviceSetPowerManagementLimit(Device, Limit) +} + +// nvml.DeviceSetGpuOperationMode() +func DeviceSetGpuOperationMode(Device Device, Mode GpuOperationMode) Return { + return nvmlDeviceSetGpuOperationMode(Device, Mode) +} + +func (Device Device) SetGpuOperationMode(Mode GpuOperationMode) Return { + return DeviceSetGpuOperationMode(Device, Mode) +} + +// nvml.DeviceSetAPIRestriction() +func DeviceSetAPIRestriction(Device Device, ApiType RestrictedAPI, IsRestricted EnableState) Return { + return nvmlDeviceSetAPIRestriction(Device, ApiType, IsRestricted) +} + +func (Device Device) SetAPIRestriction(ApiType RestrictedAPI, IsRestricted EnableState) Return { + return DeviceSetAPIRestriction(Device, ApiType, IsRestricted) +} + +// nvml.DeviceSetAccountingMode() +func DeviceSetAccountingMode(Device Device, Mode EnableState) Return { + return nvmlDeviceSetAccountingMode(Device, Mode) +} + +func (Device Device) SetAccountingMode(Mode EnableState) Return { + return DeviceSetAccountingMode(Device, Mode) +} + +// nvml.DeviceClearAccountingPids() +func DeviceClearAccountingPids(Device Device) Return { + return nvmlDeviceClearAccountingPids(Device) +} + +func (Device Device) ClearAccountingPids() Return { + return DeviceClearAccountingPids(Device) +} + +// nvml.DeviceGetNvLinkState() +func DeviceGetNvLinkState(Device Device, Link int) (EnableState, Return) { + var IsActive EnableState + ret := nvmlDeviceGetNvLinkState(Device, uint32(Link), &IsActive) + return IsActive, ret +} + +func (Device Device) GetNvLinkState(Link int) (EnableState, Return) { + return DeviceGetNvLinkState(Device, Link) +} + +// nvml.DeviceGetNvLinkVersion() +func DeviceGetNvLinkVersion(Device Device, Link int) (uint32, Return) { + var Version uint32 + ret := nvmlDeviceGetNvLinkVersion(Device, uint32(Link), &Version) + return Version, ret +} + +func (Device Device) GetNvLinkVersion(Link int) (uint32, Return) { + return DeviceGetNvLinkVersion(Device, Link) +} + +// nvml.DeviceGetNvLinkCapability() +func DeviceGetNvLinkCapability(Device Device, Link int, Capability NvLinkCapability) (uint32, Return) { + var CapResult uint32 + ret := nvmlDeviceGetNvLinkCapability(Device, uint32(Link), Capability, &CapResult) + return CapResult, ret +} + +func (Device Device) GetNvLinkCapability(Link int, Capability NvLinkCapability) (uint32, Return) { + return DeviceGetNvLinkCapability(Device, Link, Capability) +} + +// nvml.DeviceGetNvLinkRemotePciInfo() +func DeviceGetNvLinkRemotePciInfo(Device Device, Link int) (PciInfo, Return) { + var Pci PciInfo + ret := nvmlDeviceGetNvLinkRemotePciInfo(Device, uint32(Link), &Pci) + return Pci, ret +} + +func (Device Device) GetNvLinkRemotePciInfo(Link int) (PciInfo, Return) { + return DeviceGetNvLinkRemotePciInfo(Device, Link) +} + +// nvml.DeviceGetNvLinkErrorCounter() +func 
DeviceGetNvLinkErrorCounter(Device Device, Link int, Counter NvLinkErrorCounter) (uint64, Return) { + var CounterValue uint64 + ret := nvmlDeviceGetNvLinkErrorCounter(Device, uint32(Link), Counter, &CounterValue) + return CounterValue, ret +} + +func (Device Device) GetNvLinkErrorCounter(Link int, Counter NvLinkErrorCounter) (uint64, Return) { + return DeviceGetNvLinkErrorCounter(Device, Link, Counter) +} + +// nvml.DeviceResetNvLinkErrorCounters() +func DeviceResetNvLinkErrorCounters(Device Device, Link int) Return { + return nvmlDeviceResetNvLinkErrorCounters(Device, uint32(Link)) +} + +func (Device Device) ResetNvLinkErrorCounters(Link int) Return { + return DeviceResetNvLinkErrorCounters(Device, Link) +} + +// nvml.DeviceSetNvLinkUtilizationControl() +func DeviceSetNvLinkUtilizationControl(Device Device, Link int, Counter int, Control *NvLinkUtilizationControl, Reset bool) Return { + reset := uint32(0) + if Reset { + reset = 1 + } + return nvmlDeviceSetNvLinkUtilizationControl(Device, uint32(Link), uint32(Counter), Control, reset) +} + +func (Device Device) SetNvLinkUtilizationControl(Link int, Counter int, Control *NvLinkUtilizationControl, Reset bool) Return { + return DeviceSetNvLinkUtilizationControl(Device, Link, Counter, Control, Reset) +} + +// nvml.DeviceGetNvLinkUtilizationControl() +func DeviceGetNvLinkUtilizationControl(Device Device, Link int, Counter int) (NvLinkUtilizationControl, Return) { + var Control NvLinkUtilizationControl + ret := nvmlDeviceGetNvLinkUtilizationControl(Device, uint32(Link), uint32(Counter), &Control) + return Control, ret +} + +func (Device Device) GetNvLinkUtilizationControl(Link int, Counter int) (NvLinkUtilizationControl, Return) { + return DeviceGetNvLinkUtilizationControl(Device, Link, Counter) +} + +// nvml.DeviceGetNvLinkUtilizationCounter() +func DeviceGetNvLinkUtilizationCounter(Device Device, Link int, Counter int) (uint64, uint64, Return) { + var Rxcounter, Txcounter uint64 + ret := nvmlDeviceGetNvLinkUtilizationCounter(Device, uint32(Link), uint32(Counter), &Rxcounter, &Txcounter) + return Rxcounter, Txcounter, ret +} + +func (Device Device) GetNvLinkUtilizationCounter(Link int, Counter int) (uint64, uint64, Return) { + return DeviceGetNvLinkUtilizationCounter(Device, Link, Counter) +} + +// nvml.DeviceFreezeNvLinkUtilizationCounter() +func DeviceFreezeNvLinkUtilizationCounter(Device Device, Link int, Counter int, Freeze EnableState) Return { + return nvmlDeviceFreezeNvLinkUtilizationCounter(Device, uint32(Link), uint32(Counter), Freeze) +} + +func (Device Device) FreezeNvLinkUtilizationCounter(Link int, Counter int, Freeze EnableState) Return { + return DeviceFreezeNvLinkUtilizationCounter(Device, Link, Counter, Freeze) +} + +// nvml.DeviceResetNvLinkUtilizationCounter() +func DeviceResetNvLinkUtilizationCounter(Device Device, Link int, Counter int) Return { + return nvmlDeviceResetNvLinkUtilizationCounter(Device, uint32(Link), uint32(Counter)) +} + +func (Device Device) ResetNvLinkUtilizationCounter(Link int, Counter int) Return { + return DeviceResetNvLinkUtilizationCounter(Device, Link, Counter) +} + +// nvml.DeviceGetNvLinkRemoteDeviceType() +func DeviceGetNvLinkRemoteDeviceType(Device Device, Link int) (IntNvLinkDeviceType, Return) { + var NvLinkDeviceType IntNvLinkDeviceType + ret := nvmlDeviceGetNvLinkRemoteDeviceType(Device, uint32(Link), &NvLinkDeviceType) + return NvLinkDeviceType, ret +} + +func (Device Device) GetNvLinkRemoteDeviceType(Link int) (IntNvLinkDeviceType, Return) { + return DeviceGetNvLinkRemoteDeviceType(Device, 
Link) +} + +// nvml.DeviceRegisterEvents() +func DeviceRegisterEvents(Device Device, EventTypes uint64, Set EventSet) Return { + return nvmlDeviceRegisterEvents(Device, EventTypes, Set) +} + +func (Device Device) RegisterEvents(EventTypes uint64, Set EventSet) Return { + return DeviceRegisterEvents(Device, EventTypes, Set) +} + +// nvmlDeviceGetSupportedEventTypes() +func DeviceGetSupportedEventTypes(Device Device) (uint64, Return) { + var EventTypes uint64 + ret := nvmlDeviceGetSupportedEventTypes(Device, &EventTypes) + return EventTypes, ret +} + +func (Device Device) GetSupportedEventTypes() (uint64, Return) { + return DeviceGetSupportedEventTypes(Device) +} + +// nvml.DeviceModifyDrainState() +func DeviceModifyDrainState(PciInfo *PciInfo, NewState EnableState) Return { + return nvmlDeviceModifyDrainState(PciInfo, NewState) +} + +// nvml.DeviceQueryDrainState() +func DeviceQueryDrainState(PciInfo *PciInfo) (EnableState, Return) { + var CurrentState EnableState + ret := nvmlDeviceQueryDrainState(PciInfo, &CurrentState) + return CurrentState, ret +} + +// nvml.DeviceRemoveGpu() +func DeviceRemoveGpu(PciInfo *PciInfo) Return { + return nvmlDeviceRemoveGpu(PciInfo) +} + +// nvml.DeviceRemoveGpu_v2() +func DeviceRemoveGpu_v2(PciInfo *PciInfo, GpuState DetachGpuState, LinkState PcieLinkState) Return { + return nvmlDeviceRemoveGpu_v2(PciInfo, GpuState, LinkState) +} + +// nvml.DeviceDiscoverGpus() +func DeviceDiscoverGpus() (PciInfo, Return) { + var PciInfo PciInfo + ret := nvmlDeviceDiscoverGpus(&PciInfo) + return PciInfo, ret +} + +// nvml.DeviceGetFieldValues() +func DeviceGetFieldValues(Device Device, Values []FieldValue) Return { + ValuesCount := len(Values) + return nvmlDeviceGetFieldValues(Device, int32(ValuesCount), &Values[0]) +} + +func (Device Device) GetFieldValues(Values []FieldValue) Return { + return DeviceGetFieldValues(Device, Values) +} + +// nvml.DeviceGetVirtualizationMode() +func DeviceGetVirtualizationMode(Device Device) (GpuVirtualizationMode, Return) { + var PVirtualMode GpuVirtualizationMode + ret := nvmlDeviceGetVirtualizationMode(Device, &PVirtualMode) + return PVirtualMode, ret +} + +func (Device Device) GetVirtualizationMode() (GpuVirtualizationMode, Return) { + return DeviceGetVirtualizationMode(Device) +} + +// nvml.DeviceGetHostVgpuMode() +func DeviceGetHostVgpuMode(Device Device) (HostVgpuMode, Return) { + var PHostVgpuMode HostVgpuMode + ret := nvmlDeviceGetHostVgpuMode(Device, &PHostVgpuMode) + return PHostVgpuMode, ret +} + +func (Device Device) GetHostVgpuMode() (HostVgpuMode, Return) { + return DeviceGetHostVgpuMode(Device) +} + +// nvml.DeviceSetVirtualizationMode() +func DeviceSetVirtualizationMode(Device Device, VirtualMode GpuVirtualizationMode) Return { + return nvmlDeviceSetVirtualizationMode(Device, VirtualMode) +} + +func (Device Device) SetVirtualizationMode(VirtualMode GpuVirtualizationMode) Return { + return DeviceSetVirtualizationMode(Device, VirtualMode) +} + +// nvml.DeviceGetGridLicensableFeatures() +func DeviceGetGridLicensableFeatures(Device Device) (GridLicensableFeatures, Return) { + var PGridLicensableFeatures GridLicensableFeatures + ret := nvmlDeviceGetGridLicensableFeatures(Device, &PGridLicensableFeatures) + return PGridLicensableFeatures, ret +} + +func (Device Device) GetGridLicensableFeatures() (GridLicensableFeatures, Return) { + return DeviceGetGridLicensableFeatures(Device) +} + +// nvml.DeviceGetProcessUtilization() +func DeviceGetProcessUtilization(Device Device, LastSeenTimeStamp uint64) ([]ProcessUtilizationSample, Return) 
{ + var ProcessSamplesCount uint32 + ret := nvmlDeviceGetProcessUtilization(Device, nil, &ProcessSamplesCount, LastSeenTimeStamp) + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + if ProcessSamplesCount == 0 { + return []ProcessUtilizationSample{}, ret + } + Utilization := make([]ProcessUtilizationSample, ProcessSamplesCount) + ret = nvmlDeviceGetProcessUtilization(Device, &Utilization[0], &ProcessSamplesCount, LastSeenTimeStamp) + return Utilization[:ProcessSamplesCount], ret +} + +func (Device Device) GetProcessUtilization(LastSeenTimeStamp uint64) ([]ProcessUtilizationSample, Return) { + return DeviceGetProcessUtilization(Device, LastSeenTimeStamp) +} + +// nvml.DeviceGetSupportedVgpus() +func DeviceGetSupportedVgpus(Device Device) ([]VgpuTypeId, Return) { + var VgpuCount uint32 = 1 // Will be reduced upon returning + for { + VgpuTypeIds := make([]VgpuTypeId, VgpuCount) + ret := nvmlDeviceGetSupportedVgpus(Device, &VgpuCount, &VgpuTypeIds[0]) + if ret == SUCCESS { + return VgpuTypeIds[:VgpuCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + VgpuCount *= 2 + } +} + +func (Device Device) GetSupportedVgpus() ([]VgpuTypeId, Return) { + return DeviceGetSupportedVgpus(Device) +} + +// nvml.DeviceGetCreatableVgpus() +func DeviceGetCreatableVgpus(Device Device) ([]VgpuTypeId, Return) { + var VgpuCount uint32 = 1 // Will be reduced upon returning + for { + VgpuTypeIds := make([]VgpuTypeId, VgpuCount) + ret := nvmlDeviceGetCreatableVgpus(Device, &VgpuCount, &VgpuTypeIds[0]) + if ret == SUCCESS { + return VgpuTypeIds[:VgpuCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + VgpuCount *= 2 + } +} + +func (Device Device) GetCreatableVgpus() ([]VgpuTypeId, Return) { + return DeviceGetCreatableVgpus(Device) +} + +// nvml.DeviceGetActiveVgpus() +func DeviceGetActiveVgpus(Device Device) ([]VgpuInstance, Return) { + var VgpuCount uint32 = 1 // Will be reduced upon returning + for { + VgpuInstances := make([]VgpuInstance, VgpuCount) + ret := nvmlDeviceGetActiveVgpus(Device, &VgpuCount, &VgpuInstances[0]) + if ret == SUCCESS { + return VgpuInstances[:VgpuCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + VgpuCount *= 2 + } +} + +func (Device Device) GetActiveVgpus() ([]VgpuInstance, Return) { + return DeviceGetActiveVgpus(Device) +} + +// nvml.DeviceGetVgpuMetadata() +func DeviceGetVgpuMetadata(Device Device) (VgpuPgpuMetadata, Return) { + var VgpuPgpuMetadata VgpuPgpuMetadata + OpaqueDataSize := unsafe.Sizeof(VgpuPgpuMetadata.nvmlVgpuPgpuMetadata.OpaqueData) + VgpuPgpuMetadataSize := unsafe.Sizeof(VgpuPgpuMetadata.nvmlVgpuPgpuMetadata) - OpaqueDataSize + for { + BufferSize := uint32(VgpuPgpuMetadataSize + OpaqueDataSize) + Buffer := make([]byte, BufferSize) + nvmlVgpuPgpuMetadataPtr := (*nvmlVgpuPgpuMetadata)(unsafe.Pointer(&Buffer[0])) + ret := nvmlDeviceGetVgpuMetadata(Device, nvmlVgpuPgpuMetadataPtr, &BufferSize) + if ret == SUCCESS { + VgpuPgpuMetadata.nvmlVgpuPgpuMetadata = *nvmlVgpuPgpuMetadataPtr + VgpuPgpuMetadata.OpaqueData = Buffer[VgpuPgpuMetadataSize:BufferSize] + return VgpuPgpuMetadata, ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return VgpuPgpuMetadata, ret + } + OpaqueDataSize = 2 * OpaqueDataSize + } +} + +func (Device Device) GetVgpuMetadata() (VgpuPgpuMetadata, Return) { + return DeviceGetVgpuMetadata(Device) +} + +// nvml.DeviceGetPgpuMetadataString() +func DeviceGetPgpuMetadataString(Device Device) (string, Return) { + var BufferSize uint32 = 1 // Will be reduced upon returning + for { + 
PgpuMetadata := make([]byte, BufferSize) + ret := nvmlDeviceGetPgpuMetadataString(Device, &PgpuMetadata[0], &BufferSize) + if ret == SUCCESS { + return string(PgpuMetadata[:clen(PgpuMetadata)]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return "", ret + } + BufferSize *= 2 + } +} + +func (Device Device) GetPgpuMetadataString() (string, Return) { + return DeviceGetPgpuMetadataString(Device) +} + +// nvml.DeviceGetVgpuUtilization() +func DeviceGetVgpuUtilization(Device Device, LastSeenTimeStamp uint64) (ValueType, []VgpuInstanceUtilizationSample, Return) { + var SampleValType ValueType + var VgpuInstanceSamplesCount uint32 = 1 // Will be reduced upon returning + for { + UtilizationSamples := make([]VgpuInstanceUtilizationSample, VgpuInstanceSamplesCount) + ret := nvmlDeviceGetVgpuUtilization(Device, LastSeenTimeStamp, &SampleValType, &VgpuInstanceSamplesCount, &UtilizationSamples[0]) + if ret == SUCCESS { + return SampleValType, UtilizationSamples[:VgpuInstanceSamplesCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return SampleValType, nil, ret + } + VgpuInstanceSamplesCount *= 2 + } +} + +func (Device Device) GetVgpuUtilization(LastSeenTimeStamp uint64) (ValueType, []VgpuInstanceUtilizationSample, Return) { + return DeviceGetVgpuUtilization(Device, LastSeenTimeStamp) +} + +// nvml.DeviceGetAttributes() +func DeviceGetAttributes(Device Device) (DeviceAttributes, Return) { + var Attributes DeviceAttributes + ret := nvmlDeviceGetAttributes(Device, &Attributes) + return Attributes, ret +} + +func (Device Device) GetAttributes() (DeviceAttributes, Return) { + return DeviceGetAttributes(Device) +} + +// nvml.DeviceGetRemappedRows() +func DeviceGetRemappedRows(Device Device) (int, int, bool, bool, Return) { + var CorrRows, UncRows, IsPending, FailureOccured uint32 + ret := nvmlDeviceGetRemappedRows(Device, &CorrRows, &UncRows, &IsPending, &FailureOccured) + return int(CorrRows), int(UncRows), (IsPending != 0), (FailureOccured != 0), ret +} + +func (Device Device) GetRemappedRows() (int, int, bool, bool, Return) { + return DeviceGetRemappedRows(Device) +} + +// nvml.DeviceGetRowRemapperHistogram() +func DeviceGetRowRemapperHistogram(Device Device) (RowRemapperHistogramValues, Return) { + var Values RowRemapperHistogramValues + ret := nvmlDeviceGetRowRemapperHistogram(Device, &Values) + return Values, ret +} + +func (Device Device) GetRowRemapperHistogram() (RowRemapperHistogramValues, Return) { + return DeviceGetRowRemapperHistogram(Device) +} + +// nvml.DeviceGetArchitecture() +func DeviceGetArchitecture(Device Device) (DeviceArchitecture, Return) { + var Arch DeviceArchitecture + ret := nvmlDeviceGetArchitecture(Device, &Arch) + return Arch, ret +} + +func (Device Device) GetArchitecture() (DeviceArchitecture, Return) { + return DeviceGetArchitecture(Device) +} + +// nvml.DeviceGetVgpuProcessUtilization() +func DeviceGetVgpuProcessUtilization(Device Device, LastSeenTimeStamp uint64) ([]VgpuProcessUtilizationSample, Return) { + var VgpuProcessSamplesCount uint32 = 1 // Will be reduced upon returning + for { + UtilizationSamples := make([]VgpuProcessUtilizationSample, VgpuProcessSamplesCount) + ret := nvmlDeviceGetVgpuProcessUtilization(Device, LastSeenTimeStamp, &VgpuProcessSamplesCount, &UtilizationSamples[0]) + if ret == SUCCESS { + return UtilizationSamples[:VgpuProcessSamplesCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + VgpuProcessSamplesCount *= 2 + } +} + +func (Device Device) GetVgpuProcessUtilization(LastSeenTimeStamp uint64) 
([]VgpuProcessUtilizationSample, Return) { + return DeviceGetVgpuProcessUtilization(Device, LastSeenTimeStamp) +} + +// nvml.GetExcludedDeviceCount() +func GetExcludedDeviceCount() (int, Return) { + var DeviceCount uint32 + ret := nvmlGetExcludedDeviceCount(&DeviceCount) + return int(DeviceCount), ret +} + +// nvml.GetExcludedDeviceInfoByIndex() +func GetExcludedDeviceInfoByIndex(Index int) (ExcludedDeviceInfo, Return) { + var Info ExcludedDeviceInfo + ret := nvmlGetExcludedDeviceInfoByIndex(uint32(Index), &Info) + return Info, ret +} + +// nvml.DeviceSetMigMode() +func DeviceSetMigMode(Device Device, Mode int) (Return, Return) { + var ActivationStatus Return + ret := nvmlDeviceSetMigMode(Device, uint32(Mode), &ActivationStatus) + return ActivationStatus, ret +} + +func (Device Device) SetMigMode(Mode int) (Return, Return) { + return DeviceSetMigMode(Device, Mode) +} + +// nvml.DeviceGetMigMode() +func DeviceGetMigMode(Device Device) (int, int, Return) { + var CurrentMode, PendingMode uint32 + ret := nvmlDeviceGetMigMode(Device, &CurrentMode, &PendingMode) + return int(CurrentMode), int(PendingMode), ret +} + +func (Device Device) GetMigMode() (int, int, Return) { + return DeviceGetMigMode(Device) +} + +// nvml.DeviceGetGpuInstanceProfileInfo() +func DeviceGetGpuInstanceProfileInfo(Device Device, Profile int) (GpuInstanceProfileInfo, Return) { + var Info GpuInstanceProfileInfo + ret := nvmlDeviceGetGpuInstanceProfileInfo(Device, uint32(Profile), &Info) + return Info, ret +} + +func (Device Device) GetGpuInstanceProfileInfo(Profile int) (GpuInstanceProfileInfo, Return) { + return DeviceGetGpuInstanceProfileInfo(Device, Profile) +} + +// nvml.DeviceGetGpuInstanceProfileInfoV() +type GpuInstanceProfileInfoV struct { + device Device + profile int +} + +func (InfoV GpuInstanceProfileInfoV) V1() (GpuInstanceProfileInfo, Return) { + return DeviceGetGpuInstanceProfileInfo(InfoV.device, InfoV.profile) +} + +func (InfoV GpuInstanceProfileInfoV) V2() (GpuInstanceProfileInfo_v2, Return) { + var Info GpuInstanceProfileInfo_v2 + Info.Version = STRUCT_VERSION(Info, 2) + ret := nvmlDeviceGetGpuInstanceProfileInfoV(InfoV.device, uint32(InfoV.profile), &Info) + return Info, ret +} + +func DeviceGetGpuInstanceProfileInfoV(Device Device, Profile int) GpuInstanceProfileInfoV { + return GpuInstanceProfileInfoV{Device, Profile} +} + +func (Device Device) GetGpuInstanceProfileInfoV(Profile int) GpuInstanceProfileInfoV { + return DeviceGetGpuInstanceProfileInfoV(Device, Profile) +} + +// nvml.DeviceGetGpuInstancePossiblePlacements() +func DeviceGetGpuInstancePossiblePlacements(Device Device, Info *GpuInstanceProfileInfo) ([]GpuInstancePlacement, Return) { + if Info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var Count uint32 = Info.InstanceCount + Placements := make([]GpuInstancePlacement, Count) + ret := nvmlDeviceGetGpuInstancePossiblePlacements(Device, Info.Id, &Placements[0], &Count) + return Placements[:Count], ret +} + +func (Device Device) GetGpuInstancePossiblePlacements(Info *GpuInstanceProfileInfo) ([]GpuInstancePlacement, Return) { + return DeviceGetGpuInstancePossiblePlacements(Device, Info) +} + +// nvml.DeviceGetGpuInstanceRemainingCapacity() +func DeviceGetGpuInstanceRemainingCapacity(Device Device, Info *GpuInstanceProfileInfo) (int, Return) { + if Info == nil { + return 0, ERROR_INVALID_ARGUMENT + } + var Count uint32 + ret := nvmlDeviceGetGpuInstanceRemainingCapacity(Device, Info.Id, &Count) + return int(Count), ret +} + +func (Device Device) GetGpuInstanceRemainingCapacity(Info 
*GpuInstanceProfileInfo) (int, Return) { + return DeviceGetGpuInstanceRemainingCapacity(Device, Info) +} + +// nvml.DeviceCreateGpuInstance() +func DeviceCreateGpuInstance(Device Device, Info *GpuInstanceProfileInfo) (GpuInstance, Return) { + if Info == nil { + return GpuInstance{}, ERROR_INVALID_ARGUMENT + } + var GpuInstance GpuInstance + ret := nvmlDeviceCreateGpuInstance(Device, Info.Id, &GpuInstance) + return GpuInstance, ret +} + +func (Device Device) CreateGpuInstance(Info *GpuInstanceProfileInfo) (GpuInstance, Return) { + return DeviceCreateGpuInstance(Device, Info) +} + +// nvml.DeviceCreateGpuInstanceWithPlacement() +func DeviceCreateGpuInstanceWithPlacement(Device Device, Info *GpuInstanceProfileInfo, Placement *GpuInstancePlacement) (GpuInstance, Return) { + if Info == nil { + return GpuInstance{}, ERROR_INVALID_ARGUMENT + } + var GpuInstance GpuInstance + ret := nvmlDeviceCreateGpuInstanceWithPlacement(Device, Info.Id, Placement, &GpuInstance) + return GpuInstance, ret +} + +func (Device Device) CreateGpuInstanceWithPlacement(Info *GpuInstanceProfileInfo, Placement *GpuInstancePlacement) (GpuInstance, Return) { + return DeviceCreateGpuInstanceWithPlacement(Device, Info, Placement) +} + +// nvml.GpuInstanceDestroy() +func GpuInstanceDestroy(GpuInstance GpuInstance) Return { + return nvmlGpuInstanceDestroy(GpuInstance) +} + +func (GpuInstance GpuInstance) Destroy() Return { + return GpuInstanceDestroy(GpuInstance) +} + +// nvml.DeviceGetGpuInstances() +func DeviceGetGpuInstances(Device Device, Info *GpuInstanceProfileInfo) ([]GpuInstance, Return) { + if Info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var Count uint32 = Info.InstanceCount + GpuInstances := make([]GpuInstance, Count) + ret := nvmlDeviceGetGpuInstances(Device, Info.Id, &GpuInstances[0], &Count) + return GpuInstances[:Count], ret +} + +func (Device Device) GetGpuInstances(Info *GpuInstanceProfileInfo) ([]GpuInstance, Return) { + return DeviceGetGpuInstances(Device, Info) +} + +// nvml.DeviceGetGpuInstanceById() +func DeviceGetGpuInstanceById(Device Device, Id int) (GpuInstance, Return) { + var GpuInstance GpuInstance + ret := nvmlDeviceGetGpuInstanceById(Device, uint32(Id), &GpuInstance) + return GpuInstance, ret +} + +func (Device Device) GetGpuInstanceById(Id int) (GpuInstance, Return) { + return DeviceGetGpuInstanceById(Device, Id) +} + +// nvml.GpuInstanceGetInfo() +func GpuInstanceGetInfo(GpuInstance GpuInstance) (GpuInstanceInfo, Return) { + var Info GpuInstanceInfo + ret := nvmlGpuInstanceGetInfo(GpuInstance, &Info) + return Info, ret +} + +func (GpuInstance GpuInstance) GetInfo() (GpuInstanceInfo, Return) { + return GpuInstanceGetInfo(GpuInstance) +} + +// nvml.GpuInstanceGetComputeInstanceProfileInfo() +func GpuInstanceGetComputeInstanceProfileInfo(GpuInstance GpuInstance, Profile int, EngProfile int) (ComputeInstanceProfileInfo, Return) { + var Info ComputeInstanceProfileInfo + ret := nvmlGpuInstanceGetComputeInstanceProfileInfo(GpuInstance, uint32(Profile), uint32(EngProfile), &Info) + return Info, ret +} + +func (GpuInstance GpuInstance) GetComputeInstanceProfileInfo(Profile int, EngProfile int) (ComputeInstanceProfileInfo, Return) { + return GpuInstanceGetComputeInstanceProfileInfo(GpuInstance, Profile, EngProfile) +} + +// nvml.GpuInstanceGetComputeInstanceProfileInfoV() +type ComputeInstanceProfileInfoV struct { + gpuInstance GpuInstance + profile int + engProfile int +} + +func (InfoV ComputeInstanceProfileInfoV) V1() (ComputeInstanceProfileInfo, Return) { + return 
GpuInstanceGetComputeInstanceProfileInfo(InfoV.gpuInstance, InfoV.profile, InfoV.engProfile) +} + +func (InfoV ComputeInstanceProfileInfoV) V2() (ComputeInstanceProfileInfo_v2, Return) { + var Info ComputeInstanceProfileInfo_v2 + Info.Version = STRUCT_VERSION(Info, 2) + ret := nvmlGpuInstanceGetComputeInstanceProfileInfoV(InfoV.gpuInstance, uint32(InfoV.profile), uint32(InfoV.engProfile), &Info) + return Info, ret +} + +func GpuInstanceGetComputeInstanceProfileInfoV(GpuInstance GpuInstance, Profile int, EngProfile int) ComputeInstanceProfileInfoV { + return ComputeInstanceProfileInfoV{GpuInstance, Profile, EngProfile} +} + +func (GpuInstance GpuInstance) GetComputeInstanceProfileInfoV(Profile int, EngProfile int) ComputeInstanceProfileInfoV { + return GpuInstanceGetComputeInstanceProfileInfoV(GpuInstance, Profile, EngProfile) +} + +// nvml.GpuInstanceGetComputeInstanceRemainingCapacity() +func GpuInstanceGetComputeInstanceRemainingCapacity(GpuInstance GpuInstance, Info *ComputeInstanceProfileInfo) (int, Return) { + if Info == nil { + return 0, ERROR_INVALID_ARGUMENT + } + var Count uint32 + ret := nvmlGpuInstanceGetComputeInstanceRemainingCapacity(GpuInstance, Info.Id, &Count) + return int(Count), ret +} + +func (GpuInstance GpuInstance) GetComputeInstanceRemainingCapacity(Info *ComputeInstanceProfileInfo) (int, Return) { + return GpuInstanceGetComputeInstanceRemainingCapacity(GpuInstance, Info) +} + +// nvml.GpuInstanceCreateComputeInstance() +func GpuInstanceCreateComputeInstance(GpuInstance GpuInstance, Info *ComputeInstanceProfileInfo) (ComputeInstance, Return) { + if Info == nil { + return ComputeInstance{}, ERROR_INVALID_ARGUMENT + } + var ComputeInstance ComputeInstance + ret := nvmlGpuInstanceCreateComputeInstance(GpuInstance, Info.Id, &ComputeInstance) + return ComputeInstance, ret +} + +func (GpuInstance GpuInstance) CreateComputeInstance(Info *ComputeInstanceProfileInfo) (ComputeInstance, Return) { + return GpuInstanceCreateComputeInstance(GpuInstance, Info) +} + +// nvml.ComputeInstanceDestroy() +func ComputeInstanceDestroy(ComputeInstance ComputeInstance) Return { + return nvmlComputeInstanceDestroy(ComputeInstance) +} + +func (ComputeInstance ComputeInstance) Destroy() Return { + return ComputeInstanceDestroy(ComputeInstance) +} + +// nvml.GpuInstanceGetComputeInstances() +func GpuInstanceGetComputeInstances(GpuInstance GpuInstance, Info *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) { + if Info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var Count uint32 = Info.InstanceCount + ComputeInstances := make([]ComputeInstance, Count) + ret := nvmlGpuInstanceGetComputeInstances(GpuInstance, Info.Id, &ComputeInstances[0], &Count) + return ComputeInstances[:Count], ret +} + +func (GpuInstance GpuInstance) GetComputeInstances(Info *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) { + return GpuInstanceGetComputeInstances(GpuInstance, Info) +} + +// nvml.GpuInstanceGetComputeInstanceById() +func GpuInstanceGetComputeInstanceById(GpuInstance GpuInstance, Id int) (ComputeInstance, Return) { + var ComputeInstance ComputeInstance + ret := nvmlGpuInstanceGetComputeInstanceById(GpuInstance, uint32(Id), &ComputeInstance) + return ComputeInstance, ret +} + +func (GpuInstance GpuInstance) GetComputeInstanceById(Id int) (ComputeInstance, Return) { + return GpuInstanceGetComputeInstanceById(GpuInstance, Id) +} + +// nvml.ComputeInstanceGetInfo() +func ComputeInstanceGetInfo(ComputeInstance ComputeInstance) (ComputeInstanceInfo, Return) { + var Info ComputeInstanceInfo + 
ret := nvmlComputeInstanceGetInfo(ComputeInstance, &Info) + return Info, ret +} + +func (ComputeInstance ComputeInstance) GetInfo() (ComputeInstanceInfo, Return) { + return ComputeInstanceGetInfo(ComputeInstance) +} + +// nvml.DeviceIsMigDeviceHandle() +func DeviceIsMigDeviceHandle(Device Device) (bool, Return) { + var IsMigDevice uint32 + ret := nvmlDeviceIsMigDeviceHandle(Device, &IsMigDevice) + return (IsMigDevice != 0), ret +} + +func (Device Device) IsMigDeviceHandle() (bool, Return) { + return DeviceIsMigDeviceHandle(Device) +} + +// nvml DeviceGetGpuInstanceId() +func DeviceGetGpuInstanceId(Device Device) (int, Return) { + var Id uint32 + ret := nvmlDeviceGetGpuInstanceId(Device, &Id) + return int(Id), ret +} + +func (Device Device) GetGpuInstanceId() (int, Return) { + return DeviceGetGpuInstanceId(Device) +} + +// nvml.DeviceGetComputeInstanceId() +func DeviceGetComputeInstanceId(Device Device) (int, Return) { + var Id uint32 + ret := nvmlDeviceGetComputeInstanceId(Device, &Id) + return int(Id), ret +} + +func (Device Device) GetComputeInstanceId() (int, Return) { + return DeviceGetComputeInstanceId(Device) +} + +// nvml.DeviceGetMaxMigDeviceCount() +func DeviceGetMaxMigDeviceCount(Device Device) (int, Return) { + var Count uint32 + ret := nvmlDeviceGetMaxMigDeviceCount(Device, &Count) + return int(Count), ret +} + +func (Device Device) GetMaxMigDeviceCount() (int, Return) { + return DeviceGetMaxMigDeviceCount(Device) +} + +// nvml.DeviceGetMigDeviceHandleByIndex() +func DeviceGetMigDeviceHandleByIndex(device Device, Index int) (Device, Return) { + var MigDevice Device + ret := nvmlDeviceGetMigDeviceHandleByIndex(device, uint32(Index), &MigDevice) + return MigDevice, ret +} + +func (Device Device) GetMigDeviceHandleByIndex(Index int) (Device, Return) { + return DeviceGetMigDeviceHandleByIndex(Device, Index) +} + +// nvml.DeviceGetDeviceHandleFromMigDeviceHandle() +func DeviceGetDeviceHandleFromMigDeviceHandle(MigDevice Device) (Device, Return) { + var Device Device + ret := nvmlDeviceGetDeviceHandleFromMigDeviceHandle(MigDevice, &Device) + return Device, ret +} + +func (MigDevice Device) GetDeviceHandleFromMigDeviceHandle() (Device, Return) { + return DeviceGetDeviceHandleFromMigDeviceHandle(MigDevice) +} + +// nvml.DeviceGetBusType() +func DeviceGetBusType(Device Device) (BusType, Return) { + var Type BusType + ret := nvmlDeviceGetBusType(Device, &Type) + return Type, ret +} + +func (Device Device) GetBusType() (BusType, Return) { + return DeviceGetBusType(Device) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/doc.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/doc.go new file mode 100644 index 0000000..c2ce2e3 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/doc.go @@ -0,0 +1,21 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. +// Code generated by https://git.io/c-for-go. DO NOT EDIT. 
+ +/* +Package NVML bindings +*/ +package nvml diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go new file mode 100644 index 0000000..c6315f5 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go @@ -0,0 +1,42 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +// nvml.EventSetCreate() +func EventSetCreate() (EventSet, Return) { + var Set EventSet + ret := nvmlEventSetCreate(&Set) + return Set, ret +} + +// nvml.EventSetWait() +func EventSetWait(Set EventSet, Timeoutms uint32) (EventData, Return) { + var Data EventData + ret := nvmlEventSetWait(Set, &Data, Timeoutms) + return Data, ret +} + +func (Set EventSet) Wait(Timeoutms uint32) (EventData, Return) { + return EventSetWait(Set, Timeoutms) +} + +// nvml.EventSetFree() +func EventSetFree(Set EventSet) Return { + return nvmlEventSetFree(Set) +} + +func (Set EventSet) Free() Return { + return EventSetFree(Set) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/init.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/init.go new file mode 100644 index 0000000..0a77d22 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/init.go @@ -0,0 +1,226 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nvml + +import ( + "fmt" + + "github.com/NVIDIA/go-nvml/pkg/dl" +) + +import "C" + +const ( + nvmlLibraryName = "libnvidia-ml.so.1" + nvmlLibraryLoadFlags = dl.RTLD_LAZY | dl.RTLD_GLOBAL +) + +var nvml *dl.DynamicLibrary + +// nvml.Init() +func Init() Return { + lib := dl.New(nvmlLibraryName, nvmlLibraryLoadFlags) + if lib == nil { + panic(fmt.Sprintf("error instantiating DynamicLibrary for %s", nvmlLibraryName)) + } + + err := lib.Open() + if err != nil { + panic(fmt.Sprintf("error opening %s: %v", nvmlLibraryName, err)) + } + + nvml = lib + updateVersionedSymbols() + + return nvmlInit() +} + +// nvml.InitWithFlags() +func InitWithFlags(Flags uint32) Return { + lib := dl.New(nvmlLibraryName, nvmlLibraryLoadFlags) + if lib == nil { + panic(fmt.Sprintf("error instantiating DynamicLibrary for %s", nvmlLibraryName)) + } + + err := lib.Open() + if err != nil { + panic(fmt.Sprintf("error opening %s: %v", nvmlLibraryName, err)) + } + + nvml = lib + + return nvmlInitWithFlags(Flags) +} + +// nvml.Shutdown() +func Shutdown() Return { + ret := nvmlShutdown() + if ret != SUCCESS { + return ret + } + + err := nvml.Close() + if err != nil { + panic(fmt.Sprintf("error closing %s: %v", nvmlLibraryName, err)) + } + + return ret +} + +// Default all versioned APIs to v1 (to infer the types) +var nvmlInit = nvmlInit_v1 +var nvmlDeviceGetPciInfo = nvmlDeviceGetPciInfo_v1 +var nvmlDeviceGetCount = nvmlDeviceGetCount_v1 +var nvmlDeviceGetHandleByIndex = nvmlDeviceGetHandleByIndex_v1 +var nvmlDeviceGetHandleByPciBusId = nvmlDeviceGetHandleByPciBusId_v1 +var nvmlDeviceGetNvLinkRemotePciInfo = nvmlDeviceGetNvLinkRemotePciInfo_v1 +var nvmlDeviceRemoveGpu = nvmlDeviceRemoveGpu_v1 +var nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v1 +var nvmlEventSetWait = nvmlEventSetWait_v1 +var nvmlDeviceGetAttributes = nvmlDeviceGetAttributes_v1 +var nvmlComputeInstanceGetInfo = nvmlComputeInstanceGetInfo_v1 +var DeviceGetComputeRunningProcesses = deviceGetComputeRunningProcesses_v1 +var DeviceGetGraphicsRunningProcesses = deviceGetGraphicsRunningProcesses_v1 +var DeviceGetMPSComputeRunningProcesses = deviceGetMPSComputeRunningProcesses_v1 +var GetBlacklistDeviceCount = GetExcludedDeviceCount +var GetBlacklistDeviceInfoByIndex = GetExcludedDeviceInfoByIndex +var nvmlDeviceGetGpuInstancePossiblePlacements = nvmlDeviceGetGpuInstancePossiblePlacements_v1 +var nvmlVgpuInstanceGetLicenseInfo = nvmlVgpuInstanceGetLicenseInfo_v1 + +type BlacklistDeviceInfo = ExcludedDeviceInfo +type ProcessInfo_v1Slice []ProcessInfo_v1 +type ProcessInfo_v2Slice []ProcessInfo_v2 + +func (pis ProcessInfo_v1Slice) ToProcessInfoSlice() []ProcessInfo { + var newInfos []ProcessInfo + for _, pi := range pis { + info := ProcessInfo{ + Pid: pi.Pid, + UsedGpuMemory: pi.UsedGpuMemory, + GpuInstanceId: 0xFFFFFFFF, // GPU instance ID is invalid in v1 + ComputeInstanceId: 0xFFFFFFFF, // Compute instance ID is invalid in v1 + } + newInfos = append(newInfos, info) + } + return newInfos +} + +func (pis ProcessInfo_v2Slice) ToProcessInfoSlice() []ProcessInfo { + var newInfos []ProcessInfo + for _, pi := range pis { + info := ProcessInfo{ + Pid: pi.Pid, + UsedGpuMemory: pi.UsedGpuMemory, + GpuInstanceId: pi.GpuInstanceId, + ComputeInstanceId: pi.ComputeInstanceId, + } + newInfos = append(newInfos, info) + } + return newInfos +} + +// updateVersionedSymbols() +func updateVersionedSymbols() { + err := nvml.Lookup("nvmlInit_v2") + if err == nil { + nvmlInit = nvmlInit_v2 + } + err = nvml.Lookup("nvmlDeviceGetPciInfo_v2") + if err == 
nil { + nvmlDeviceGetPciInfo = nvmlDeviceGetPciInfo_v2 + } + err = nvml.Lookup("nvmlDeviceGetPciInfo_v3") + if err == nil { + nvmlDeviceGetPciInfo = nvmlDeviceGetPciInfo_v3 + } + err = nvml.Lookup("nvmlDeviceGetCount_v2") + if err == nil { + nvmlDeviceGetCount = nvmlDeviceGetCount_v2 + } + err = nvml.Lookup("nvmlDeviceGetHandleByIndex_v2") + if err == nil { + nvmlDeviceGetHandleByIndex = nvmlDeviceGetHandleByIndex_v2 + } + err = nvml.Lookup("nvmlDeviceGetHandleByPciBusId_v2") + if err == nil { + nvmlDeviceGetHandleByPciBusId = nvmlDeviceGetHandleByPciBusId_v2 + } + err = nvml.Lookup("nvmlDeviceGetNvLinkRemotePciInfo_v2") + if err == nil { + nvmlDeviceGetNvLinkRemotePciInfo = nvmlDeviceGetNvLinkRemotePciInfo_v2 + } + // Unable to overwrite nvmlDeviceRemoveGpu() because the v2 function takes + // a different set of parameters than the v1 function. + //err = nvml.Lookup("nvmlDeviceRemoveGpu_v2") + //if err == nil { + // nvmlDeviceRemoveGpu = nvmlDeviceRemoveGpu_v2 + //} + err = nvml.Lookup("nvmlDeviceGetGridLicensableFeatures_v2") + if err == nil { + nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v2 + } + err = nvml.Lookup("nvmlDeviceGetGridLicensableFeatures_v3") + if err == nil { + nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v3 + } + err = nvml.Lookup("nvmlDeviceGetGridLicensableFeatures_v4") + if err == nil { + nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v4 + } + err = nvml.Lookup("nvmlEventSetWait_v2") + if err == nil { + nvmlEventSetWait = nvmlEventSetWait_v2 + } + err = nvml.Lookup("nvmlDeviceGetAttributes_v2") + if err == nil { + nvmlDeviceGetAttributes = nvmlDeviceGetAttributes_v2 + } + err = nvml.Lookup("nvmlComputeInstanceGetInfo_v2") + if err == nil { + nvmlComputeInstanceGetInfo = nvmlComputeInstanceGetInfo_v2 + } + err = nvml.Lookup("nvmlDeviceGetComputeRunningProcesses_v2") + if err == nil { + DeviceGetComputeRunningProcesses = deviceGetComputeRunningProcesses_v2 + } + err = nvml.Lookup("nvmlDeviceGetComputeRunningProcesses_v3") + if err == nil { + DeviceGetComputeRunningProcesses = deviceGetComputeRunningProcesses_v3 + } + err = nvml.Lookup("nvmlDeviceGetGraphicsRunningProcesses_v2") + if err == nil { + DeviceGetGraphicsRunningProcesses = deviceGetGraphicsRunningProcesses_v2 + } + err = nvml.Lookup("nvmlDeviceGetGraphicsRunningProcesses_v3") + if err == nil { + DeviceGetGraphicsRunningProcesses = deviceGetGraphicsRunningProcesses_v3 + } + err = nvml.Lookup("nvmlDeviceGetMPSComputeRunningProcesses_v2") + if err == nil { + DeviceGetMPSComputeRunningProcesses = deviceGetMPSComputeRunningProcesses_v2 + } + err = nvml.Lookup("nvmlDeviceGetMPSComputeRunningProcesses_v3") + if err == nil { + DeviceGetMPSComputeRunningProcesses = deviceGetMPSComputeRunningProcesses_v3 + } + err = nvml.Lookup("nvmlDeviceGetGpuInstancePossiblePlacements_v2") + if err == nil { + nvmlDeviceGetGpuInstancePossiblePlacements = nvmlDeviceGetGpuInstancePossiblePlacements_v2 + } + err = nvml.Lookup("nvmlVgpuInstanceGetLicenseInfo_v2") + if err == nil { + nvmlVgpuInstanceGetLicenseInfo = nvmlVgpuInstanceGetLicenseInfo_v2 + } +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go new file mode 100644 index 0000000..d7dbfba --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go @@ -0,0 +1,2583 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. +// Code generated by https://git.io/c-for-go. DO NOT EDIT. + +package nvml + +/* +#cgo LDFLAGS: -Wl,--unresolved-symbols=ignore-in-object-files +#cgo CFLAGS: -DNVML_NO_UNVERSIONED_FUNC_DEFS=1 +#include "nvml.h" +#include <stdlib.h> +#include "cgo_helpers.h" +*/ +import "C" +import "unsafe" + +// nvmlInit_v2 function as declared in nvml/nvml.h +func nvmlInit_v2() Return { + __ret := C.nvmlInit_v2() + __v := (Return)(__ret) + return __v +} + +// nvmlInitWithFlags function as declared in nvml/nvml.h +func nvmlInitWithFlags(Flags uint32) Return { + cFlags, _ := (C.uint)(Flags), cgoAllocsUnknown + __ret := C.nvmlInitWithFlags(cFlags) + __v := (Return)(__ret) + return __v +} + +// nvmlShutdown function as declared in nvml/nvml.h +func nvmlShutdown() Return { + __ret := C.nvmlShutdown() + __v := (Return)(__ret) + return __v +} + +// nvmlErrorString function as declared in nvml/nvml.h +func nvmlErrorString(Result Return) string { + cResult, _ := (C.nvmlReturn_t)(Result), cgoAllocsUnknown + __ret := C.nvmlErrorString(cResult) + __v := packPCharString(__ret) + return __v +} + +// nvmlSystemGetDriverVersion function as declared in nvml/nvml.h +func nvmlSystemGetDriverVersion(Version *byte, Length uint32) Return { + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlSystemGetDriverVersion(cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetNVMLVersion function as declared in nvml/nvml.h +func nvmlSystemGetNVMLVersion(Version *byte, Length uint32) Return { + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlSystemGetNVMLVersion(cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetCudaDriverVersion function as declared in nvml/nvml.h +func nvmlSystemGetCudaDriverVersion(CudaDriverVersion *int32) Return { + cCudaDriverVersion, _ := (*C.int)(unsafe.Pointer(CudaDriverVersion)), cgoAllocsUnknown + __ret := C.nvmlSystemGetCudaDriverVersion(cCudaDriverVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetCudaDriverVersion_v2 function as declared in nvml/nvml.h +func nvmlSystemGetCudaDriverVersion_v2(CudaDriverVersion *int32) Return { + cCudaDriverVersion, _ := (*C.int)(unsafe.Pointer(CudaDriverVersion)), cgoAllocsUnknown + __ret := C.nvmlSystemGetCudaDriverVersion_v2(cCudaDriverVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetProcessName function as declared in nvml/nvml.h +func nvmlSystemGetProcessName(Pid uint32, Name *byte, Length uint32) Return { + cPid, _ := (C.uint)(Pid), cgoAllocsUnknown + cName, _ := (*C.char)(unsafe.Pointer(Name)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlSystemGetProcessName(cPid, cName, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetCount function as declared in
nvml/nvml.h +func nvmlUnitGetCount(UnitCount *uint32) Return { + cUnitCount, _ := (*C.uint)(unsafe.Pointer(UnitCount)), cgoAllocsUnknown + __ret := C.nvmlUnitGetCount(cUnitCount) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetHandleByIndex function as declared in nvml/nvml.h +func nvmlUnitGetHandleByIndex(Index uint32, Unit *Unit) Return { + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cUnit, _ := (*C.nvmlUnit_t)(unsafe.Pointer(Unit)), cgoAllocsUnknown + __ret := C.nvmlUnitGetHandleByIndex(cIndex, cUnit) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetUnitInfo function as declared in nvml/nvml.h +func nvmlUnitGetUnitInfo(Unit Unit, Info *UnitInfo) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlUnitInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlUnitGetUnitInfo(cUnit, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetLedState function as declared in nvml/nvml.h +func nvmlUnitGetLedState(Unit Unit, State *LedState) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cState, _ := (*C.nvmlLedState_t)(unsafe.Pointer(State)), cgoAllocsUnknown + __ret := C.nvmlUnitGetLedState(cUnit, cState) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetPsuInfo function as declared in nvml/nvml.h +func nvmlUnitGetPsuInfo(Unit Unit, Psu *PSUInfo) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cPsu, _ := (*C.nvmlPSUInfo_t)(unsafe.Pointer(Psu)), cgoAllocsUnknown + __ret := C.nvmlUnitGetPsuInfo(cUnit, cPsu) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetTemperature function as declared in nvml/nvml.h +func nvmlUnitGetTemperature(Unit Unit, _type uint32, Temp *uint32) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + c_type, _ := (C.uint)(_type), cgoAllocsUnknown + cTemp, _ := (*C.uint)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlUnitGetTemperature(cUnit, c_type, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetFanSpeedInfo function as declared in nvml/nvml.h +func nvmlUnitGetFanSpeedInfo(Unit Unit, FanSpeeds *UnitFanSpeeds) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cFanSpeeds, _ := (*C.nvmlUnitFanSpeeds_t)(unsafe.Pointer(FanSpeeds)), cgoAllocsUnknown + __ret := C.nvmlUnitGetFanSpeedInfo(cUnit, cFanSpeeds) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetDevices function as declared in nvml/nvml.h +func nvmlUnitGetDevices(Unit Unit, DeviceCount *uint32, Devices *Device) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + cDevices, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Devices)), cgoAllocsUnknown + __ret := C.nvmlUnitGetDevices(cUnit, cDeviceCount, cDevices) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetHicVersion function as declared in nvml/nvml.h +func nvmlSystemGetHicVersion(HwbcCount *uint32, HwbcEntries *HwbcEntry) Return { + cHwbcCount, _ := (*C.uint)(unsafe.Pointer(HwbcCount)), cgoAllocsUnknown + cHwbcEntries, _ := (*C.nvmlHwbcEntry_t)(unsafe.Pointer(HwbcEntries)), cgoAllocsUnknown + __ret := C.nvmlSystemGetHicVersion(cHwbcCount, cHwbcEntries) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCount_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetCount_v2(DeviceCount *uint32) Return { + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + 
__ret := C.nvmlDeviceGetCount_v2(cDeviceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAttributes_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetAttributes_v2(Device Device, Attributes *DeviceAttributes) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cAttributes, _ := (*C.nvmlDeviceAttributes_t)(unsafe.Pointer(Attributes)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAttributes_v2(cDevice, cAttributes) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByIndex_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByIndex_v2(Index uint32, Device *Device) Return { + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByIndex_v2(cIndex, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleBySerial function as declared in nvml/nvml.h +func nvmlDeviceGetHandleBySerial(Serial string, Device *Device) Return { + cSerial, _ := unpackPCharString(Serial) + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleBySerial(cSerial, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByUUID function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByUUID(Uuid string, Device *Device) Return { + cUuid, _ := unpackPCharString(Uuid) + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByUUID(cUuid, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByPciBusId_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByPciBusId_v2(PciBusId string, Device *Device) Return { + cPciBusId, _ := unpackPCharString(PciBusId) + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByPciBusId_v2(cPciBusId, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetName function as declared in nvml/nvml.h +func nvmlDeviceGetName(Device Device, Name *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cName, _ := (*C.char)(unsafe.Pointer(Name)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetName(cDevice, cName, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBrand function as declared in nvml/nvml.h +func nvmlDeviceGetBrand(Device Device, _type *BrandType) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + c_type, _ := (*C.nvmlBrandType_t)(unsafe.Pointer(_type)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBrand(cDevice, c_type) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetIndex function as declared in nvml/nvml.h +func nvmlDeviceGetIndex(Device Device, Index *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIndex, _ := (*C.uint)(unsafe.Pointer(Index)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetIndex(cDevice, cIndex) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSerial function as declared in nvml/nvml.h +func nvmlDeviceGetSerial(Device Device, Serial *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSerial, _ := (*C.char)(unsafe.Pointer(Serial)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSerial(cDevice, cSerial, cLength) + __v := 
(Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryAffinity function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryAffinity(Device Device, NodeSetSize uint32, NodeSet *uint, Scope AffinityScope) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cNodeSetSize, _ := (C.uint)(NodeSetSize), cgoAllocsUnknown + cNodeSet, _ := (*C.ulong)(unsafe.Pointer(NodeSet)), cgoAllocsUnknown + cScope, _ := (C.nvmlAffinityScope_t)(Scope), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryAffinity(cDevice, cNodeSetSize, cNodeSet, cScope) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCpuAffinityWithinScope function as declared in nvml/nvml.h +func nvmlDeviceGetCpuAffinityWithinScope(Device Device, CpuSetSize uint32, CpuSet *uint, Scope AffinityScope) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCpuSetSize, _ := (C.uint)(CpuSetSize), cgoAllocsUnknown + cCpuSet, _ := (*C.ulong)(unsafe.Pointer(CpuSet)), cgoAllocsUnknown + cScope, _ := (C.nvmlAffinityScope_t)(Scope), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCpuAffinityWithinScope(cDevice, cCpuSetSize, cCpuSet, cScope) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCpuAffinity function as declared in nvml/nvml.h +func nvmlDeviceGetCpuAffinity(Device Device, CpuSetSize uint32, CpuSet *uint) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCpuSetSize, _ := (C.uint)(CpuSetSize), cgoAllocsUnknown + cCpuSet, _ := (*C.ulong)(unsafe.Pointer(CpuSet)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCpuAffinity(cDevice, cCpuSetSize, cCpuSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetCpuAffinity function as declared in nvml/nvml.h +func nvmlDeviceSetCpuAffinity(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetCpuAffinity(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceClearCpuAffinity function as declared in nvml/nvml.h +func nvmlDeviceClearCpuAffinity(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceClearCpuAffinity(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTopologyCommonAncestor function as declared in nvml/nvml.h +func nvmlDeviceGetTopologyCommonAncestor(Device1 Device, Device2 Device, PathInfo *GpuTopologyLevel) Return { + cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown + cDevice2, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device2)), cgoAllocsUnknown + cPathInfo, _ := (*C.nvmlGpuTopologyLevel_t)(unsafe.Pointer(PathInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTopologyCommonAncestor(cDevice1, cDevice2, cPathInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTopologyNearestGpus function as declared in nvml/nvml.h +func nvmlDeviceGetTopologyNearestGpus(Device Device, Level GpuTopologyLevel, Count *uint32, DeviceArray *Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLevel, _ := (C.nvmlGpuTopologyLevel_t)(Level), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cDeviceArray, _ := (*C.nvmlDevice_t)(unsafe.Pointer(DeviceArray)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTopologyNearestGpus(cDevice, cLevel, cCount, cDeviceArray) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetTopologyGpuSet function as declared in nvml/nvml.h +func 
nvmlSystemGetTopologyGpuSet(CpuNumber uint32, Count *uint32, DeviceArray *Device) Return { + cCpuNumber, _ := (C.uint)(CpuNumber), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cDeviceArray, _ := (*C.nvmlDevice_t)(unsafe.Pointer(DeviceArray)), cgoAllocsUnknown + __ret := C.nvmlSystemGetTopologyGpuSet(cCpuNumber, cCount, cDeviceArray) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetP2PStatus function as declared in nvml/nvml.h +func nvmlDeviceGetP2PStatus(Device1 Device, Device2 Device, P2pIndex GpuP2PCapsIndex, P2pStatus *GpuP2PStatus) Return { + cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown + cDevice2, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device2)), cgoAllocsUnknown + cP2pIndex, _ := (C.nvmlGpuP2PCapsIndex_t)(P2pIndex), cgoAllocsUnknown + cP2pStatus, _ := (*C.nvmlGpuP2PStatus_t)(unsafe.Pointer(P2pStatus)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetP2PStatus(cDevice1, cDevice2, cP2pIndex, cP2pStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetUUID function as declared in nvml/nvml.h +func nvmlDeviceGetUUID(Device Device, Uuid *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cUuid, _ := (*C.char)(unsafe.Pointer(Uuid)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetUUID(cDevice, cUuid, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetMdevUUID function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetMdevUUID(VgpuInstance VgpuInstance, MdevUuid *byte, Size uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cMdevUuid, _ := (*C.char)(unsafe.Pointer(MdevUuid)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetMdevUUID(cVgpuInstance, cMdevUuid, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMinorNumber function as declared in nvml/nvml.h +func nvmlDeviceGetMinorNumber(Device Device, MinorNumber *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMinorNumber, _ := (*C.uint)(unsafe.Pointer(MinorNumber)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMinorNumber(cDevice, cMinorNumber) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBoardPartNumber function as declared in nvml/nvml.h +func nvmlDeviceGetBoardPartNumber(Device Device, PartNumber *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPartNumber, _ := (*C.char)(unsafe.Pointer(PartNumber)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBoardPartNumber(cDevice, cPartNumber, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetInforomVersion function as declared in nvml/nvml.h +func nvmlDeviceGetInforomVersion(Device Device, Object InforomObject, Version *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cObject, _ := (C.nvmlInforomObject_t)(Object), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetInforomVersion(cDevice, cObject, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetInforomImageVersion function as declared in nvml/nvml.h +func nvmlDeviceGetInforomImageVersion(Device Device, Version *byte, Length uint32) 
Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetInforomImageVersion(cDevice, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetInforomConfigurationChecksum function as declared in nvml/nvml.h +func nvmlDeviceGetInforomConfigurationChecksum(Device Device, Checksum *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cChecksum, _ := (*C.uint)(unsafe.Pointer(Checksum)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetInforomConfigurationChecksum(cDevice, cChecksum) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceValidateInforom function as declared in nvml/nvml.h +func nvmlDeviceValidateInforom(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceValidateInforom(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDisplayMode function as declared in nvml/nvml.h +func nvmlDeviceGetDisplayMode(Device Device, Display *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cDisplay, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Display)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDisplayMode(cDevice, cDisplay) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDisplayActive function as declared in nvml/nvml.h +func nvmlDeviceGetDisplayActive(Device Device, IsActive *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIsActive, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsActive)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDisplayActive(cDevice, cIsActive) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPersistenceMode function as declared in nvml/nvml.h +func nvmlDeviceGetPersistenceMode(Device Device, Mode *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPersistenceMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPciInfo_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetPciInfo_v3(Device Device, Pci *PciInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPciInfo_v3(cDevice, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxPcieLinkGeneration function as declared in nvml/nvml.h +func nvmlDeviceGetMaxPcieLinkGeneration(Device Device, MaxLinkGen *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMaxLinkGen, _ := (*C.uint)(unsafe.Pointer(MaxLinkGen)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxPcieLinkGeneration(cDevice, cMaxLinkGen) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxPcieLinkWidth function as declared in nvml/nvml.h +func nvmlDeviceGetMaxPcieLinkWidth(Device Device, MaxLinkWidth *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMaxLinkWidth, _ := (*C.uint)(unsafe.Pointer(MaxLinkWidth)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxPcieLinkWidth(cDevice, cMaxLinkWidth) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCurrPcieLinkGeneration function as 
declared in nvml/nvml.h +func nvmlDeviceGetCurrPcieLinkGeneration(Device Device, CurrLinkGen *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCurrLinkGen, _ := (*C.uint)(unsafe.Pointer(CurrLinkGen)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCurrPcieLinkGeneration(cDevice, cCurrLinkGen) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCurrPcieLinkWidth function as declared in nvml/nvml.h +func nvmlDeviceGetCurrPcieLinkWidth(Device Device, CurrLinkWidth *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCurrLinkWidth, _ := (*C.uint)(unsafe.Pointer(CurrLinkWidth)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCurrPcieLinkWidth(cDevice, cCurrLinkWidth) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPcieThroughput function as declared in nvml/nvml.h +func nvmlDeviceGetPcieThroughput(Device Device, Counter PcieUtilCounter, Value *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCounter, _ := (C.nvmlPcieUtilCounter_t)(Counter), cgoAllocsUnknown + cValue, _ := (*C.uint)(unsafe.Pointer(Value)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPcieThroughput(cDevice, cCounter, cValue) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPcieReplayCounter function as declared in nvml/nvml.h +func nvmlDeviceGetPcieReplayCounter(Device Device, Value *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cValue, _ := (*C.uint)(unsafe.Pointer(Value)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPcieReplayCounter(cDevice, cValue) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetClockInfo function as declared in nvml/nvml.h +func nvmlDeviceGetClockInfo(Device Device, _type ClockType, Clock *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + c_type, _ := (C.nvmlClockType_t)(_type), cgoAllocsUnknown + cClock, _ := (*C.uint)(unsafe.Pointer(Clock)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetClockInfo(cDevice, c_type, cClock) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxClockInfo function as declared in nvml/nvml.h +func nvmlDeviceGetMaxClockInfo(Device Device, _type ClockType, Clock *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + c_type, _ := (C.nvmlClockType_t)(_type), cgoAllocsUnknown + cClock, _ := (*C.uint)(unsafe.Pointer(Clock)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxClockInfo(cDevice, c_type, cClock) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetApplicationsClock function as declared in nvml/nvml.h +func nvmlDeviceGetApplicationsClock(Device Device, ClockType ClockType, ClockMHz *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown + cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetApplicationsClock(cDevice, cClockType, cClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDefaultApplicationsClock function as declared in nvml/nvml.h +func nvmlDeviceGetDefaultApplicationsClock(Device Device, ClockType ClockType, ClockMHz *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown + cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown + __ret := 
C.nvmlDeviceGetDefaultApplicationsClock(cDevice, cClockType, cClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetApplicationsClocks function as declared in nvml/nvml.h +func nvmlDeviceResetApplicationsClocks(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceResetApplicationsClocks(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetClock function as declared in nvml/nvml.h +func nvmlDeviceGetClock(Device Device, ClockType ClockType, ClockId ClockId, ClockMHz *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown + cClockId, _ := (C.nvmlClockId_t)(ClockId), cgoAllocsUnknown + cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetClock(cDevice, cClockType, cClockId, cClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxCustomerBoostClock function as declared in nvml/nvml.h +func nvmlDeviceGetMaxCustomerBoostClock(Device Device, ClockType ClockType, ClockMHz *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown + cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxCustomerBoostClock(cDevice, cClockType, cClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedMemoryClocks function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedMemoryClocks(Device Device, Count *uint32, ClocksMHz *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cClocksMHz, _ := (*C.uint)(unsafe.Pointer(ClocksMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedMemoryClocks(cDevice, cCount, cClocksMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedGraphicsClocks function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedGraphicsClocks(Device Device, MemoryClockMHz uint32, Count *uint32, ClocksMHz *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMemoryClockMHz, _ := (C.uint)(MemoryClockMHz), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cClocksMHz, _ := (*C.uint)(unsafe.Pointer(ClocksMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedGraphicsClocks(cDevice, cMemoryClockMHz, cCount, cClocksMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAutoBoostedClocksEnabled function as declared in nvml/nvml.h +func nvmlDeviceGetAutoBoostedClocksEnabled(Device Device, IsEnabled *EnableState, DefaultIsEnabled *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIsEnabled, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsEnabled)), cgoAllocsUnknown + cDefaultIsEnabled, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(DefaultIsEnabled)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAutoBoostedClocksEnabled(cDevice, cIsEnabled, cDefaultIsEnabled) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetAutoBoostedClocksEnabled function as declared in nvml/nvml.h +func nvmlDeviceSetAutoBoostedClocksEnabled(Device Device, Enabled EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEnabled, _ := (C.nvmlEnableState_t)(Enabled), 
cgoAllocsUnknown + __ret := C.nvmlDeviceSetAutoBoostedClocksEnabled(cDevice, cEnabled) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetDefaultAutoBoostedClocksEnabled function as declared in nvml/nvml.h +func nvmlDeviceSetDefaultAutoBoostedClocksEnabled(Device Device, Enabled EnableState, Flags uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEnabled, _ := (C.nvmlEnableState_t)(Enabled), cgoAllocsUnknown + cFlags, _ := (C.uint)(Flags), cgoAllocsUnknown + __ret := C.nvmlDeviceSetDefaultAutoBoostedClocksEnabled(cDevice, cEnabled, cFlags) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFanSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetFanSpeed(Device Device, Speed *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSpeed, _ := (*C.uint)(unsafe.Pointer(Speed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFanSpeed(cDevice, cSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFanSpeed_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetFanSpeed_v2(Device Device, Fan uint32, Speed *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cSpeed, _ := (*C.uint)(unsafe.Pointer(Speed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFanSpeed_v2(cDevice, cFan, cSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNumFans function as declared in nvml/nvml.h +func nvmlDeviceGetNumFans(Device Device, NumFans *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cNumFans, _ := (*C.uint)(unsafe.Pointer(NumFans)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNumFans(cDevice, cNumFans) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTemperature function as declared in nvml/nvml.h +func nvmlDeviceGetTemperature(Device Device, SensorType TemperatureSensors, Temp *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSensorType, _ := (C.nvmlTemperatureSensors_t)(SensorType), cgoAllocsUnknown + cTemp, _ := (*C.uint)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTemperature(cDevice, cSensorType, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTemperatureThreshold function as declared in nvml/nvml.h +func nvmlDeviceGetTemperatureThreshold(Device Device, ThresholdType TemperatureThresholds, Temp *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cThresholdType, _ := (C.nvmlTemperatureThresholds_t)(ThresholdType), cgoAllocsUnknown + cTemp, _ := (*C.uint)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTemperatureThreshold(cDevice, cThresholdType, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetTemperatureThreshold function as declared in nvml/nvml.h +func nvmlDeviceSetTemperatureThreshold(Device Device, ThresholdType TemperatureThresholds, Temp *int32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cThresholdType, _ := (C.nvmlTemperatureThresholds_t)(ThresholdType), cgoAllocsUnknown + cTemp, _ := (*C.int)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetTemperatureThreshold(cDevice, cThresholdType, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPerformanceState function as declared in nvml/nvml.h +func nvmlDeviceGetPerformanceState(Device Device, PState *Pstates) Return { + 
cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPState, _ := (*C.nvmlPstates_t)(unsafe.Pointer(PState)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPerformanceState(cDevice, cPState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCurrentClocksThrottleReasons function as declared in nvml/nvml.h +func nvmlDeviceGetCurrentClocksThrottleReasons(Device Device, ClocksThrottleReasons *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cClocksThrottleReasons, _ := (*C.ulonglong)(unsafe.Pointer(ClocksThrottleReasons)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCurrentClocksThrottleReasons(cDevice, cClocksThrottleReasons) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedClocksThrottleReasons function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedClocksThrottleReasons(Device Device, SupportedClocksThrottleReasons *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSupportedClocksThrottleReasons, _ := (*C.ulonglong)(unsafe.Pointer(SupportedClocksThrottleReasons)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedClocksThrottleReasons(cDevice, cSupportedClocksThrottleReasons) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerState function as declared in nvml/nvml.h +func nvmlDeviceGetPowerState(Device Device, PState *Pstates) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPState, _ := (*C.nvmlPstates_t)(unsafe.Pointer(PState)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerState(cDevice, cPState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementMode function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementMode(Device Device, Mode *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerManagementMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementLimit function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementLimit(Device Device, Limit *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLimit, _ := (*C.uint)(unsafe.Pointer(Limit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerManagementLimit(cDevice, cLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementLimitConstraints function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementLimitConstraints(Device Device, MinLimit *uint32, MaxLimit *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMinLimit, _ := (*C.uint)(unsafe.Pointer(MinLimit)), cgoAllocsUnknown + cMaxLimit, _ := (*C.uint)(unsafe.Pointer(MaxLimit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerManagementLimitConstraints(cDevice, cMinLimit, cMaxLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementDefaultLimit function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementDefaultLimit(Device Device, DefaultLimit *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cDefaultLimit, _ := (*C.uint)(unsafe.Pointer(DefaultLimit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerManagementDefaultLimit(cDevice, cDefaultLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerUsage function as declared in 
nvml/nvml.h +func nvmlDeviceGetPowerUsage(Device Device, Power *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPower, _ := (*C.uint)(unsafe.Pointer(Power)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerUsage(cDevice, cPower) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTotalEnergyConsumption function as declared in nvml/nvml.h +func nvmlDeviceGetTotalEnergyConsumption(Device Device, Energy *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEnergy, _ := (*C.ulonglong)(unsafe.Pointer(Energy)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTotalEnergyConsumption(cDevice, cEnergy) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEnforcedPowerLimit function as declared in nvml/nvml.h +func nvmlDeviceGetEnforcedPowerLimit(Device Device, Limit *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLimit, _ := (*C.uint)(unsafe.Pointer(Limit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEnforcedPowerLimit(cDevice, cLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuOperationMode function as declared in nvml/nvml.h +func nvmlDeviceGetGpuOperationMode(Device Device, Current *GpuOperationMode, Pending *GpuOperationMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlGpuOperationMode_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + cPending, _ := (*C.nvmlGpuOperationMode_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuOperationMode(cDevice, cCurrent, cPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryInfo function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryInfo(Device Device, Memory *Memory) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMemory, _ := (*C.nvmlMemory_t)(unsafe.Pointer(Memory)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryInfo(cDevice, cMemory) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryInfo_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryInfo_v2(Device Device, Memory *Memory_v2) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMemory, _ := (*C.nvmlMemory_v2_t)(unsafe.Pointer(Memory)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryInfo_v2(cDevice, cMemory) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeMode function as declared in nvml/nvml.h +func nvmlDeviceGetComputeMode(Device Device, Mode *ComputeMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (*C.nvmlComputeMode_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCudaComputeCapability function as declared in nvml/nvml.h +func nvmlDeviceGetCudaComputeCapability(Device Device, Major *int32, Minor *int32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMajor, _ := (*C.int)(unsafe.Pointer(Major)), cgoAllocsUnknown + cMinor, _ := (*C.int)(unsafe.Pointer(Minor)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCudaComputeCapability(cDevice, cMajor, cMinor) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEccMode function as declared in nvml/nvml.h +func nvmlDeviceGetEccMode(Device Device, Current *EnableState, Pending *EnableState) Return { + cDevice, _ := 
*(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + cPending, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEccMode(cDevice, cCurrent, cPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBoardId function as declared in nvml/nvml.h +func nvmlDeviceGetBoardId(Device Device, BoardId *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cBoardId, _ := (*C.uint)(unsafe.Pointer(BoardId)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBoardId(cDevice, cBoardId) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMultiGpuBoard function as declared in nvml/nvml.h +func nvmlDeviceGetMultiGpuBoard(Device Device, MultiGpuBool *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMultiGpuBool, _ := (*C.uint)(unsafe.Pointer(MultiGpuBool)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMultiGpuBoard(cDevice, cMultiGpuBool) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTotalEccErrors function as declared in nvml/nvml.h +func nvmlDeviceGetTotalEccErrors(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType, EccCounts *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cErrorType, _ := (C.nvmlMemoryErrorType_t)(ErrorType), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + cEccCounts, _ := (*C.ulonglong)(unsafe.Pointer(EccCounts)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTotalEccErrors(cDevice, cErrorType, cCounterType, cEccCounts) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDetailedEccErrors function as declared in nvml/nvml.h +func nvmlDeviceGetDetailedEccErrors(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType, EccCounts *EccErrorCounts) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cErrorType, _ := (C.nvmlMemoryErrorType_t)(ErrorType), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + cEccCounts, _ := (*C.nvmlEccErrorCounts_t)(unsafe.Pointer(EccCounts)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDetailedEccErrors(cDevice, cErrorType, cCounterType, cEccCounts) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryErrorCounter function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryErrorCounter(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType, LocationType MemoryLocation, Count *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cErrorType, _ := (C.nvmlMemoryErrorType_t)(ErrorType), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + cLocationType, _ := (C.nvmlMemoryLocation_t)(LocationType), cgoAllocsUnknown + cCount, _ := (*C.ulonglong)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryErrorCounter(cDevice, cErrorType, cCounterType, cLocationType, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetUtilizationRates function as declared in nvml/nvml.h +func nvmlDeviceGetUtilizationRates(Device Device, Utilization *Utilization) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cUtilization, _ := (*C.nvmlUtilization_t)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + __ret := 
C.nvmlDeviceGetUtilizationRates(cDevice, cUtilization) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEncoderUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderUtilization(Device Device, Utilization *uint32, SamplingPeriodUs *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cUtilization, _ := (*C.uint)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cSamplingPeriodUs, _ := (*C.uint)(unsafe.Pointer(SamplingPeriodUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderUtilization(cDevice, cUtilization, cSamplingPeriodUs) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEncoderCapacity function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderCapacity(Device Device, EncoderQueryType EncoderType, EncoderCapacity *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEncoderQueryType, _ := (C.nvmlEncoderType_t)(EncoderQueryType), cgoAllocsUnknown + cEncoderCapacity, _ := (*C.uint)(unsafe.Pointer(EncoderCapacity)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderCapacity(cDevice, cEncoderQueryType, cEncoderCapacity) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEncoderStats function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderStats(Device Device, SessionCount *uint32, AverageFps *uint32, AverageLatency *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cAverageFps, _ := (*C.uint)(unsafe.Pointer(AverageFps)), cgoAllocsUnknown + cAverageLatency, _ := (*C.uint)(unsafe.Pointer(AverageLatency)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderStats(cDevice, cSessionCount, cAverageFps, cAverageLatency) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEncoderSessions function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderSessions(Device Device, SessionCount *uint32, SessionInfos *EncoderSessionInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfos, _ := (*C.nvmlEncoderSessionInfo_t)(unsafe.Pointer(SessionInfos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderSessions(cDevice, cSessionCount, cSessionInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDecoderUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetDecoderUtilization(Device Device, Utilization *uint32, SamplingPeriodUs *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cUtilization, _ := (*C.uint)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cSamplingPeriodUs, _ := (*C.uint)(unsafe.Pointer(SamplingPeriodUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDecoderUtilization(cDevice, cUtilization, cSamplingPeriodUs) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFBCStats function as declared in nvml/nvml.h +func nvmlDeviceGetFBCStats(Device Device, FbcStats *FBCStats) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cFbcStats, _ := (*C.nvmlFBCStats_t)(unsafe.Pointer(FbcStats)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFBCStats(cDevice, cFbcStats) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFBCSessions function as declared in nvml/nvml.h +func nvmlDeviceGetFBCSessions(Device Device, SessionCount *uint32, SessionInfo *FBCSessionInfo) Return { + cDevice, _ 
:= *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfo, _ := (*C.nvmlFBCSessionInfo_t)(unsafe.Pointer(SessionInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFBCSessions(cDevice, cSessionCount, cSessionInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDriverModel function as declared in nvml/nvml.h +func nvmlDeviceGetDriverModel(Device Device, Current *DriverModel, Pending *DriverModel) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlDriverModel_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + cPending, _ := (*C.nvmlDriverModel_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDriverModel(cDevice, cCurrent, cPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVbiosVersion function as declared in nvml/nvml.h +func nvmlDeviceGetVbiosVersion(Device Device, Version *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVbiosVersion(cDevice, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBridgeChipInfo function as declared in nvml/nvml.h +func nvmlDeviceGetBridgeChipInfo(Device Device, BridgeHierarchy *BridgeChipHierarchy) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cBridgeHierarchy, _ := (*C.nvmlBridgeChipHierarchy_t)(unsafe.Pointer(BridgeHierarchy)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBridgeChipInfo(cDevice, cBridgeHierarchy) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeRunningProcesses_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetComputeRunningProcesses_v3(Device Device, InfoCount *uint32, Infos *ProcessInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeRunningProcesses_v3(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGraphicsRunningProcesses_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetGraphicsRunningProcesses_v3(Device Device, InfoCount *uint32, Infos *ProcessInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGraphicsRunningProcesses_v3(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMPSComputeRunningProcesses_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetMPSComputeRunningProcesses_v3(Device Device, InfoCount *uint32, Infos *ProcessInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMPSComputeRunningProcesses_v3(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceOnSameBoard function as declared in nvml/nvml.h +func nvmlDeviceOnSameBoard(Device1 Device, 
Device2 Device, OnSameBoard *int32) Return { + cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown + cDevice2, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device2)), cgoAllocsUnknown + cOnSameBoard, _ := (*C.int)(unsafe.Pointer(OnSameBoard)), cgoAllocsUnknown + __ret := C.nvmlDeviceOnSameBoard(cDevice1, cDevice2, cOnSameBoard) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAPIRestriction function as declared in nvml/nvml.h +func nvmlDeviceGetAPIRestriction(Device Device, ApiType RestrictedAPI, IsRestricted *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cApiType, _ := (C.nvmlRestrictedAPI_t)(ApiType), cgoAllocsUnknown + cIsRestricted, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsRestricted)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAPIRestriction(cDevice, cApiType, cIsRestricted) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSamples function as declared in nvml/nvml.h +func nvmlDeviceGetSamples(Device Device, _type SamplingType, LastSeenTimeStamp uint64, SampleValType *ValueType, SampleCount *uint32, Samples *Sample) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + c_type, _ := (C.nvmlSamplingType_t)(_type), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + cSampleValType, _ := (*C.nvmlValueType_t)(unsafe.Pointer(SampleValType)), cgoAllocsUnknown + cSampleCount, _ := (*C.uint)(unsafe.Pointer(SampleCount)), cgoAllocsUnknown + cSamples, _ := (*C.nvmlSample_t)(unsafe.Pointer(Samples)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSamples(cDevice, c_type, cLastSeenTimeStamp, cSampleValType, cSampleCount, cSamples) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBAR1MemoryInfo function as declared in nvml/nvml.h +func nvmlDeviceGetBAR1MemoryInfo(Device Device, Bar1Memory *BAR1Memory) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cBar1Memory, _ := (*C.nvmlBAR1Memory_t)(unsafe.Pointer(Bar1Memory)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBAR1MemoryInfo(cDevice, cBar1Memory) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetViolationStatus function as declared in nvml/nvml.h +func nvmlDeviceGetViolationStatus(Device Device, PerfPolicyType PerfPolicyType, ViolTime *ViolationTime) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPerfPolicyType, _ := (C.nvmlPerfPolicyType_t)(PerfPolicyType), cgoAllocsUnknown + cViolTime, _ := (*C.nvmlViolationTime_t)(unsafe.Pointer(ViolTime)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetViolationStatus(cDevice, cPerfPolicyType, cViolTime) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetIrqNum function as declared in nvml/nvml.h +func nvmlDeviceGetIrqNum(Device Device, IrqNum *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIrqNum, _ := (*C.uint)(unsafe.Pointer(IrqNum)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetIrqNum(cDevice, cIrqNum) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNumGpuCores function as declared in nvml/nvml.h +func nvmlDeviceGetNumGpuCores(Device Device, NumCores *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cNumCores, _ := (*C.uint)(unsafe.Pointer(NumCores)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNumGpuCores(cDevice, cNumCores) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerSource function as declared in 
nvml/nvml.h +func nvmlDeviceGetPowerSource(Device Device, PowerSource *PowerSource) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPowerSource, _ := (*C.nvmlPowerSource_t)(unsafe.Pointer(PowerSource)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerSource(cDevice, cPowerSource) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryBusWidth function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryBusWidth(Device Device, BusWidth *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cBusWidth, _ := (*C.uint)(unsafe.Pointer(BusWidth)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryBusWidth(cDevice, cBusWidth) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPcieLinkMaxSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetPcieLinkMaxSpeed(Device Device, MaxSpeed *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMaxSpeed, _ := (*C.uint)(unsafe.Pointer(MaxSpeed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPcieLinkMaxSpeed(cDevice, cMaxSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAdaptiveClockInfoStatus function as declared in nvml/nvml.h +func nvmlDeviceGetAdaptiveClockInfoStatus(Device Device, AdaptiveClockStatus *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cAdaptiveClockStatus, _ := (*C.uint)(unsafe.Pointer(AdaptiveClockStatus)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAdaptiveClockInfoStatus(cDevice, cAdaptiveClockStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingMode function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingMode(Device Device, Mode *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingStats function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingStats(Device Device, Pid uint32, Stats *AccountingStats) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPid, _ := (C.uint)(Pid), cgoAllocsUnknown + cStats, _ := (*C.nvmlAccountingStats_t)(unsafe.Pointer(Stats)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingStats(cDevice, cPid, cStats) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingPids function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingPids(Device Device, Count *uint32, Pids *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cPids, _ := (*C.uint)(unsafe.Pointer(Pids)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingPids(cDevice, cCount, cPids) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingBufferSize function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingBufferSize(Device Device, BufferSize *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingBufferSize(cDevice, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRetiredPages function as declared in nvml/nvml.h +func nvmlDeviceGetRetiredPages(Device Device, Cause PageRetirementCause, 
PageCount *uint32, Addresses *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCause, _ := (C.nvmlPageRetirementCause_t)(Cause), cgoAllocsUnknown + cPageCount, _ := (*C.uint)(unsafe.Pointer(PageCount)), cgoAllocsUnknown + cAddresses, _ := (*C.ulonglong)(unsafe.Pointer(Addresses)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRetiredPages(cDevice, cCause, cPageCount, cAddresses) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRetiredPages_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetRetiredPages_v2(Device Device, Cause PageRetirementCause, PageCount *uint32, Addresses *uint64, Timestamps *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCause, _ := (C.nvmlPageRetirementCause_t)(Cause), cgoAllocsUnknown + cPageCount, _ := (*C.uint)(unsafe.Pointer(PageCount)), cgoAllocsUnknown + cAddresses, _ := (*C.ulonglong)(unsafe.Pointer(Addresses)), cgoAllocsUnknown + cTimestamps, _ := (*C.ulonglong)(unsafe.Pointer(Timestamps)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRetiredPages_v2(cDevice, cCause, cPageCount, cAddresses, cTimestamps) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRetiredPagesPendingStatus function as declared in nvml/nvml.h +func nvmlDeviceGetRetiredPagesPendingStatus(Device Device, IsPending *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIsPending, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsPending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRetiredPagesPendingStatus(cDevice, cIsPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRemappedRows function as declared in nvml/nvml.h +func nvmlDeviceGetRemappedRows(Device Device, CorrRows *uint32, UncRows *uint32, IsPending *uint32, FailureOccurred *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCorrRows, _ := (*C.uint)(unsafe.Pointer(CorrRows)), cgoAllocsUnknown + cUncRows, _ := (*C.uint)(unsafe.Pointer(UncRows)), cgoAllocsUnknown + cIsPending, _ := (*C.uint)(unsafe.Pointer(IsPending)), cgoAllocsUnknown + cFailureOccurred, _ := (*C.uint)(unsafe.Pointer(FailureOccurred)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRemappedRows(cDevice, cCorrRows, cUncRows, cIsPending, cFailureOccurred) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRowRemapperHistogram function as declared in nvml/nvml.h +func nvmlDeviceGetRowRemapperHistogram(Device Device, Values *RowRemapperHistogramValues) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cValues, _ := (*C.nvmlRowRemapperHistogramValues_t)(unsafe.Pointer(Values)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRowRemapperHistogram(cDevice, cValues) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetArchitecture function as declared in nvml/nvml.h +func nvmlDeviceGetArchitecture(Device Device, Arch *DeviceArchitecture) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cArch, _ := (*C.nvmlDeviceArchitecture_t)(unsafe.Pointer(Arch)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetArchitecture(cDevice, cArch) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitSetLedState function as declared in nvml/nvml.h +func nvmlUnitSetLedState(Unit Unit, Color LedColor) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cColor, _ := (C.nvmlLedColor_t)(Color), cgoAllocsUnknown + __ret := C.nvmlUnitSetLedState(cUnit, cColor) + __v := 
(Return)(__ret) + return __v +} + +// nvmlDeviceSetPersistenceMode function as declared in nvml/nvml.h +func nvmlDeviceSetPersistenceMode(Device Device, Mode EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (C.nvmlEnableState_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetPersistenceMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetComputeMode function as declared in nvml/nvml.h +func nvmlDeviceSetComputeMode(Device Device, Mode ComputeMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (C.nvmlComputeMode_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetComputeMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetEccMode function as declared in nvml/nvml.h +func nvmlDeviceSetEccMode(Device Device, Ecc EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEcc, _ := (C.nvmlEnableState_t)(Ecc), cgoAllocsUnknown + __ret := C.nvmlDeviceSetEccMode(cDevice, cEcc) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceClearEccErrorCounts function as declared in nvml/nvml.h +func nvmlDeviceClearEccErrorCounts(Device Device, CounterType EccCounterType) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + __ret := C.nvmlDeviceClearEccErrorCounts(cDevice, cCounterType) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetDriverModel function as declared in nvml/nvml.h +func nvmlDeviceSetDriverModel(Device Device, DriverModel DriverModel, Flags uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cDriverModel, _ := (C.nvmlDriverModel_t)(DriverModel), cgoAllocsUnknown + cFlags, _ := (C.uint)(Flags), cgoAllocsUnknown + __ret := C.nvmlDeviceSetDriverModel(cDevice, cDriverModel, cFlags) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetGpuLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceSetGpuLockedClocks(Device Device, MinGpuClockMHz uint32, MaxGpuClockMHz uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMinGpuClockMHz, _ := (C.uint)(MinGpuClockMHz), cgoAllocsUnknown + cMaxGpuClockMHz, _ := (C.uint)(MaxGpuClockMHz), cgoAllocsUnknown + __ret := C.nvmlDeviceSetGpuLockedClocks(cDevice, cMinGpuClockMHz, cMaxGpuClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetGpuLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceResetGpuLockedClocks(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceResetGpuLockedClocks(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetMemoryLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceSetMemoryLockedClocks(Device Device, MinMemClockMHz uint32, MaxMemClockMHz uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMinMemClockMHz, _ := (C.uint)(MinMemClockMHz), cgoAllocsUnknown + cMaxMemClockMHz, _ := (C.uint)(MaxMemClockMHz), cgoAllocsUnknown + __ret := C.nvmlDeviceSetMemoryLockedClocks(cDevice, cMinMemClockMHz, cMaxMemClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetMemoryLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceResetMemoryLockedClocks(Device Device) Return { + cDevice, _ := 
*(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceResetMemoryLockedClocks(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetApplicationsClocks function as declared in nvml/nvml.h +func nvmlDeviceSetApplicationsClocks(Device Device, MemClockMHz uint32, GraphicsClockMHz uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMemClockMHz, _ := (C.uint)(MemClockMHz), cgoAllocsUnknown + cGraphicsClockMHz, _ := (C.uint)(GraphicsClockMHz), cgoAllocsUnknown + __ret := C.nvmlDeviceSetApplicationsClocks(cDevice, cMemClockMHz, cGraphicsClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetClkMonStatus function as declared in nvml/nvml.h +func nvmlDeviceGetClkMonStatus(Device Device, Status *ClkMonStatus) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cStatus, _ := (*C.nvmlClkMonStatus_t)(unsafe.Pointer(Status)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetClkMonStatus(cDevice, cStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetPowerManagementLimit function as declared in nvml/nvml.h +func nvmlDeviceSetPowerManagementLimit(Device Device, Limit uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLimit, _ := (C.uint)(Limit), cgoAllocsUnknown + __ret := C.nvmlDeviceSetPowerManagementLimit(cDevice, cLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetGpuOperationMode function as declared in nvml/nvml.h +func nvmlDeviceSetGpuOperationMode(Device Device, Mode GpuOperationMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (C.nvmlGpuOperationMode_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetGpuOperationMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetAPIRestriction function as declared in nvml/nvml.h +func nvmlDeviceSetAPIRestriction(Device Device, ApiType RestrictedAPI, IsRestricted EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cApiType, _ := (C.nvmlRestrictedAPI_t)(ApiType), cgoAllocsUnknown + cIsRestricted, _ := (C.nvmlEnableState_t)(IsRestricted), cgoAllocsUnknown + __ret := C.nvmlDeviceSetAPIRestriction(cDevice, cApiType, cIsRestricted) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetAccountingMode function as declared in nvml/nvml.h +func nvmlDeviceSetAccountingMode(Device Device, Mode EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (C.nvmlEnableState_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetAccountingMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceClearAccountingPids function as declared in nvml/nvml.h +func nvmlDeviceClearAccountingPids(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceClearAccountingPids(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkState function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkState(Device Device, Link uint32, IsActive *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cIsActive, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsActive)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkState(cDevice, cLink, cIsActive) + __v := (Return)(__ret) + return __v +} + +// 
nvmlDeviceGetNvLinkVersion function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkVersion(Device Device, Link uint32, Version *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cVersion, _ := (*C.uint)(unsafe.Pointer(Version)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkVersion(cDevice, cLink, cVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkCapability function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkCapability(Device Device, Link uint32, Capability NvLinkCapability, CapResult *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCapability, _ := (C.nvmlNvLinkCapability_t)(Capability), cgoAllocsUnknown + cCapResult, _ := (*C.uint)(unsafe.Pointer(CapResult)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkCapability(cDevice, cLink, cCapability, cCapResult) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkRemotePciInfo_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkRemotePciInfo_v2(Device Device, Link uint32, Pci *PciInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkRemotePciInfo_v2(cDevice, cLink, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkErrorCounter function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkErrorCounter(Device Device, Link uint32, Counter NvLinkErrorCounter, CounterValue *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.nvmlNvLinkErrorCounter_t)(Counter), cgoAllocsUnknown + cCounterValue, _ := (*C.ulonglong)(unsafe.Pointer(CounterValue)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkErrorCounter(cDevice, cLink, cCounter, cCounterValue) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetNvLinkErrorCounters function as declared in nvml/nvml.h +func nvmlDeviceResetNvLinkErrorCounters(Device Device, Link uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + __ret := C.nvmlDeviceResetNvLinkErrorCounters(cDevice, cLink) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetNvLinkUtilizationControl function as declared in nvml/nvml.h +func nvmlDeviceSetNvLinkUtilizationControl(Device Device, Link uint32, Counter uint32, Control *NvLinkUtilizationControl, Reset uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + cControl, _ := (*C.nvmlNvLinkUtilizationControl_t)(unsafe.Pointer(Control)), cgoAllocsUnknown + cReset, _ := (C.uint)(Reset), cgoAllocsUnknown + __ret := C.nvmlDeviceSetNvLinkUtilizationControl(cDevice, cLink, cCounter, cControl, cReset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkUtilizationControl function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkUtilizationControl(Device Device, Link uint32, Counter uint32, Control *NvLinkUtilizationControl) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ 
:= (C.uint)(Counter), cgoAllocsUnknown + cControl, _ := (*C.nvmlNvLinkUtilizationControl_t)(unsafe.Pointer(Control)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkUtilizationControl(cDevice, cLink, cCounter, cControl) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkUtilizationCounter function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkUtilizationCounter(Device Device, Link uint32, Counter uint32, Rxcounter *uint64, Txcounter *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + cRxcounter, _ := (*C.ulonglong)(unsafe.Pointer(Rxcounter)), cgoAllocsUnknown + cTxcounter, _ := (*C.ulonglong)(unsafe.Pointer(Txcounter)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkUtilizationCounter(cDevice, cLink, cCounter, cRxcounter, cTxcounter) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceFreezeNvLinkUtilizationCounter function as declared in nvml/nvml.h +func nvmlDeviceFreezeNvLinkUtilizationCounter(Device Device, Link uint32, Counter uint32, Freeze EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + cFreeze, _ := (C.nvmlEnableState_t)(Freeze), cgoAllocsUnknown + __ret := C.nvmlDeviceFreezeNvLinkUtilizationCounter(cDevice, cLink, cCounter, cFreeze) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetNvLinkUtilizationCounter function as declared in nvml/nvml.h +func nvmlDeviceResetNvLinkUtilizationCounter(Device Device, Link uint32, Counter uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + __ret := C.nvmlDeviceResetNvLinkUtilizationCounter(cDevice, cLink, cCounter) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkRemoteDeviceType function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkRemoteDeviceType(Device Device, Link uint32, PNvLinkDeviceType *IntNvLinkDeviceType) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cPNvLinkDeviceType, _ := (*C.nvmlIntNvLinkDeviceType_t)(unsafe.Pointer(PNvLinkDeviceType)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkRemoteDeviceType(cDevice, cLink, cPNvLinkDeviceType) + __v := (Return)(__ret) + return __v +} + +// nvmlEventSetCreate function as declared in nvml/nvml.h +func nvmlEventSetCreate(Set *EventSet) Return { + cSet, _ := (*C.nvmlEventSet_t)(unsafe.Pointer(Set)), cgoAllocsUnknown + __ret := C.nvmlEventSetCreate(cSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceRegisterEvents function as declared in nvml/nvml.h +func nvmlDeviceRegisterEvents(Device Device, EventTypes uint64, Set EventSet) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEventTypes, _ := (C.ulonglong)(EventTypes), cgoAllocsUnknown + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + __ret := C.nvmlDeviceRegisterEvents(cDevice, cEventTypes, cSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedEventTypes function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedEventTypes(Device Device, EventTypes *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + 
cEventTypes, _ := (*C.ulonglong)(unsafe.Pointer(EventTypes)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedEventTypes(cDevice, cEventTypes) + __v := (Return)(__ret) + return __v +} + +// nvmlEventSetWait_v2 function as declared in nvml/nvml.h +func nvmlEventSetWait_v2(Set EventSet, Data *EventData, Timeoutms uint32) Return { + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + cData, _ := (*C.nvmlEventData_t)(unsafe.Pointer(Data)), cgoAllocsUnknown + cTimeoutms, _ := (C.uint)(Timeoutms), cgoAllocsUnknown + __ret := C.nvmlEventSetWait_v2(cSet, cData, cTimeoutms) + __v := (Return)(__ret) + return __v +} + +// nvmlEventSetFree function as declared in nvml/nvml.h +func nvmlEventSetFree(Set EventSet) Return { + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + __ret := C.nvmlEventSetFree(cSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceModifyDrainState function as declared in nvml/nvml.h +func nvmlDeviceModifyDrainState(PciInfo *PciInfo, NewState EnableState) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + cNewState, _ := (C.nvmlEnableState_t)(NewState), cgoAllocsUnknown + __ret := C.nvmlDeviceModifyDrainState(cPciInfo, cNewState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceQueryDrainState function as declared in nvml/nvml.h +func nvmlDeviceQueryDrainState(PciInfo *PciInfo, CurrentState *EnableState) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + cCurrentState, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(CurrentState)), cgoAllocsUnknown + __ret := C.nvmlDeviceQueryDrainState(cPciInfo, cCurrentState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceRemoveGpu_v2 function as declared in nvml/nvml.h +func nvmlDeviceRemoveGpu_v2(PciInfo *PciInfo, GpuState DetachGpuState, LinkState PcieLinkState) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + cGpuState, _ := (C.nvmlDetachGpuState_t)(GpuState), cgoAllocsUnknown + cLinkState, _ := (C.nvmlPcieLinkState_t)(LinkState), cgoAllocsUnknown + __ret := C.nvmlDeviceRemoveGpu_v2(cPciInfo, cGpuState, cLinkState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceDiscoverGpus function as declared in nvml/nvml.h +func nvmlDeviceDiscoverGpus(PciInfo *PciInfo) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceDiscoverGpus(cPciInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFieldValues function as declared in nvml/nvml.h +func nvmlDeviceGetFieldValues(Device Device, ValuesCount int32, Values *FieldValue) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cValuesCount, _ := (C.int)(ValuesCount), cgoAllocsUnknown + cValues, _ := (*C.nvmlFieldValue_t)(unsafe.Pointer(Values)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFieldValues(cDevice, cValuesCount, cValues) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVirtualizationMode function as declared in nvml/nvml.h +func nvmlDeviceGetVirtualizationMode(Device Device, PVirtualMode *GpuVirtualizationMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPVirtualMode, _ := (*C.nvmlGpuVirtualizationMode_t)(unsafe.Pointer(PVirtualMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVirtualizationMode(cDevice, cPVirtualMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHostVgpuMode function as declared in nvml/nvml.h +func 
nvmlDeviceGetHostVgpuMode(Device Device, PHostVgpuMode *HostVgpuMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPHostVgpuMode, _ := (*C.nvmlHostVgpuMode_t)(unsafe.Pointer(PHostVgpuMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHostVgpuMode(cDevice, cPHostVgpuMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetVirtualizationMode function as declared in nvml/nvml.h +func nvmlDeviceSetVirtualizationMode(Device Device, VirtualMode GpuVirtualizationMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVirtualMode, _ := (C.nvmlGpuVirtualizationMode_t)(VirtualMode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetVirtualizationMode(cDevice, cVirtualMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v4 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v4(Device Device, PGridLicensableFeatures *GridLicensableFeatures) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures_v4(cDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetProcessUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetProcessUtilization(Device Device, Utilization *ProcessUtilizationSample, ProcessSamplesCount *uint32, LastSeenTimeStamp uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cUtilization, _ := (*C.nvmlProcessUtilizationSample_t)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cProcessSamplesCount, _ := (*C.uint)(unsafe.Pointer(ProcessSamplesCount)), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + __ret := C.nvmlDeviceGetProcessUtilization(cDevice, cUtilization, cProcessSamplesCount, cLastSeenTimeStamp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedVgpus function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedVgpus(Device Device, VgpuCount *uint32, VgpuTypeIds *VgpuTypeId) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVgpuCount, _ := (*C.uint)(unsafe.Pointer(VgpuCount)), cgoAllocsUnknown + cVgpuTypeIds, _ := (*C.nvmlVgpuTypeId_t)(unsafe.Pointer(VgpuTypeIds)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedVgpus(cDevice, cVgpuCount, cVgpuTypeIds) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCreatableVgpus function as declared in nvml/nvml.h +func nvmlDeviceGetCreatableVgpus(Device Device, VgpuCount *uint32, VgpuTypeIds *VgpuTypeId) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVgpuCount, _ := (*C.uint)(unsafe.Pointer(VgpuCount)), cgoAllocsUnknown + cVgpuTypeIds, _ := (*C.nvmlVgpuTypeId_t)(unsafe.Pointer(VgpuTypeIds)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCreatableVgpus(cDevice, cVgpuCount, cVgpuTypeIds) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetClass function as declared in nvml/nvml.h +func nvmlVgpuTypeGetClass(VgpuTypeId VgpuTypeId, VgpuTypeClass *byte, Size *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cVgpuTypeClass, _ := (*C.char)(unsafe.Pointer(VgpuTypeClass)), cgoAllocsUnknown + cSize, _ := (*C.uint)(unsafe.Pointer(Size)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetClass(cVgpuTypeId, 
cVgpuTypeClass, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetName function as declared in nvml/nvml.h +func nvmlVgpuTypeGetName(VgpuTypeId VgpuTypeId, VgpuTypeName *byte, Size *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cVgpuTypeName, _ := (*C.char)(unsafe.Pointer(VgpuTypeName)), cgoAllocsUnknown + cSize, _ := (*C.uint)(unsafe.Pointer(Size)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetName(cVgpuTypeId, cVgpuTypeName, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetGpuInstanceProfileId function as declared in nvml/nvml.h +func nvmlVgpuTypeGetGpuInstanceProfileId(VgpuTypeId VgpuTypeId, GpuInstanceProfileId *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cGpuInstanceProfileId, _ := (*C.uint)(unsafe.Pointer(GpuInstanceProfileId)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetGpuInstanceProfileId(cVgpuTypeId, cGpuInstanceProfileId) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetDeviceID function as declared in nvml/nvml.h +func nvmlVgpuTypeGetDeviceID(VgpuTypeId VgpuTypeId, DeviceID *uint64, SubsystemID *uint64) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cDeviceID, _ := (*C.ulonglong)(unsafe.Pointer(DeviceID)), cgoAllocsUnknown + cSubsystemID, _ := (*C.ulonglong)(unsafe.Pointer(SubsystemID)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetDeviceID(cVgpuTypeId, cDeviceID, cSubsystemID) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetFramebufferSize function as declared in nvml/nvml.h +func nvmlVgpuTypeGetFramebufferSize(VgpuTypeId VgpuTypeId, FbSize *uint64) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cFbSize, _ := (*C.ulonglong)(unsafe.Pointer(FbSize)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetFramebufferSize(cVgpuTypeId, cFbSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetNumDisplayHeads function as declared in nvml/nvml.h +func nvmlVgpuTypeGetNumDisplayHeads(VgpuTypeId VgpuTypeId, NumDisplayHeads *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cNumDisplayHeads, _ := (*C.uint)(unsafe.Pointer(NumDisplayHeads)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetNumDisplayHeads(cVgpuTypeId, cNumDisplayHeads) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetResolution function as declared in nvml/nvml.h +func nvmlVgpuTypeGetResolution(VgpuTypeId VgpuTypeId, DisplayIndex uint32, Xdim *uint32, Ydim *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cDisplayIndex, _ := (C.uint)(DisplayIndex), cgoAllocsUnknown + cXdim, _ := (*C.uint)(unsafe.Pointer(Xdim)), cgoAllocsUnknown + cYdim, _ := (*C.uint)(unsafe.Pointer(Ydim)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetResolution(cVgpuTypeId, cDisplayIndex, cXdim, cYdim) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetLicense function as declared in nvml/nvml.h +func nvmlVgpuTypeGetLicense(VgpuTypeId VgpuTypeId, VgpuTypeLicenseString *byte, Size uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cVgpuTypeLicenseString, _ := (*C.char)(unsafe.Pointer(VgpuTypeLicenseString)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetLicense(cVgpuTypeId, cVgpuTypeLicenseString, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetFrameRateLimit function as declared in nvml/nvml.h +func 
nvmlVgpuTypeGetFrameRateLimit(VgpuTypeId VgpuTypeId, FrameRateLimit *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cFrameRateLimit, _ := (*C.uint)(unsafe.Pointer(FrameRateLimit)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetFrameRateLimit(cVgpuTypeId, cFrameRateLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetMaxInstances function as declared in nvml/nvml.h +func nvmlVgpuTypeGetMaxInstances(Device Device, VgpuTypeId VgpuTypeId, VgpuInstanceCount *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cVgpuInstanceCount, _ := (*C.uint)(unsafe.Pointer(VgpuInstanceCount)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetMaxInstances(cDevice, cVgpuTypeId, cVgpuInstanceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetMaxInstancesPerVm function as declared in nvml/nvml.h +func nvmlVgpuTypeGetMaxInstancesPerVm(VgpuTypeId VgpuTypeId, VgpuInstanceCountPerVm *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cVgpuInstanceCountPerVm, _ := (*C.uint)(unsafe.Pointer(VgpuInstanceCountPerVm)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetMaxInstancesPerVm(cVgpuTypeId, cVgpuInstanceCountPerVm) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetActiveVgpus function as declared in nvml/nvml.h +func nvmlDeviceGetActiveVgpus(Device Device, VgpuCount *uint32, VgpuInstances *VgpuInstance) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVgpuCount, _ := (*C.uint)(unsafe.Pointer(VgpuCount)), cgoAllocsUnknown + cVgpuInstances, _ := (*C.nvmlVgpuInstance_t)(unsafe.Pointer(VgpuInstances)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetActiveVgpus(cDevice, cVgpuCount, cVgpuInstances) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetVmID function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetVmID(VgpuInstance VgpuInstance, VmId *byte, Size uint32, VmIdType *VgpuVmIdType) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cVmId, _ := (*C.char)(unsafe.Pointer(VmId)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + cVmIdType, _ := (*C.nvmlVgpuVmIdType_t)(unsafe.Pointer(VmIdType)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetVmID(cVgpuInstance, cVmId, cSize, cVmIdType) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetUUID function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetUUID(VgpuInstance VgpuInstance, Uuid *byte, Size uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cUuid, _ := (*C.char)(unsafe.Pointer(Uuid)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetUUID(cVgpuInstance, cUuid, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetVmDriverVersion function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetVmDriverVersion(VgpuInstance VgpuInstance, Version *byte, Length uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetVmDriverVersion(cVgpuInstance, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetFbUsage function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetFbUsage(VgpuInstance 
VgpuInstance, FbUsage *uint64) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cFbUsage, _ := (*C.ulonglong)(unsafe.Pointer(FbUsage)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFbUsage(cVgpuInstance, cFbUsage) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetLicenseStatus function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetLicenseStatus(VgpuInstance VgpuInstance, Licensed *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cLicensed, _ := (*C.uint)(unsafe.Pointer(Licensed)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetLicenseStatus(cVgpuInstance, cLicensed) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetType function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetType(VgpuInstance VgpuInstance, VgpuTypeId *VgpuTypeId) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cVgpuTypeId, _ := (*C.nvmlVgpuTypeId_t)(unsafe.Pointer(VgpuTypeId)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetType(cVgpuInstance, cVgpuTypeId) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetFrameRateLimit function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetFrameRateLimit(VgpuInstance VgpuInstance, FrameRateLimit *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cFrameRateLimit, _ := (*C.uint)(unsafe.Pointer(FrameRateLimit)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFrameRateLimit(cVgpuInstance, cFrameRateLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEccMode function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEccMode(VgpuInstance VgpuInstance, EccMode *EnableState) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cEccMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(EccMode)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEccMode(cVgpuInstance, cEccMode) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEncoderCapacity function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEncoderCapacity(VgpuInstance VgpuInstance, EncoderCapacity *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cEncoderCapacity, _ := (*C.uint)(unsafe.Pointer(EncoderCapacity)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEncoderCapacity(cVgpuInstance, cEncoderCapacity) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceSetEncoderCapacity function as declared in nvml/nvml.h +func nvmlVgpuInstanceSetEncoderCapacity(VgpuInstance VgpuInstance, EncoderCapacity uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cEncoderCapacity, _ := (C.uint)(EncoderCapacity), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceSetEncoderCapacity(cVgpuInstance, cEncoderCapacity) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEncoderStats function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEncoderStats(VgpuInstance VgpuInstance, SessionCount *uint32, AverageFps *uint32, AverageLatency *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cAverageFps, _ := (*C.uint)(unsafe.Pointer(AverageFps)), cgoAllocsUnknown + cAverageLatency, _ := (*C.uint)(unsafe.Pointer(AverageLatency)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEncoderStats(cVgpuInstance, cSessionCount, 
cAverageFps, cAverageLatency) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEncoderSessions function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEncoderSessions(VgpuInstance VgpuInstance, SessionCount *uint32, SessionInfo *EncoderSessionInfo) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfo, _ := (*C.nvmlEncoderSessionInfo_t)(unsafe.Pointer(SessionInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEncoderSessions(cVgpuInstance, cSessionCount, cSessionInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetFBCStats function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetFBCStats(VgpuInstance VgpuInstance, FbcStats *FBCStats) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cFbcStats, _ := (*C.nvmlFBCStats_t)(unsafe.Pointer(FbcStats)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFBCStats(cVgpuInstance, cFbcStats) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetFBCSessions function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetFBCSessions(VgpuInstance VgpuInstance, SessionCount *uint32, SessionInfo *FBCSessionInfo) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfo, _ := (*C.nvmlFBCSessionInfo_t)(unsafe.Pointer(SessionInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFBCSessions(cVgpuInstance, cSessionCount, cSessionInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetGpuInstanceId function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetGpuInstanceId(VgpuInstance VgpuInstance, GpuInstanceId *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cGpuInstanceId, _ := (*C.uint)(unsafe.Pointer(GpuInstanceId)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetGpuInstanceId(cVgpuInstance, cGpuInstanceId) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetGpuPciId function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetGpuPciId(VgpuInstance VgpuInstance, VgpuPciId *byte, Length *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cVgpuPciId, _ := (*C.char)(unsafe.Pointer(VgpuPciId)), cgoAllocsUnknown + cLength, _ := (*C.uint)(unsafe.Pointer(Length)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetGpuPciId(cVgpuInstance, cVgpuPciId, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetMetadata function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetMetadata(VgpuInstance VgpuInstance, nvmlVgpuMetadata *nvmlVgpuMetadata, BufferSize *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cnvmlVgpuMetadata, _ := (*C.nvmlVgpuMetadata_t)(unsafe.Pointer(nvmlVgpuMetadata)), cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetMetadata(cVgpuInstance, cnvmlVgpuMetadata, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuMetadata function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuMetadata(Device Device, PgpuMetadata *nvmlVgpuPgpuMetadata, BufferSize *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPgpuMetadata, _ := (*C.nvmlVgpuPgpuMetadata_t)(unsafe.Pointer(PgpuMetadata)), 
cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuMetadata(cDevice, cPgpuMetadata, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// nvmlGetVgpuCompatibility function as declared in nvml/nvml.h +func nvmlGetVgpuCompatibility(nvmlVgpuMetadata *nvmlVgpuMetadata, PgpuMetadata *nvmlVgpuPgpuMetadata, CompatibilityInfo *VgpuPgpuCompatibility) Return { + cnvmlVgpuMetadata, _ := (*C.nvmlVgpuMetadata_t)(unsafe.Pointer(nvmlVgpuMetadata)), cgoAllocsUnknown + cPgpuMetadata, _ := (*C.nvmlVgpuPgpuMetadata_t)(unsafe.Pointer(PgpuMetadata)), cgoAllocsUnknown + cCompatibilityInfo, _ := (*C.nvmlVgpuPgpuCompatibility_t)(unsafe.Pointer(CompatibilityInfo)), cgoAllocsUnknown + __ret := C.nvmlGetVgpuCompatibility(cnvmlVgpuMetadata, cPgpuMetadata, cCompatibilityInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPgpuMetadataString function as declared in nvml/nvml.h +func nvmlDeviceGetPgpuMetadataString(Device Device, PgpuMetadata *byte, BufferSize *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPgpuMetadata, _ := (*C.char)(unsafe.Pointer(PgpuMetadata)), cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPgpuMetadataString(cDevice, cPgpuMetadata, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// nvmlGetVgpuVersion function as declared in nvml/nvml.h +func nvmlGetVgpuVersion(Supported *VgpuVersion, Current *VgpuVersion) Return { + cSupported, _ := (*C.nvmlVgpuVersion_t)(unsafe.Pointer(Supported)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlVgpuVersion_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + __ret := C.nvmlGetVgpuVersion(cSupported, cCurrent) + __v := (Return)(__ret) + return __v +} + +// nvmlSetVgpuVersion function as declared in nvml/nvml.h +func nvmlSetVgpuVersion(VgpuVersion *VgpuVersion) Return { + cVgpuVersion, _ := (*C.nvmlVgpuVersion_t)(unsafe.Pointer(VgpuVersion)), cgoAllocsUnknown + __ret := C.nvmlSetVgpuVersion(cVgpuVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuUtilization(Device Device, LastSeenTimeStamp uint64, SampleValType *ValueType, VgpuInstanceSamplesCount *uint32, UtilizationSamples *VgpuInstanceUtilizationSample) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + cSampleValType, _ := (*C.nvmlValueType_t)(unsafe.Pointer(SampleValType)), cgoAllocsUnknown + cVgpuInstanceSamplesCount, _ := (*C.uint)(unsafe.Pointer(VgpuInstanceSamplesCount)), cgoAllocsUnknown + cUtilizationSamples, _ := (*C.nvmlVgpuInstanceUtilizationSample_t)(unsafe.Pointer(UtilizationSamples)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuUtilization(cDevice, cLastSeenTimeStamp, cSampleValType, cVgpuInstanceSamplesCount, cUtilizationSamples) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuProcessUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuProcessUtilization(Device Device, LastSeenTimeStamp uint64, VgpuProcessSamplesCount *uint32, UtilizationSamples *VgpuProcessUtilizationSample) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + cVgpuProcessSamplesCount, _ := (*C.uint)(unsafe.Pointer(VgpuProcessSamplesCount)), cgoAllocsUnknown + 
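// The vGPU sampling queries above follow NVML's usual sizing convention: call once to
// learn how many samples are pending, then call again with a caller-allocated buffer.
// A minimal sketch, assuming it sits in this package next to these unexported bindings
// (SUCCESS and ERROR_INSUFFICIENT_SIZE are the generated Return constants); the exact
// status returned by the sizing call can vary, so both are tolerated here.
func vgpuUtilizationSince(device Device, lastSeenTimeStamp uint64) ([]VgpuInstanceUtilizationSample, Return) {
	var sampleValType ValueType
	var count uint32
	// First call with count == 0 asks the driver how many samples are available.
	ret := nvmlDeviceGetVgpuUtilization(device, lastSeenTimeStamp, &sampleValType, &count, nil)
	if ret != SUCCESS && ret != ERROR_INSUFFICIENT_SIZE {
		return nil, ret
	}
	if count == 0 {
		return nil, SUCCESS
	}
	// Second call fills the buffer; count is both the buffer size in and samples out.
	samples := make([]VgpuInstanceUtilizationSample, count)
	ret = nvmlDeviceGetVgpuUtilization(device, lastSeenTimeStamp, &sampleValType, &count, &samples[0])
	if ret != SUCCESS {
		return nil, ret
	}
	return samples[:count], SUCCESS
}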
cUtilizationSamples, _ := (*C.nvmlVgpuProcessUtilizationSample_t)(unsafe.Pointer(UtilizationSamples)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuProcessUtilization(cDevice, cLastSeenTimeStamp, cVgpuProcessSamplesCount, cUtilizationSamples) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetAccountingMode function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetAccountingMode(VgpuInstance VgpuInstance, Mode *EnableState) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetAccountingMode(cVgpuInstance, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetAccountingPids function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetAccountingPids(VgpuInstance VgpuInstance, Count *uint32, Pids *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cPids, _ := (*C.uint)(unsafe.Pointer(Pids)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetAccountingPids(cVgpuInstance, cCount, cPids) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetAccountingStats function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetAccountingStats(VgpuInstance VgpuInstance, Pid uint32, Stats *AccountingStats) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cPid, _ := (C.uint)(Pid), cgoAllocsUnknown + cStats, _ := (*C.nvmlAccountingStats_t)(unsafe.Pointer(Stats)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetAccountingStats(cVgpuInstance, cPid, cStats) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceClearAccountingPids function as declared in nvml/nvml.h +func nvmlVgpuInstanceClearAccountingPids(VgpuInstance VgpuInstance) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceClearAccountingPids(cVgpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetLicenseInfo_v2 function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetLicenseInfo_v2(VgpuInstance VgpuInstance, LicenseInfo *VgpuLicenseInfo) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cLicenseInfo, _ := (*C.nvmlVgpuLicenseInfo_t)(unsafe.Pointer(LicenseInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetLicenseInfo_v2(cVgpuInstance, cLicenseInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGetExcludedDeviceCount function as declared in nvml/nvml.h +func nvmlGetExcludedDeviceCount(DeviceCount *uint32) Return { + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + __ret := C.nvmlGetExcludedDeviceCount(cDeviceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlGetExcludedDeviceInfoByIndex function as declared in nvml/nvml.h +func nvmlGetExcludedDeviceInfoByIndex(Index uint32, Info *ExcludedDeviceInfo) Return { + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cInfo, _ := (*C.nvmlExcludedDeviceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGetExcludedDeviceInfoByIndex(cIndex, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetMigMode function as declared in nvml/nvml.h +func nvmlDeviceSetMigMode(Device Device, Mode uint32, ActivationStatus *Return) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (C.uint)(Mode), cgoAllocsUnknown + cActivationStatus, _ 
:= (*C.nvmlReturn_t)(unsafe.Pointer(ActivationStatus)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetMigMode(cDevice, cMode, cActivationStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMigMode function as declared in nvml/nvml.h +func nvmlDeviceGetMigMode(Device Device, CurrentMode *uint32, PendingMode *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCurrentMode, _ := (*C.uint)(unsafe.Pointer(CurrentMode)), cgoAllocsUnknown + cPendingMode, _ := (*C.uint)(unsafe.Pointer(PendingMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMigMode(cDevice, cCurrentMode, cPendingMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceProfileInfo function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceProfileInfo(Device Device, Profile uint32, Info *GpuInstanceProfileInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cInfo, _ := (*C.nvmlGpuInstanceProfileInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceProfileInfo(cDevice, cProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceProfileInfoV function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceProfileInfoV(Device Device, Profile uint32, Info *GpuInstanceProfileInfo_v2) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cInfo, _ := (*C.nvmlGpuInstanceProfileInfo_v2_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceProfileInfoV(cDevice, cProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstancePossiblePlacements_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstancePossiblePlacements_v2(Device Device, ProfileId uint32, Placements *GpuInstancePlacement, Count *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacements, _ := (*C.nvmlGpuInstancePlacement_t)(unsafe.Pointer(Placements)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstancePossiblePlacements_v2(cDevice, cProfileId, cPlacements, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceRemainingCapacity function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceRemainingCapacity(Device Device, ProfileId uint32, Count *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceRemainingCapacity(cDevice, cProfileId, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceCreateGpuInstance function as declared in nvml/nvml.h +func nvmlDeviceCreateGpuInstance(Device Device, ProfileId uint32, GpuInstance *GpuInstance) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cGpuInstance, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(GpuInstance)), cgoAllocsUnknown + __ret := C.nvmlDeviceCreateGpuInstance(cDevice, cProfileId, cGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceCreateGpuInstanceWithPlacement function as declared in nvml/nvml.h +func 
nvmlDeviceCreateGpuInstanceWithPlacement(Device Device, ProfileId uint32, Placement *GpuInstancePlacement, GpuInstance *GpuInstance) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacement, _ := (*C.nvmlGpuInstancePlacement_t)(unsafe.Pointer(Placement)), cgoAllocsUnknown + cGpuInstance, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(GpuInstance)), cgoAllocsUnknown + __ret := C.nvmlDeviceCreateGpuInstanceWithPlacement(cDevice, cProfileId, cPlacement, cGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceDestroy function as declared in nvml/nvml.h +func nvmlGpuInstanceDestroy(GpuInstance GpuInstance) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceDestroy(cGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstances function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstances(Device Device, ProfileId uint32, GpuInstances *GpuInstance, Count *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cGpuInstances, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(GpuInstances)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstances(cDevice, cProfileId, cGpuInstances, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceById function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceById(Device Device, Id uint32, GpuInstance *GpuInstance) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cId, _ := (C.uint)(Id), cgoAllocsUnknown + cGpuInstance, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(GpuInstance)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceById(cDevice, cId, cGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetInfo function as declared in nvml/nvml.h +func nvmlGpuInstanceGetInfo(GpuInstance GpuInstance, Info *GpuInstanceInfo) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlGpuInstanceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetInfo(cGpuInstance, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceProfileInfo function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceProfileInfo(GpuInstance GpuInstance, Profile uint32, EngProfile uint32, Info *ComputeInstanceProfileInfo) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cEngProfile, _ := (C.uint)(EngProfile), cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceProfileInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceProfileInfo(cGpuInstance, cProfile, cEngProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceProfileInfoV function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceProfileInfoV(GpuInstance GpuInstance, Profile uint32, EngProfile uint32, Info *ComputeInstanceProfileInfo_v2) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cEngProfile, _ := (C.uint)(EngProfile), 
cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceProfileInfo_v2_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceProfileInfoV(cGpuInstance, cProfile, cEngProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceRemainingCapacity function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceRemainingCapacity(GpuInstance GpuInstance, ProfileId uint32, Count *uint32) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceRemainingCapacity(cGpuInstance, cProfileId, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceCreateComputeInstance function as declared in nvml/nvml.h +func nvmlGpuInstanceCreateComputeInstance(GpuInstance GpuInstance, ProfileId uint32, ComputeInstance *ComputeInstance) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cComputeInstance, _ := (*C.nvmlComputeInstance_t)(unsafe.Pointer(ComputeInstance)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceCreateComputeInstance(cGpuInstance, cProfileId, cComputeInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlComputeInstanceDestroy function as declared in nvml/nvml.h +func nvmlComputeInstanceDestroy(ComputeInstance ComputeInstance) Return { + cComputeInstance, _ := *(*C.nvmlComputeInstance_t)(unsafe.Pointer(&ComputeInstance)), cgoAllocsUnknown + __ret := C.nvmlComputeInstanceDestroy(cComputeInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstances function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstances(GpuInstance GpuInstance, ProfileId uint32, ComputeInstances *ComputeInstance, Count *uint32) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cComputeInstances, _ := (*C.nvmlComputeInstance_t)(unsafe.Pointer(ComputeInstances)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstances(cGpuInstance, cProfileId, cComputeInstances, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceById function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceById(GpuInstance GpuInstance, Id uint32, ComputeInstance *ComputeInstance) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cId, _ := (C.uint)(Id), cgoAllocsUnknown + cComputeInstance, _ := (*C.nvmlComputeInstance_t)(unsafe.Pointer(ComputeInstance)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceById(cGpuInstance, cId, cComputeInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlComputeInstanceGetInfo_v2 function as declared in nvml/nvml.h +func nvmlComputeInstanceGetInfo_v2(ComputeInstance ComputeInstance, Info *ComputeInstanceInfo) Return { + cComputeInstance, _ := *(*C.nvmlComputeInstance_t)(unsafe.Pointer(&ComputeInstance)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlComputeInstanceGetInfo_v2(cComputeInstance, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceIsMigDeviceHandle function as 
declared in nvml/nvml.h +func nvmlDeviceIsMigDeviceHandle(Device Device, IsMigDevice *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIsMigDevice, _ := (*C.uint)(unsafe.Pointer(IsMigDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceIsMigDeviceHandle(cDevice, cIsMigDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceId function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceId(Device Device, Id *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cId, _ := (*C.uint)(unsafe.Pointer(Id)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceId(cDevice, cId) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeInstanceId function as declared in nvml/nvml.h +func nvmlDeviceGetComputeInstanceId(Device Device, Id *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cId, _ := (*C.uint)(unsafe.Pointer(Id)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeInstanceId(cDevice, cId) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxMigDeviceCount function as declared in nvml/nvml.h +func nvmlDeviceGetMaxMigDeviceCount(Device Device, Count *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxMigDeviceCount(cDevice, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMigDeviceHandleByIndex function as declared in nvml/nvml.h +func nvmlDeviceGetMigDeviceHandleByIndex(Device Device, Index uint32, MigDevice *Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cMigDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(MigDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMigDeviceHandleByIndex(cDevice, cIndex, cMigDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDeviceHandleFromMigDeviceHandle function as declared in nvml/nvml.h +func nvmlDeviceGetDeviceHandleFromMigDeviceHandle(MigDevice Device, Device *Device) Return { + cMigDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&MigDevice)), cgoAllocsUnknown + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDeviceHandleFromMigDeviceHandle(cMigDevice, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBusType function as declared in nvml/nvml.h +func nvmlDeviceGetBusType(Device Device, _type *BusType) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + c_type, _ := (*C.nvmlBusType_t)(unsafe.Pointer(_type)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBusType(cDevice, c_type) + __v := (Return)(__ret) + return __v +} + +// nvmlInit_v1 function as declared in nvml/nvml.h +func nvmlInit_v1() Return { + __ret := C.nvmlInit() + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCount_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetCount_v1(DeviceCount *uint32) Return { + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCount(cDeviceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByIndex_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByIndex_v1(Index uint32, Device *Device) Return { + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), 
cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByIndex(cIndex, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByPciBusId_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByPciBusId_v1(PciBusId string, Device *Device) Return { + cPciBusId, _ := unpackPCharString(PciBusId) + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByPciBusId(cPciBusId, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPciInfo_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetPciInfo_v1(Device Device, Pci *PciInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPciInfo(cDevice, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPciInfo_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetPciInfo_v2(Device Device, Pci *PciInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPciInfo_v2(cDevice, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkRemotePciInfo_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkRemotePciInfo_v1(Device Device, Link uint32, Pci *PciInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkRemotePciInfo(cDevice, cLink, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v1(Device Device, PGridLicensableFeatures *GridLicensableFeatures) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures(cDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v2(Device Device, PGridLicensableFeatures *GridLicensableFeatures) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures_v2(cDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v3(Device Device, PGridLicensableFeatures *GridLicensableFeatures) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures_v3(cDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceRemoveGpu_v1 function as declared in nvml/nvml.h +func nvmlDeviceRemoveGpu_v1(PciInfo *PciInfo) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceRemoveGpu(cPciInfo) 
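// A hedged sketch of how the legacy _v1 entry points above are driven from Go: every
// result is written through an out parameter and the Return value carries the NVML
// status. It assumes it lives in this package (the bindings are unexported) and that
// SUCCESS is the generated constant for NVML_SUCCESS.
func enumerateDevicesV1() ([]Device, Return) {
	if ret := nvmlInit_v1(); ret != SUCCESS {
		return nil, ret
	}
	var count uint32
	if ret := nvmlDeviceGetCount_v1(&count); ret != SUCCESS {
		return nil, ret
	}
	devices := make([]Device, count)
	for i := uint32(0); i < count; i++ {
		if ret := nvmlDeviceGetHandleByIndex_v1(i, &devices[i]); ret != SUCCESS {
			return nil, ret
		}
	}
	return devices, SUCCESS
}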
+ __v := (Return)(__ret) + return __v +} + +// nvmlEventSetWait_v1 function as declared in nvml/nvml.h +func nvmlEventSetWait_v1(Set EventSet, Data *EventData, Timeoutms uint32) Return { + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + cData, _ := (*C.nvmlEventData_t)(unsafe.Pointer(Data)), cgoAllocsUnknown + cTimeoutms, _ := (C.uint)(Timeoutms), cgoAllocsUnknown + __ret := C.nvmlEventSetWait(cSet, cData, cTimeoutms) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAttributes_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetAttributes_v1(Device Device, Attributes *DeviceAttributes) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cAttributes, _ := (*C.nvmlDeviceAttributes_t)(unsafe.Pointer(Attributes)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAttributes(cDevice, cAttributes) + __v := (Return)(__ret) + return __v +} + +// nvmlComputeInstanceGetInfo_v1 function as declared in nvml/nvml.h +func nvmlComputeInstanceGetInfo_v1(ComputeInstance ComputeInstance, Info *ComputeInstanceInfo) Return { + cComputeInstance, _ := *(*C.nvmlComputeInstance_t)(unsafe.Pointer(&ComputeInstance)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlComputeInstanceGetInfo(cComputeInstance, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeRunningProcesses_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetComputeRunningProcesses_v1(Device Device, InfoCount *uint32, Infos *ProcessInfo_v1) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v1_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeRunningProcesses(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeRunningProcesses_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetComputeRunningProcesses_v2(Device Device, InfoCount *uint32, Infos *ProcessInfo_v2) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v2_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeRunningProcesses_v2(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGraphicsRunningProcesses_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetGraphicsRunningProcesses_v1(Device Device, InfoCount *uint32, Infos *ProcessInfo_v1) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v1_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGraphicsRunningProcesses(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGraphicsRunningProcesses_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetGraphicsRunningProcesses_v2(Device Device, InfoCount *uint32, Infos *ProcessInfo_v2) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v2_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGraphicsRunningProcesses_v2(cDevice, cInfoCount, cInfos) + 
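// The versioned process queries above use the same two-step sizing pattern: the first
// call reports how many entries are required, the second fills a caller-allocated
// buffer. A minimal sketch under the assumption that the sizing call may return either
// SUCCESS (no running processes) or ERROR_INSUFFICIENT_SIZE with the count updated.
func computeProcessesV2(device Device) ([]ProcessInfo_v2, Return) {
	var count uint32
	ret := nvmlDeviceGetComputeRunningProcesses_v2(device, &count, nil)
	if ret != SUCCESS && ret != ERROR_INSUFFICIENT_SIZE {
		return nil, ret
	}
	if count == 0 {
		return nil, SUCCESS
	}
	infos := make([]ProcessInfo_v2, count)
	ret = nvmlDeviceGetComputeRunningProcesses_v2(device, &count, &infos[0])
	if ret != SUCCESS {
		return nil, ret
	}
	return infos[:count], SUCCESS
}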
__v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMPSComputeRunningProcesses_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetMPSComputeRunningProcesses_v1(Device Device, InfoCount *uint32, Infos *ProcessInfo_v1) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v1_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMPSComputeRunningProcesses(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMPSComputeRunningProcesses_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetMPSComputeRunningProcesses_v2(Device Device, InfoCount *uint32, Infos *ProcessInfo_v2) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v2_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMPSComputeRunningProcesses_v2(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstancePossiblePlacements_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstancePossiblePlacements_v1(Device Device, ProfileId uint32, Placements *GpuInstancePlacement, Count *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacements, _ := (*C.nvmlGpuInstancePlacement_t)(unsafe.Pointer(Placements)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstancePossiblePlacements(cDevice, cProfileId, cPlacements, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetLicenseInfo_v1 function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetLicenseInfo_v1(VgpuInstance VgpuInstance, LicenseInfo *VgpuLicenseInfo) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cLicenseInfo, _ := (*C.nvmlVgpuLicenseInfo_t)(unsafe.Pointer(LicenseInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetLicenseInfo(cVgpuInstance, cLicenseInfo) + __v := (Return)(__ret) + return __v +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h new file mode 100644 index 0000000..804db91 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h @@ -0,0 +1,8459 @@ +/*** NVML VERSION: 11.6.55 ***/ +/*** From https://api.anaconda.org/download/nvidia/cuda-nvml-dev/11.6.55/linux-64/cuda-nvml-dev-11.6.55-haa9ef22_0.tar.bz2 ***/ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO USER: + * + * This source code is subject to NVIDIA ownership rights under U.S. and + * international Copyright laws. Users and possessors of this source code + * are hereby granted a nonexclusive, royalty-free license to use this code + * in individual and commercial software. + * + * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE + * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR + * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH + * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, + * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS + * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE + * OR PERFORMANCE OF THIS SOURCE CODE. + * + * U.S. Government End Users. This source code is a "commercial item" as + * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of + * "commercial computer software" and "commercial computer software + * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) + * and is provided to the U.S. Government only as a commercial end item. + * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through + * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the + * source code with only those rights set forth herein. + * + * Any use of this source code in individual and commercial software must + * include, in the user documentation and internal comments to the code, + * the above Disclaimer and U.S. Government End Users Notice. + */ + +/* +NVML API Reference + +The NVIDIA Management Library (NVML) is a C-based programmatic interface for monitoring and +managing various states within NVIDIA Tesla &tm; GPUs. It is intended to be a platform for building +3rd party applications, and is also the underlying library for the NVIDIA-supported nvidia-smi +tool. NVML is thread-safe so it is safe to make simultaneous NVML calls from multiple threads. + +API Documentation + +Supported platforms: +- Windows: Windows Server 2008 R2 64bit, Windows Server 2012 R2 64bit, Windows 7 64bit, Windows 8 64bit, Windows 10 64bit +- Linux: 32-bit and 64-bit +- Hypervisors: Windows Server 2008R2/2012 Hyper-V 64bit, Citrix XenServer 6.2 SP1+, VMware ESX 5.1/5.5 + +Supported products: +- Full Support + - All Tesla products, starting with the Fermi architecture + - All Quadro products, starting with the Fermi architecture + - All vGPU Software products, starting with the Kepler architecture + - Selected GeForce Titan products +- Limited Support + - All Geforce products, starting with the Fermi architecture + +The NVML library can be found at \%ProgramW6432\%\\"NVIDIA Corporation"\\NVSMI\\ on Windows. It is +not be added to the system path by default. To dynamically link to NVML, add this path to the PATH +environmental variable. To dynamically load NVML, call LoadLibrary with this path. + +On Linux the NVML library will be found on the standard library path. For 64 bit Linux, both the 32 bit +and 64 bit NVML libraries will be installed. + +Online documentation for this library is available at http://docs.nvidia.com/deploy/nvml-api/index.html +*/ + +#ifndef __nvml_nvml_h__ +#define __nvml_nvml_h__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * On Windows, set up methods for DLL export + * define NVML_STATIC_IMPORT when using nvml_loader library + */ +#if defined _WINDOWS + #if !defined NVML_STATIC_IMPORT + #if defined NVML_LIB_EXPORT + #define DECLDIR __declspec(dllexport) + #else + #define DECLDIR __declspec(dllimport) + #endif + #else + #define DECLDIR + #endif +#else + #define DECLDIR +#endif + +/** + * NVML API versioning support + */ +#define NVML_API_VERSION 11 +#define NVML_API_VERSION_STR "11" +/** + * Defining NVML_NO_UNVERSIONED_FUNC_DEFS will disable "auto upgrading" of APIs. + * e.g. the user will have to call nvmlInit_v2 instead of nvmlInit. 
Enable this + * guard if you need to support older versions of the API + */ +#ifndef NVML_NO_UNVERSIONED_FUNC_DEFS + #define nvmlInit nvmlInit_v2 + #define nvmlDeviceGetPciInfo nvmlDeviceGetPciInfo_v3 + #define nvmlDeviceGetCount nvmlDeviceGetCount_v2 + #define nvmlDeviceGetHandleByIndex nvmlDeviceGetHandleByIndex_v2 + #define nvmlDeviceGetHandleByPciBusId nvmlDeviceGetHandleByPciBusId_v2 + #define nvmlDeviceGetNvLinkRemotePciInfo nvmlDeviceGetNvLinkRemotePciInfo_v2 + #define nvmlDeviceRemoveGpu nvmlDeviceRemoveGpu_v2 + #define nvmlDeviceGetGridLicensableFeatures nvmlDeviceGetGridLicensableFeatures_v4 + #define nvmlEventSetWait nvmlEventSetWait_v2 + #define nvmlDeviceGetAttributes nvmlDeviceGetAttributes_v2 + #define nvmlComputeInstanceGetInfo nvmlComputeInstanceGetInfo_v2 + #define nvmlDeviceGetComputeRunningProcesses nvmlDeviceGetComputeRunningProcesses_v3 + #define nvmlDeviceGetGraphicsRunningProcesses nvmlDeviceGetGraphicsRunningProcesses_v3 + #define nvmlDeviceGetMPSComputeRunningProcesses nvmlDeviceGetMPSComputeRunningProcesses_v3 + #define nvmlBlacklistDeviceInfo_t nvmlExcludedDeviceInfo_t + #define nvmlGetBlacklistDeviceCount nvmlGetExcludedDeviceCount + #define nvmlGetBlacklistDeviceInfoByIndex nvmlGetExcludedDeviceInfoByIndex + #define nvmlDeviceGetGpuInstancePossiblePlacements nvmlDeviceGetGpuInstancePossiblePlacements_v2 + #define nvmlVgpuInstanceGetLicenseInfo nvmlVgpuInstanceGetLicenseInfo_v2 +#endif // #ifndef NVML_NO_UNVERSIONED_FUNC_DEFS + +#define NVML_STRUCT_VERSION(data, ver) (unsigned int)(sizeof(nvml ## data ## _v ## ver ## _t) | \ + (ver << 24U)) + +/***************************************************************************************************/ +/** @defgroup nvmlDeviceStructs Device Structs + * @{ + */ +/***************************************************************************************************/ + +/** + * Special constant that some fields take when they are not available. + * Used when only part of the struct is not available. + * + * Each structure explicitly states when to check for this value. + */ +#define NVML_VALUE_NOT_AVAILABLE (-1) + +typedef struct +{ + struct nvmlDevice_st* handle; +} nvmlDevice_t; + +/** + * Buffer size guaranteed to be large enough for pci bus id + */ +#define NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE 32 + +/** + * Buffer size guaranteed to be large enough for pci bus id for ::busIdLegacy + */ +#define NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE 16 + +/** + * PCI information about a GPU device. 
+ */ +typedef struct nvmlPciInfo_st +{ + char busIdLegacy[NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE]; //!< The legacy tuple domain:bus:device.function PCI identifier (& NULL terminator) + unsigned int domain; //!< The PCI domain on which the device's bus resides, 0 to 0xffffffff + unsigned int bus; //!< The bus on which the device resides, 0 to 0xff + unsigned int device; //!< The device's id on the bus, 0 to 31 + unsigned int pciDeviceId; //!< The combined 16-bit device id and 16-bit vendor id + + // Added in NVML 2.285 API + unsigned int pciSubSystemId; //!< The 32-bit Sub System Device ID + + char busId[NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE]; //!< The tuple domain:bus:device.function PCI identifier (& NULL terminator) +} nvmlPciInfo_t; + +/** + * PCI format string for ::busIdLegacy + */ +#define NVML_DEVICE_PCI_BUS_ID_LEGACY_FMT "%04X:%02X:%02X.0" + +/** + * PCI format string for ::busId + */ +#define NVML_DEVICE_PCI_BUS_ID_FMT "%08X:%02X:%02X.0" + +/** + * Utility macro for filling the pci bus id format from a nvmlPciInfo_t + */ +#define NVML_DEVICE_PCI_BUS_ID_FMT_ARGS(pciInfo) (pciInfo)->domain, \ + (pciInfo)->bus, \ + (pciInfo)->device + +/** + * Detailed ECC error counts for a device. + * + * @deprecated Different GPU families can have different memory error counters + * See \ref nvmlDeviceGetMemoryErrorCounter + */ +typedef struct nvmlEccErrorCounts_st +{ + unsigned long long l1Cache; //!< L1 cache errors + unsigned long long l2Cache; //!< L2 cache errors + unsigned long long deviceMemory; //!< Device memory errors + unsigned long long registerFile; //!< Register file errors +} nvmlEccErrorCounts_t; + +/** + * Utilization information for a device. + * Each sample period may be between 1 second and 1/6 second, depending on the product being queried. + */ +typedef struct nvmlUtilization_st +{ + unsigned int gpu; //!< Percent of time over the past sample period during which one or more kernels was executing on the GPU + unsigned int memory; //!< Percent of time over the past sample period during which global (device) memory was being read or written +} nvmlUtilization_t; + +/** + * Memory allocation information for a device (v1). + * The total amount is equal to the sum of the amounts of free and used memory. + */ +typedef struct nvmlMemory_st +{ + unsigned long long total; //!< Total physical device memory (in bytes) + unsigned long long free; //!< Unallocated device memory (in bytes) + unsigned long long used; //!< Sum of Reserved and Allocated device memory (in bytes). + //!< Note that the driver/GPU always sets aside a small amount of memory for bookkeeping +} nvmlMemory_t; + +/** + * Memory allocation information for a device (v2). + * + * Version 2 adds versioning for the struct and the amount of system-reserved memory as an output. + * @note The \ref nvmlMemory_v2_t.used amount also includes the \ref nvmlMemory_v2_t.reserved amount. + */ +typedef struct nvmlMemory_v2_st +{ + unsigned int version; //!< Structure format version (must be 2) + unsigned long long total; //!< Total physical device memory (in bytes) + unsigned long long reserved; //!< Device memory (in bytes) reserved for system use (driver or firmware) + unsigned long long free; //!< Unallocated device memory (in bytes) + unsigned long long used; //!< Allocated device memory (in bytes). 
Note that the driver/GPU always sets aside a small amount of memory for bookkeeping +} nvmlMemory_v2_t; + +#define nvmlMemory_v2 NVML_STRUCT_VERSION(Memory, 2) + +/** + * BAR1 Memory allocation Information for a device + */ +typedef struct nvmlBAR1Memory_st +{ + unsigned long long bar1Total; //!< Total BAR1 Memory (in bytes) + unsigned long long bar1Free; //!< Unallocated BAR1 Memory (in bytes) + unsigned long long bar1Used; //!< Allocated Used Memory (in bytes) +}nvmlBAR1Memory_t; + +/** + * Information about running compute processes on the GPU, legacy version + * for older versions of the API. + */ +typedef struct nvmlProcessInfo_v1_st +{ + unsigned int pid; //!< Process ID + unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. + //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported + //! because Windows KMD manages all the memory and not the NVIDIA driver +} nvmlProcessInfo_v1_t; + +/** + * Information about running compute processes on the GPU + */ +typedef struct nvmlProcessInfo_v2_st +{ + unsigned int pid; //!< Process ID + unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. + //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported + //! because Windows KMD manages all the memory and not the NVIDIA driver + unsigned int gpuInstanceId; //!< If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is set to + // 0xFFFFFFFF otherwise. + unsigned int computeInstanceId; //!< If MIG is enabled, stores a valid compute instance ID. computeInstanceId is set to + // 0xFFFFFFFF otherwise. +} nvmlProcessInfo_v2_t; + +/** + * Information about running compute processes on the GPU + * Version 2 adds versioning for the struct and the conf compute protected memory in output. + */ +typedef struct nvmlProcessInfo_st +{ + unsigned int pid; //!< Process ID + unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. + //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported + //! because Windows KMD manages all the memory and not the NVIDIA driver + unsigned int gpuInstanceId; //!< If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is set to + // 0xFFFFFFFF otherwise. + unsigned int computeInstanceId; //!< If MIG is enabled, stores a valid compute instance ID. computeInstanceId is set to + // 0xFFFFFFFF otherwise. +} nvmlProcessInfo_t; + +typedef struct nvmlDeviceAttributes_st +{ + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int sharedCopyEngineCount; //!< Shared Copy Engine count + unsigned int sharedDecoderCount; //!< Shared Decoder Engine count + unsigned int sharedEncoderCount; //!< Shared Encoder Engine count + unsigned int sharedJpegCount; //!< Shared JPEG Engine count + unsigned int sharedOfaCount; //!< Shared OFA Engine count + unsigned int gpuInstanceSliceCount; //!< GPU instance slice count + unsigned int computeInstanceSliceCount; //!< Compute instance slice count + unsigned long long memorySizeMB; //!< Device memory size (in MiB) +} nvmlDeviceAttributes_t; + +/** + * Possible values that classify the remap availability for each bank. The max + * field will contain the number of banks that have maximum remap availability + * (all reserved rows are available). None means that there are no reserved + * rows available. 
+ */ +typedef struct nvmlRowRemapperHistogramValues_st +{ + unsigned int max; + unsigned int high; + unsigned int partial; + unsigned int low; + unsigned int none; +} nvmlRowRemapperHistogramValues_t; + +/** + * Enum to represent type of bridge chip + */ +typedef enum nvmlBridgeChipType_enum +{ + NVML_BRIDGE_CHIP_PLX = 0, + NVML_BRIDGE_CHIP_BRO4 = 1 +}nvmlBridgeChipType_t; + +/** + * Maximum number of NvLink links supported + */ +#define NVML_NVLINK_MAX_LINKS 12 + +/** + * Enum to represent the NvLink utilization counter packet units + */ +typedef enum nvmlNvLinkUtilizationCountUnits_enum +{ + NVML_NVLINK_COUNTER_UNIT_CYCLES = 0, // count by cycles + NVML_NVLINK_COUNTER_UNIT_PACKETS = 1, // count by packets + NVML_NVLINK_COUNTER_UNIT_BYTES = 2, // count by bytes + NVML_NVLINK_COUNTER_UNIT_RESERVED = 3, // count reserved for internal use + // this must be last + NVML_NVLINK_COUNTER_UNIT_COUNT +} nvmlNvLinkUtilizationCountUnits_t; + +/** + * Enum to represent the NvLink utilization counter packet types to count + * ** this is ONLY applicable with the units as packets or bytes + * ** as specified in \a nvmlNvLinkUtilizationCountUnits_t + * ** all packet filter descriptions are target GPU centric + * ** these can be "OR'd" together + */ +typedef enum nvmlNvLinkUtilizationCountPktTypes_enum +{ + NVML_NVLINK_COUNTER_PKTFILTER_NOP = 0x1, // no operation packets + NVML_NVLINK_COUNTER_PKTFILTER_READ = 0x2, // read packets + NVML_NVLINK_COUNTER_PKTFILTER_WRITE = 0x4, // write packets + NVML_NVLINK_COUNTER_PKTFILTER_RATOM = 0x8, // reduction atomic requests + NVML_NVLINK_COUNTER_PKTFILTER_NRATOM = 0x10, // non-reduction atomic requests + NVML_NVLINK_COUNTER_PKTFILTER_FLUSH = 0x20, // flush requests + NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA = 0x40, // responses with data + NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA = 0x80, // responses without data + NVML_NVLINK_COUNTER_PKTFILTER_ALL = 0xFF // all packets +} nvmlNvLinkUtilizationCountPktTypes_t; + +/** + * Struct to define the NVLINK counter controls + */ +typedef struct nvmlNvLinkUtilizationControl_st +{ + nvmlNvLinkUtilizationCountUnits_t units; + nvmlNvLinkUtilizationCountPktTypes_t pktfilter; +} nvmlNvLinkUtilizationControl_t; + +/** + * Enum to represent NvLink queryable capabilities + */ +typedef enum nvmlNvLinkCapability_enum +{ + NVML_NVLINK_CAP_P2P_SUPPORTED = 0, // P2P over NVLink is supported + NVML_NVLINK_CAP_SYSMEM_ACCESS = 1, // Access to system memory is supported + NVML_NVLINK_CAP_P2P_ATOMICS = 2, // P2P atomics are supported + NVML_NVLINK_CAP_SYSMEM_ATOMICS= 3, // System memory atomics are supported + NVML_NVLINK_CAP_SLI_BRIDGE = 4, // SLI is supported over this link + NVML_NVLINK_CAP_VALID = 5, // Link is supported on this device + // should be last + NVML_NVLINK_CAP_COUNT +} nvmlNvLinkCapability_t; + +/** + * Enum to represent NvLink queryable error counters + */ +typedef enum nvmlNvLinkErrorCounter_enum +{ + NVML_NVLINK_ERROR_DL_REPLAY = 0, // Data link transmit replay error counter + NVML_NVLINK_ERROR_DL_RECOVERY = 1, // Data link transmit recovery error counter + NVML_NVLINK_ERROR_DL_CRC_FLIT = 2, // Data link receive flow control digit CRC error counter + NVML_NVLINK_ERROR_DL_CRC_DATA = 3, // Data link receive data CRC error counter + NVML_NVLINK_ERROR_DL_ECC_DATA = 4, // Data link receive data ECC error counter + + // this must be last + NVML_NVLINK_ERROR_COUNT +} nvmlNvLinkErrorCounter_t; + +/** + * Enum to represent NvLink's remote device type + */ +typedef enum nvmlIntNvLinkDeviceType_enum +{ + NVML_NVLINK_DEVICE_TYPE_GPU = 
0x00, + NVML_NVLINK_DEVICE_TYPE_IBMNPU = 0x01, + NVML_NVLINK_DEVICE_TYPE_SWITCH = 0x02, + NVML_NVLINK_DEVICE_TYPE_UNKNOWN = 0xFF +} nvmlIntNvLinkDeviceType_t; + +/** + * Represents level relationships within a system between two GPUs + * The enums are spaced to allow for future relationships + */ +typedef enum nvmlGpuLevel_enum +{ + NVML_TOPOLOGY_INTERNAL = 0, // e.g. Tesla K80 + NVML_TOPOLOGY_SINGLE = 10, // all devices that only need traverse a single PCIe switch + NVML_TOPOLOGY_MULTIPLE = 20, // all devices that need not traverse a host bridge + NVML_TOPOLOGY_HOSTBRIDGE = 30, // all devices that are connected to the same host bridge + NVML_TOPOLOGY_NODE = 40, // all devices that are connected to the same NUMA node but possibly multiple host bridges + NVML_TOPOLOGY_SYSTEM = 50 // all devices in the system + + // there is purposefully no COUNT here because of the need for spacing above +} nvmlGpuTopologyLevel_t; + +/* Compatibility for CPU->NODE renaming */ +#define NVML_TOPOLOGY_CPU NVML_TOPOLOGY_NODE + +/* P2P Capability Index Status*/ +typedef enum nvmlGpuP2PStatus_enum +{ + NVML_P2P_STATUS_OK = 0, + NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED, + NVML_P2P_STATUS_GPU_NOT_SUPPORTED, + NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED, + NVML_P2P_STATUS_DISABLED_BY_REGKEY, + NVML_P2P_STATUS_NOT_SUPPORTED, + NVML_P2P_STATUS_UNKNOWN + +} nvmlGpuP2PStatus_t; + +/* P2P Capability Index*/ +typedef enum nvmlGpuP2PCapsIndex_enum +{ + NVML_P2P_CAPS_INDEX_READ = 0, + NVML_P2P_CAPS_INDEX_WRITE, + NVML_P2P_CAPS_INDEX_NVLINK, + NVML_P2P_CAPS_INDEX_ATOMICS, + NVML_P2P_CAPS_INDEX_PROP, + NVML_P2P_CAPS_INDEX_UNKNOWN +}nvmlGpuP2PCapsIndex_t; + +/** + * Maximum limit on Physical Bridges per Board + */ +#define NVML_MAX_PHYSICAL_BRIDGE (128) + +/** + * Information about the Bridge Chip Firmware + */ +typedef struct nvmlBridgeChipInfo_st +{ + nvmlBridgeChipType_t type; //!< Type of Bridge Chip + unsigned int fwVersion; //!< Firmware Version. 0=Version is unavailable +}nvmlBridgeChipInfo_t; + +/** + * This structure stores the complete Hierarchy of the Bridge Chip within the board. The immediate + * bridge is stored at index 0 of bridgeInfoList, parent to immediate bridge is at index 1 and so forth. 
+ */ +typedef struct nvmlBridgeChipHierarchy_st +{ + unsigned char bridgeCount; //!< Number of Bridge Chips on the Board + nvmlBridgeChipInfo_t bridgeChipInfo[NVML_MAX_PHYSICAL_BRIDGE]; //!< Hierarchy of Bridge Chips on the board +}nvmlBridgeChipHierarchy_t; + +/** + * Represents Type of Sampling Event + */ +typedef enum nvmlSamplingType_enum +{ + NVML_TOTAL_POWER_SAMPLES = 0, //!< To represent total power drawn by GPU + NVML_GPU_UTILIZATION_SAMPLES = 1, //!< To represent percent of time during which one or more kernels was executing on the GPU + NVML_MEMORY_UTILIZATION_SAMPLES = 2, //!< To represent percent of time during which global (device) memory was being read or written + NVML_ENC_UTILIZATION_SAMPLES = 3, //!< To represent percent of time during which NVENC remains busy + NVML_DEC_UTILIZATION_SAMPLES = 4, //!< To represent percent of time during which NVDEC remains busy + NVML_PROCESSOR_CLK_SAMPLES = 5, //!< To represent processor clock samples + NVML_MEMORY_CLK_SAMPLES = 6, //!< To represent memory clock samples + + // Keep this last + NVML_SAMPLINGTYPE_COUNT +}nvmlSamplingType_t; + +/** + * Represents the queryable PCIe utilization counters + */ +typedef enum nvmlPcieUtilCounter_enum +{ + NVML_PCIE_UTIL_TX_BYTES = 0, // 1KB granularity + NVML_PCIE_UTIL_RX_BYTES = 1, // 1KB granularity + + // Keep this last + NVML_PCIE_UTIL_COUNT +} nvmlPcieUtilCounter_t; + +/** + * Represents the type for sample value returned + */ +typedef enum nvmlValueType_enum +{ + NVML_VALUE_TYPE_DOUBLE = 0, + NVML_VALUE_TYPE_UNSIGNED_INT = 1, + NVML_VALUE_TYPE_UNSIGNED_LONG = 2, + NVML_VALUE_TYPE_UNSIGNED_LONG_LONG = 3, + NVML_VALUE_TYPE_SIGNED_LONG_LONG = 4, + + // Keep this last + NVML_VALUE_TYPE_COUNT +}nvmlValueType_t; + + +/** + * Union to represent different types of Value + */ +typedef union nvmlValue_st +{ + double dVal; //!< If the value is double + unsigned int uiVal; //!< If the value is unsigned int + unsigned long ulVal; //!< If the value is unsigned long + unsigned long long ullVal; //!< If the value is unsigned long long + signed long long sllVal; //!< If the value is signed long long +}nvmlValue_t; + +/** + * Information for Sample + */ +typedef struct nvmlSample_st +{ + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + nvmlValue_t sampleValue; //!< Sample Value +}nvmlSample_t; + +/** + * Represents type of perf policy for which violation times can be queried + */ +typedef enum nvmlPerfPolicyType_enum +{ + NVML_PERF_POLICY_POWER = 0, //!< How long did power violations cause the GPU to be below application clocks + NVML_PERF_POLICY_THERMAL = 1, //!< How long did thermal violations cause the GPU to be below application clocks + NVML_PERF_POLICY_SYNC_BOOST = 2, //!< How long did sync boost cause the GPU to be below application clocks + NVML_PERF_POLICY_BOARD_LIMIT = 3, //!< How long did the board limit cause the GPU to be below application clocks + NVML_PERF_POLICY_LOW_UTILIZATION = 4, //!< How long did low utilization cause the GPU to be below application clocks + NVML_PERF_POLICY_RELIABILITY = 5, //!< How long did the board reliability limit cause the GPU to be below application clocks + + NVML_PERF_POLICY_TOTAL_APP_CLOCKS = 10, //!< Total time the GPU was held below application clocks by any limiter (0 - 5 above) + NVML_PERF_POLICY_TOTAL_BASE_CLOCKS = 11, //!< Total time the GPU was held below base clocks + + // Keep this last + NVML_PERF_POLICY_COUNT +}nvmlPerfPolicyType_t; + +/** + * Struct to hold perf policy violation status data + */ +typedef struct nvmlViolationTime_st 
+{ + unsigned long long referenceTime; //!< referenceTime represents CPU timestamp in microseconds + unsigned long long violationTime; //!< violationTime in Nanoseconds +}nvmlViolationTime_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlDeviceEnumvs Device Enums + * @{ + */ +/***************************************************************************************************/ + +/** + * Generic enable/disable enum. + */ +typedef enum nvmlEnableState_enum +{ + NVML_FEATURE_DISABLED = 0, //!< Feature disabled + NVML_FEATURE_ENABLED = 1 //!< Feature enabled +} nvmlEnableState_t; + +//! Generic flag used to specify the default behavior of some functions. See description of particular functions for details. +#define nvmlFlagDefault 0x00 +//! Generic flag used to force some behavior. See description of particular functions for details. +#define nvmlFlagForce 0x01 + +/** + * * The Brand of the GPU + * */ +typedef enum nvmlBrandType_enum +{ + NVML_BRAND_UNKNOWN = 0, + NVML_BRAND_QUADRO = 1, + NVML_BRAND_TESLA = 2, + NVML_BRAND_NVS = 3, + NVML_BRAND_GRID = 4, // Deprecated from API reporting. Keeping definition for backward compatibility. + NVML_BRAND_GEFORCE = 5, + NVML_BRAND_TITAN = 6, + NVML_BRAND_NVIDIA_VAPPS = 7, // NVIDIA Virtual Applications + NVML_BRAND_NVIDIA_VPC = 8, // NVIDIA Virtual PC + NVML_BRAND_NVIDIA_VCS = 9, // NVIDIA Virtual Compute Server + NVML_BRAND_NVIDIA_VWS = 10, // NVIDIA RTX Virtual Workstation + NVML_BRAND_NVIDIA_CLOUD_GAMING = 11, // NVIDIA Cloud Gaming + NVML_BRAND_NVIDIA_VGAMING = NVML_BRAND_NVIDIA_CLOUD_GAMING, // Deprecated from API reporting. Keeping definition for backward compatibility. + NVML_BRAND_QUADRO_RTX = 12, + NVML_BRAND_NVIDIA_RTX = 13, + NVML_BRAND_NVIDIA = 14, + NVML_BRAND_GEFORCE_RTX = 15, // Unused + NVML_BRAND_TITAN_RTX = 16, // Unused + + // Keep this last + NVML_BRAND_COUNT +} nvmlBrandType_t; + +/** + * Temperature thresholds. + */ +typedef enum nvmlTemperatureThresholds_enum +{ + NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0, // Temperature at which the GPU will + // shut down for HW protection + NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1, // Temperature at which the GPU will + // begin HW slowdown + NVML_TEMPERATURE_THRESHOLD_MEM_MAX = 2, // Memory Temperature at which the GPU will + // begin SW slowdown + NVML_TEMPERATURE_THRESHOLD_GPU_MAX = 3, // GPU Temperature at which the GPU + // can be throttled below base clock + NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN = 4, // Minimum GPU Temperature that can be + // set as acoustic threshold + NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR = 5, // Current temperature that is set as + // acoustic threshold. + NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = 6, // Maximum GPU temperature that can be + // set as acoustic threshold. + // Keep this last + NVML_TEMPERATURE_THRESHOLD_COUNT +} nvmlTemperatureThresholds_t; + +/** + * Temperature sensors. + */ +typedef enum nvmlTemperatureSensors_enum +{ + NVML_TEMPERATURE_GPU = 0, //!< Temperature sensor for the GPU die + + // Keep this last + NVML_TEMPERATURE_COUNT +} nvmlTemperatureSensors_t; + +/** + * Compute mode. + * + * NVML_COMPUTEMODE_EXCLUSIVE_PROCESS was added in CUDA 4.0. + * Earlier CUDA versions supported a single exclusive mode, + * which is equivalent to NVML_COMPUTEMODE_EXCLUSIVE_THREAD in CUDA 4.0 and beyond. 
+ */ +typedef enum nvmlComputeMode_enum +{ + NVML_COMPUTEMODE_DEFAULT = 0, //!< Default compute mode -- multiple contexts per device + NVML_COMPUTEMODE_EXCLUSIVE_THREAD = 1, //!< Support Removed + NVML_COMPUTEMODE_PROHIBITED = 2, //!< Compute-prohibited mode -- no contexts per device + NVML_COMPUTEMODE_EXCLUSIVE_PROCESS = 3, //!< Compute-exclusive-process mode -- only one context per device, usable from multiple threads at a time + + // Keep this last + NVML_COMPUTEMODE_COUNT +} nvmlComputeMode_t; + +/** + * Max Clock Monitors available + */ +#define MAX_CLK_DOMAINS 32 + +/** + * Clock Monitor error types + */ +typedef struct nvmlClkMonFaultInfo_struct { + /** + * The Domain which faulted + */ + unsigned int clkApiDomain; + + /** + * Faults Information + */ + unsigned int clkDomainFaultMask; +} nvmlClkMonFaultInfo_t; + +/** + * Clock Monitor Status + */ +typedef struct nvmlClkMonStatus_status { + /** + * Fault status Indicator + */ + unsigned int bGlobalStatus; + + /** + * Total faulted domain numbers + */ + unsigned int clkMonListSize; + + /** + * The fault Information structure + */ + nvmlClkMonFaultInfo_t clkMonList[MAX_CLK_DOMAINS]; +} nvmlClkMonStatus_t; + +/** + * ECC bit types. + * + * @deprecated See \ref nvmlMemoryErrorType_t for a more flexible type + */ +#define nvmlEccBitType_t nvmlMemoryErrorType_t + +/** + * Single bit ECC errors + * + * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_CORRECTED + */ +#define NVML_SINGLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_CORRECTED + +/** + * Double bit ECC errors + * + * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_UNCORRECTED + */ +#define NVML_DOUBLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_UNCORRECTED + +/** + * Memory error types + */ +typedef enum nvmlMemoryErrorType_enum +{ + /** + * A memory error that was corrected + * + * For ECC errors, these are single bit errors + * For Texture memory, these are errors fixed by resend + */ + NVML_MEMORY_ERROR_TYPE_CORRECTED = 0, + /** + * A memory error that was not corrected + * + * For ECC errors, these are double bit errors + * For Texture memory, these are errors where the resend fails + */ + NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1, + + + // Keep this last + NVML_MEMORY_ERROR_TYPE_COUNT //!< Count of memory error types + +} nvmlMemoryErrorType_t; + +/** + * ECC counter types. + * + * Note: Volatile counts are reset each time the driver loads. On Windows this is once per boot. On Linux this can be more frequent. + * On Linux the driver unloads when no active clients exist. If persistence mode is enabled or there is always a driver + * client active (e.g. X11), then Linux also sees per-boot behavior. If not, volatile counts are reset each time a compute app + * is run. + */ +typedef enum nvmlEccCounterType_enum +{ + NVML_VOLATILE_ECC = 0, //!< Volatile counts are reset each time the driver loads. + NVML_AGGREGATE_ECC = 1, //!< Aggregate counts persist across reboots (i.e. for the lifetime of the device) + + // Keep this last + NVML_ECC_COUNTER_TYPE_COUNT //!< Count of memory counter types +} nvmlEccCounterType_t; + +/** + * Clock types. + * + * All speeds are in Mhz. + */ +typedef enum nvmlClockType_enum +{ + NVML_CLOCK_GRAPHICS = 0, //!< Graphics clock domain + NVML_CLOCK_SM = 1, //!< SM clock domain + NVML_CLOCK_MEM = 2, //!< Memory clock domain + NVML_CLOCK_VIDEO = 3, //!< Video encoder/decoder clock domain + + // Keep this last + NVML_CLOCK_COUNT //!< Count of clock types +} nvmlClockType_t; + +/** + * Clock Ids. These are used in combination with nvmlClockType_t + * to specify a single clock value. 
+ */ +typedef enum nvmlClockId_enum +{ + NVML_CLOCK_ID_CURRENT = 0, //!< Current actual clock value + NVML_CLOCK_ID_APP_CLOCK_TARGET = 1, //!< Target application clock + NVML_CLOCK_ID_APP_CLOCK_DEFAULT = 2, //!< Default application clock target + NVML_CLOCK_ID_CUSTOMER_BOOST_MAX = 3, //!< OEM-defined maximum clock rate + + //Keep this last + NVML_CLOCK_ID_COUNT //!< Count of Clock Ids. +} nvmlClockId_t; + +/** + * Driver models. + * + * Windows only. + */ +typedef enum nvmlDriverModel_enum +{ + NVML_DRIVER_WDDM = 0, //!< WDDM driver model -- GPU treated as a display device + NVML_DRIVER_WDM = 1 //!< WDM (TCC) model (recommended) -- GPU treated as a generic device +} nvmlDriverModel_t; + +/** + * Allowed PStates. + */ +typedef enum nvmlPStates_enum +{ + NVML_PSTATE_0 = 0, //!< Performance state 0 -- Maximum Performance + NVML_PSTATE_1 = 1, //!< Performance state 1 + NVML_PSTATE_2 = 2, //!< Performance state 2 + NVML_PSTATE_3 = 3, //!< Performance state 3 + NVML_PSTATE_4 = 4, //!< Performance state 4 + NVML_PSTATE_5 = 5, //!< Performance state 5 + NVML_PSTATE_6 = 6, //!< Performance state 6 + NVML_PSTATE_7 = 7, //!< Performance state 7 + NVML_PSTATE_8 = 8, //!< Performance state 8 + NVML_PSTATE_9 = 9, //!< Performance state 9 + NVML_PSTATE_10 = 10, //!< Performance state 10 + NVML_PSTATE_11 = 11, //!< Performance state 11 + NVML_PSTATE_12 = 12, //!< Performance state 12 + NVML_PSTATE_13 = 13, //!< Performance state 13 + NVML_PSTATE_14 = 14, //!< Performance state 14 + NVML_PSTATE_15 = 15, //!< Performance state 15 -- Minimum Performance + NVML_PSTATE_UNKNOWN = 32 //!< Unknown performance state +} nvmlPstates_t; + +/** + * GPU Operation Mode + * + * GOM allows to reduce power usage and optimize GPU throughput by disabling GPU features. + * + * Each GOM is designed to meet specific user needs. + */ +typedef enum nvmlGom_enum +{ + NVML_GOM_ALL_ON = 0, //!< Everything is enabled and running at full speed + + NVML_GOM_COMPUTE = 1, //!< Designed for running only compute tasks. Graphics operations + //!< are not allowed + + NVML_GOM_LOW_DP = 2 //!< Designed for running graphics applications that don't require + //!< high bandwidth double precision +} nvmlGpuOperationMode_t; + +/** + * Available infoROM objects. + */ +typedef enum nvmlInforomObject_enum +{ + NVML_INFOROM_OEM = 0, //!< An object defined by OEM + NVML_INFOROM_ECC = 1, //!< The ECC object determining the level of ECC support + NVML_INFOROM_POWER = 2, //!< The power management object + + // Keep this last + NVML_INFOROM_COUNT //!< This counts the number of infoROM objects the driver knows about +} nvmlInforomObject_t; + +/** + * Return values for NVML API calls. 
+ */ +typedef enum nvmlReturn_enum +{ + // cppcheck-suppress * + NVML_SUCCESS = 0, //!< The operation was successful + NVML_ERROR_UNINITIALIZED = 1, //!< NVML was not first initialized with nvmlInit() + NVML_ERROR_INVALID_ARGUMENT = 2, //!< A supplied argument is invalid + NVML_ERROR_NOT_SUPPORTED = 3, //!< The requested operation is not available on target device + NVML_ERROR_NO_PERMISSION = 4, //!< The current user does not have permission for operation + NVML_ERROR_ALREADY_INITIALIZED = 5, //!< Deprecated: Multiple initializations are now allowed through ref counting + NVML_ERROR_NOT_FOUND = 6, //!< A query to find an object was unsuccessful + NVML_ERROR_INSUFFICIENT_SIZE = 7, //!< An input argument is not large enough + NVML_ERROR_INSUFFICIENT_POWER = 8, //!< A device's external power cables are not properly attached + NVML_ERROR_DRIVER_NOT_LOADED = 9, //!< NVIDIA driver is not loaded + NVML_ERROR_TIMEOUT = 10, //!< User provided timeout passed + NVML_ERROR_IRQ_ISSUE = 11, //!< NVIDIA Kernel detected an interrupt issue with a GPU + NVML_ERROR_LIBRARY_NOT_FOUND = 12, //!< NVML Shared Library couldn't be found or loaded + NVML_ERROR_FUNCTION_NOT_FOUND = 13, //!< Local version of NVML doesn't implement this function + NVML_ERROR_CORRUPTED_INFOROM = 14, //!< infoROM is corrupted + NVML_ERROR_GPU_IS_LOST = 15, //!< The GPU has fallen off the bus or has otherwise become inaccessible + NVML_ERROR_RESET_REQUIRED = 16, //!< The GPU requires a reset before it can be used again + NVML_ERROR_OPERATING_SYSTEM = 17, //!< The GPU control device has been blocked by the operating system/cgroups + NVML_ERROR_LIB_RM_VERSION_MISMATCH = 18, //!< RM detects a driver/library version mismatch + NVML_ERROR_IN_USE = 19, //!< An operation cannot be performed because the GPU is currently in use + NVML_ERROR_MEMORY = 20, //!< Insufficient memory + NVML_ERROR_NO_DATA = 21, //!< No data + NVML_ERROR_VGPU_ECC_NOT_SUPPORTED = 22, //!< The requested vgpu operation is not available on target device, becasue ECC is enabled + NVML_ERROR_INSUFFICIENT_RESOURCES = 23, //!< Ran out of critical resources, other than memory + NVML_ERROR_FREQ_NOT_SUPPORTED = 24, //!< Ran out of critical resources, other than memory + NVML_ERROR_UNKNOWN = 999 //!< An internal driver error occurred +} nvmlReturn_t; + +/** + * See \ref nvmlDeviceGetMemoryErrorCounter + */ +typedef enum nvmlMemoryLocation_enum +{ + NVML_MEMORY_LOCATION_L1_CACHE = 0, //!< GPU L1 Cache + NVML_MEMORY_LOCATION_L2_CACHE = 1, //!< GPU L2 Cache + NVML_MEMORY_LOCATION_DRAM = 2, //!< Turing+ DRAM + NVML_MEMORY_LOCATION_DEVICE_MEMORY = 2, //!< GPU Device Memory + NVML_MEMORY_LOCATION_REGISTER_FILE = 3, //!< GPU Register File + NVML_MEMORY_LOCATION_TEXTURE_MEMORY = 4, //!< GPU Texture Memory + NVML_MEMORY_LOCATION_TEXTURE_SHM = 5, //!< Shared memory + NVML_MEMORY_LOCATION_CBU = 6, //!< CBU + NVML_MEMORY_LOCATION_SRAM = 7, //!< Turing+ SRAM + // Keep this last + NVML_MEMORY_LOCATION_COUNT //!< This counts the number of memory locations the driver knows about +} nvmlMemoryLocation_t; + +/** + * Causes for page retirement + */ +typedef enum nvmlPageRetirementCause_enum +{ + NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS = 0, //!< Page was retired due to multiple single bit ECC error + NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR = 1, //!< Page was retired due to double bit ECC error + + // Keep this last + NVML_PAGE_RETIREMENT_CAUSE_COUNT +} nvmlPageRetirementCause_t; + +/** + * API types that allow changes to default permission restrictions + */ +typedef enum 
nvmlRestrictedAPI_enum +{ + NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS = 0, //!< APIs that change application clocks, see nvmlDeviceSetApplicationsClocks + //!< and see nvmlDeviceResetApplicationsClocks + NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS = 1, //!< APIs that enable/disable Auto Boosted clocks + //!< see nvmlDeviceSetAutoBoostedClocksEnabled + // Keep this last + NVML_RESTRICTED_API_COUNT +} nvmlRestrictedAPI_t; + +/** + * Enum to represent NvLink ECC per-lane error counts + */ +typedef enum nvmlNvLinkEccLaneErrorCounter_enum +{ + NVML_NVLINK_ERROR_DL_ECC_LANE0 = 0, // Data link receive ECC error counter lane 0 + NVML_NVLINK_ERROR_DL_ECC_LANE1 = 1, // Data link receive ECC error counter lane 1 + NVML_NVLINK_ERROR_DL_ECC_LANE2 = 2, // Data link receive ECC error counter lane 2 + NVML_NVLINK_ERROR_DL_ECC_LANE3 = 3, // Data link receive ECC error counter lane 3 + + // this must be last + NVML_NVLINK_ERROR_DL_ECC_COUNT +} nvmlNvLinkEccLaneErrorCounter_t; + +/** @} */ + +/***************************************************************************************************/ +/** @addtogroup virtualGPU + * @{ + */ +/***************************************************************************************************/ +/** @defgroup nvmlVirtualGpuEnums vGPU Enums + * @{ + */ +/***************************************************************************************************/ + +/*! + * GPU virtualization mode types. + */ +typedef enum nvmlGpuVirtualizationMode { + NVML_GPU_VIRTUALIZATION_MODE_NONE = 0, //!< Represents Bare Metal GPU + NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH = 1, //!< Device is associated with GPU-Passthorugh + NVML_GPU_VIRTUALIZATION_MODE_VGPU = 2, //!< Device is associated with vGPU inside virtual machine. + NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU = 3, //!< Device is associated with VGX hypervisor in vGPU mode + NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA = 4 //!< Device is associated with VGX hypervisor in vSGA mode +} nvmlGpuVirtualizationMode_t; + +/** + * Host vGPU modes + */ +typedef enum nvmlHostVgpuMode_enum +{ + NVML_HOST_VGPU_MODE_NON_SRIOV = 0, //!< Non SR-IOV mode + NVML_HOST_VGPU_MODE_SRIOV = 1 //!< SR-IOV mode +} nvmlHostVgpuMode_t; + +/*! + * Types of VM identifiers + */ +typedef enum nvmlVgpuVmIdType { + NVML_VGPU_VM_ID_DOMAIN_ID = 0, //!< VM ID represents DOMAIN ID + NVML_VGPU_VM_ID_UUID = 1 //!< VM ID represents UUID +} nvmlVgpuVmIdType_t; + +/** + * vGPU GUEST info state. + */ +typedef enum nvmlVgpuGuestInfoState_enum +{ + NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0, //!< Guest-dependent fields uninitialized + NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = 1 //!< Guest-dependent fields initialized +} nvmlVgpuGuestInfoState_t; + +/** + * vGPU software licensable features + */ +typedef enum { + NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN = 0, //!< Unknown + NVML_GRID_LICENSE_FEATURE_CODE_VGPU = 1, //!< Virtual GPU + NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX = 2, //!< Nvidia RTX + NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX, //!< Deprecated, do not use. 
+ NVML_GRID_LICENSE_FEATURE_CODE_GAMING = 3, //!< Gaming + NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE = 4 //!< Compute +} nvmlGridLicenseFeatureCode_t; + +/** + * Status codes for license expiry + */ +#define NVML_GRID_LICENSE_EXPIRY_NOT_AVAILABLE 0 //!< Expiry information not available +#define NVML_GRID_LICENSE_EXPIRY_INVALID 1 //!< Invalid expiry or error fetching expiry +#define NVML_GRID_LICENSE_EXPIRY_VALID 2 //!< Valid expiry +#define NVML_GRID_LICENSE_EXPIRY_NOT_APPLICABLE 3 //!< Expiry not applicable +#define NVML_GRID_LICENSE_EXPIRY_PERMANENT 4 //!< Permanent expiry + +/** @} */ + +/***************************************************************************************************/ + +/** @defgroup nvmlVgpuConstants vGPU Constants + * @{ + */ +/***************************************************************************************************/ + +/** + * Buffer size guaranteed to be large enough for \ref nvmlVgpuTypeGetLicense + */ +#define NVML_GRID_LICENSE_BUFFER_SIZE 128 + +#define NVML_VGPU_NAME_BUFFER_SIZE 64 + +#define NVML_GRID_LICENSE_FEATURE_MAX_COUNT 3 + +#define INVALID_GPU_INSTANCE_PROFILE_ID 0xFFFFFFFF + +#define INVALID_GPU_INSTANCE_ID 0xFFFFFFFF + +/*! + * Macros for vGPU instance's virtualization capabilities bitfield. + */ +#define NVML_VGPU_VIRTUALIZATION_CAP_MIGRATION 0:0 +#define NVML_VGPU_VIRTUALIZATION_CAP_MIGRATION_NO 0x0 +#define NVML_VGPU_VIRTUALIZATION_CAP_MIGRATION_YES 0x1 + +/*! + * Macros for pGPU's virtualization capabilities bitfield. + */ +#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION 0:0 +#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_NO 0x0 +#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_YES 0x1 + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlVgpuStructs vGPU Structs + * @{ + */ +/***************************************************************************************************/ + +typedef unsigned int nvmlVgpuTypeId_t; + +typedef unsigned int nvmlVgpuInstance_t; + +/** + * Structure to store Utilization Value and vgpuInstance + */ +typedef struct nvmlVgpuInstanceUtilizationSample_st +{ + nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + nvmlValue_t smUtil; //!< SM (3D/Compute) Util Value + nvmlValue_t memUtil; //!< Frame Buffer Memory Util Value + nvmlValue_t encUtil; //!< Encoder Util Value + nvmlValue_t decUtil; //!< Decoder Util Value +} nvmlVgpuInstanceUtilizationSample_t; + +/** + * Structure to store Utilization Value, vgpuInstance and subprocess information + */ +typedef struct nvmlVgpuProcessUtilizationSample_st +{ + nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance + unsigned int pid; //!< PID of process running within the vGPU VM + char processName[NVML_VGPU_NAME_BUFFER_SIZE]; //!< Name of process running within the vGPU VM + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + unsigned int smUtil; //!< SM (3D/Compute) Util Value + unsigned int memUtil; //!< Frame Buffer Memory Util Value + unsigned int encUtil; //!< Encoder Util Value + unsigned int decUtil; //!< Decoder Util Value +} nvmlVgpuProcessUtilizationSample_t; + +/** + * Structure to store the vGPU license expiry details + */ +typedef struct nvmlVgpuLicenseExpiry_st +{ + unsigned int year; //!< Year of license expiry + unsigned short month; //!< Month of license expiry + unsigned short day; //!< Day of license expiry + unsigned short hour; //!< Hour of license expiry + unsigned short min; 
//!< Minutes of license expiry + unsigned short sec; //!< Seconds of license expiry + unsigned char status; //!< License expiry status +} nvmlVgpuLicenseExpiry_t; + +/** + * vGPU license state + */ +#define NVML_GRID_LICENSE_STATE_UNKNOWN 0 //!< Unknown state +#define NVML_GRID_LICENSE_STATE_UNINITIALIZED 1 //!< Uninitialized state +#define NVML_GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED 2 //!< Unlicensed unrestricted state +#define NVML_GRID_LICENSE_STATE_UNLICENSED_RESTRICTED 3 //!< Unlicensed restricted state +#define NVML_GRID_LICENSE_STATE_UNLICENSED 4 //!< Unlicensed state +#define NVML_GRID_LICENSE_STATE_LICENSED 5 //!< Licensed state + +typedef struct nvmlVgpuLicenseInfo_st +{ + unsigned char isLicensed; //!< License status + nvmlVgpuLicenseExpiry_t licenseExpiry; //!< License expiry information + unsigned int currentState; //!< Current license state +} nvmlVgpuLicenseInfo_t; + +/** + * Structure to store utilization value and process Id + */ +typedef struct nvmlProcessUtilizationSample_st +{ + unsigned int pid; //!< PID of process + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + unsigned int smUtil; //!< SM (3D/Compute) Util Value + unsigned int memUtil; //!< Frame Buffer Memory Util Value + unsigned int encUtil; //!< Encoder Util Value + unsigned int decUtil; //!< Decoder Util Value +} nvmlProcessUtilizationSample_t; + +/** + * Structure to store license expiry date and time values + */ +typedef struct nvmlGridLicenseExpiry_st +{ + unsigned int year; //!< Year value of license expiry + unsigned short month; //!< Month value of license expiry + unsigned short day; //!< Day value of license expiry + unsigned short hour; //!< Hour value of license expiry + unsigned short min; //!< Minutes value of license expiry + unsigned short sec; //!< Seconds value of license expiry + unsigned char status; //!< License expiry status +} nvmlGridLicenseExpiry_t; + +/** + * Structure containing vGPU software licensable feature information + */ +typedef struct nvmlGridLicensableFeature_st +{ + nvmlGridLicenseFeatureCode_t featureCode; //!< Licensed feature code + unsigned int featureState; //!< Non-zero if feature is currently licensed, otherwise zero + char licenseInfo[NVML_GRID_LICENSE_BUFFER_SIZE]; //!< Deprecated. + char productName[NVML_GRID_LICENSE_BUFFER_SIZE]; //!< Product name of feature + unsigned int featureEnabled; //!< Non-zero if feature is enabled, otherwise zero + nvmlGridLicenseExpiry_t licenseExpiry; //!< License expiry structure containing date and time +} nvmlGridLicensableFeature_t; + +/** + * Structure to store vGPU software licensable features + */ +typedef struct nvmlGridLicensableFeatures_st +{ + int isGridLicenseSupported; //!< Non-zero if vGPU Software Licensing is supported on the system, otherwise zero + unsigned int licensableFeaturesCount; //!< Entries returned in \a gridLicensableFeatures array + nvmlGridLicensableFeature_t gridLicensableFeatures[NVML_GRID_LICENSE_FEATURE_MAX_COUNT]; //!< Array of vGPU software licensable features. 
+} nvmlGridLicensableFeatures_t; + +/** + * Simplified chip architecture + */ +#define NVML_DEVICE_ARCH_KEPLER 2 // Devices based on the NVIDIA Kepler architecture +#define NVML_DEVICE_ARCH_MAXWELL 3 // Devices based on the NVIDIA Maxwell architecture +#define NVML_DEVICE_ARCH_PASCAL 4 // Devices based on the NVIDIA Pascal architecture +#define NVML_DEVICE_ARCH_VOLTA 5 // Devices based on the NVIDIA Volta architecture +#define NVML_DEVICE_ARCH_TURING 6 // Devices based on the NVIDIA Turing architecture + +#define NVML_DEVICE_ARCH_AMPERE 7 // Devices based on the NVIDIA Ampere architecture + +#define NVML_DEVICE_ARCH_UNKNOWN 0xffffffff // Anything else, presumably something newer + +typedef unsigned int nvmlDeviceArchitecture_t; + +/** + * PCI bus types + */ +#define NVML_BUS_TYPE_UNKNOWN 0 +#define NVML_BUS_TYPE_PCI 1 +#define NVML_BUS_TYPE_PCIE 2 +#define NVML_BUS_TYPE_FPCI 3 +#define NVML_BUS_TYPE_AGP 4 + +typedef unsigned int nvmlBusType_t; + +/** + * Device Power Source + */ +#define NVML_POWER_SOURCE_AC 0x00000000 +#define NVML_POWER_SOURCE_BATTERY 0x00000001 + +typedef unsigned int nvmlPowerSource_t; + +/* + * Device PCIE link Max Speed + */ +#define NVML_PCIE_LINK_MAX_SPEED_INVALID 0x00000000 +#define NVML_PCIE_LINK_MAX_SPEED_2500MBPS 0x00000001 +#define NVML_PCIE_LINK_MAX_SPEED_5000MBPS 0x00000002 +#define NVML_PCIE_LINK_MAX_SPEED_8000MBPS 0x00000003 +#define NVML_PCIE_LINK_MAX_SPEED_16000MBPS 0x00000004 +#define NVML_PCIE_LINK_MAX_SPEED_32000MBPS 0x00000005 + +/* + * Adaptive clocking status + */ +#define NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED 0x00000000 +#define NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED 0x00000001 + +/** @} */ +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlFieldValueEnums Field Value Enums + * @{ + */ +/***************************************************************************************************/ + +/** + * Field Identifiers. + * + * All Identifiers pertain to a device. Each ID is only used once and is guaranteed never to change. + */ +#define NVML_FI_DEV_ECC_CURRENT 1 //!< Current ECC mode. 1=Active. 0=Inactive +#define NVML_FI_DEV_ECC_PENDING 2 //!< Pending ECC mode. 1=Active. 
0=Inactive +/* ECC Count Totals */ +#define NVML_FI_DEV_ECC_SBE_VOL_TOTAL 3 //!< Total single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_TOTAL 4 //!< Total double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_TOTAL 5 //!< Total single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_TOTAL 6 //!< Total double bit aggregate (persistent) ECC errors +/* Individual ECC locations */ +#define NVML_FI_DEV_ECC_SBE_VOL_L1 7 //!< L1 cache single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_L1 8 //!< L1 cache double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_VOL_L2 9 //!< L2 cache single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_L2 10 //!< L2 cache double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_VOL_DEV 11 //!< Device memory single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_DEV 12 //!< Device memory double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_VOL_REG 13 //!< Register file single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_REG 14 //!< Register file double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_VOL_TEX 15 //!< Texture memory single bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_TEX 16 //!< Texture memory double bit volatile ECC errors +#define NVML_FI_DEV_ECC_DBE_VOL_CBU 17 //!< CBU double bit volatile ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_L1 18 //!< L1 cache single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_L1 19 //!< L1 cache double bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_L2 20 //!< L2 cache single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_L2 21 //!< L2 cache double bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_DEV 22 //!< Device memory single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_DEV 23 //!< Device memory double bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_REG 24 //!< Register File single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_REG 25 //!< Register File double bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_SBE_AGG_TEX 26 //!< Texture memory single bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_TEX 27 //!< Texture memory double bit aggregate (persistent) ECC errors +#define NVML_FI_DEV_ECC_DBE_AGG_CBU 28 //!< CBU double bit aggregate ECC errors + +/* Page Retirement */ +#define NVML_FI_DEV_RETIRED_SBE 29 //!< Number of retired pages because of single bit errors +#define NVML_FI_DEV_RETIRED_DBE 30 //!< Number of retired pages because of double bit errors +#define NVML_FI_DEV_RETIRED_PENDING 31 //!< If any pages are pending retirement. 1=yes. 0=no. 
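(Illustrative aside, not part of this patch.) The ECC and page-retirement field identifiers above are consumed through the batched field-value API rather than dedicated getters: a caller fills an array of nvmlFieldValue_t (the struct is defined further down in this header) with the desired NVML_FI_* IDs and passes it to nvmlDeviceGetFieldValues, which is declared later in nvml.h. The following is a minimal, standalone C sketch of that flow; it assumes nvml.h is installed on the include path and the program is linked against libnvidia-ml (e.g. gcc example.c -lnvidia-ml), and the choice of field IDs is only an example.

/* Minimal sketch: batch-query two field values for GPU 0. */
#include <stdio.h>
#include <nvml.h>

int main(void)
{
    nvmlReturn_t ret = nvmlInit_v2();
    if (ret != NVML_SUCCESS) {
        fprintf(stderr, "nvmlInit_v2 failed: %s\n", nvmlErrorString(ret));
        return 1;
    }

    nvmlDevice_t device;
    ret = nvmlDeviceGetHandleByIndex_v2(0, &device);
    if (ret != NVML_SUCCESS) {
        fprintf(stderr, "nvmlDeviceGetHandleByIndex_v2 failed: %s\n", nvmlErrorString(ret));
        nvmlShutdown();
        return 1;
    }

    /* Request the aggregate double-bit ECC total and the pending-retirement
     * flag in one call; each element carries its own status and value type. */
    nvmlFieldValue_t values[2] = { 0 };
    values[0].fieldId = NVML_FI_DEV_ECC_DBE_AGG_TOTAL;
    values[1].fieldId = NVML_FI_DEV_RETIRED_PENDING;

    ret = nvmlDeviceGetFieldValues(device, 2, values);
    if (ret != NVML_SUCCESS) {
        fprintf(stderr, "nvmlDeviceGetFieldValues failed: %s\n", nvmlErrorString(ret));
    } else {
        for (int i = 0; i < 2; i++) {
            if (values[i].nvmlReturn != NVML_SUCCESS) {
                /* value is undefined unless nvmlReturn == NVML_SUCCESS */
                fprintf(stderr, "field %u: %s\n", values[i].fieldId,
                        nvmlErrorString(values[i].nvmlReturn));
                continue;
            }
            if (values[i].valueType == NVML_VALUE_TYPE_UNSIGNED_LONG_LONG)
                printf("field %u = %llu\n", values[i].fieldId, values[i].value.ullVal);
            else if (values[i].valueType == NVML_VALUE_TYPE_UNSIGNED_INT)
                printf("field %u = %u\n", values[i].fieldId, values[i].value.uiVal);
        }
    }

    nvmlShutdown();
    return 0;
}

Note that the per-element nvmlReturn and valueType must be checked before reading value, exactly as the nvmlFieldValue_t member comments later in this header require.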
+ +/* NvLink Flit Error Counters */ +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 32 //!< NVLink flow control CRC Error Counter for Lane 0 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 33 //!< NVLink flow control CRC Error Counter for Lane 1 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 34 //!< NVLink flow control CRC Error Counter for Lane 2 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 35 //!< NVLink flow control CRC Error Counter for Lane 3 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 36 //!< NVLink flow control CRC Error Counter for Lane 4 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 37 //!< NVLink flow control CRC Error Counter for Lane 5 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL 38 //!< NVLink flow control CRC Error Counter total for all Lanes + +/* NvLink CRC Data Error Counters */ +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 39 //!< NVLink data CRC Error Counter for Lane 0 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 40 //!< NVLink data CRC Error Counter for Lane 1 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 41 //!< NVLink data CRC Error Counter for Lane 2 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 42 //!< NVLink data CRC Error Counter for Lane 3 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 43 //!< NVLink data CRC Error Counter for Lane 4 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 44 //!< NVLink data CRC Error Counter for Lane 5 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL 45 //!< NvLink data CRC Error Counter total for all Lanes + +/* NvLink Replay Error Counters */ +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 46 //!< NVLink Replay Error Counter for Lane 0 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 47 //!< NVLink Replay Error Counter for Lane 1 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 48 //!< NVLink Replay Error Counter for Lane 2 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 49 //!< NVLink Replay Error Counter for Lane 3 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 50 //!< NVLink Replay Error Counter for Lane 4 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 51 //!< NVLink Replay Error Counter for Lane 5 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL 52 //!< NVLink Replay Error Counter total for all Lanes + +/* NvLink Recovery Error Counters */ +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 53 //!< NVLink Recovery Error Counter for Lane 0 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 54 //!< NVLink Recovery Error Counter for Lane 1 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 55 //!< NVLink Recovery Error Counter for Lane 2 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 56 //!< NVLink Recovery Error Counter for Lane 3 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 57 //!< NVLink Recovery Error Counter for Lane 4 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 58 //!< NVLink Recovery Error Counter for Lane 5 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL 59 //!< NVLink Recovery Error Counter total for all Lanes + +/* NvLink Bandwidth Counters */ +/* + * NVML_FI_DEV_NVLINK_BANDWIDTH_* field values are now deprecated. 
+ * Please use the following field values instead: + * NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX + * NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_RX + * NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_TX + * NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_RX + */ +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L0 60 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 0 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L1 61 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 1 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L2 62 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 2 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L3 63 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 3 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L4 64 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 4 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L5 65 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 5 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_TOTAL 66 //!< NVLink Bandwidth Counter Total for Counter Set 0, All Lanes + +/* NvLink Bandwidth Counters */ +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L0 67 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 0 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L1 68 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 1 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L2 69 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 2 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L3 70 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 3 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L4 71 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 4 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L5 72 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 5 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_TOTAL 73 //!< NVLink Bandwidth Counter Total for Counter Set 1, All Lanes + +/* NVML Perf Policy Counters */ +#define NVML_FI_DEV_PERF_POLICY_POWER 74 //!< Perf Policy Counter for Power Policy +#define NVML_FI_DEV_PERF_POLICY_THERMAL 75 //!< Perf Policy Counter for Thermal Policy +#define NVML_FI_DEV_PERF_POLICY_SYNC_BOOST 76 //!< Perf Policy Counter for Sync boost Policy +#define NVML_FI_DEV_PERF_POLICY_BOARD_LIMIT 77 //!< Perf Policy Counter for Board Limit +#define NVML_FI_DEV_PERF_POLICY_LOW_UTILIZATION 78 //!< Perf Policy Counter for Low GPU Utilization Policy +#define NVML_FI_DEV_PERF_POLICY_RELIABILITY 79 //!< Perf Policy Counter for Reliability Policy +#define NVML_FI_DEV_PERF_POLICY_TOTAL_APP_CLOCKS 80 //!< Perf Policy Counter for Total App Clock Policy +#define NVML_FI_DEV_PERF_POLICY_TOTAL_BASE_CLOCKS 81 //!< Perf Policy Counter for Total Base Clocks Policy + +/* Memory temperatures */ +#define NVML_FI_DEV_MEMORY_TEMP 82 //!< Memory temperature for the device + +/* Energy Counter */ +#define NVML_FI_DEV_TOTAL_ENERGY_CONSUMPTION 83 //!< Total energy consumption for the GPU in mJ since the driver was last reloaded + +/* NVLink Speed */ +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L0 84 //!< NVLink Speed in MBps for Link 0 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L1 85 //!< NVLink Speed in MBps for Link 1 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L2 86 //!< NVLink Speed in MBps for Link 2 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L3 87 //!< NVLink Speed in MBps for Link 3 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L4 88 //!< NVLink Speed in MBps for Link 4 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L5 89 //!< NVLink Speed in MBps for Link 5 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_COMMON 90 //!< Common NVLink Speed in MBps for active links + +#define NVML_FI_DEV_NVLINK_LINK_COUNT 91 //!< Number of NVLinks present on the device + +#define NVML_FI_DEV_RETIRED_PENDING_SBE 92 //!< If any 
pages are pending retirement due to SBE. 1=yes. 0=no. +#define NVML_FI_DEV_RETIRED_PENDING_DBE 93 //!< If any pages are pending retirement due to DBE. 1=yes. 0=no. + +#define NVML_FI_DEV_PCIE_REPLAY_COUNTER 94 //!< PCIe replay counter +#define NVML_FI_DEV_PCIE_REPLAY_ROLLOVER_COUNTER 95 //!< PCIe replay rollover counter + +/* NvLink Flit Error Counters */ +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 96 //!< NVLink flow control CRC Error Counter for Lane 6 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 97 //!< NVLink flow control CRC Error Counter for Lane 7 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 98 //!< NVLink flow control CRC Error Counter for Lane 8 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 99 //!< NVLink flow control CRC Error Counter for Lane 9 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 100 //!< NVLink flow control CRC Error Counter for Lane 10 +#define NVML_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 101 //!< NVLink flow control CRC Error Counter for Lane 11 + +/* NvLink CRC Data Error Counters */ +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 102 //!< NVLink data CRC Error Counter for Lane 6 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 103 //!< NVLink data CRC Error Counter for Lane 7 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 104 //!< NVLink data CRC Error Counter for Lane 8 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 105 //!< NVLink data CRC Error Counter for Lane 9 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 106 //!< NVLink data CRC Error Counter for Lane 10 +#define NVML_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 107 //!< NVLink data CRC Error Counter for Lane 11 + +/* NvLink Replay Error Counters */ +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 108 //!< NVLink Replay Error Counter for Lane 6 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 109 //!< NVLink Replay Error Counter for Lane 7 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 110 //!< NVLink Replay Error Counter for Lane 8 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 111 //!< NVLink Replay Error Counter for Lane 9 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 112 //!< NVLink Replay Error Counter for Lane 10 +#define NVML_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 113 //!< NVLink Replay Error Counter for Lane 11 + +/* NvLink Recovery Error Counters */ +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 114 //!< NVLink Recovery Error Counter for Lane 6 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 115 //!< NVLink Recovery Error Counter for Lane 7 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 116 //!< NVLink Recovery Error Counter for Lane 8 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 117 //!< NVLink Recovery Error Counter for Lane 9 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 118 //!< NVLink Recovery Error Counter for Lane 10 +#define NVML_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 119 //!< NVLink Recovery Error Counter for Lane 11 + +/* NvLink Bandwidth Counters */ +/* + * NVML_FI_DEV_NVLINK_BANDWIDTH_* field values are now deprecated. 
+ * Please use the following field values instead: + * NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX + * NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_RX + * NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_TX + * NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_RX + */ +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L6 120 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 6 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L7 121 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 7 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L8 122 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 8 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L9 123 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 9 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L10 124 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 10 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C0_L11 125 //!< NVLink Bandwidth Counter for Counter Set 0, Lane 11 + +/* NvLink Bandwidth Counters */ +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L6 126 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 6 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L7 127 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 7 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L8 128 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 8 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L9 129 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 9 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L10 130 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 10 +#define NVML_FI_DEV_NVLINK_BANDWIDTH_C1_L11 131 //!< NVLink Bandwidth Counter for Counter Set 1, Lane 11 + +/* NVLink Speed */ +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L6 132 //!< NVLink Speed in MBps for Link 6 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L7 133 //!< NVLink Speed in MBps for Link 7 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L8 134 //!< NVLink Speed in MBps for Link 8 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L9 135 //!< NVLink Speed in MBps for Link 9 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L10 136 //!< NVLink Speed in MBps for Link 10 +#define NVML_FI_DEV_NVLINK_SPEED_MBPS_L11 137 //!< NVLink Speed in MBps for Link 11 + +/** + * NVLink throughput counters field values + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + * A scopeId of UINT_MAX returns aggregate value summed up across all links + * for the specified counter type in fieldId. + */ +#define NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX 138 //!< NVLink TX Data throughput in KiB +#define NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_RX 139 //!< NVLink RX Data throughput in KiB +#define NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_TX 140 //!< NVLink TX Data + protocol overhead in KiB +#define NVML_FI_DEV_NVLINK_THROUGHPUT_RAW_RX 141 //!< NVLink RX Data + protocol overhead in KiB + +/* Row Remapper */ +#define NVML_FI_DEV_REMAPPED_COR 142 //!< Number of remapped rows due to correctable errors +#define NVML_FI_DEV_REMAPPED_UNC 143 //!< Number of remapped rows due to uncorrectable errors +#define NVML_FI_DEV_REMAPPED_PENDING 144 //!< If any rows are pending remapping. 1=yes 0=no +#define NVML_FI_DEV_REMAPPED_FAILURE 145 //!< If any rows failed to be remapped 1=yes 0=no + +/** + * Remote device NVLink ID + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ +#define NVML_FI_DEV_NVLINK_REMOTE_NVLINK_ID 146 //!< Remote device NVLink ID + +/** + * NVSwitch: connected NVLink count + */ +#define NVML_FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT 147 //!< Number of NVLinks connected to NVSwitch + +/* NvLink ECC Data Error Counters + * + * Lane ID needs to be specified in the scopeId field in nvmlFieldValue_t. 
+ * + */ +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 148 //!< NVLink data ECC Error Counter for Link 0 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 149 //!< NVLink data ECC Error Counter for Link 1 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 150 //!< NVLink data ECC Error Counter for Link 2 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 151 //!< NVLink data ECC Error Counter for Link 3 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 152 //!< NVLink data ECC Error Counter for Link 4 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 153 //!< NVLink data ECC Error Counter for Link 5 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 154 //!< NVLink data ECC Error Counter for Link 6 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 155 //!< NVLink data ECC Error Counter for Link 7 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 156 //!< NVLink data ECC Error Counter for Link 8 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 157 //!< NVLink data ECC Error Counter for Link 9 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 158 //!< NVLink data ECC Error Counter for Link 10 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 159 //!< NVLink data ECC Error Counter for Link 11 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL 160 //!< NvLink data ECC Error Counter total for all Links + +#define NVML_FI_MAX 161 //!< One greater than the largest field ID defined above + +/** + * Information for a Field Value Sample + */ +typedef struct nvmlFieldValue_st +{ + unsigned int fieldId; //!< ID of the NVML field to retrieve. This must be set before any call that uses this struct. See the constants starting with NVML_FI_ above. + unsigned int scopeId; //!< Scope ID can represent data used by NVML depending on fieldId's context. For example, for NVLink throughput counter data, scopeId can represent linkId. + long long timestamp; //!< CPU Timestamp of this value in microseconds since 1970 + long long latencyUsec; //!< How long this field value took to update (in usec) within NVML. This may be averaged across several fields that are serviced by the same driver call. + nvmlValueType_t valueType; //!< Type of the value stored in value + nvmlReturn_t nvmlReturn; //!< Return code for retrieving this value. This must be checked before looking at value, as value is undefined if nvmlReturn != NVML_SUCCESS + nvmlValue_t value; //!< Value for this field. This is only valid if nvmlReturn == NVML_SUCCESS +} nvmlFieldValue_t; + + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlUnitStructs Unit Structs + * @{ + */ +/***************************************************************************************************/ + +typedef struct +{ + struct nvmlUnit_st* handle; +} nvmlUnit_t; + +/** + * Description of HWBC entry + */ +typedef struct nvmlHwbcEntry_st +{ + unsigned int hwbcId; + char firmwareVersion[32]; +} nvmlHwbcEntry_t; + +/** + * Fan state enum. + */ +typedef enum nvmlFanState_enum +{ + NVML_FAN_NORMAL = 0, //!< Fan is working properly + NVML_FAN_FAILED = 1 //!< Fan has failed +} nvmlFanState_t; + +/** + * Led color enum. + */ +typedef enum nvmlLedColor_enum +{ + NVML_LED_COLOR_GREEN = 0, //!< GREEN, indicates good health + NVML_LED_COLOR_AMBER = 1 //!< AMBER, indicates problem +} nvmlLedColor_t; + + +/** + * LED states for an S-class unit. 
+ */ +typedef struct nvmlLedState_st +{ + char cause[256]; //!< If amber, a text description of the cause + nvmlLedColor_t color; //!< GREEN or AMBER +} nvmlLedState_t; + +/** + * Static S-class unit info. + */ +typedef struct nvmlUnitInfo_st +{ + char name[96]; //!< Product name + char id[96]; //!< Product identifier + char serial[96]; //!< Product serial number + char firmwareVersion[96]; //!< Firmware version +} nvmlUnitInfo_t; + +/** + * Power usage information for an S-class unit. + * The power supply state is a human readable string that equals "Normal" or contains + * a combination of "Abnormal" plus one or more of the following: + * + * - High voltage + * - Fan failure + * - Heatsink temperature + * - Current limit + * - Voltage below UV alarm threshold + * - Low-voltage + * - SI2C remote off command + * - MOD_DISABLE input + * - Short pin transition +*/ +typedef struct nvmlPSUInfo_st +{ + char state[256]; //!< The power supply state + unsigned int current; //!< PSU current (A) + unsigned int voltage; //!< PSU voltage (V) + unsigned int power; //!< PSU power draw (W) +} nvmlPSUInfo_t; + +/** + * Fan speed reading for a single fan in an S-class unit. + */ +typedef struct nvmlUnitFanInfo_st +{ + unsigned int speed; //!< Fan speed (RPM) + nvmlFanState_t state; //!< Flag that indicates whether fan is working properly +} nvmlUnitFanInfo_t; + +/** + * Fan speed readings for an entire S-class unit. + */ +typedef struct nvmlUnitFanSpeeds_st +{ + nvmlUnitFanInfo_t fans[24]; //!< Fan speed data for each fan + unsigned int count; //!< Number of fans in unit +} nvmlUnitFanSpeeds_t; + +/** @} */ + +/***************************************************************************************************/ +/** @addtogroup nvmlEvents + * @{ + */ +/***************************************************************************************************/ + +/** + * Handle to an event set + */ +typedef struct +{ + struct nvmlEventSet_st* handle; +} nvmlEventSet_t; + +/** @defgroup nvmlEventType Event Types + * @{ + * Event Types which user can be notified about. + * See description of particular functions for details. + * + * See \ref nvmlDeviceRegisterEvents and \ref nvmlDeviceGetSupportedEventTypes to check which devices + * support each event. + * + * Types can be combined with bitwise or operator '|' when passed to \ref nvmlDeviceRegisterEvents + */ +//! Event about single bit ECC errors +/** + * \note A corrected texture memory error is not an ECC error, so it does not generate a single bit event + */ +#define nvmlEventTypeSingleBitEccError 0x0000000000000001LL + +//! Event about double bit ECC errors +/** + * \note An uncorrected texture memory error is not an ECC error, so it does not generate a double bit event + */ +#define nvmlEventTypeDoubleBitEccError 0x0000000000000002LL + +//! Event about PState changes +/** + * \note On Fermi architecture PState changes are also an indicator that GPU is throttling down due to + * no work being executed on the GPU, power capping or thermal capping. In a typical situation, + * Fermi-based GPU should stay in P0 for the duration of the execution of the compute process. + */ +#define nvmlEventTypePState 0x0000000000000004LL + +//! Event that Xid critical error occurred +#define nvmlEventTypeXidCriticalError 0x0000000000000008LL + +//! Event about clock changes +/** + * Kepler only + */ +#define nvmlEventTypeClock 0x0000000000000010LL + +//! Event about AC/Battery power source changes +#define nvmlEventTypePowerSourceChange 0x0000000000000080LL + +//! 
Event about MIG configuration changes +#define nvmlEventMigConfigChange 0x0000000000000100LL + +//! Mask with no events +#define nvmlEventTypeNone 0x0000000000000000LL + +//! Mask of all events +#define nvmlEventTypeAll (nvmlEventTypeNone \ + | nvmlEventTypeSingleBitEccError \ + | nvmlEventTypeDoubleBitEccError \ + | nvmlEventTypePState \ + | nvmlEventTypeClock \ + | nvmlEventTypeXidCriticalError \ + | nvmlEventTypePowerSourceChange \ + | nvmlEventMigConfigChange \ + ) +/** @} */ + +/** + * Information about occurred event + */ +typedef struct nvmlEventData_st +{ + nvmlDevice_t device; //!< Specific device where the event occurred + unsigned long long eventType; //!< Information about what specific event occurred + unsigned long long eventData; //!< Stores XID error for the device in the event of nvmlEventTypeXidCriticalError, + // eventData is 0 for any other event. eventData is set as 999 for unknown xid error. + unsigned int gpuInstanceId; //!< If MIG is enabled and nvmlEventTypeXidCriticalError event is attributable to a GPU + // instance, stores a valid GPU instance ID. gpuInstanceId is set to 0xFFFFFFFF + // otherwise. + unsigned int computeInstanceId; //!< If MIG is enabled and nvmlEventTypeXidCriticalError event is attributable to a + // compute instance, stores a valid compute instance ID. computeInstanceId is set to + // 0xFFFFFFFF otherwise. +} nvmlEventData_t; + +/** @} */ + +/***************************************************************************************************/ +/** @addtogroup nvmlClocksThrottleReasons + * @{ + */ +/***************************************************************************************************/ + +/** Nothing is running on the GPU and the clocks are dropping to Idle state + * \note This limiter may be removed in a later release + */ +#define nvmlClocksThrottleReasonGpuIdle 0x0000000000000001LL + +/** GPU clocks are limited by current setting of applications clocks + * + * @see nvmlDeviceSetApplicationsClocks + * @see nvmlDeviceGetApplicationsClock + */ +#define nvmlClocksThrottleReasonApplicationsClocksSetting 0x0000000000000002LL + +/** + * @deprecated Renamed to \ref nvmlClocksThrottleReasonApplicationsClocksSetting + * as the name describes the situation more accurately. + */ +#define nvmlClocksThrottleReasonUserDefinedClocks nvmlClocksThrottleReasonApplicationsClocksSetting + +/** SW Power Scaling algorithm is reducing the clocks below requested clocks + * + * @see nvmlDeviceGetPowerUsage + * @see nvmlDeviceSetPowerManagementLimit + * @see nvmlDeviceGetPowerManagementLimit + */ +#define nvmlClocksThrottleReasonSwPowerCap 0x0000000000000004LL + +/** HW Slowdown (reducing the core clocks by a factor of 2 or more) is engaged + * + * This is an indicator of: + * - temperature being too high + * - External Power Brake Assertion is triggered (e.g. by the system power supply) + * - Power draw is too high and Fast Trigger protection is reducing the clocks + * - May be also reported during PState or clock change + * - This behavior may be removed in a later release. + * + * @see nvmlDeviceGetTemperature + * @see nvmlDeviceGetTemperatureThreshold + * @see nvmlDeviceGetPowerUsage + */ +#define nvmlClocksThrottleReasonHwSlowdown 0x0000000000000008LL + +/** Sync Boost + * + * This GPU has been added to a Sync boost group with nvidia-smi or DCGM in + * order to maximize performance per watt. All GPUs in the sync boost group + * will boost to the minimum possible clocks across the entire group. 
Look at + * the throttle reasons for other GPUs in the system to see why those GPUs are + * holding this one at lower clocks. + * + */ +#define nvmlClocksThrottleReasonSyncBoost 0x0000000000000010LL + +/** SW Thermal Slowdown + * + * This is an indicator of one or more of the following: + * - Current GPU temperature above the GPU Max Operating Temperature + * - Current memory temperature above the Memory Max Operating Temperature + * + */ +#define nvmlClocksThrottleReasonSwThermalSlowdown 0x0000000000000020LL + +/** HW Thermal Slowdown (reducing the core clocks by a factor of 2 or more) is engaged + * + * This is an indicator of: + * - temperature being too high + * + * @see nvmlDeviceGetTemperature + * @see nvmlDeviceGetTemperatureThreshold + * @see nvmlDeviceGetPowerUsage + */ +#define nvmlClocksThrottleReasonHwThermalSlowdown 0x0000000000000040LL + +/** HW Power Brake Slowdown (reducing the core clocks by a factor of 2 or more) is engaged + * + * This is an indicator of: + * - External Power Brake Assertion being triggered (e.g. by the system power supply) + * + * @see nvmlDeviceGetTemperature + * @see nvmlDeviceGetTemperatureThreshold + * @see nvmlDeviceGetPowerUsage + */ +#define nvmlClocksThrottleReasonHwPowerBrakeSlowdown 0x0000000000000080LL + +/** GPU clocks are limited by current setting of Display clocks + * + * @see bug 1997531 + */ +#define nvmlClocksThrottleReasonDisplayClockSetting 0x0000000000000100LL + +/** Bit mask representing no clocks throttling + * + * Clocks are as high as possible. + * */ +#define nvmlClocksThrottleReasonNone 0x0000000000000000LL + +/** Bit mask representing all supported clocks throttling reasons + * New reasons might be added to this list in the future + */ +#define nvmlClocksThrottleReasonAll (nvmlClocksThrottleReasonNone \ + | nvmlClocksThrottleReasonGpuIdle \ + | nvmlClocksThrottleReasonApplicationsClocksSetting \ + | nvmlClocksThrottleReasonSwPowerCap \ + | nvmlClocksThrottleReasonHwSlowdown \ + | nvmlClocksThrottleReasonSyncBoost \ + | nvmlClocksThrottleReasonSwThermalSlowdown \ + | nvmlClocksThrottleReasonHwThermalSlowdown \ + | nvmlClocksThrottleReasonHwPowerBrakeSlowdown \ + | nvmlClocksThrottleReasonDisplayClockSetting \ +) +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlAccountingStats Accounting Statistics + * @{ + * + * Set of APIs designed to provide per process information about usage of GPU. + * + * @note All accounting statistics and accounting mode live in nvidia driver and reset + * to default (Disabled) when driver unloads. + * It is advised to run with persistence mode enabled. + * + * @note Enabling accounting mode has no negative impact on the GPU performance. + */ +/***************************************************************************************************/ + +/** + * Describes accounting statistics of a process. + */ +typedef struct nvmlAccountingStats_st { + unsigned int gpuUtilization; //!< Percent of time over the process's lifetime during which one or more kernels was executing on the GPU. + //! Utilization stats just like returned by \ref nvmlDeviceGetUtilizationRates but for the life time of a + //! process (not just the last sample period). + //! Set to NVML_VALUE_NOT_AVAILABLE if nvmlDeviceGetUtilizationRates is not supported + + unsigned int memoryUtilization; //!< Percent of time over the process's lifetime during which global (device) memory was being read or written. + //! 
Set to NVML_VALUE_NOT_AVAILABLE if nvmlDeviceGetUtilizationRates is not supported + + unsigned long long maxMemoryUsage; //!< Maximum total memory in bytes that was ever allocated by the process. + //! Set to NVML_VALUE_NOT_AVAILABLE if nvmlProcessInfo_t->usedGpuMemory is not supported + + + unsigned long long time; //!< Amount of time in ms during which the compute context was active. The time is reported as 0 if + //!< the process is not terminated + + unsigned long long startTime; //!< CPU Timestamp in usec representing start time for the process + + unsigned int isRunning; //!< Flag to represent if the process is running (1 for running, 0 for terminated) + + unsigned int reserved[5]; //!< Reserved for future use +} nvmlAccountingStats_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlEncoderStructs Encoder Structs + * @{ + */ +/***************************************************************************************************/ + +/** + * Represents type of encoder for capacity can be queried + */ +typedef enum nvmlEncoderQueryType_enum +{ + NVML_ENCODER_QUERY_H264 = 0, //!< H264 encoder + NVML_ENCODER_QUERY_HEVC = 1 //!< HEVC encoder +}nvmlEncoderType_t; + +/** + * Structure to hold encoder session data + */ +typedef struct nvmlEncoderSessionInfo_st +{ + unsigned int sessionId; //!< Unique session ID + unsigned int pid; //!< Owning process ID + nvmlVgpuInstance_t vgpuInstance; //!< Owning vGPU instance ID (only valid on vGPU hosts, otherwise zero) + nvmlEncoderType_t codecType; //!< Video encoder type + unsigned int hResolution; //!< Current encode horizontal resolution + unsigned int vResolution; //!< Current encode vertical resolution + unsigned int averageFps; //!< Moving average encode frames per second + unsigned int averageLatency; //!< Moving average encode latency in microseconds +}nvmlEncoderSessionInfo_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlFBCStructs Frame Buffer Capture Structures +* @{ +*/ +/***************************************************************************************************/ + +/** + * Represents frame buffer capture session type + */ +typedef enum nvmlFBCSessionType_enum +{ + NVML_FBC_SESSION_TYPE_UNKNOWN = 0, //!< Unknwon + NVML_FBC_SESSION_TYPE_TOSYS, //!< ToSys + NVML_FBC_SESSION_TYPE_CUDA, //!< Cuda + NVML_FBC_SESSION_TYPE_VID, //!< Vid + NVML_FBC_SESSION_TYPE_HWENC //!< HEnc +} nvmlFBCSessionType_t; + +/** + * Structure to hold frame buffer capture sessions stats + */ +typedef struct nvmlFBCStats_st +{ + unsigned int sessionsCount; //!< Total no of sessions + unsigned int averageFPS; //!< Moving average new frames captured per second + unsigned int averageLatency; //!< Moving average new frame capture latency in microseconds +} nvmlFBCStats_t; + +#define NVML_NVFBC_SESSION_FLAG_DIFFMAP_ENABLED 0x00000001 //!< Bit specifying differential map state. +#define NVML_NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED 0x00000002 //!< Bit specifying classification map state. +#define NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT 0x00000004 //!< Bit specifying if capture was requested as non-blocking call. +#define NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE 0x00000008 //!< Bit specifying if capture was requested as blocking call. 
+#define NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT 0x00000010 //!< Bit specifying if capture was requested as blocking call with timeout period. + +/** + * Structure to hold FBC session data + */ +typedef struct nvmlFBCSessionInfo_st +{ + unsigned int sessionId; //!< Unique session ID + unsigned int pid; //!< Owning process ID + nvmlVgpuInstance_t vgpuInstance; //!< Owning vGPU instance ID (only valid on vGPU hosts, otherwise zero) + unsigned int displayOrdinal; //!< Display identifier + nvmlFBCSessionType_t sessionType; //!< Type of frame buffer capture session + unsigned int sessionFlags; //!< Session flags (one or more of NVML_NVFBC_SESSION_FLAG_XXX). + unsigned int hMaxResolution; //!< Max horizontal resolution supported by the capture session + unsigned int vMaxResolution; //!< Max vertical resolution supported by the capture session + unsigned int hResolution; //!< Horizontal resolution requested by caller in capture call + unsigned int vResolution; //!< Vertical resolution requested by caller in capture call + unsigned int averageFPS; //!< Moving average new frames captured per second + unsigned int averageLatency; //!< Moving average new frame capture latency in microseconds +} nvmlFBCSessionInfo_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlDrainDefs definitions related to the drain state + * @{ + */ +/***************************************************************************************************/ + +/** + * Is the GPU device to be removed from the kernel by nvmlDeviceRemoveGpu() + */ +typedef enum nvmlDetachGpuState_enum +{ + NVML_DETACH_GPU_KEEP = 0, + NVML_DETACH_GPU_REMOVE +} nvmlDetachGpuState_t; + +/** + * Parent bridge PCIe link state requested by nvmlDeviceRemoveGpu() + */ +typedef enum nvmlPcieLinkState_enum +{ + NVML_PCIE_LINK_KEEP = 0, + NVML_PCIE_LINK_SHUT_DOWN +} nvmlPcieLinkState_t; + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlInitializationAndCleanup Initialization and Cleanup + * This chapter describes the methods that handle NVML initialization and cleanup. + * It is the user's responsibility to call \ref nvmlInit_v2() before calling any other methods, and + * nvmlShutdown() once NVML is no longer being used. + * @{ + */ +/***************************************************************************************************/ + +#define NVML_INIT_FLAG_NO_GPUS 1 //!< Don't fail nvmlInit() when no GPUs are found +#define NVML_INIT_FLAG_NO_ATTACH 2 //!< Don't attach GPUs + +/** + * Initialize NVML, but don't initialize any GPUs yet. + * + * \note nvmlInit_v3 introduces a "flags" argument, that allows passing boolean values + * modifying the behaviour of nvmlInit(). + * \note In NVML 5.319 new nvmlInit_v2 has replaced nvmlInit"_v1" (default in NVML 4.304 and older) that + * did initialize all GPU devices in the system. + * + * This allows NVML to communicate with a GPU + * when other GPUs in the system are unstable or in a bad state. When using this API, GPUs are + * discovered and initialized in nvmlDeviceGetHandleBy* functions instead. + * + * \note To contrast nvmlInit_v2 with nvmlInit"_v1", NVML 4.304 nvmlInit"_v1" will fail when any detected GPU is in + * a bad or unstable state. + * + * For all products. + * + * This method, should be called once before invoking any other methods in the library. + * A reference count of the number of initializations is maintained. 
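+ *
+ * A minimal usage sketch (illustrative only, not part of the formal API description): every
+ * successful call to nvmlInit_v2() or \ref nvmlInitWithFlags should eventually be balanced by a
+ * call to \ref nvmlShutdown. Error handling is abbreviated here.
+ * @code
+ * #include <stdio.h>
+ * #include <nvml.h>
+ *
+ * int main(void)
+ * {
+ *     // nvmlInitWithFlags(NVML_INIT_FLAG_NO_GPUS) could be used instead on systems
+ *     // that may legitimately have no GPUs installed.
+ *     nvmlReturn_t ret = nvmlInit_v2();
+ *     if (ret != NVML_SUCCESS) {
+ *         fprintf(stderr, "nvmlInit_v2 failed: %s\n", nvmlErrorString(ret));
+ *         return 1;
+ *     }
+ *
+ *     // ... issue other NVML queries here ...
+ *
+ *     nvmlShutdown();   // balances the successful nvmlInit_v2() above
+ *     return 0;
+ * }
+ * @endcode
+ *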
Shutdown only occurs + * when the reference count reaches zero. + * + * @return + * - \ref NVML_SUCCESS if NVML has been properly initialized + * - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running + * - \ref NVML_ERROR_NO_PERMISSION if NVML does not have permission to talk to the driver + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlInit_v2(void); + +/** + * nvmlInitWithFlags is a variant of nvmlInit(), that allows passing a set of boolean values + * modifying the behaviour of nvmlInit(). + * Other than the "flags" parameter it is completely similar to \ref nvmlInit_v2. + * + * For all products. + * + * @param flags behaviour modifier flags + * + * @return + * - \ref NVML_SUCCESS if NVML has been properly initialized + * - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running + * - \ref NVML_ERROR_NO_PERMISSION if NVML does not have permission to talk to the driver + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlInitWithFlags(unsigned int flags); + +/** + * Shut down NVML by releasing all GPU resources previously allocated with \ref nvmlInit_v2(). + * + * For all products. + * + * This method should be called after NVML work is done, once for each call to \ref nvmlInit_v2() + * A reference count of the number of initializations is maintained. Shutdown only occurs + * when the reference count reaches zero. For backwards compatibility, no error is reported if + * nvmlShutdown() is called more times than nvmlInit(). + * + * @return + * - \ref NVML_SUCCESS if NVML has been properly shut down + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlShutdown(void); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlErrorReporting Error reporting + * This chapter describes helper functions for error reporting routines. + * @{ + */ +/***************************************************************************************************/ + +/** + * Helper method for converting NVML error codes into readable strings. + * + * For all products. + * + * @param result NVML error code to convert + * + * @return String representation of the error. + * + */ +const DECLDIR char* nvmlErrorString(nvmlReturn_t result); +/** @} */ + + +/***************************************************************************************************/ +/** @defgroup nvmlConstants Constants + * @{ + */ +/***************************************************************************************************/ + +/** + * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetInforomVersion and \ref nvmlDeviceGetInforomImageVersion + */ +#define NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE 16 + +/** + * Buffer size guaranteed to be large enough for storing GPU identifiers. 
+ */
+#define NVML_DEVICE_UUID_BUFFER_SIZE                  80
+
+/**
+ * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetUUID
+ */
+#define NVML_DEVICE_UUID_V2_BUFFER_SIZE               96
+
+/**
+ * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetBoardPartNumber
+ */
+#define NVML_DEVICE_PART_NUMBER_BUFFER_SIZE           80
+
+/**
+ * Buffer size guaranteed to be large enough for \ref nvmlSystemGetDriverVersion
+ */
+#define NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE        80
+
+/**
+ * Buffer size guaranteed to be large enough for \ref nvmlSystemGetNVMLVersion
+ */
+#define NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE          80
+
+/**
+ * Buffer size guaranteed to be large enough for storing GPU device names.
+ */
+#define NVML_DEVICE_NAME_BUFFER_SIZE                  64
+
+/**
+ * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetName
+ */
+#define NVML_DEVICE_NAME_V2_BUFFER_SIZE               96
+
+/**
+ * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetSerial
+ */
+#define NVML_DEVICE_SERIAL_BUFFER_SIZE                30
+
+/**
+ * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetVbiosVersion
+ */
+#define NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE         32
+
+/** @} */
+
+/***************************************************************************************************/
+/** @defgroup nvmlSystemQueries System Queries
+ * This chapter describes the queries that NVML can perform against the local system. These queries
+ * are not device-specific.
+ * @{
+ */
+/***************************************************************************************************/
+
+/**
+ * Retrieves the version of the system's graphics driver.
+ *
+ * For all products.
+ *
+ * The version identifier is an alphanumeric string. It will not exceed 80 characters in length
+ * (including the NULL terminator). See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE.
+ *
+ * @param version Reference in which to return the version identifier
+ * @param length The maximum allowed length of the string returned in \a version
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a version has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL
+ *         - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ */
+nvmlReturn_t DECLDIR nvmlSystemGetDriverVersion(char *version, unsigned int length);
+
+/**
+ * Retrieves the version of the NVML library.
+ *
+ * For all products.
+ *
+ * The version identifier is an alphanumeric string. It will not exceed 80 characters in length
+ * (including the NULL terminator). See \ref nvmlConstants::NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE.
+ *
+ * @param version Reference in which to return the version identifier
+ * @param length The maximum allowed length of the string returned in \a version
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a version has been set
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL
+ *         - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ */
+nvmlReturn_t DECLDIR nvmlSystemGetNVMLVersion(char *version, unsigned int length);
+
+/**
+ * Retrieves the version of the CUDA driver.
+ *
+ * For all products.
+ *
+ * The CUDA driver version returned will be retrieved from the currently installed version of CUDA.
+ * If the CUDA library is not found, this function will return a known supported version number.
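+ *
+ * Illustrative sketch (assumes <nvml.h> and <stdio.h> are included and nvmlInit_v2() has already
+ * succeeded; error handling abbreviated). The packed version number can be decoded with the
+ * NVML_CUDA_DRIVER_VERSION_MAJOR and NVML_CUDA_DRIVER_VERSION_MINOR helper macros defined below:
+ * @code
+ * int cudaVersion = 0;
+ * if (nvmlSystemGetCudaDriverVersion(&cudaVersion) == NVML_SUCCESS) {
+ *     // e.g. a value of 11040 decodes to major version 11, minor version 4
+ *     printf("CUDA driver version: %d.%d\n",
+ *            NVML_CUDA_DRIVER_VERSION_MAJOR(cudaVersion),
+ *            NVML_CUDA_DRIVER_VERSION_MINOR(cudaVersion));
+ * }
+ * @endcode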
+ *
+ * @param cudaDriverVersion Reference in which to return the version identifier
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a cudaDriverVersion has been set
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a cudaDriverVersion is NULL
+ */
+nvmlReturn_t DECLDIR nvmlSystemGetCudaDriverVersion(int *cudaDriverVersion);
+
+/**
+ * Retrieves the version of the CUDA driver from the shared library.
+ *
+ * For all products.
+ *
+ * The returned CUDA driver version is obtained by calling cuDriverGetVersion()
+ *
+ * @param cudaDriverVersion Reference in which to return the version identifier
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a cudaDriverVersion has been set
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a cudaDriverVersion is NULL
+ *         - \ref NVML_ERROR_LIBRARY_NOT_FOUND if \a libcuda.so.1 or libcuda.dll is not found
+ *         - \ref NVML_ERROR_FUNCTION_NOT_FOUND if \a cuDriverGetVersion() is not found in the shared library
+ */
+nvmlReturn_t DECLDIR nvmlSystemGetCudaDriverVersion_v2(int *cudaDriverVersion);
+
+/**
+ * Macros for converting the CUDA driver version number to Major and Minor version numbers.
+ */
+#define NVML_CUDA_DRIVER_VERSION_MAJOR(v) ((v)/1000)
+#define NVML_CUDA_DRIVER_VERSION_MINOR(v) (((v)%1000)/10)
+
+/**
+ * Gets the name of the process with the provided process id
+ *
+ * For all products.
+ *
+ * The returned process name is cropped to the provided length.
+ * The name string is encoded in ANSI.
+ *
+ * @param pid The identifier of the process
+ * @param name Reference in which to return the process name
+ * @param length The maximum allowed length of the string returned in \a name
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a name has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a name is NULL or \a length is 0.
+ *         - \ref NVML_ERROR_NOT_FOUND if the process doesn't exist
+ *         - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlSystemGetProcessName(unsigned int pid, char *name, unsigned int length);
+
+/** @} */
+
+/***************************************************************************************************/
+/** @defgroup nvmlUnitQueries Unit Queries
+ * This chapter describes the queries that NVML can perform against each unit. For S-class systems only.
+ * In each case the unit is identified with an nvmlUnit_t handle. This handle is obtained by
+ * calling \ref nvmlUnitGetHandleByIndex().
+ * @{
+ */
+/***************************************************************************************************/
+
+ /**
+ * Retrieves the number of units in the system.
+ *
+ * For S-class products.
+ *
+ * @param unitCount Reference in which to return the number of units
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a unitCount has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a unitCount is NULL
+ *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlUnitGetCount(unsigned int *unitCount);
+
+/**
+ * Acquire the handle for a particular unit, based on its index.
+ *
+ * For S-class products.
+ *
+ * Valid indices are derived from the \a unitCount returned by \ref nvmlUnitGetCount().
+ * For example, if \a unitCount is 2 the valid indices are 0 and 1, corresponding to UNIT 0 and UNIT 1.
+ * + * The order in which NVML enumerates units has no guarantees of consistency between reboots. + * + * @param index The index of the target unit, >= 0 and < \a unitCount + * @param unit Reference in which to return the unit handle + * + * @return + * - \ref NVML_SUCCESS if \a unit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetHandleByIndex(unsigned int index, nvmlUnit_t *unit); + +/** + * Retrieves the static information associated with a unit. + * + * For S-class products. + * + * See \ref nvmlUnitInfo_t for details on available unit info. + * + * @param unit The identifier of the target unit + * @param info Reference in which to return the unit information + * + * @return + * - \ref NVML_SUCCESS if \a info has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a info is NULL + */ +nvmlReturn_t DECLDIR nvmlUnitGetUnitInfo(nvmlUnit_t unit, nvmlUnitInfo_t *info); + +/** + * Retrieves the LED state associated with this unit. + * + * For S-class products. + * + * See \ref nvmlLedState_t for details on allowed states. + * + * @param unit The identifier of the target unit + * @param state Reference in which to return the current LED state + * + * @return + * - \ref NVML_SUCCESS if \a state has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a state is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlUnitSetLedState() + */ +nvmlReturn_t DECLDIR nvmlUnitGetLedState(nvmlUnit_t unit, nvmlLedState_t *state); + +/** + * Retrieves the PSU stats for the unit. + * + * For S-class products. + * + * See \ref nvmlPSUInfo_t for details on available PSU info. + * + * @param unit The identifier of the target unit + * @param psu Reference in which to return the PSU information + * + * @return + * - \ref NVML_SUCCESS if \a psu has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a psu is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetPsuInfo(nvmlUnit_t unit, nvmlPSUInfo_t *psu); + +/** + * Retrieves the temperature readings for the unit, in degrees C. + * + * For S-class products. + * + * Depending on the product, readings may be available for intake (type=0), + * exhaust (type=1) and board (type=2). 
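+ *
+ * Illustrative sketch (assumes <nvml.h> and <stdio.h> are included, NVML is initialized, and the
+ * helper name print_unit_temperatures is hypothetical; not every reading is supported on every
+ * product):
+ * @code
+ * static void print_unit_temperatures(nvmlUnit_t unit)
+ * {
+ *     const char *labels[] = { "intake", "exhaust", "board" };
+ *     for (unsigned int type = 0; type < 3; type++) {
+ *         unsigned int temp = 0;
+ *         if (nvmlUnitGetTemperature(unit, type, &temp) == NVML_SUCCESS)
+ *             printf("%s temperature: %u C\n", labels[type], temp);
+ *     }
+ * }
+ * @endcode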
+ * + * @param unit The identifier of the target unit + * @param type The type of reading to take + * @param temp Reference in which to return the intake temperature + * + * @return + * - \ref NVML_SUCCESS if \a temp has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a type is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetTemperature(nvmlUnit_t unit, unsigned int type, unsigned int *temp); + +/** + * Retrieves the fan speed readings for the unit. + * + * For S-class products. + * + * See \ref nvmlUnitFanSpeeds_t for details on available fan speed info. + * + * @param unit The identifier of the target unit + * @param fanSpeeds Reference in which to return the fan speed information + * + * @return + * - \ref NVML_SUCCESS if \a fanSpeeds has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a fanSpeeds is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetFanSpeedInfo(nvmlUnit_t unit, nvmlUnitFanSpeeds_t *fanSpeeds); + +/** + * Retrieves the set of GPU devices that are attached to the specified unit. + * + * For S-class products. + * + * The \a deviceCount argument is expected to be set to the size of the input \a devices array. + * + * @param unit The identifier of the target unit + * @param deviceCount Reference in which to provide the \a devices array size, and + * to return the number of attached GPU devices + * @param devices Reference in which to return the references to the attached GPU devices + * + * @return + * - \ref NVML_SUCCESS if \a deviceCount and \a devices have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a deviceCount indicates that the \a devices array is too small + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid, either of \a deviceCount or \a devices is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlUnitGetDevices(nvmlUnit_t unit, unsigned int *deviceCount, nvmlDevice_t *devices); + +/** + * Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system. + * + * For S-class products. + * + * The \a hwbcCount argument is expected to be set to the size of the input \a hwbcEntries array. + * The HIC must be connected to an S-class system for it to be reported by this function. 
+ *
+ * @param hwbcCount Size of hwbcEntries array
+ * @param hwbcEntries Array holding information about hwbc
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a hwbcCount and \a hwbcEntries have been populated
+ *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if either \a hwbcCount or \a hwbcEntries is NULL
+ *         - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a hwbcCount indicates that the \a hwbcEntries array is too small
+ */
+nvmlReturn_t DECLDIR nvmlSystemGetHicVersion(unsigned int *hwbcCount, nvmlHwbcEntry_t *hwbcEntries);
+/** @} */
+
+/***************************************************************************************************/
+/** @defgroup nvmlDeviceQueries Device Queries
+ * This chapter describes the queries that NVML can perform against each device.
+ * In each case the device is identified with an nvmlDevice_t handle. This handle is obtained by
+ * calling one of \ref nvmlDeviceGetHandleByIndex_v2(), \ref nvmlDeviceGetHandleBySerial(),
+ * \ref nvmlDeviceGetHandleByPciBusId_v2(), or \ref nvmlDeviceGetHandleByUUID().
+ * @{
+ */
+/***************************************************************************************************/
+
+ /**
+ * Retrieves the number of compute devices in the system. A compute device is a single GPU.
+ *
+ * For all products.
+ *
+ * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system
+ *       even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device.
+ *       Update your code to handle this error, or use NVML 4.304 or older nvml header file.
+ *       For backward binary compatibility reasons _v1 version of the API is still present in the shared
+ *       library.
+ *       Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to.
+ *
+ * @param deviceCount Reference in which to return the number of accessible devices
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a deviceCount has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL
+ *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetCount_v2(unsigned int *deviceCount);
+
+/**
+ * Get attributes (engine counts etc.) for the given NVML device handle.
+ *
+ * @note This API currently only supports MIG device handles.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ *
+ * @param device NVML device handle
+ * @param attributes Device attributes
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a device attributes were successfully retrieved
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle is invalid
+ *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetAttributes_v2(nvmlDevice_t device, nvmlDeviceAttributes_t *attributes);
+
+/**
+ * Acquire the handle for a particular device, based on its index.
+ *
+ * For all products.
+ *
+ * Valid indices are derived from the \a accessibleDevices count returned by
+ *   \ref nvmlDeviceGetCount_v2(). For example, if \a accessibleDevices is 2 the valid indices
+ *   are 0 and 1, corresponding to GPU 0 and GPU 1.
+ *
+ * The order in which NVML enumerates devices has no guarantees of consistency between reboots.
For that reason it + * is recommended that devices be looked up by their PCI ids or UUID. See + * \ref nvmlDeviceGetHandleByUUID() and \ref nvmlDeviceGetHandleByPciBusId_v2(). + * + * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. + * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs if: + * - The target GPU is an SLI slave + * + * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system + * even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device. + * Update your code to handle this error, or use NVML 4.304 or older nvml header file. + * For backward binary compatibility reasons _v1 version of the API is still present in the shared + * library. + * Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to. + * + * This means that nvmlDeviceGetHandleByIndex_v2 and _v1 can return different devices for the same index. + * If you don't touch macros that map old (_v1) versions to _v2 versions at the top of the file you don't + * need to worry about that. + * + * @param index The index of the target GPU, >= 0 and < \a accessibleDevices + * @param device Reference in which to return the device handle + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a device is NULL + * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetIndex + * @see nvmlDeviceGetCount + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByIndex_v2(unsigned int index, nvmlDevice_t *device); + +/** + * Acquire the handle for a particular device, based on its board serial number. + * + * For Fermi &tm; or newer fully supported devices. + * + * This number corresponds to the value printed directly on the board, and to the value returned by + * \ref nvmlDeviceGetSerial(). + * + * @deprecated Since more than one GPU can exist on a single board this function is deprecated in favor + * of \ref nvmlDeviceGetHandleByUUID. + * For dual GPU boards this function will return NVML_ERROR_INVALID_ARGUMENT. 
+ * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs as it searches for the target GPU + * + * @param serial The board serial number of the target GPU + * @param device Reference in which to return the device handle + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a serial is invalid, \a device is NULL or more than one + * device has the same serial (dual GPU boards) + * - \ref NVML_ERROR_NOT_FOUND if \a serial does not match a valid device on the system + * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetSerial + * @see nvmlDeviceGetHandleByUUID + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleBySerial(const char *serial, nvmlDevice_t *device); + +/** + * Acquire the handle for a particular device, based on its globally unique immutable UUID associated with each device. + * + * For all products. + * + * @param uuid The UUID of the target GPU or MIG instance + * @param device Reference in which to return the device handle or MIG device handle + * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs as it searches for the target GPU + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a uuid is invalid or \a device is null + * - \ref NVML_ERROR_NOT_FOUND if \a uuid does not match a valid device on the system + * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetUUID + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByUUID(const char *uuid, nvmlDevice_t *device); + +/** + * Acquire the handle for a particular device, based on its PCI bus id. + * + * For all products. + * + * This value corresponds to the nvmlPciInfo_t::busId returned by \ref nvmlDeviceGetPciInfo_v3(). + * + * Starting from NVML 5, this API causes NVML to initialize the target GPU + * NVML may initialize additional GPUs if: + * - The target GPU is an SLI slave + * + * \note NVML 4.304 and older version of nvmlDeviceGetHandleByPciBusId"_v1" returns NVML_ERROR_NOT_FOUND + * instead of NVML_ERROR_NO_PERMISSION. 
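+ *
+ * Illustrative sketch (assumes <nvml.h> is included and NVML is initialized; the bus id string is
+ * a placeholder for a real value, e.g. one previously reported by \ref nvmlDeviceGetPciInfo_v3):
+ * @code
+ * nvmlDevice_t device;
+ * if (nvmlDeviceGetHandleByPciBusId_v2("00000000:65:00.0", &device) == NVML_SUCCESS) {
+ *     // device can now be passed to the device query APIs below
+ * }
+ * @endcode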
+ * + * @param pciBusId The PCI bus id of the target GPU + * @param device Reference in which to return the device handle + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciBusId is invalid or \a device is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a pciBusId does not match a valid device on the system + * - \ref NVML_ERROR_INSUFFICIENT_POWER if the attached device has improperly attached external power cables + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device + * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHandleByPciBusId_v2(const char *pciBusId, nvmlDevice_t *device); + +/** + * Retrieves the name of this device. + * + * For all products. + * + * The name is an alphanumeric string that denotes a particular product, e.g. Tesla &tm; C2070. It will not + * exceed 96 characters in length (including the NULL terminator). See \ref + * nvmlConstants::NVML_DEVICE_NAME_V2_BUFFER_SIZE. + * + * When used with MIG device handles the API returns MIG device names which can be used to identify devices + * based on their attributes. + * + * @param device The identifier of the target device + * @param name Reference in which to return the product name + * @param length The maximum allowed length of the string returned in \a name + * + * @return + * - \ref NVML_SUCCESS if \a name has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetName(nvmlDevice_t device, char *name, unsigned int length); + +/** + * Retrieves the brand of this device. + * + * For all products. + * + * The type is a member of \ref nvmlBrandType_t defined above. + * + * @param device The identifier of the target device + * @param type Reference in which to return the product brand type + * + * @return + * - \ref NVML_SUCCESS if \a name has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a type is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBrand(nvmlDevice_t device, nvmlBrandType_t *type); + +/** + * Retrieves the NVML index of this device. + * + * For all products. + * + * Valid indices are derived from the \a accessibleDevices count returned by + * \ref nvmlDeviceGetCount_v2(). For example, if \a accessibleDevices is 2 the valid indices + * are 0 and 1, corresponding to GPU 0 and GPU 1. + * + * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it + * is recommended that devices be looked up by their PCI ids or GPU UUID. 
See + * \ref nvmlDeviceGetHandleByPciBusId_v2() and \ref nvmlDeviceGetHandleByUUID(). + * + * When used with MIG device handles this API returns indices that can be + * passed to \ref nvmlDeviceGetMigDeviceHandleByIndex to retrieve an identical handle. + * MIG device indices are unique within a device. + * + * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. + * + * @param device The identifier of the target device + * @param index Reference in which to return the NVML index of the device + * + * @return + * - \ref NVML_SUCCESS if \a index has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetHandleByIndex() + * @see nvmlDeviceGetCount() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetIndex(nvmlDevice_t device, unsigned int *index); + +/** + * Retrieves the globally unique board serial number associated with this device's board. + * + * For all products with an inforom. + * + * The serial number is an alphanumeric string that will not exceed 30 characters (including the NULL terminator). + * This number matches the serial number tag that is physically attached to the board. See \ref + * nvmlConstants::NVML_DEVICE_SERIAL_BUFFER_SIZE. + * + * @param device The identifier of the target device + * @param serial Reference in which to return the board/module serial number + * @param length The maximum allowed length of the string returned in \a serial + * + * @return + * - \ref NVML_SUCCESS if \a serial has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a serial is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSerial(nvmlDevice_t device, char *serial, unsigned int length); + + +/***************************************************************************************************/ + +/** @defgroup nvmlAffinity CPU and Memory Affinity + * This chapter describes NVML operations that are associated with CPU and memory + * affinity. + * @{ + */ +/***************************************************************************************************/ + +//! Scope of NUMA node for affinity queries +#define NVML_AFFINITY_SCOPE_NODE 0 +//! Scope of processor socket for affinity queries +#define NVML_AFFINITY_SCOPE_SOCKET 1 + +typedef unsigned int nvmlAffinityScope_t; + +/** + * Retrieves an array of unsigned ints (sized to nodeSetSize) of bitmasks with + * the ideal memory affinity within node or socket for the device. + * For example, if NUMA node 0, 1 are ideal within the socket for the device and nodeSetSize == 1, + * result[0] = 0x3 + * + * \note If requested scope is not applicable to the target topology, the API + * will fall back to reporting the memory affinity for the immediate non-I/O + * ancestor of the device. + * + * For Kepler &tm; or newer fully supported devices. + * Supported on Linux only. 
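+ *
+ * Illustrative sketch (assumes <nvml.h> and <stdio.h> are included, NVML is initialized and
+ * \a device was obtained earlier; a single-element nodeSet covers systems with at most 64 NUMA
+ * nodes on 64-bit machines):
+ * @code
+ * unsigned long nodeSet[1] = { 0 };
+ * if (nvmlDeviceGetMemoryAffinity(device, 1, nodeSet, NVML_AFFINITY_SCOPE_SOCKET) == NVML_SUCCESS) {
+ *     // Each set bit marks an ideal NUMA node, e.g. 0x3 means nodes 0 and 1.
+ *     printf("Ideal NUMA node mask: 0x%lx\n", nodeSet[0]);
+ * }
+ * @endcode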
+ *
+ * @param device The identifier of the target device
+ * @param nodeSetSize The size of the nodeSet array that is safe to access
+ * @param nodeSet Array reference in which to return a bitmask of NODEs, 64 NODEs per
+ *                    unsigned long on 64-bit machines, 32 on 32-bit machines
+ * @param scope Scope that changes the default behavior
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a NUMA node Affinity has been filled
+ *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, nodeSetSize == 0, nodeSet is NULL or scope is invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+
+nvmlReturn_t DECLDIR nvmlDeviceGetMemoryAffinity(nvmlDevice_t device, unsigned int nodeSetSize, unsigned long *nodeSet, nvmlAffinityScope_t scope);
+
+/**
+ * Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the
+ * ideal CPU affinity within node or socket for the device.
+ * For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2,
+ *    result[0] = 0x3, result[1] = 0x3
+ *
+ * \note If requested scope is not applicable to the target topology, the API
+ *       will fall back to reporting the CPU affinity for the immediate non-I/O
+ *       ancestor of the device.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ *
+ * @param device The identifier of the target device
+ * @param cpuSetSize The size of the cpuSet array that is safe to access
+ * @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per
+ *                    unsigned long on 64-bit machines, 32 on 32-bit machines
+ * @param scope Scope that changes the default behavior
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a cpuAffinity has been filled
+ *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, cpuSet is NULL or scope is invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+
+nvmlReturn_t DECLDIR nvmlDeviceGetCpuAffinityWithinScope(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long *cpuSet, nvmlAffinityScope_t scope);
+
+/**
+ * Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the ideal CPU affinity for the device
+ * For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2,
+ *    result[0] = 0x3, result[1] = 0x3
+ * This is equivalent to calling \ref nvmlDeviceGetCpuAffinityWithinScope with \ref NVML_AFFINITY_SCOPE_NODE.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ * Supported on Linux only.
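+ *
+ * Illustrative sketch (assumes <nvml.h> and <stdio.h> are included, NVML is initialized and
+ * \a device was obtained earlier; two unsigned longs cover up to 128 CPUs on 64-bit machines):
+ * @code
+ * unsigned long cpuSet[2] = { 0, 0 };
+ * const unsigned int bitsPerWord = 8 * sizeof(unsigned long);
+ * if (nvmlDeviceGetCpuAffinity(device, 2, cpuSet) == NVML_SUCCESS) {
+ *     for (unsigned int cpu = 0; cpu < 2 * bitsPerWord; cpu++) {
+ *         if (cpuSet[cpu / bitsPerWord] & (1UL << (cpu % bitsPerWord)))
+ *             printf("CPU %u is ideal for this device\n", cpu);
+ *     }
+ * }
+ * @endcode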
+ * + * @param device The identifier of the target device + * @param cpuSetSize The size of the cpuSet array that is safe to access + * @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per + * unsigned long on 64-bit machines, 32 on 32-bit machines + * + * @return + * - \ref NVML_SUCCESS if \a cpuAffinity has been filled + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, or cpuSet is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCpuAffinity(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long *cpuSet); + +/** + * Sets the ideal affinity for the calling thread and device using the guidelines + * given in nvmlDeviceGetCpuAffinity(). Note, this is a change as of version 8.0. + * Older versions set the affinity for a calling process and all children. + * Currently supports up to 1024 processors. + * + * For Kepler &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if the calling process has been successfully bound + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetCpuAffinity(nvmlDevice_t device); + +/** + * Clear all affinity bindings for the calling thread. Note, this is a change as of version + * 8.0 as older versions cleared the affinity for a calling process and all children. + * + * For Kepler &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if the calling process has been successfully unbound + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceClearCpuAffinity(nvmlDevice_t device); + +/** + * Retrieve the common ancestor for two devices + * For all products. + * Supported on Linux only. + * + * @param device1 The identifier of the first device + * @param device2 The identifier of the second device + * @param pathInfo A \ref nvmlGpuTopologyLevel_t that gives the path type + * + * @return + * - \ref NVML_SUCCESS if \a pathInfo has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1, or \a device2 is invalid, or \a pathInfo is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature + * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery + */ + +/** @} */ +nvmlReturn_t DECLDIR nvmlDeviceGetTopologyCommonAncestor(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuTopologyLevel_t *pathInfo); + +/** + * Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level + * For all products. + * Supported on Linux only. 
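+ *
+ * Illustrative sketch of the two-call pattern (assumes <nvml.h> and <stdlib.h> are included, NVML
+ * is initialized, and \a device plus a \a level value were obtained earlier):
+ * @code
+ * unsigned int count = 0;
+ * // First call with count == 0 to learn how many GPUs match at this level.
+ * if (nvmlDeviceGetTopologyNearestGpus(device, level, &count, NULL) == NVML_SUCCESS && count > 0) {
+ *     nvmlDevice_t *neighbors = malloc(count * sizeof(nvmlDevice_t));
+ *     if (neighbors != NULL &&
+ *         nvmlDeviceGetTopologyNearestGpus(device, level, &count, neighbors) == NVML_SUCCESS) {
+ *         // neighbors[0..count-1] now hold the nearest GPUs at this interconnect level
+ *     }
+ *     free(neighbors);
+ * }
+ * @endcode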
+ * + * @param device The identifier of the first device + * @param level The \ref nvmlGpuTopologyLevel_t level to search for other GPUs + * @param count When zero, is set to the number of matching GPUs such that \a deviceArray + * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count + * number of device handles. + * @param deviceArray An array of device handles for GPUs found at \a level + * + * @return + * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a level, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature + * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTopologyNearestGpus(nvmlDevice_t device, nvmlGpuTopologyLevel_t level, unsigned int *count, nvmlDevice_t *deviceArray); + +/** + * Retrieve the set of GPUs that have a CPU affinity with the given CPU number + * For all products. + * Supported on Linux only. + * + * @param cpuNumber The CPU number + * @param count When zero, is set to the number of matching GPUs such that \a deviceArray + * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count + * number of device handles. + * @param deviceArray An array of device handles for GPUs found with affinity to \a cpuNumber + * + * @return + * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a cpuNumber, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature + * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery + */ +nvmlReturn_t DECLDIR nvmlSystemGetTopologyGpuSet(unsigned int cpuNumber, unsigned int *count, nvmlDevice_t *deviceArray); + +/** + * Retrieve the status for a given p2p capability index between a given pair of GPU + * + * @param device1 The first device + * @param device2 The second device + * @param p2pIndex p2p Capability Index being looked for between \a device1 and \a device2 + * @param p2pStatus Reference in which to return the status of the \a p2pIndex + * between \a device1 and \a device2 + * @return + * - \ref NVML_SUCCESS if \a p2pStatus has been populated + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1 or \a device2 or \a p2pIndex is invalid or \a p2pStatus is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetP2PStatus(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuP2PCapsIndex_t p2pIndex,nvmlGpuP2PStatus_t *p2pStatus); + +/** + * Retrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string, + * that augments the immutable, board serial identifier. + * + * For all products. + * + * The UUID is a globally unique identifier. It is the only available identifier for pre-Fermi-architecture products. + * It does NOT correspond to any identifier printed on the board. It will not exceed 96 characters in length + * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_UUID_V2_BUFFER_SIZE. + * + * When used with MIG device handles the API returns globally unique UUIDs which can be used to identify MIG + * devices across both GPU and MIG devices. UUIDs are immutable for the lifetime of a MIG device. 
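+ *
+ * Illustrative sketch (assumes <nvml.h> and <stdio.h> are included, NVML is initialized and
+ * \a device was obtained earlier) using the V2 buffer size defined above:
+ * @code
+ * char uuid[NVML_DEVICE_UUID_V2_BUFFER_SIZE];
+ * if (nvmlDeviceGetUUID(device, uuid, sizeof(uuid)) == NVML_SUCCESS)
+ *     printf("GPU UUID: %s\n", uuid);
+ * @endcode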
+ *
+ * @param device The identifier of the target device
+ * @param uuid Reference in which to return the GPU UUID
+ * @param length The maximum allowed length of the string returned in \a uuid
+ *
+ * @return
+ *         - \ref NVML_SUCCESS if \a uuid has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a uuid is NULL
+ *         - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ *         - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetUUID(nvmlDevice_t device, char *uuid, unsigned int length);
+
+/**
+ * Retrieve the MDEV UUID of a vGPU instance.
+ *
+ * The MDEV UUID is a globally unique identifier of the mdev device assigned to the VM, and is returned as a 5-part hexadecimal string,
+ * not exceeding 80 characters in length (including the NULL terminator).
+ * MDEV UUID is displayed only on KVM platform.
+ * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE.
+ *
+ * For Maxwell &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance Identifier of the target vGPU instance
+ * @param mdevUuid Pointer to caller-supplied buffer to hold MDEV UUID
+ * @param size Size of buffer in bytes
+ *
+ * @return
+ *         - \ref NVML_SUCCESS successful completion
+ *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_NOT_SUPPORTED on any hypervisor other than KVM
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mdevUuid is NULL
+ *         - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ *         - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetMdevUUID(nvmlVgpuInstance_t vgpuInstance, char *mdevUuid, unsigned int size);
+
+/**
+ * Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for
+ * each GPU will have the form /dev/nvidia[minor number].
+ *
+ * For all products.
+ * Supported only for Linux
+ *
+ * @param device The identifier of the target device
+ * @param minorNumber Reference in which to return the minor number for the device
+ * @return
+ *         - \ref NVML_SUCCESS if the minor number is successfully retrieved
+ *         - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minorNumber is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ *         - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMinorNumber(nvmlDevice_t device, unsigned int *minorNumber);
+
+/**
+ * Retrieves the device board part number which is programmed into the board's InfoROM
+ *
+ * For all products.
+ * + * @param device Identifier of the target device + * @param partNumber Reference to the buffer to return + * @param length Length of the buffer reference + * + * @return + * - \ref NVML_SUCCESS if \a partNumber has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NOT_SUPPORTED if the needed VBIOS fields have not been filled + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a serial is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBoardPartNumber(nvmlDevice_t device, char* partNumber, unsigned int length); + +/** + * Retrieves the version information for the device's infoROM object. + * + * For all products with an inforom. + * + * Fermi and higher parts have non-volatile on-board memory for persisting device info, such as aggregate + * ECC counts. The version of the data structures in this memory may change from time to time. It will not + * exceed 16 characters in length (including the NULL terminator). + * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. + * + * See \ref nvmlInforomObject_t for details on the available infoROM objects. + * + * @param device The identifier of the target device + * @param object The target infoROM object + * @param version Reference in which to return the infoROM version + * @param length The maximum allowed length of the string returned in \a version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetInforomImageVersion + */ +nvmlReturn_t DECLDIR nvmlDeviceGetInforomVersion(nvmlDevice_t device, nvmlInforomObject_t object, char *version, unsigned int length); + +/** + * Retrieves the global infoROM image version + * + * For all products with an inforom. + * + * Image version just like VBIOS version uniquely describes the exact version of the infoROM flashed on the board + * in contrast to infoROM object version which is only an indicator of supported features. + * Version string will not exceed 16 characters in length (including the NULL terminator). + * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. 
+ * + * @param device The identifier of the target device + * @param version Reference in which to return the infoROM image version + * @param length The maximum allowed length of the string returned in \a version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetInforomVersion + */ +nvmlReturn_t DECLDIR nvmlDeviceGetInforomImageVersion(nvmlDevice_t device, char *version, unsigned int length); + +/** + * Retrieves the checksum of the configuration stored in the device's infoROM. + * + * For all products with an inforom. + * + * Can be used to make sure that two GPUs have the exact same configuration. + * Current checksum takes into account configuration stored in PWR and ECC infoROM objects. + * Checksum can change between driver releases or when user changes configuration (e.g. disable/enable ECC) + * + * @param device The identifier of the target device + * @param checksum Reference in which to return the infoROM configuration checksum + * + * @return + * - \ref NVML_SUCCESS if \a checksum has been set + * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's checksum couldn't be retrieved due to infoROM corruption + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a checksum is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice_t device, unsigned int *checksum); + +/** + * Reads the infoROM from the flash and verifies the checksums. + * + * For all products with an inforom. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if infoROM is not corrupted + * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's infoROM is corrupted + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceValidateInforom(nvmlDevice_t device); + +/** + * Retrieves the display mode for the device. + * + * For all products. + * + * This method indicates whether a physical display (e.g. monitor) is currently connected to + * any of the device's connectors. + * + * See \ref nvmlEnableState_t for details on allowed modes. 
+ * + * @param device The identifier of the target device + * @param display Reference in which to return the display mode + * + * @return + * - \ref NVML_SUCCESS if \a display has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a display is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDisplayMode(nvmlDevice_t device, nvmlEnableState_t *display); + +/** + * Retrieves the display active state for the device. + * + * For all products. + * + * This method indicates whether a display is initialized on the device. + * For example whether X Server is attached to this device and has allocated memory for the screen. + * + * Display can be active even when no monitor is physically attached. + * + * See \ref nvmlEnableState_t for details on allowed modes. + * + * @param device The identifier of the target device + * @param isActive Reference in which to return the display active state + * + * @return + * - \ref NVML_SUCCESS if \a isActive has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isActive is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDisplayActive(nvmlDevice_t device, nvmlEnableState_t *isActive); + +/** + * Retrieves the persistence mode associated with this device. + * + * For all products. + * For Linux only. + * + * When driver persistence mode is enabled the driver software state is not torn down when the last + * client disconnects. By default this feature is disabled. + * + * See \ref nvmlEnableState_t for details on allowed modes. + * + * @param device The identifier of the target device + * @param mode Reference in which to return the current driver persistence mode + * + * @return + * - \ref NVML_SUCCESS if \a mode has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetPersistenceMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t *mode); + +/** + * Retrieves the PCI attributes of this device. + * + * For all products. + * + * See \ref nvmlPciInfo_t for details on the available PCI info. 
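+ *
+ * Illustrative sketch (assumes <nvml.h> and <stdio.h> are included, NVML is initialized and
+ * \a device was obtained earlier):
+ * @code
+ * nvmlPciInfo_t pci;
+ * if (nvmlDeviceGetPciInfo_v3(device, &pci) == NVML_SUCCESS)
+ *     printf("PCI bus id: %s\n", pci.busId);
+ * @endcode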
+ *
+ * @param device The identifier of the target device
+ * @param pci Reference in which to return the PCI info
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a pci has been populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo_v3(nvmlDevice_t device, nvmlPciInfo_t *pci);
+
+/**
+ * Retrieves the maximum PCIe link generation possible with this device and system
+ *
+ * For example, for a generation 2 PCIe device attached to a generation 1 PCIe bus, the max link generation this
+ * function will report is generation 1.
+ *
+ * For Fermi &tm; or newer fully supported devices.
+ *
+ * @param device The identifier of the target device
+ * @param maxLinkGen Reference in which to return the max PCIe link generation
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a maxLinkGen has been populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGen is null
+ * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int *maxLinkGen);
+
+/**
+ * Retrieves the maximum PCIe link width possible with this device and system
+ *
+ * For example, for a device with a 16x PCIe bus width attached to an 8x PCIe system bus, this function will report
+ * a max link width of 8.
+ *
+ * For Fermi &tm; or newer fully supported devices.
+ *
+ * @param device The identifier of the target device
+ * @param maxLinkWidth Reference in which to return the max PCIe link width
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a maxLinkWidth has been populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkWidth is null
+ * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkWidth(nvmlDevice_t device, unsigned int *maxLinkWidth);
+
+/**
+ * Retrieves the current PCIe link generation
+ *
+ * For Fermi &tm; or newer fully supported devices.
+ *
+ * @param device The identifier of the target device
+ * @param currLinkGen Reference in which to return the current PCIe link generation
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a currLinkGen has been populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkGen is null
+ * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkGeneration(nvmlDevice_t device, unsigned int *currLinkGen);
+
+/**
+ * Retrieves the current PCIe link width
+ *
+ * For Fermi &tm; or newer fully supported devices.
+ *
+ * @param device The identifier of the target device
+ * @param currLinkWidth Reference in which to return the current PCIe link width
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a currLinkWidth has been populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkWidth is null
+ * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkWidth(nvmlDevice_t device, unsigned int *currLinkWidth);
+
+/**
+ * Retrieve PCIe utilization information.
+ * This function queries a byte counter over a 20ms interval and thus reports the
+ * PCIe throughput over that interval.
+ *
+ * For Maxwell &tm; or newer fully supported devices.
+ *
+ * This method is not supported in virtual machines running virtual GPU (vGPU).
+ *
+ * @param device The identifier of the target device
+ * @param counter The specific counter that should be queried; see \ref nvmlPcieUtilCounter_t
+ * @param value Reference in which to return throughput in KB/s
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a value has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a counter is invalid, or \a value is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetPcieThroughput(nvmlDevice_t device, nvmlPcieUtilCounter_t counter, unsigned int *value);
+
+/**
+ * Retrieve the PCIe replay counter.
+ *
+ * For Kepler &tm; or newer fully supported devices.
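As a usage illustration for the counter-based PCIe throughput query above, the sketch below (editorial, not part of the header) samples both transmit and receive counters; it assumes NVML is initialized and the device handle was obtained as in the earlier sketch, and that NVML_PCIE_UTIL_TX_BYTES / NVML_PCIE_UTIL_RX_BYTES are the nvmlPcieUtilCounter_t values defined elsewhere in this header.

```c
#include <stdio.h>
#include <nvml.h>

/* Assumes NVML is initialized and `device` was obtained as in the earlier sketch. */
static void print_pcie_throughput(nvmlDevice_t device)
{
    unsigned int tx_kbps = 0, rx_kbps = 0;

    /* Each call samples a byte counter over a ~20ms interval, so the pair takes ~40ms. */
    if (nvmlDeviceGetPcieThroughput(device, NVML_PCIE_UTIL_TX_BYTES, &tx_kbps) == NVML_SUCCESS &&
        nvmlDeviceGetPcieThroughput(device, NVML_PCIE_UTIL_RX_BYTES, &rx_kbps) == NVML_SUCCESS)
        printf("PCIe throughput: TX %u KB/s, RX %u KB/s\n", tx_kbps, rx_kbps);
}
```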
+ * + * @param device The identifier of the target device + * @param value Reference in which to return the counter's value + * + * @return + * - \ref NVML_SUCCESS if \a value has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a value is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPcieReplayCounter(nvmlDevice_t device, unsigned int *value); + +/** + * Retrieves the current clock speeds for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlClockType_t for details on available clock information. + * + * @param device The identifier of the target device + * @param type Identify which clock domain to query + * @param clock Reference in which to return the clock speed in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clock has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); + +/** + * Retrieves the maximum clock speeds for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlClockType_t for details on available clock information. + * + * \note On GPUs from Fermi family current P0 clocks (reported by \ref nvmlDeviceGetClockInfo) can differ from max clocks + * by few MHz. + * + * @param device The identifier of the target device + * @param type Identify which clock domain to query + * @param clock Reference in which to return the clock speed in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clock has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMaxClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); + +/** + * Retrieves the current setting of a clock that applications will use unless an overspec situation occurs. + * Can be changed using \ref nvmlDeviceSetApplicationsClocks. + * + * For Kepler &tm; or newer fully supported devices. 
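A short sketch (editorial, not part of the header) of querying current versus maximum clocks with nvmlDeviceGetClockInfo and nvmlDeviceGetMaxClockInfo; the NVML_CLOCK_* values are the nvmlClockType_t domains defined elsewhere in this header.

```c
#include <stdio.h>
#include <nvml.h>

/* Assumes NVML is initialized and `device` was obtained as in the earlier sketch. */
static void print_clocks(nvmlDevice_t device)
{
    static const nvmlClockType_t types[] = { NVML_CLOCK_GRAPHICS, NVML_CLOCK_SM, NVML_CLOCK_MEM };
    static const char *names[] = { "graphics", "sm", "memory" };

    for (int i = 0; i < 3; i++) {
        unsigned int curMHz = 0, maxMHz = 0;
        if (nvmlDeviceGetClockInfo(device, types[i], &curMHz) == NVML_SUCCESS &&
            nvmlDeviceGetMaxClockInfo(device, types[i], &maxMHz) == NVML_SUCCESS)
            printf("%s clock: %u MHz (max %u MHz)\n", names[i], curMHz, maxMHz);
    }
}
```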
+ *
+ * @param device The identifier of the target device
+ * @param clockType Identify which clock domain to query
+ * @param clockMHz Reference in which to return the clock in MHz
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a clockMHz has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz);
+
+/**
+ * Retrieves the default applications clock that the GPU boots with or
+ * defaults to after a \ref nvmlDeviceResetApplicationsClocks call.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param device The identifier of the target device
+ * @param clockType Identify which clock domain to query
+ * @param clockMHz Reference in which to return the default clock in MHz
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a clockMHz has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ * \see nvmlDeviceGetApplicationsClock
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetDefaultApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz);
+
+/**
+ * Resets the application clock to the default value
+ *
+ * This is the applications clock that will be used after system reboot or driver reload.
+ * The default value is constant, but the current value can be changed using \ref nvmlDeviceSetApplicationsClocks.
+ *
+ * On Pascal and newer hardware, if clocks were previously locked with \ref nvmlDeviceSetApplicationsClocks,
+ * this call will unlock clocks. This returns clocks to their default behavior of automatically boosting above
+ * base clocks as thermal limits allow.
+ *
+ * @see nvmlDeviceGetApplicationsClock
+ * @see nvmlDeviceSetApplicationsClocks
+ *
+ * For Fermi &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
+ *
+ * @param device The identifier of the target device
+ *
+ * @return
+ * - \ref NVML_SUCCESS if new settings were successfully set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceResetApplicationsClocks(nvmlDevice_t device);
+
+/**
+ * Retrieves the clock speed for the clock specified by the clock type and clock ID.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ * + * @param device The identifier of the target device + * @param clockType Identify which clock domain to query + * @param clockId Identify which clock in the domain to query + * @param clockMHz Reference in which to return the clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clockMHz has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetClock(nvmlDevice_t device, nvmlClockType_t clockType, nvmlClockId_t clockId, unsigned int *clockMHz); + +/** + * Retrieves the customer defined maximum boost clock speed specified by the given clock type. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param clockType Identify which clock domain to query + * @param clockMHz Reference in which to return the clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a clockMHz has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device or the \a clockType on this device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMaxCustomerBoostClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); + +/** + * Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param count Reference in which to provide the \a clocksMHz array size, and + * to return the number of elements + * @param clocksMHz Reference in which to return the clock in MHz + * + * @return + * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to the number of + * required elements) + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetApplicationsClocks + * @see nvmlDeviceGetSupportedGraphicsClocks + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedMemoryClocks(nvmlDevice_t device, unsigned int *count, unsigned int *clocksMHz); + +/** + * Retrieves the list of possible graphics clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. + * + * For Kepler &tm; or newer fully supported devices. 
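The supported-clock queries use the count-in/count-out convention described above: the caller passes the capacity of the array in \a count and NVML either fills it or returns NVML_ERROR_INSUFFICIENT_SIZE with the required element count. An editorial sketch for nvmlDeviceGetSupportedMemoryClocks follows (the same pattern applies, per memory clock, to nvmlDeviceGetSupportedGraphicsClocks described next); NVML is assumed to be initialized with a device handle obtained as in the earlier sketch.

```c
#include <stdio.h>
#include <nvml.h>

/* Assumes NVML is initialized and `device` was obtained as in the earlier sketch. */
static void print_supported_memory_clocks(nvmlDevice_t device)
{
    unsigned int clocksMHz[128];
    unsigned int count = 128;  /* in: array capacity, out: number of entries returned */

    nvmlReturn_t ret = nvmlDeviceGetSupportedMemoryClocks(device, &count, clocksMHz);
    if (ret == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; i++)
            printf("supported memory clock: %u MHz\n", clocksMHz[i]);
    } else if (ret == NVML_ERROR_INSUFFICIENT_SIZE) {
        /* count now holds the number of elements the buffer would need. */
        printf("buffer too small, %u entries required\n", count);
    }
}
```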
+ *
+ * @param device The identifier of the target device
+ * @param memoryClockMHz Memory clock for which to return possible graphics clocks
+ * @param count Reference in which to provide the \a clocksMHz array size, and
+ * to return the number of elements
+ * @param clocksMHz Reference in which to return the clocks in MHz
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_NOT_FOUND if the specified \a memoryClockMHz is not a supported frequency
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ * @see nvmlDeviceSetApplicationsClocks
+ * @see nvmlDeviceGetSupportedMemoryClocks
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetSupportedGraphicsClocks(nvmlDevice_t device, unsigned int memoryClockMHz, unsigned int *count, unsigned int *clocksMHz);
+
+/**
+ * Retrieve the current state of Auto Boosted clocks on a device and store it in \a isEnabled
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
+ * to maximize performance as thermal limits allow.
+ *
+ * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
+ * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
+ * behavior.
+ *
+ * @param device The identifier of the target device
+ * @param isEnabled Where to store the current state of Auto Boosted clocks of the target device
+ * @param defaultIsEnabled Where to store the default Auto Boosted clocks behavior of the target device that the device will
+ * revert to when no applications are using the GPU
+ *
+ * @return
+ * - \ref NVML_SUCCESS If \a isEnabled has been set with the Auto Boosted clocks state of \a device
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isEnabled is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t *isEnabled, nvmlEnableState_t *defaultIsEnabled);
+
+/**
+ * Try to set the current state of Auto Boosted clocks on a device.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
+ * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
+ * rates are desired.
+ *
+ * Non-root users may use this API by default, but root can restrict access to it by calling
+ * \ref nvmlDeviceSetAPIRestriction with apiType=NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS.
+ * Note: Persistence Mode is required to modify current Auto Boost settings; therefore, it must be enabled.
+ *
+ * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
+ * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
+ * behavior.
+ *
+ * @param device The identifier of the target device
+ * @param enabled What state to try to set Auto Boosted clocks of the target device to
+ *
+ * @return
+ * - \ref NVML_SUCCESS If the Auto Boosted clocks were successfully set to the state specified by \a enabled
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled);
+
+/**
+ * Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will
+ * return to when no compute processes (e.g. CUDA applications with an active context) are running.
+ *
+ * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
+ * Requires root/admin permissions.
+ *
+ * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
+ * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
+ * rates are desired.
+ *
+ * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
+ * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
+ * behavior.
+ *
+ * @param device The identifier of the target device
+ * @param enabled What state to try to set default Auto Boosted clocks of the target device to
+ * @param flags Flags that change the default behavior. Currently unused.
+ *
+ * @return
+ * - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state.
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled, unsigned int flags);
+
+
+/**
+ * Retrieves the intended operating speed of the device's fan.
+ *
+ * Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the
+ * output will not match the actual fan speed.
+ *
+ * For all discrete products with dedicated fans.
+ *
+ * The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed.
+ * This value may exceed 100% in certain cases.
+ * + * @param device The identifier of the target device + * @param speed Reference in which to return the fan speed percentage + * + * @return + * - \ref NVML_SUCCESS if \a speed has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a speed is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed(nvmlDevice_t device, unsigned int *speed); + + +/** + * Retrieves the intended operating speed of the device's specified fan. + * + * Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the + * output will not match the actual fan speed. + * + * For all discrete products with dedicated fans. + * + * The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed. + * This value may exceed 100% in certain cases. + * + * @param device The identifier of the target device + * @param fan The index of the target fan, zero indexed. + * @param speed Reference in which to return the fan speed percentage + * + * @return + * - \ref NVML_SUCCESS if \a speed has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a fan is not an acceptable index, or \a speed is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan or is newer than Maxwell + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int * speed); + +/** + * Retrieves the number of fans on the device. + * + * For all discrete products with dedicated fans. + * + * @param device The identifier of the target device + * @param numFans The number of fans + * + * @return + * - \ref NVML_SUCCESS if \a fan number query was successful + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a numFans is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNumFans(nvmlDevice_t device, unsigned int *numFans); + +/** + * Retrieves the current temperature readings for the device, in degrees C. + * + * For all products. + * + * See \ref nvmlTemperatureSensors_t for details on available temperature sensors. 
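An editorial sketch (not part of the header) combining nvmlDeviceGetNumFans with the per-fan nvmlDeviceGetFanSpeed_v2 query declared above; it assumes NVML is initialized and the device handle was obtained as in the earlier sketch.

```c
#include <stdio.h>
#include <nvml.h>

/* Assumes NVML is initialized and `device` was obtained as in the earlier sketch. */
static void print_fan_speeds(nvmlDevice_t device)
{
    unsigned int numFans = 0;
    if (nvmlDeviceGetNumFans(device, &numFans) != NVML_SUCCESS)
        return;

    for (unsigned int fan = 0; fan < numFans; fan++) {
        unsigned int speed = 0;  /* intended speed, as a percentage of the max noise-tolerance speed */
        if (nvmlDeviceGetFanSpeed_v2(device, fan, &speed) == NVML_SUCCESS)
            printf("fan %u: %u%%\n", fan, speed);
    }
}
```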
+ * + * @param device The identifier of the target device + * @param sensorType Flag that indicates which sensor reading to retrieve + * @param temp Reference in which to return the temperature reading + * + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sensorType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have the specified sensor + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTemperature(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp); + +/** + * Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C. + * + * For Kepler &tm; or newer fully supported devices. + * + * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. + * + * @param device The identifier of the target device + * @param thresholdType The type of threshold value queried + * @param temp Reference in which to return the temperature reading + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, unsigned int *temp); + +/** + * Sets the temperature threshold for the GPU with the specified threshold type in degrees C. + * + * For Maxwell &tm; or newer fully supported devices. + * + * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. + * + * @param device The identifier of the target device + * @param thresholdType The type of threshold value to be set + * @param temp Reference which hold the value to be set + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, int *temp); + +/** + * Retrieves the current performance state for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlPstates_t for details on allowed performance states. 
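A small editorial sketch (not part of the header) reading the current GPU temperature and the slowdown threshold; it assumes NVML is initialized, the device handle was obtained as in the earlier sketch, and that NVML_TEMPERATURE_GPU and NVML_TEMPERATURE_THRESHOLD_SLOWDOWN are the sensor/threshold enum values defined elsewhere in this header.

```c
#include <stdio.h>
#include <nvml.h>

/* Assumes NVML is initialized and `device` was obtained as in the earlier sketch. */
static void print_temperature(nvmlDevice_t device)
{
    unsigned int tempC = 0, slowdownC = 0;

    if (nvmlDeviceGetTemperature(device, NVML_TEMPERATURE_GPU, &tempC) == NVML_SUCCESS)
        printf("GPU temperature: %u C\n", tempC);

    if (nvmlDeviceGetTemperatureThreshold(device, NVML_TEMPERATURE_THRESHOLD_SLOWDOWN, &slowdownC) == NVML_SUCCESS)
        printf("slowdown threshold: %u C\n", slowdownC);
}
```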
+ * + * @param device The identifier of the target device + * @param pState Reference in which to return the performance state reading + * + * @return + * - \ref NVML_SUCCESS if \a pState has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPerformanceState(nvmlDevice_t device, nvmlPstates_t *pState); + +/** + * Retrieves current clocks throttling reasons. + * + * For all fully supported products. + * + * \note More than one bit can be enabled at the same time. Multiple reasons can be affecting clocks at once. + * + * @param device The identifier of the target device + * @param clocksThrottleReasons Reference in which to return bitmask of active clocks throttle + * reasons + * + * @return + * - \ref NVML_SUCCESS if \a clocksThrottleReasons has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clocksThrottleReasons is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlClocksThrottleReasons + * @see nvmlDeviceGetSupportedClocksThrottleReasons + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCurrentClocksThrottleReasons(nvmlDevice_t device, unsigned long long *clocksThrottleReasons); + +/** + * Retrieves bitmask of supported clocks throttle reasons that can be returned by + * \ref nvmlDeviceGetCurrentClocksThrottleReasons + * + * For all fully supported products. + * + * This method is not supported in virtual machines running virtual GPU (vGPU). + * + * @param device The identifier of the target device + * @param supportedClocksThrottleReasons Reference in which to return bitmask of supported + * clocks throttle reasons + * + * @return + * - \ref NVML_SUCCESS if \a supportedClocksThrottleReasons has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a supportedClocksThrottleReasons is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlClocksThrottleReasons + * @see nvmlDeviceGetCurrentClocksThrottleReasons + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedClocksThrottleReasons(nvmlDevice_t device, unsigned long long *supportedClocksThrottleReasons); + +/** + * Deprecated: Use \ref nvmlDeviceGetPerformanceState. This function exposes an incorrect generalization. + * + * Retrieve the current performance state for the device. + * + * For Fermi &tm; or newer fully supported devices. + * + * See \ref nvmlPstates_t for details on allowed performance states. 
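Because the throttle-reason query above returns a bitmask with possibly several bits set at once, callers test each reason bit individually. A sketch follows (editorial, not part of the header); it assumes the nvmlClocksThrottleReason* bitmask constants (e.g. nvmlClocksThrottleReasonSwPowerCap) are defined elsewhere in this header and that the device handle was obtained as in the earlier sketch.

```c
#include <stdio.h>
#include <nvml.h>

/* Assumes NVML is initialized and `device` was obtained as in the earlier sketch. */
static void print_throttle_reasons(nvmlDevice_t device)
{
    unsigned long long reasons = 0;
    if (nvmlDeviceGetCurrentClocksThrottleReasons(device, &reasons) != NVML_SUCCESS)
        return;

    /* More than one bit can be set at the same time. */
    if (reasons & nvmlClocksThrottleReasonGpuIdle)
        printf("throttled: GPU idle\n");
    if (reasons & nvmlClocksThrottleReasonSwPowerCap)
        printf("throttled: software power cap\n");
    if (reasons & nvmlClocksThrottleReasonHwSlowdown)
        printf("throttled: hardware slowdown\n");
    if (reasons == 0)
        printf("clocks are not currently throttled\n");
}
```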
+ *
+ * @param device The identifier of the target device
+ * @param pState Reference in which to return the performance state reading
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a pState has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetPowerState(nvmlDevice_t device, nvmlPstates_t *pState);
+
+/**
+ * This API has been deprecated.
+ *
+ * Retrieves the power management mode associated with this device.
+ *
+ * For products from the Fermi family.
+ * - Requires \a NVML_INFOROM_POWER version 3.0 or higher.
+ *
+ * For products from the Kepler or newer families.
+ * - Does not require \a NVML_INFOROM_POWER object.
+ *
+ * This flag indicates whether any power management algorithm is currently active on the device. An
+ * enabled state does not necessarily mean the device is being actively throttled -- only that
+ * the driver will do so if the appropriate conditions are met.
+ *
+ * See \ref nvmlEnableState_t for details on allowed modes.
+ *
+ * @param device The identifier of the target device
+ * @param mode Reference in which to return the current power management mode
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a mode has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementMode(nvmlDevice_t device, nvmlEnableState_t *mode);
+
+/**
+ * Retrieves the power management limit associated with this device.
+ *
+ * For Fermi &tm; or newer fully supported devices.
+ *
+ * The power limit defines the upper boundary for the card's power draw. If
+ * the card's total power draw reaches this limit, the power management algorithm kicks in.
+ *
+ * This reading is only available if power management mode is supported.
+ * See \ref nvmlDeviceGetPowerManagementMode.
+ *
+ * @param device The identifier of the target device
+ * @param limit Reference in which to return the power management limit in milliwatts
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a limit has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimit(nvmlDevice_t device, unsigned int *limit);
+
+/**
+ * Retrieves information about possible values of power management limits on this device.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ * + * @param device The identifier of the target device + * @param minLimit Reference in which to return the minimum power management limit in milliwatts + * @param maxLimit Reference in which to return the maximum power management limit in milliwatts + * + * @return + * - \ref NVML_SUCCESS if \a minLimit and \a maxLimit have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minLimit or \a maxLimit is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetPowerManagementLimit + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimitConstraints(nvmlDevice_t device, unsigned int *minLimit, unsigned int *maxLimit); + +/** + * Retrieves default power management limit on this device, in milliwatts. + * Default power management limit is a power management limit that the device boots with. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param defaultLimit Reference in which to return the default power management limit in milliwatts + * + * @return + * - \ref NVML_SUCCESS if \a defaultLimit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementDefaultLimit(nvmlDevice_t device, unsigned int *defaultLimit); + +/** + * Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory) + * + * For Fermi &tm; or newer fully supported devices. + * + * On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw. + * + * It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode. + * + * @param device The identifier of the target device + * @param power Reference in which to return the power usage information + * + * @return + * - \ref NVML_SUCCESS if \a power has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerUsage(nvmlDevice_t device, unsigned int *power); + +/** + * Retrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded + * + * For Volta &tm; or newer fully supported devices. 
+ * + * @param device The identifier of the target device + * @param energy Reference in which to return the energy consumption information + * + * @return + * - \ref NVML_SUCCESS if \a energy has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a energy is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support energy readings + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTotalEnergyConsumption(nvmlDevice_t device, unsigned long long *energy); + +/** + * Get the effective power limit that the driver enforces after taking into account all limiters + * + * Note: This can be different from the \ref nvmlDeviceGetPowerManagementLimit if other limits are set elsewhere + * This includes the out of band power limit interface + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The device to communicate with + * @param limit Reference in which to return the power management limit in milliwatts + * + * @return + * - \ref NVML_SUCCESS if \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEnforcedPowerLimit(nvmlDevice_t device, unsigned int *limit); + +/** + * Retrieves the current GOM and pending GOM (the one that GPU will switch to after reboot). + * + * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. + * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. + * Not supported on Quadro ® and Tesla &tm; C-class products. + * + * @param device The identifier of the target device + * @param current Reference in which to return the current GOM + * @param pending Reference in which to return the pending GOM + * + * @return + * - \ref NVML_SUCCESS if \a mode has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a current or \a pending is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlGpuOperationMode_t + * @see nvmlDeviceSetGpuOperationMode + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t *current, nvmlGpuOperationMode_t *pending); + +/** + * Retrieves the amount of used, free, reserved and total memory available on the device, in bytes. + * The reserved amount is supported on version 2 only. + * + * For all products. + * + * Enabling ECC reduces the amount of total available memory, due to the extra required parity bits. + * Under WDDM most device memory is allocated and managed on startup by Windows. 
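The power queries above all report milliwatts, so a common pattern is to read the instantaneous draw together with the enforced limit and the settable range. An editorial sketch follows; it assumes NVML is initialized and the device handle was obtained as in the earlier sketch.

```c
#include <stdio.h>
#include <nvml.h>

/* Assumes NVML is initialized and `device` was obtained as in the earlier sketch.
 * All readings below are reported by NVML in milliwatts. */
static void print_power(nvmlDevice_t device)
{
    unsigned int usage = 0, enforced = 0, minLimit = 0, maxLimit = 0;

    if (nvmlDeviceGetPowerUsage(device, &usage) == NVML_SUCCESS &&
        nvmlDeviceGetEnforcedPowerLimit(device, &enforced) == NVML_SUCCESS)
        printf("power draw: %.1f W of %.1f W enforced limit\n",
               usage / 1000.0, enforced / 1000.0);

    if (nvmlDeviceGetPowerManagementLimitConstraints(device, &minLimit, &maxLimit) == NVML_SUCCESS)
        printf("settable limit range: %.1f W .. %.1f W\n",
               minLimit / 1000.0, maxLimit / 1000.0);
}
```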
+ * + * Under Linux and Windows TCC, the reported amount of used memory is equal to the sum of memory allocated + * by all active channels on the device. + * + * See \ref nvmlMemory_v2_t for details on available memory info. + * + * @note In MIG mode, if device handle is provided, the API returns aggregate + * information, only if the caller has appropriate privileges. Per-instance + * information can be queried by using specific MIG device handles. + * + * @note nvmlDeviceGetMemoryInfo_v2 adds additional memory information. + * + * @param device The identifier of the target device + * @param memory Reference in which to return the memory information + * + * @return + * - \ref NVML_SUCCESS if \a memory has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo(nvmlDevice_t device, nvmlMemory_t *memory); +nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo_v2(nvmlDevice_t device, nvmlMemory_v2_t *memory); + +/** + * Retrieves the current compute mode for the device. + * + * For all products. + * + * See \ref nvmlComputeMode_t for details on allowed compute modes. + * + * @param device The identifier of the target device + * @param mode Reference in which to return the current compute mode + * + * @return + * - \ref NVML_SUCCESS if \a mode has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetComputeMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetComputeMode(nvmlDevice_t device, nvmlComputeMode_t *mode); + +/** + * Retrieves the CUDA compute capability of the device. + * + * For all products. + * + * Returns the major and minor compute capability version numbers of the + * device. The major and minor versions are equivalent to the + * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR and + * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR attributes that would be + * returned by CUDA's cuDeviceGetAttribute(). + * + * @param device The identifier of the target device + * @param major Reference in which to return the major CUDA compute capability + * @param minor Reference in which to return the minor CUDA compute capability + * + * @return + * - \ref NVML_SUCCESS if \a major and \a minor have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a major or \a minor are NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCudaComputeCapability(nvmlDevice_t device, int *major, int *minor); + +/** + * Retrieves the current and pending ECC modes for the device. + * + * For Fermi &tm; or newer fully supported devices. 
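A brief editorial sketch of the memory query above; it assumes NVML is initialized, the device handle was obtained as in the earlier sketch, and that nvmlMemory_t carries total/free/used byte counts (nvmlMemory_v2_t adds the reserved amount and is queried the same way via nvmlDeviceGetMemoryInfo_v2).

```c
#include <stdio.h>
#include <nvml.h>

/* Assumes NVML is initialized and `device` was obtained as in the earlier sketch. */
static void print_memory(nvmlDevice_t device)
{
    nvmlMemory_t mem;
    if (nvmlDeviceGetMemoryInfo(device, &mem) == NVML_SUCCESS)
        printf("FB memory: %llu MiB used / %llu MiB total (%llu MiB free)\n",
               mem.used >> 20, mem.total >> 20, mem.free >> 20);
}
```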
+ * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher. + * + * Changing ECC modes requires a reboot. The "pending" ECC mode refers to the target mode following + * the next reboot. + * + * See \ref nvmlEnableState_t for details on allowed modes. + * + * @param device The identifier of the target device + * @param current Reference in which to return the current ECC mode + * @param pending Reference in which to return the pending ECC mode + * + * @return + * - \ref NVML_SUCCESS if \a current and \a pending have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or either \a current or \a pending is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetEccMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEccMode(nvmlDevice_t device, nvmlEnableState_t *current, nvmlEnableState_t *pending); + +/** + * Retrieves the device boardId from 0-N. + * Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with + * \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well. + * The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across + * reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and + * the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will + * always return those values but they will always be different from each other). + * + * + * For Fermi &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param boardId Reference in which to return the device's board ID + * + * @return + * - \ref NVML_SUCCESS if \a boardId has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBoardId(nvmlDevice_t device, unsigned int *boardId); + +/** + * Retrieves whether the device is on a Multi-GPU Board + * Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value. + * + * For Fermi &tm; or newer fully supported devices. 
+ * + * @param device The identifier of the target device + * @param multiGpuBool Reference in which to return a zero or non-zero value + * to indicate whether the device is on a multi GPU board + * + * @return + * - \ref NVML_SUCCESS if \a multiGpuBool has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMultiGpuBoard(nvmlDevice_t device, unsigned int *multiGpuBool); + +/** + * Retrieves the total ECC error counts for the device. + * + * For Fermi &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher. + * Requires ECC Mode to be enabled. + * + * The total error count is the sum of errors across each of the separate memory systems, i.e. the total set of + * errors across the entire device. + * + * See \ref nvmlMemoryErrorType_t for a description of available error types.\n + * See \ref nvmlEccCounterType_t for a description of available counter types. + * + * @param device The identifier of the target device + * @param errorType Flag that specifies the type of the errors. + * @param counterType Flag that specifies the counter-type of the errors. + * @param eccCounts Reference in which to return the specified ECC errors + * + * @return + * - \ref NVML_SUCCESS if \a eccCounts has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceClearEccErrorCounts() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTotalEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, unsigned long long *eccCounts); + +/** + * Retrieves the detailed ECC error counts for the device. + * + * @deprecated This API supports only a fixed set of ECC error locations + * On different GPU architectures different locations are supported + * See \ref nvmlDeviceGetMemoryErrorCounter + * + * For Fermi &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based ECC counts. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other ECC counts. + * Requires ECC Mode to be enabled. + * + * Detailed errors provide separate ECC counts for specific parts of the memory system. + * + * Reports zero for unsupported ECC error counters when a subset of ECC error counters are supported. + * + * See \ref nvmlMemoryErrorType_t for a description of available bit types.\n + * See \ref nvmlEccCounterType_t for a description of available counter types.\n + * See \ref nvmlEccErrorCounts_t for a description of provided detailed ECC counts. + * + * @param device The identifier of the target device + * @param errorType Flag that specifies the type of the errors. 
+ * @param counterType Flag that specifies the counter-type of the errors.
+ * @param eccCounts Reference in which to return the specified ECC errors
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a eccCounts has been populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ * @see nvmlDeviceClearEccErrorCounts()
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetDetailedEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, nvmlEccErrorCounts_t *eccCounts);
+
+/**
+ * Retrieves the requested memory error counter for the device.
+ *
+ * For Fermi &tm; or newer fully supported devices.
+ * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts.
+ * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts.
+ *
+ * Only applicable to devices with ECC.
+ *
+ * Requires ECC Mode to be enabled.
+ *
+ * @note On MIG-enabled GPUs, per instance information can be queried using specific
+ * MIG device handles. Per instance information is currently only supported for
+ * non-DRAM uncorrectable volatile errors. Querying volatile errors using device
+ * handles is currently not supported.
+ *
+ * See \ref nvmlMemoryErrorType_t for a description of available memory error types.\n
+ * See \ref nvmlEccCounterType_t for a description of available counter types.\n
+ * See \ref nvmlMemoryLocation_t for a description of available counter locations.\n
+ *
+ * @param device The identifier of the target device
+ * @param errorType Flag that specifies the type of error.
+ * @param counterType Flag that specifies the counter-type of the errors.
+ * @param locationType Specifies the location of the counter.
+ * @param count Reference in which to return the ECC counter
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a count has been populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType, \a counterType or \a locationType is
+ * invalid, or \a count is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMemoryErrorCounter(nvmlDevice_t device, nvmlMemoryErrorType_t errorType,
+ nvmlEccCounterType_t counterType,
+ nvmlMemoryLocation_t locationType, unsigned long long *count);
+
+/**
+ * Retrieves the current utilization rates for the device's major subsystems.
+ *
+ * For Fermi &tm; or newer fully supported devices.
+ *
+ * See \ref nvmlUtilization_t for details on available utilization rates.
+ *
+ * \note During driver initialization, when ECC is enabled, one can see high GPU and Memory Utilization readings.
+ * This is caused by the ECC Memory Scrubbing mechanism that is performed during driver initialization.
+ *
+ * @note On MIG-enabled GPUs, querying device utilization rates is not currently supported.
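An editorial sketch of the ECC counter queries above, reading the device-wide totals via nvmlDeviceGetTotalEccErrors; it assumes ECC is enabled, the device handle was obtained as in the earlier sketch, and that NVML_MEMORY_ERROR_TYPE_* and NVML_VOLATILE_ECC are the nvmlMemoryErrorType_t / nvmlEccCounterType_t values defined elsewhere in this header.

```c
#include <stdio.h>
#include <nvml.h>

/* Assumes NVML is initialized, ECC is enabled, and `device` was obtained as in the earlier sketch. */
static void print_ecc_counts(nvmlDevice_t device)
{
    unsigned long long corrected = 0, uncorrected = 0;

    if (nvmlDeviceGetTotalEccErrors(device, NVML_MEMORY_ERROR_TYPE_CORRECTED,
                                    NVML_VOLATILE_ECC, &corrected) == NVML_SUCCESS &&
        nvmlDeviceGetTotalEccErrors(device, NVML_MEMORY_ERROR_TYPE_UNCORRECTED,
                                    NVML_VOLATILE_ECC, &uncorrected) == NVML_SUCCESS)
        printf("volatile ECC errors: %llu corrected, %llu uncorrected\n",
               corrected, uncorrected);
}
```

The same call with NVML_AGGREGATE_ECC (if defined as the aggregate counter type) returns lifetime counts instead of counts since the last driver reload.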
+ * + * @param device The identifier of the target device + * @param utilization Reference in which to return the utilization information + * + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a utilization is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetUtilizationRates(nvmlDevice_t device, nvmlUtilization_t *utilization); + +/** + * Retrieves the current utilization and sampling size in microseconds for the Encoder + * + * For Kepler &tm; or newer fully supported devices. + * + * @note On MIG-enabled GPUs, querying encoder utilization is not currently supported. + * + * @param device The identifier of the target device + * @param utilization Reference to an unsigned int for encoder utilization info + * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US + * + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEncoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); + +/** + * Retrieves the current capacity of the device's encoder, as a percentage of maximum encoder capacity with valid values in the range 0-100. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param encoderQueryType Type of encoder to query + * @param encoderCapacity Reference to an unsigned int for the encoder capacity + * + * @return + * - \ref NVML_SUCCESS if \a encoderCapacity is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a encoderCapacity is NULL, or \a device or \a encoderQueryType + * are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if device does not support the encoder specified in \a encodeQueryType + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetEncoderCapacity (nvmlDevice_t device, nvmlEncoderType_t encoderQueryType, unsigned int *encoderCapacity); + +/** + * Retrieves the current encoder statistics for a given device. + * + * For Maxwell &tm; or newer fully supported devices. 
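A short editorial sketch of the utilization queries above; it assumes NVML is initialized, the device handle was obtained as in the earlier sketch, and that nvmlUtilization_t reports GPU and memory utilization as percentages in its gpu/memory fields.

```c
#include <stdio.h>
#include <nvml.h>

/* Assumes NVML is initialized and `device` was obtained as in the earlier sketch. */
static void print_utilization(nvmlDevice_t device)
{
    nvmlUtilization_t util;
    unsigned int enc = 0, periodUs = 0;

    if (nvmlDeviceGetUtilizationRates(device, &util) == NVML_SUCCESS)
        printf("utilization: gpu %u%%, memory %u%%\n", util.gpu, util.memory);

    if (nvmlDeviceGetEncoderUtilization(device, &enc, &periodUs) == NVML_SUCCESS)
        printf("encoder: %u%% (sampled over %u us)\n", enc, periodUs);
}
```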
+ *
+ * @param device                            The identifier of the target device
+ * @param sessionCount                      Reference to an unsigned int for count of active encoder sessions
+ * @param averageFps                        Reference to an unsigned int for trailing average FPS of all active sessions
+ * @param averageLatency                    Reference to an unsigned int for encode latency in microseconds
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount, or \a device or \a averageFps,
+ *                                    or \a averageLatency is NULL
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetEncoderStats (nvmlDevice_t device, unsigned int *sessionCount,
+                                                unsigned int *averageFps, unsigned int *averageLatency);
+
+/**
+ * Retrieves information about active encoder sessions on a target device.
+ *
+ * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfos. The
+ * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+ * written to the buffer.
+ *
+ * If the supplied buffer is not large enough to accommodate the active session array, the function returns
+ * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount.
+ * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return
+ * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount.
+ *
+ * For Maxwell &tm; or newer fully supported devices.
+ *
+ * @param device                            The identifier of the target device
+ * @param sessionCount                      Reference to caller supplied array size, and returns the number of sessions.
+ * @param sessionInfos                      Reference in which to return the session information
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a sessionInfos is fetched
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL.
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetEncoderSessions(nvmlDevice_t device, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfos);
+
+/**
+ * Retrieves the current utilization and sampling size in microseconds for the Decoder
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @note On MIG-enabled GPUs, querying decoder utilization is not currently supported.
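+ *
+ * Example (illustrative sketch, not from the upstream NVML documentation; assumes NVML is
+ * initialized, \a dev is a valid device handle and <stdio.h> is included):
+ * @code
+ * unsigned int decUtil, periodUs;
+ * if (nvmlDeviceGetDecoderUtilization(dev, &decUtil, &periodUs) == NVML_SUCCESS)
+ *     printf("decoder %u%% over the last %u us\n", decUtil, periodUs);
+ * @endcode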
+ *
+ * @param device                            The identifier of the target device
+ * @param utilization                       Reference to an unsigned int for decoder utilization info
+ * @param samplingPeriodUs                  Reference to an unsigned int for the sampling period in US
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a utilization has been populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs);
+
+/**
+* Retrieves the active frame buffer capture sessions statistics for a given device.
+*
+* For Maxwell &tm; or newer fully supported devices.
+*
+* @param device                            The identifier of the target device
+* @param fbcStats                          Reference to nvmlFBCStats_t structure containing NvFBC stats
+*
+* @return
+* - \ref NVML_SUCCESS if \a fbcStats is fetched
+* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+* - \ref NVML_ERROR_INVALID_ARGUMENT if \a fbcStats is NULL
+* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+* - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+nvmlReturn_t DECLDIR nvmlDeviceGetFBCStats(nvmlDevice_t device, nvmlFBCStats_t *fbcStats);
+
+/**
+* Retrieves information about active frame buffer capture sessions on a target device.
+*
+* An array of active FBC sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The
+* array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+* written to the buffer.
+*
+* If the supplied buffer is not large enough to accommodate the active session array, the function returns
+* NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount.
+* To query the number of active FBC sessions, call this function with *sessionCount = 0. The code will return
+* NVML_SUCCESS with number of active FBC sessions updated in *sessionCount.
+*
+* For Maxwell &tm; or newer fully supported devices.
+*
+* @note hResolution, vResolution, averageFPS and averageLatency data for a FBC session returned in \a sessionInfo may
+*       be zero if there are no new frames captured since the session started.
+*
+* @param device                            The identifier of the target device
+* @param sessionCount                      Reference to caller supplied array size, and returns the number of sessions.
+* @param sessionInfo                       Reference in which to return the session information
+*
+* @return
+* - \ref NVML_SUCCESS if \a sessionInfo is fetched
+* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+* - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount
+* - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL.
+* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlDeviceGetFBCSessions(nvmlDevice_t device, unsigned int *sessionCount, nvmlFBCSessionInfo_t *sessionInfo); + +/** + * Retrieves the current and pending driver model for the device. + * + * For Fermi &tm; or newer fully supported devices. + * For windows only. + * + * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached + * to the device it must run in WDDM mode. TCC mode is preferred if a display is not attached. + * + * See \ref nvmlDriverModel_t for details on available driver models. + * + * @param device The identifier of the target device + * @param current Reference in which to return the current driver model + * @param pending Reference in which to return the pending driver model + * + * @return + * - \ref NVML_SUCCESS if either \a current and/or \a pending have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or both \a current and \a pending are NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetDriverModel() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDriverModel(nvmlDevice_t device, nvmlDriverModel_t *current, nvmlDriverModel_t *pending); + +/** + * Get VBIOS version of the device. + * + * For all products. + * + * The VBIOS version may change from time to time. It will not exceed 32 characters in length + * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE. + * + * @param device The identifier of the target device + * @param version Reference to which to return the VBIOS version + * @param length The maximum allowed length of the string returned in \a version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a version is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVbiosVersion(nvmlDevice_t device, char *version, unsigned int length); + +/** + * Get Bridge Chip Information for all the bridge chips on the board. + * + * For all fully supported products. + * Only applicable to multi-GPU products. 
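+ *
+ * Example (illustrative sketch, not from the upstream NVML documentation; assumes NVML is
+ * initialized, \a dev is a valid device handle and <stdio.h> is included; \a bridgeCount is
+ * assumed to be the count field of \ref nvmlBridgeChipHierarchy_t):
+ * @code
+ * nvmlBridgeChipHierarchy_t hierarchy;
+ * nvmlReturn_t ret = nvmlDeviceGetBridgeChipInfo(dev, &hierarchy);
+ * if (ret == NVML_SUCCESS)
+ *     printf("%u bridge chip(s) reported\n", (unsigned int)hierarchy.bridgeCount);
+ * else if (ret == NVML_ERROR_NOT_SUPPORTED)
+ *     printf("no bridge chips on this board\n");
+ * @endcode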
+ * + * @param device The identifier of the target device + * @param bridgeHierarchy Reference to the returned bridge chip Hierarchy + * + * @return + * - \ref NVML_SUCCESS if bridge chip exists + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a bridgeInfo is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if bridge chip not supported on the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetBridgeChipInfo(nvmlDevice_t device, nvmlBridgeChipHierarchy_t *bridgeHierarchy); + +/** + * Get information about processes with a compute context on a device + * + * For Fermi &tm; or newer fully supported devices. + * + * This function returns information only about compute running processes (e.g. CUDA application which have + * active context). Any graphics applications (e.g. using OpenGL, DirectX) won't be listed by this function. + * + * To query the current number of running compute processes, call this function with *infoCount = 0. The + * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call + * \a infos is allowed to be NULL. + * + * The usedGpuMemory field returned is all of the memory used by the application. + * + * Keep in mind that information returned by this call is dynamic and the number of elements might change in + * time. Allocate more space for \a infos table in case new compute processes are spawned. + * + * @note In MIG mode, if device handle is provided, the API returns aggregate information, only if + * the caller has appropriate privileges. Per-instance information can be queried by using + * specific MIG device handles. + * Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode. + * + * @param device The device handle or MIG device handle + * @param infoCount Reference in which to provide the \a infos array size, and + * to return the number of returned elements + * @param infos Reference in which to return the process information + * + * @return + * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small + * \a infoCount will contain minimal amount of space necessary for + * the call to complete + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see \ref nvmlSystemGetProcessName + */ +nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); + +/** + * Get information about processes with a graphics context on a device + * + * For Kepler &tm; or newer fully supported devices. + * + * This function returns information only about graphics based processes + * (eg. 
applications using OpenGL, DirectX) + * + * To query the current number of running graphics processes, call this function with *infoCount = 0. The + * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call + * \a infos is allowed to be NULL. + * + * The usedGpuMemory field returned is all of the memory used by the application. + * + * Keep in mind that information returned by this call is dynamic and the number of elements might change in + * time. Allocate more space for \a infos table in case new graphics processes are spawned. + * + * @note In MIG mode, if device handle is provided, the API returns aggregate information, only if + * the caller has appropriate privileges. Per-instance information can be queried by using + * specific MIG device handles. + * Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode. + * + * @param device The device handle or MIG device handle + * @param infoCount Reference in which to provide the \a infos array size, and + * to return the number of returned elements + * @param infos Reference in which to return the process information + * + * @return + * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small + * \a infoCount will contain minimal amount of space necessary for + * the call to complete + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see \ref nvmlSystemGetProcessName + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses_v3(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); + +/** + * Get information about processes with a MPS compute context on a device + * + * For Volta &tm; or newer fully supported devices. + * + * This function returns information only about compute running processes (e.g. CUDA application which have + * active context) utilizing MPS. Any graphics applications (e.g. using OpenGL, DirectX) won't be listed by + * this function. + * + * To query the current number of running compute processes, call this function with *infoCount = 0. The + * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call + * \a infos is allowed to be NULL. + * + * The usedGpuMemory field returned is all of the memory used by the application. + * + * Keep in mind that information returned by this call is dynamic and the number of elements might change in + * time. Allocate more space for \a infos table in case new compute processes are spawned. + * + * @note In MIG mode, if device handle is provided, the API returns aggregate information, only if + * the caller has appropriate privileges. Per-instance information can be queried by using + * specific MIG device handles. + * Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode. 
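+ *
+ * Example of the two-pass sizing pattern described above (illustrative sketch, not from the
+ * upstream NVML documentation; assumes NVML is initialized, \a dev is a valid device handle
+ * and <stdio.h>/<stdlib.h> are included; \a pid and \a usedGpuMemory are assumed fields of
+ * \ref nvmlProcessInfo_t):
+ * @code
+ * unsigned int count = 0;
+ * nvmlReturn_t ret = nvmlDeviceGetMPSComputeRunningProcesses_v3(dev, &count, NULL);
+ * if (ret == NVML_ERROR_INSUFFICIENT_SIZE && count > 0) {
+ *     // A robust caller would over-allocate or retry, since new processes may appear in between.
+ *     nvmlProcessInfo_t *infos = malloc(count * sizeof(*infos));
+ *     if (infos && nvmlDeviceGetMPSComputeRunningProcesses_v3(dev, &count, infos) == NVML_SUCCESS) {
+ *         for (unsigned int i = 0; i < count; i++)
+ *             printf("pid %u uses %llu bytes\n", infos[i].pid, infos[i].usedGpuMemory);
+ *     }
+ *     free(infos);
+ * }
+ * @endcode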
+ * + * @param device The device handle or MIG device handle + * @param infoCount Reference in which to provide the \a infos array size, and + * to return the number of returned elements + * @param infos Reference in which to return the process information + * + * @return + * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small + * \a infoCount will contain minimal amount of space necessary for + * the call to complete + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see \ref nvmlSystemGetProcessName + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); + +/** + * Check if the GPU devices are on the same physical board. + * + * For all fully supported products. + * + * @param device1 The first GPU device + * @param device2 The second GPU device + * @param onSameBoard Reference in which to return the status. + * Non-zero indicates that the GPUs are on the same board. + * + * @return + * - \ref NVML_SUCCESS if \a onSameBoard has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a dev1 or \a dev2 are invalid or \a onSameBoard is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the either GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceOnSameBoard(nvmlDevice_t device1, nvmlDevice_t device2, int *onSameBoard); + +/** + * Retrieves the root/admin permissions on the target API. See \a nvmlRestrictedAPI_t for the list of supported APIs. + * If an API is restricted only root users can call that API. See \a nvmlDeviceSetAPIRestriction to change current permissions. + * + * For all fully supported products. + * + * @param device The identifier of the target device + * @param apiType Target API type for this operation + * @param isRestricted Reference in which to return the current restriction + * NVML_FEATURE_ENABLED indicates that the API is root-only + * NVML_FEATURE_DISABLED indicates that the API is accessible to all users + * + * @return + * - \ref NVML_SUCCESS if \a isRestricted has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a apiType incorrect or \a isRestricted is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device or the device does not support + * the feature that is being queried (E.G. 
Enabling/disabling Auto Boosted clocks is
+ *                                        not supported by the device)
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ * @see nvmlRestrictedAPI_t
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t *isRestricted);
+
+/**
+ * Gets recent samples for the GPU.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * Based on type, this method can be used to fetch the power, utilization or clock samples maintained in the buffer by
+ * the driver.
+ *
+ * Power, Utilization and Clock samples are returned as type "unsigned int" for the union nvmlValue_t.
+ *
+ * To get the size of samples that user needs to allocate, the method is invoked with samples set to NULL.
+ * The returned samplesCount will provide the number of samples that can be queried. The user needs to
+ * allocate the buffer with size as samplesCount * sizeof(nvmlSample_t).
+ *
+ * lastSeenTimeStamp represents CPU timestamp in microseconds. Set it to 0 to fetch all the samples maintained by the
+ * underlying buffer. Set lastSeenTimeStamp to one of the timeStamps retrieved from the date of the previous query
+ * to get more recent samples.
+ *
+ * This method fetches the number of entries which can be accommodated in the provided samples array, and the
+ * reference samplesCount is updated to indicate how many samples were actually retrieved. The advantage of using this
+ * method for samples in contrast to polling via existing methods is to get higher frequency data at lower polling cost.
+ *
+ * @note On MIG-enabled GPUs, querying the following sample types, NVML_GPU_UTILIZATION_SAMPLES, NVML_MEMORY_UTILIZATION_SAMPLES
+ *       NVML_ENC_UTILIZATION_SAMPLES and NVML_DEC_UTILIZATION_SAMPLES, is not currently supported.
+ *
+ * @param device                        The identifier for the target device
+ * @param type                          Type of sampling event
+ * @param lastSeenTimeStamp             Return only samples with timestamp greater than lastSeenTimeStamp.
+ * @param sampleValType                 Output parameter to represent the type of sample value as described in nvmlSampleVal_t
+ * @param sampleCount                   Reference to provide the number of elements which can be queried in samples array
+ * @param samples                       Reference in which samples are returned
+ *
+ * @return
+ * - \ref NVML_SUCCESS if samples are successfully retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sampleCount is NULL or
+ *                                    reference to \a sampleCount is 0 for non null \a samples
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp,
+                                          nvmlValueType_t *sampleValType, unsigned int *sampleCount, nvmlSample_t *samples);
+
+/**
+ * Gets Total, Available and Used size of BAR1 memory.
+ *
+ * BAR1 is used to map the FB (device memory) so that it can be directly accessed by the CPU or by 3rd party
+ * devices (peer-to-peer on the PCIE bus).
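+ *
+ * Example (illustrative sketch, not from the upstream NVML documentation; assumes NVML is
+ * initialized, \a dev is a valid device handle and <stdio.h> is included; \a bar1Total,
+ * \a bar1Used and \a bar1Free are assumed fields of \ref nvmlBAR1Memory_t):
+ * @code
+ * nvmlBAR1Memory_t bar1;
+ * if (nvmlDeviceGetBAR1MemoryInfo(dev, &bar1) == NVML_SUCCESS)
+ *     printf("BAR1: %llu bytes total, %llu used, %llu free\n",
+ *            bar1.bar1Total, bar1.bar1Used, bar1.bar1Free);
+ * @endcode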
+ *
+ * @note In MIG mode, if device handle is provided, the API returns aggregate
+ *       information, only if the caller has appropriate privileges. Per-instance
+ *       information can be queried by using specific MIG device handles.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param device                               The identifier of the target device
+ * @param bar1Memory                           Reference in which BAR1 memory
+ *                                             information is returned.
+ *
+ * @return
+ * - \ref NVML_SUCCESS if BAR1 memory is successfully retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a bar1Memory is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t *bar1Memory);
+
+/**
+ * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power
+ * or thermal constraints.
+ *
+ * The method is important to users who are trying to understand if their GPUs throttle at any point during their applications. The
+ * difference in violation times at two different reference times gives an indication of a GPU throttling event.
+ *
+ * Violation for thermal capping is not supported at this time.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param device                               The identifier of the target device
+ * @param perfPolicyType                       Represents Performance policy which can trigger GPU throttling
+ * @param violTime                             Reference to which violation time related information is returned
+ *
+ *
+ * @return
+ * - \ref NVML_SUCCESS if violation time is successfully retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus(nvmlDevice_t device, nvmlPerfPolicyType_t perfPolicyType, nvmlViolationTime_t *violTime);
+
+/**
+ * Gets the device's interrupt number
+ *
+ * @param device                               The identifier of the target device
+ * @param irqNum                               The interrupt number associated with the specified device
+ *
+ * @return
+ * - \ref NVML_SUCCESS if irq number is successfully retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a irqNum is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetIrqNum(nvmlDevice_t device, unsigned int *irqNum);
+
+/**
+ * Gets the device's core count
+ *
+ * @param device                               The identifier of the target device
+ * @param numCores                             The number of cores for the specified device
+ *
+ * @return
+ * - \ref NVML_SUCCESS if Gpu core count is successfully retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a numCores is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetNumGpuCores(nvmlDevice_t device, unsigned int *numCores);
+
+/**
+ * Gets the device's power source
+ *
+ * @param device                               The identifier of the target device
+ * @param powerSource                          The power source of the device
+ *
+ * @return
+ * - \ref NVML_SUCCESS if the current power source was successfully retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a powerSource is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetPowerSource(nvmlDevice_t device, nvmlPowerSource_t *powerSource);
+
+/**
+ * Gets the device's memory bus width
+ *
+ * @param device                               The identifier of the target device
+ * @param busWidth                             The device's memory bus width
+ *
+ * @return
+ * - \ref NVML_SUCCESS if the memory bus width is successfully retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a busWidth is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMemoryBusWidth(nvmlDevice_t device, unsigned int *busWidth);
+
+/**
+ * Gets the device's PCIE Max Link speed in MBPS
+ *
+ * @param device                               The identifier of the target device
+ * @param maxSpeed                             The device's PCIE Max Link speed in MBPS
+ *
+ * @return
+ * - \ref NVML_SUCCESS if Pcie Max Link Speed is successfully retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a maxSpeed is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetPcieLinkMaxSpeed(nvmlDevice_t device, unsigned int *maxSpeed);
+
+/**
+ * Gets the device's Adaptive Clock status
+ *
+ * @param device                               The identifier of the target device
+ * @param adaptiveClockStatus                  The current adaptive clocking status
+ *
+ * @return
+ * - \ref NVML_SUCCESS if the current adaptive clocking status is successfully retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a adaptiveClockStatus is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ *
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetAdaptiveClockInfoStatus(nvmlDevice_t device, unsigned int *adaptiveClockStatus);
+
+/**
+ * @}
+ */
+
+/** @addtogroup nvmlAccountingStats
+ *  @{
+ */
+
+/**
+ * Queries the state of per process accounting mode.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * See \ref nvmlDeviceGetAccountingStats for more details.
+ * See \ref nvmlDeviceSetAccountingMode + * + * @param device The identifier of the target device + * @param mode Reference in which to return the current accounting mode + * + * @return + * - \ref NVML_SUCCESS if the mode has been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode are NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingMode(nvmlDevice_t device, nvmlEnableState_t *mode); + +/** + * Queries process's accounting stats. + * + * For Kepler &tm; or newer fully supported devices. + * + * Accounting stats capture GPU utilization and other statistics across the lifetime of a process. + * Accounting stats can be queried during life time of the process and after its termination. + * The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and + * updated to actual running time after its termination. + * Accounting stats are kept in a circular buffer, newly created processes overwrite information about old + * processes. + * + * See \ref nvmlAccountingStats_t for description of each returned metric. + * List of processes that can be queried can be retrieved from \ref nvmlDeviceGetAccountingPids. + * + * @note Accounting Mode needs to be on. See \ref nvmlDeviceGetAccountingMode. + * @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be + * queried since they don't contribute to GPU utilization. + * @note In case of pid collision stats of only the latest process (that terminated last) will be reported + * + * @warning On Kepler devices per process statistics are accurate only if there's one process running on a GPU. + * + * @param device The identifier of the target device + * @param pid Process Id of the target process to query stats for + * @param stats Reference in which to return the process's accounting stats + * + * @return + * - \ref NVML_SUCCESS if stats have been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a stats are NULL + * - \ref NVML_ERROR_NOT_FOUND if process stats were not found + * - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature or accounting mode is disabled + * or on vGPU host. + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetAccountingBufferSize + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingStats(nvmlDevice_t device, unsigned int pid, nvmlAccountingStats_t *stats); + +/** + * Queries list of processes that can be queried for accounting stats. The list of processes returned + * can be in running or terminated state. + * + * For Kepler &tm; or newer fully supported devices. + * + * To just query the number of processes ready to be queried, call this function with *count = 0 and + * pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if list is empty. + * + * For more details see \ref nvmlDeviceGetAccountingStats. + * + * @note In case of PID collision some processes might not be accessible before the circular buffer is full. 
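+ *
+ * Example of the sizing pattern described above (illustrative sketch, not from the upstream
+ * NVML documentation; assumes accounting mode is enabled, NVML is initialized, \a dev is a
+ * valid device handle and <stdio.h>/<stdlib.h> are included; \a gpuUtilization is an assumed
+ * field of \ref nvmlAccountingStats_t):
+ * @code
+ * unsigned int count = 0;
+ * if (nvmlDeviceGetAccountingPids(dev, &count, NULL) == NVML_ERROR_INSUFFICIENT_SIZE && count > 0) {
+ *     unsigned int *pids = malloc(count * sizeof(*pids));
+ *     if (pids && nvmlDeviceGetAccountingPids(dev, &count, pids) == NVML_SUCCESS) {
+ *         for (unsigned int i = 0; i < count; i++) {
+ *             nvmlAccountingStats_t stats;
+ *             if (nvmlDeviceGetAccountingStats(dev, pids[i], &stats) == NVML_SUCCESS)
+ *                 printf("pid %u: gpu %u%%\n", pids[i], stats.gpuUtilization);
+ *         }
+ *     }
+ *     free(pids);
+ * }
+ * @endcode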
+ * + * @param device The identifier of the target device + * @param count Reference in which to provide the \a pids array size, and + * to return the number of elements ready to be queried + * @param pids Reference in which to return list of process ids + * + * @return + * - \ref NVML_SUCCESS if pids were successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature or accounting mode is disabled + * or on vGPU host. + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to + * expected value) + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetAccountingBufferSize + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingPids(nvmlDevice_t device, unsigned int *count, unsigned int *pids); + +/** + * Returns the number of processes that the circular buffer with accounting pids can hold. + * + * For Kepler &tm; or newer fully supported devices. + * + * This is the maximum number of processes that accounting information will be stored for before information + * about oldest processes will get overwritten by information about new processes. + * + * @param device The identifier of the target device + * @param bufferSize Reference in which to provide the size (in number of elements) + * of the circular buffer for accounting stats. + * + * @return + * - \ref NVML_SUCCESS if buffer size was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a bufferSize is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetAccountingStats + * @see nvmlDeviceGetAccountingPids + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAccountingBufferSize(nvmlDevice_t device, unsigned int *bufferSize); + +/** @} */ + +/** @addtogroup nvmlDeviceQueries + * @{ + */ + +/** + * Returns the list of retired pages by source, including pages that are pending retirement + * The address information provided from this API is the hardware address of the page that was retired. Note + * that this does not match the virtual address used in CUDA, but will match the address information in XID 63 + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param cause Filter page addresses by cause of retirement + * @param pageCount Reference in which to provide the \a addresses buffer size, and + * to return the number of retired pages that match \a cause + * Set to 0 to query the size without allocating an \a addresses buffer + * @param addresses Buffer to write the page addresses into + * + * @return + * - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the + * matching page addresses. \a pageCount is set to the needed size. 
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or
+ *                                    \a addresses is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPages(nvmlDevice_t device, nvmlPageRetirementCause_t cause,
+                                               unsigned int *pageCount, unsigned long long *addresses);
+
+/**
+ * Returns the list of retired pages by source, including pages that are pending retirement
+ * The address information provided from this API is the hardware address of the page that was retired. Note
+ * that this does not match the virtual address used in CUDA, but will match the address information in XID 63
+ *
+ * \note nvmlDeviceGetRetiredPages_v2 adds an additional timestamps parameter to return the time of each page's
+ *       retirement.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param device                            The identifier of the target device
+ * @param cause                             Filter page addresses by cause of retirement
+ * @param pageCount                         Reference in which to provide the \a addresses buffer size, and
+ *                                          to return the number of retired pages that match \a cause
+ *                                          Set to 0 to query the size without allocating an \a addresses buffer
+ * @param addresses                         Buffer to write the page addresses into
+ * @param timestamps                        Buffer to write the timestamps of page retirement, additional for _v2
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the
+ *                                     matching page addresses. \a pageCount is set to the needed size.
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or
+ *                                    \a addresses is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPages_v2(nvmlDevice_t device, nvmlPageRetirementCause_t cause,
+                                                  unsigned int *pageCount, unsigned long long *addresses, unsigned long long *timestamps);
+
+/**
+ * Check if any pages are pending retirement and need a reboot to fully retire.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param device                            The identifier of the target device
+ * @param isPending                         Reference in which to return the pending status
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a isPending was populated
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isPending is NULL
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, nvmlEnableState_t *isPending);
+
+/**
+ * Get number of remapped rows. 
The number of rows reported will be based on
+ * the cause of the remapping. isPending indicates whether or not there are
+ * pending remappings. A reset will be required to actually remap the row.
+ * failureOccurred will be set if a row remapping ever failed in the past. A
+ * pending remapping won't affect future work on the GPU since
+ * error-containment and dynamic page blacklisting will take care of that.
+ *
+ * @note On MIG-enabled GPUs with active instances, querying the number of
+ * remapped rows is not supported
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ *
+ * @param device                               The identifier of the target device
+ * @param corrRows                             Reference for number of rows remapped due to correctable errors
+ * @param uncRows                              Reference for number of rows remapped due to uncorrectable errors
+ * @param isPending                            Reference for whether or not remappings are pending
+ * @param failureOccurred                      Reference that is set when a remapping has failed in the past
+ *
+ * @return
+ * - \ref NVML_SUCCESS Upon success
+ * - \ref NVML_ERROR_INVALID_ARGUMENT If \a corrRows, \a uncRows, \a isPending or \a failureOccurred is invalid
+ * - \ref NVML_ERROR_NOT_SUPPORTED If MIG is enabled or if the device doesn't support this feature
+ * - \ref NVML_ERROR_UNKNOWN Unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetRemappedRows(nvmlDevice_t device, unsigned int *corrRows, unsigned int *uncRows,
+                                               unsigned int *isPending, unsigned int *failureOccurred);
+
+/**
+ * Get the row remapper histogram. Returns the remap availability for each bank
+ * on the GPU.
+ *
+ * @param device                               Device handle
+ * @param values                               Histogram values
+ *
+ * @return
+ * - \ref NVML_SUCCESS On success
+ * - \ref NVML_ERROR_UNKNOWN On any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetRowRemapperHistogram(nvmlDevice_t device, nvmlRowRemapperHistogramValues_t *values);
+
+/**
+ * Get architecture for device
+ *
+ * @param device                               The identifier of the target device
+ * @param arch                                 Reference where architecture is returned, if call successful.
+ *                                             Set to NVML_DEVICE_ARCH_* upon success
+ *
+ * @return
+ * - \ref NVML_SUCCESS Upon success
+ * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device or \a arch (output reference) are invalid
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetArchitecture(nvmlDevice_t device, nvmlDeviceArchitecture_t *arch);
+
+/** @} */
+
+/***************************************************************************************************/
+/** @defgroup nvmlUnitCommands Unit Commands
+ *  This chapter describes NVML operations that change the state of the unit. For S-class products.
+ *  Each of these requires root/admin access. Non-admin users will see an NVML_ERROR_NO_PERMISSION
+ *  error code when invoking any of these methods.
+ *  @{
+ */
+/***************************************************************************************************/
+
+/**
+ * Set the LED state for the unit. The LED can be either green (0) or amber (1).
+ *
+ * For S-class products.
+ * Requires root/admin permissions.
+ *
+ * This operation takes effect immediately.
+ *
+ *
+ * Current S-Class products don't provide unique LEDs for each unit. As such, both front
+ * and back LEDs will be toggled in unison regardless of which unit is specified with this command.
+ *
+ * See \ref nvmlLedColor_t for available colors.
+ * + * @param unit The identifier of the target unit + * @param color The target LED color + * + * @return + * - \ref NVML_SUCCESS if the LED color has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a color is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlUnitGetLedState() + */ +nvmlReturn_t DECLDIR nvmlUnitSetLedState(nvmlUnit_t unit, nvmlLedColor_t color); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlDeviceCommands Device Commands + * This chapter describes NVML operations that change the state of the device. + * Each of these requires root/admin access. Non-admin users will see an NVML_ERROR_NO_PERMISSION + * error code when invoking any of these methods. + * @{ + */ +/***************************************************************************************************/ + +/** + * Set the persistence mode for the device. + * + * For all products. + * For Linux only. + * Requires root/admin permissions. + * + * The persistence mode determines whether the GPU driver software is torn down after the last client + * exits. + * + * This operation takes effect immediately. It is not persistent across reboots. After each reboot the + * persistence mode is reset to "Disabled". + * + * See \ref nvmlEnableState_t for available modes. + * + * After calling this API with mode set to NVML_FEATURE_DISABLED on a device that has its own NUMA + * memory, the given device handle will no longer be valid, and to continue to interact with this + * device, a new handle should be obtained from one of the nvmlDeviceGetHandleBy*() APIs. This + * limitation is currently only applicable to devices that have a coherent NVLink connection to + * system memory. + * + * @param device The identifier of the target device + * @param mode The target persistence mode + * + * @return + * - \ref NVML_SUCCESS if the persistence mode was set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetPersistenceMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t mode); + +/** + * Set the compute mode for the device. + * + * For all products. + * Requires root/admin permissions. + * + * The compute mode determines whether a GPU can be used for compute operations and whether it can + * be shared across contexts. + * + * This operation takes effect immediately. Under Linux it is not persistent across reboots and + * always resets to "Default". Under windows it is persistent. + * + * Under windows compute mode may only be set to DEFAULT when running in WDDM + * + * @note On MIG-enabled GPUs, compute mode would be set to DEFAULT and changing it is not supported. 
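+ *
+ * Example (illustrative sketch, not from the upstream NVML documentation; must run with
+ * root/admin privileges, NVML initialized and \a dev a valid device handle; <stdio.h> is
+ * assumed, and NVML_COMPUTEMODE_EXCLUSIVE_PROCESS is one of the \ref nvmlComputeMode_t values):
+ * @code
+ * nvmlReturn_t ret = nvmlDeviceSetComputeMode(dev, NVML_COMPUTEMODE_EXCLUSIVE_PROCESS);
+ * if (ret != NVML_SUCCESS)
+ *     fprintf(stderr, "compute mode not changed: %s\n", nvmlErrorString(ret));
+ * @endcode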
+ * + * See \ref nvmlComputeMode_t for details on available compute modes. + * + * @param device The identifier of the target device + * @param mode The target compute mode + * + * @return + * - \ref NVML_SUCCESS if the compute mode was set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetComputeMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetComputeMode(nvmlDevice_t device, nvmlComputeMode_t mode); + +/** + * Set the ECC mode for the device. + * + * For Kepler &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher. + * Requires root/admin permissions. + * + * The ECC mode determines whether the GPU enables its ECC support. + * + * This operation takes effect after the next reboot. + * + * See \ref nvmlEnableState_t for details on available modes. + * + * @param device The identifier of the target device + * @param ecc The target ECC mode + * + * @return + * - \ref NVML_SUCCESS if the ECC mode was set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a ecc is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetEccMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceSetEccMode(nvmlDevice_t device, nvmlEnableState_t ecc); + +/** + * Clear the ECC error and other memory error counts for the device. + * + * For Kepler &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 2.0 or higher to clear aggregate location-based ECC counts. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher to clear all other ECC counts. + * Requires root/admin permissions. + * Requires ECC Mode to be enabled. + * + * Sets all of the specified ECC counters to 0, including both detailed and total counts. + * + * This operation takes effect immediately. + * + * See \ref nvmlMemoryErrorType_t for details on available counter types. + * + * @param device The identifier of the target device + * @param counterType Flag that indicates which type of errors should be cleared. 
+ *
+ * @return
+ * - \ref NVML_SUCCESS if the error counts were cleared
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a counterType is invalid
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
+ * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ * @see
+ *      - nvmlDeviceGetDetailedEccErrors()
+ *      - nvmlDeviceGetTotalEccErrors()
+ */
+nvmlReturn_t DECLDIR nvmlDeviceClearEccErrorCounts(nvmlDevice_t device, nvmlEccCounterType_t counterType);
+
+/**
+ * Set the driver model for the device.
+ *
+ * For Fermi &tm; or newer fully supported devices.
+ * For windows only.
+ * Requires root/admin permissions.
+ *
+ * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached
+ * to the device it must run in WDDM mode.
+ *
+ * It is possible to force the change to WDM (TCC) while the display is still attached with a force flag (nvmlFlagForce).
+ * This should only be done if the host is subsequently powered down and the display is detached from the device
+ * before the next reboot.
+ *
+ * This operation takes effect after the next reboot.
+ *
+ * Windows driver model may only be set to WDDM when running in DEFAULT compute mode.
+ *
+ * Changing the driver model to WDDM is not supported when the GPU doesn't support graphics acceleration or
+ * will not support it after reboot. See \ref nvmlDeviceSetGpuOperationMode.
+ *
+ * See \ref nvmlDriverModel_t for details on available driver models.
+ * See \ref nvmlFlagDefault and \ref nvmlFlagForce
+ *
+ * @param device                               The identifier of the target device
+ * @param driverModel                          The target driver model
+ * @param flags                                Flags that change the default behavior
+ *
+ * @return
+ * - \ref NVML_SUCCESS if the driver model has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a driverModel is invalid
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows or the device does not support this feature
+ * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ * @see nvmlDeviceGetDriverModel()
+ */
+nvmlReturn_t DECLDIR nvmlDeviceSetDriverModel(nvmlDevice_t device, nvmlDriverModel_t driverModel, unsigned int flags);
+
+typedef enum nvmlClockLimitId_enum {
+    NVML_CLOCK_LIMIT_ID_RANGE_START = 0xffffff00,
+    NVML_CLOCK_LIMIT_ID_TDP,
+    NVML_CLOCK_LIMIT_ID_UNLIMITED
+} nvmlClockLimitId_t;
+
+/**
+ * Set clocks that device will lock to.
+ *
+ * Sets the clocks that the device will be running at to the value in the range of minGpuClockMHz to maxGpuClockMHz.
+ * Setting this will supersede application clock values and take effect regardless of whether a CUDA app is running.
+ * See \ref nvmlDeviceSetApplicationsClocks
+ *
+ * Can be used as a setting to request constant performance.
+ *
+ * This can be called with a pair of integer clock frequencies in MHz, or a pair of \ref nvmlClockLimitId_t values.
+ * See the table below for valid combinations of these values.
+ * + * minGpuClock | maxGpuClock | Effect + * ------------+-------------+-------------------------------------------------- + * tdp | tdp | Lock clock to TDP + * unlimited | tdp | Upper bound is TDP but clock may drift below this + * tdp | unlimited | Lower bound is TDP but clock may boost above this + * unlimited | unlimited | Unlocked (== nvmlDeviceResetGpuLockedClocks) + * + * If one arg takes one of these values, the other must be one of these values as + * well. Mixed numeric and symbolic calls return NVML_ERROR_INVALID_ARGUMENT. + * + * Requires root/admin permissions. + * + * After system reboot or driver reload applications clocks go back to their default value. + * See \ref nvmlDeviceResetGpuLockedClocks. + * + * For Volta &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param minGpuClockMHz Requested minimum gpu clock in MHz + * @param maxGpuClockMHz Requested maximum gpu clock in MHz + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minGpuClockMHz and \a maxGpuClockMHz + * is not a valid clock combination + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetGpuLockedClocks(nvmlDevice_t device, unsigned int minGpuClockMHz, unsigned int maxGpuClockMHz); + +/** + * Resets the gpu clock to the default value + * + * This is the gpu clock that will be used after system reboot or driver reload. + * Default values are idle clocks, but the current values can be changed using \ref nvmlDeviceSetApplicationsClocks. + * + * @see nvmlDeviceSetGpuLockedClocks + * + * For Volta &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetGpuLockedClocks(nvmlDevice_t device); + +/** + * Set memory clocks that device will lock to. + * + * Sets the device's memory clocks to the value in the range of minMemClockMHz to maxMemClockMHz. + * Setting this will supersede application clock values and take effect regardless of whether a cuda app is running. + * See /ref nvmlDeviceSetApplicationsClocks + * + * Can be used as a setting to request constant performance. + * + * Requires root/admin permissions. + * + * After system reboot or driver reload applications clocks go back to their default value. + * See \ref nvmlDeviceResetMemoryLockedClocks. + * + * For Ampere &tm; or newer fully supported devices. 
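+ *
+ * Example (illustrative sketch, not from the upstream NVML documentation; requires root/admin
+ * privileges, an initialized NVML library and a valid device handle \a dev; the 5001 MHz value
+ * is only a placeholder and must be replaced by a memory clock the device actually supports):
+ * @code
+ * // Pin the memory clock to a single frequency, then release the lock again.
+ * if (nvmlDeviceSetMemoryLockedClocks(dev, 5001, 5001) == NVML_SUCCESS)
+ *     nvmlDeviceResetMemoryLockedClocks(dev);
+ * @endcode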
+ * + * @param device The identifier of the target device + * @param minMemClockMHz Requested minimum memory clock in MHz + * @param maxMemClockMHz Requested maximum memory clock in MHz + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minGpuClockMHz and \a maxGpuClockMHz + * is not a valid clock combination + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetMemoryLockedClocks(nvmlDevice_t device, unsigned int minMemClockMHz, unsigned int maxMemClockMHz); + +/** + * Resets the memory clock to the default value + * + * This is the memory clock that will be used after system reboot or driver reload. + * Default values are idle clocks, but the current values can be changed using \ref nvmlDeviceSetApplicationsClocks. + * + * @see nvmlDeviceSetMemoryLockedClocks + * + * For Ampere &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetMemoryLockedClocks(nvmlDevice_t device); + +/** + * Set clocks that applications will lock to. + * + * Sets the clocks that compute and graphics applications will be running at. + * e.g. CUDA driver requests these clocks during context creation which means this property + * defines clocks at which CUDA applications will be running unless some overspec event + * occurs (e.g. over power, over thermal or external HW brake). + * + * Can be used as a setting to request constant performance. + * + * On Pascal and newer hardware, this will automatically disable automatic boosting of clocks. + * + * On K80 and newer Kepler and Maxwell GPUs, users desiring fixed performance should also call + * \ref nvmlDeviceSetAutoBoostedClocksEnabled to prevent clocks from automatically boosting + * above the clock value being set. + * + * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. + * Requires root/admin permissions. + * + * See \ref nvmlDeviceGetSupportedMemoryClocks and \ref nvmlDeviceGetSupportedGraphicsClocks + * for details on how to list available clocks combinations. + * + * After system reboot or driver reload applications clocks go back to their default value. + * See \ref nvmlDeviceResetApplicationsClocks. 
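+ *
+ * Example (illustrative sketch, not from the upstream NVML documentation; requires root/admin
+ * privileges, an initialized NVML library, a valid device handle \a dev and <stdio.h>; the
+ * clock values are placeholders and should come from \ref nvmlDeviceGetSupportedMemoryClocks
+ * and \ref nvmlDeviceGetSupportedGraphicsClocks):
+ * @code
+ * nvmlReturn_t ret = nvmlDeviceSetApplicationsClocks(dev, 5001, 1410);
+ * if (ret == NVML_ERROR_INVALID_ARGUMENT)
+ *     fprintf(stderr, "not a supported memory/graphics clock pair for this device\n");
+ * @endcode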
+ * + * @param device The identifier of the target device + * @param memClockMHz Requested memory clock in MHz + * @param graphicsClockMHz Requested graphics clock in MHz + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memClockMHz and \a graphicsClockMHz + * is not a valid clock combination + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetApplicationsClocks(nvmlDevice_t device, unsigned int memClockMHz, unsigned int graphicsClockMHz); + +/** + * Retrieves the frequency monitor fault status for the device. + * + * For Ampere &tm; or newer fully supported devices. + * Requires root user. + * + * See \ref nvmlClkMonStatus_t for details on decoding the status output. + * + * @param device The identifier of the target device + * @param status Reference in which to return the clkmon fault status + * + * @return + * - \ref NVML_SUCCESS if \a status has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a status is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetClkMonStatus() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetClkMonStatus(nvmlDevice_t device, nvmlClkMonStatus_t *status); + +/** + * Set new power limit of this device. + * + * For Kepler &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values. + * + * \note Limit is not persistent across reboots or driver unloads. + * Enable persistent mode to prevent driver from unloading when no application is using the device. + * + * @param device The identifier of the target device + * @param limit Power management limit in milliwatts to set + * + * @return + * - \ref NVML_SUCCESS if \a limit has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is out of range + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetPowerManagementLimitConstraints + * @see nvmlDeviceGetPowerManagementDefaultLimit + */ +nvmlReturn_t DECLDIR nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit); + +/** + * Sets new GOM. See \a nvmlGpuOperationMode_t for details. + * + * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. + * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. + * Not supported on Quadro ® and Tesla &tm; C-class products. + * Requires root/admin permissions. 
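To tie nvmlDeviceSetPowerManagementLimit together with the constraint query it references, here is a hedged sketch that clamps a requested limit into the supported range before applying it. It assumes the usual (device, &minLimit, &maxLimit) signature for nvmlDeviceGetPowerManagementLimitConstraints and that the device handle was obtained as in the earlier locked-clock example; the requested wattage is up to the caller.

#include <stdio.h>
#include <nvml.h>

/* Clamp a requested power limit (milliwatts) into the device's supported
 * range and apply it.  Assumes nvmlInit() has already succeeded and `dev`
 * was obtained with nvmlDeviceGetHandleByIndex(). */
static nvmlReturn_t set_power_limit_clamped(nvmlDevice_t dev, unsigned int requestedMw)
{
    unsigned int minMw = 0, maxMw = 0;
    nvmlReturn_t ret = nvmlDeviceGetPowerManagementLimitConstraints(dev, &minMw, &maxMw);
    if (ret != NVML_SUCCESS)
        return ret;

    if (requestedMw < minMw) requestedMw = minMw;
    if (requestedMw > maxMw) requestedMw = maxMw;

    ret = nvmlDeviceSetPowerManagementLimit(dev, requestedMw);   /* needs root/admin */
    if (ret != NVML_SUCCESS)
        fprintf(stderr, "SetPowerManagementLimit: %s\n", nvmlErrorString(ret));
    return ret;
}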
+ *
+ * Changing GOMs requires a reboot.
+ * The reboot requirement might be removed in the future.
+ *
+ * Compute-only GOMs don't support graphics acceleration. Under Windows, switching to these GOMs when
+ * the pending driver model is WDDM is not supported. See \ref nvmlDeviceSetDriverModel.
+ *
+ * @param device The identifier of the target device
+ * @param mode Target GOM
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a mode has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is incorrect
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support GOM or specific mode
+ * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ * @see nvmlGpuOperationMode_t
+ * @see nvmlDeviceGetGpuOperationMode
+ */
+nvmlReturn_t DECLDIR nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode);
+
+/**
+ * Changes the root/admin restrictions on certain APIs. See \a nvmlRestrictedAPI_t for the list of supported APIs.
+ * This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs.
+ * The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See \a nvmlDeviceGetAPIRestriction
+ * to query the current restriction settings.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ * Requires root/admin permissions.
+ *
+ * @param device The identifier of the target device
+ * @param apiType Target API type for this operation
+ * @param isRestricted The target restriction
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a isRestricted has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a apiType is incorrect
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support changing API restrictions or the device does not support
+ * the feature that API restrictions are being set for (e.g. enabling/disabling auto
+ * boosted clocks is not supported by the device)
+ * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
+ * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ *
+ * @see nvmlRestrictedAPI_t
+ */
+nvmlReturn_t DECLDIR nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted);
+
+/**
+ * @}
+ */
+
+/** @addtogroup nvmlAccountingStats
+ * @{
+ */
+
+/**
+ * Enables or disables per-process accounting.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ * Requires root/admin permissions.
+ *
+ * @note This setting is not persistent and will default to disabled after driver unloads.
+ * Enable persistence mode to be sure the setting doesn't switch off to disabled.
+ *
+ * @note Enabling accounting mode has no negative impact on the GPU performance.
+ *
+ * @note Disabling accounting clears all accounting pids information.
+ *
+ * @note On MIG-enabled GPUs, accounting mode would be set to DISABLED and changing it is not supported.
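As a sketch of how nvmlDeviceSetAPIRestriction might be used, the snippet below lifts the root-only restriction on the auto-boosted-clocks setter so that an unprivileged monitoring agent can toggle it. The NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS enumerant is assumed from nvmlRestrictedAPI_t; the device handle is obtained as in the first example.

#include <stdio.h>
#include <nvml.h>

/* Allow non-root callers to use the auto-boosted-clocks APIs on `dev`.
 * Must itself be run with root/admin privileges; the setting lasts only
 * for the lifetime of the driver. */
static void unrestrict_auto_boost(nvmlDevice_t dev)
{
    nvmlReturn_t ret = nvmlDeviceSetAPIRestriction(dev,
                                                   NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS,
                                                   NVML_FEATURE_DISABLED /* not restricted */);
    if (ret != NVML_SUCCESS)
        fprintf(stderr, "SetAPIRestriction: %s\n", nvmlErrorString(ret));
}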
+ * + * See \ref nvmlDeviceGetAccountingMode + * See \ref nvmlDeviceGetAccountingStats + * See \ref nvmlDeviceClearAccountingPids + * + * @param device The identifier of the target device + * @param mode The target accounting mode + * + * @return + * - \ref NVML_SUCCESS if the new mode has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode(nvmlDevice_t device, nvmlEnableState_t mode); + +/** + * Clears accounting information about all processes that have already terminated. + * + * For Kepler &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * See \ref nvmlDeviceGetAccountingMode + * See \ref nvmlDeviceGetAccountingStats + * See \ref nvmlDeviceSetAccountingMode + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if accounting information has been cleared + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceClearAccountingPids(nvmlDevice_t device); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup NvLink NvLink Methods + * This chapter describes methods that NVML can perform on NVLINK enabled devices. + * @{ + */ +/***************************************************************************************************/ + +/** + * Retrieves the state of the device's NvLink for the link specified + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param isActive \a nvmlEnableState_t where NVML_FEATURE_ENABLED indicates that + * the link is active and NVML_FEATURE_DISABLED indicates it + * is inactive + * + * @return + * - \ref NVML_SUCCESS if \a isActive has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a isActive is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkState(nvmlDevice_t device, unsigned int link, nvmlEnableState_t *isActive); + +/** + * Retrieves the version of the device's NvLink for the link specified + * + * For Pascal &tm; or newer fully supported devices. 
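A minimal sketch of the accounting controls documented above: enable per-process accounting and, once the collected data has been consumed, clear the terminated-process records. Both calls require root/admin; the device handle is assumed to come from the earlier initialization example.

#include <stdio.h>
#include <nvml.h>

/* Turn on per-process accounting and, later, drop records of processes
 * that have already exited.  Requires root/admin permissions. */
static void manage_accounting(nvmlDevice_t dev)
{
    nvmlReturn_t ret = nvmlDeviceSetAccountingMode(dev, NVML_FEATURE_ENABLED);
    if (ret != NVML_SUCCESS)
        fprintf(stderr, "SetAccountingMode: %s\n", nvmlErrorString(ret));

    /* ... query per-process statistics while workloads run ... */

    ret = nvmlDeviceClearAccountingPids(dev);
    if (ret != NVML_SUCCESS)
        fprintf(stderr, "ClearAccountingPids: %s\n", nvmlErrorString(ret));
}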
+ * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param version Requested NvLink version + * + * @return + * - \ref NVML_SUCCESS if \a version has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a version is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkVersion(nvmlDevice_t device, unsigned int link, unsigned int *version); + +/** + * Retrieves the requested capability from the device's NvLink for the link specified + * Please refer to the \a nvmlNvLinkCapability_t structure for the specific caps that can be queried + * The return value should be treated as a boolean. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param capability Specifies the \a nvmlNvLinkCapability_t to be queried + * @param capResult A boolean for the queried capability indicating that feature is available + * + * @return + * - \ref NVML_SUCCESS if \a capResult has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a capability is invalid or \a capResult is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkCapability(nvmlDevice_t device, unsigned int link, + nvmlNvLinkCapability_t capability, unsigned int *capResult); + +/** + * Retrieves the PCI information for the remote node on a NvLink link + * Note: pciSubSystemId is not filled in this function and is indeterminate + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param pci \a nvmlPciInfo_t of the remote node for the specified link + * + * @return + * - \ref NVML_SUCCESS if \a pci has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a pci is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemotePciInfo_v2(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t *pci); + +/** + * Retrieves the specified error counter value + * Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available + * + * For Pascal &tm; or newer fully supported devices. 
+ * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param counter Specifies the NvLink counter to be queried + * @param counterValue Returned counter value + * + * @return + * - \ref NVML_SUCCESS if \a counter has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid or \a counterValue is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkErrorCounter(nvmlDevice_t device, unsigned int link, + nvmlNvLinkErrorCounter_t counter, unsigned long long *counterValue); + +/** + * Resets all error counters to zero + * Please refer to \a nvmlNvLinkErrorCounter_t for the list of error counters that are reset + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * + * @return + * - \ref NVML_SUCCESS if the reset is successful + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkErrorCounters(nvmlDevice_t device, unsigned int link); + +/** + * Deprecated: Setting utilization counter control is no longer supported. + * + * Set the NVLINK utilization counter control information for the specified counter, 0 or 1. + * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition. Performs a reset + * of the counters if the reset parameter is non-zero. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param counter Specifies the counter that should be set (0 or 1). + * @param link Specifies the NvLink link to be queried + * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to set + * @param reset Resets the counters on set if non-zero + * + * @return + * - \ref NVML_SUCCESS if the control has been set successfully + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, + nvmlNvLinkUtilizationControl_t *control, unsigned int reset); + +/** + * Deprecated: Getting utilization counter control is no longer supported. + * + * Get the NVLINK utilization counter control information for the specified counter, 0 or 1. + * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param counter Specifies the counter that should be set (0 or 1). 
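The NvLink state and error-counter queries above lend themselves to a simple health sweep. The sketch below walks every possible link, skips inactive ones, and prints one data-link error counter; NVML_NVLINK_MAX_LINKS and the NVML_NVLINK_ERROR_DL_CRC_FLIT enumerant are assumed from the definitions elsewhere in this header.

#include <stdio.h>
#include <nvml.h>

/* Print the data-link CRC flit error counter for every active NVLink on `dev`. */
static void dump_nvlink_crc_errors(nvmlDevice_t dev)
{
    for (unsigned int link = 0; link < NVML_NVLINK_MAX_LINKS; link++) {
        nvmlEnableState_t active;
        if (nvmlDeviceGetNvLinkState(dev, link, &active) != NVML_SUCCESS ||
            active != NVML_FEATURE_ENABLED)
            continue;                               /* link absent or inactive */

        unsigned long long crcErrors = 0;
        nvmlReturn_t ret = nvmlDeviceGetNvLinkErrorCounter(dev, link,
                                                           NVML_NVLINK_ERROR_DL_CRC_FLIT,
                                                           &crcErrors);
        if (ret == NVML_SUCCESS)
            printf("link %u: CRC flit errors = %llu\n", link, crcErrors);
    }
}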
+ * @param link Specifies the NvLink link to be queried + * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to place information + * + * @return + * - \ref NVML_SUCCESS if the control has been set successfully + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, + nvmlNvLinkUtilizationControl_t *control); + + +/** + * Deprecated: Use \ref nvmlDeviceGetFieldValues with NVML_FI_DEV_NVLINK_THROUGHPUT_* as field values instead. + * + * Retrieve the NVLINK utilization counter based on the current control for a specified counter. + * In general it is good practice to use \a nvmlDeviceSetNvLinkUtilizationControl + * before reading the utilization counters as they have no default state + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param counter Specifies the counter that should be read (0 or 1). + * @param rxcounter Receive counter return value + * @param txcounter Transmit counter return value + * + * @return + * - \ref NVML_SUCCESS if \a rxcounter and \a txcounter have been successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, or \a link is invalid or \a rxcounter or \a txcounter are NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationCounter(nvmlDevice_t device, unsigned int link, unsigned int counter, + unsigned long long *rxcounter, unsigned long long *txcounter); + +/** + * Deprecated: Freezing NVLINK utilization counters is no longer supported. + * + * Freeze the NVLINK utilization counters + * Both the receive and transmit counters are operated on by this function + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be queried + * @param counter Specifies the counter that should be frozen (0 or 1). + * @param freeze NVML_FEATURE_ENABLED = freeze the receive and transmit counters + * NVML_FEATURE_DISABLED = unfreeze the receive and transmit counters + * + * @return + * - \ref NVML_SUCCESS if counters were successfully frozen or unfrozen + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, \a counter, or \a freeze is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceFreezeNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, + unsigned int counter, nvmlEnableState_t freeze); + +/** + * Deprecated: Resetting NVLINK utilization counters is no longer supported. + * + * Reset the NVLINK utilization counters + * Both the receive and transmit counters are operated on by this function + * + * For Pascal &tm; or newer fully supported devices. 
+ * + * @param device The identifier of the target device + * @param link Specifies the NvLink link to be reset + * @param counter Specifies the counter that should be reset (0 or 1) + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, unsigned int counter); + +/** +* Get the NVLink device type of the remote device connected over the given link. +* +* @param device The device handle of the target GPU +* @param link The NVLink link index on the target GPU +* @param pNvLinkDeviceType Pointer in which the output remote device type is returned +* +* @return +* - \ref NVML_SUCCESS if \a pNvLinkDeviceType has been set +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_NOT_SUPPORTED if NVLink is not supported +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid, or +* \a pNvLinkDeviceType is NULL +* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is +* otherwise inaccessible +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemoteDeviceType(nvmlDevice_t device, unsigned int link, nvmlIntNvLinkDeviceType_t *pNvLinkDeviceType); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlEvents Event Handling Methods + * This chapter describes methods that NVML can perform against each device to register and wait for + * some event to occur. + * @{ + */ +/***************************************************************************************************/ + +/** + * Create an empty set of events. + * Event set should be freed by \ref nvmlEventSetFree + * + * For Fermi &tm; or newer fully supported devices. + * @param set Reference in which to return the event handle + * + * @return + * - \ref NVML_SUCCESS if the event has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a set is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventSetFree + */ +nvmlReturn_t DECLDIR nvmlEventSetCreate(nvmlEventSet_t *set); + +/** + * Starts recording of events on a specified devices and add the events to specified \ref nvmlEventSet_t + * + * For Fermi &tm; or newer fully supported devices. + * Ecc events are available only on ECC enabled devices (see \ref nvmlDeviceGetTotalEccErrors) + * Power capping events are available only on Power Management enabled devices (see \ref nvmlDeviceGetPowerManagementMode) + * + * For Linux only. + * + * \b IMPORTANT: Operations on \a set are not thread safe + * + * This call starts recording of events on specific device. + * All events that occurred before this call are not recorded. + * Checking if some event occurred can be done with \ref nvmlEventSetWait_v2 + * + * If function reports NVML_ERROR_UNKNOWN, event set is in undefined state and should be freed. + * If function reports NVML_ERROR_NOT_SUPPORTED, event set can still be used. None of the requested eventTypes + * are registered in that case. 
+ * + * @param device The identifier of the target device + * @param eventTypes Bitmask of \ref nvmlEventType to record + * @param set Set to which add new event types + * + * @return + * - \ref NVML_SUCCESS if the event has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventTypes is invalid or \a set is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the platform does not support this feature or some of requested event types + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventType + * @see nvmlDeviceGetSupportedEventTypes + * @see nvmlEventSetWait + * @see nvmlEventSetFree + */ +nvmlReturn_t DECLDIR nvmlDeviceRegisterEvents(nvmlDevice_t device, unsigned long long eventTypes, nvmlEventSet_t set); + +/** + * Returns information about events supported on device + * + * For Fermi &tm; or newer fully supported devices. + * + * Events are not supported on Windows. So this function returns an empty mask in \a eventTypes on Windows. + * + * @param device The identifier of the target device + * @param eventTypes Reference in which to return bitmask of supported events + * + * @return + * - \ref NVML_SUCCESS if the eventTypes has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventType is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventType + * @see nvmlDeviceRegisterEvents + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedEventTypes(nvmlDevice_t device, unsigned long long *eventTypes); + +/** + * Waits on events and delivers events + * + * For Fermi &tm; or newer fully supported devices. + * + * If some events are ready to be delivered at the time of the call, function returns immediately. + * If there are no events ready to be delivered, function sleeps till event arrives + * but not longer than specified timeout. This function in certain conditions can return before + * specified timeout passes (e.g. when interrupt arrives) + * + * On Windows, in case of xid error, the function returns the most recent xid error type seen by the system. + * If there are multiple xid errors generated before nvmlEventSetWait is invoked then the last seen xid error + * type is returned for all xid error events. + * + * On Linux, every xid error event would return the associated event data and other information if applicable. + * + * In MIG mode, if device handle is provided, the API reports all the events for the available instances, + * only if the caller has appropriate privileges. In absence of required privileges, only the events which + * affect all the instances (i.e. whole device) are reported. + * + * This API does not currently support per-instance event reporting using MIG device handles. 
+ * + * @param set Reference to set of events to wait on + * @param data Reference in which to return event data + * @param timeoutms Maximum amount of wait time in milliseconds for registered event + * + * @return + * - \ref NVML_SUCCESS if the data has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a data is NULL + * - \ref NVML_ERROR_TIMEOUT if no event arrived in specified timeout or interrupt arrived + * - \ref NVML_ERROR_GPU_IS_LOST if a GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlEventType + * @see nvmlDeviceRegisterEvents + */ +nvmlReturn_t DECLDIR nvmlEventSetWait_v2(nvmlEventSet_t set, nvmlEventData_t * data, unsigned int timeoutms); + +/** + * Releases events in the set + * + * For Fermi &tm; or newer fully supported devices. + * + * @param set Reference to events to be released + * + * @return + * - \ref NVML_SUCCESS if the event has been successfully released + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceRegisterEvents + */ +nvmlReturn_t DECLDIR nvmlEventSetFree(nvmlEventSet_t set); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlZPI Drain states + * This chapter describes methods that NVML can perform against each device to control their drain state + * and recognition by NVML and NVIDIA kernel driver. These methods can be used with out-of-band tools to + * power on/off GPUs, enable robust reset scenarios, etc. + * @{ + */ +/***************************************************************************************************/ + +/** + * Modify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests. + * Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before + * this call is made. + * Must be called as administrator. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. + * + * @param pciInfo The PCI address of the GPU drain state to be modified + * @param newState The drain state that should be entered, see \ref nvmlEnableState_t + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a newState is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation + * - \ref NVML_ERROR_IN_USE if the device has persistence mode turned on + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceModifyDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t newState); + +/** + * Query the drain state of a GPU. This method is used to check if a GPU is in a currently draining + * state. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. 
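Putting the event-handling calls above together, the sketch below registers for XID and double-bit ECC events on one device and polls with a 10-second timeout. The event-type masks (nvmlEventTypeXidCriticalError, nvmlEventTypeDoubleBitEccError) are assumed from the event-type definitions earlier in this header, and the bounded loop is a deliberate simplification.

#include <stdio.h>
#include <nvml.h>

/* Block on critical events for `dev`, printing each one as it arrives. */
static void watch_events(nvmlDevice_t dev)
{
    nvmlEventSet_t set;
    if (nvmlEventSetCreate(&set) != NVML_SUCCESS)
        return;

    unsigned long long mask = nvmlEventTypeXidCriticalError | nvmlEventTypeDoubleBitEccError;
    if (nvmlDeviceRegisterEvents(dev, mask, set) == NVML_SUCCESS) {
        for (int i = 0; i < 10; i++) {              /* bounded loop for the example */
            nvmlEventData_t data;
            nvmlReturn_t ret = nvmlEventSetWait_v2(set, &data, 10000 /* ms */);
            if (ret == NVML_ERROR_TIMEOUT)
                continue;                           /* nothing happened, poll again */
            if (ret != NVML_SUCCESS)
                break;
            printf("event 0x%llx, data %llu\n", data.eventType, data.eventData);
        }
    }

    nvmlEventSetFree(set);
}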
+ * + * @param pciInfo The PCI address of the GPU drain state to be queried + * @param currentState The current drain state for this GPU, see \ref nvmlEnableState_t + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a currentState is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceQueryDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t *currentState); + +/** + * This method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver + * as long as no other processes are attached. If other processes are attached, this call will return + * NVML_ERROR_IN_USE and the GPU will be returned to its original "draining" state. Note: the + * only situation where a process can still be attached after nvmlDeviceModifyDrainState() is called + * to initiate the draining state is if that process was using, and is still using, a GPU before the + * call was made. Also note, persistence mode counts as an attachment to the GPU thus it must be disabled + * prior to this call. + * + * For long-running NVML processes please note that this will change the enumeration of current GPUs. + * For example, if there are four GPUs present and GPU1 is removed, the new enumeration will be 0-2. + * Also, device handles after the removed GPU will not be valid and must be re-established. + * Must be run as administrator. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. + * + * @param pciInfo The PCI address of the GPU to be removed + * @param gpuState Whether the GPU is to be removed, from the OS + * see \ref nvmlDetachGpuState_t + * @param linkState Requested upstream PCIe link state, see \ref nvmlPcieLinkState_t + * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_IN_USE if the device is still in use and cannot be removed + */ +nvmlReturn_t DECLDIR nvmlDeviceRemoveGpu_v2(nvmlPciInfo_t *pciInfo, nvmlDetachGpuState_t gpuState, nvmlPcieLinkState_t linkState); + +/** + * Request the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that + * were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device. + * If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes + * the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order. + * + * In addition, all newly discovered GPUs will be initialized and their ECC scrubbed which may take several seconds + * per GPU. Also, all device handles are no longer guaranteed to be valid post discovery. + * + * Must be run as administrator. + * For Linux only. + * + * For Pascal &tm; or newer fully supported devices. + * Some Kepler devices supported. + * + * @param pciInfo The PCI tree to be searched. Only the domain, bus, and device + * fields are used in this call. 
+ * + * @return + * - \ref NVML_SUCCESS if counters were successfully reset + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the operating system does not support this feature + * - \ref NVML_ERROR_OPERATING_SYSTEM if the operating system is denying this feature + * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceDiscoverGpus (nvmlPciInfo_t *pciInfo); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlFieldValueQueries Field Value Queries + * This chapter describes NVML operations that are associated with retrieving Field Values from NVML + * @{ + */ +/***************************************************************************************************/ + +/** + * Request values for a list of fields for a device. This API allows multiple fields to be queried at once. + * If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs + * will be populated from a single call rather than making a driver call for each fieldId. + * + * @param device The device handle of the GPU to request field values for + * @param valuesCount Number of entries in values that should be retrieved + * @param values Array of \a valuesCount structures to hold field values. + * Each value's fieldId must be populated prior to this call + * + * @return + * - \ref NVML_SUCCESS if any values in \a values were populated. Note that you must + * check the nvmlReturn field of each value for each individual + * status + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL + */ +nvmlReturn_t DECLDIR nvmlDeviceGetFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t *values); + + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup vGPU Enums, Constants and Structs + * @{ + */ +/** @} */ +/***************************************************************************************************/ + +/***************************************************************************************************/ +/** @defgroup nvmlVirtualGpuQueries vGPU APIs + * This chapter describes operations that are associated with NVIDIA vGPU Software products. + * @{ + */ +/***************************************************************************************************/ + +/** + * This method is used to get the virtualization mode corresponding to the GPU. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device Identifier of the target device + * @param pVirtualMode Reference to virtualization mode. One of NVML_GPU_VIRTUALIZATION_? 
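A hedged sketch of the batched field-value query described above, using the NVML_FI_DEV_NVLINK_THROUGHPUT_* field IDs referenced earlier in this header. The particular fields chosen, and the assumption that their results arrive as unsigned long long values (value.ullVal in nvmlFieldValue_t), are illustrative rather than definitive; each entry's per-field status must be checked as the documentation requires.

#include <stdio.h>
#include <string.h>
#include <nvml.h>

/* Fetch two NVLink throughput counters for `dev` in a single driver call. */
static void read_nvlink_throughput(nvmlDevice_t dev)
{
    nvmlFieldValue_t values[2];
    memset(values, 0, sizeof(values));
    values[0].fieldId = NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_TX;
    values[1].fieldId = NVML_FI_DEV_NVLINK_THROUGHPUT_DATA_RX;

    if (nvmlDeviceGetFieldValues(dev, 2, values) != NVML_SUCCESS)
        return;

    for (int i = 0; i < 2; i++) {
        /* Each entry carries its own status; check it before using the value. */
        if (values[i].nvmlReturn == NVML_SUCCESS)
            printf("field %u = %llu\n", values[i].fieldId, values[i].value.ullVal);
    }
}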
+ * + * @return + * - \ref NVML_SUCCESS if \a pVirtualMode is fetched + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t *pVirtualMode); + +/** + * Queries if SR-IOV host operation is supported on a vGPU supported device. + * + * Checks whether SR-IOV host capability is supported by the device and the + * driver, and indicates device is in SR-IOV mode if both of these conditions + * are true. + * + * @param device The identifier of the target device + * @param pHostVgpuMode Reference in which to return the current vGPU mode + * + * @return + * - \ref NVML_SUCCESS if device's vGPU mode has been successfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle is 0 or \a pVgpuMode is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if \a device doesn't support this feature. + * - \ref NVML_ERROR_UNKNOWN if any unexpected error occurred + */ +nvmlReturn_t DECLDIR nvmlDeviceGetHostVgpuMode(nvmlDevice_t device, nvmlHostVgpuMode_t *pHostVgpuMode); + +/** + * This method is used to set the virtualization mode corresponding to the GPU. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param device Identifier of the target device + * @param virtualMode virtualization mode. One of NVML_GPU_VIRTUALIZATION_? + * + * @return + * - \ref NVML_SUCCESS if \a pVirtualMode is set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if setting of virtualization mode is not supported. + * - \ref NVML_ERROR_NO_PERMISSION if setting of virtualization mode is not allowed for this client. + */ +nvmlReturn_t DECLDIR nvmlDeviceSetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t virtualMode); + +/** + * Retrieve the vGPU Software licensable features. + * + * Identifies whether the system supports vGPU Software Licensing. If it does, return the list of licensable feature(s) + * and their current license status. + * + * @param device Identifier of the target device + * @param pGridLicensableFeatures Pointer to structure in which vGPU software licensable features are returned + * + * @return + * - \ref NVML_SUCCESS if licensable features are successfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pGridLicensableFeatures is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); + +/** + * Retrieves the current utilization and process ID + * + * For Maxwell &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running. + * Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at + * by \a utilization. 
One utilization sample structure is returned per process running, that had some non-zero utilization + * during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values + * are returned as "unsigned int" values. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a utilization set to NULL. The caller should allocate a buffer of size + * processSamplesCount * sizeof(nvmlProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed + * in \a utilization, and \a processSamplesCount set to the number of entries the buffer is sized for. + * + * On successful return, the function updates \a processSamplesCount with the number of process utilization sample + * structures that were actually written. This may differ from a previously read value as instances are created or + * destroyed. + * + * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. + * + * @note On MIG-enabled GPUs, querying process utilization is not currently supported. + * + * @param device The identifier of the target device + * @param utilization Pointer to caller-supplied buffer in which guest process utilization samples are returned + * @param processSamplesCount Pointer to caller-supplied array size, and returns number of processes running + * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. + + * @return + * - \ref NVML_SUCCESS if \a utilization has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t *utilization, + unsigned int *processSamplesCount, unsigned long long lastSeenTimeStamp); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlVgpu vGPU Management + * @{ + * + * This chapter describes APIs supporting NVIDIA vGPU. + */ +/***************************************************************************************************/ + +/** + * Retrieve the supported vGPU types on a physical GPU (device). + * + * An array of supported vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer + * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount + * is used to return the number of vGPU types written to the buffer. + * + * If the supplied buffer is not large enough to accomodate the vGPU type array, the function returns + * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. 
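The two-pass buffer-sizing protocol described above maps to code roughly as follows. This is a sketch only: it assumes the sizing call (with a NULL buffer) reports the required count through \a processSamplesCount and returns either NVML_SUCCESS or NVML_ERROR_INSUFFICIENT_SIZE before the real query is made.

#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Print recent per-process SM utilization for `dev`. */
static void dump_process_utilization(nvmlDevice_t dev)
{
    unsigned int count = 0;

    /* First pass: NULL buffer, just learn how many samples are available. */
    nvmlReturn_t ret = nvmlDeviceGetProcessUtilization(dev, NULL, &count, 0);
    if ((ret != NVML_SUCCESS && ret != NVML_ERROR_INSUFFICIENT_SIZE) || count == 0)
        return;

    nvmlProcessUtilizationSample_t *samples = calloc(count, sizeof(*samples));
    if (samples == NULL)
        return;

    /* Second pass: real query into the sized buffer. */
    ret = nvmlDeviceGetProcessUtilization(dev, samples, &count, 0);
    if (ret == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; i++)
            printf("pid %u: sm %u%%\n", samples[i].pid, samples[i].smUtil);
    }

    free(samples);
}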
+ * To query the number of vGPU types supported for the GPU, call this function with *vgpuCount = 0. + * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are supported. + * + * @param device The identifier of the target device + * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types + * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetSupportedVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); + +/** + * Retrieve the currently creatable vGPU types on a physical GPU (device). + * + * An array of creatable vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer + * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount + * is used to return the number of vGPU types written to the buffer. + * + * The creatable vGPU types for a device may differ over time, as there may be restrictions on what type of vGPU types + * can concurrently run on a device. For example, if only one vGPU type is allowed at a time on a device, then the creatable + * list will be restricted to whatever vGPU type is already running on the device. + * + * If the supplied buffer is not large enough to accomodate the vGPU type array, the function returns + * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. + * To query the number of vGPU types createable for the GPU, call this function with *vgpuCount = 0. + * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are creatable. + * + * @param device The identifier of the target device + * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types + * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetCreatableVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); + +/** + * Retrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator). + * See \ref nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. + * + * For Kepler &tm; or newer fully supported devices. 
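The *vgpuCount = 0 sizing convention above works out to the same two-call pattern. A minimal sketch follows; it assumes that passing NULL for the array is acceptable on the sizing call, that the device handle was obtained as in the first example, and it trims error handling.

#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* List the vGPU type IDs supported by `dev`. */
static void list_supported_vgpu_types(nvmlDevice_t dev)
{
    unsigned int count = 0;

    /* Sizing call: with *vgpuCount == 0 the required count comes back in `count`. */
    nvmlReturn_t ret = nvmlDeviceGetSupportedVgpus(dev, &count, NULL);
    if (ret != NVML_ERROR_INSUFFICIENT_SIZE || count == 0)
        return;                                    /* NVML_SUCCESS here means none supported */

    nvmlVgpuTypeId_t *types = calloc(count, sizeof(*types));
    if (types == NULL)
        return;

    if (nvmlDeviceGetSupportedVgpus(dev, &count, types) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; i++)
            printf("supported vGPU type id: %u\n", types[i]);
    }

    free(types);
}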
+ * + * @param vgpuTypeId Handle to vGPU type + * @param vgpuTypeClass Pointer to string array to return class in + * @param size Size of string + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeClass is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetClass(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeClass, unsigned int *size); + +/** + * Retrieve the vGPU type name. + * + * The name is an alphanumeric string that denotes a particular vGPU, e.g. GRID M60-2Q. It will not + * exceed 64 characters in length (including the NUL terminator). See \ref + * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param vgpuTypeName Pointer to buffer to return name + * @param size Size of buffer + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a name is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetName(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeName, unsigned int *size); + +/** + * Retrieve the GPU Instance Profile ID for the given vGPU type ID. + * The API will return a valid GPU Instance Profile ID for the MIG capable vGPU types, else INVALID_GPU_INSTANCE_PROFILE_ID is + * returned. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param gpuInstanceProfileId GPU Instance Profile ID + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_NOT_SUPPORTED if \a device is not in vGPU Host virtualization mode + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a gpuInstanceProfileId is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetGpuInstanceProfileId(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *gpuInstanceProfileId); + +/** + * Retrieve the device ID of a vGPU type. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param deviceID Device ID and vendor ID of the device contained in single 32 bit value + * @param subsystemID Subsytem ID and subsytem vendor ID of the device contained in single 32 bit value + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a deviceId or \a subsystemID are NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetDeviceID(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *deviceID, unsigned long long *subsystemID); + +/** + * Retrieve the vGPU framebuffer size in bytes. + * + * For Kepler &tm; or newer fully supported devices. 
+ *
+ * @param vgpuTypeId Handle to vGPU type
+ * @param fbSize Pointer to framebuffer size in bytes
+ *
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a fbSize is NULL
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetFramebufferSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *fbSize);
+
+/**
+ * Retrieve count of vGPU's supported display heads.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuTypeId Handle to vGPU type
+ * @param numDisplayHeads Pointer to number of display heads
+ *
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a numDisplayHeads is NULL
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetNumDisplayHeads(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *numDisplayHeads);
+
+/**
+ * Retrieve vGPU display head's maximum supported resolution.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuTypeId Handle to vGPU type
+ * @param displayIndex Zero-based index of display head
+ * @param xdim Pointer to maximum number of pixels in X dimension
+ * @param ydim Pointer to maximum number of pixels in Y dimension
+ *
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a xdim or \a ydim are NULL, or \a displayIndex
+ * is out of range.
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetResolution(nvmlVgpuTypeId_t vgpuTypeId, unsigned int displayIndex, unsigned int *xdim, unsigned int *ydim);
+
+/**
+ * Retrieve license requirements for a vGPU type
+ *
+ * The license type and version required to run the specified vGPU type is returned as an alphanumeric string, in the form
+ * "<license name>,<version>", for example "GRID-Virtual-PC,2.0". If a vGPU is runnable with more than one type of license,
+ * the licenses are delimited by a semicolon, for example "GRID-Virtual-PC,2.0;GRID-Virtual-WS,2.0;GRID-Virtual-WS-Ext,2.0".
+ *
+ * The total length of the returned string will not exceed 128 characters, including the NUL terminator.
+ * See \ref nvmlVgpuConstants::NVML_GRID_LICENSE_BUFFER_SIZE.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuTypeId Handle to vGPU type
+ * @param vgpuTypeLicenseString Pointer to buffer to return license info
+ * @param size Size of \a vgpuTypeLicenseString buffer
+ *
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeLicenseString is NULL
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetLicense(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeLicenseString, unsigned int size);
+
+/**
+ * Retrieve the static frame rate limit value of the vGPU type
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuTypeId Handle to vGPU type
+ * @param frameRateLimit Reference to return the frame rate limit value
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a frameRateLimit is NULL
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetFrameRateLimit(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *frameRateLimit);
+
+/**
+ * Retrieve the maximum number of vGPU instances creatable on a device for a given vGPU type
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param device The identifier of the target device
+ * @param vgpuTypeId Handle to vGPU type
+ * @param vgpuInstanceCount Pointer to get the max number of vGPU instances
+ * that can be created on a device for given vgpuTypeId
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid or is not supported on target device,
+ * or \a vgpuInstanceCount is NULL
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetMaxInstances(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, unsigned int *vgpuInstanceCount);
+
+/**
+ * Retrieve the maximum number of vGPU instances supported per VM for a given vGPU type
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuTypeId Handle to vGPU type
+ * @param vgpuInstanceCountPerVm Pointer to get the max number of vGPU instances supported per VM for given \a vgpuTypeId
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuInstanceCountPerVm is NULL
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuTypeGetMaxInstancesPerVm(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *vgpuInstanceCountPerVm);
+
+/**
+ * Retrieve the active vGPU instances on a device.
+ *
+ * An array of active vGPU instances is returned in the caller-supplied buffer pointed at by \a vgpuInstances. The
+ * array element count is passed in \a vgpuCount, and \a vgpuCount is used to return the number of vGPU instances
+ * written to the buffer.
+ *
+ * If the supplied buffer is not large enough to accommodate the vGPU instance array, the function returns
+ * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuInstance_t array required in \a vgpuCount.
+ * To query the number of active vGPU instances, call this function with *vgpuCount = 0. The code will return
+ * NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU instances are active.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ * + * @param device The identifier of the target device + * @param vgpuCount Pointer which passes in the array size as well as get + * back the number of types + * @param vgpuInstances Pointer to array in which to return list of vGPU instances + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a vgpuCount is NULL + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetActiveVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuInstance_t *vgpuInstances); + +/** + * Retrieve the VM ID associated with a vGPU instance. + * + * The VM ID is returned as a string, not exceeding 80 characters in length (including the NUL terminator). + * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. + * + * The format of the VM ID varies by platform, and is indicated by the type identifier returned in \a vmIdType. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param vmId Pointer to caller-supplied buffer to hold VM ID + * @param size Size of buffer in bytes + * @param vmIdType Pointer to hold VM ID type + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vmId or \a vmIdType is NULL, or \a vgpuInstance is 0 + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmID(nvmlVgpuInstance_t vgpuInstance, char *vmId, unsigned int size, nvmlVgpuVmIdType_t *vmIdType); + +/** + * Retrieve the UUID of a vGPU instance. + * + * The UUID is a globally unique identifier associated with the vGPU, and is returned as a 5-part hexadecimal string, + * not exceeding 80 characters in length (including the NULL terminator). + * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param uuid Pointer to caller-supplied buffer to hold vGPU UUID + * @param size Size of buffer in bytes + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a uuid is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetUUID(nvmlVgpuInstance_t vgpuInstance, char *uuid, unsigned int size); + +/** + * Retrieve the NVIDIA driver version installed in the VM associated with a vGPU. + * + * The version is returned as an alphanumeric string in the caller-supplied buffer \a version. The length of the version + * string will not exceed 80 characters in length (including the NUL terminator). + * See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE. 
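To close the loop on the instance queries above, here is a sketch that enumerates active vGPU instances and prints each UUID. NVML_DEVICE_UUID_BUFFER_SIZE is the buffer-size constant referenced in the comments, the same two-call sizing idiom is assumed (including a NULL array on the sizing call), and the device handle comes from the earlier initialization example.

#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Print the UUID of every vGPU instance currently active on `dev`. */
static void list_active_vgpu_uuids(nvmlDevice_t dev)
{
    unsigned int count = 0;
    nvmlReturn_t ret = nvmlDeviceGetActiveVgpus(dev, &count, NULL);
    if (ret != NVML_ERROR_INSUFFICIENT_SIZE || count == 0)
        return;                                    /* NVML_SUCCESS means no active instances */

    nvmlVgpuInstance_t *instances = calloc(count, sizeof(*instances));
    if (instances == NULL)
        return;

    if (nvmlDeviceGetActiveVgpus(dev, &count, instances) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; i++) {
            char uuid[NVML_DEVICE_UUID_BUFFER_SIZE];
            if (nvmlVgpuInstanceGetUUID(instances[i], uuid, sizeof(uuid)) == NVML_SUCCESS)
                printf("vGPU instance %u: %s\n", instances[i], uuid);
        }
    }

    free(instances);
}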
+ *
+ * nvmlVgpuInstanceGetVmDriverVersion() may be called at any time for a vGPU instance. The guest VM driver version is
+ * returned as "Not Available" if no NVIDIA driver is installed in the VM, or the VM has not yet booted to the point where the
+ * NVIDIA driver is loaded and initialized.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance Identifier of the target vGPU instance
+ * @param version Caller-supplied buffer to return driver version string
+ * @param length Size of \a version buffer
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a version has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance_t vgpuInstance, char* version, unsigned int length);
+
+/**
+ * Retrieve the framebuffer usage in bytes.
+ *
+ * Framebuffer usage is the amount of vGPU framebuffer memory that is currently in use by the VM.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance The identifier of the target instance
+ * @param fbUsage Pointer to framebuffer usage in bytes
+ *
+ * @return
+ * - \ref NVML_SUCCESS successful completion
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a fbUsage is NULL
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance_t vgpuInstance, unsigned long long *fbUsage);
+
+/**
+ * @deprecated Use \ref nvmlVgpuInstanceGetLicenseInfo_v2.
+ *
+ * Retrieve the current licensing state of the vGPU instance.
+ *
+ * If the vGPU is currently licensed, \a licensed is set to 1, otherwise it is set to 0.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance Identifier of the target vGPU instance
+ * @param licensed Reference to return the licensing status
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a licensed has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a licensed is NULL
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseStatus(nvmlVgpuInstance_t vgpuInstance, unsigned int *licensed);
+
+/**
+ * Retrieve the vGPU type of a vGPU instance.
+ *
+ * Returns the vGPU type ID of the vGPU assigned to the vGPU instance.
+ *
+ * For Kepler &tm; or newer fully supported devices.
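+ *
+ * A minimal usage sketch (illustrative only; it assumes \a vgpuInstance refers to an active vGPU instance):
+ * \code
+ * nvmlVgpuTypeId_t vgpuTypeId;
+ * nvmlReturn_t ret = nvmlVgpuInstanceGetType(vgpuInstance, &vgpuTypeId);
+ * // On NVML_SUCCESS, vgpuTypeId identifies the vGPU type backing this instance.
+ * \endcode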
+ *
+ * @param vgpuInstance Identifier of the target vGPU instance
+ * @param vgpuTypeId Reference to return the vgpuTypeId
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a vgpuTypeId has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a vgpuTypeId is NULL
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetType(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuTypeId_t *vgpuTypeId);
+
+/**
+ * Retrieve the frame rate limit set for the vGPU instance.
+ *
+ * Returns the value of the frame rate limit set for the vGPU instance.
+ *
+ * For Kepler &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance Identifier of the target vGPU instance
+ * @param frameRateLimit Reference to return the frame rate limit
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a frameRateLimit has been set
+ * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a frameRateLimit is NULL
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFrameRateLimit(nvmlVgpuInstance_t vgpuInstance, unsigned int *frameRateLimit);
+
+/**
+ * Retrieve the current ECC mode of the vGPU instance.
+ *
+ * @param vgpuInstance The identifier of the target vGPU instance
+ * @param eccMode Reference in which to return the current ECC mode
+ *
+ * @return
+ * - \ref NVML_SUCCESS if the vgpuInstance's ECC mode has been successfully retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a eccMode is NULL
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEccMode(nvmlVgpuInstance_t vgpuInstance, nvmlEnableState_t *eccMode);
+
+/**
+ * Retrieve the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
+ *
+ * For Maxwell &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance Identifier of the target vGPU instance
+ * @param encoderCapacity Reference to an unsigned int for the encoder capacity
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a encoderCapacity has been retrieved
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a encoderCapacity is NULL
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int *encoderCapacity);
+
+/**
+ * Set the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100.
+ *
+ * For Maxwell &tm; or newer fully supported devices.
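+ *
+ * A minimal usage sketch (illustrative only; it assumes \a vgpuInstance refers to an active vGPU instance):
+ * \code
+ * // Limit this vGPU instance to half of the maximum encoder capacity.
+ * nvmlReturn_t ret = nvmlVgpuInstanceSetEncoderCapacity(vgpuInstance, 50);
+ * \endcode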
+ *
+ * @param vgpuInstance Identifier of the target vGPU instance
+ * @param encoderCapacity Unsigned int for the encoder capacity value
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a encoderCapacity has been set
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a encoderCapacity is out of range of 0-100.
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceSetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int encoderCapacity);
+
+/**
+ * Retrieves the current encoder statistics of a vGPU Instance.
+ *
+ * For Maxwell &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance Identifier of the target vGPU instance
+ * @param sessionCount Reference to an unsigned int for count of active encoder sessions
+ * @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions
+ * @param averageLatency Reference to an unsigned int for encode latency in microseconds
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency are fetched
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount, \a averageFps or \a averageLatency is NULL,
+ * or \a vgpuInstance is 0.
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount,
+ unsigned int *averageFps, unsigned int *averageLatency);
+
+/**
+ * Retrieves information about all active encoder sessions on a vGPU Instance.
+ *
+ * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The
+ * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+ * written to the buffer.
+ *
+ * If the supplied buffer is not large enough to accommodate the active session array, the function returns
+ * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount.
+ * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return
+ * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount.
+ *
+ * For Maxwell &tm; or newer fully supported devices.
+ *
+ * @param vgpuInstance Identifier of the target vGPU instance
+ * @param sessionCount Reference to caller supplied array size, and returns
+ * the number of sessions.
+ * @param sessionInfo Reference to caller supplied array in which the list
+ * of session information is returned.
+ *
+ * @return
+ * - \ref NVML_SUCCESS if \a sessionInfo is fetched
+ * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is
+ * returned in \a sessionCount
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL, or \a vgpuInstance is 0.
+ * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfo);
+
+/**
+* Retrieves the active frame buffer capture session statistics of a vGPU Instance.
+*
+* For Maxwell &tm; or newer fully supported devices.
+*
+* @param vgpuInstance Identifier of the target vGPU instance
+* @param fbcStats Reference to nvmlFBCStats_t structure containing NvFBC stats
+*
+* @return
+* - \ref NVML_SUCCESS if \a fbcStats is fetched
+* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a fbcStats is NULL
+* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+* - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFBCStats(nvmlVgpuInstance_t vgpuInstance, nvmlFBCStats_t *fbcStats);
+
+/**
+* Retrieves information about active frame buffer capture sessions on a vGPU Instance.
+*
+* An array of active FBC sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The
+* array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions
+* written to the buffer.
+*
+* If the supplied buffer is not large enough to accommodate the active session array, the function returns
+* NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount.
+* To query the number of active FBC sessions, call this function with *sessionCount = 0. The code will return
+* NVML_SUCCESS with number of active FBC sessions updated in *sessionCount.
+*
+* For Maxwell &tm; or newer fully supported devices.
+*
+* @note hResolution, vResolution, averageFPS and averageLatency data for an FBC session returned in \a sessionInfo may
+* be zero if there are no new frames captured since the session started.
+*
+* @param vgpuInstance Identifier of the target vGPU instance
+* @param sessionCount Reference to caller supplied array size, and returns the number of sessions.
+* @param sessionInfo Reference in which to return the session information
+*
+* @return
+* - \ref NVML_SUCCESS if \a sessionInfo is fetched
+* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a sessionCount is NULL.
+* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+* - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount
+* - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFBCSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, nvmlFBCSessionInfo_t *sessionInfo);
+
+/**
+* Retrieve the GPU Instance ID for the given vGPU Instance.
+* The API will return a valid GPU Instance ID for a MIG-backed vGPU instance, else INVALID_GPU_INSTANCE_ID is returned.
+*
+* For Kepler &tm; or newer fully supported devices.
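+*
+* A minimal usage sketch (illustrative only; it assumes \a vgpuInstance refers to an active vGPU instance):
+* \code
+* unsigned int gpuInstanceId;
+* nvmlReturn_t ret = nvmlVgpuInstanceGetGpuInstanceId(vgpuInstance, &gpuInstanceId);
+* // For a vGPU instance that is not MIG-backed, gpuInstanceId is set to INVALID_GPU_INSTANCE_ID.
+* \endcode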
+*
+* @param vgpuInstance Identifier of the target vGPU instance
+* @param gpuInstanceId GPU Instance ID
+*
+* @return
+* - \ref NVML_SUCCESS successful completion
+* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a gpuInstanceId is NULL.
+* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+* - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetGpuInstanceId(nvmlVgpuInstance_t vgpuInstance, unsigned int *gpuInstanceId);
+
+/**
+* Retrieves the PCI Id of the given vGPU Instance, i.e. the PCI Id of the GPU as seen inside the VM.
+*
+* The vGPU PCI id is returned as "00000000:00:00.0" if NVIDIA driver is not installed on the vGPU instance.
+*
+* @param vgpuInstance Identifier of the target vGPU instance
+* @param vgpuPciId Caller-supplied buffer to return vGPU PCI Id string
+* @param length Size of the vgpuPciId buffer
+*
+* @return
+* - \ref NVML_SUCCESS if vGPU PCI Id is successfully retrieved
+* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
+* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a vgpuPciId is NULL
+* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system
+* - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance
+* - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small, \a length is set to required length
+* - \ref NVML_ERROR_UNKNOWN on any unexpected error
+*/
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetGpuPciId(nvmlVgpuInstance_t vgpuInstance, char *vgpuPciId, unsigned int *length);
+
+/** @} */
+
+/***************************************************************************************************/
+/** @defgroup nvml vGPU Migration
+ * This chapter describes operations that are associated with vGPU Migration.
+ * @{
+ */
+/***************************************************************************************************/
+
+/**
+ * Structure representing range of vGPU versions.
+ */
+typedef struct nvmlVgpuVersion_st
+{
+ unsigned int minVersion; //!< Minimum vGPU version.
+ unsigned int maxVersion; //!< Maximum vGPU version.
+} nvmlVgpuVersion_t;
+
+/**
+ * vGPU metadata structure.
+ */
+typedef struct nvmlVgpuMetadata_st
+{
+ unsigned int version; //!< Current version of the structure
+ unsigned int revision; //!< Current revision of the structure
+ nvmlVgpuGuestInfoState_t guestInfoState; //!< Current state of Guest-dependent fields
+ char guestDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Version of driver installed in guest
+ char hostDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Version of driver installed in host
+ unsigned int reserved[6]; //!< Reserved for internal use
+ unsigned int vgpuVirtualizationCaps; //!< vGPU virtualization capabilities bitfield
+ unsigned int guestVgpuVersion; //!< vGPU version of guest driver
+ unsigned int opaqueDataSize; //!< Size of opaque data field in bytes
+ char opaqueData[4]; //!< Opaque data
+} nvmlVgpuMetadata_t;
+
+/**
+ * Physical GPU metadata structure
+ */
+typedef struct nvmlVgpuPgpuMetadata_st
+{
+ unsigned int version; //!< Current version of the structure
+ unsigned int revision; //!< Current revision of the structure
+ char hostDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Host driver version
+ unsigned int pgpuVirtualizationCaps; //!< Pgpu virtualization capabilities bitfield
+ unsigned int reserved[5]; //!< Reserved for internal use
+ nvmlVgpuVersion_t hostSupportedVgpuRange; //!< vGPU version range supported by host driver
+ unsigned int opaqueDataSize; //!< Size of opaque data field in bytes
+ char opaqueData[4]; //!< Opaque data
+} nvmlVgpuPgpuMetadata_t;
+
+/**
+ * vGPU VM compatibility codes
+ */
+typedef enum nvmlVgpuVmCompatibility_enum
+{
+ NVML_VGPU_VM_COMPATIBILITY_NONE = 0x0, //!< vGPU is not runnable
+ NVML_VGPU_VM_COMPATIBILITY_COLD = 0x1, //!< vGPU is runnable from a cold / powered-off state (ACPI S5)
+ NVML_VGPU_VM_COMPATIBILITY_HIBERNATE = 0x2, //!< vGPU is runnable from a hibernated state (ACPI S4)
+ NVML_VGPU_VM_COMPATIBILITY_SLEEP = 0x4, //!< vGPU is runnable from a sleep state (ACPI S3)
+ NVML_VGPU_VM_COMPATIBILITY_LIVE = 0x8 //!< vGPU is runnable from a live/paused (ACPI S0)
+} nvmlVgpuVmCompatibility_t;
+
+/**
+ * vGPU-pGPU compatibility limit codes
+ */
+typedef enum nvmlVgpuPgpuCompatibilityLimitCode_enum
+{
+ NVML_VGPU_COMPATIBILITY_LIMIT_NONE = 0x0, //!< Compatibility is not limited.
+ NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER = 0x1, //!< Compatibility is limited by host driver version.
+ NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = 0x2, //!< Compatibility is limited by guest driver version.
+ NVML_VGPU_COMPATIBILITY_LIMIT_GPU = 0x4, //!< Compatibility is limited by GPU hardware.
+ NVML_VGPU_COMPATIBILITY_LIMIT_OTHER = 0x80000000 //!< Compatibility is limited by an undefined factor.
+} nvmlVgpuPgpuCompatibilityLimitCode_t;
+
+/**
+ * vGPU-pGPU compatibility structure
+ */
+typedef struct nvmlVgpuPgpuCompatibility_st
+{
+ nvmlVgpuVmCompatibility_t vgpuVmCompatibility; //!< Compatibility of vGPU VM. See \ref nvmlVgpuVmCompatibility_t
+ nvmlVgpuPgpuCompatibilityLimitCode_t compatibilityLimitCode; //!< Limiting factor for vGPU-pGPU compatibility. See \ref nvmlVgpuPgpuCompatibilityLimitCode_t
+} nvmlVgpuPgpuCompatibility_t;
+
+/**
+ * Returns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its associated VM
+ * such as the currently installed NVIDIA guest driver version, together with host driver version and an opaque data section
+ * containing internal state.
+ *
+ * nvmlVgpuInstanceGetMetadata() may be called at any time for a vGPU instance. 
Some fields in the returned structure are + * dependent on information obtained from the guest VM, which may not yet have reached a state where that information + * is available. The current state of these dependent fields is reflected in the info structure's \ref nvmlVgpuGuestInfoState_t field. + * + * The VMM may choose to read and save the vGPU's VM info as persistent metadata associated with the VM, and provide + * it to Virtual GPU Manager when creating a vGPU for subsequent instances of the VM. + * + * The caller passes in a buffer via \a vgpuMetadata, with the size of the buffer in \a bufferSize. If the vGPU Metadata structure + * is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed + * in \a bufferSize. + * + * @param vgpuInstance vGPU instance handle + * @param vgpuMetadata Pointer to caller-supplied buffer into which vGPU metadata is written + * @param bufferSize Size of vgpuMetadata buffer + * + * @return + * - \ref NVML_SUCCESS vGPU metadata structure was successfully returned + * - \ref NVML_ERROR_INSUFFICIENT_SIZE vgpuMetadata buffer is too small, required size is returned in \a bufferSize + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a vgpuInstance is 0; if \a vgpuMetadata is NULL and the value of \a bufferSize is not 0. + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetMetadata(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuMetadata_t *vgpuMetadata, unsigned int *bufferSize); + +/** + * Returns a vGPU metadata structure for the physical GPU indicated by \a device. The structure contains information about + * the GPU and the currently installed NVIDIA host driver version that's controlling it, together with an opaque data section + * containing internal state. + * + * The caller passes in a buffer via \a pgpuMetadata, with the size of the buffer in \a bufferSize. If the \a pgpuMetadata + * structure is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed + * in \a bufferSize. + * + * @param device The identifier of the target device + * @param pgpuMetadata Pointer to caller-supplied buffer into which \a pgpuMetadata is written + * @param bufferSize Pointer to size of \a pgpuMetadata buffer + * + * @return + * - \ref NVML_SUCCESS GPU metadata structure was successfully returned + * - \ref NVML_ERROR_INSUFFICIENT_SIZE pgpuMetadata buffer is too small, required size is returned in \a bufferSize + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0. + * - \ref NVML_ERROR_NOT_SUPPORTED vGPU is not supported by the system + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuMetadata(nvmlDevice_t device, nvmlVgpuPgpuMetadata_t *pgpuMetadata, unsigned int *bufferSize); + +/** + * Takes a vGPU instance metadata structure read from \ref nvmlVgpuInstanceGetMetadata(), and a vGPU metadata structure for a + * physical GPU read from \ref nvmlDeviceGetVgpuMetadata(), and returns compatibility information of the vGPU instance and the + * physical GPU. + * + * The caller passes in a buffer via \a compatibilityInfo, into which a compatibility information structure is written. 
The
+ * structure defines the states in which the vGPU / VM may be booted on the physical GPU. If the vGPU / VM compatibility
+ * with the physical GPU is limited, a limit code indicates the factor limiting compatibility
+ * (see \ref nvmlVgpuPgpuCompatibilityLimitCode_t for details).
+ *
+ * Note: vGPU compatibility does not take into account dynamic capacity conditions that may limit a system's ability to
+ * boot a given vGPU or associated VM.
+ *
+ * @param vgpuMetadata Pointer to caller-supplied vGPU metadata structure
+ * @param pgpuMetadata Pointer to caller-supplied GPU metadata structure
+ * @param compatibilityInfo Pointer to caller-supplied buffer to hold compatibility info
+ *
+ * @return
+ * - \ref NVML_SUCCESS vGPU metadata structure was successfully returned
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuMetadata, \a pgpuMetadata or \a compatibilityInfo are NULL
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlGetVgpuCompatibility(nvmlVgpuMetadata_t *vgpuMetadata, nvmlVgpuPgpuMetadata_t *pgpuMetadata, nvmlVgpuPgpuCompatibility_t *compatibilityInfo);
+
+/**
+ * Returns the properties of the physical GPU indicated by the device in an ASCII-encoded string format.
+ *
+ * The caller passes in a buffer via \a pgpuMetadata, with the size of the buffer in \a bufferSize. If the
+ * string is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed
+ * in \a bufferSize.
+ *
+ * @param device The identifier of the target device
+ * @param pgpuMetadata Pointer to caller-supplied buffer into which \a pgpuMetadata is written
+ * @param bufferSize Pointer to size of \a pgpuMetadata buffer
+ *
+ * @return
+ * - \ref NVML_SUCCESS GPU metadata structure was successfully returned
+ * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a pgpuMetadata buffer is too small, required size is returned in \a bufferSize
+ * - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0.
+ * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the system
+ * - \ref NVML_ERROR_UNKNOWN on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetPgpuMetadataString(nvmlDevice_t device, char *pgpuMetadata, unsigned int *bufferSize);
+
+/*
+ * Virtual GPU (vGPU) version
+ *
+ * The NVIDIA vGPU Manager and the guest drivers are tagged with a range of supported vGPU versions. This determines the range of NVIDIA guest driver versions that
+ * are compatible for vGPU feature support with a given NVIDIA vGPU Manager. For vGPU feature support, the range of supported versions for the NVIDIA vGPU Manager
+ * and the guest driver must overlap. Otherwise, the guest driver fails to load in the VM.
+ *
+ * When the NVIDIA guest driver loads, either when the VM is booted or when the driver is installed or upgraded, a negotiation occurs between the guest driver
+ * and the NVIDIA vGPU Manager to select the highest mutually compatible vGPU version. The negotiated vGPU version stays the same across VM migration.
+ */
+
+/**
+ * Query the ranges of supported vGPU versions.
+ *
+ * This function gets the linear range of supported vGPU versions that is preset for the NVIDIA vGPU Manager and the range set by an administrator.
+ * If the preset range has not been overridden by \ref nvmlSetVgpuVersion, both ranges are the same. 
+ * + * The caller passes pointers to the following \ref nvmlVgpuVersion_t structures, into which the NVIDIA vGPU Manager writes the ranges: + * 1. \a supported structure that represents the preset range of vGPU versions supported by the NVIDIA vGPU Manager. + * 2. \a current structure that represents the range of supported vGPU versions set by an administrator. By default, this range is the same as the preset range. + * + * @param supported Pointer to the structure in which the preset range of vGPU versions supported by the NVIDIA vGPU Manager is written + * @param current Pointer to the structure in which the range of supported vGPU versions set by an administrator is written + * + * @return + * - \ref NVML_SUCCESS The vGPU version range structures were successfully obtained. + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported. + * - \ref NVML_ERROR_INVALID_ARGUMENT The \a supported parameter or the \a current parameter is NULL. + * - \ref NVML_ERROR_UNKNOWN An error occurred while the data was being fetched. + */ +nvmlReturn_t DECLDIR nvmlGetVgpuVersion(nvmlVgpuVersion_t *supported, nvmlVgpuVersion_t *current); + +/** + * Override the preset range of vGPU versions supported by the NVIDIA vGPU Manager with a range set by an administrator. + * + * This function configures the NVIDIA vGPU Manager with a range of supported vGPU versions set by an administrator. This range must be a subset of the + * preset range that the NVIDIA vGPU Manager supports. The custom range set by an administrator takes precedence over the preset range and is advertised to + * the guest VM for negotiating the vGPU version. See \ref nvmlGetVgpuVersion for details of how to query the preset range of versions supported. + * + * This function takes a pointer to vGPU version range structure \ref nvmlVgpuVersion_t as input to override the preset vGPU version range that the NVIDIA vGPU Manager supports. + * + * After host system reboot or driver reload, the range of supported versions reverts to the range that is preset for the NVIDIA vGPU Manager. + * + * @note 1. The range set by the administrator must be a subset of the preset range that the NVIDIA vGPU Manager supports. Otherwise, an error is returned. + * 2. If the range of supported guest driver versions does not overlap the range set by the administrator, the guest driver fails to load. + * 3. If the range of supported guest driver versions overlaps the range set by the administrator, the guest driver will load with a negotiated + * vGPU version that is the maximum value in the overlapping range. + * 4. No VMs must be running on the host when this function is called. If a VM is running on the host, the call to this function fails. + * + * @param vgpuVersion Pointer to a caller-supplied range of supported vGPU versions. + * + * @return + * - \ref NVML_SUCCESS The preset range of supported vGPU versions was successfully overridden. + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported. + * - \ref NVML_ERROR_IN_USE The range was not overridden because a VM is running on the host. + * - \ref NVML_ERROR_INVALID_ARGUMENT The \a vgpuVersion parameter specifies a range that is outside the range supported by the NVIDIA vGPU Manager or if \a vgpuVersion is NULL. 
+ */ +nvmlReturn_t DECLDIR nvmlSetVgpuVersion(nvmlVgpuVersion_t *vgpuVersion); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlUtil vGPU Utilization and Accounting + * This chapter describes operations that are associated with vGPU Utilization and Accounting. + * @{ + */ +/***************************************************************************************************/ + +/** + * Retrieves current utilization for vGPUs on a physical GPU (device). + * + * For Kepler &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for vGPU instances running + * on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer + * pointed at by \a utilizationSamples. One utilization sample structure is returned per vGPU instance, and includes the + * CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values + * in nvmlValue_t unions. The function sets the caller-supplied \a sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to + * indicate the returned value type. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance + * count in \a vgpuInstanceSamplesCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate + * a buffer of size vgpuInstanceSamplesCount * sizeof(nvmlVgpuInstanceUtilizationSample_t). Invoke the function again with + * the allocated buffer passed in \a utilizationSamples, and \a vgpuInstanceSamplesCount set to the number of entries the + * buffer is sized for. + * + * On successful return, the function updates \a vgpuInstanceSampleCount with the number of vGPU utilization sample + * structures that were actually written. This may differ from a previously read value as vGPU instances are created or + * destroyed. + * + * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. + * + * @param device The identifier for the target device + * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. 
+ * @param sampleValType Pointer to caller-supplied buffer to hold the type of returned sample values + * @param vgpuInstanceSamplesCount Pointer to caller-supplied array size, and returns number of vGPU instances + * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU utilization samples are returned + + * @return + * - \ref NVML_SUCCESS if utilization samples are successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuInstanceSamplesCount or \a sampleValType is + * NULL, or a sample count of 0 is passed with a non-NULL \a utilizationSamples + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuInstanceSamplesCount is too small to return samples for all + * vGPU instances currently executing on the device + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, + nvmlValueType_t *sampleValType, unsigned int *vgpuInstanceSamplesCount, + nvmlVgpuInstanceUtilizationSample_t *utilizationSamples); + +/** + * Retrieves current utilization for processes running on vGPUs on a physical GPU (device). + * + * For Maxwell &tm; or newer fully supported devices. + * + * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running on + * vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the + * caller-supplied buffer pointed at by \a utilizationSamples. One utilization sample structure is returned per process running + * on vGPU instances, that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which + * the samples were recorded. Individual utilization values are returned as "unsigned int" values. + * + * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with + * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance + * count in \a vgpuProcessSamplesCount. The caller should allocate a buffer of size + * vgpuProcessSamplesCount * sizeof(nvmlVgpuProcessUtilizationSample_t). Invoke the function again with + * the allocated buffer passed in \a utilizationSamples, and \a vgpuProcessSamplesCount set to the number of entries the + * buffer is sized for. + * + * On successful return, the function updates \a vgpuSubProcessSampleCount with the number of vGPU sub process utilization sample + * structures that were actually written. This may differ from a previously read value depending on the number of processes that are active + * in any given sample period. + * + * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 + * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp + * to a timeStamp retrieved from a previous query to read utilization since the previous query. 
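+ *
+ * A minimal sketch of the sizing pattern described above (illustrative only; it assumes \a device is a
+ * valid device handle and that <stdlib.h> is available):
+ * \code
+ * unsigned int vgpuProcessSamplesCount = 0;
+ * // First call discovers how many sample structures are required.
+ * nvmlReturn_t ret = nvmlDeviceGetVgpuProcessUtilization(device, 0, &vgpuProcessSamplesCount, NULL);
+ * if (ret == NVML_ERROR_INSUFFICIENT_SIZE && vgpuProcessSamplesCount > 0)
+ * {
+ *     nvmlVgpuProcessUtilizationSample_t *samples = malloc(vgpuProcessSamplesCount * sizeof(*samples));
+ *     ret = nvmlDeviceGetVgpuProcessUtilization(device, 0, &vgpuProcessSamplesCount, samples);
+ *     // ... on NVML_SUCCESS, use samples[0..vgpuProcessSamplesCount-1] ...
+ *     free(samples);
+ * }
+ * \endcode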
+ * + * @param device The identifier for the target device + * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. + * @param vgpuProcessSamplesCount Pointer to caller-supplied array size, and returns number of processes running on vGPU instances + * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU sub process utilization samples are returned + + * @return + * - \ref NVML_SUCCESS if utilization samples are successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuProcessSamplesCount or a sample count of 0 is + * passed with a non-NULL \a utilizationSamples + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuProcessSamplesCount is too small to return samples for all + * vGPU instances currently executing on the device + * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuProcessUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, + unsigned int *vgpuProcessSamplesCount, + nvmlVgpuProcessUtilizationSample_t *utilizationSamples); +/** + * Queries the state of per process accounting mode on vGPU. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance The identifier of the target vGPU instance + * @param mode Reference in which to return the current accounting mode + * + * @return + * - \ref NVML_SUCCESS if the mode has been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mode is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature + * - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetAccountingMode(nvmlVgpuInstance_t vgpuInstance, nvmlEnableState_t *mode); + +/** + * Queries list of processes running on vGPU that can be queried for accounting stats. The list of processes + * returned can be in running or terminated state. + * + * For Maxwell &tm; or newer fully supported devices. + * + * To just query the maximum number of processes that can be queried, call this function with *count = 0 and + * pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if list is empty. + * + * For more details see \ref nvmlVgpuInstanceGetAccountingStats. + * + * @note In case of PID collision some processes might not be accessible before the circular buffer is full. 
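+ *
+ * A minimal sketch of the sizing pattern described above (illustrative only; it assumes an active
+ * \a vgpuInstance with accounting mode enabled and that <stdlib.h> is available):
+ * \code
+ * unsigned int count = 0;
+ * nvmlReturn_t ret = nvmlVgpuInstanceGetAccountingPids(vgpuInstance, &count, NULL);
+ * if (ret == NVML_ERROR_INSUFFICIENT_SIZE && count > 0)
+ * {
+ *     unsigned int *pids = malloc(count * sizeof(*pids));
+ *     ret = nvmlVgpuInstanceGetAccountingPids(vgpuInstance, &count, pids);
+ *     // ... on NVML_SUCCESS, pass each pid to nvmlVgpuInstanceGetAccountingStats ...
+ *     free(pids);
+ * }
+ * \endcode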
+ * + * @param vgpuInstance The identifier of the target vGPU instance + * @param count Reference in which to provide the \a pids array size, and + * to return the number of elements ready to be queried + * @param pids Reference in which to return list of process ids + * + * @return + * - \ref NVML_SUCCESS if pids were successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a count is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to expected value) + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlVgpuInstanceGetAccountingPids + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetAccountingPids(nvmlVgpuInstance_t vgpuInstance, unsigned int *count, unsigned int *pids); + +/** + * Queries process's accounting stats. + * + * For Maxwell &tm; or newer fully supported devices. + * + * Accounting stats capture GPU utilization and other statistics across the lifetime of a process, and + * can be queried during life time of the process or after its termination. + * The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and + * updated to actual running time after its termination. + * Accounting stats are kept in a circular buffer, newly created processes overwrite information about old + * processes. + * + * See \ref nvmlAccountingStats_t for description of each returned metric. + * List of processes that can be queried can be retrieved from \ref nvmlVgpuInstanceGetAccountingPids. + * + * @note Accounting Mode needs to be on. See \ref nvmlVgpuInstanceGetAccountingMode. + * @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be + * queried since they don't contribute to GPU utilization. + * @note In case of pid collision stats of only the latest process (that terminated last) will be reported + * + * @param vgpuInstance The identifier of the target vGPU instance + * @param pid Process Id of the target process to query stats for + * @param stats Reference in which to return the process's accounting stats + * + * @return + * - \ref NVML_SUCCESS if stats have been successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a stats is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * or \a stats is not found + * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetAccountingStats(nvmlVgpuInstance_t vgpuInstance, unsigned int pid, nvmlAccountingStats_t *stats); + +/** + * Clears accounting information of the vGPU instance that have already terminated. + * + * For Maxwell &tm; or newer fully supported devices. + * Requires root/admin permissions. + * + * @note Accounting Mode needs to be on. See \ref nvmlVgpuInstanceGetAccountingMode. 
+ * @note Only compute and graphics applications stats are reported and can be cleared since monitoring applications + * stats don't contribute to GPU utilization. + * + * @param vgpuInstance The identifier of the target vGPU instance + * + * @return + * - \ref NVML_SUCCESS if accounting information has been cleared + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is invalid + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceClearAccountingPids(nvmlVgpuInstance_t vgpuInstance); + +/** + * Query the license information of the vGPU instance. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param licenseInfo Pointer to vGPU license information structure + * + * @return + * - \ref NVML_SUCCESS if information is successfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a licenseInfo is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseInfo_v2(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuLicenseInfo_t *licenseInfo); +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlExcludedGpuQueries Excluded GPU Queries + * This chapter describes NVML operations that are associated with excluded GPUs. + * @{ + */ +/***************************************************************************************************/ + +/** + * Excluded GPU device information + **/ +typedef struct nvmlExcludedDeviceInfo_st +{ + nvmlPciInfo_t pciInfo; //!< The PCI information for the excluded GPU + char uuid[NVML_DEVICE_UUID_BUFFER_SIZE]; //!< The ASCII string UUID for the excluded GPU +} nvmlExcludedDeviceInfo_t; + + /** + * Retrieves the number of excluded GPU devices in the system. + * + * For all products. + * + * @param deviceCount Reference in which to return the number of excluded devices + * + * @return + * - \ref NVML_SUCCESS if \a deviceCount has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL + */ +nvmlReturn_t DECLDIR nvmlGetExcludedDeviceCount(unsigned int *deviceCount); + +/** + * Acquire the device information for an excluded GPU device, based on its index. + * + * For all products. + * + * Valid indices are derived from the \a deviceCount returned by + * \ref nvmlGetExcludedDeviceCount(). For example, if \a deviceCount is 2 the valid indices + * are 0 and 1, corresponding to GPU 0 and GPU 1. 
+ * + * @param index The index of the target GPU, >= 0 and < \a deviceCount + * @param info Reference in which to return the device information + * + * @return + * - \ref NVML_SUCCESS if \a device has been set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a info is NULL + * + * @see nvmlGetExcludedDeviceCount + */ +nvmlReturn_t DECLDIR nvmlGetExcludedDeviceInfoByIndex(unsigned int index, nvmlExcludedDeviceInfo_t *info); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlMultiInstanceGPU Multi Instance GPU Management + * This chapter describes NVML operations that are associated with Multi Instance GPU management. + * @{ + */ +/***************************************************************************************************/ + +/** + * Disable Multi Instance GPU mode. + */ +#define NVML_DEVICE_MIG_DISABLE 0x0 + +/** + * Enable Multi Instance GPU mode. + */ +#define NVML_DEVICE_MIG_ENABLE 0x1 + +/** + * GPU instance profiles. + * + * These macros should be passed to \ref nvmlDeviceGetGpuInstanceProfileInfo to retrieve the + * detailed information about a GPU instance such as profile ID, engine counts. + */ +#define NVML_GPU_INSTANCE_PROFILE_1_SLICE 0x0 +#define NVML_GPU_INSTANCE_PROFILE_2_SLICE 0x1 +#define NVML_GPU_INSTANCE_PROFILE_3_SLICE 0x2 +#define NVML_GPU_INSTANCE_PROFILE_4_SLICE 0x3 +#define NVML_GPU_INSTANCE_PROFILE_7_SLICE 0x4 +#define NVML_GPU_INSTANCE_PROFILE_8_SLICE 0x5 +#define NVML_GPU_INSTANCE_PROFILE_6_SLICE 0x6 +#define NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV1 0x7 +#define NVML_GPU_INSTANCE_PROFILE_COUNT 0x8 + +typedef struct nvmlGpuInstancePlacement_st +{ + unsigned int start; //!< Index of first occupied memory slice + unsigned int size; //!< Number of memory slices occupied +} nvmlGpuInstancePlacement_t; + +/** + * GPU instance profile information. + */ +typedef struct nvmlGpuInstanceProfileInfo_st +{ + unsigned int id; //!< Unique profile ID within the device + unsigned int isP2pSupported; //!< Peer-to-Peer support + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< GPU instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int copyEngineCount; //!< Copy Engine count + unsigned int decoderCount; //!< Decoder Engine count + unsigned int encoderCount; //!< Encoder Engine count + unsigned int jpegCount; //!< JPEG Engine count + unsigned int ofaCount; //!< OFA Engine count + unsigned long long memorySizeMB; //!< Memory size in MBytes +} nvmlGpuInstanceProfileInfo_t; + +/** + * GPU instance profile information (v2). + * + * Version 2 adds the \ref nvmlGpuInstanceProfileInfo_v2_t.version field + * to the start of the structure, and the \ref nvmlGpuInstanceProfileInfo_v2_t.name + * field to the end. This structure is not backwards-compatible with + * \ref nvmlGpuInstanceProfileInfo_t. 
+ */ +typedef struct nvmlGpuInstanceProfileInfo_v2_st +{ + unsigned int version; //!< Structure version identifier (set to \ref nvmlGpuInstanceProfileInfo_v2) + unsigned int id; //!< Unique profile ID within the device + unsigned int isP2pSupported; //!< Peer-to-Peer support + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< GPU instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int copyEngineCount; //!< Copy Engine count + unsigned int decoderCount; //!< Decoder Engine count + unsigned int encoderCount; //!< Encoder Engine count + unsigned int jpegCount; //!< JPEG Engine count + unsigned int ofaCount; //!< OFA Engine count + unsigned long long memorySizeMB; //!< Memory size in MBytes + char name[NVML_DEVICE_NAME_V2_BUFFER_SIZE]; //!< Profile name +} nvmlGpuInstanceProfileInfo_v2_t; + +/** + * Version identifier value for \ref nvmlGpuInstanceProfileInfo_v2_t.version. + */ +#define nvmlGpuInstanceProfileInfo_v2 NVML_STRUCT_VERSION(GpuInstanceProfileInfo, 2) + +typedef struct nvmlGpuInstanceInfo_st +{ + nvmlDevice_t device; //!< Parent device + unsigned int id; //!< Unique instance ID within the device + unsigned int profileId; //!< Unique profile ID within the device + nvmlGpuInstancePlacement_t placement; //!< Placement for this instance +} nvmlGpuInstanceInfo_t; + +typedef struct +{ + struct nvmlGpuInstance_st* handle; +} nvmlGpuInstance_t; + +/** + * Compute instance profiles. + * + * These macros should be passed to \ref nvmlGpuInstanceGetComputeInstanceProfileInfo to retrieve the + * detailed information about a compute instance such as profile ID, engine counts + */ +#define NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE 0x0 +#define NVML_COMPUTE_INSTANCE_PROFILE_2_SLICE 0x1 +#define NVML_COMPUTE_INSTANCE_PROFILE_3_SLICE 0x2 +#define NVML_COMPUTE_INSTANCE_PROFILE_4_SLICE 0x3 +#define NVML_COMPUTE_INSTANCE_PROFILE_7_SLICE 0x4 +#define NVML_COMPUTE_INSTANCE_PROFILE_8_SLICE 0x5 +#define NVML_COMPUTE_INSTANCE_PROFILE_6_SLICE 0x6 +#define NVML_COMPUTE_INSTANCE_PROFILE_COUNT 0x7 + +#define NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED 0x0 //!< All the engines except multiprocessors would be shared +#define NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT 0x1 + +typedef struct nvmlComputeInstancePlacement_st +{ + unsigned int start; //!< Index of first occupied compute slice + unsigned int size; //!< Number of compute slices occupied +} nvmlComputeInstancePlacement_t; + +/** + * Compute instance profile information. + */ +typedef struct nvmlComputeInstanceProfileInfo_st +{ + unsigned int id; //!< Unique profile ID within the GPU instance + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< Compute instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int sharedCopyEngineCount; //!< Shared Copy Engine count + unsigned int sharedDecoderCount; //!< Shared Decoder Engine count + unsigned int sharedEncoderCount; //!< Shared Encoder Engine count + unsigned int sharedJpegCount; //!< Shared JPEG Engine count + unsigned int sharedOfaCount; //!< Shared OFA Engine count +} nvmlComputeInstanceProfileInfo_t; + +/** + * Compute instance profile information (v2). + * + * Version 2 adds the \ref nvmlComputeInstanceProfileInfo_v2_t.version field + * to the start of the structure, and the \ref nvmlComputeInstanceProfileInfo_v2_t.name + * field to the end. This structure is not backwards-compatible with + * \ref nvmlComputeInstanceProfileInfo_t. 
+ */ +typedef struct nvmlComputeInstanceProfileInfo_v2_st +{ + unsigned int version; //!< Structure version identifier (set to \ref nvmlComputeInstanceProfileInfo_v2) + unsigned int id; //!< Unique profile ID within the GPU instance + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< Compute instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int sharedCopyEngineCount; //!< Shared Copy Engine count + unsigned int sharedDecoderCount; //!< Shared Decoder Engine count + unsigned int sharedEncoderCount; //!< Shared Encoder Engine count + unsigned int sharedJpegCount; //!< Shared JPEG Engine count + unsigned int sharedOfaCount; //!< Shared OFA Engine count + char name[NVML_DEVICE_NAME_V2_BUFFER_SIZE]; //!< Profile name +} nvmlComputeInstanceProfileInfo_v2_t; + +/** + * Version identifier value for \ref nvmlComputeInstanceProfileInfo_v2_t.version. + */ +#define nvmlComputeInstanceProfileInfo_v2 NVML_STRUCT_VERSION(ComputeInstanceProfileInfo, 2) + +typedef struct nvmlComputeInstanceInfo_st +{ + nvmlDevice_t device; //!< Parent device + nvmlGpuInstance_t gpuInstance; //!< Parent GPU instance + unsigned int id; //!< Unique instance ID within the GPU instance + unsigned int profileId; //!< Unique profile ID within the GPU instance + nvmlComputeInstancePlacement_t placement; //!< Placement for this instance within the GPU instance's compute slice range {0, sliceCount} +} nvmlComputeInstanceInfo_t; + +typedef struct +{ + struct nvmlComputeInstance_st* handle; +} nvmlComputeInstance_t; + +/** + * Set MIG mode for the device. + * + * For Ampere &tm; or newer fully supported devices. + * Requires root user. + * + * This mode determines whether a GPU instance can be created. + * + * This API may unbind or reset the device to activate the requested mode. Thus, the attributes associated with the + * device, such as minor number, might change. The caller of this API is expected to query such attributes again. + * + * On certain platforms like pass-through virtualization, where reset functionality may not be exposed directly, VM + * reboot is required. \a activationStatus would return \ref NVML_ERROR_RESET_REQUIRED for such cases. + * + * \a activationStatus would return the appropriate error code upon unsuccessful activation. For example, if device + * unbind fails because the device isn't idle, \ref NVML_ERROR_IN_USE would be returned. The caller of this API + * is expected to idle the device and retry setting the \a mode. + * + * @note On Windows, only disabling MIG mode is supported. \a activationStatus would return \ref + * NVML_ERROR_NOT_SUPPORTED as GPU reset is not supported on Windows through this API. + * + * @param device The identifier of the target device + * @param mode The mode to be set, \ref NVML_DEVICE_MIG_DISABLE or + * \ref NVML_DEVICE_MIG_ENABLE + * @param activationStatus The activationStatus status + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device,\a mode or \a activationStatus are invalid + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG mode + */ +nvmlReturn_t DECLDIR nvmlDeviceSetMigMode(nvmlDevice_t device, unsigned int mode, nvmlReturn_t *activationStatus); + +/** + * Get MIG mode for the device. + * + * For Ampere &tm; or newer fully supported devices. 
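+ *
+ * A minimal usage sketch (illustrative only; it assumes \a device is a valid device handle):
+ * \code
+ * unsigned int currentMode = NVML_DEVICE_MIG_DISABLE;
+ * unsigned int pendingMode = NVML_DEVICE_MIG_DISABLE;
+ * nvmlReturn_t ret = nvmlDeviceGetMigMode(device, &currentMode, &pendingMode);
+ * // On NVML_SUCCESS, currentMode reflects the active setting and pendingMode the
+ * // setting that takes effect after the next activation trigger (see below).
+ * \endcode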
+ * + * Changing MIG modes may require device unbind or reset. The "pending" MIG mode refers to the target mode following the + * next activation trigger. + * + * @param device The identifier of the target device + * @param currentMode Returns the current mode, \ref NVML_DEVICE_MIG_DISABLE or + * \ref NVML_DEVICE_MIG_ENABLE + * @param pendingMode Returns the pending mode, \ref NVML_DEVICE_MIG_DISABLE or + * \ref NVML_DEVICE_MIG_ENABLE + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a currentMode or \a pendingMode are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support MIG mode + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMigMode(nvmlDevice_t device, unsigned int *currentMode, unsigned int *pendingMode); + +/** + * Get GPU instance profile information. + * + * Information provided by this API is immutable throughout the lifetime of a MIG mode. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * @param profile One of the NVML_GPU_INSTANCE_PROFILE_* + * @param info Returns detailed profile information + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile or \a info are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profile isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceProfileInfo(nvmlDevice_t device, unsigned int profile, + nvmlGpuInstanceProfileInfo_t *info); + +/** + * Versioned wrapper around \ref nvmlDeviceGetGpuInstanceProfileInfo that accepts a versioned + * \ref nvmlGpuInstanceProfileInfo_v2_t or later output structure. + * + * @note The caller must set the \ref nvmlGpuInstanceProfileInfo_v2_t.version field to the + * appropriate version prior to calling this function. For example: + * \code + * nvmlGpuInstanceProfileInfo_v2_t profileInfo = + * { .version = nvmlGpuInstanceProfileInfo_v2 }; + * nvmlReturn_t result = nvmlDeviceGetGpuInstanceProfileInfoV(device, + * profile, + * &profileInfo); + * \endcode + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * @param profile One of the NVML_GPU_INSTANCE_PROFILE_* + * @param info Returns detailed profile information + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a info, or \a info->version are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profile isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceProfileInfoV(nvmlDevice_t device, unsigned int profile, + nvmlGpuInstanceProfileInfo_v2_t *info); + +/** + * Get GPU instance placements. + * + * A placement represents the location of a GPU instance within a device. This API only returns all the possible + * placements for the given profile. + * A created GPU instance occupies memory slices described by its placement. 
Creation of a new GPU instance will
+ * fail if there is overlap with the already occupied memory slices.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * @param device                               The identifier of the target device
+ * @param profileId                            The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ * @param placements                           Returns placements allowed for the profile. Can be NULL to discover number
+ *                                             of allowed placements for this profile. If non-NULL must be large enough
+ *                                             to accommodate the placements supported by the profile.
+ * @param count                                Returns number of allowed placements for the profile.
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a device, \a profileId or \a count are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a device doesn't have MIG mode enabled or \a profileId isn't supported
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstancePossiblePlacements_v2(nvmlDevice_t device, unsigned int profileId,
+                                                                   nvmlGpuInstancePlacement_t *placements,
+                                                                   unsigned int *count);
+
+/**
+ * Get GPU instance profile capacity.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * @param device                               The identifier of the target device
+ * @param profileId                            The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ * @param count                                Returns remaining instance count for the profile ID
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a device, \a profileId or \a count are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a device doesn't have MIG mode enabled or \a profileId isn't supported
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceRemainingCapacity(nvmlDevice_t device, unsigned int profileId,
+                                                               unsigned int *count);
+
+/**
+ * Create GPU instance.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * If the parent device is unbound, reset or the GPU instance is destroyed explicitly, the GPU instance handle would
+ * become invalid. The GPU instance must be recreated to acquire a valid handle.
+ *
+ * @param device                               The identifier of the target device
+ * @param profileId                            The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ * @param gpuInstance                          Returns the GPU instance handle
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a device, \a profileId or \a gpuInstance are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a device doesn't have MIG mode enabled or in vGPU guest
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ *         - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested GPU instance could not be created
+ */
+nvmlReturn_t DECLDIR nvmlDeviceCreateGpuInstance(nvmlDevice_t device, unsigned int profileId,
+                                                 nvmlGpuInstance_t *gpuInstance);
+
+/**
+ * Create GPU instance with the specified placement.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * If the parent device is unbound, reset or the GPU instance is destroyed explicitly, the GPU instance handle would
+ * become invalid. The GPU instance must be recreated to acquire a valid handle.
+ *
+ * @param device                               The identifier of the target device
+ * @param profileId                            The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ * @param placement                            The requested placement. See \ref nvmlDeviceGetGpuInstancePossiblePlacements_v2
+ * @param gpuInstance                          Returns the GPU instance handle
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a device, \a profileId, \a placement or \a gpuInstance
+ *                                             are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a device doesn't have MIG mode enabled or in vGPU guest
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ *         - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested GPU instance could not be created
+ */
+nvmlReturn_t DECLDIR nvmlDeviceCreateGpuInstanceWithPlacement(nvmlDevice_t device, unsigned int profileId,
+                                                              const nvmlGpuInstancePlacement_t *placement,
+                                                              nvmlGpuInstance_t *gpuInstance);
+/**
+ * Destroy GPU instance.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * @param gpuInstance                          The GPU instance handle
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a gpuInstance is invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a device doesn't have MIG mode enabled or in vGPU guest
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ *         - \ref NVML_ERROR_IN_USE            If the GPU instance is in use. This error would be returned if processes
+ *                                             (e.g. CUDA application) or compute instances are active on the
+ *                                             GPU instance.
+ */
+nvmlReturn_t DECLDIR nvmlGpuInstanceDestroy(nvmlGpuInstance_t gpuInstance);
+
+/**
+ * Get GPU instances for given profile ID.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * @param device                               The identifier of the target device
+ * @param profileId                            The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ * @param gpuInstances                         Returns pre-existing GPU instances; the buffer must be large enough to
+ *                                             accommodate the instances supported by the profile.
+ *                                             See \ref nvmlDeviceGetGpuInstanceProfileInfo
+ * @param count                                The count of returned GPU instances
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a device, \a profileId, \a gpuInstances or \a count are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a device doesn't have MIG mode enabled
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstances(nvmlDevice_t device, unsigned int profileId,
+                                               nvmlGpuInstance_t *gpuInstances, unsigned int *count);
+
+/**
+ * Get GPU instance for the given instance ID.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * @param device                               The identifier of the target device
+ * @param id                                   The GPU instance ID
+ * @param gpuInstance                          Returns GPU instance
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a device, \a id or \a gpuInstance are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a device doesn't have MIG mode enabled
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ *         - \ref NVML_ERROR_NOT_FOUND         If the GPU instance is not found.
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceById(nvmlDevice_t device, unsigned int id, nvmlGpuInstance_t *gpuInstance);
+
+/**
+ * Get GPU instance information.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ *
+ * @param gpuInstance                          The GPU instance handle
+ * @param info                                 Return GPU instance information
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a gpuInstance or \a info are invalid
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ */
+nvmlReturn_t DECLDIR nvmlGpuInstanceGetInfo(nvmlGpuInstance_t gpuInstance, nvmlGpuInstanceInfo_t *info);
+
+/**
+ * Get compute instance profile information.
+ *
+ * Information provided by this API is immutable throughout the lifetime of a MIG mode.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ *
+ * @param gpuInstance                          The identifier of the target GPU instance
+ * @param profile                              One of the NVML_COMPUTE_INSTANCE_PROFILE_*
+ * @param engProfile                           One of the NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_*
+ * @param info                                 Returns detailed profile information
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a gpuInstance, \a profile, \a engProfile or \a info are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a profile isn't supported
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ */
+nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceProfileInfo(nvmlGpuInstance_t gpuInstance, unsigned int profile,
+                                                                  unsigned int engProfile,
+                                                                  nvmlComputeInstanceProfileInfo_t *info);
+
+/**
+ * Versioned wrapper around \ref nvmlGpuInstanceGetComputeInstanceProfileInfo that accepts a versioned
+ * \ref nvmlComputeInstanceProfileInfo_v2_t or later output structure.
+ *
+ * @note The caller must set the \ref nvmlComputeInstanceProfileInfo_v2_t.version field to the
+ * appropriate version prior to calling this function. For example:
+ * \code
+ *     nvmlComputeInstanceProfileInfo_v2_t profileInfo =
+ *         { .version = nvmlComputeInstanceProfileInfo_v2 };
+ *     nvmlReturn_t result = nvmlGpuInstanceGetComputeInstanceProfileInfoV(gpuInstance,
+ *                                                                         profile,
+ *                                                                         engProfile,
+ *                                                                         &profileInfo);
+ * \endcode
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ *
+ * @param gpuInstance                          The identifier of the target GPU instance
+ * @param profile                              One of the NVML_COMPUTE_INSTANCE_PROFILE_*
+ * @param engProfile                           One of the NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_*
+ * @param info                                 Returns detailed profile information
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a gpuInstance, \a profile, \a engProfile, \a info, or \a info->version are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a profile isn't supported
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ */
+nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceProfileInfoV(nvmlGpuInstance_t gpuInstance, unsigned int profile,
+                                                                   unsigned int engProfile,
+                                                                   nvmlComputeInstanceProfileInfo_v2_t *info);
+
+/**
+ * Get compute instance profile capacity.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * @param gpuInstance                          The identifier of the target GPU instance
+ * @param profileId                            The compute instance profile ID.
+ *                                             See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ * @param count                                Returns remaining instance count for the profile ID
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a gpuInstance, \a profileId or \a count are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a profileId isn't supported
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ */
+nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceRemainingCapacity(nvmlGpuInstance_t gpuInstance,
+                                                                        unsigned int profileId, unsigned int *count);
+
+/**
+ * Create compute instance.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * If the parent device is unbound, reset or the parent GPU instance is destroyed or the compute instance is destroyed
+ * explicitly, the compute instance handle would become invalid. The compute instance must be recreated to acquire
+ * a valid handle.
+ *
+ * @param gpuInstance                          The identifier of the target GPU instance
+ * @param profileId                            The compute instance profile ID.
+ *                                             See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ * @param computeInstance                      Returns the compute instance handle
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a gpuInstance, \a profileId or \a computeInstance
+ *                                             are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a profileId isn't supported
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ *         - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested compute instance could not be created
+ */
+nvmlReturn_t DECLDIR nvmlGpuInstanceCreateComputeInstance(nvmlGpuInstance_t gpuInstance, unsigned int profileId,
+                                                          nvmlComputeInstance_t *computeInstance);
+
+/**
+ * Destroy compute instance.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * @param computeInstance                      The compute instance handle
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a computeInstance is invalid
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ *         - \ref NVML_ERROR_IN_USE            If the compute instance is in use. This error would be returned if
+ *                                             processes (e.g. CUDA application) are active on the compute instance.
+ */
+nvmlReturn_t DECLDIR nvmlComputeInstanceDestroy(nvmlComputeInstance_t computeInstance);
+
+/**
+ * Get compute instances for given profile ID.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * @param gpuInstance                          The identifier of the target GPU instance
+ * @param profileId                            The compute instance profile ID.
+ *                                             See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ * @param computeInstances                     Returns pre-existing compute instances; the buffer must be large enough to
+ *                                             accommodate the instances supported by the profile.
+ *                                             See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo
+ * @param count                                The count of returned compute instances
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a gpuInstance, \a profileId, \a computeInstances or \a count
+ *                                             are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If \a profileId isn't supported
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ */
+nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstances(nvmlGpuInstance_t gpuInstance, unsigned int profileId,
+                                                        nvmlComputeInstance_t *computeInstances, unsigned int *count);
+
+/**
+ * Get compute instance for given instance ID.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * Requires privileged user.
+ *
+ * @param gpuInstance                          The identifier of the target GPU instance
+ * @param id                                   The compute instance ID
+ * @param computeInstance                      Returns compute instance
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 Upon success
+ *         - \ref NVML_ERROR_UNINITIALIZED     If library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  If \a gpuInstance, \a id or \a computeInstance are invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     If the parent device doesn't have MIG mode enabled
+ *         - \ref NVML_ERROR_NO_PERMISSION     If user doesn't have permission to perform the operation
+ *         - \ref NVML_ERROR_NOT_FOUND         If the compute instance is not found.
+ */
+nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceById(nvmlGpuInstance_t gpuInstance, unsigned int id,
+                                                           nvmlComputeInstance_t *computeInstance);
+
+/**
+ * Get compute instance information.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ * + * @param computeInstance The compute instance handle + * @param info Return compute instance information + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a computeInstance or \a info are invalid + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlComputeInstanceGetInfo_v2(nvmlComputeInstance_t computeInstance, nvmlComputeInstanceInfo_t *info); + +/** + * Test if the given handle refers to a MIG device. + * + * A MIG device handle is an NVML abstraction which maps to a MIG compute instance. + * These overloaded references can be used (with some restrictions) interchangeably + * with a GPU device handle to execute queries at a per-compute instance granularity. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device NVML handle to test + * @param isMigDevice True when handle refers to a MIG device + * + * @return + * - \ref NVML_SUCCESS if \a device status was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device handle or \a isMigDevice reference is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceIsMigDeviceHandle(nvmlDevice_t device, unsigned int *isMigDevice); + +/** + * Get GPU instance ID for the given MIG device handle. + * + * GPU instance IDs are unique per device and remain valid until the GPU instance is destroyed. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device Target MIG device handle + * @param id GPU instance ID + * + * @return + * - \ref NVML_SUCCESS if instance ID was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a id reference is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceId(nvmlDevice_t device, unsigned int *id); + +/** + * Get compute instance ID for the given MIG device handle. + * + * Compute instance IDs are unique per GPU instance and remain valid until the compute instance + * is destroyed. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device Target MIG device handle + * @param id Compute instance ID + * + * @return + * - \ref NVML_SUCCESS if instance ID was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a id reference is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetComputeInstanceId(nvmlDevice_t device, unsigned int *id); + +/** + * Get the maximum number of MIG devices that can exist under a given parent NVML device. + * + * Returns zero if MIG is not supported or enabled. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. 
+ *
+ * @param device                               Target device handle
+ * @param count                                Count of MIG devices
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a count was successfully retrieved
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device or \a count reference is invalid
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMaxMigDeviceCount(nvmlDevice_t device, unsigned int *count);
+
+/**
+ * Get MIG device handle for the given index under its parent NVML device.
+ *
+ * If the compute instance is destroyed either explicitly or by destroying,
+ * resetting or unbinding the parent GPU instance or the GPU device itself,
+ * the MIG device handle becomes invalid and must be requested again
+ * using this API. Handles may be reused and their properties can change in
+ * the process.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ *
+ * @param device                               Reference to the parent GPU device handle
+ * @param index                                Index of the MIG device
+ * @param migDevice                            Reference to the MIG device handle
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a migDevice handle was successfully created
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device, \a index or \a migDevice reference is invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if this query is not supported by the device
+ *         - \ref NVML_ERROR_NOT_FOUND         if no valid MIG device was found at \a index
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t device, unsigned int index,
+                                                         nvmlDevice_t *migDevice);
+
+/**
+ * Get parent device handle from a MIG device handle.
+ *
+ * For Ampere &tm; or newer fully supported devices.
+ * Supported on Linux only.
+ *
+ * @param migDevice                            MIG device handle
+ * @param device                               Device handle
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a device handle was successfully created
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a migDevice or \a device is invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if this query is not supported by the device
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t migDevice, nvmlDevice_t *device);
+
+/**
+ * Get the type of the GPU Bus (PCIe, PCI, ...)
+ *
+ * @param device                               The identifier of the target device
+ * @param type                                 The PCI Bus type
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if the bus \a type is successfully retrieved
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a type is NULL
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetBusType(nvmlDevice_t device, nvmlBusType_t *type);
+
+/** @} */
+
+/**
+ * NVML API versioning support
+ */
+
+#ifdef NVML_NO_UNVERSIONED_FUNC_DEFS
+nvmlReturn_t DECLDIR nvmlInit(void);
+nvmlReturn_t DECLDIR nvmlDeviceGetCount(unsigned int *deviceCount);
+nvmlReturn_t DECLDIR nvmlDeviceGetHandleByIndex(unsigned int index, nvmlDevice_t *device);
+nvmlReturn_t DECLDIR nvmlDeviceGetHandleByPciBusId(const char *pciBusId, nvmlDevice_t *device);
+nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo(nvmlDevice_t device, nvmlPciInfo_t *pci);
+nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo_v2(nvmlDevice_t device, nvmlPciInfo_t *pci);
+nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemotePciInfo(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t *pci);
+nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures);
+nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v2(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures);
+nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v3(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures);
+nvmlReturn_t DECLDIR nvmlDeviceRemoveGpu(nvmlPciInfo_t *pciInfo);
+nvmlReturn_t DECLDIR nvmlEventSetWait(nvmlEventSet_t set, nvmlEventData_t * data, unsigned int timeoutms);
+nvmlReturn_t DECLDIR nvmlDeviceGetAttributes(nvmlDevice_t device, nvmlDeviceAttributes_t *attributes);
+nvmlReturn_t DECLDIR nvmlComputeInstanceGetInfo(nvmlComputeInstance_t computeInstance, nvmlComputeInstanceInfo_t *info);
+nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v1_t *infos);
+nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses_v2(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v2_t *infos);
+nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v1_t *infos);
+nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses_v2(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v2_t *infos);
+nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v1_t *infos);
+nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses_v2(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v2_t *infos);
+nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstancePossiblePlacements(nvmlDevice_t device, unsigned int profileId, nvmlGpuInstancePlacement_t *placements, unsigned int *count);
+nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseInfo(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuLicenseInfo_t *licenseInfo);
+#endif // #ifdef NVML_NO_UNVERSIONED_FUNC_DEFS
+
+#if defined(NVML_NO_UNVERSIONED_FUNC_DEFS)
+// We don't define APIs to run new versions if this guard is present so there is
+// no need to undef
+#elif defined(__NVML_API_VERSION_INTERNAL)
+#undef nvmlDeviceGetGraphicsRunningProcesses
+#undef nvmlDeviceGetComputeRunningProcesses
+#undef nvmlDeviceGetMPSComputeRunningProcesses
+#undef nvmlDeviceGetAttributes
+#undef nvmlComputeInstanceGetInfo
+#undef nvmlEventSetWait +#undef nvmlDeviceGetGridLicensableFeatures +#undef nvmlDeviceRemoveGpu +#undef nvmlDeviceGetNvLinkRemotePciInfo +#undef nvmlDeviceGetPciInfo +#undef nvmlDeviceGetCount +#undef nvmlDeviceGetHandleByIndex +#undef nvmlDeviceGetHandleByPciBusId +#undef nvmlInit +#undef nvmlBlacklistDeviceInfo_t +#undef nvmlGetBlacklistDeviceCount +#undef nvmlGetBlacklistDeviceInfoByIndex +#undef nvmlDeviceGetGpuInstancePossiblePlacements +#undef nvmlVgpuInstanceGetLicenseInfo +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/return.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/return.go new file mode 100644 index 0000000..fdf1191 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/return.go @@ -0,0 +1,20 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +// nvml.ErrorString() +func ErrorString(Result Return) string { + return nvmlErrorString(Result) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go new file mode 100644 index 0000000..424f99b --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go @@ -0,0 +1,81 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nvml + +// nvml.SystemGetDriverVersion() +func SystemGetDriverVersion() (string, Return) { + Version := make([]byte, SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + ret := nvmlSystemGetDriverVersion(&Version[0], SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +// nvml.SystemGetNVMLVersion() +func SystemGetNVMLVersion() (string, Return) { + Version := make([]byte, SYSTEM_NVML_VERSION_BUFFER_SIZE) + ret := nvmlSystemGetNVMLVersion(&Version[0], SYSTEM_NVML_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +// nvml.SystemGetCudaDriverVersion() +func SystemGetCudaDriverVersion() (int, Return) { + var CudaDriverVersion int32 + ret := nvmlSystemGetCudaDriverVersion(&CudaDriverVersion) + return int(CudaDriverVersion), ret +} + +// nvml.SystemGetCudaDriverVersion_v2() +func SystemGetCudaDriverVersion_v2() (int, Return) { + var CudaDriverVersion int32 + ret := nvmlSystemGetCudaDriverVersion_v2(&CudaDriverVersion) + return int(CudaDriverVersion), ret +} + +// nvml.SystemGetProcessName() +func SystemGetProcessName(Pid int) (string, Return) { + Name := make([]byte, SYSTEM_PROCESS_NAME_BUFFER_SIZE) + ret := nvmlSystemGetProcessName(uint32(Pid), &Name[0], SYSTEM_PROCESS_NAME_BUFFER_SIZE) + return string(Name[:clen(Name)]), ret +} + +// nvml.SystemGetHicVersion() +func SystemGetHicVersion() ([]HwbcEntry, Return) { + var HwbcCount uint32 = 1 // Will be reduced upon returning + for { + HwbcEntries := make([]HwbcEntry, HwbcCount) + ret := nvmlSystemGetHicVersion(&HwbcCount, &HwbcEntries[0]) + if ret == SUCCESS { + return HwbcEntries[:HwbcCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + HwbcCount *= 2 + } +} + +// nvml.SystemGetTopologyGpuSet() +func SystemGetTopologyGpuSet(CpuNumber int) ([]Device, Return) { + var Count uint32 + ret := nvmlSystemGetTopologyGpuSet(uint32(CpuNumber), &Count, nil) + if ret != SUCCESS { + return nil, ret + } + if Count == 0 { + return []Device{}, ret + } + DeviceArray := make([]Device, Count) + ret = nvmlSystemGetTopologyGpuSet(uint32(CpuNumber), &Count, &DeviceArray[0]) + return DeviceArray, ret +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go new file mode 100644 index 0000000..573c929 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go @@ -0,0 +1,445 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs types.go + +package nvml + +import "unsafe" + +type Device struct { + Handle *_Ctype_struct_nvmlDevice_st +} + +type PciInfo struct { + BusIdLegacy [16]int8 + Domain uint32 + Bus uint32 + Device uint32 + PciDeviceId uint32 + PciSubSystemId uint32 + BusId [32]int8 +} + +type EccErrorCounts struct { + L1Cache uint64 + L2Cache uint64 + DeviceMemory uint64 + RegisterFile uint64 +} + +type Utilization struct { + Gpu uint32 + Memory uint32 +} + +type Memory struct { + Total uint64 + Free uint64 + Used uint64 +} + +type Memory_v2 struct { + Version uint32 + Total uint64 + Reserved uint64 + Free uint64 + Used uint64 +} + +type BAR1Memory struct { + Bar1Total uint64 + Bar1Free uint64 + Bar1Used uint64 +} + +type ProcessInfo_v1 struct { + Pid uint32 + UsedGpuMemory uint64 +} + +type ProcessInfo_v2 struct { + Pid uint32 + UsedGpuMemory uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 +} + +type ProcessInfo struct { + Pid uint32 + UsedGpuMemory uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 +} + +type DeviceAttributes struct { + MultiprocessorCount uint32 + SharedCopyEngineCount uint32 + SharedDecoderCount uint32 + SharedEncoderCount uint32 + SharedJpegCount uint32 + SharedOfaCount uint32 + GpuInstanceSliceCount uint32 + ComputeInstanceSliceCount uint32 + MemorySizeMB uint64 +} + +type RowRemapperHistogramValues struct { + Max uint32 + High uint32 + Partial uint32 + Low uint32 + None uint32 +} + +type NvLinkUtilizationControl struct { + Units uint32 + Pktfilter uint32 +} + +type BridgeChipInfo struct { + Type uint32 + FwVersion uint32 +} + +type BridgeChipHierarchy struct { + BridgeCount uint8 + BridgeChipInfo [128]BridgeChipInfo +} + +const sizeofValue = unsafe.Sizeof([8]byte{}) + +type Value [sizeofValue]byte + +type Sample struct { + TimeStamp uint64 + SampleValue [8]byte +} + +type ViolationTime struct { + ReferenceTime uint64 + ViolationTime uint64 +} + +type ClkMonFaultInfo struct { + ClkApiDomain uint32 + ClkDomainFaultMask uint32 +} + +type ClkMonStatus struct { + BGlobalStatus uint32 + ClkMonListSize uint32 + ClkMonList [32]ClkMonFaultInfo +} + +type VgpuTypeId uint32 + +type VgpuInstance uint32 + +type VgpuInstanceUtilizationSample struct { + VgpuInstance uint32 + TimeStamp uint64 + SmUtil [8]byte + MemUtil [8]byte + EncUtil [8]byte + DecUtil [8]byte +} + +type VgpuProcessUtilizationSample struct { + VgpuInstance uint32 + Pid uint32 + ProcessName [64]int8 + TimeStamp uint64 + SmUtil uint32 + MemUtil uint32 + EncUtil uint32 + DecUtil uint32 +} + +type VgpuLicenseExpiry struct { + Year uint32 + Month uint16 + Day uint16 + Hour uint16 + Min uint16 + Sec uint16 + Status uint8 + Pad_cgo_0 [1]byte +} + +type VgpuLicenseInfo struct { + IsLicensed uint8 + LicenseExpiry VgpuLicenseExpiry + CurrentState uint32 +} + +type ProcessUtilizationSample struct { + Pid uint32 + TimeStamp uint64 + SmUtil uint32 + MemUtil uint32 + EncUtil uint32 + DecUtil uint32 +} + +type GridLicenseExpiry struct { + Year uint32 + Month uint16 + Day uint16 + Hour uint16 + Min uint16 + Sec uint16 + Status uint8 + Pad_cgo_0 [1]byte +} + +type GridLicensableFeature struct { + FeatureCode uint32 + FeatureState uint32 + LicenseInfo [128]int8 + ProductName [128]int8 + FeatureEnabled uint32 + LicenseExpiry GridLicenseExpiry +} + +type GridLicensableFeatures struct { + IsGridLicenseSupported int32 + LicensableFeaturesCount uint32 + GridLicensableFeatures [3]GridLicensableFeature +} + +type DeviceArchitecture uint32 + +type BusType uint32 + +type PowerSource uint32 + +type FieldValue struct { + FieldId 
uint32 + ScopeId uint32 + Timestamp int64 + LatencyUsec int64 + ValueType uint32 + NvmlReturn uint32 + Value [8]byte +} + +type Unit struct { + Handle *_Ctype_struct_nvmlUnit_st +} + +type HwbcEntry struct { + HwbcId uint32 + FirmwareVersion [32]int8 +} + +type LedState struct { + Cause [256]int8 + Color uint32 +} + +type UnitInfo struct { + Name [96]int8 + Id [96]int8 + Serial [96]int8 + FirmwareVersion [96]int8 +} + +type PSUInfo struct { + State [256]int8 + Current uint32 + Voltage uint32 + Power uint32 +} + +type UnitFanInfo struct { + Speed uint32 + State uint32 +} + +type UnitFanSpeeds struct { + Fans [24]UnitFanInfo + Count uint32 +} + +type EventSet struct { + Handle *_Ctype_struct_nvmlEventSet_st +} + +type EventData struct { + Device Device + EventType uint64 + EventData uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 +} + +type AccountingStats struct { + GpuUtilization uint32 + MemoryUtilization uint32 + MaxMemoryUsage uint64 + Time uint64 + StartTime uint64 + IsRunning uint32 + Reserved [5]uint32 +} + +type EncoderSessionInfo struct { + SessionId uint32 + Pid uint32 + VgpuInstance uint32 + CodecType uint32 + HResolution uint32 + VResolution uint32 + AverageFps uint32 + AverageLatency uint32 +} + +type FBCStats struct { + SessionsCount uint32 + AverageFPS uint32 + AverageLatency uint32 +} + +type FBCSessionInfo struct { + SessionId uint32 + Pid uint32 + VgpuInstance uint32 + DisplayOrdinal uint32 + SessionType uint32 + SessionFlags uint32 + HMaxResolution uint32 + VMaxResolution uint32 + HResolution uint32 + VResolution uint32 + AverageFPS uint32 + AverageLatency uint32 +} + +type AffinityScope uint32 + +type VgpuVersion struct { + MinVersion uint32 + MaxVersion uint32 +} + +type nvmlVgpuMetadata struct { + Version uint32 + Revision uint32 + GuestInfoState uint32 + GuestDriverVersion [80]int8 + HostDriverVersion [80]int8 + Reserved [6]uint32 + VgpuVirtualizationCaps uint32 + GuestVgpuVersion uint32 + OpaqueDataSize uint32 + OpaqueData [4]int8 +} + +type nvmlVgpuPgpuMetadata struct { + Version uint32 + Revision uint32 + HostDriverVersion [80]int8 + PgpuVirtualizationCaps uint32 + Reserved [5]uint32 + HostSupportedVgpuRange VgpuVersion + OpaqueDataSize uint32 + OpaqueData [4]int8 +} + +type VgpuPgpuCompatibility struct { + VgpuVmCompatibility uint32 + CompatibilityLimitCode uint32 +} + +type ExcludedDeviceInfo struct { + PciInfo PciInfo + Uuid [80]int8 +} + +type GpuInstancePlacement struct { + Start uint32 + Size uint32 +} + +type GpuInstanceProfileInfo struct { + Id uint32 + IsP2pSupported uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + CopyEngineCount uint32 + DecoderCount uint32 + EncoderCount uint32 + JpegCount uint32 + OfaCount uint32 + MemorySizeMB uint64 +} + +type GpuInstanceProfileInfo_v2 struct { + Version uint32 + Id uint32 + IsP2pSupported uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + CopyEngineCount uint32 + DecoderCount uint32 + EncoderCount uint32 + JpegCount uint32 + OfaCount uint32 + MemorySizeMB uint64 + Name [96]int8 +} + +type GpuInstanceInfo struct { + Device Device + Id uint32 + ProfileId uint32 + Placement GpuInstancePlacement +} + +type GpuInstance struct { + Handle *_Ctype_struct_nvmlGpuInstance_st +} + +type ComputeInstancePlacement struct { + Start uint32 + Size uint32 +} + +type ComputeInstanceProfileInfo struct { + Id uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + SharedCopyEngineCount uint32 + SharedDecoderCount uint32 + SharedEncoderCount 
uint32 + SharedJpegCount uint32 + SharedOfaCount uint32 +} + +type ComputeInstanceProfileInfo_v2 struct { + Version uint32 + Id uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + SharedCopyEngineCount uint32 + SharedDecoderCount uint32 + SharedEncoderCount uint32 + SharedJpegCount uint32 + SharedOfaCount uint32 + Name [96]int8 +} + +type ComputeInstanceInfo struct { + Device Device + GpuInstance GpuInstance + Id uint32 + ProfileId uint32 + Placement ComputeInstancePlacement +} + +type ComputeInstance struct { + Handle *_Ctype_struct_nvmlComputeInstance_st +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/unit.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/unit.go new file mode 100644 index 0000000..aba916a --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/unit.go @@ -0,0 +1,113 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +// nvml.UnitGetCount() +func UnitGetCount() (int, Return) { + var UnitCount uint32 + ret := nvmlUnitGetCount(&UnitCount) + return int(UnitCount), ret +} + +// nvml.UnitGetHandleByIndex() +func UnitGetHandleByIndex(Index int) (Unit, Return) { + var Unit Unit + ret := nvmlUnitGetHandleByIndex(uint32(Index), &Unit) + return Unit, ret +} + +// nvml.UnitGetUnitInfo() +func UnitGetUnitInfo(Unit Unit) (UnitInfo, Return) { + var Info UnitInfo + ret := nvmlUnitGetUnitInfo(Unit, &Info) + return Info, ret +} + +func (Unit Unit) GetUnitInfo() (UnitInfo, Return) { + return UnitGetUnitInfo(Unit) +} + +// nvml.UnitGetLedState() +func UnitGetLedState(Unit Unit) (LedState, Return) { + var State LedState + ret := nvmlUnitGetLedState(Unit, &State) + return State, ret +} + +func (Unit Unit) GetLedState() (LedState, Return) { + return UnitGetLedState(Unit) +} + +// nvml.UnitGetPsuInfo() +func UnitGetPsuInfo(Unit Unit) (PSUInfo, Return) { + var Psu PSUInfo + ret := nvmlUnitGetPsuInfo(Unit, &Psu) + return Psu, ret +} + +func (Unit Unit) GetPsuInfo() (PSUInfo, Return) { + return UnitGetPsuInfo(Unit) +} + +// nvml.UnitGetTemperature() +func UnitGetTemperature(Unit Unit, Type int) (uint32, Return) { + var Temp uint32 + ret := nvmlUnitGetTemperature(Unit, uint32(Type), &Temp) + return Temp, ret +} + +func (Unit Unit) GetTemperature(Type int) (uint32, Return) { + return UnitGetTemperature(Unit, Type) +} + +// nvml.UnitGetFanSpeedInfo() +func UnitGetFanSpeedInfo(Unit Unit) (UnitFanSpeeds, Return) { + var FanSpeeds UnitFanSpeeds + ret := nvmlUnitGetFanSpeedInfo(Unit, &FanSpeeds) + return FanSpeeds, ret +} + +func (Unit Unit) GetFanSpeedInfo() (UnitFanSpeeds, Return) { + return UnitGetFanSpeedInfo(Unit) +} + +// nvml.UnitGetDevices() +func UnitGetDevices(Unit Unit) ([]Device, Return) { + var DeviceCount uint32 = 1 // Will be reduced upon returning + for { + Devices := make([]Device, DeviceCount) + ret := nvmlUnitGetDevices(Unit, &DeviceCount, &Devices[0]) + if ret == SUCCESS { + return Devices[:DeviceCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + 
return nil, ret + } + DeviceCount *= 2 + } +} + +func (Unit Unit) GetDevices() ([]Device, Return) { + return UnitGetDevices(Unit) +} + +// nvml.UnitSetLedState() +func UnitSetLedState(Unit Unit, Color LedColor) Return { + return nvmlUnitSetLedState(Unit, Color) +} + +func (Unit Unit) SetLedState(Color LedColor) Return { + return UnitSetLedState(Unit, Color) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go new file mode 100644 index 0000000..a9f9f24 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go @@ -0,0 +1,462 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +import ( + "unsafe" +) + +// nvml.VgpuMetadata +type VgpuMetadata struct { + nvmlVgpuMetadata + OpaqueData []byte +} + +// nvml.VgpuPgpuMetadata +type VgpuPgpuMetadata struct { + nvmlVgpuPgpuMetadata + OpaqueData []byte +} + +// nvml.VgpuTypeGetClass() +func VgpuTypeGetClass(VgpuTypeId VgpuTypeId) (string, Return) { + var Size uint32 = DEVICE_NAME_BUFFER_SIZE + VgpuTypeClass := make([]byte, DEVICE_NAME_BUFFER_SIZE) + ret := nvmlVgpuTypeGetClass(VgpuTypeId, &VgpuTypeClass[0], &Size) + return string(VgpuTypeClass[:clen(VgpuTypeClass)]), ret +} + +func (VgpuTypeId VgpuTypeId) GetClass() (string, Return) { + return VgpuTypeGetClass(VgpuTypeId) +} + +// nvml.VgpuTypeGetName() +func VgpuTypeGetName(VgpuTypeId VgpuTypeId) (string, Return) { + var Size uint32 = DEVICE_NAME_BUFFER_SIZE + VgpuTypeName := make([]byte, DEVICE_NAME_BUFFER_SIZE) + ret := nvmlVgpuTypeGetName(VgpuTypeId, &VgpuTypeName[0], &Size) + return string(VgpuTypeName[:clen(VgpuTypeName)]), ret +} + +func (VgpuTypeId VgpuTypeId) GetName() (string, Return) { + return VgpuTypeGetName(VgpuTypeId) +} + +// nvml.VgpuTypeGetGpuInstanceProfileId() +func VgpuTypeGetGpuInstanceProfileId(VgpuTypeId VgpuTypeId) (uint32, Return) { + var Size uint32 + ret := nvmlVgpuTypeGetGpuInstanceProfileId(VgpuTypeId, &Size) + return Size, ret +} + +func (VgpuTypeId VgpuTypeId) GetGpuInstanceProfileId() (uint32, Return) { + return VgpuTypeGetGpuInstanceProfileId(VgpuTypeId) +} + +// nvml.VgpuTypeGetDeviceID() +func VgpuTypeGetDeviceID(VgpuTypeId VgpuTypeId) (uint64, uint64, Return) { + var DeviceID, SubsystemID uint64 + ret := nvmlVgpuTypeGetDeviceID(VgpuTypeId, &DeviceID, &SubsystemID) + return DeviceID, SubsystemID, ret +} + +func (VgpuTypeId VgpuTypeId) GetDeviceID() (uint64, uint64, Return) { + return VgpuTypeGetDeviceID(VgpuTypeId) +} + +// nvml.VgpuTypeGetFramebufferSize() +func VgpuTypeGetFramebufferSize(VgpuTypeId VgpuTypeId) (uint64, Return) { + var FbSize uint64 + ret := nvmlVgpuTypeGetFramebufferSize(VgpuTypeId, &FbSize) + return FbSize, ret +} + +func (VgpuTypeId VgpuTypeId) GetFramebufferSize() (uint64, Return) { + return VgpuTypeGetFramebufferSize(VgpuTypeId) +} + +// nvml.VgpuTypeGetNumDisplayHeads() +func VgpuTypeGetNumDisplayHeads(VgpuTypeId VgpuTypeId) (int, Return) { + var NumDisplayHeads uint32 + ret := 
nvmlVgpuTypeGetNumDisplayHeads(VgpuTypeId, &NumDisplayHeads) + return int(NumDisplayHeads), ret +} + +func (VgpuTypeId VgpuTypeId) GetNumDisplayHeads() (int, Return) { + return VgpuTypeGetNumDisplayHeads(VgpuTypeId) +} + +// nvml.VgpuTypeGetResolution() +func VgpuTypeGetResolution(VgpuTypeId VgpuTypeId, DisplayIndex int) (uint32, uint32, Return) { + var Xdim, Ydim uint32 + ret := nvmlVgpuTypeGetResolution(VgpuTypeId, uint32(DisplayIndex), &Xdim, &Ydim) + return Xdim, Ydim, ret +} + +func (VgpuTypeId VgpuTypeId) GetResolution(DisplayIndex int) (uint32, uint32, Return) { + return VgpuTypeGetResolution(VgpuTypeId, DisplayIndex) +} + +// nvml.VgpuTypeGetLicense() +func VgpuTypeGetLicense(VgpuTypeId VgpuTypeId) (string, Return) { + VgpuTypeLicenseString := make([]byte, GRID_LICENSE_BUFFER_SIZE) + ret := nvmlVgpuTypeGetLicense(VgpuTypeId, &VgpuTypeLicenseString[0], GRID_LICENSE_BUFFER_SIZE) + return string(VgpuTypeLicenseString[:clen(VgpuTypeLicenseString)]), ret +} + +func (VgpuTypeId VgpuTypeId) GetLicense() (string, Return) { + return VgpuTypeGetLicense(VgpuTypeId) +} + +// nvml.VgpuTypeGetFrameRateLimit() +func VgpuTypeGetFrameRateLimit(VgpuTypeId VgpuTypeId) (uint32, Return) { + var FrameRateLimit uint32 + ret := nvmlVgpuTypeGetFrameRateLimit(VgpuTypeId, &FrameRateLimit) + return FrameRateLimit, ret +} + +func (VgpuTypeId VgpuTypeId) GetFrameRateLimit() (uint32, Return) { + return VgpuTypeGetFrameRateLimit(VgpuTypeId) +} + +// nvml.VgpuTypeGetMaxInstances() +func VgpuTypeGetMaxInstances(Device Device, VgpuTypeId VgpuTypeId) (int, Return) { + var VgpuInstanceCount uint32 + ret := nvmlVgpuTypeGetMaxInstances(Device, VgpuTypeId, &VgpuInstanceCount) + return int(VgpuInstanceCount), ret +} + +func (Device Device) VgpuTypeGetMaxInstances(VgpuTypeId VgpuTypeId) (int, Return) { + return VgpuTypeGetMaxInstances(Device, VgpuTypeId) +} + +func (VgpuTypeId VgpuTypeId) GetMaxInstances(Device Device) (int, Return) { + return VgpuTypeGetMaxInstances(Device, VgpuTypeId) +} + +// nvml.VgpuTypeGetMaxInstancesPerVm() +func VgpuTypeGetMaxInstancesPerVm(VgpuTypeId VgpuTypeId) (int, Return) { + var VgpuInstanceCountPerVm uint32 + ret := nvmlVgpuTypeGetMaxInstancesPerVm(VgpuTypeId, &VgpuInstanceCountPerVm) + return int(VgpuInstanceCountPerVm), ret +} + +func (VgpuTypeId VgpuTypeId) GetMaxInstancesPerVm() (int, Return) { + return VgpuTypeGetMaxInstancesPerVm(VgpuTypeId) +} + +// nvml.VgpuInstanceGetVmID() +func VgpuInstanceGetVmID(VgpuInstance VgpuInstance) (string, VgpuVmIdType, Return) { + var VmIdType VgpuVmIdType + VmId := make([]byte, DEVICE_UUID_BUFFER_SIZE) + ret := nvmlVgpuInstanceGetVmID(VgpuInstance, &VmId[0], DEVICE_UUID_BUFFER_SIZE, &VmIdType) + return string(VmId[:clen(VmId)]), VmIdType, ret +} + +func (VgpuInstance VgpuInstance) GetVmID() (string, VgpuVmIdType, Return) { + return VgpuInstanceGetVmID(VgpuInstance) +} + +// nvml.VgpuInstanceGetUUID() +func VgpuInstanceGetUUID(VgpuInstance VgpuInstance) (string, Return) { + Uuid := make([]byte, DEVICE_UUID_BUFFER_SIZE) + ret := nvmlVgpuInstanceGetUUID(VgpuInstance, &Uuid[0], DEVICE_UUID_BUFFER_SIZE) + return string(Uuid[:clen(Uuid)]), ret +} + +func (VgpuInstance VgpuInstance) GetUUID() (string, Return) { + return VgpuInstanceGetUUID(VgpuInstance) +} + +// nvml.VgpuInstanceGetVmDriverVersion() +func VgpuInstanceGetVmDriverVersion(VgpuInstance VgpuInstance) (string, Return) { + Version := make([]byte, SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + ret := nvmlVgpuInstanceGetVmDriverVersion(VgpuInstance, &Version[0], SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + return 
string(Version[:clen(Version)]), ret +} + +func (VgpuInstance VgpuInstance) GetVmDriverVersion() (string, Return) { + return VgpuInstanceGetVmDriverVersion(VgpuInstance) +} + +// nvml.VgpuInstanceGetFbUsage() +func VgpuInstanceGetFbUsage(VgpuInstance VgpuInstance) (uint64, Return) { + var FbUsage uint64 + ret := nvmlVgpuInstanceGetFbUsage(VgpuInstance, &FbUsage) + return FbUsage, ret +} + +func (VgpuInstance VgpuInstance) GetFbUsage() (uint64, Return) { + return VgpuInstanceGetFbUsage(VgpuInstance) +} + +// nvml.VgpuInstanceGetLicenseInfo() +func VgpuInstanceGetLicenseInfo(VgpuInstance VgpuInstance) (VgpuLicenseInfo, Return) { + var LicenseInfo VgpuLicenseInfo + ret := nvmlVgpuInstanceGetLicenseInfo(VgpuInstance, &LicenseInfo) + return LicenseInfo, ret +} + +func (VgpuInstance VgpuInstance) GetLicenseInfo() (VgpuLicenseInfo, Return) { + return VgpuInstanceGetLicenseInfo(VgpuInstance) +} + +// nvml.VgpuInstanceGetLicenseStatus() +func VgpuInstanceGetLicenseStatus(VgpuInstance VgpuInstance) (int, Return) { + var Licensed uint32 + ret := nvmlVgpuInstanceGetLicenseStatus(VgpuInstance, &Licensed) + return int(Licensed), ret +} + +func (VgpuInstance VgpuInstance) GetLicenseStatus() (int, Return) { + return VgpuInstanceGetLicenseStatus(VgpuInstance) +} + +// nvml.VgpuInstanceGetType() +func VgpuInstanceGetType(VgpuInstance VgpuInstance) (VgpuTypeId, Return) { + var VgpuTypeId VgpuTypeId + ret := nvmlVgpuInstanceGetType(VgpuInstance, &VgpuTypeId) + return VgpuTypeId, ret +} + +func (VgpuInstance VgpuInstance) GetType() (VgpuTypeId, Return) { + return VgpuInstanceGetType(VgpuInstance) +} + +// nvml.VgpuInstanceGetFrameRateLimit() +func VgpuInstanceGetFrameRateLimit(VgpuInstance VgpuInstance) (uint32, Return) { + var FrameRateLimit uint32 + ret := nvmlVgpuInstanceGetFrameRateLimit(VgpuInstance, &FrameRateLimit) + return FrameRateLimit, ret +} + +func (VgpuInstance VgpuInstance) GetFrameRateLimit() (uint32, Return) { + return VgpuInstanceGetFrameRateLimit(VgpuInstance) +} + +// nvml.VgpuInstanceGetEccMode() +func VgpuInstanceGetEccMode(VgpuInstance VgpuInstance) (EnableState, Return) { + var EccMode EnableState + ret := nvmlVgpuInstanceGetEccMode(VgpuInstance, &EccMode) + return EccMode, ret +} + +func (VgpuInstance VgpuInstance) GetEccMode() (EnableState, Return) { + return VgpuInstanceGetEccMode(VgpuInstance) +} + +// nvml.VgpuInstanceGetEncoderCapacity() +func VgpuInstanceGetEncoderCapacity(VgpuInstance VgpuInstance) (int, Return) { + var EncoderCapacity uint32 + ret := nvmlVgpuInstanceGetEncoderCapacity(VgpuInstance, &EncoderCapacity) + return int(EncoderCapacity), ret +} + +func (VgpuInstance VgpuInstance) GetEncoderCapacity() (int, Return) { + return VgpuInstanceGetEncoderCapacity(VgpuInstance) +} + +// nvml.VgpuInstanceSetEncoderCapacity() +func VgpuInstanceSetEncoderCapacity(VgpuInstance VgpuInstance, EncoderCapacity int) Return { + return nvmlVgpuInstanceSetEncoderCapacity(VgpuInstance, uint32(EncoderCapacity)) +} + +func (VgpuInstance VgpuInstance) SetEncoderCapacity(EncoderCapacity int) Return { + return VgpuInstanceSetEncoderCapacity(VgpuInstance, EncoderCapacity) +} + +// nvml.VgpuInstanceGetEncoderStats() +func VgpuInstanceGetEncoderStats(VgpuInstance VgpuInstance) (int, uint32, uint32, Return) { + var SessionCount, AverageFps, AverageLatency uint32 + ret := nvmlVgpuInstanceGetEncoderStats(VgpuInstance, &SessionCount, &AverageFps, &AverageLatency) + return int(SessionCount), AverageFps, AverageLatency, ret +} + +func (VgpuInstance VgpuInstance) GetEncoderStats() (int, uint32, uint32, 
Return) { + return VgpuInstanceGetEncoderStats(VgpuInstance) +} + +// nvml.VgpuInstanceGetEncoderSessions() +func VgpuInstanceGetEncoderSessions(VgpuInstance VgpuInstance) (int, EncoderSessionInfo, Return) { + var SessionCount uint32 + var SessionInfo EncoderSessionInfo + ret := nvmlVgpuInstanceGetEncoderSessions(VgpuInstance, &SessionCount, &SessionInfo) + return int(SessionCount), SessionInfo, ret +} + +func (VgpuInstance VgpuInstance) GetEncoderSessions() (int, EncoderSessionInfo, Return) { + return VgpuInstanceGetEncoderSessions(VgpuInstance) +} + +// nvml.VgpuInstanceGetFBCStats() +func VgpuInstanceGetFBCStats(VgpuInstance VgpuInstance) (FBCStats, Return) { + var FbcStats FBCStats + ret := nvmlVgpuInstanceGetFBCStats(VgpuInstance, &FbcStats) + return FbcStats, ret +} + +func (VgpuInstance VgpuInstance) GetFBCStats() (FBCStats, Return) { + return VgpuInstanceGetFBCStats(VgpuInstance) +} + +// nvml.VgpuInstanceGetFBCSessions() +func VgpuInstanceGetFBCSessions(VgpuInstance VgpuInstance) (int, FBCSessionInfo, Return) { + var SessionCount uint32 + var SessionInfo FBCSessionInfo + ret := nvmlVgpuInstanceGetFBCSessions(VgpuInstance, &SessionCount, &SessionInfo) + return int(SessionCount), SessionInfo, ret +} + +func (VgpuInstance VgpuInstance) GetFBCSessions() (int, FBCSessionInfo, Return) { + return VgpuInstanceGetFBCSessions(VgpuInstance) +} + +// nvml.VgpuInstanceGetGpuInstanceId() +func VgpuInstanceGetGpuInstanceId(VgpuInstance VgpuInstance) (int, Return) { + var gpuInstanceId uint32 + ret := nvmlVgpuInstanceGetGpuInstanceId(VgpuInstance, &gpuInstanceId) + return int(gpuInstanceId), ret +} + +func (VgpuInstance VgpuInstance) GetGpuInstanceId() (int, Return) { + return VgpuInstanceGetGpuInstanceId(VgpuInstance) +} + +// nvml.VgpuInstanceGetGpuPciId() +func VgpuInstanceGetGpuPciId(VgpuInstance VgpuInstance) (string, Return) { + var Length uint32 = 1 // Will be reduced upon returning + for { + VgpuPciId := make([]byte, Length) + ret := nvmlVgpuInstanceGetGpuPciId(VgpuInstance, &VgpuPciId[0], &Length) + if ret == SUCCESS { + return string(VgpuPciId[:clen(VgpuPciId)]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return "", ret + } + Length *= 2 + } +} + +func (VgpuInstance VgpuInstance) GetGpuPciId() (string, Return) { + return VgpuInstanceGetGpuPciId(VgpuInstance) +} + +// nvml.VgpuInstanceGetMetadata() +func VgpuInstanceGetMetadata(VgpuInstance VgpuInstance) (VgpuMetadata, Return) { + var VgpuMetadata VgpuMetadata + OpaqueDataSize := unsafe.Sizeof(VgpuMetadata.nvmlVgpuMetadata.OpaqueData) + VgpuMetadataSize := unsafe.Sizeof(VgpuMetadata.nvmlVgpuMetadata) - OpaqueDataSize + for { + BufferSize := uint32(VgpuMetadataSize + OpaqueDataSize) + Buffer := make([]byte, BufferSize) + nvmlVgpuMetadataPtr := (*nvmlVgpuMetadata)(unsafe.Pointer(&Buffer[0])) + ret := nvmlVgpuInstanceGetMetadata(VgpuInstance, nvmlVgpuMetadataPtr, &BufferSize) + if ret == SUCCESS { + VgpuMetadata.nvmlVgpuMetadata = *nvmlVgpuMetadataPtr + VgpuMetadata.OpaqueData = Buffer[VgpuMetadataSize:BufferSize] + return VgpuMetadata, ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return VgpuMetadata, ret + } + OpaqueDataSize = 2 * OpaqueDataSize + } +} + +func (VgpuInstance VgpuInstance) GetMetadata() (VgpuMetadata, Return) { + return VgpuInstanceGetMetadata(VgpuInstance) +} + +// nvml.VgpuInstanceGetAccountingMode() +func VgpuInstanceGetAccountingMode(VgpuInstance VgpuInstance) (EnableState, Return) { + var Mode EnableState + ret := nvmlVgpuInstanceGetAccountingMode(VgpuInstance, &Mode) + return Mode, ret +} + +func (VgpuInstance 
VgpuInstance) GetAccountingMode() (EnableState, Return) {
+	return VgpuInstanceGetAccountingMode(VgpuInstance)
+}
+
+// nvml.VgpuInstanceGetAccountingPids()
+func VgpuInstanceGetAccountingPids(VgpuInstance VgpuInstance) ([]int, Return) {
+	var Count uint32 = 1 // Will be reduced upon returning
+	for {
+		Pids := make([]uint32, Count)
+		ret := nvmlVgpuInstanceGetAccountingPids(VgpuInstance, &Count, &Pids[0])
+		if ret == SUCCESS {
+			return uint32SliceToIntSlice(Pids[:Count]), ret
+		}
+		if ret != ERROR_INSUFFICIENT_SIZE {
+			return nil, ret
+		}
+		Count *= 2
+	}
+}
+
+func (VgpuInstance VgpuInstance) GetAccountingPids() ([]int, Return) {
+	return VgpuInstanceGetAccountingPids(VgpuInstance)
+}
+
+// nvml.VgpuInstanceGetAccountingStats()
+func VgpuInstanceGetAccountingStats(VgpuInstance VgpuInstance, Pid int) (AccountingStats, Return) {
+	var Stats AccountingStats
+	ret := nvmlVgpuInstanceGetAccountingStats(VgpuInstance, uint32(Pid), &Stats)
+	return Stats, ret
+}
+
+func (VgpuInstance VgpuInstance) GetAccountingStats(Pid int) (AccountingStats, Return) {
+	return VgpuInstanceGetAccountingStats(VgpuInstance, Pid)
+}
+
+// nvml.GetVgpuCompatibility()
+func GetVgpuCompatibility(nvmlVgpuMetadata *nvmlVgpuMetadata, PgpuMetadata *nvmlVgpuPgpuMetadata) (VgpuPgpuCompatibility, Return) {
+	var CompatibilityInfo VgpuPgpuCompatibility
+	ret := nvmlGetVgpuCompatibility(nvmlVgpuMetadata, PgpuMetadata, &CompatibilityInfo)
+	return CompatibilityInfo, ret
+}
+
+// nvml.GetVgpuVersion()
+func GetVgpuVersion() (VgpuVersion, VgpuVersion, Return) {
+	var Supported, Current VgpuVersion
+	ret := nvmlGetVgpuVersion(&Supported, &Current)
+	return Supported, Current, ret
+}
+
+// nvml.SetVgpuVersion()
+func SetVgpuVersion(VgpuVersion *VgpuVersion) Return {
+	return nvmlSetVgpuVersion(VgpuVersion)
+}
+
+// nvml.VgpuInstanceClearAccountingPids()
+func VgpuInstanceClearAccountingPids(VgpuInstance VgpuInstance) Return {
+	return nvmlVgpuInstanceClearAccountingPids(VgpuInstance)
+}
+
+func (VgpuInstance VgpuInstance) ClearAccountingPids() Return {
+	return VgpuInstanceClearAccountingPids(VgpuInstance)
+}
+
+// nvml.VgpuInstanceGetMdevUUID()
+func VgpuInstanceGetMdevUUID(VgpuInstance VgpuInstance) (string, Return) {
+	MdevUuid := make([]byte, DEVICE_UUID_BUFFER_SIZE)
+	ret := nvmlVgpuInstanceGetMdevUUID(VgpuInstance, &MdevUuid[0], DEVICE_UUID_BUFFER_SIZE)
+	return string(MdevUuid[:clen(MdevUuid)]), ret
+}
+
+func (VgpuInstance VgpuInstance) GetMdevUUID() (string, Return) {
+	return VgpuInstanceGetMdevUUID(VgpuInstance)
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3f89125..a194258 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,6 +1,7 @@
 # github.com/NVIDIA/go-nvml v0.11.6-0.0.20220614115128-31f8b89eb740
 ## explicit
 github.com/NVIDIA/go-nvml/pkg/dl
+github.com/NVIDIA/go-nvml/pkg/nvml
 # github.com/davecgh/go-spew v1.1.0
 github.com/davecgh/go-spew/spew
 # github.com/pmezard/go-difflib v1.0.0
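
The MIG entry points documented in the header hunks above compose into a fairly rigid sequence: enable MIG mode on the parent GPU, resolve a GPU instance profile and create an instance from it, resolve a compute instance profile within that GPU instance and create a compute instance, and only then enumerate the resulting MIG device handles. The C sketch below strings those declarations together in that order. It is illustrative only: it assumes a MIG-capable GPU at index 0, a root caller, and that the 1-slice profile constants (NVML_GPU_INSTANCE_PROFILE_1_SLICE, NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE, NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED) are the ones wanted; nvmlShutdown() and nvmlErrorString() come from parts of nvml.h that this patch does not touch.

/* mig_sketch.c - illustrative only; not part of this patch. */
#include <stdio.h>
#include <nvml.h>

#define CHECK(call)                                                         \
    do {                                                                    \
        nvmlReturn_t _r = (call);                                           \
        if (_r != NVML_SUCCESS) {                                           \
            fprintf(stderr, "%s failed: %s\n", #call, nvmlErrorString(_r)); \
            nvmlShutdown();                                                 \
            return 1;                                                       \
        }                                                                   \
    } while (0)

int main(void)
{
    nvmlDevice_t device;
    nvmlReturn_t activationStatus;
    nvmlGpuInstanceProfileInfo_t giProfile;
    nvmlComputeInstanceProfileInfo_t ciProfile;
    nvmlGpuInstance_t gpuInstance;
    nvmlComputeInstance_t computeInstance;
    unsigned int maxMigDevices = 0;

    CHECK(nvmlInit());
    CHECK(nvmlDeviceGetHandleByIndex(0, &device));   /* assumes device 0 is MIG capable */

    /* Enable MIG mode; a reset or reboot may still be pending, see activationStatus. */
    CHECK(nvmlDeviceSetMigMode(device, NVML_DEVICE_MIG_ENABLE, &activationStatus));
    printf("MIG enable requested, activation status: %s\n", nvmlErrorString(activationStatus));

    /* Resolve the 1-slice GPU instance profile (an assumption) and create an instance from it. */
    CHECK(nvmlDeviceGetGpuInstanceProfileInfo(device, NVML_GPU_INSTANCE_PROFILE_1_SLICE, &giProfile));
    CHECK(nvmlDeviceCreateGpuInstance(device, giProfile.id, &gpuInstance));

    /* Carve a compute instance out of the GPU instance. */
    CHECK(nvmlGpuInstanceGetComputeInstanceProfileInfo(gpuInstance,
                                                       NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE,
                                                       NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED,
                                                       &ciProfile));
    CHECK(nvmlGpuInstanceCreateComputeInstance(gpuInstance, ciProfile.id, &computeInstance));

    /* Each compute instance surfaces as a MIG device handle under the parent device. */
    CHECK(nvmlDeviceGetMaxMigDeviceCount(device, &maxMigDevices));
    for (unsigned int i = 0; i < maxMigDevices; i++) {
        nvmlDevice_t migDevice;
        unsigned int giId, ciId;
        nvmlReturn_t r = nvmlDeviceGetMigDeviceHandleByIndex(device, i, &migDevice);
        if (r == NVML_ERROR_NOT_FOUND)
            continue;                                /* no MIG device at this index */
        if (r != NVML_SUCCESS)
            break;
        CHECK(nvmlDeviceGetGpuInstanceId(migDevice, &giId));
        CHECK(nvmlDeviceGetComputeInstanceId(migDevice, &ciId));
        printf("MIG device %u -> GPU instance %u, compute instance %u\n", i, giId, ciId);
    }

    /* Tear down in reverse order: compute instance first, then the GPU instance. */
    CHECK(nvmlComputeInstanceDestroy(computeInstance));
    CHECK(nvmlGpuInstanceDestroy(gpuInstance));

    nvmlShutdown();
    return 0;
}

Tear-down has to mirror creation in reverse; as the nvmlGpuInstanceDestroy documentation above spells out, destroying a GPU instance while compute instances or CUDA processes are still active on it fails with NVML_ERROR_IN_USE.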