package hugot

type ortOptions struct {
	libraryPath        string
	telemetry          bool
	intraOpNumThreads  int
	interOpNumThreads  int
	cpuMemArena        bool
	cpuMemArenaSet     bool
	memPattern         bool
	memPatternSet      bool
	cudaOptions        map[string]string
	cudaOptionsSet     bool
	coreMLOptions      uint32
	coreMLOptionsSet   bool
	directMLOptions    int
	directMLOptionsSet bool
	openVINOOptions    map[string]string
	openVINOOptionsSet bool
	tensorRTOptions    map[string]string
	tensorRTOptionsSet bool
}

// WithOption is the function type shared by all option setters: each one
// receives the ortOptions struct and mutates it in place.
type WithOption func(o *ortOptions)
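
// Options are applied by folding each WithOption over an ortOptions value.
// Illustrative sketch only (how the constructor consumes these options is
// outside this file):
//
//	opts := &ortOptions{}
//	for _, option := range []WithOption{WithTelemetry(), WithIntraOpNumThreads(4)} {
//		option(opts)
//	}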

// WithOnnxLibraryPath sets the path to the onnxruntime shared library
// ("onnxruntime.so" or "onnxruntime.dll"). By default, the path is
// "onnxruntime.so" on non-Windows systems and "onnxruntime.dll" on Windows.
func WithOnnxLibraryPath(ortLibraryPath string) WithOption {
	return func(o *ortOptions) {
		o.libraryPath = ortLibraryPath
	}
}

// WithTelemetry enables telemetry events for the onnxruntime environment. Default is off.
func WithTelemetry() WithOption {
	return func(o *ortOptions) {
		o.telemetry = true
	}
}

// WithIntraOpNumThreads sets the number of threads used to parallelize execution within onnxruntime
// graph nodes. If unspecified, onnxruntime uses the number of physical CPU cores.
func WithIntraOpNumThreads(numThreads int) WithOption {
	return func(o *ortOptions) {
		o.intraOpNumThreads = numThreads
	}
}

// WithInterOpNumThreads sets the number of threads used to parallelize execution across separate
// onnxruntime graph nodes. If unspecified, onnxruntime uses the number of physical CPU cores.
func WithInterOpNumThreads(numThreads int) WithOption {
	return func(o *ortOptions) {
		o.interOpNumThreads = numThreads
	}
}

// WithCpuMemArena enables or disables the memory arena on CPU.
// The arena may pre-allocate memory for future usage. Default is true.
func WithCpuMemArena(enable bool) WithOption {
	return func(o *ortOptions) {
		o.cpuMemArena = enable
		o.cpuMemArenaSet = true
	}
}

// WithMemPattern enables or disables the memory pattern optimization.
// If enabled, memory is pre-allocated when all shapes are known. Default is true.
func WithMemPattern(enable bool) WithOption {
	return func(o *ortOptions) {
		o.memPattern = enable
		o.memPatternSet = true
	}
}

// WithCuda sets the options for the CUDA execution provider.
// It takes a map of option names to string values; the map is stored in the
// ortOptions struct and the cudaOptionsSet flag is set to true.
func WithCuda(options map[string]string) WithOption {
	return func(o *ortOptions) {
		o.cudaOptions = options
		o.cudaOptionsSet = true
	}
}
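
// Illustrative usage (the keys below follow the ONNX Runtime CUDA execution
// provider option names; consult the onnxruntime documentation for your
// version for the full list and exact spelling):
//
//	WithCuda(map[string]string{
//		"device_id":     "0",
//		"gpu_mem_limit": "2147483648",
//	})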

// WithCoreML sets the CoreML option flags for the onnxruntime configuration.
// The flags parameter is stored in the ortOptions struct and the
// coreMLOptionsSet flag is set to true.
func WithCoreML(flags uint32) WithOption {
	return func(o *ortOptions) {
		o.coreMLOptions = flags
		o.coreMLOptionsSet = true
	}
}
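
// Illustrative usage: the flag bits are assumed to correspond to the
// COREML_FLAG_* constants defined in onnxruntime's coreml_provider_factory.h
// (e.g. COREML_FLAG_USE_CPU_ONLY = 0x001); check the header shipped with your
// onnxruntime build for the exact values.
//
//	WithCoreML(0x001) // assumed: COREML_FLAG_USE_CPU_ONLY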

// WithDirectML sets the DirectML device ID for the onnxruntime.
// By default, this option is not set.
func WithDirectML(deviceID int) WithOption {
	return func(o *ortOptions) {
		o.directMLOptions = deviceID
		o.directMLOptionsSet = true
	}
}

// WithOpenVINO sets the options for the OpenVINO execution provider.
// The options parameter is a map of configuration option names to string values.
// The map is stored in the ortOptions struct and the openVINOOptionsSet flag is set to true.
// Example usage:
//
//	WithOpenVINO(map[string]string{"device_type": "CPU", "num_threads": "4"})
//
// This configures the OpenVINO execution provider to run on CPU with 4 threads.
func WithOpenVINO(options map[string]string) WithOption {
	return func(o *ortOptions) {
		o.openVINOOptions = options
		o.openVINOOptionsSet = true
	}
}

// WithTensorRT sets the options for the TensorRT execution provider.
// The options parameter is a map of configuration option names to string values.
// By default, the options are nil and the TensorRT provider is not used.
// Example usage:
//
//	WithTensorRT(map[string]string{"device_id": "0"})
//
// Note: for the TensorRT provider to work, the onnxruntime library must be built with TensorRT support.
func WithTensorRT(options map[string]string) WithOption {
	return func(o *ortOptions) {
		o.tensorRTOptions = options
		o.tensorRTOptionsSet = true
	}
}