|
2 | 2 | # `julia build_tarballs.jl --help` to see a usage message. |
3 | 3 | using BinaryBuilder, Pkg |
4 | 4 |
|
| 5 | +include(joinpath(@__DIR__, "..", "..", "platforms", "cuda.jl")) |
| 6 | + |
# Package identity for the generated ONNXRuntime JLL.
name = "ONNXRuntime"
version = v"1.10.0"

# CUDA toolchain pins for the CUDA-enabled platforms.
# Cf. https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements
# Cf. https://onnxruntime.ai/docs/execution-providers/TensorRT-ExecutionProvider.html#requirements
cuda_versions = [
    # Pin the 11.3 series (not 11.4) to stay compatible with TensorRT (JLL)
    # v8.0.1, which includes aarch64 support.
    v"11.3.1",
]
# NOTE(review): aarch64 artifacts are tagged for CUDA 10.2 here while the
# x86_64 build above pins 11.3.1 — confirm this asymmetry is intentional.
cuda_aarch64_tag = "10.2"
cudnn_version = v"8.2.4"
@@ -134,12 +135,12 @@ dependencies = [ |
134 | 135 | compat = tensorrt_compat, |
135 | 136 | platforms = cuda_platforms), |
136 | 137 | Dependency("Zlib_jll"; platforms = cuda_platforms), |
137 | | - BuildDependency(PackageSpec("CUDA_full_jll", v"11.3.1"); platforms = cuda_platforms), |
| 138 | + BuildDependency(PackageSpec("CUDA_full_jll", first(cuda_versions)); platforms = cuda_platforms), |
138 | 139 | HostBuildDependency(PackageSpec("protoc_jll", v"3.16.1")) |
139 | 140 | ] |
140 | 141 |
|
# Build the tarballs, and possibly a `build.jl` as well.
# `augment_platform_block` installs the platform-augmentation hook (from the
# included cuda.jl) so the JLL can select the artifact matching the host's
# CUDA setup at install time.
build_tarballs(
    ARGS, name, version, sources, script, platforms, products, dependencies;
    julia_compat = "1.6",
    preferred_gcc_version = v"8",
    augment_platform_block = CUDA.augment,
)
0 commit comments