forked from anduril/jetpack-nixos
-
Notifications
You must be signed in to change notification settings - Fork 0
/
samples.nix
391 lines (316 loc) · 11.2 KB
/
samples.nix
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
{ stdenv, lib, fetchurl, dpkg, pkg-config, autoAddOpenGLRunpathHook, freeimage,
cmake, opencv, opencv2, libX11, libdrm, libglvnd, python3, coreutils, gnused,
libGL, libXau, wayland, libxkbcommon, libffi,
writeShellApplication,
l4t, cudaPackages,
cudaVersion, debs
}:
let
cudaVersionDashes = lib.replaceStrings [ "." ] [ "-"] cudaVersion;
# This package is unfortunately not identical to the upstream cuda-samples
# published at https://github.com/NVIDIA/cuda-samples, so we can't use
# nixpkgs's pkgs/tests/cuda packages
# CUDA sample binaries (deviceQuery, bandwidthTest, ...) built from NVIDIA's
# Debian package. The *_nvrtc samples' .cu inputs are installed to $out/data
# because those binaries load them from the working directory at runtime.
cuda-samples = stdenv.mkDerivation {
pname = "cuda-samples";
version = debs.common."cuda-samples-${cudaVersionDashes}".version;
src = debs.common."cuda-samples-${cudaVersionDashes}".src;
# src is a .deb archive, not a tarball; extract it with dpkg.
unpackCmd = "dpkg -x $src source";
sourceRoot = "source/usr/local/cuda-${cudaVersion}/samples";
patches = [ ./cuda-samples.patch ];
nativeBuildInputs = [ dpkg pkg-config autoAddOpenGLRunpathHook ];
buildInputs = [ cudaPackages.cudatoolkit ];
# The samples' Makefiles locate the toolkit via CUDA_PATH and link against
# the driver stubs found through CUDA_SEARCH_PATH.
preConfigure = ''
export CUDA_PATH=${cudaPackages.cudatoolkit}
export CUDA_SEARCH_PATH=${cudaPackages.cudatoolkit}/lib/stubs
'';
enableParallelBuilding = true;
# Built binaries land under bin/<cpu>/<kernel>/release in the sample tree.
installPhase = ''
runHook preInstall
install -Dm755 -t $out/bin bin/${stdenv.hostPlatform.parsed.cpu.name}/${stdenv.hostPlatform.parsed.kernel.name}/release/*
# *_nvrtc samples require your current working directory contains the corresponding .cu file
find -ipath "*_nvrtc/*.cu" -exec install -Dt $out/data {} \;
runHook postInstall
'';
};
# Smoke test: runs a fixed set of the built CUDA sample binaries in sequence.
# The subshell cd's into the package so clock_nvrtc finds its ./data/*.cu.
cuda-test = writeShellApplication {
name = "cuda-test";
text = ''
for binary in deviceQuery deviceQueryDrv bandwidthTest clock clock_nvrtc; do
echo " * Running $binary"
# clock_nvrtc expects .cu files under $PWD/data
(cd ${cuda-samples}; ${cuda-samples}/bin/$binary)
echo
echo
done
'';
};
# cuDNN v8 example programs built from NVIDIA's libcudnn8-samples .deb.
# Only four of the shipped sample directories are built and installed.
cudnn-samples = stdenv.mkDerivation {
pname = "cudnn-samples";
version = debs.common.libcudnn8-samples.version;
src = debs.common.libcudnn8-samples.src;
# src is a .deb archive; extract it with dpkg.
unpackCmd = "dpkg -x $src source";
sourceRoot = "source/usr/src/cudnn_samples_v8";
nativeBuildInputs = [ dpkg autoAddOpenGLRunpathHook ];
buildInputs = with cudaPackages; [ cudatoolkit cudnn freeimage ];
# Passed to each sample's Makefile so it finds the toolkit and cuDNN.
buildFlags = [
"CUDA_PATH=${cudaPackages.cudatoolkit}"
"CUDNN_INCLUDE_PATH=${cudaPackages.cudnn}/include"
"CUDNN_LIB_PATH=${cudaPackages.cudnn}/lib"
];
enableParallelBuilding = true;
# Each sample is a standalone Makefile project; build them one by one.
buildPhase = ''
runHook preBuild
for dirname in conv_sample mnistCUDNN multiHeadAttention RNN_v8.0; do
pushd "$dirname"
make $buildFlags
popd 2>/dev/null
done
runHook postBuild
'';
installPhase = ''
runHook preInstall
install -Dm755 -t $out/bin \
conv_sample/conv_sample \
mnistCUDNN/mnistCUDNN \
multiHeadAttention/multiHeadAttention \
RNN_v8.0/RNN
runHook postInstall
'';
};
# Smoke test: runs the conv_sample binary from cudnn-samples.
cudnn-test = writeShellApplication {
name = "cudnn-test";
text = ''
echo " * Running conv_sample"
${cudnn-samples}/bin/conv_sample
'';
};
# L4T OpenGL/EGL demo programs (bubble, ctree, gears, ...) built for both
# the wayland and x11 window systems; binaries are installed with a
# "<winsys>-" prefix to keep the two variants apart.
graphics-demos = stdenv.mkDerivation {
pname = "graphics-demos";
version = debs.t234.nvidia-l4t-graphics-demos.version;
src = debs.t234.nvidia-l4t-graphics-demos.src;
# src is a .deb archive; extract it with dpkg.
unpackCmd = "dpkg -x $src source";
sourceRoot = "source/usr/src/nvidia/graphics_demos";
nativeBuildInputs = [ dpkg ];
buildInputs = [ libX11 libGL libXau libdrm wayland libxkbcommon libffi ];
# Point the vendored Makefile at store paths instead of /bin tools, and
# drop the hard-coded libffi soname version so the linker uses ours.
postPatch = ''
substituteInPlace Makefile.l4tsdkdefs \
--replace /bin/cat ${coreutils}/bin/cat \
--replace /bin/sed ${gnused}/bin/sed \
--replace libffi.so.7 libffi.so
'';
# Build every demo once per window system; NV_PLATFORM_LDFLAGS is cleared
# to avoid the SDK's default linker flags.
buildPhase = ''
runHook preBuild
# TODO: Also do winsys=egldevice
for winsys in wayland x11; do
for demo in bubble ctree eglstreamcube gears-basic gears-cube gears-lib; do
pushd "$demo"
make NV_WINSYS=$winsys NV_PLATFORM_LDFLAGS= $buildFlags
popd 2>/dev/null
done
done
runHook postBuild
'';
# gears-lib is a library consumed by the other gears demos, so it is not
# installed; the gears binaries have names differing from their directories.
installPhase = ''
runHook preInstall
for winsys in wayland x11; do
for demo in bubble ctree eglstreamcube; do
install -Dm 755 "$demo/$winsys/$demo" "$out/bin/$winsys-$demo"
done
install -Dm 755 "gears-basic/$winsys/gears" "$out/bin/$winsys-gears"
install -Dm 755 "gears-cube/$winsys/gearscube" "$out/bin/$winsys-gearscube"
done
runHook postInstall
'';
enableParallelBuilding = true;
};
# TODO: Add wayland and x11 tests for graphics demos....
# Contains a bunch of tests for tensorrt, for example:
# ./result/bin/sample_mnist --datadir=result/data/mnist
# TensorRT sample programs plus their test data, installed as $out/bin and
# $out/data (the samples build into ../bin relative to the source root).
libnvinfer-samples = stdenv.mkDerivation {
pname = "libnvinfer-samples";
version = debs.common.libnvinfer-samples.version;
src = debs.common.libnvinfer-samples.src;
# src is a .deb archive; extract it with dpkg.
unpackCmd = "dpkg -x $src source";
sourceRoot = "source/usr/src/tensorrt/samples";
nativeBuildInputs = [ dpkg autoAddOpenGLRunpathHook ];
buildInputs = with cudaPackages; [ tensorrt cuda_profiler_api cudnn ];
# These environment variables are required by the /usr/src/tensorrt/samples/README.md
CUDA_INSTALL_DIR = cudaPackages.cudatoolkit;
CUDNN_INSTALL_DIR = cudaPackages.cudnn;
enableParallelBuilding = true;
# Remove intermediate object directories before copying the build output.
installPhase = ''
runHook preInstall
mkdir -p $out
rm -rf ../bin/chobj
rm -rf ../bin/dchobj
cp -r ../bin $out/
cp -r ../data $out/
runHook postInstall
'';
};
# Smoke test: runs the ONNX MNIST TensorRT sample against its bundled data.
libnvinfer-test = writeShellApplication {
name = "libnvinfer-test";
text = ''
echo " * Running sample_onnx_mnist"
${libnvinfer-samples}/bin/sample_onnx_mnist --datadir ${libnvinfer-samples}/data/mnist
echo
echo
'';
};
# https://docs.nvidia.com/jetson/l4t-multimedia/group__l4t__mm__test__group.html
# Jetson Multimedia API sample programs (video decode/encode/convert, etc.)
# with their sample media installed under $out/data.
multimedia-samples = stdenv.mkDerivation {
pname = "multimedia-samples";
src = debs.common.nvidia-l4t-jetson-multimedia-api.src;
version = debs.common.nvidia-l4t-jetson-multimedia-api.version;
# src is a .deb archive; extract it with dpkg.
unpackCmd = "dpkg -x $src source";
sourceRoot = "source/usr/src/jetson_multimedia_api";
nativeBuildInputs = [ dpkg python3 ];
buildInputs = [ libX11 libdrm libglvnd opencv2 ]
++ (with l4t; [ l4t-cuda l4t-multimedia l4t-camera ])
++ (with cudaPackages; [ cudatoolkit tensorrt ]);
# Usually provided by pkg-config, but the samples don't use it.
NIX_CFLAGS_COMPILE = [ "-I${lib.getDev libdrm}/include/libdrm" ];
# TODO: Unify this with headers in l4t-jetson-multimedia-api
patches = [
(fetchurl {
url = "https://raw.githubusercontent.com/OE4T/meta-tegra/af0a93313c13e9eac4e80082d8a8e8ac5f7ad6e8/recipes-multimedia/argus/files/0005-Remove-DO-NOT-USE-declarations-from-v4l2_nv_extensio.patch";
sha256 = "sha256-IJ1teGEUxYDEPYSvYZbqdmUYg9tOORN7WGYpDaUUnHY=";
})
];
# Replace hard-coded FHS paths in the build files with store paths.
postPatch = ''
substituteInPlace samples/Rules.mk \
--replace /usr/local/cuda "${cudaPackages.cudatoolkit}"
substituteInPlace samples/08_video_dec_drm/Makefile \
--replace /usr/bin/python "${python3}/bin/python"
'';
# Install every mode-755 file produced under samples/; stray executable
# headers matched by the find are removed again afterwards.
installPhase = ''
runHook preInstall
install -Dm 755 -t $out/bin $(find samples -type f -perm 755)
rm -f $out/bin/*.h
cp -r data $out/
runHook postInstall
'';
};
# ./result/bin/video_decode H264 /nix/store/zry377bb5vkz560ra31ds8r485jsizip-multimedia-samples-35.1.0-20220825113828/data/Video/sample_outdoor_car_1080p_10fps.h26
# (Requires X11)
#
# Doing example here: https://docs.nvidia.com/jetson/l4t-multimedia/l4t_mm_07_video_convert.html
# Smoke test: decodes the bundled JPEG to raw YUV in a temp dir, then runs
# video_convert on the result; the temp dir is cleaned up on exit.
multimedia-test = writeShellApplication {
name = "multimedia-test";
text = ''
WORKDIR=$(mktemp -d)
on_exit() {
rm -rf "$WORKDIR"
}
trap on_exit EXIT
echo " * Running jpeg_decode"
${multimedia-samples}/bin/jpeg_decode num_files 1 ${multimedia-samples}/data/Picture/nvidia-logo.jpg "$WORKDIR"/nvidia-logo.yuv
echo
echo " * Running video_convert"
${multimedia-samples}/bin/video_convert "$WORKDIR"/nvidia-logo.yuv 1920 1080 YUV420 "$WORKDIR"/test.yuv 1920 1080 YUYV
echo
'';
};
# Tested via "./result/bin/vpi_sample_05_benchmark <cpu|pva|cuda>" (Try pva especially)
# Getting a bunch of "pva 16000000.pva0: failed to get firmware" messages, so unsure if its working.
# VPI (Vision Programming Interface) v2 sample programs. Every sample is a
# standalone CMake project in its own subdirectory, so configure and build
# walk the tree instead of using the stdenv cmake configure hook.
vpi2-samples = stdenv.mkDerivation {
  pname = "vpi2-samples";
  version = debs.common.vpi2-samples.version;
  src = debs.common.vpi2-samples.src;
  # src is a .deb archive; extract it with dpkg.
  unpackCmd = "dpkg -x $src source";
  sourceRoot = "source/opt/nvidia/vpi2/samples";
  nativeBuildInputs = [ dpkg cmake ];
  buildInputs = [ opencv ] ++ (with cudaPackages; [ vpi2 ]);
  # Configure each sample in-place. Fixed: this phase previously ran
  # `runHook preBuild`/`postBuild`, which skipped the configure hooks and
  # ran the build hooks twice.
  configurePhase = ''
    runHook preConfigure
    for dirname in $(find . -type d | sort); do
      if [[ -e "$dirname/CMakeLists.txt" ]]; then
        echo "Configuring $dirname"
        pushd $dirname
        cmake .
        popd 2>/dev/null
      fi
    done
    runHook postConfigure
  '';
  buildPhase = ''
    runHook preBuild
    for dirname in $(find . -type d | sort); do
      if [[ -e "$dirname/CMakeLists.txt" ]]; then
        echo "Building $dirname"
        pushd $dirname
        make $buildFlags
        popd 2>/dev/null
      fi
    done
    runHook postBuild
  '';
  enableParallelBuilding = true;
  # Install every executable (mode 755) file near the top of the tree.
  # Fixed: -maxdepth is a global option and must precede tests like -type,
  # otherwise GNU find warns and applies it to the whole expression anyway.
  installPhase = ''
    runHook preInstall
    install -Dm 755 -t $out/bin $(find . -maxdepth 2 -type f -perm 755)
    runHook postInstall
  '';
};
# Smoke test: runs the VPI benchmark sample on the cuda and cpu backends,
# and additionally on the pva backend when running on a Xavier (tegra194).
# Note the "''${CHIP}" below is Nix escaping for a literal shell ${CHIP}.
vpi2-test = writeShellApplication {
name = "vpi2-test";
text = ''
echo " * Running vpi_sample_05_benchmark cuda"
${vpi2-samples}/bin/vpi_sample_05_benchmark cuda
echo
echo " * Running vpi_sample_05_benchmark cpu"
${vpi2-samples}/bin/vpi_sample_05_benchmark cpu
echo
CHIP="$(tr -d '\0' < /proc/device-tree/compatible)"
if [[ "''${CHIP}" =~ "tegra194" ]]; then
echo " * Running vpi_sample_05_benchmark pva"
${vpi2-samples}/bin/vpi_sample_05_benchmark pva
echo
fi
'';
# PVA is only available on Xaviers. If the Jetpack version of the
# firmware doesnt match the vpi2 version, it might fail with the
# following:
# [ 435.318277] pva 16800000.pva1: invalid symbol id in descriptor for dst2 VMEM
# [ 435.318467] pva 16800000.pva1: failed to map DMA desc info
};
# Runs every component smoke test in sequence, printing a banner before
# each one. Output is identical to printing the three banner lines inline.
combined-test = writeShellApplication {
  name = "combined-test";
  text = ''
    banner() {
      echo "====="
      echo "Running $1 test"
      echo "====="
    }
    banner CUDA
    ${cuda-test}/bin/cuda-test
    banner CUDNN
    ${cudnn-test}/bin/cudnn-test
    banner TensorRT
    ${libnvinfer-test}/bin/libnvinfer-test
    banner Multimedia
    ${multimedia-test}/bin/multimedia-test
    banner VPI2
    ${vpi2-test}/bin/vpi2-test
  '';
};
in {
  # Everything built above is exported, samples and tests alike.
  inherit
    combined-test
    cuda-samples
    cuda-test
    cudnn-samples
    cudnn-test
    graphics-demos
    libnvinfer-samples
    libnvinfer-test
    multimedia-samples
    multimedia-test
    vpi2-samples
    vpi2-test;
}