fi
THREADS=${THREADS:-$procs}
LATEST_VERSION=$(curl -s "https://api.github.com/repos/mudler/LocalAI/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
- VERSION="${VERSION:-$LATEST_VERSION}"
+ LOCALAI_VERSION="${LOCALAI_VERSION:-$LATEST_VERSION}" # renamed because VERSION is already defined in Fedora 42 Cloud Edition
MODELS_PATH=${MODELS_PATH:-/usr/share/local-ai/models}

@@ -228,7 +228,7 @@ WorkingDirectory=/usr/share/local-ai
[Install]
WantedBy=default.target
EOF
-
+
$SUDO touch /etc/localai.env
$SUDO echo "ADDRESS=0.0.0.0:$PORT" | $SUDO tee /etc/localai.env > /dev/null
$SUDO echo "API_KEY=$API_KEY" | $SUDO tee -a /etc/localai.env > /dev/null
@@ -261,14 +261,21 @@ EOF

# ref: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#installing-with-yum-or-dnf
install_container_toolkit_yum() {
- info 'Installing NVIDIA repository...'
+ info 'Installing NVIDIA container toolkit repository...'

curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | \
$SUDO tee /etc/yum.repos.d/nvidia-container-toolkit.repo

if [ "$PACKAGE_MANAGER" = "dnf" ]; then
- $SUDO $PACKAGE_MANAGER config-manager --enable nvidia-container-toolkit-experimental
- else
+ DNF_VERSION=$($PACKAGE_MANAGER --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -n1 | cut -d. -f1)
+ if [ "$DNF_VERSION" -ge 5 ]; then
+ # DNF5: Use 'setopt' to enable the repository
+ $SUDO $PACKAGE_MANAGER config-manager setopt nvidia-container-toolkit-experimental.enabled=1
+ else
+ # DNF4: Use '--enable' to enable the repository
+ $SUDO $PACKAGE_MANAGER config-manager --enable nvidia-container-toolkit-experimental
+ fi
+ else
$SUDO $PACKAGE_MANAGER -y install yum-utils
$SUDO $PACKAGE_MANAGER-config-manager --enable nvidia-container-toolkit-experimental
fi
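
Note on the DNF_VERSION check above: it keys off the first x.y.z version string that the package manager prints, so the same pipeline returns the major version on both DNF generations. A minimal standalone sketch (DNF_MAJOR is just an illustrative name), assuming a dnf binary whose --version output contains at least one dotted version string, which both DNF4 and DNF5 print:

    #!/bin/sh
    # Extract every x.y.z token, keep the first one, then keep only the major digit.
    DNF_MAJOR=$(dnf --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -n1 | cut -d. -f1)
    if [ "$DNF_MAJOR" -ge 5 ]; then
        echo "DNF5: enable repos with 'config-manager setopt <repoid>.enabled=1'"
    else
        echo "DNF4: enable repos with 'config-manager --enable <repoid>'"
    fi
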
@@ -277,7 +284,7 @@ install_container_toolkit_yum() {

# ref: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#installing-with-apt
install_container_toolkit_apt() {
- info 'Installing NVIDIA repository...'
+ info 'Installing NVIDIA container toolkit repository...'

curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | $SUDO gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
&& curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
@@ -289,7 +296,7 @@ install_container_toolkit_apt() {

# ref: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#installing-with-zypper
install_container_toolkit_zypper() {
- info 'Installing NVIDIA repository...'
+ info 'Installing NVIDIA container toolkit repository...'
$SUDO zypper ar https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo
$SUDO zypper modifyrepo --enable nvidia-container-toolkit-experimental
$SUDO zypper --gpg-auto-import-keys install -y nvidia-container-toolkit
@@ -325,14 +332,21 @@ install_container_toolkit() {
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-9-rocky-9
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#fedora
install_cuda_driver_yum() {
- info 'Installing NVIDIA repository...'
+ info 'Installing NVIDIA CUDA repository...'
case $PACKAGE_MANAGER in
yum)
$SUDO $PACKAGE_MANAGER -y install yum-utils
$SUDO $PACKAGE_MANAGER-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
;;
dnf)
- $SUDO $PACKAGE_MANAGER config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
+ DNF_VERSION=$($PACKAGE_MANAGER --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -n1 | cut -d. -f1)
+ if [ "$DNF_VERSION" -ge 5 ]; then
+ # DNF5: Use 'addrepo --from-repofile' to add the repository from its .repo file
+ $SUDO $PACKAGE_MANAGER config-manager addrepo --from-repofile=https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
+ else
+ # DNF4: Use '--add-repo' to add the repository
+ $SUDO $PACKAGE_MANAGER config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
+ fi
;;
esac

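For concreteness, this is how the two dnf branches above expand on a hypothetical Fedora 41 x86_64 host; the fedora41 and x86_64 parts are illustrative, in the script they come from $1$2 and $(uname -m):

    # DNF5: the repository definition is imported directly from NVIDIA's .repo file
    sudo dnf config-manager addrepo --from-repofile=https://developer.download.nvidia.com/compute/cuda/repos/fedora41/x86_64/cuda-fedora41.repo
    # DNF4: the same repository, added via the config-manager plugin
    sudo dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/fedora41/x86_64/cuda-fedora41.repo
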
@@ -356,7 +370,7 @@ install_cuda_driver_yum() {
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#ubuntu
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#debian
install_cuda_driver_apt() {
- info 'Installing NVIDIA repository...'
+ info 'Installing NVIDIA CUDA repository...'
curl -fsSL -o $TEMP_DIR/cuda-keyring.deb https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-keyring_1.1-1_all.deb

case $1 in
@@ -395,7 +409,7 @@ install_cuda() {
case $OS_NAME in
centos|rhel) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -d '.' -f 1) ;;
rocky) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -c1) ;;
- fedora) [ $OS_VERSION -lt '37' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '37' ;;
+ fedora) [ $OS_VERSION -lt '41' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '41' ;;
amzn) install_cuda_driver_yum 'fedora' '37' ;;
debian) install_cuda_driver_apt $OS_NAME $OS_VERSION ;;
ubuntu) install_cuda_driver_apt $OS_NAME $(echo $OS_VERSION | sed 's/\.//') ;;
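
The updated fedora case caps the repository version at 41: releases older than 41 keep their own CUDA repo path, while 41 and newer (including 42) fall back to the fedora41 repo, presumably because that is the newest Fedora repo NVIDIA publishes at the time of this change. A small illustration of the test, assuming OS_VERSION holds a bare release number as in /etc/os-release:

    # OS_VERSION=40 -> [ 40 -lt '41' ] is true  -> uses the fedora40 CUDA repo
    # OS_VERSION=42 -> [ 42 -lt '41' ] is false -> falls back to the fedora41 CUDA repo
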
@@ -485,7 +499,7 @@ install_docker() {
# if $SUDO docker ps --format '{{.Names}}' | grep -q local-ai; then
# info "LocalAI Docker container is already running."
# exit 0
- # fi
+ # fi

# info "Starting LocalAI Docker container..."
# $SUDO docker start local-ai
@@ -502,20 +516,24 @@ install_docker() {

IMAGE_TAG=
if [ "$HAS_CUDA" ]; then
- IMAGE_TAG=${VERSION}-cublas-cuda12-ffmpeg
+ IMAGE_TAG=${LOCALAI_VERSION}-cublas-cuda12-ffmpeg
# CORE
if [ "$CORE_IMAGES" = true ]; then
- IMAGE_TAG=${VERSION}-cublas-cuda12-ffmpeg-core
+ IMAGE_TAG=${LOCALAI_VERSION}-cublas-cuda12-ffmpeg-core
fi
# AIO
if [ "$USE_AIO" = true ]; then
- IMAGE_TAG=${VERSION}-aio-gpu-nvidia-cuda-12
+ IMAGE_TAG=${LOCALAI_VERSION}-aio-gpu-nvidia-cuda-12
fi

if ! available nvidia-smi; then
- info "Installing nvidia-cuda-toolkit..."
- # TODO:
- $SUDO apt-get -y install nvidia-cuda-toolkit
+ # TODO: temporary bypass for Fedora headless (Cloud Edition); a way to install nvidia-smi without pulling in X11 is still needed
+ OS_NAME=$ID
+ OS_VERSION=$VERSION_ID
+
+ case $OS_NAME in
+ debian|ubuntu) $SUDO apt-get -y install nvidia-cuda-toolkit;;
+ esac
fi

$SUDO docker run -v local-ai-data:/build/models \
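
To make the tag selection concrete, a sketch of how IMAGE_TAG resolves in the CUDA branch above for a placeholder release (v2.x.y is not a real tag):

    # Assuming LOCALAI_VERSION=v2.x.y and HAS_CUDA is set:
    #   default           -> localai/localai:v2.x.y-cublas-cuda12-ffmpeg
    #   CORE_IMAGES=true  -> localai/localai:v2.x.y-cublas-cuda12-ffmpeg-core
    #   USE_AIO=true      -> localai/localai:v2.x.y-aio-gpu-nvidia-cuda-12
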
@@ -526,14 +544,14 @@ install_docker() {
$envs \
-d -p $PORT:8080 --name local-ai localai/localai:$IMAGE_TAG $STARTCOMMAND
elif [ "$HAS_AMD" ]; then
- IMAGE_TAG=${VERSION}-hipblas-ffmpeg
+ IMAGE_TAG=${LOCALAI_VERSION}-hipblas-ffmpeg
# CORE
if [ "$CORE_IMAGES" = true ]; then
- IMAGE_TAG=${VERSION}-hipblas-ffmpeg-core
+ IMAGE_TAG=${LOCALAI_VERSION}-hipblas-ffmpeg-core
fi
# AIO
if [ "$USE_AIO" = true ]; then
- IMAGE_TAG=${VERSION}-aio-gpu-hipblas
+ IMAGE_TAG=${LOCALAI_VERSION}-aio-gpu-hipblas
fi

$SUDO docker run -v local-ai-data:/build/models \
@@ -545,14 +563,14 @@ install_docker() {
$envs \
-d -p $PORT:8080 --name local-ai localai/localai:$IMAGE_TAG $STARTCOMMAND
elif [ "$HAS_INTEL" ]; then
- IMAGE_TAG=${VERSION}-sycl-f32-ffmpeg
+ IMAGE_TAG=${LOCALAI_VERSION}-sycl-f32-ffmpeg
# CORE
if [ "$CORE_IMAGES" = true ]; then
- IMAGE_TAG=${VERSION}-sycl-f32-ffmpeg-core
+ IMAGE_TAG=${LOCALAI_VERSION}-sycl-f32-ffmpeg-core
fi
# AIO
if [ "$USE_AIO" = true ]; then
- IMAGE_TAG=${VERSION}-aio-gpu-intel-f32
+ IMAGE_TAG=${LOCALAI_VERSION}-aio-gpu-intel-f32
fi

$SUDO docker run -v local-ai-data:/build/models \
@@ -563,15 +581,15 @@ install_docker() {
$envs \
-d -p $PORT:8080 --name local-ai localai/localai:$IMAGE_TAG $STARTCOMMAND
else
- IMAGE_TAG=${VERSION}-ffmpeg
+ IMAGE_TAG=${LOCALAI_VERSION}-ffmpeg
# CORE
if [ "$CORE_IMAGES" = true ]; then
- IMAGE_TAG=${VERSION}-ffmpeg-core
+ IMAGE_TAG=${LOCALAI_VERSION}-ffmpeg-core
fi
# AIO
if [ "$USE_AIO" = true ]; then
- IMAGE_TAG=${VERSION}-aio-cpu
- fi
+ IMAGE_TAG=${LOCALAI_VERSION}-aio-cpu
+ fi
$SUDO docker run -v local-ai-data:/models \
--restart=always \
-e MODELS_PATH=/models \
@@ -588,8 +606,8 @@ install_docker() {
install_binary_darwin() {
[ "$(uname -s)" = "Darwin" ] || fatal 'This script is intended to run on macOS only.'

- info "Downloading LocalAI ${VERSION}..."
- curl --fail --show-error --location --progress-bar -o $TEMP_DIR/local-ai "https://github.com/mudler/LocalAI/releases/download/${VERSION}/local-ai-Darwin-${ARCH}"
+ info "Downloading LocalAI ${LOCALAI_VERSION}..."
+ curl --fail --show-error --location --progress-bar -o $TEMP_DIR/local-ai "https://github.com/mudler/LocalAI/releases/download/${LOCALAI_VERSION}/local-ai-Darwin-${ARCH}"

info "Installing to /usr/local/bin/local-ai"
install -o0 -g0 -m755 $TEMP_DIR/local-ai /usr/local/bin/local-ai
@@ -620,8 +638,8 @@ install_binary() {
exit 1
fi

- info "Downloading LocalAI ${VERSION}..."
- curl --fail --location --progress-bar -o $TEMP_DIR/local-ai "https://github.com/mudler/LocalAI/releases/download/${VERSION}/local-ai-Linux-${ARCH}"
+ info "Downloading LocalAI ${LOCALAI_VERSION}..."
+ curl --fail --location --progress-bar -o $TEMP_DIR/local-ai "https://github.com/mudler/LocalAI/releases/download/${LOCALAI_VERSION}/local-ai-Linux-${ARCH}"

for BINDIR in /usr/local/bin /usr/bin /bin; do
echo $PATH | grep -q $BINDIR && break || continue
@@ -675,7 +693,7 @@ detect_start_command() {
if [ "$WORKER" = true ]; then
if [ -n "$P2P_TOKEN" ]; then
STARTCOMMAND="worker p2p-llama-cpp-rpc"
- else
+ else
STARTCOMMAND="worker llama-cpp-rpc"
fi
elif [ "$FEDERATED" = true ]; then