[PATCH] gnu: llama-cpp: Update to 1873.

  • Open
  • quality assurance status badge
Details
2 participants
  • David Pflug
  • Mathieu Othacehe
Owner
unassigned
Submitted by
David Pflug
Severity
normal
D
D
David Pflug wrote on 14 Jan 21:32 +0100
(address . guix-patches@gnu.org)(name . David Pflug)(address . david@pflug.io)
20240114203255.26500-1-david@pflug.io
* gnu/packages/machine-learning.scm (llama-cpp): Update to 1873.

Change-Id: I091cd20192743c87b497ea3c5fd18a75ada75d9d
---
gnu/packages/machine-learning.scm | 133 ++++++++++++++++++------------
1 file changed, 78 insertions(+), 55 deletions(-)

Toggle diff (161 lines)
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 1616738399..0cdfe7bb08 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -22,6 +22,7 @@
;;; Copyright © 2023 Navid Afkhami <navid.afkhami@mdc-berlin.de>
;;; Copyright © 2023 Zheng Junjie <873216071@qq.com>
;;; Copyright © 2023 Troy Figiel <troy@troyfigiel.com>
+;;; Copyright © 2023 David Pflug <david@pflug.io>
;;;
;;; This file is part of GNU Guix.
;;;
@@ -517,63 +518,63 @@ (define-public guile-aiscm-next
(deprecated-package "guile-aiscm-next" guile-aiscm))
(define-public llama-cpp
- (let ((commit "f31b5397143009d682db90fd2a6cde83f1ef00eb")
- (revision "0"))
- (package
- (name "llama-cpp")
- (version (git-version "0.0.0" revision commit))
- (source
- (origin
- (method git-fetch)
- (uri (git-reference
- (url "https://github.com/ggerganov/llama.cpp")
- (commit (string-append "master-" (string-take commit 7)))))
- (file-name (git-file-name name version))
- (sha256
- (base32 "0ys6n53n032zq1ll9f3vgxk8sw0qq7x3fi7awsyy13adzp3hn08p"))))
- (build-system cmake-build-system)
- (arguments
- (list
- #:modules '((ice-9 textual-ports)
- (guix build utils)
- ((guix build python-build-system) #:prefix python:)
- (guix build cmake-build-system))
- #:imported-modules `(,@%cmake-build-system-modules
- (guix build python-build-system))
- #:phases
- #~(modify-phases %standard-phases
- (add-before 'install 'install-python-scripts
- (lambda _
- (let ((bin (string-append #$output "/bin/")))
- (define (make-script script)
- (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
- (call-with-input-file
- (string-append "../source/" script suffix)
- (lambda (input)
- (call-with-output-file (string-append bin script)
- (lambda (output)
- (format output "#!~a/bin/python3\n~a"
- #$(this-package-input "python")
- (get-string-all input))))))
- (chmod (string-append bin script) #o555)))
- (mkdir-p bin)
- (make-script "convert-pth-to-ggml")
- (make-script "convert-lora-to-ggml")
- (make-script "convert"))))
- (add-after 'install-python-scripts 'wrap-python-scripts
- (assoc-ref python:%standard-phases 'wrap))
- (replace 'install
- (lambda _
- (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
- (inputs (list python))
- (propagated-inputs
- (list python-numpy python-pytorch python-sentencepiece))
- (home-page "https://github.com/ggerganov/llama.cpp")
- (synopsis "Port of Facebook's LLaMA model in C/C++")
- (description "This package provides a port to Facebook's LLaMA collection
+ (package
+ (name "llama-cpp")
+ (version "1873")
+ (source
+ (origin
+ (method git-fetch)
+ (uri (git-reference
+ (url "https://github.com/ggerganov/llama.cpp")
+ (commit (string-append "b" version))))
+ (file-name (git-file-name name version))
+ (sha256
+ (base32 "11may9gkafg5bfma5incijvkypjgx9778gmygxp3x2dz1140809d"))))
+ (build-system cmake-build-system)
+ (arguments
+ (list
+ #:modules '((ice-9 textual-ports)
+ (guix build utils)
+ ((guix build python-build-system) #:prefix python:)
+ (guix build cmake-build-system))
+ #:imported-modules `(,@%cmake-build-system-modules
+ (guix build python-build-system))
+ #:phases
+ #~(modify-phases %standard-phases
+ (add-before 'install 'install-python-scripts
+ (lambda _
+ (let ((bin (string-append #$output "/bin/")))
+ (define (make-script script)
+ (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
+ (call-with-input-file
+ (string-append "../source/" script suffix)
+ (lambda (input)
+ (call-with-output-file (string-append bin script)
+ (lambda (output)
+ (format output "#!~a/bin/python3\n~a"
+ #$(this-package-input "python")
+ (get-string-all input))))))
+ (chmod (string-append bin script) #o555)))
+ (mkdir-p bin)
+ (make-script "convert-hf-to-gguf")
+ (make-script "convert-llama-ggml-to-gguf")
+ (make-script "convert-lora-to-ggml")
+ (make-script "convert-persimmon-to-gguf")
+ (make-script "convert"))))
+ (add-after 'install-python-scripts 'wrap-python-scripts
+ (assoc-ref python:%standard-phases 'wrap))
+ (replace 'install
+ (lambda _
+ (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
+ (inputs (list python))
+ (propagated-inputs
+ (list python-numpy python-pytorch python-sentencepiece python-gguf))
+ (home-page "https://github.com/ggerganov/llama.cpp")
+ (synopsis "Port of Facebook's LLaMA model in C/C++")
+ (description "This package provides a port to Facebook's LLaMA collection
of foundation language models. It requires model parameters to be downloaded
independently to be able to run a LLaMA model.")
- (license license:expat))))
+ (license license:expat)))
(define-public mcl
(package
@@ -5257,3 +5258,25 @@ (define-public oneapi-dnnl
"OneAPI Deep Neural Network Library (oneDNN) is a cross-platform
performance library of basic building blocks for deep learning applications.")
(license license:asl2.0)))
+
+(define-public python-gguf
+ (package
+ (name "python-gguf")
+ (version "0.6.0")
+ (source
+ (origin
+ (method url-fetch)
+ (uri (pypi-uri "gguf" version))
+ (sha256
+ (base32 "0rbyc2h3kpqnrvbyjvv8a69l577jv55a31l12jnw21m1lamjxqmj"))))
+ (build-system pyproject-build-system)
+ (arguments
+ `(#:phases
+ (modify-phases %standard-phases
+ (delete 'check))))
+ (inputs (list poetry python-pytest))
+ (propagated-inputs (list python-numpy))
+ (home-page "https://ggml.ai")
+ (synopsis "Read and write ML models in GGUF for GGML")
+ (description "Read and write ML models in GGUF for GGML")
+ (license license:expat)))

base-commit: 18393fcdddf5c3d834fa89ebf5f3925fc5b166ed
--
2.41.0
M
M
Mathieu Othacehe wrote on 17 Jan 18:29 +0100
(name . David Pflug)(address . david@pflug.io)(address . 68455@debbugs.gnu.org)
87o7djj2bk.fsf@gnu.org
Hello David,

Toggle quote (22 lines)
> +(define-public python-gguf
> + (package
> + (name "python-gguf")
> + (version "0.6.0")
> + (source
> + (origin
> + (method url-fetch)
> + (uri (pypi-uri "gguf" version))
> + (sha256
> + (base32 "0rbyc2h3kpqnrvbyjvv8a69l577jv55a31l12jnw21m1lamjxqmj"))))
> + (build-system pyproject-build-system)
> + (arguments
> + `(#:phases
> + (modify-phases %standard-phases
> + (delete 'check))))
> + (inputs (list poetry python-pytest))
> + (propagated-inputs (list python-numpy))
> + (home-page "https://ggml.ai")
> + (synopsis "Read and write ML models in GGUF for GGML")
> + (description "Read and write ML models in GGUF for GGML")
> + (license license:expat)))

This should be part of a separate patch. Can you send a v2?

Thanks,

Mathieu
D
D
David Pflug wrote on 26 Jan 13:20 +0100
[PATCH v2] gnu: llama-cpp: Update to 1873.
(address . 68455@debbugs.gnu.org)(name . David Pflug)(address . david@pflug.io)
20240126122110.10991-1-david@pflug.io
* gnu/packages/machine-learning.scm (llama-cpp): Update to 1873.

python-gguf added by #68735

Change-Id: I091cd20192743c87b497ea3c5fd18a75ada75d9d
---
gnu/packages/machine-learning.scm | 110 +++++++++++++++---------------
1 file changed, 55 insertions(+), 55 deletions(-)

Toggle diff (127 lines)
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 0e88f7265b..1d590d1c1b 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -519,63 +519,63 @@ (define-public guile-aiscm-next
(deprecated-package "guile-aiscm-next" guile-aiscm))
(define-public llama-cpp
- (let ((commit "f31b5397143009d682db90fd2a6cde83f1ef00eb")
- (revision "0"))
- (package
- (name "llama-cpp")
- (version (git-version "0.0.0" revision commit))
- (source
- (origin
- (method git-fetch)
- (uri (git-reference
- (url "https://github.com/ggerganov/llama.cpp")
- (commit (string-append "master-" (string-take commit 7)))))
- (file-name (git-file-name name version))
- (sha256
- (base32 "0ys6n53n032zq1ll9f3vgxk8sw0qq7x3fi7awsyy13adzp3hn08p"))))
- (build-system cmake-build-system)
- (arguments
- (list
- #:modules '((ice-9 textual-ports)
- (guix build utils)
- ((guix build python-build-system) #:prefix python:)
- (guix build cmake-build-system))
- #:imported-modules `(,@%cmake-build-system-modules
- (guix build python-build-system))
- #:phases
- #~(modify-phases %standard-phases
- (add-before 'install 'install-python-scripts
- (lambda _
- (let ((bin (string-append #$output "/bin/")))
- (define (make-script script)
- (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
- (call-with-input-file
- (string-append "../source/" script suffix)
- (lambda (input)
- (call-with-output-file (string-append bin script)
- (lambda (output)
- (format output "#!~a/bin/python3\n~a"
- #$(this-package-input "python")
- (get-string-all input))))))
- (chmod (string-append bin script) #o555)))
- (mkdir-p bin)
- (make-script "convert-pth-to-ggml")
- (make-script "convert-lora-to-ggml")
- (make-script "convert"))))
- (add-after 'install-python-scripts 'wrap-python-scripts
- (assoc-ref python:%standard-phases 'wrap))
- (replace 'install
- (lambda _
- (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
- (inputs (list python))
- (propagated-inputs
- (list python-numpy python-pytorch python-sentencepiece))
- (home-page "https://github.com/ggerganov/llama.cpp")
- (synopsis "Port of Facebook's LLaMA model in C/C++")
- (description "This package provides a port to Facebook's LLaMA collection
+ (package
+ (name "llama-cpp")
+ (version "1873")
+ (source
+ (origin
+ (method git-fetch)
+ (uri (git-reference
+ (url "https://github.com/ggerganov/llama.cpp")
+ (commit (string-append "b" version))))
+ (file-name (git-file-name name version))
+ (sha256
+ (base32 "11may9gkafg5bfma5incijvkypjgx9778gmygxp3x2dz1140809d"))))
+ (build-system cmake-build-system)
+ (arguments
+ (list
+ #:modules '((ice-9 textual-ports)
+ (guix build utils)
+ ((guix build python-build-system) #:prefix python:)
+ (guix build cmake-build-system))
+ #:imported-modules `(,@%cmake-build-system-modules
+ (guix build python-build-system))
+ #:phases
+ #~(modify-phases %standard-phases
+ (add-before 'install 'install-python-scripts
+ (lambda _
+ (let ((bin (string-append #$output "/bin/")))
+ (define (make-script script)
+ (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
+ (call-with-input-file
+ (string-append "../source/" script suffix)
+ (lambda (input)
+ (call-with-output-file (string-append bin script)
+ (lambda (output)
+ (format output "#!~a/bin/python3\n~a"
+ #$(this-package-input "python")
+ (get-string-all input))))))
+ (chmod (string-append bin script) #o555)))
+ (mkdir-p bin)
+ (make-script "convert-hf-to-gguf")
+ (make-script "convert-llama-ggml-to-gguf")
+ (make-script "convert-lora-to-ggml")
+ (make-script "convert-persimmon-to-gguf")
+ (make-script "convert"))))
+ (add-after 'install-python-scripts 'wrap-python-scripts
+ (assoc-ref python:%standard-phases 'wrap))
+ (replace 'install
+ (lambda _
+ (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
+ (inputs (list python))
+ (propagated-inputs
+ (list python-numpy python-pytorch python-sentencepiece python-gguf))
+ (home-page "https://github.com/ggerganov/llama.cpp")
+ (synopsis "Port of Facebook's LLaMA model in C/C++")
+ (description "This package provides a port to Facebook's LLaMA collection
of foundation language models. It requires model parameters to be downloaded
independently to be able to run a LLaMA model.")
- (license license:expat))))
+ (license license:expat)))
(define-public mcl
(package

base-commit: c5453fbfeb0dbd19cb402199fe1e5ad51a051e56
--
2.41.0
?
Your comment

Commenting via the web interface is currently disabled.

To comment on this conversation send an email to 68455@debbugs.gnu.org

To respond to this issue using the mumi CLI, first switch to it
mumi current 68455
Then, you may apply the latest patchset in this issue (with sign off)
mumi am -- -s
Or, compose a reply to this issue
mumi compose
Or, send patches to this issue
mumi send-email *.patch