diff --git a/fix-CVE-2024-41130.patch b/fix-CVE-2024-41130.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d9abc8208f2fb5d817694b4ee7ad3efcadeb7297
--- /dev/null
+++ b/fix-CVE-2024-41130.patch
@@ -0,0 +1,41 @@
+From 07283b1a90e1320aae4762c7e03c879043910252 Mon Sep 17 00:00:00 2001
+From: Georgi Gerganov
+Date: Sat, 20 Jul 2024 17:15:42 +0300
+Subject: [PATCH] gguf : handle null name during init (#8587)
+---
+ examples/gguf/gguf.cpp | 5 +++++
+ ggml.c                 | 2 +-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp
+index 5751437..7498f85 100644
+--- a/examples/gguf/gguf.cpp
++++ b/examples/gguf/gguf.cpp
+@@ -92,6 +92,11 @@ static bool gguf_ex_read_0(const std::string & fname) {
+
+     struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);
+
++    if (!ctx) {
++        fprintf(stderr, "%s: failed to load '%s'\n", __func__, fname.c_str());
++        return false;
++    }
++
+     printf("%s: version: %d\n", __func__, gguf_get_version(ctx));
+     printf("%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx));
+     printf("%s: data offset: %zu\n", __func__, gguf_get_data_offset(ctx));
+diff --git a/ggml.c b/ggml.c
+index 7680363..9110c4c 100644
+--- a/ggml.c
++++ b/ggml.c
+@@ -21881,7 +21881,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
+         gguf_tensor_info_sanitize(info);
+
+         // make sure there is no duplicated tensor names
+-        for (uint64_t j = 0; j < i; ++j) {
++        for (uint64_t j = 0; j < i && ok; ++j) {
+             if (strcmp(info->name.data, ctx->infos[j].name.data) == 0) {
+                 fprintf(stderr, "%s: duplicated tensor name %s\n", __func__, info->name.data);
+                 ok = false;
+--
+2.43.0
+
diff --git a/llama.cpp.spec b/llama.cpp.spec
index 7e114f6d4c313aaa08a72dbc924aebd7a8bc8216..86db8e41dc3b44744f80f6fa5f10f923416bb32d 100644
--- a/llama.cpp.spec
+++ b/llama.cpp.spec
@@ -3,12 +3,13 @@
 Name: llama.cpp
 Version: 20240531
-Release: 1
+Release: 2
 License: MIT
 Summary: Port of English lagre model LLaMA implemented based on C/C++
 URL: https://github.com/ggerganov/llama.cpp
 Source0: https://github.com/ggerganov/llama.cpp/archive/refs/tags/%{llama_commitid}.tar.gz
+Patch0: fix-CVE-2024-41130.patch

 BuildRequires: gcc,gcc-c++,cmake

@@ -40,6 +41,9 @@ popd
 %{_libdir}/*

 %changelog
+* Tue Jul 23 2024 kywqs - 20240531-2
+- fixed CVE-2024-41130
+
 * Fri Jun 21 2024 zhoupengcheng - 20240531-1
 - update llama.cpp to b3051