Add pipeline guide and enhance CMake configuration for llama integration

This commit is contained in:
Aaron Po
2026-03-28 14:16:31 -04:00
parent ad1adfeb62
commit 7f1ca2050c
4 changed files with 651 additions and 76 deletions

View File

@@ -19,6 +19,23 @@ FetchContent_Declare(
)
FetchContent_MakeAvailable(nlohmann_json)
# Fetch llama.cpp pinned to a stable release commit so builds are reproducible.
FetchContent_Declare(
    llama
    GIT_REPOSITORY https://github.com/ggml-org/llama.cpp.git
    # Stable release tag: b8485 (commit 31a5cf4c3f5d3af7f16fc4abc9baa75f8d568421)
    GIT_TAG 31a5cf4c3f5d3af7f16fc4abc9baa75f8d568421
)
FetchContent_MakeAvailable(llama)
# Workaround for upstream llama.cpp release stream (b8485/b8496) missing
# <algorithm> include in llama-quant.cpp where std::sort is used.
# Remove once fixed upstream.
if(TARGET llama)
    target_compile_options(llama PRIVATE
        # Force-include <algorithm> into every C++ TU of the llama target.
        # NOTE: "-include" and "algorithm" must be separate compiler arguments.
        # Inside a generator expression that means joining them with ';' and
        # quoting the whole expression — an unquoted literal space here would
        # split the genex itself into two malformed arguments at parse time.
        "$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<NOT:$<CXX_COMPILER_ID:MSVC>>>:-include;algorithm>"
        # MSVC spelling of the same force-include (/FI takes the header name
        # directly; -include is GCC/Clang-only syntax).
        "$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CXX_COMPILER_ID:MSVC>>:/FIalgorithm>"
    )
endif()
file(GLOB_RECURSE SOURCES CONFIGURE_DEPENDS
src/*.cpp
src/*.h
@@ -36,6 +53,7 @@ target_link_libraries(biergarten-pipeline
CURL::libcurl
nlohmann_json::nlohmann_json
Boost::unit_test_framework
llama
)
target_compile_options(biergarten-pipeline PRIVATE
@@ -95,6 +113,7 @@ if(BUILD_TESTING)
Boost::unit_test_framework
CURL::libcurl
nlohmann_json::nlohmann_json
llama
)
add_test(