diff --git a/sci-misc/llama-cpp/llama-cpp-9999.ebuild b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
index ff61000fd0..dd5ab054d7 100644
--- a/sci-misc/llama-cpp/llama-cpp-9999.ebuild
+++ b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
@@ -34,7 +34,7 @@ SLOT="0"
CPU_FLAGS_X86=( avx avx2 f16c )
-# wwma USE explained here: https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md#hip
+# wmma USE explained here: https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md#hip
-IUSE="curl openblas +openmp blis rocm cuda opencl vulkan flexiblas wmma examples"
+IUSE="curl openblas +openmp blis rocm cuda opencl openssl vulkan flexiblas wmma examples"
REQUIRED_USE="
?? (
@@ -63,6 +63,7 @@ CDEPEND="
)
)
cuda? ( dev-util/nvidia-cuda-toolkit:= )
+ openssl? ( dev-libs/openssl:= )
"
DEPEND="${CDEPEND}
opencl? ( dev-util/opencl-headers )
@@ -105,6 +106,7 @@ src_configure() {
-DGGML_NATIVE=0 # don't set march
-DGGML_RPC=ON
-DLLAMA_CURL=$(usex curl)
+ -DLLAMA_OPENSSL=$(usex openssl)
-DBUILD_NUMBER="1"
-DGENTOO_REMOVE_CMAKE_BLAS_HACK=ON
-DGGML_CUDA=$(usex cuda)
diff --git a/sci-misc/llama-cpp/metadata.xml b/sci-misc/llama-cpp/metadata.xml
index 8f1876715c..1dd2e6d959 100644
--- a/sci-misc/llama-cpp/metadata.xml
+++ b/sci-misc/llama-cpp/metadata.xml
@@ -12,6 +12,7 @@
Use rocWMMA to enhance flash attention performance
Build an OpenBLAS backend
Build an OpenCL backend, so far only works on Adreno and Intel GPUs
+ Use OpenSSL to support HTTPS
zl29ah@gmail.com