From 2811014bae356401856a9c0796f42e719f2e8c3c Mon Sep 17 00:00:00 2001
From: Douglas Hanley
Date: Fri, 8 Mar 2024 19:59:35 -0600
Subject: [PATCH] feat: Switch embed to llama_get_embeddings_seq (#1263)

* switch to llama_get_embeddings_seq

* Remove duplicate definition of llama_get_embeddings_seq

Co-authored-by: Andrei

---------

Co-authored-by: Andrei
---
 llama_cpp/llama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 7187b4a..aabbb7e 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -814,7 +814,7 @@ class Llama:
 
         # store embeddings
         for i in range(n_seq):
-            embedding: List[float] = llama_cpp.llama_get_embeddings_ith(
+            embedding: List[float] = llama_cpp.llama_get_embeddings_seq(
                 self._ctx.ctx, i
             )[:n_embd]
             if normalize: