Add support for getting embeddings from the high-level API. Closes #4

This commit is contained in:
Andrei Betlen 2023-03-28 04:59:54 -04:00
parent 9ba5c3c3b7
commit 70b8a1ef75
2 changed files with 26 additions and 0 deletions

View file

@ -0,0 +1,12 @@
"""Minimal example: load a model with llama-cpp-python and print the
embedding vector for a fixed prompt."""
import json
import argparse

from llama_cpp import Llama


def main() -> None:
    """Parse CLI arguments, load the model, and print an embedding."""
    parser = argparse.ArgumentParser()
    # Placeholder default; pass -m/--model with the real model path.
    parser.add_argument("-m", "--model", type=str, default="./models/...")
    args = parser.parse_args()

    # embedding=True is required so the context exposes embeddings
    # (Llama.embed reads them via llama_get_embeddings).
    llm = Llama(model_path=args.model, embedding=True)
    print(llm.embed("Hello world!"))


if __name__ == "__main__":
    main()

View file

@ -105,6 +105,20 @@ class Llama:
output += llama_cpp.llama_token_to_str(self.ctx, token)
return output
def embed(self, text: str):
    """Compute the embedding vector for a string.

    Tokenizes *text*, evaluates the tokens from position 0, then reads
    the context's embedding buffer.

    Args:
        text: The utf-8 encoded string to embed.

    Returns:
        A list of embeddings.
    """
    token_ids = self.tokenize(text.encode("utf-8"))
    self._eval(token_ids, 0)
    # The C API hands back a raw float pointer; slice it down to the
    # model's embedding width to get a plain Python list.
    n_embd = llama_cpp.llama_n_embd(self.ctx)
    raw_embeddings = llama_cpp.llama_get_embeddings(self.ctx)
    return raw_embeddings[:n_embd]
def _eval(self, tokens: List[int], n_past):
rc = llama_cpp.llama_eval(
self.ctx,