From 9aa64163dbf57a6f36b80ba1b8399b050607b9c7 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 9 Jul 2023 11:40:59 -0400 Subject: [PATCH 01/11] Update llama.cpp --- vendor/llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 6463955..1d16309 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 64639555ff93c8ead2b80becb49cc6b60aeac240 +Subproject commit 1d1630996920f889cdc08de26cebf2415958540e From 0f3c474a49af412117449b19a2844f84c23205ca Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 9 Jul 2023 11:44:29 -0400 Subject: [PATCH 02/11] Bump version --- CHANGELOG.md | 2 ++ pyproject.toml | 2 +- setup.py | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 805d7be..0e181d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.1.69] + ### Added - (server) Streaming requests can are now interrupted pre-maturely when a concurrent request is made. Can be controlled with the `interrupt_requests` setting. diff --git a/pyproject.toml b/pyproject.toml index 841a868..fb19629 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "llama_cpp_python" -version = "0.1.68" +version = "0.1.69" description = "Python bindings for the llama.cpp library" authors = ["Andrei Betlen "] license = "MIT" diff --git a/setup.py b/setup.py index 1d7ecbc..baaabcc 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ setup( description="A Python wrapper for llama.cpp", long_description=long_description, long_description_content_type="text/markdown", - version="0.1.68", + version="0.1.69", author="Andrei Betlen", author_email="abetlen@gmail.com", license="MIT", From 6f70cc4b7dd950a95708ed7e7da9ac550e87a76c Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 9 Jul 2023 18:03:31 -0400 Subject: [PATCH 03/11] bugfix: pydantic settings missing / changed fields --- llama_cpp/server/__main__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/llama_cpp/server/__main__.py b/llama_cpp/server/__main__.py index 2110db3..995dd44 100644 --- a/llama_cpp/server/__main__.py +++ b/llama_cpp/server/__main__.py @@ -30,14 +30,14 @@ from llama_cpp.server.app import create_app, Settings if __name__ == "__main__": parser = argparse.ArgumentParser() - for name, field in Settings.__model_fields__.items(): - description = field.field_info.description + for name, field in Settings.model_fields.items(): + description = field.description if field.default is not None and description is not None: description += f" (default: {field.default})" parser.add_argument( f"--{name}", dest=name, - type=field.type_, + type=field.annotation if field.annotation is not None else str, help=description, ) From a86bfdf0a50f23a6aebb3f095ada0afcf8791d6e Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 9 Jul 2023 18:13:29 -0400 Subject: [PATCH 04/11] bugfix: truncate completion max_tokens to fit context length by default --- llama_cpp/llama.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index 62e0dae..edb68c9 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -824,18 +824,14 @@ class Llama: if self.verbose: llama_cpp.llama_reset_timings(self.ctx) + if len(prompt_tokens) >= llama_cpp.llama_n_ctx(self.ctx): + raise ValueError( + f"Requested tokens exceed 
context window of {llama_cpp.llama_n_ctx(self.ctx)}" + ) + if max_tokens <= 0: # Unlimited, depending on n_ctx. - if len(prompt_tokens) >= int(llama_cpp.llama_n_ctx(self.ctx)): - raise ValueError( - f"Requested tokens exceed context window of {llama_cpp.llama_n_ctx(self.ctx)}" - ) - else: - max_tokens = int(llama_cpp.llama_n_ctx(self.ctx)) - len(prompt_tokens) - elif len(prompt_tokens) + max_tokens > int(llama_cpp.llama_n_ctx(self.ctx)): - raise ValueError( - f"Requested tokens ({len(prompt_tokens)}) exceed context window of {self._n_ctx}" - ) + max_tokens = llama_cpp.llama_n_ctx(self.ctx) - len(prompt_tokens) # Truncate max_tokens if requested tokens would exceed the context window max_tokens = ( From df3d54593868fbe5e8e488cd0c7a638971fbd3b8 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 9 Jul 2023 18:13:41 -0400 Subject: [PATCH 05/11] Update changelog --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e181d6..4097413 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Fixed + +- (Llama.create_completion) Revert change so that `max_tokens` is not truncated to `context_size` in `create_completion` +- (server) Fixed changed settings field names from pydantic v2 migration + ## [0.1.69] ### Added From c988c2ac0b7611e4fe8001a28002767f37e09675 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 9 Jul 2023 18:19:37 -0400 Subject: [PATCH 06/11] Bump version --- pyproject.toml | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index fb19629..a9e012e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "llama_cpp_python" -version = "0.1.69" +version = "0.1.70" description = "Python bindings for the llama.cpp library" authors = ["Andrei Betlen "] license = "MIT" diff --git a/setup.py b/setup.py index baaabcc..b8acedb 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ setup( description="A Python wrapper for llama.cpp", long_description=long_description, long_description_content_type="text/markdown", - version="0.1.69", + version="0.1.70", author="Andrei Betlen", author_email="abetlen@gmail.com", license="MIT", From 8e0f6253db0e8aa30bcc90fc26d49d221d003070 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 9 Jul 2023 18:20:04 -0400 Subject: [PATCH 07/11] Bump version --- CHANGELOG.md | 2 ++ pyproject.toml | 2 +- setup.py | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4097413..8b5db37 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.1.70] + ### Fixed - (Llama.create_completion) Revert change so that `max_tokens` is not truncated to `context_size` in `create_completion` diff --git a/pyproject.toml b/pyproject.toml index fb19629..a9e012e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "llama_cpp_python" -version = "0.1.69" +version = "0.1.70" description = "Python bindings for the llama.cpp library" authors = ["Andrei Betlen "] license = "MIT" diff --git a/setup.py b/setup.py index baaabcc..b8acedb 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ setup( description="A Python wrapper for llama.cpp", long_description=long_description, long_description_content_type="text/markdown", - version="0.1.69", + 
version="0.1.70", author="Andrei Betlen", author_email="abetlen@gmail.com", license="MIT", From 7bb0024cd0c12d0d36207172410f13e1d343eeac Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Wed, 12 Jul 2023 19:31:43 -0400 Subject: [PATCH 08/11] Fix uvicorn dependency --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b8acedb..ab5d825 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ setup( packages=["llama_cpp", "llama_cpp.server"], install_requires=["typing-extensions>=4.5.0", "numpy>=1.20.0", "diskcache>=5.6.1"], extras_require={ - "server": ["uvicorn>=0.22.1", "fastapi>=0.100.0", "pydantic-settings>=2.0.1", "sse-starlette>=1.6.1"], + "server": ["uvicorn>=0.22.0", "fastapi>=0.100.0", "pydantic-settings>=2.0.1", "sse-starlette>=1.6.1"], }, python_requires=">=3.7", classifiers=[ From 896ab7b88a45768dcb0e6038ed6ec8cbdd88a634 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 13 Jul 2023 23:24:55 -0400 Subject: [PATCH 09/11] Update llama.cpp --- llama_cpp/llama_cpp.py | 55 +++++++++++++++++++++++++++++++++++++----- vendor/llama.cpp | 2 +- 2 files changed, 50 insertions(+), 7 deletions(-) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 17c6319..b5bab56 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -326,13 +326,23 @@ _lib.llama_mlock_supported.restype = c_bool # // Initialize the llama + ggml backend # // If numa is true, use NUMA optimizations # // Call once at the start of the program -# LLAMA_API void llama_init_backend(bool numa); -def llama_init_backend(numa: c_bool): - return _lib.llama_init_backend(numa) +# LLAMA_API void llama_backend_init(bool numa); +def llama_backend_init(numa: c_bool): + return _lib.llama_backend_init(numa) -_lib.llama_init_backend.argtypes = [c_bool] -_lib.llama_init_backend.restype = None +_lib.llama_backend_init.argtypes = [c_bool] +_lib.llama_backend_init.restype = None + + +# // Call once at the end of the program - currently only used for MPI +# LLAMA_API void llama_backend_free(); +def llama_backend_free(): + return _lib.llama_backend_free() + + +_lib.llama_backend_free.argtypes = [] +_lib.llama_backend_free.restype = None # LLAMA_API struct llama_model * llama_load_model_from_file( @@ -819,6 +829,39 @@ _lib.llama_sample_frequency_and_presence_penalties.argtypes = [ _lib.llama_sample_frequency_and_presence_penalties.restype = None +# /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806 +# /// @param candidates A vector of `llama_token_data` containing the candidate tokens, the logits must be directly extracted from the original generation context without being sorted. +# /// @params guidance_ctx A separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context. +# /// @params scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance. +# /// @params smooth_factor Smooth factor between guidance logits and original logits. 1.0f means only use guidance logits. 0.0f means only original logits. 
+# LLAMA_API void llama_sample_classifier_free_guidance( +# struct llama_context * ctx, +# llama_token_data_array * candidates, +# struct llama_context * guidance_ctx, +# float scale, +# float smooth_factor); +def llama_sample_classifier_free_guidance( + ctx: llama_context_p, + candidates, # type: _Pointer[llama_token_data_array] + guidance_ctx: llama_context_p, + scale: c_float, + smooth_factor: c_float, +): + return _lib.llama_sample_classifier_free_guidance( + ctx, candidates, guidance_ctx, scale, smooth_factor + ) + + +_lib.llama_sample_classifier_free_guidance.argtypes = [ + llama_context_p, + llama_token_data_array_p, + llama_context_p, + c_float, + c_float, +] +_lib.llama_sample_classifier_free_guidance.restype = None + + # @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits. # LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates); def llama_sample_softmax( @@ -1063,5 +1106,5 @@ _lib.llama_print_system_info.restype = c_char_p _llama_initialized = False if not _llama_initialized: - llama_init_backend(c_bool(False)) + llama_backend_init(c_bool(False)) _llama_initialized = True diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 1d16309..32c5411 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 1d1630996920f889cdc08de26cebf2415958540e +Subproject commit 32c54116318929c90fd7ae814cf9b5232cd44c36 From de4cc5a233952e0dede642702f3170cd1bae5869 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 13 Jul 2023 23:25:12 -0400 Subject: [PATCH 10/11] bugfix: pydantic v2 fields --- llama_cpp/server/app.py | 106 +++++++++++++++++++--------------------- 1 file changed, 49 insertions(+), 57 deletions(-) diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py index ffd07fa..202a06d 100644 --- a/llama_cpp/server/app.py +++ b/llama_cpp/server/app.py @@ -31,9 +31,7 @@ class Settings(BaseSettings): ge=0, description="The number of layers to put on the GPU. The rest will be on the CPU.", ) - seed: int = Field( - default=1337, description="Random seed. -1 for random." - ) + seed: int = Field(default=1337, description="Random seed. -1 for random.") n_batch: int = Field( default=512, ge=1, description="The batch size to use per eval." ) @@ -80,12 +78,8 @@ class Settings(BaseSettings): verbose: bool = Field( default=True, description="Whether to print debug information." ) - host: str = Field( - default="localhost", description="Listen address" - ) - port: int = Field( - default=8000, description="Listen port" - ) + host: str = Field(default="localhost", description="Listen address") + port: int = Field(default=8000, description="Listen port") interrupt_requests: bool = Field( default=True, description="Whether to interrupt requests when a new request is received.", @@ -178,7 +172,7 @@ def get_settings(): yield settings -model_field = Field(description="The model to use for generating completions.") +model_field = Field(description="The model to use for generating completions.", default=None) max_tokens_field = Field( default=16, ge=1, le=2048, description="The maximum number of tokens to generate." 
@@ -242,21 +236,18 @@ mirostat_mode_field = Field( default=0, ge=0, le=2, - description="Enable Mirostat constant-perplexity algorithm of the specified version (1 or 2; 0 = disabled)" + description="Enable Mirostat constant-perplexity algorithm of the specified version (1 or 2; 0 = disabled)", ) mirostat_tau_field = Field( default=5.0, ge=0.0, le=10.0, - description="Mirostat target entropy, i.e. the target perplexity - lower values produce focused and coherent text, larger values produce more diverse and less coherent text" + description="Mirostat target entropy, i.e. the target perplexity - lower values produce focused and coherent text, larger values produce more diverse and less coherent text", ) mirostat_eta_field = Field( - default=0.1, - ge=0.001, - le=1.0, - description="Mirostat learning rate" + default=0.1, ge=0.001, le=1.0, description="Mirostat learning rate" ) @@ -294,22 +285,23 @@ class CreateCompletionRequest(BaseModel): model: Optional[str] = model_field n: Optional[int] = 1 best_of: Optional[int] = 1 - user: Optional[str] = Field(None) + user: Optional[str] = Field(default=None) # llama.cpp specific parameters top_k: int = top_k_field repeat_penalty: float = repeat_penalty_field logit_bias_type: Optional[Literal["input_ids", "tokens"]] = Field(None) - class Config: - schema_extra = { - "example": { - "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n", - "stop": ["\n", "###"], - } + model_config = { + "json_schema_extra": { + "examples": [ + { + "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n", + "stop": ["\n", "###"], + } + ] } - - + } def make_logit_bias_processor( @@ -328,7 +320,7 @@ def make_logit_bias_processor( elif logit_bias_type == "tokens": for token, score in logit_bias.items(): - token = token.encode('utf-8') + token = token.encode("utf-8") for input_id in llama.tokenize(token, add_bos=False): to_bias[input_id] = score @@ -352,7 +344,7 @@ async def create_completion( request: Request, body: CreateCompletionRequest, llama: llama_cpp.Llama = Depends(get_llama), -): +) -> llama_cpp.Completion: if isinstance(body.prompt, list): assert len(body.prompt) <= 1 body.prompt = body.prompt[0] if len(body.prompt) > 0 else "" @@ -364,7 +356,7 @@ async def create_completion( "logit_bias_type", "user", } - kwargs = body.dict(exclude=exclude) + kwargs = body.model_dump(exclude=exclude) if body.logit_bias is not None: kwargs['logits_processor'] = llama_cpp.LogitsProcessorList([ @@ -396,7 +388,7 @@ async def create_completion( return EventSourceResponse( recv_chan, data_sender_callable=partial(event_publisher, send_chan) - ) + ) # type: ignore else: completion: llama_cpp.Completion = await run_in_threadpool(llama, **kwargs) # type: ignore return completion @@ -405,16 +397,17 @@ async def create_completion( class CreateEmbeddingRequest(BaseModel): model: Optional[str] = model_field input: Union[str, List[str]] = Field(description="The input to embed.") - user: Optional[str] + user: Optional[str] = Field(default=None) - class Config: - schema_extra = { - "example": { - "input": "The food was delicious and the waiter...", - } + model_config = { + "json_schema_extra": { + "examples": [ + { + "input": "The food was delicious and the waiter...", + } + ] } - - + } @router.post( @@ -424,7 +417,7 @@ async def create_embedding( request: CreateEmbeddingRequest, llama: llama_cpp.Llama = Depends(get_llama) ): return await run_in_threadpool( - llama.create_embedding, **request.dict(exclude={"user"}) + llama.create_embedding, 
**request.model_dump(exclude={"user"}) ) @@ -461,21 +454,22 @@ class CreateChatCompletionRequest(BaseModel): repeat_penalty: float = repeat_penalty_field logit_bias_type: Optional[Literal["input_ids", "tokens"]] = Field(None) - class Config: - schema_extra = { - "example": { - "messages": [ - ChatCompletionRequestMessage( - role="system", content="You are a helpful assistant." - ), - ChatCompletionRequestMessage( - role="user", content="What is the capital of France?" - ), - ] - } + model_config = { + "json_schema_extra": { + "examples": [ + { + "messages": [ + ChatCompletionRequestMessage( + role="system", content="You are a helpful assistant." + ).model_dump(), + ChatCompletionRequestMessage( + role="user", content="What is the capital of France?" + ).model_dump(), + ] + } + ] } - - + } @router.post( @@ -486,14 +480,14 @@ async def create_chat_completion( body: CreateChatCompletionRequest, llama: llama_cpp.Llama = Depends(get_llama), settings: Settings = Depends(get_settings), -) -> Union[llama_cpp.ChatCompletion]: # type: ignore +) -> llama_cpp.ChatCompletion: exclude = { "n", "logit_bias", "logit_bias_type", "user", } - kwargs = body.dict(exclude=exclude) + kwargs = body.model_dump(exclude=exclude) if body.logit_bias is not None: kwargs['logits_processor'] = llama_cpp.LogitsProcessorList([ @@ -526,7 +520,7 @@ async def create_chat_completion( return EventSourceResponse( recv_chan, data_sender_callable=partial(event_publisher, send_chan), - ) + ) # type: ignore else: completion: llama_cpp.ChatCompletion = await run_in_threadpool( llama.create_chat_completion, **kwargs # type: ignore @@ -546,8 +540,6 @@ class ModelList(TypedDict): data: List[ModelData] - - @router.get("/v1/models") async def get_models( settings: Settings = Depends(get_settings), From 6705f9b6c6b3369481c4e2e0e15d0f1af7a96eff Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 13 Jul 2023 23:32:06 -0400 Subject: [PATCH 11/11] Bump version --- CHANGELOG.md | 10 ++++++++++ pyproject.toml | 2 +- setup.py | 2 +- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b5db37..47b55a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.1.71] + +### Added + +- (llama.cpp) Update llama.cpp + +### Fixed + +- (server) Fix several pydantic v2 migration bugs + ## [0.1.70] ### Fixed diff --git a/pyproject.toml b/pyproject.toml index a9e012e..1cff231 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "llama_cpp_python" -version = "0.1.70" +version = "0.1.71" description = "Python bindings for the llama.cpp library" authors = ["Andrei Betlen "] license = "MIT" diff --git a/setup.py b/setup.py index ab5d825..71af72c 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ setup( description="A Python wrapper for llama.cpp", long_description=long_description, long_description_content_type="text/markdown", - version="0.1.70", + version="0.1.71", author="Andrei Betlen", author_email="abetlen@gmail.com", license="MIT",
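
Patches 03 and 10 are both fallout from the pydantic v1 -> v2 migration: `Settings.__model_fields__` and `field.field_info.description` become `Settings.model_fields` and `field.description`, `field.type_` becomes `field.annotation`, and `.dict()` becomes `.model_dump()`. A minimal, self-contained sketch of the CLI-flag generation pattern that patch 03 switches to is below; `ServerSettings` is a toy stand-in, not the real `llama_cpp.server.app.Settings`, which has many more fields.

import argparse

from pydantic import Field
from pydantic_settings import BaseSettings


class ServerSettings(BaseSettings):
    host: str = Field(default="localhost", description="Listen address")
    port: int = Field(default=8000, description="Listen port")


parser = argparse.ArgumentParser()
# pydantic v2: iterate model_fields (FieldInfo objects) instead of __model_fields__.
for name, field in ServerSettings.model_fields.items():
    description = field.description
    if field.default is not None and description is not None:
        description += f" (default: {field.default})"
    parser.add_argument(
        f"--{name}",
        dest=name,
        # v2 exposes the declared type as `annotation` rather than `type_`.
        type=field.annotation if field.annotation is not None else str,
        help=description,
    )

args = parser.parse_args(["--port", "8080"])
# Drop unset flags so pydantic's own defaults still apply.
settings = ServerSettings(**{k: v for k, v in vars(args).items() if v is not None})
print(settings.model_dump())  # v2 replacement for .dict()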
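
Patch 04 reworks how `Llama._create_completion` bounds `max_tokens` against the context window: it raises only when the prompt alone already fills the context, treats `max_tokens <= 0` as "use whatever context remains", and otherwise truncates the request. The hunk is cut off before the final clamp expression, so the helper below is an inference from the surrounding comment rather than a copy of the library code; `n_ctx` stands in for `llama_cpp.llama_n_ctx(self.ctx)`.

from typing import List


def clamp_max_tokens(prompt_tokens: List[int], max_tokens: int, n_ctx: int) -> int:
    if len(prompt_tokens) >= n_ctx:
        # The prompt alone exhausts the context window; nothing can be generated.
        raise ValueError(f"Requested tokens exceed context window of {n_ctx}")
    if max_tokens <= 0:
        # "Unlimited": generate into whatever room the prompt leaves.
        return n_ctx - len(prompt_tokens)
    # Truncate so prompt + completion never overflows the context window.
    return min(max_tokens, n_ctx - len(prompt_tokens))


assert clamp_max_tokens(list(range(10)), 0, 512) == 502      # unlimited -> remaining context
assert clamp_max_tokens(list(range(10)), 2048, 512) == 502   # oversized request is clamped
assert clamp_max_tokens(list(range(10)), 16, 512) == 16      # small request passes through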
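
Patch 10's other recurring change is the schema and serialization API: the v1 `class Config: schema_extra = {"example": ...}` blocks become `model_config = {"json_schema_extra": {"examples": [...]}}`, and `body.dict(exclude=...)` becomes `body.model_dump(exclude=...)`. A toy request model (not the real `CreateCompletionRequest`) showing the same pattern:

from typing import Optional

from pydantic import BaseModel, Field


class CompletionRequest(BaseModel):
    prompt: str = Field(default="")
    max_tokens: int = Field(default=16, ge=1)
    user: Optional[str] = Field(default=None)

    # pydantic v2: a plain dict (or ConfigDict) replaces the nested Config class,
    # and OpenAPI samples live under "examples" instead of "example".
    model_config = {
        "json_schema_extra": {
            "examples": [{"prompt": "What is the capital of France?", "max_tokens": 16}]
        }
    }


body = CompletionRequest(prompt="Hello", max_tokens=32, user="abc")
print(body.model_dump(exclude={"user"}))  # {'prompt': 'Hello', 'max_tokens': 32}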