Initial commit

This commit is contained in:
Andrei Betlen 2023-03-23 05:33:06 -04:00
commit 79b304c9d4
10 changed files with 736 additions and 0 deletions

164
.gitignore vendored Normal file
View File

@ -0,0 +1,164 @@
.envrc
models/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

9
LICENSE.md Normal file
View File

@ -0,0 +1,9 @@
MIT License
Copyright (c) 2023 Andrei Betlen
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

40
README.md Normal file
View File

@ -0,0 +1,40 @@
# `llama.cpp` Python Bindings
Simple Python bindings for @ggerganov's [`llama.cpp`](https://github.com/ggerganov/llama.cpp) library.
These bindings expose the low-level `llama.cpp` C API through a complete `ctypes` interface.
This module also exposes a high-level Python API that is more convenient to use and follows a familiar format.
# Install
```bash
pip install llama_cpp
```
# Usage
```python
>>> from llama_cpp import Llama
>>> llm = Llama(model_path="models/7B/...")
>>> output = llm("Q: Name the planets in the solar system? A: ", max_tokens=32, stop=["Q:", "\n"], echo=True)
>>> print(output)
{
"id": "cmpl-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
"object": "text_completion",
"created": 1679561337,
"model": "models/7B/...",
"choices": [
{
"text": "Q: Name the planets in the solar system? A: Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune and Pluto.",
"index": 0,
"logprobs": None,
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 14,
"completion_tokens": 28,
"total_tokens": 42
}
}
```

8
examples/basic.py Normal file
View File

@ -0,0 +1,8 @@
import json
from llama_cpp import Llama

# Load a model and run a single completion, echoing the prompt back in the
# generated text, then pretty-print the OpenAI-style result dict.
model = Llama(model_path="models/...")
result = model(
    "Q: Name the planets in the solar system? A: ",
    max_tokens=32,
    stop=["Q:", "\n"],
    echo=True,
)
print(json.dumps(result, indent=2))

2
llama_cpp/__init__.py Normal file
View File

@ -0,0 +1,2 @@
from .llama_cpp import *
from .llama import *

131
llama_cpp/llama.py Normal file
View File

@ -0,0 +1,131 @@
import uuid
import time
import multiprocessing
from typing import List, Optional
from . import llama_cpp
class Llama:
    """High-level wrapper around a llama.cpp model context.

    Mimics the OpenAI completion API: calling the instance with a prompt
    returns a ``text_completion``-style dict.
    """

    def __init__(
        self,
        model_path: str,
        n_ctx: int = 512,
        n_parts: int = -1,
        seed: int = 1337,
        f16_kv: bool = False,
        logits_all: bool = False,
        vocab_only: bool = False,
        n_threads: Optional[int] = None,
        model_name: Optional[str] = None,
    ):
        """Load a ggml llama model.

        :param model_path: Path to the ggml model file.
        :param n_ctx: Context window size in tokens (also sizes the token buffer).
        :param n_parts: Number of model parts; -1 lets llama.cpp decide.
        :param seed: RNG seed; 0 selects a random seed.
        :param f16_kv: Use fp16 for the KV cache.
        :param logits_all: Compute logits for all tokens, not just the last one.
        :param vocab_only: Load only the vocabulary, no weights.
        :param n_threads: Threads used for eval; defaults to the CPU count.
        :param model_name: Name reported in completion results; defaults to
            ``model_path``.
        :raises RuntimeError: If the model could not be loaded.
        """
        self.model_path = model_path
        self.model = model_name or model_path
        self.params = llama_cpp.llama_context_default_params()
        self.params.n_ctx = n_ctx
        self.params.n_parts = n_parts
        self.params.seed = seed
        self.params.f16_kv = f16_kv
        self.params.logits_all = logits_all
        self.params.vocab_only = vocab_only
        self.n_threads = n_threads or multiprocessing.cpu_count()
        # Shared token buffer sized to the full context window: holds the
        # tokenized prompt followed by generated tokens.
        self.tokens = (llama_cpp.llama_token * self.params.n_ctx)()
        self.ctx = llama_cpp.llama_init_from_file(
            self.model_path.encode("utf-8"), self.params
        )
        if self.ctx is None:
            # llama_init_from_file returns NULL on failure; the original
            # silently kept a NULL context and crashed later.
            raise RuntimeError(f"Failed to load model from {model_path}")

    def __call__(
        self,
        prompt: str,
        suffix: Optional[str] = None,
        max_tokens: int = 16,
        temperature: float = 0.8,
        top_p: float = 0.95,
        echo: bool = False,
        stop: Optional[List[str]] = None,
        repeat_penalty: float = 1.1,
        top_k: int = 40,
    ):
        """Generate a completion for ``prompt``.

        :param prompt: Text to complete.
        :param suffix: Appended to the generated text if given.
        :param max_tokens: Maximum number of tokens to generate.
        :param temperature: Sampling temperature.
        :param top_p: Nucleus-sampling cutoff.
        :param echo: Include the prompt in the returned text.
        :param stop: Stop sequences; generation halts before the first match.
            (Fix: was a shared mutable default ``[]``.)
        :param repeat_penalty: Penalty applied to recently seen tokens.
        :param top_k: Top-k sampling cutoff.
        :return: An OpenAI-style ``text_completion`` dict.
        :raises ValueError: If prompt tokens + max_tokens exceed the context size.
        :raises RuntimeError: If tokenization fails.
        """
        stop = [] if stop is None else list(stop)
        text = ""
        finish_reason = "length"
        completion_tokens = 0
        prompt_tokens = llama_cpp.llama_tokenize(
            self.ctx, prompt.encode("utf-8"), self.tokens, self.params.n_ctx, True
        )
        if prompt_tokens < 0:
            # llama_tokenize returns a negative number on failure; the
            # original proceeded with the garbage count.
            raise RuntimeError("Failed to tokenize prompt")
        if prompt_tokens + max_tokens > self.params.n_ctx:
            raise ValueError(
                f"Requested tokens exceed context window of {self.params.n_ctx}"
            )
        # Evaluate the whole prompt in one batched call (the original looped
        # one token at a time with n_past=i, which is equivalent but slower).
        llama_cpp.llama_eval(
            self.ctx, self.tokens, prompt_tokens, 0, self.n_threads
        )
        for i in range(max_tokens):
            token = llama_cpp.llama_sample_top_p_top_k(
                self.ctx,
                self.tokens,
                prompt_tokens + completion_tokens,
                top_k=top_k,
                top_p=top_p,
                temp=temperature,
                repeat_penalty=repeat_penalty,
            )
            if token == llama_cpp.llama_token_eos():
                finish_reason = "stop"
                break
            # NOTE(review): decode() can raise if a token ends inside a
            # multi-byte UTF-8 sequence — confirm against llama.cpp's vocab.
            text += llama_cpp.llama_token_to_str(self.ctx, token).decode("utf-8")
            self.tokens[prompt_tokens + i] = token
            completion_tokens += 1
            any_stop = [s for s in stop if s in text]
            if any_stop:
                first_stop = any_stop[0]
                text = text[: text.index(first_stop)]
                finish_reason = "stop"
                break
            # Off-by-one fix: n_past is the number of tokens already
            # evaluated. The new token sits at position prompt_tokens + i, so
            # that is the correct n_past (the original passed
            # prompt_tokens + completion_tokens, one past the true position).
            llama_cpp.llama_eval(
                self.ctx,
                (llama_cpp.c_int * 1)(self.tokens[prompt_tokens + i]),
                1,
                prompt_tokens + i,
                self.n_threads,
            )
        if echo:
            text = prompt + text
        if suffix is not None:
            text = text + suffix
        return {
            "id": f"cmpl-{str(uuid.uuid4())}",  # Likely to change
            "object": "text_completion",
            "created": int(time.time()),
            "model": self.model,  # Likely to change
            "choices": [
                {
                    "text": text,
                    "index": 0,
                    "logprobs": None,
                    "finish_reason": finish_reason,
                }
            ],
            "usage": {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
            },
        }

    def __del__(self):
        # Guard against __init__ having failed before ctx was assigned, and
        # against double-free on repeated finalization.
        if getattr(self, "ctx", None) is not None:
            llama_cpp.llama_free(self.ctx)
            self.ctx = None

157
llama_cpp/llama_cpp.py Normal file
View File

@ -0,0 +1,157 @@
import ctypes
from ctypes import c_int, c_float, c_double, c_char_p, c_void_p, c_bool, POINTER, Structure
import pathlib
# Load the library
# libllama.so is expected one directory above this package (setup.py moves
# the CMake-built shared library to the project root).
libfile = pathlib.Path(__file__).parent.parent / "libllama.so"
lib = ctypes.CDLL(str(libfile))
# C types
llama_token = c_int  # a token id in the model vocabulary
llama_token_p = POINTER(llama_token)
class llama_token_data(Structure):
    """ctypes mirror of llama.cpp's llama_token_data struct."""
    _fields_ = [
        ('id', llama_token),  # token id
        ('p', c_float),  # probability of the token
        ('plog', c_float),  # log probability of the token
    ]
llama_token_data_p = POINTER(llama_token_data)
class llama_context_params(Structure):
    """ctypes mirror of llama.cpp's llama_context_params struct."""
    _fields_ = [
        ('n_ctx', c_int),  # text context
        ('n_parts', c_int),  # -1 for default
        ('seed', c_int),  # RNG seed, 0 for random
        ('f16_kv', c_bool),  # use fp16 for KV cache
        ('logits_all', c_bool),  # the llama_eval() call computes all logits, not just the last one
        ('vocab_only', c_bool),  # only load the vocabulary, no weights
    ]
llama_context_params_p = POINTER(llama_context_params)
# Opaque handle to a llama context (struct llama_context * on the C side).
llama_context_p = c_void_p
# C functions
# Declare argument and result types for each exported llama.cpp function so
# ctypes marshals values correctly. These declarations must be kept in sync
# with the function prototypes in llama.h.
lib.llama_context_default_params.argtypes = []
lib.llama_context_default_params.restype = llama_context_params
lib.llama_init_from_file.argtypes = [c_char_p, llama_context_params]
lib.llama_init_from_file.restype = llama_context_p
lib.llama_free.argtypes = [llama_context_p]
lib.llama_free.restype = None
lib.llama_model_quantize.argtypes = [c_char_p, c_char_p, c_int, c_int]
lib.llama_model_quantize.restype = c_int
lib.llama_eval.argtypes = [llama_context_p, llama_token_p, c_int, c_int, c_int]
lib.llama_eval.restype = c_int
lib.llama_tokenize.argtypes = [llama_context_p, c_char_p, llama_token_p, c_int, c_bool]
lib.llama_tokenize.restype = c_int
lib.llama_n_vocab.argtypes = [llama_context_p]
lib.llama_n_vocab.restype = c_int
lib.llama_n_ctx.argtypes = [llama_context_p]
lib.llama_n_ctx.restype = c_int
lib.llama_get_logits.argtypes = [llama_context_p]
lib.llama_get_logits.restype = POINTER(c_float)
lib.llama_token_to_str.argtypes = [llama_context_p, llama_token]
lib.llama_token_to_str.restype = c_char_p
lib.llama_token_bos.argtypes = []
lib.llama_token_bos.restype = llama_token
lib.llama_token_eos.argtypes = []
lib.llama_token_eos.restype = llama_token
# sample signature: (ctx, last_n_tokens_data, last_n_tokens_size, top_k, top_p, temp, repeat_penalty)
lib.llama_sample_top_p_top_k.argtypes = [llama_context_p, llama_token_p, c_int, c_int, c_double, c_double, c_double]
lib.llama_sample_top_p_top_k.restype = llama_token
lib.llama_print_timings.argtypes = [llama_context_p]
lib.llama_print_timings.restype = None
lib.llama_reset_timings.argtypes = [llama_context_p]
lib.llama_reset_timings.restype = None
lib.llama_print_system_info.argtypes = []
lib.llama_print_system_info.restype = c_char_p
# Python functions
def llama_context_default_params() -> llama_context_params:
    """Return a llama_context_params struct filled with the library defaults."""
    params = lib.llama_context_default_params()
    return params
def llama_init_from_file(path_model: bytes, params: llama_context_params) -> llama_context_p:
    """Load a ggml llama model from file.

    Allocates (almost) all memory needed for the model.
    Returns NULL (None) on failure.
    """
    return lib.llama_init_from_file(path_model, params)
def llama_free(ctx: llama_context_p):
    """Free all memory allocated for the context."""
    lib.llama_free(ctx)
def llama_model_quantize(fname_inp: bytes, fname_out: bytes, itype: c_int, qk: c_int) -> c_int:
    """Quantize the model file fname_inp and write the result to fname_out.

    Returns 0 on success.
    """
    return lib.llama_model_quantize(fname_inp, fname_out, itype, qk)
def llama_eval(ctx: llama_context_p, tokens: llama_token_p, n_tokens: c_int, n_past: c_int, n_threads: c_int) -> c_int:
    """Run the llama inference to obtain the logits and probabilities for the next token.

    tokens + n_tokens is the provided batch of new tokens to process.
    n_past is the number of tokens to use from previous eval calls.
    Returns 0 on success.
    """
    return lib.llama_eval(ctx, tokens, n_tokens, n_past, n_threads)
def llama_tokenize(ctx: llama_context_p, text: bytes, tokens: llama_token_p, n_max_tokens: c_int, add_bos: c_bool) -> c_int:
    """Convert the provided text into tokens, written into the tokens buffer.

    The tokens pointer must be large enough to hold the resulting tokens.
    Returns the number of tokens on success, no more than n_max_tokens.
    Returns a negative number on failure - the number of tokens that would
    have been returned.
    """
    return lib.llama_tokenize(ctx, text, tokens, n_max_tokens, add_bos)
def llama_n_vocab(ctx: llama_context_p) -> c_int:
    """Return the vocabulary size of the model loaded in ctx."""
    return lib.llama_n_vocab(ctx)
def llama_n_ctx(ctx: llama_context_p) -> c_int:
    """Return the context size (in tokens) of ctx."""
    return lib.llama_n_ctx(ctx)
def llama_get_logits(ctx: llama_context_p):
    """Token logits obtained from the last call to llama_eval().

    The logits for the last token are stored in the last row.
    Can be mutated in order to change the probabilities of the next token.
    Rows: n_tokens
    Cols: n_vocab
    """
    return lib.llama_get_logits(ctx)
def llama_token_to_str(ctx: llama_context_p, token: int) -> bytes:
    """Token Id -> String. Uses the vocabulary in the provided context."""
    return lib.llama_token_to_str(ctx, token)
def llama_token_bos() -> llama_token:
    """Return the beginning-of-sentence token id."""
    return lib.llama_token_bos()
def llama_token_eos() -> llama_token:
    """Return the end-of-sentence token id."""
    return lib.llama_token_eos()
def llama_sample_top_p_top_k(ctx: llama_context_p, last_n_tokens_data: llama_token_p, last_n_tokens_size: c_int, top_k: c_int, top_p: c_double, temp: c_double, repeat_penalty: c_double) -> llama_token:
    """Sample the next token with top-k / top-p sampling.

    last_n_tokens_data / last_n_tokens_size presumably describe the recent
    tokens used for the repeat penalty — verify against llama.h.
    """
    return lib.llama_sample_top_p_top_k(ctx, last_n_tokens_data, last_n_tokens_size, top_k, top_p, temp, repeat_penalty)
def llama_print_timings(ctx: llama_context_p):
    """Print the timing information collected for ctx."""
    lib.llama_print_timings(ctx)
def llama_reset_timings(ctx: llama_context_p):
    """Reset the timing counters for ctx."""
    lib.llama_reset_timings(ctx)
def llama_print_system_info() -> bytes:
    """Return system information as bytes (fixes "informaiton" typo)."""
    return lib.llama_print_system_info()

159
poetry.lock generated Normal file
View File

@ -0,0 +1,159 @@
[[package]]
name = "black"
version = "23.1.0"
description = "The uncompromising code formatter."
category = "dev"
optional = false
python-versions = ">=3.7"
[package.dependencies]
click = ">=8.0.0"
mypy-extensions = ">=0.4.3"
packaging = ">=22.0"
pathspec = ">=0.9.0"
platformdirs = ">=2"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}
[package.extras]
colorama = ["colorama (>=0.4.3)"]
d = ["aiohttp (>=3.7.4)"]
jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
uvloop = ["uvloop (>=0.15.2)"]
[[package]]
name = "click"
version = "8.1.3"
description = "Composable command line interface toolkit"
category = "dev"
optional = false
python-versions = ">=3.7"
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
category = "dev"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
[[package]]
name = "mypy-extensions"
version = "1.0.0"
description = "Type system extensions for programs checked with the mypy type checker."
category = "dev"
optional = false
python-versions = ">=3.5"
[[package]]
name = "packaging"
version = "23.0"
description = "Core utilities for Python packages"
category = "dev"
optional = false
python-versions = ">=3.7"
[[package]]
name = "pathspec"
version = "0.11.1"
description = "Utility library for gitignore style pattern matching of file paths."
category = "dev"
optional = false
python-versions = ">=3.7"
[[package]]
name = "platformdirs"
version = "3.1.1"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
category = "dev"
optional = false
python-versions = ">=3.7"
[package.extras]
docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"]
test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"]
[[package]]
name = "tomli"
version = "2.0.1"
description = "A lil' TOML parser"
category = "dev"
optional = false
python-versions = ">=3.7"
[[package]]
name = "typing-extensions"
version = "4.5.0"
description = "Backported and Experimental Type Hints for Python 3.7+"
category = "dev"
optional = false
python-versions = ">=3.7"
[metadata]
lock-version = "1.1"
python-versions = "^3.8.1"
content-hash = "3f76d52f05fe9351f546ad2dd8038dd9442d52a80c04112022683805560265e0"
[metadata.files]
black = [
{file = "black-23.1.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:b6a92a41ee34b883b359998f0c8e6eb8e99803aa8bf3123bf2b2e6fec505a221"},
{file = "black-23.1.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:57c18c5165c1dbe291d5306e53fb3988122890e57bd9b3dcb75f967f13411a26"},
{file = "black-23.1.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:9880d7d419bb7e709b37e28deb5e68a49227713b623c72b2b931028ea65f619b"},
{file = "black-23.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104"},
{file = "black-23.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9afd3f493666a0cd8f8df9a0200c6359ac53940cbde049dcb1a7eb6ee2dd7074"},
{file = "black-23.1.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:bfffba28dc52a58f04492181392ee380e95262af14ee01d4bc7bb1b1c6ca8d27"},
{file = "black-23.1.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c1c476bc7b7d021321e7d93dc2cbd78ce103b84d5a4cf97ed535fbc0d6660648"},
{file = "black-23.1.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:382998821f58e5c8238d3166c492139573325287820963d2f7de4d518bd76958"},
{file = "black-23.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf649fda611c8550ca9d7592b69f0637218c2369b7744694c5e4902873b2f3a"},
{file = "black-23.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:121ca7f10b4a01fd99951234abdbd97728e1240be89fde18480ffac16503d481"},
{file = "black-23.1.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:a8471939da5e824b891b25751955be52ee7f8a30a916d570a5ba8e0f2eb2ecad"},
{file = "black-23.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8178318cb74f98bc571eef19068f6ab5613b3e59d4f47771582f04e175570ed8"},
{file = "black-23.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a436e7881d33acaf2536c46a454bb964a50eff59b21b51c6ccf5a40601fbef24"},
{file = "black-23.1.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:a59db0a2094d2259c554676403fa2fac3473ccf1354c1c63eccf7ae65aac8ab6"},
{file = "black-23.1.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:0052dba51dec07ed029ed61b18183942043e00008ec65d5028814afaab9a22fd"},
{file = "black-23.1.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:49f7b39e30f326a34b5c9a4213213a6b221d7ae9d58ec70df1c4a307cf2a1580"},
{file = "black-23.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:162e37d49e93bd6eb6f1afc3e17a3d23a823042530c37c3c42eeeaf026f38468"},
{file = "black-23.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b70eb40a78dfac24842458476135f9b99ab952dd3f2dab738c1881a9b38b753"},
{file = "black-23.1.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:a29650759a6a0944e7cca036674655c2f0f63806ddecc45ed40b7b8aa314b651"},
{file = "black-23.1.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:bb460c8561c8c1bec7824ecbc3ce085eb50005883a6203dcfb0122e95797ee06"},
{file = "black-23.1.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739"},
{file = "black-23.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a951cc83ab535d248c89f300eccbd625e80ab880fbcfb5ac8afb5f01a258ac9"},
{file = "black-23.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0680d4380db3719ebcfb2613f34e86c8e6d15ffeabcf8ec59355c5e7b85bb555"},
{file = "black-23.1.0-py3-none-any.whl", hash = "sha256:7a0f701d314cfa0896b9001df70a530eb2472babb76086344e688829efd97d32"},
{file = "black-23.1.0.tar.gz", hash = "sha256:b0bd97bea8903f5a2ba7219257a44e3f1f9d00073d6cc1add68f0beec69692ac"},
]
click = [
{file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
{file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
]
colorama = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
mypy-extensions = [
{file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
packaging = [
{file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"},
{file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"},
]
pathspec = [
{file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"},
{file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"},
]
platformdirs = [
{file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"},
{file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"},
]
tomli = [
{file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
{file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
]
typing-extensions = [
{file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"},
{file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"},
]

24
pyproject.toml Normal file
View File

@ -0,0 +1,24 @@
[tool.poetry]
name = "llama_cpp"
version = "0.1.0"
description = "Python bindings for the llama.cpp library"
authors = ["Andrei Betlen <abetlen@gmail.com>"]
license = "MIT"
readme = "README.md"
homepage = "https://github.com/abetlen/llama_cpp_python"
repository = "https://github.com/abetlen/llama_cpp_python"
packages = [{include = "llama_cpp"}]
include = [
"LICENSE.md",
]
[tool.poetry.dependencies]
python = "^3.8.1"
[tool.poetry.group.dev.dependencies]
black = "^23.1.0"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

42
setup.py Normal file
View File

@ -0,0 +1,42 @@
import os
import subprocess
from setuptools import setup, Extension
from distutils.command.build_ext import build_ext
class build_ext_custom(build_ext):
    """Custom build step: build libllama.so with CMake.

    Instead of compiling the Extension sources directly, this runs CMake on
    the vendored llama.cpp tree and moves the resulting shared library to
    the project root, where llama_cpp/llama_cpp.py loads it from.
    """

    def run(self):
        build_dir = os.path.join(os.getcwd(), "build")
        src_dir = os.path.join(os.getcwd(), "vendor", "llama.cpp")
        os.makedirs(build_dir, exist_ok=True)
        # Build a position-independent shared library rather than a static one.
        cmake_flags = [
            "-DLLAMA_STATIC=Off",
            "-DBUILD_SHARED_LIBS=On",
            "-DCMAKE_CXX_FLAGS=-fPIC",
            "-DCMAKE_C_FLAGS=-fPIC",
        ]
        subprocess.check_call(["cmake", src_dir, *cmake_flags], cwd=build_dir)
        subprocess.check_call(["cmake", "--build", "."], cwd=build_dir)
        # Move the shared library to the root directory
        # NOTE(review): os.rename fails across filesystems and when the
        # target already exists on Windows — shutil.move would be safer;
        # verify before changing.
        lib_path = os.path.join(build_dir, "libllama.so")
        target_path = os.path.join(os.getcwd(), "libllama.so")
        os.rename(lib_path, target_path)
# Package metadata.
# NOTE(review): the version here (0.0.1) disagrees with pyproject.toml
# (0.1.0) — confirm which one is canonical.
setup(
    name="llama_cpp",
    description="A Python wrapper for llama.cpp",
    version="0.0.1",
    author="Andrei Betlen",
    author_email="abetlen@gmail.com",
    license="MIT",
    # NOTE(review): llama_cpp is a package (directory with __init__.py),
    # not a single module — packages=["llama_cpp"] is probably intended;
    # verify.
    py_modules=["llama_cpp"],
    ext_modules=[
        # Sources point at a directory; actual compilation is fully
        # delegated to CMake via build_ext_custom above.
        Extension("libllama", ["vendor/llama.cpp"]),
    ],
    cmdclass={"build_ext": build_ext_custom},
)