More interoperability with the original llama.cpp, and arguments now work

Mug 2023-04-07 13:32:19 +02:00
parent 10c7571117
commit 16fc5b5d23
4 changed files with 55 additions and 43 deletions

@@ -26,9 +26,6 @@ class GptParams:
model: str = "./models/llama-7B/ggml-model.bin"
prompt: str = ""
input_prefix: str = " "
fix_prefix: str = ""
output_postfix: str = ""
input_echo: bool = True,
antiprompt: List[str] = field(default_factory=list)
@@ -47,41 +44,57 @@ class GptParams:
mem_test: bool = False
verbose_prompt: bool = False
file: str = None
# If chat ended prematurely, append this to the conversation to fix it.
# Set to "\nUser:" etc.
# This is an alternative to input_prefix, which always adds it, so it potentially duplicates "User:"
fix_prefix: str = " "
output_postfix: str = ""
input_echo: bool = True,
# Default instructions for Alpaca
# switch to "Human" and "Assistant" for Vicuna.
instruct_inp_prefix: str="\n\n### Instruction:\n\n",
instruct_inp_suffix: str="\n\n### Response:\n\n",
# TODO: TBD how they are gonna handle this upstream
instruct_inp_prefix: str="\n\n### Instruction:\n\n"
instruct_inp_suffix: str="\n\n### Response:\n\n"
def gpt_params_parse(argv = None, params: Optional[GptParams] = None):
if params is None:
params = GptParams()
parser = argparse.ArgumentParser()
parser.add_argument("-h", "--help", action="store_true", help="show this help message and exit")
parser.add_argument("-s", "--seed", type=int, default=-1, help="",dest="seed")
parser.add_argument("-t", "--threads", type=int, default=1, help="",dest="n_threads")
parser.add_argument("-p", "--prompt", type=str, default="", help="",dest="prompt")
parser.add_argument("-f", "--file", type=str, default=None, help="")
parser.add_argument("-c", "--ctx_size", type=int, default=512, help="",dest="n_ctx")
parser.add_argument("--memory_f32", action="store_false", help="",dest="memory_f16")
parser.add_argument("--top_p", type=float, default=0.9, help="",dest="top_p")
parser.add_argument("--temp", type=float, default=1.0, help="",dest="temp")
parser.add_argument("--repeat_last_n", type=int, default=64, help="",dest="repeat_last_n")
parser.add_argument("--repeat_penalty", type=float, default=1.0, help="",dest="repeat_penalty")
parser.add_argument("-b", "--batch_size", type=int, default=8, help="",dest="n_batch")
parser.add_argument("--keep", type=int, default=0, help="",dest="n_keep")
parser.add_argument("-m", "--model", type=str, help="",dest="model")
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-s", "--seed", type=int, default=-1, help="RNG seed (use random seed for <= 0)",dest="seed")
parser.add_argument("-t", "--threads", type=int, default=min(4, os.cpu_count() or 1), help="number of threads to use during computation",dest="n_threads")
parser.add_argument("-p", "--prompt", type=str, default="", help="initial prompt",dest="prompt")
parser.add_argument("-f", "--file", type=str, default=None, help="file containing initial prompt to load",dest="file")
parser.add_argument("-c", "--ctx_size", type=int, default=512, help="size of the prompt context",dest="n_ctx")
parser.add_argument("--memory_f32", action="store_false", help="use f32 instead of f16 for memory key+value",dest="memory_f16")
parser.add_argument("--top_p", type=float, default=0.95, help="top-p samplin",dest="top_p")
parser.add_argument("--top_k", type=int, default=40, help="top-k sampling",dest="top_k")
parser.add_argument("--temp", type=float, default=0.80, help="temperature",dest="temp")
parser.add_argument("--n_predict", type=int, default=128, help="number of model parts",dest="n_predict")
parser.add_argument("--repeat_last_n", type=int, default=64, help="last n tokens to consider for penalize ",dest="repeat_last_n")
parser.add_argument("--repeat_penalty", type=float, default=1.10, help="penalize repeat sequence of tokens",dest="repeat_penalty")
parser.add_argument("-b", "--batch_size", type=int, default=8, help="batch size for prompt processing",dest="n_batch")
parser.add_argument("--keep", type=int, default=0, help="number of tokens to keep from the initial prompt",dest="n_keep")
parser.add_argument("-m", "--model", type=str, default="./models/llama-7B/ggml-model.bin", help="model path",dest="model")
parser.add_argument(
"-i", "--interactive", action="store_true", help="run in interactive mode", dest="interactive"
)
parser.add_argument("--embedding", action="store_true", help="", dest="embedding")
parser.add_argument("--interactive-start", action="store_true", help="", dest="interactive_start")
parser.add_argument(
"--interactive-start",
action="store_true",
help="run in interactive mode",
dest="interactive"
)
parser.add_argument(
"--interactive-first",
action="store_true",
help="run in interactive mode and wait for input right away",
dest="interactive"
dest="interactive_start"
)
parser.add_argument(
"-ins",
@@ -96,24 +109,24 @@ def gpt_params_parse(argv = None, params: Optional[GptParams] = None):
help="colorise output to distinguish prompt and user input from generations",
dest="use_color"
)
parser.add_argument("--mlock", action="store_true",dest="use_mlock")
parser.add_argument("--mtest", action="store_true",dest="mem_test")
parser.add_argument("--mlock", action="store_true",help="force system to keep model in RAM rather than swapping or compressing",dest="use_mlock")
parser.add_argument("--mtest", action="store_true",help="compute maximum memory usage",dest="mem_test")
parser.add_argument(
"-r",
"--reverse-prompt",
type=str,
action='append',
help="run in interactive mode and poll user input upon seeing PROMPT (can be\nspecified more than once for multiple prompts).",
help="poll user input upon seeing PROMPT (can be\nspecified more than once for multiple prompts).",
dest="antiprompt"
)
parser.add_argument("--perplexity", action="store_true", help="", dest="perplexity")
parser.add_argument("--ignore-eos", action="store_true", help="", dest="ignore_eos")
parser.add_argument("--n_parts", type=int, default=-1, help="", dest="n_parts")
parser.add_argument("--random-prompt", action="store_true", help="", dest="random_prompt")
parser.add_argument("--in-prefix", type=str, default=" ", help="", dest="input_prefix")
parser.add_argument("--fix-prefix", type=str, default=" ", help="", dest="fix_prefix")
parser.add_argument("--out-postfix", type=str, default="", help="", dest="output_postfix")
parser.add_argument("--input-noecho", action="store_false", help="", dest="input_echo")
parser.add_argument("--perplexity", action="store_true", help="compute perplexity over the prompt", dest="perplexity")
parser.add_argument("--ignore-eos", action="store_true", help="ignore end of stream token and continue generating", dest="ignore_eos")
parser.add_argument("--n_parts", type=int, default=-1, help="number of model parts", dest="n_parts")
parser.add_argument("--random-prompt", action="store_true", help="start with a randomized prompt.", dest="random_prompt")
parser.add_argument("--in-prefix", type=str, default="", help="string to prefix user inputs with", dest="input_prefix")
parser.add_argument("--fix-prefix", type=str, default="", help="append to input when generated n_predict tokens", dest="fix_prefix")
parser.add_argument("--out-postfix", type=str, default="", help="append to input", dest="output_postfix")
parser.add_argument("--input-noecho", action="store_false", help="dont output the input", dest="input_echo")
args = parser.parse_args(argv)
return args
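
For reference, a minimal sketch of how the parser output is meant to be consumed (the module name common and the flag values are assumptions for illustration): every dest matches a GptParams field, so the parsed namespace can be splatted straight into the dataclass, and repeatable flags such as -r/--reverse-prompt accumulate into a list.

from common import GptParams, gpt_params_parse  # module name assumed

# -r may be given more than once because it uses action='append'
args = gpt_params_parse([
    "-m", "./models/llama-7B/ggml-model.bin",
    "-i", "--temp", "0.7",
    "-r", "\nUser:",
    "-r", "\n### Instruction:\n\n",
])
params = GptParams(**vars(args))  # dest names line up with the dataclass fields
print(params.model, params.temp, params.antiprompt)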

@@ -6,8 +6,6 @@ Quirks:
* The first antiprompt should be the user prompt, like "\nUser:",
because it's added when n_predict is reached (i.e. generation ended prematurely)
* n_predict can be set to -1 for unlimited length responses (or just a really high value)
* It's always in interactive mode, generation ends either by reaching an antiprompt
or running out of n_predict.
* Instruction mode adds its own antiprompt.
You should also still feed the model a "primer" prompt that
shows it the expected format.
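
For illustration, a chat configuration reflecting these quirks might look like the sketch below (field values are examples, not taken from this commit; field names mirror the argparse dests shown earlier in this diff).

params = GptParams(
    interactive=True,
    n_predict=256,            # or -1 for effectively unlimited responses
    antiprompt=["\nUser:"],   # first antiprompt doubles as the fix-up text added
                              # when n_predict runs out mid-conversation
    prompt=(                  # "primer" showing the model the expected format
        "User: Hello, Bob.\n"
        "Bob: Hi! How can I help you today?\n"
        "User:"
    ),
)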
@@ -59,7 +57,6 @@ specified) expect poor results""", file=sys.stderr)
# runtime args
self.input_consumed = 0
self.embd = []
self.n_past = 0
self.first_antiprompt = []
self.remaining_tokens = self.params.n_predict
@@ -74,7 +71,7 @@ specified) expect poor results""", file=sys.stderr)
self.lparams.use_mlock = self.params.use_mlock
self.ctx = llama_cpp.llama_init_from_file(self.params.model.encode("utf8"), self.lparams)
if (self.ctx == 0):
if (not self.ctx):
raise RuntimeError(f"error: failed to load model '{self.params.model}'")
print(file=sys.stderr)
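
The same load-and-check pattern in isolation, as a sketch; it assumes llama_cpp also exposes llama_context_default_params(), which is presumably how self.lparams is built earlier in the file.

import llama_cpp

lparams = llama_cpp.llama_context_default_params()
lparams.use_mlock = False
ctx = llama_cpp.llama_init_from_file(b"./models/llama-7B/ggml-model.bin", lparams)
if not ctx:  # a NULL context is falsy, so this also covers the old `== 0` check
    raise RuntimeError("error: failed to load model")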
@@ -95,7 +92,13 @@ specified) expect poor results""", file=sys.stderr)
# Add a space in front of the first character to match OG llama tokenizer behavior
self.params.prompt = " " + self.params.prompt
# Load prompt file
if (self.params.file):
with open(self.params.file) as f:
self.params.prompt = f.read()
# tokenize the prompt
self.embd = []
self.embd_inp = self._tokenize(self.params.prompt)
if (len(self.embd_inp) > self.params.n_ctx - 4):
@@ -384,11 +387,7 @@ The transcript only includes text, it does not include markup like HTML and Markdown.
{AI_NAME}: Blue
{USER_NAME}:"""
args = gpt_params_parse()
params = GptParams(args)
params = GptParams(**vars(args))
if (args.file):
with open(args.file) as f:
params.prompt = f.read()
with LLaMAInteract() as m:
with LLaMAInteract(params) as m:
m.interact()
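
Usage note: with the parser fixes above, the chat example can now be driven entirely from the command line; the invocation below is a sketch (the script name and file paths are assumptions, not stated in this diff), and the hand-built GptParams shows the equivalent programmatic call.

# python low_level_api_chat_cpp.py -m ./models/llama-7B/ggml-model.bin \
#     -i --temp 0.8 -r "\nUser:" -f ./prompts/chat-with-bob.txt
with LLaMAInteract(GptParams(
    model="./models/llama-7B/ggml-model.bin",
    interactive=True,
    temp=0.8,
    antiprompt=["\nUser:"],
    file="./prompts/chat-with-bob.txt",  # prompt file is now loaded inside the class
)) as m:
    m.interact()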