Skip to content

Commit

Permalink
feat: Update llama.cpp
Browse files Browse the repository at this point in the history
  • Loading branch information
abetlen committed Jan 8, 2025
1 parent c9dfad4 commit 1d5f534
Show file tree
Hide file tree
Showing 2 changed files with 30 additions and 3 deletions.
31 changes: 29 additions & 2 deletions llama_cpp/llama_cpp.py
Original file line number Diff line number Diff line change
Expand Up @@ -222,6 +222,7 @@
# LLAMA_VOCAB_PRE_TYPE_EXAONE = 25,
# LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26,
# LLAMA_VOCAB_PRE_TYPE_MINERVA = 27,
# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28,
# };
# Python-side mirrors of the C `llama_vocab_pre_type` enum shown in the
# comment block above; values must stay numerically in sync with llama.h.
LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0
LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1
Expand Down Expand Up @@ -251,6 +252,7 @@
LLAMA_VOCAB_PRE_TYPE_EXAONE = 25
LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26
LLAMA_VOCAB_PRE_TYPE_MINERVA = 27
# New in this vendor update: mirrors LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28
# from the C enum (DeepSeek-V3 pre-tokenizer).
LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28


# // note: these values should be synchronized with ggml_rope
Expand Down Expand Up @@ -1090,9 +1092,10 @@ def llama_backend_free():
...


# LLAMA_API struct llama_model * llama_load_model_from_file(
# DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file(
# const char * path_model,
# struct llama_model_params params);
# struct llama_model_params params),
# "use llama_model_load_from_file instead");
@ctypes_function(
"llama_load_model_from_file",
[ctypes.c_char_p, llama_model_params],
Expand All @@ -1104,6 +1107,20 @@ def llama_load_model_from_file(
...


# LLAMA_API struct llama_model * llama_model_load_from_file(
#                          const char * path_model,
#                 struct llama_model_params params);
@ctypes_function(
    "llama_model_load_from_file",
    [ctypes.c_char_p, llama_model_params],
    llama_model_p_ctypes,
)
def llama_model_load_from_file(
    path_model: bytes, params: llama_model_params, /
) -> Optional[llama_model_p]:
    """Load a llama model from a file.

    Replacement for ``llama_load_model_from_file``, which the C header now
    marks as deprecated in favor of this function.

    Args:
        path_model: Path to the model file, as ``bytes``.
        params: Model loading parameters (``llama_model_params`` struct).

    Returns:
        A pointer to the loaded model, or ``None`` when the C call returns
        NULL (ctypes maps a NULL result pointer to ``None``).
    """
    ...


# LLAMA_API void llama_free_model(struct llama_model * model);
@ctypes_function(
"llama_free_model",
Expand All @@ -1114,6 +1131,16 @@ def llama_free_model(model: llama_model_p, /):
...


# LLAMA_API void llama_model_free(struct llama_model * model);
@ctypes_function(
    "llama_model_free",
    [llama_model_p_ctypes],
    None,
)
def llama_model_free(model: llama_model_p, /):
    """Free a model previously loaded with ``llama_model_load_from_file``.

    Counterpart of ``llama_free_model`` under the new ``llama_model_*``
    naming scheme introduced by this llama.cpp update.

    Args:
        model: Pointer to the model to free.
    """
    ...


# LLAMA_API struct llama_context * llama_new_context_with_model(
# struct llama_model * model,
# struct llama_context_params params);
Expand Down
2 changes: 1 addition & 1 deletion vendor/llama.cpp
Submodule llama.cpp updated 141 files

0 comments on commit 1d5f534

Please sign in to comment.