talk-llama : sync llama.cpp

- examples/talk-llama/llama-arch.cpp +7 -2
- examples/talk-llama/llama-arch.h +3 -1
- examples/talk-llama/llama-chat.cpp +11 -2
- examples/talk-llama/llama-chat.h +1 -0
- examples/talk-llama/llama-grammar.cpp +86 -6
- examples/talk-llama/llama-grammar.h +22 -1
- examples/talk-llama/llama-mmap.cpp +1 -0
- examples/talk-llama/llama-model-loader.cpp +64 -12
- examples/talk-llama/llama-model-loader.h +6 -1
- examples/talk-llama/llama-model.cpp +74 -7
- examples/talk-llama/llama-model.h +0 -2
- examples/talk-llama/llama-quant.cpp +2 -1
- examples/talk-llama/llama-sampling.cpp +47 -4
- examples/talk-llama/llama-vocab.cpp +14 -7
- examples/talk-llama/llama.cpp +216 -177
- examples/talk-llama/llama.h +25 -5
- examples/talk-llama/unicode.cpp +2 -3
examples/talk-llama/llama-arch.cpp CHANGED

@@ -179,6 +179,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_TOKENIZER_HF_JSON,          "tokenizer.huggingface.json" },
     { LLM_KV_TOKENIZER_RWKV,             "tokenizer.rwkv.world" },
     { LLM_KV_TOKENIZER_CHAT_TEMPLATE,    "tokenizer.chat_template" },
+    { LLM_KV_TOKENIZER_CHAT_TEMPLATE_N,  "tokenizer.chat_template.%s" },
     { LLM_KV_TOKENIZER_FIM_PRE_ID,       "tokenizer.ggml.fim_pre_token_id" },
     { LLM_KV_TOKENIZER_FIM_SUF_ID,       "tokenizer.ggml.fim_suf_token_id" },
     { LLM_KV_TOKENIZER_FIM_MID_ID,       "tokenizer.ggml.fim_mid_token_id" },

@@ -1023,6 +1024,9 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
     { LLM_TENSOR_OUTPUT,    "output" },
     { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
     { LLM_TENSOR_ATTN_QKV,  "blk.%d.attn_qkv" },
+    { LLM_TENSOR_ATTN_Q,    "blk.%d.attn_q" },
+    { LLM_TENSOR_ATTN_K,    "blk.%d.attn_k" },
+    { LLM_TENSOR_ATTN_V,    "blk.%d.attn_v" },
     { LLM_TENSOR_ATTN_OUT,  "blk.%d.attn_output" },
     { LLM_TENSOR_FFN_NORM,  "blk.%d.ffn_norm" },
     { LLM_TENSOR_FFN_UP,    "blk.%d.ffn_up" },

@@ -1443,10 +1447,11 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
     {LLM_TENSOR_CONVNEXT_GAMMA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
 };

-LLM_KV::LLM_KV(llm_arch arch) : arch(arch) {}
+LLM_KV::LLM_KV(llm_arch arch, const char * suffix) : arch(arch), suffix(suffix) {}

 std::string LLM_KV::operator()(llm_kv kv) const {
-    return ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch)) …
+    return suffix ? ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch), suffix)
+                  : ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
 }

 std::string LLM_TN_IMPL::str() const {
examples/talk-llama/llama-arch.h CHANGED

@@ -177,6 +177,7 @@ enum llm_kv {
     LLM_KV_TOKENIZER_HF_JSON,
     LLM_KV_TOKENIZER_RWKV,
     LLM_KV_TOKENIZER_CHAT_TEMPLATE,
+    LLM_KV_TOKENIZER_CHAT_TEMPLATE_N,
     LLM_KV_TOKENIZER_FIM_PRE_ID,
     LLM_KV_TOKENIZER_FIM_SUF_ID,
     LLM_KV_TOKENIZER_FIM_MID_ID,

@@ -335,9 +336,10 @@ enum llm_tensor_layer {
 };

 struct LLM_KV {
-    LLM_KV(llm_arch arch);
+    LLM_KV(llm_arch arch, const char * suffix = nullptr);

     llm_arch arch;
+    const char * suffix;

     std::string operator()(llm_kv kv) const;
 };
examples/talk-llama/llama-chat.cpp CHANGED

@@ -51,6 +51,7 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
     { "llama3",     LLM_CHAT_TEMPLATE_LLAMA_3 },
     { "chatglm3",   LLM_CHAT_TEMPLATE_CHATGML_3 },
     { "chatglm4",   LLM_CHAT_TEMPLATE_CHATGML_4 },
+    { "glmedge",    LLM_CHAT_TEMPLATE_GLMEDGE },
     { "minicpm",    LLM_CHAT_TEMPLATE_MINICPM },
     { "exaone3",    LLM_CHAT_TEMPLATE_EXAONE_3 },
     { "rwkv-world", LLM_CHAT_TEMPLATE_RWKV_WORLD },

@@ -115,7 +116,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
     } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) {
         return LLM_CHAT_TEMPLATE_PHI_3;
     } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
-        return LLM_CHAT_TEMPLATE_FALCON_3;
+        return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE;
     } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) {
         return LLM_CHAT_TEMPLATE_ZEPHYR;
     } else if (tmpl_contains("bos_token + message['role']")) {

@@ -152,7 +153,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
         return LLM_CHAT_TEMPLATE_MINICPM;
     } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) {
         return LLM_CHAT_TEMPLATE_DEEPSEEK_2;
-    } else if (tmpl_contains(LU8(" …
+    } else if (tmpl_contains(LU8("<|Assistant|>")) && tmpl_contains(LU8("<|User|>")) && tmpl_contains(LU8("<|end▁of▁sentence|>"))) {
         return LLM_CHAT_TEMPLATE_DEEPSEEK_3;
     } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) {
         // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb

@@ -440,6 +441,14 @@ int32_t llm_chat_apply_template(
         if (add_ass) {
             ss << "<|assistant|>";
         }
+    } else if (tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
+        for (auto message : chat) {
+            std::string role(message->role);
+            ss << "<|" << role << "|>" << "\n" << message->content;
+        }
+        if (add_ass) {
+            ss << "<|assistant|>";
+        }
     } else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) {
         // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
         for (auto message : chat) {
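For reference, a standalone sketch of the string the new GLMEDGE branch builds; it mirrors the added loop above, but the `msg` struct and the `render_glmedge` helper are illustrative and not part of llama.cpp:

    #include <sstream>
    #include <string>
    #include <vector>

    struct msg { std::string role, content; };

    // Builds the prompt the same way the GLMEDGE branch does: "<|role|>\n" + content per message,
    // with a trailing "<|assistant|>" when the caller asks for an assistant turn.
    std::string render_glmedge(const std::vector<msg> & chat, bool add_ass) {
        std::ostringstream ss;
        for (const auto & m : chat) {
            ss << "<|" << m.role << "|>" << "\n" << m.content;
        }
        if (add_ass) {
            ss << "<|assistant|>";
        }
        return ss.str();
    }

    // render_glmedge({{"system", "You are helpful."}, {"user", "Hi"}}, true)
    //   -> "<|system|>\nYou are helpful.<|user|>\nHi<|assistant|>"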
examples/talk-llama/llama-chat.h CHANGED

@@ -31,6 +31,7 @@ enum llm_chat_template {
     LLM_CHAT_TEMPLATE_LLAMA_3,
     LLM_CHAT_TEMPLATE_CHATGML_3,
     LLM_CHAT_TEMPLATE_CHATGML_4,
+    LLM_CHAT_TEMPLATE_GLMEDGE,
     LLM_CHAT_TEMPLATE_MINICPM,
     LLM_CHAT_TEMPLATE_EXAONE_3,
     LLM_CHAT_TEMPLATE_RWKV_WORLD,
examples/talk-llama/llama-grammar.cpp CHANGED

@@ -560,7 +560,7 @@ bool llama_grammar_parser::parse(const char * src) {
         }
     } catch (const std::exception & err) {
-        fprintf(stderr, "%s: error parsing grammar: %s\n", __func__, err.what());
+        fprintf(stderr, "%s: error parsing grammar: %s\n\n%s\n", __func__, err.what(), src);
         rules.clear();
         return false;
     }

@@ -960,10 +960,28 @@ struct llama_grammar * llama_grammar_init_impl(
     // Important: vec_rules has to be moved here, not copied, because stacks contains
     // pointers to elements of vec_rules. If vec_rules were copied into llama_grammar
     // then the pointers would be invalidated when the local vec_rules goes out of scope.
-    return new llama_grammar { …
+    return new llama_grammar {
+        vocab,
+        std::move(vec_rules),
+        std::move(stacks),
+        /* .partial_utf8 = */     {},
+        /* .lazy =*/              false,
+        /* .awaiting_trigger = */ false,
+        /* .trigger_buffer = */   "",
+        /* .trigger_tokens = */   {},
+        /* .trigger_words = */    {},
+    };
 }

-struct llama_grammar * llama_grammar_init_impl( …
+struct llama_grammar * llama_grammar_init_impl(
+        const struct llama_vocab * vocab,
+        const char * grammar_str,
+        const char * grammar_root,
+        bool lazy,
+        const char ** trigger_words,
+        size_t num_trigger_words,
+        const llama_token * trigger_tokens,
+        size_t num_trigger_tokens) {
     llama_grammar_parser parser;

     // if there is a grammar, parse it

@@ -1035,10 +1053,31 @@ struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab,
         }
     } while (true);

+    std::vector<llama_token> vec_trigger_tokens;
+    std::vector<std::string> vec_trigger_words;
+    for (size_t i = 0; i < num_trigger_tokens; i++) {
+        GGML_ASSERT(trigger_tokens != nullptr);
+        vec_trigger_tokens.push_back(trigger_tokens[i]);
+    }
+    for (size_t i = 0; i < num_trigger_words; i++) {
+        GGML_ASSERT(trigger_words != nullptr);
+        vec_trigger_words.push_back(trigger_words[i]);
+    }
+
     // Important: vec_rules has to be moved here, not copied, because stacks contains
     // pointers to elements of vec_rules. If vec_rules were copied into llama_grammar
     // then the pointers would be invalidated when the local vec_rules goes out of scope.
-    return new llama_grammar { …
+    return new llama_grammar {
+        vocab,
+        std::move(vec_rules),
+        std::move(stacks),
+        /* .partial_utf8 = */     {},
+        /* .lazy = */             lazy,
+        /* .awaiting_trigger = */ lazy,
+        /* .trigger_buffer = */   "",
+        std::move(vec_trigger_tokens),
+        std::move(vec_trigger_words),
+    };
 }

 void llama_grammar_free_impl(struct llama_grammar * grammar) {

@@ -1055,6 +1094,11 @@ struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & gra
         grammar.rules,
         grammar.stacks,
         grammar.partial_utf8,
+        grammar.lazy,
+        grammar.awaiting_trigger,
+        grammar.trigger_buffer,
+        grammar.trigger_tokens,
+        grammar.trigger_words,
     };

     // redirect elements in stacks to point to new rules

@@ -1076,6 +1120,10 @@ void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_
 void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_data_array * cur_p) {
     GGML_ASSERT(grammar.vocab != nullptr);

+    if (grammar.awaiting_trigger) {
+        return;
+    }
+
     bool allow_eog = false;
     for (const auto & stack : grammar.stacks) {
         if (stack.empty()) {

@@ -1115,6 +1163,34 @@ void llama_grammar_accept_impl(const struct llama_grammar & grammar, llama_token_
 void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token) {
     GGML_ASSERT(grammar.vocab != nullptr);

+    const auto & piece = grammar.vocab->token_to_piece(token);
+
+    if (grammar.awaiting_trigger) {
+        if (std::find(grammar.trigger_tokens.begin(), grammar.trigger_tokens.end(), token) != grammar.trigger_tokens.end()) {
+            grammar.awaiting_trigger = false;
+            grammar.trigger_buffer.clear();
+            llama_grammar_accept_str(grammar, piece);
+            LLAMA_LOG_DEBUG("Grammar triggered on token %u (`%s`)", token, piece.c_str());
+            return;
+        } else {
+            // TODO: consider a smarter incremental substring search algorithm (store last position to search from).
+            grammar.trigger_buffer += piece;
+            for (const auto & word : grammar.trigger_words) {
+                auto pos = grammar.trigger_buffer.find(word);
+                if (pos != std::string::npos) {
+                    grammar.awaiting_trigger = false;
+                    auto constrained_str = grammar.trigger_buffer.substr(pos);
+                    grammar.trigger_buffer.clear();
+                    llama_grammar_accept_str(grammar, constrained_str);
+                    LLAMA_LOG_DEBUG("Grammar triggered on word `%s`", word.c_str());
+                    return;
+                }
+            }
+            LLAMA_LOG_DEBUG("Grammar still awaiting trigger after token %d (`%s`) (buffer: `%s`)\n", token, piece.c_str(), grammar.trigger_buffer.c_str());
+            return;
+        }
+    }
+
     if (grammar.vocab->is_eog(token)) {
         for (const auto & stack : grammar.stacks) {
             if (stack.empty()) {

@@ -1124,8 +1200,10 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token
         GGML_ABORT("fatal error");
     }

-    …
+    llama_grammar_accept_str(grammar, piece);
+}

+void llama_grammar_accept_str(struct llama_grammar & grammar, const std::string & piece) {
     // Note terminating 0 in decoded string
     const auto decoded = decode_utf8(piece, grammar.partial_utf8);
     const auto & code_points = decoded.first;

@@ -1135,5 +1213,7 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token
     }

     grammar.partial_utf8 = decoded.second;
-    …
+    if (grammar.stacks.empty()) {
+        throw std::runtime_error("Unexpected empty grammar stack after accepting piece: " + piece);
+    }
 }
examples/talk-llama/llama-grammar.h CHANGED

@@ -114,6 +114,15 @@ struct llama_grammar {

     // buffer for partially generated UTF-8 sequence from accepted tokens
     llama_partial_utf8 partial_utf8;
+
+    // lazy grammars wait for trigger words or tokens before constraining the sampling.
+    // we still have trigger_tokens for non-lazy grammars to force printing of special trigger tokens.
+    // (useful e.g. for tool_choice=required)
+    bool                     lazy             = false;
+    bool                     awaiting_trigger = false; // Initialized to true for lazy grammars only
+    std::string              trigger_buffer;           // Output buffered by lazy grammar. Will be cleared once trigger is found.
+    std::vector<llama_token> trigger_tokens;           // Tokens that trigger a lazy grammar, or tokens to force printing of (even if special).
+    std::vector<std::string> trigger_words;
 };

 //

@@ -127,7 +136,15 @@ struct llama_grammar * llama_grammar_init_impl(
         size_t n_rules,
         size_t start_rule_index);

-struct llama_grammar * llama_grammar_init_impl( …
+struct llama_grammar * llama_grammar_init_impl(
+        const struct llama_vocab * vocab,
+        const char * grammar_str,
+        const char * grammar_root,
+        bool lazy,
+        const char ** trigger_words,
+        size_t num_trigger_words,
+        const llama_token * trigger_tokens,
+        size_t num_trigger_tokens);

 void llama_grammar_free_impl(struct llama_grammar * grammar);

@@ -141,3 +158,7 @@ void llama_grammar_apply_impl(
 void llama_grammar_accept_impl(
         struct llama_grammar & grammar,
         llama_token token);
+
+void llama_grammar_accept_str(
+        struct llama_grammar & grammar,
+        const std::string & piece);
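A minimal standalone sketch of the lazy-trigger buffering described by these new fields; the `lazy_state` struct and the `on_piece` helper are illustrative only, the real logic lives in `llama_grammar_accept_impl` above:

    #include <string>
    #include <vector>

    struct lazy_state {
        bool        awaiting_trigger = true; // lazy grammars start out unconstrained
        std::string trigger_buffer;
    };

    // Returns the substring that should be fed to the grammar once a trigger word appears,
    // or an empty string while generation is still free-form.
    std::string on_piece(lazy_state & st, const std::string & piece,
                         const std::vector<std::string> & trigger_words) {
        if (!st.awaiting_trigger) {
            return piece; // grammar already active: constrain everything from here on
        }
        st.trigger_buffer += piece;
        for (const auto & word : trigger_words) {
            auto pos = st.trigger_buffer.find(word);
            if (pos != std::string::npos) {
                st.awaiting_trigger = false;
                std::string constrained = st.trigger_buffer.substr(pos); // from the trigger onward
                st.trigger_buffer.clear();
                return constrained;
            }
        }
        return ""; // still waiting for a trigger word
    }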
examples/talk-llama/llama-mmap.cpp CHANGED

@@ -7,6 +7,7 @@
 #include <cstring>
 #include <climits>
 #include <stdexcept>
+#include <cerrno>

 #ifdef __has_include
 #if __has_include(<unistd.h>)
examples/talk-llama/llama-model-loader.cpp CHANGED

@@ -64,6 +64,33 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
     }
 }

+// return a list of splits for a given path
+// for example, given "<name>-00002-of-00004.gguf", returns list of all 4 splits
+static std::vector<std::string> llama_get_list_splits(const std::string & path, const int idx, const int n_split) {
+    std::vector<std::string> paths;
+    std::string split_prefix;
+    std::vector<char> buf(llama_path_max(), 0);
+
+    {
+        int ret = llama_split_prefix(buf.data(), buf.size(), path.c_str(), idx, n_split);
+        if (!ret) {
+            throw std::runtime_error(format("invalid split file name: %s", path.c_str()));
+        }
+        split_prefix = std::string(buf.data(), ret);
+    }
+
+    if (split_prefix.empty()) {
+        throw std::runtime_error(format("invalid split file: %s", path.c_str()));
+    }
+
+    for (int idx = 0; idx < n_split; ++idx) {
+        int ret = llama_split_path(buf.data(), buf.size(), split_prefix.c_str(), idx, n_split);
+        paths.push_back(std::string(buf.data(), ret));
+    }
+
+    return paths;
+}
+
 namespace GGUFMeta {
     template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int64_t)>
     struct GKV_Base_Type {

@@ -413,7 +440,12 @@
 template bool llama_model_loader::get_key_or_arr<std::array<int, 4>>(enum llm_kv kid, std::array<int, 4> & result, uint32_t n, bool required);
 template bool llama_model_loader::get_key_or_arr<std::array<uint32_t, 512>>(enum llm_kv kid, std::array<uint32_t, 512> & result, uint32_t n, bool required);

-llama_model_loader::llama_model_loader( …
+llama_model_loader::llama_model_loader(
+        const std::string & fname,
+        std::vector<std::string> & splits,
+        bool use_mmap,
+        bool check_tensors,
+        const struct llama_model_kv_override * param_overrides_p) {
     int trace = 0;
     if (getenv("LLAMA_TRACE")) {
         trace = atoi(getenv("LLAMA_TRACE"));

@@ -425,6 +457,7 @@ llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap,
         }
     }

+    // Load the main GGUF
     struct ggml_context * ctx = NULL;
     struct gguf_init_params params = {
         /*.no_alloc = */ true,

@@ -460,35 +493,54 @@ llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap,

     // Load additional GGML contexts
     if (n_split > 1) {
+        // make sure the main file is loaded first
         uint16_t idx = 0;
-        …
+        const std::string kv_split_no = llm_kv(LLM_KV_SPLIT_NO);
+        get_key(kv_split_no, idx);
         if (idx != 0) {
-            throw std::runtime_error(format("illegal split file: %d, model must be loaded with the first split", idx));
+            throw std::runtime_error(format("illegal split file idx: %d (file: %s), model must be loaded with the first split", idx, fname.c_str()));
+        }
+
+        // generate list of splits if needed
+        if (splits.empty()) {
+            splits = llama_get_list_splits(fname, idx, n_split);
         }

-        …
-        if ( …
-            throw std::runtime_error(format("invalid split …
+        // in case user give a custom list of splits, check if it matches the expected number
+        if (n_split != (uint16_t)splits.size()) {
+            throw std::runtime_error(format("invalid split count, given: %zu splits, but expected %d", splits.size(), n_split));
         }

         if (trace > 0) {
             LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split);
         }

-        …
+        // load other splits
         for (idx = 1; idx < n_split; idx++) {
-            …
+            const char * fname_split = splits[idx].c_str();

             struct gguf_init_params split_params = {
                 /*.no_alloc = */ true,
                 /*.ctx      = */ &ctx,
             };
-            gguf_context_ptr ctx_gguf { gguf_init_from_file( …
+            gguf_context_ptr ctx_gguf { gguf_init_from_file(fname_split, split_params) };
             if (!ctx_gguf) {
-                throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, …
+                throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, fname_split));
+            }
+
+            // check idx
+            {
+                const int kid = gguf_find_key(ctx_gguf.get(), kv_split_no.c_str());
+                if (kid < 0) {
+                    throw std::runtime_error(format("missing key %s in GGUF split %s", kv_split_no.c_str(), fname_split));
+                }
+                int idx_gguf = gguf_get_val_u16(ctx_gguf.get(), kid);
+                if (idx_gguf != idx) {
+                    throw std::runtime_error(format("invalid split file idx: %d (file: %s), expected %d", idx_gguf, fname_split, idx));
+                }
             }

-            files.emplace_back(new llama_file( …
+            files.emplace_back(new llama_file(fname_split, "rb"));
             contexts.emplace_back(ctx);

             // Save tensors data offset info of the shard.

@@ -767,7 +819,7 @@ void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps
     for (const auto & file : files) {
         auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU));
         auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
-        std::unique_ptr<llama_mmap> mapping …
+        std::unique_ptr<llama_mmap> mapping = std::make_unique<llama_mmap>(file.get(), prefetch ? -1 : 0, is_numa_fn());
         mmaps_used.emplace_back(mapping->size(), 0);
         if (mlock_mmaps) {
             std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
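For illustration, a sketch of the split naming scheme the new helper enumerates. It assumes the usual "<prefix>-%05d-of-%05d.gguf" convention handled by `llama_split_path`; the `list_splits` helper and the model name below are made up for the example:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Enumerate the expected file names for an n_split-way sharded GGUF model.
    std::vector<std::string> list_splits(const std::string & prefix, int n_split) {
        std::vector<std::string> paths;
        char buf[512];
        for (int i = 0; i < n_split; ++i) {
            // split indices are 1-based in the file name
            std::snprintf(buf, sizeof(buf), "%s-%05d-of-%05d.gguf", prefix.c_str(), i + 1, n_split);
            paths.push_back(buf);
        }
        return paths;
    }

    // list_splits("models/mymodel", 4)[1] == "models/mymodel-00002-of-00004.gguf"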
examples/talk-llama/llama-model-loader.h CHANGED

@@ -90,7 +90,12 @@ struct llama_model_loader {
     size_t size_data = 0;
     std::vector<std::pair<size_t, size_t>> mmaps_used;

-    llama_model_loader( …
+    llama_model_loader(
+        const std::string & fname,
+        std::vector<std::string> & splits, // optional, only need if the split does not follow naming scheme
+        bool use_mmap,
+        bool check_tensors,
+        const struct llama_model_kv_override * param_overrides_p);

     template<typename T>
     typename std::enable_if<std::is_integral<T>::value, bool>::type
examples/talk-llama/llama-model.cpp CHANGED

@@ -1093,8 +1093,20 @@ void llama_model::load_hparams(llama_model_loader & ml) {
         {
             ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
             switch (hparams.n_layer) {
-                case 28:
-                    …
+                case 28: {
+                    if (hparams.n_head(0) == 16) {
+                        type = LLM_TYPE_1_5B;
+                    } else {
+                        type = LLM_TYPE_6B;
+                    }
+                } break;
+                case 40: {
+                    if (hparams.n_head(0) == 24) {
+                        type = LLM_TYPE_4B;
+                    } else {
+                        type = LLM_TYPE_9B;
+                    }
+                } break;
                 default: type = LLM_TYPE_UNKNOWN;
             }
         } break;

@@ -1303,10 +1315,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
     const int act_gpu_layers = devices.empty() ? 0 : std::min(n_gpu_layers, (int)n_layer + 1);
     auto get_layer_buft_list = [&](int il) -> llama_model::impl::layer_dev {
         if (il < i_gpu_start || (il - i_gpu_start) >= act_gpu_layers) {
+            LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s\n", il, ggml_backend_dev_name(cpu_dev));
             return {cpu_dev, &pimpl->cpu_buft_list};
         }
         const int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + n_devices(), float(il - i_gpu_start)/act_gpu_layers) - splits.begin();
         auto * dev = devices.at(layer_gpu);
+        LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s\n", il, ggml_backend_dev_name(dev));
         return {dev, &pimpl->gpu_buft_list.at(dev)};
     };

@@ -2203,6 +2217,50 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
                 }
             } break;
+        case LLM_ARCH_PHIMOE:
+            {
+                const int64_t n_embd_head = n_embd / n_head;
+
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
+
+                // output
+                output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
+                output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
+                output        = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, 0);
+                output_b      = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"),   { n_vocab }, 0);
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
+                    layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias",   i), { n_embd }, 0);
+
+                    layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED);
+                    if (layer.wqkv == nullptr) {
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
+                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias",   i), {n_embd}, 0);
+
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias",   i), {n_embd_gqa}, 0);
+
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias",   i), {n_embd_gqa}, 0);
+                    }
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
+                    layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), { n_embd }, 0);
+
+                    layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
+                    layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias",   i), { n_embd }, 0);
+
+                    layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert},       0);
+                    layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
+                    layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
+                    layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff, n_expert}, 0);
+
+                    layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
+                    layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
+                }
+            } break;
         case LLM_ARCH_PLAMO:
             {
                 tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);

@@ -3022,9 +3080,17 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     auto & layer = layers[i];

                     layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                    layer.wqkv …
-                    …
+                    layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                    layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias",   i), {n_embd + 2*n_embd_gqa},         llama_model_loader::TENSOR_NOT_REQUIRED);

+                    if (layer.wqkv == nullptr) {
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd},     llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                    }

                     layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);

@@ -3717,7 +3783,6 @@ struct llama_model_params llama_model_default_params() {
         /*.split_mode                  =*/ LLAMA_SPLIT_MODE_LAYER,
        /*.main_gpu                     =*/ 0,
        /*.tensor_split                 =*/ nullptr,
-       /*.rpc_servers                  =*/ nullptr,
        /*.progress_callback            =*/ nullptr,
        /*.progress_callback_user_data  =*/ nullptr,
        /*.kv_overrides                 =*/ nullptr,

@@ -3912,8 +3977,10 @@ uint64_t llama_model_size(const struct llama_model * model) {
     return model->size();
 }

-const char * llama_model_chat_template(const struct llama_model * model) {
-    const auto …
+const char * llama_model_chat_template(const struct llama_model * model, const char * name) {
+    const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE_N)
+        : LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE);
+    const auto & it = model->gguf_kv.find(key);
     if (it == model->gguf_kv.end()) {
         return nullptr;
     }
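A minimal usage sketch of the extended `llama_model_chat_template` signature, assuming an already loaded `llama_model * model`; the template name "tool_use" is hypothetical and only illustrates the named-template lookup:

    // nullptr asks for the model's default chat template.
    const char * tmpl_default = llama_model_chat_template(model, /* name = */ nullptr);

    // A named template; returns nullptr if the model does not ship one under that name.
    const char * tmpl_named = llama_model_chat_template(model, "tool_use");
    if (tmpl_named == nullptr) {
        tmpl_named = tmpl_default; // fall back to the default template
    }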
examples/talk-llama/llama-model.h CHANGED

@@ -323,8 +323,6 @@ struct llama_model {
     // gguf metadata
     std::unordered_map<std::string, std::string> gguf_kv;

-    std::vector<std::string> rpc_servers;
-
     // list of devices used in this model
     std::vector<ggml_backend_dev_t> devices;
examples/talk-llama/llama-quant.cpp CHANGED

@@ -526,7 +526,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         kv_overrides = v->data();
     }

-    …
+    std::vector<std::string> splits = {};
+    llama_model_loader ml(fname_inp, splits, use_mmap, /*check_tensors*/ true, kv_overrides);
     ml.init_mappings(false); // no prefetching

     llama_model model(llama_model_default_params());
examples/talk-llama/llama-sampling.cpp CHANGED

@@ -1433,13 +1433,30 @@ static void llama_sampler_grammar_apply(struct llama_sampler * smpl, llama_token
     }
 }

+// Fwd declare to break reset --> init_impl --> llama_sampler_grammar_i --> reset cycle.
+static struct llama_sampler * llama_sampler_init_grammar_impl(
+        const struct llama_vocab * vocab,
+        const char * grammar_str,
+        const char * grammar_root,
+        bool lazy,
+        const char ** trigger_words,
+        size_t num_trigger_words,
+        const llama_token * trigger_tokens,
+        size_t num_trigger_tokens);
+
 static void llama_sampler_grammar_reset(struct llama_sampler * smpl) {
     auto * ctx = (llama_sampler_grammar *) smpl->ctx;
     if (!ctx->grammar) {
         return;
     }

-    …
+    std::vector<const char *> trigger_words;
+    for (auto & word : ctx->grammar->trigger_words) {
+        trigger_words.push_back(word.c_str());
+    }
+    auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, ctx->grammar_str.c_str(), ctx->grammar_root.c_str(),
+                                                 ctx->grammar->lazy, trigger_words.data(), trigger_words.size(),
+                                                 ctx->grammar->trigger_tokens.data(), ctx->grammar->trigger_tokens.size());

     llama_grammar_free_impl(ctx->grammar);
     ctx->grammar = grammar_new;

@@ -1448,7 +1465,7 @@ static void llama_sampler_grammar_reset(struct llama_sampler * smpl) {
 static struct llama_sampler * llama_sampler_grammar_clone(const struct llama_sampler * smpl) {
     const auto * ctx = (const llama_sampler_grammar *) smpl->ctx;

-    auto * result = …
+    auto * result = llama_sampler_init_grammar_impl(ctx->vocab, nullptr, nullptr, false, nullptr, 0, nullptr, 0);

     // copy the state
     {

@@ -1484,7 +1501,15 @@ static struct llama_sampler_i llama_sampler_grammar_i = {
     /* .free   = */ llama_sampler_grammar_free,
 };

-struct llama_sampler * …
+static struct llama_sampler * llama_sampler_init_grammar_impl(
+        const struct llama_vocab * vocab,
+        const char * grammar_str,
+        const char * grammar_root,
+        bool lazy,
+        const char ** trigger_words,
+        size_t num_trigger_words,
+        const llama_token * trigger_tokens,
+        size_t num_trigger_tokens) {
     auto * ctx = new llama_sampler_grammar;

     if (grammar_str != nullptr && grammar_str[0] != '\0') {

@@ -1492,7 +1517,7 @@ struct llama_sampler * llama_sampler_init_grammar(const struct llama_vocab * voc
             /* .vocab        = */ vocab,
             /* .grammar_str  = */ grammar_str,
             /* .grammar_root = */ grammar_root,
-            /* .grammar      = */ llama_grammar_init_impl(vocab, grammar_str, grammar_root),
+            /* .grammar      = */ llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, trigger_words, num_trigger_words, trigger_tokens, num_trigger_tokens),
         };
     } else {
         *ctx = {

@@ -1509,6 +1534,24 @@ struct llama_sampler * llama_sampler_init_grammar(const struct llama_vocab * voc
         };
     }

+struct llama_sampler * llama_sampler_init_grammar(
+        const struct llama_vocab * vocab,
+        const char * grammar_str,
+        const char * grammar_root) {
+    return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ false, nullptr, 0, nullptr, 0);
+}
+
+struct llama_sampler * llama_sampler_init_grammar_lazy(
+        const struct llama_vocab * vocab,
+        const char * grammar_str,
+        const char * grammar_root,
+        const char ** trigger_words,
+        size_t num_trigger_words,
+        const llama_token * trigger_tokens,
+        size_t num_trigger_tokens) {
+    return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ true, trigger_words, num_trigger_words, trigger_tokens, num_trigger_tokens);
+}
+
 // penalties

 struct llama_sampler_penalties {
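A usage sketch of the new lazy-grammar entry point, assuming an existing `llama_vocab * vocab` and a GBNF grammar string `my_grammar`; the trigger word "<tool_call>" is hypothetical:

    // Sampling stays unconstrained until the model emits the trigger word; from that
    // point on, output is constrained by the grammar starting at rule "root".
    const char * trigger_words[] = { "<tool_call>" };

    struct llama_sampler * smpl = llama_sampler_init_grammar_lazy(
        vocab, my_grammar, "root",
        trigger_words, /* num_trigger_words = */ 1,
        /* trigger_tokens = */ nullptr, /* num_trigger_tokens = */ 0);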
examples/talk-llama/llama-vocab.cpp CHANGED

@@ -439,7 +439,7 @@ struct llm_tokenizer_bpe_session {
                 "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
                 "Are you sure this is what you want?\n", __FUNCTION__);
         }
-        if (vocab. …
+        if (vocab.get_add_eos() && output.size() >= 2 && *(output.end()-2) == vocab.token_eos()) {
             LLAMA_LOG_WARN(
                 "%s: Added a EOS token to the prompt as specified by the model but the prompt "
                 "also ends with a EOS token. So now the final prompt ends with 2 EOS tokens. "

@@ -1245,8 +1245,13 @@ struct llama_vocab::impl {

     std::vector<llama_token> cache_special_tokens;
     std::vector<std::string> cache_token_to_piece; // llama_token_to_piece(special = true);
-    …
-    …
+    struct pair_hash {
+        size_t operator()(const std::pair<std::string, std::string> & p) const {
+            return std::hash<std::string>{}(p.first) ^  //create some hash for pair
+                   (std::hash<std::string>{}(p.second) << 1);
+        }
+    };
+    std::unordered_map<std::pair<std::string, std::string>, int, pair_hash> bpe_ranks;

     // set of all tokens that cause "end of generation"
     std::set<llama_token> special_eog_ids;

@@ -1356,8 +1361,9 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {

         // read vocab size from metadata
         uint32_t n_tokens = 0;
-        if ( …
-            LLAMA_LOG_WARN("%s: …
+        if (ml.get_key(LLM_KV_VOCAB_SIZE, n_tokens, false)) {
+            LLAMA_LOG_WARN("%s: adding %u dummy tokens\n", __func__, n_tokens);
+            id_to_token.resize(n_tokens);
         }

         return;

@@ -1522,7 +1528,8 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             pre_type = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
             clean_spaces = false;
         } else if (
-            …
+            tokenizer_pre == "qwen2" ||
+            tokenizer_pre == "deepseek-r1-qwen") {
             pre_type = LLAMA_VOCAB_PRE_TYPE_QWEN2;
             clean_spaces = false;
         } else if (

@@ -1685,7 +1692,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
         GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
         linefeed_id = ids[0];
     } else {
-        const std::vector<int> ids = tokenize("\ …
+        const std::vector<int> ids = tokenize("\n", false);

         //GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
         if (ids.empty()) {
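For context, `std::unordered_map` has no default hash for `std::pair`, which is why the new `pair_hash` functor is needed before a `{left, right}` merge pair can be used as a key for `bpe_ranks`. A self-contained sketch (the `ranks` map and the sample pair are illustrative):

    #include <string>
    #include <unordered_map>
    #include <utility>

    // Combine the two string hashes; same scheme as the pair_hash added above.
    struct pair_hash {
        size_t operator()(const std::pair<std::string, std::string> & p) const {
            return std::hash<std::string>{}(p.first) ^ (std::hash<std::string>{}(p.second) << 1);
        }
    };

    std::unordered_map<std::pair<std::string, std::string>, int, pair_hash> ranks;
    // ranks[{"h", "e"}] = 0; // a lower rank means the pair is merged earlier during BPE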
examples/talk-llama/llama.cpp
CHANGED
|
@@ -31,7 +31,7 @@
|
|
| 31 |
#endif
|
| 32 |
|
| 33 |
// Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
|
| 34 |
-
static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
|
| 35 |
// loading time will be recalculated after the first eval, so
|
| 36 |
// we take page faults deferred by mmap() into consideration
|
| 37 |
model.t_load_us = 0;
|
|
@@ -40,7 +40,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
|
|
| 40 |
model.t_start_us = tm.t_start_us;
|
| 41 |
|
| 42 |
try {
|
| 43 |
-
llama_model_loader ml(fname, params.use_mmap, params.check_tensors, params.kv_overrides);
|
| 44 |
|
| 45 |
ml.print_info();
|
| 46 |
|
|
@@ -4642,7 +4642,7 @@ struct llm_build_context {
|
|
| 4642 |
0);
|
| 4643 |
cb(v_states, "v_states", il);
|
| 4644 |
|
| 4645 |
-
q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend
|
| 4646 |
q_pe = ggml_rope_ext(
|
| 4647 |
ctx0, q_pe, inp_pos, rope_factors,
|
| 4648 |
n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
|
|
@@ -4651,7 +4651,7 @@ struct llm_build_context {
|
|
| 4651 |
cb(q_pe, "q_pe", il);
|
| 4652 |
|
| 4653 |
// shared RoPE key
|
| 4654 |
-
k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend
|
| 4655 |
k_pe = ggml_rope_ext(
|
| 4656 |
ctx0, k_pe, inp_pos, rope_factors,
|
| 4657 |
n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
|
|
@@ -6496,7 +6496,7 @@ struct llm_build_context {
|
|
| 6496 |
0);
|
| 6497 |
cb(v_states, "v_states", il);
|
| 6498 |
|
| 6499 |
-
q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend
|
| 6500 |
q_pe = ggml_rope_ext(
|
| 6501 |
ctx0, q_pe, inp_pos, nullptr,
|
| 6502 |
n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
|
|
@@ -6505,7 +6505,7 @@ struct llm_build_context {
|
|
| 6505 |
cb(q_pe, "q_pe", il);
|
| 6506 |
|
| 6507 |
// shared RoPE key
|
| 6508 |
-
k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend
|
| 6509 |
k_pe = ggml_rope_ext(
|
| 6510 |
ctx0, k_pe, inp_pos, nullptr,
|
| 6511 |
n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
|
|
@@ -7215,17 +7215,30 @@ struct llm_build_context {
|
|
| 7215 |
struct ggml_tensor * Qcur = nullptr;
|
| 7216 |
struct ggml_tensor * Kcur = nullptr;
|
| 7217 |
struct ggml_tensor * Vcur = nullptr;
|
| 7218 |
-
|
| 7219 |
-
|
| 7220 |
-
|
| 7221 |
-
|
| 7222 |
-
|
| 7223 |
-
|
| 7224 |
-
|
| 7225 |
-
|
| 7226 |
-
|
| 7227 |
-
|
| 7228 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7229 |
cb(Qcur, "Qcur", il);
|
| 7230 |
cb(Kcur, "Kcur", il);
|
| 7231 |
cb(Vcur, "Vcur", il);
|
|
@@ -7700,17 +7713,13 @@ struct llm_build_context {
|
|
| 7700 |
1
|
| 7701 |
);
|
| 7702 |
|
|
|
|
| 7703 |
ggml_build_forward_expand(
|
| 7704 |
gf,
|
| 7705 |
ggml_cpy(
|
| 7706 |
ctx0,
|
| 7707 |
-
|
| 7708 |
-
ggml_view_1d(
|
| 7709 |
-
ctx0,
|
| 7710 |
-
kv_self.v_l[il],
|
| 7711 |
-
hparams.n_embd_v_s() * n_seqs,
|
| 7712 |
-
hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self.v_l[il])
|
| 7713 |
-
)
|
| 7714 |
)
|
| 7715 |
);
|
| 7716 |
|
|
@@ -8432,74 +8441,33 @@ static enum ggml_status llama_graph_compute(
     return status;
 }

-//
-//   - lctx:      llama context
-//   - batch:     batch to evaluate
-//
-// return 0 on success
-// return positive int on warning
-// return negative int on error
-//
-static int llama_decode_impl(
-         llama_context & lctx,
-           llama_batch   inp_batch) {
-
-    lctx.is_encoding = false;
-
-    if (inp_batch.n_tokens == 0) {
-        LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
-        return -1;
-    }
-
-    // temporary allocate memory for the input batch if needed
-    llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : lctx.kv_self.max_pos() + 1);
-
-    const llama_batch & batch = batch_allocr.batch;
-    const uint32_t n_tokens_all = batch.n_tokens;
-
     const auto & model   = lctx.model;
-    const auto & vocab   = model.vocab;
     const auto & hparams = model.hparams;
     const auto & cparams = lctx.cparams;

     if (batch.token) {
         for (uint32_t i = 0; i < n_tokens_all; ++i) {
-            if (batch.token[i] < 0 || (
                 LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]);
                 return -1;
             }
         }
     }
-
     GGML_ASSERT(n_tokens_all <= cparams.n_batch);
-
     GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens");

-    if (lctx.t_compute_start_us == 0) {
-        lctx.t_compute_start_us = ggml_time_us();
-    }
     lctx.n_queued_tokens += n_tokens_all;
-
-    auto & kv_self = lctx.kv_self;
-    llama_kv_slot_restorer kv_slot_restorer(kv_self);
-
-    const int64_t n_embd  = hparams.n_embd;
-    const int64_t n_vocab = vocab.n_tokens();
-
-    uint32_t n_outputs      = 0;
-    uint32_t n_outputs_prev = 0;
-
-    const auto n_ubatch = cparams.n_ubatch;
-
-    // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens
-    const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE;
-
     lctx.embd_seq.clear();

     // count outputs
@@ -8515,7 +8483,7 @@ static int llama_decode_impl(
     }

     lctx.sbatch.from_batch(batch, n_embd,
-        /* simple_split */ !kv_self.recurrent,
         /* logits_all */   n_outputs == n_tokens_all);

     // reserve output buffer
@@ -8524,70 +8492,148 @@ static int llama_decode_impl(
         return -2;
     };

-            llama_kv_cache_update(&lctx);

     //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);

     ggml_backend_sched_reset(lctx.sched.get());
@@ -8640,7 +8686,7 @@ static int llama_decode_impl(
         // update the kv ring buffer
         {
-            kv_self.head += n_tokens;

             // Ensure kv cache head points to a valid index.
             if (kv_self.head >= kv_self.size) {
@@ -9374,14 +9420,9 @@ int64_t llama_time_us(void) {
     return ggml_time_us();
 }

-struct llama_model *
-        const
-    return llama_model_load_from_file(path_model, params);
-}
-
-struct llama_model * llama_model_load_from_file(
-        const char * path_model,
         struct llama_model_params params) {
     ggml_time_init();
@@ -9404,53 +9445,13 @@ struct llama_model * llama_model_load_from_file(
     };
 }

-    if (params.rpc_servers != nullptr && params.rpc_servers[0] != '\0') {
-        // split the servers set them into model->rpc_servers
-        std::string servers(params.rpc_servers);
-        size_t pos = 0;
-        while ((pos = servers.find(',')) != std::string::npos) {
-            std::string server = servers.substr(0, pos);
-            model->rpc_servers.push_back(server);
-            servers.erase(0, pos + 1);
-        }
-        model->rpc_servers.push_back(servers);
-    }
-
-    // add RPC devices
-    if (!model->rpc_servers.empty()) {
-        ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
-        if (!rpc_reg) {
-            LLAMA_LOG_ERROR("%s: failed to find RPC backend\n", __func__);
-            llama_model_free(model);
-            return nullptr;
-        }
-
-        typedef ggml_backend_dev_t (*ggml_backend_rpc_add_device_t)(const char * endpoint);
-        ggml_backend_rpc_add_device_t ggml_backend_rpc_add_device_fn = (ggml_backend_rpc_add_device_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
-        if (!ggml_backend_rpc_add_device_fn) {
-            LLAMA_LOG_ERROR("%s: failed to find RPC device add function\n", __func__);
-            llama_model_free(model);
-            return nullptr;
-        }
-
-        for (const std::string & server : model->rpc_servers) {
-            ggml_backend_dev_t dev = ggml_backend_rpc_add_device_fn(server.c_str());
-            if (dev) {
-                model->devices.push_back(dev);
-            } else {
-                LLAMA_LOG_ERROR("%s: failed to add RPC device for server '%s'\n", __func__, server.c_str());
-                llama_model_free(model);
-                return nullptr;
-            }
-        }
-    }
-
     // create list of devices to use with this model
     if (params.devices) {
         for (ggml_backend_dev_t * dev = params.devices; *dev; ++dev) {
             model->devices.push_back(*dev);
         }
     } else {
         // use all available devices
         for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
             ggml_backend_dev_t dev = ggml_backend_dev_get(i);
@@ -9461,10 +9462,19 @@ struct llama_model * llama_model_load_from_file(
                     break;

                 case GGML_BACKEND_DEVICE_TYPE_GPU:
                     break;
             }
         }
     }

     // if using single GPU mode, remove all except the main GPU
@@ -9485,7 +9495,7 @@ struct llama_model * llama_model_load_from_file(
         LLAMA_LOG_INFO("%s: using device %s (%s) - %zu MiB free\n", __func__, ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), free/1024/1024);
     }

-    const int status = llama_model_load(path_model, *model, params);
     GGML_ASSERT(status <= 0);
     if (status < 0) {
         if (status == -1) {
@@ -9501,6 +9511,35 @@ struct llama_model * llama_model_load_from_file(
     return model;
 }

 struct llama_context * llama_init_from_model(
                  struct llama_model * model,
         struct llama_context_params   params) {
 #endif

 // Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
+static int llama_model_load(const std::string & fname, std::vector<std::string> & splits, llama_model & model, llama_model_params & params) {
     // loading time will be recalculated after the first eval, so
     // we take page faults deferred by mmap() into consideration
     model.t_load_us = 0;

     model.t_start_us = tm.t_start_us;

     try {
+        llama_model_loader ml(fname, splits, params.use_mmap, params.check_tensors, params.kv_overrides);

         ml.print_info();
             0);
         cb(v_states, "v_states", il);

+        q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this
         q_pe = ggml_rope_ext(
             ctx0, q_pe, inp_pos, rope_factors,
             n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
         cb(q_pe, "q_pe", il);

         // shared RoPE key
+        k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this
         k_pe = ggml_rope_ext(
             ctx0, k_pe, inp_pos, rope_factors,
             n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
             0);
         cb(v_states, "v_states", il);

+        q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this
         q_pe = ggml_rope_ext(
             ctx0, q_pe, inp_pos, nullptr,
             n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
         cb(q_pe, "q_pe", il);

         // shared RoPE key
+        k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this
         k_pe = ggml_rope_ext(
             ctx0, k_pe, inp_pos, nullptr,
             n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
         struct ggml_tensor * Qcur = nullptr;
         struct ggml_tensor * Kcur = nullptr;
         struct ggml_tensor * Vcur = nullptr;
+        if (model.type == LLM_TYPE_1_5B || model.type == LLM_TYPE_4B || model.type == LLM_TYPE_9B) {
+            Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
+            if (model.layers[il].bq) {
+                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+            }
+            Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
+            if (model.layers[il].bk) {
+                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+            }
+            Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
+            if (model.layers[il].bv) {
+                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+            }
+        } else {
+            cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
+            cb(cur, "wqkv", il);
+            if (model.layers[il].bqkv) {
+                cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
+                cb(cur, "bqkv", il);
+            }
+            Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd,     n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+            Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+            Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
+        }
         cb(Qcur, "Qcur", il);
         cb(Kcur, "Kcur", il);
         cb(Vcur, "Vcur", il);
                         1
                     );

+                struct ggml_tensor * last_norm_att = ggml_view_3d(ctx0, x_norm_att, n_embd, 1, n_seqs, x_norm_att->nb[1], x_norm_att->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(x_norm_att));
                 ggml_build_forward_expand(
                     gf,
                     ggml_cpy(
                         ctx0,
+                        ggml_view_1d(ctx0, last_norm_att, n_embd * n_seqs, 0),
+                        ggml_view_1d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s() * n_seqs, hparams.n_embd_k_s() * kv_head * ggml_element_size(kv_self.k_l[il]))
                     )
                 );
     return status;
 }

+static int llama_prepare_sbatch(
+        llama_context     & lctx,
+        const llama_batch & batch,
+        uint32_t          & n_outputs) {
     const auto & model   = lctx.model;
     const auto & hparams = model.hparams;
     const auto & cparams = lctx.cparams;

+    const uint32_t n_tokens_all = batch.n_tokens;
+    const int64_t  n_embd       = hparams.n_embd;
+
+    // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens
+    const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE;

+    GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
     if (batch.token) {
         for (uint32_t i = 0; i < n_tokens_all; ++i) {
+            if (batch.token[i] < 0 || uint32_t(batch.token[i]) >= model.vocab.n_tokens()) {
                 LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]);
                 return -1;
             }
         }
     }
     GGML_ASSERT(n_tokens_all <= cparams.n_batch);
     GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens");

     lctx.n_queued_tokens += n_tokens_all;
     lctx.embd_seq.clear();

     // count outputs
     }

     lctx.sbatch.from_batch(batch, n_embd,
+        /* simple_split */ !lctx.kv_self.recurrent,
         /* logits_all */   n_outputs == n_tokens_all);

     // reserve output buffer
         return -2;
     };

+    return 0;
+}
+
+static int llama_prepare_ubatch(
+        llama_context          & lctx,
+        llama_kv_slot_restorer & kv_slot_restorer,
+        llama_ubatch           & ubatch,
+        const uint32_t           n_outputs,
+        const uint32_t           n_tokens_all) {
+    GGML_ASSERT(lctx.sbatch.n_tokens > 0);
+
+    auto & kv_self = lctx.kv_self;
+    const auto & cparams = lctx.cparams;
+    const auto & hparams = lctx.model.hparams;
+
+    // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens
+    const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE;
+
+    if (lctx.kv_self.recurrent) {
+        if (embd_pooled) {
+            // Pooled embeddings cannot be split across ubatches (yet)
+            ubatch = lctx.sbatch.split_seq(cparams.n_ubatch);
         } else {
+            // recurrent model architectures are easier to implement
+            // with equal-length sequences
+            ubatch = lctx.sbatch.split_equal(cparams.n_ubatch);
         }
+    } else {
+        ubatch = lctx.sbatch.split_simple(cparams.n_ubatch);
+    }

+    // count the outputs in this u_batch
+    {
+        int32_t n_outputs_new = 0;

+        if (n_outputs == n_tokens_all) {
+            n_outputs_new = ubatch.n_tokens;
+        } else {
+            GGML_ASSERT(ubatch.output);
+            for (uint32_t i = 0; i < ubatch.n_tokens; i++) {
+                n_outputs_new += int32_t(ubatch.output[i] != 0);
             }
+        }
+
+        // needs to happen before the graph is built
+        lctx.n_outputs = n_outputs_new;
+    }
+
+    // non-causal masks do not use the KV cache
+    if (hparams.causal_attn) {
+        llama_kv_cache_update(&lctx);

+        // if we have enough unused cells before the current head ->
+        //   better to start searching from the beginning of the cache, hoping to fill it
+        if (kv_self.head > kv_self.used + 2*ubatch.n_tokens) {
+            kv_self.head = 0;
         }

+        const auto slot = llama_kv_cache_find_slot(kv_self, ubatch);
+        if (!slot) {
+            return 1;
+        }
+        kv_slot_restorer.save(slot);

+        if (!kv_self.recurrent) {
+            // a heuristic, to avoid attending the full cache if it is not yet utilized
+            // after enough generations, the benefit from this heuristic disappears
+            // if we start defragmenting the cache, the benefit from this will be more important
+            const uint32_t pad = llama_kv_cache_get_padding(cparams);
+            kv_self.n = std::min(kv_self.size, std::max(pad, GGML_PAD(llama_kv_cache_cell_max(kv_self), pad)));
+            //kv_self.n = llama_kv_cache_cell_max(kv_self);
+        }
+    }

+    return 0;
+}

+// decode a batch of tokens by evaluating the transformer
+// in case of unsuccessful decoding (error or warning),
+// the kv_cache state will be returned to its original state
+// (for non-recurrent models) or cleaned (for recurrent models)
+//
+//   - lctx:      llama context
+//   - inp_batch: batch to evaluate
+//
+// return 0 on success
+// return positive int on warning
+// return negative int on error
+//
+static int llama_decode_impl(
+         llama_context & lctx,
+           llama_batch   inp_batch) {

+    lctx.is_encoding = false;
+
+    if (inp_batch.n_tokens == 0) {
+        LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
+        return -1;
+    }
+
+    // temporarily allocate memory for the input batch if needed
+    llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : lctx.kv_self.max_pos() + 1);
+    const llama_batch & batch = batch_allocr.batch;
+
+    const auto & model   = lctx.model;
+    const auto & vocab   = model.vocab;
+    const auto & hparams = model.hparams;
+    const auto & cparams = lctx.cparams;

+    if (lctx.t_compute_start_us == 0) {
+        lctx.t_compute_start_us = ggml_time_us();
+    }
+    auto & kv_self = lctx.kv_self;
+    llama_kv_slot_restorer kv_slot_restorer(kv_self);
+
+    const int64_t n_embd  = hparams.n_embd;
+    const int64_t n_vocab = vocab.n_tokens();
+
+    uint32_t n_outputs      = 0;
+    uint32_t n_outputs_prev = 0;
+
+    {
+        const int ret = llama_prepare_sbatch(lctx, batch, n_outputs);
+        if (ret != 0) {
+            return ret;
+        }
+    }
+
+    while (lctx.sbatch.n_tokens > 0) {
+        llama_ubatch ubatch;
+        {
+            const int ret = llama_prepare_ubatch(lctx, kv_slot_restorer, ubatch, n_outputs, batch.n_tokens);
+            if (ret != 0) {
+                return ret;
             }
         }

+        const int n_threads = ubatch.n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
+        ggml_threadpool_t threadpool = ubatch.n_tokens == 1 ? lctx.threadpool : lctx.threadpool_batch;
+
+        GGML_ASSERT(n_threads > 0);
+
         //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);

         ggml_backend_sched_reset(lctx.sched.get());
         // update the kv ring buffer
         {
+            kv_self.head += ubatch.n_tokens;

             // Ensure kv cache head points to a valid index.
             if (kv_self.head >= kv_self.size) {
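The comment block above llama_decode_impl spells out the public return contract: 0 on success, a positive value for a recoverable warning (for example, no free KV-cache slot for the batch), and a negative value for a hard error. A minimal caller-side sketch against the public API, assuming `ctx`, `tokens`, and `n_tokens` already exist elsewhere (illustrative names, not part of this diff):

    // evaluate one batch of tokens and branch on the documented return contract
    struct llama_batch batch = llama_batch_get_one(tokens, n_tokens);

    const int32_t ret = llama_decode(ctx, batch);
    if (ret > 0) {
        // warning: decoding did not fully succeed (e.g. no KV-cache slot found); retry with a smaller batch
    } else if (ret < 0) {
        // error: the batch could not be evaluated at all
    }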
     return ggml_time_us();
 }

+static struct llama_model * llama_model_load_from_file_impl(
+        const std::string & path_model,
+        std::vector<std::string> & splits,
         struct llama_model_params params) {
     ggml_time_init();
     };
 }

     // create list of devices to use with this model
     if (params.devices) {
         for (ggml_backend_dev_t * dev = params.devices; *dev; ++dev) {
             model->devices.push_back(*dev);
         }
     } else {
+        std::vector<ggml_backend_dev_t> rpc_servers;
         // use all available devices
         for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
             ggml_backend_dev_t dev = ggml_backend_dev_get(i);

                     break;

                 case GGML_BACKEND_DEVICE_TYPE_GPU:
+                    ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
+                    if (ggml_backend_reg_name(reg) == std::string("RPC")) {
+                        rpc_servers.push_back(dev);
+                    } else {
+                        model->devices.push_back(dev);
+                    }
                     break;
             }
         }
+        // add RPC servers at the front of the list
+        if (!rpc_servers.empty()) {
+            model->devices.insert(model->devices.begin(), rpc_servers.begin(), rpc_servers.end());
+        }
     }

     // if using single GPU mode, remove all except the main GPU
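With the rpc_servers field removed from llama_model_params (see the llama.h hunk further down), RPC endpoints are expected to be registered as ordinary ggml devices before the model is loaded; the block above then simply recognizes them by the backend registry name "RPC" and moves them to the front of the device list. A hedged sketch of what caller-side registration could look like, reusing the proc-address lookup that the deleted loader code used (ggml_backend_device_register is assumed to be available from ggml-backend, and the endpoint string is hypothetical):

    // sketch: register one RPC endpoint as a device before calling llama_model_load_from_file()
    ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
    if (rpc_reg) {
        typedef ggml_backend_dev_t (*ggml_backend_rpc_add_device_t)(const char * endpoint);
        auto add_device_fn = (ggml_backend_rpc_add_device_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
        if (add_device_fn) {
            ggml_backend_dev_t dev = add_device_fn("192.168.1.10:50052"); // hypothetical endpoint
            if (dev) {
                ggml_backend_device_register(dev); // assumed registration entry point
            }
        }
    }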
         LLAMA_LOG_INFO("%s: using device %s (%s) - %zu MiB free\n", __func__, ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), free/1024/1024);
     }

+    const int status = llama_model_load(path_model, splits, *model, params);
     GGML_ASSERT(status <= 0);
     if (status < 0) {
         if (status == -1) {
     return model;
 }

+// deprecated
+struct llama_model * llama_load_model_from_file(
+        const char * path_model,
+        struct llama_model_params params) {
+    return llama_model_load_from_file(path_model, params);
+}
+
+struct llama_model * llama_model_load_from_file(
+        const char * path_model,
+        struct llama_model_params params) {
+    std::vector<std::string> splits = {};
+    return llama_model_load_from_file_impl(path_model, splits, params);
+}
+
+struct llama_model * llama_model_load_from_splits(
+        const char ** paths,
+        size_t n_paths,
+        struct llama_model_params params) {
+    std::vector<std::string> splits;
+    if (n_paths == 0) {
+        LLAMA_LOG_ERROR("%s: list of splits is empty\n", __func__);
+        return nullptr;
+    }
+    for (size_t i = 0; i < n_paths; ++i) {
+        splits.push_back(paths[i]);
+    }
+    return llama_model_load_from_file_impl(splits.front(), splits, params);
+}
+
 struct llama_context * llama_init_from_model(
                  struct llama_model * model,
         struct llama_context_params   params) {
examples/talk-llama/llama.h
CHANGED
@@ -288,9 +288,6 @@ extern "C" {
     // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
     const float * tensor_split;

-    // comma separated list of RPC servers to use for offloading
-    const char * rpc_servers;
-
     // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
     // If the provided progress_callback returns true, model loading continues.
     // If it returns false, model loading is immediately aborted.
@@ -418,10 +415,20 @@ extern "C" {
             struct llama_model_params   params),
             "use llama_model_load_from_file instead");

     LLAMA_API struct llama_model * llama_model_load_from_file(
                              const char * path_model,
               struct llama_model_params   params);

     DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
             "use llama_model_free instead");
@@ -503,7 +510,8 @@ extern "C" {
     LLAMA_API uint64_t llama_model_size(const struct llama_model * model);

     // Get the default chat template. Returns nullptr if not available

     // Returns the total number of parameters in the model
     LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
@@ -951,7 +959,7 @@ extern "C" {
     LLAMA_API llama_token llama_vocab_fim_rep(const struct llama_vocab * vocab);
     LLAMA_API llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab);

-    DEPRECATED(LLAMA_API const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token), "use
     DEPRECATED(LLAMA_API float llama_token_get_score(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_score instead");
     DEPRECATED(LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_attr instead");
     DEPRECATED(LLAMA_API bool llama_token_is_eog(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_eog instead");
@@ -1191,6 +1199,18 @@ extern "C" {
             const char * grammar_str,
             const char * grammar_root);

     /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first.
     LLAMA_API struct llama_sampler * llama_sampler_init_penalties(
                              int32_t   penalty_last_n,   // last n tokens to penalize (0 = disable penalty, -1 = context size)
     // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
     const float * tensor_split;

     // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
     // If the provided progress_callback returns true, model loading continues.
     // If it returns false, model loading is immediately aborted.
             struct llama_model_params   params),
             "use llama_model_load_from_file instead");

+    // Load the model from a file
+    // If the file is split into multiple parts, the file name must follow this pattern: <name>-%05d-of-%05d.gguf
+    // If the split file name does not follow this pattern, use llama_model_load_from_splits
     LLAMA_API struct llama_model * llama_model_load_from_file(
                              const char * path_model,
               struct llama_model_params   params);

+    // Load the model from multiple splits (support custom naming scheme)
+    // The paths must be in the correct order
+    LLAMA_API struct llama_model * llama_model_load_from_splits(
+                             const char ** paths,
+                                   size_t   n_paths,
+              struct llama_model_params     params);
+
     DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
             "use llama_model_free instead");
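A minimal usage sketch for the new split-aware loader declared above; the file names below are hypothetical and only illustrate that the splits must be passed in order:

    const char * paths[] = {
        "my-model-00001-of-00002.gguf",   // hypothetical first split
        "my-model-00002-of-00002.gguf",   // hypothetical second split, must follow the first
    };

    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_model_load_from_splits(paths, 2, mparams);
    if (model == NULL) {
        // loading failed (empty list, missing file, or incompatible splits)
    }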
     LLAMA_API uint64_t llama_model_size(const struct llama_model * model);

     // Get the default chat template. Returns nullptr if not available
+    // If name is NULL, returns the default chat template
+    LLAMA_API const char * llama_model_chat_template(const struct llama_model * model, const char * name);

     // Returns the total number of parameters in the model
     LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
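A short sketch of querying the new two-argument accessor; passing NULL for name requests the model's default template:

    const char * tmpl = llama_model_chat_template(model, /*name =*/ NULL);
    if (tmpl == NULL) {
        // the GGUF metadata carries no chat template; fall back to an application-supplied one
    }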
     LLAMA_API llama_token llama_vocab_fim_rep(const struct llama_vocab * vocab);
     LLAMA_API llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab);

+    DEPRECATED(LLAMA_API const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_text instead");
     DEPRECATED(LLAMA_API float llama_token_get_score(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_score instead");
     DEPRECATED(LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_attr instead");
     DEPRECATED(LLAMA_API bool llama_token_is_eog(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_eog instead");
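The deprecation messages point at the llama_vocab_* replacements, so migrating is a one-line change per call site, e.g.:

    // before (deprecated): const char * text = llama_token_get_text(vocab, token);
    const char * text = llama_vocab_get_text(vocab, token);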
             const char * grammar_str,
             const char * grammar_root);

+    /// @details Lazy grammar sampler, introduced in https://github.com/ggerganov/llama.cpp/pull/9639
+    /// @param trigger_words A list of words that will trigger the grammar sampler. This may be updated to a loose regex syntax (w/ ^) in a near future.
+    /// @param trigger_tokens A list of tokens that will trigger the grammar sampler.
+    LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy(
+            const struct llama_vocab * vocab,
+                          const char * grammar_str,
+                          const char * grammar_root,
+                         const char ** trigger_words,
+                                size_t num_trigger_words,
+                   const llama_token * trigger_tokens,
+                                size_t num_trigger_tokens);
+
     /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first.
     LLAMA_API struct llama_sampler * llama_sampler_init_penalties(
                              int32_t   penalty_last_n,   // last n tokens to penalize (0 = disable penalty, -1 = context size)
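A hedged usage sketch for the lazy grammar sampler declared above; the grammar string and trigger word are placeholders, and passing NULL/0 for the token triggers (using only word triggers) is an assumption:

    const char * trigger_words[] = { "<tool_call>" };   // hypothetical trigger word

    struct llama_sampler * smpl = llama_sampler_init_grammar_lazy(
            vocab,
            grammar_str,          // a GBNF grammar defined elsewhere
            "root",               // root rule of that grammar
            trigger_words, 1,
            /*trigger_tokens =*/ NULL, 0);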
examples/talk-llama/unicode.cpp
CHANGED
@@ -7,18 +7,17 @@
 #include <algorithm>
 #include <cassert>
 #include <cstddef>
 #include <cstdint>
 #include <map>
 #include <regex>
 #include <stdexcept>
 #include <string>
 #include <unordered_map>
-#include <unordered_set>
 #include <utility>
 #include <vector>
-#include <locale>
-#include <codecvt>

 size_t unicode_len_utf8(char src) {
     const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
 #include <algorithm>
 #include <cassert>
+#include <codecvt>
 #include <cstddef>
 #include <cstdint>
+#include <locale>
 #include <map>
 #include <regex>
 #include <stdexcept>
 #include <string>
 #include <unordered_map>
 #include <utility>
 #include <vector>

 size_t unicode_len_utf8(char src) {
     const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };