From ed926d8833df3c29797edbc98dafd9b575aa0729 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 7 Feb 2025 16:05:34 +0200
Subject: [PATCH] llama : fix defrag logic (#11707)

* llama : fix defrag logic

ggml-ci

* cont : better logic

ggml-ci

* cont : clamp fragmentation to 0.0

ggml-ci
---
 src/llama.cpp | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index c3da3c43be7ef..3b6a21d81f186 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -8801,12 +8801,14 @@ static int llama_decode_impl(
     //llama_synchronize(&lctx);

     // decide if we need to defrag the kv cache
-    if (cparams.causal_attn && cparams.defrag_thold >= 0.0f) {
-        const float fragmentation = kv_self.n >= 128 ? 1.0f - float(kv_self.used)/float(kv_self.n) : 0.0f;
+    if (cparams.causal_attn && cparams.defrag_thold > 0.0f) {
+        // - do not defrag small contexts (i.e. < 2048 tokens)
+        // - count the padding towards the number of used tokens
+        const float fragmentation = kv_self.n >= 2048 ? std::max(0.0f, 1.0f - float(kv_self.used + llama_kv_cache_get_padding(cparams))/float(kv_self.n)) : 0.0f;

         // queue defragmentation for next llama_kv_cache_update
         if (fragmentation > cparams.defrag_thold) {
-            //LLAMA_LOG_INFO("fragmentation: %.2f\n", fragmentation);
+            LLAMA_LOG_DEBUG("%s: fragmentation: %.2f - requesting defrag\n", __func__, fragmentation);

             llama_kv_cache_defrag(kv_self);
         }
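
For reference, below is a minimal standalone sketch of the patched fragmentation heuristic. It is not the llama.cpp implementation: the kv_cache_state struct and the padding constant are hypothetical stand-ins for kv_self and llama_kv_cache_get_padding(cparams). It illustrates the three behaviors the patch introduces: small contexts (< 2048 cells) never report fragmentation, the padding is counted towards the used cells, and the result is clamped to 0.0 so it can never go negative when used + padding exceeds n.

// frag_sketch.cpp - illustration only, not llama.cpp code
#include <algorithm>
#include <cstdio>

// hypothetical stand-in for the relevant fields of kv_self
struct kv_cache_state {
    int n;    // current size of the KV cache window (in cells)
    int used; // number of cells actually occupied
};

// mirrors the patched logic in llama_decode_impl
static float fragmentation(const kv_cache_state & kv, int padding) {
    // do not defrag small contexts
    if (kv.n < 2048) {
        return 0.0f;
    }
    // count the padding towards the used cells; clamp to 0.0
    return std::max(0.0f, 1.0f - float(kv.used + padding)/float(kv.n));
}

int main() {
    const int padding = 32; // hypothetical padding value

    // small context: reports 0.0 regardless of holes, so defrag is never queued
    std::printf("n=1024 used=100  -> %.2f\n", fragmentation({1024,  100}, padding));

    // large, sparsely used cache: high fragmentation, likely above defrag_thold
    std::printf("n=4096 used=512  -> %.2f\n", fragmentation({4096,  512}, padding));

    // used + padding exceeds n: the clamp keeps the result at 0.0
    std::printf("n=4096 used=4090 -> %.2f\n", fragmentation({4096, 4090}, padding));

    return 0;
}

Note the interaction with the threshold check: the condition changed from defrag_thold >= 0.0f to defrag_thold > 0.0f, so a threshold of exactly 0.0 now disables the check entirely, and with the clamp a fully packed cache (fragmentation 0.0) can no longer satisfy fragmentation > cparams.defrag_thold.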