Commit

Merge remote-tracking branch 'origin/lcpp_pr_faster_session_size' into rebase_170171
Nexesenex committed Aug 8, 2024
2 parents a0ea2f4 + 06bff76 commit 02b2800
Showing 1 changed file with 28 additions and 11 deletions.
39 changes: 28 additions & 11 deletions src/llama.cpp
@@ -19826,6 +19826,7 @@ bool llama_save_session_file(struct llama_context * ctx, const char * path_sessi
 // TODO: replace all non-fatal assertions with returned errors or exceptions
 struct llama_data_write {
     virtual void write(const void * src, size_t size) = 0;
+    virtual void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) = 0;
     virtual size_t get_size_written() = 0;
     virtual ~llama_data_write() = default;

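The new pure virtual write_tensor_data shifts responsibility for fetching tensor bytes out of the KV-cache serialization loops and into the writer: callers now pass (tensor, offset, size), and each concrete writer decides whether the bytes actually need to be copied. Below is a minimal, self-contained sketch of that pattern, not llama.cpp itself; toy_tensor and toy_tensor_get are hypothetical stand-ins for ggml_tensor and ggml_backend_tensor_get so the example compiles on its own, and the later sketches build on these definitions.

// Hypothetical stand-ins for ggml_tensor and ggml_backend_tensor_get,
// used only so this sketch is self-contained.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct toy_tensor {
    std::vector<uint8_t> bytes; // pretend backend/device memory
};

// stand-in for ggml_backend_tensor_get: copy a byte range out of the tensor
static void toy_tensor_get(const toy_tensor & t, void * dst, size_t offset, size_t size) {
    std::memcpy(dst, t.bytes.data() + offset, size);
}

// The writer interface from the diff, reduced to its essentials.
struct data_write {
    virtual void write(const void * src, size_t size) = 0;
    // Each concrete writer decides how (or whether) to materialize the bytes.
    virtual void write_tensor_data(const toy_tensor & t, size_t offset, size_t size) = 0;
    virtual size_t get_size_written() = 0;
    virtual ~data_write() = default;
};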
@@ -19948,9 +19949,8 @@ struct llama_data_write {
             // Read each range of cells of k_size length each into tmp_buf and write out
             for (const auto & range : cell_ranges) {
                 const size_t range_size = range.second - range.first;
-                tmp_buf.resize(range_size * k_size_row);
-                ggml_backend_tensor_get(kv_self.k_l[il], tmp_buf.data(), range.first * k_size_row, range_size * k_size_row);
-                write(tmp_buf.data(), tmp_buf.size());
+                const size_t buf_size = range_size * k_size_row;
+                write_tensor_data(kv_self.k_l[il], range.first * k_size_row, buf_size);
             }
         }

@@ -19969,9 +19969,8 @@ struct llama_data_write {
                 // Read each range of cells of v_size length each into tmp_buf and write out
                 for (const auto & range : cell_ranges) {
                     const size_t range_size = range.second - range.first;
-                    tmp_buf.resize(range_size * v_size_row);
-                    ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), range.first * v_size_row, range_size * v_size_row);
-                    write(tmp_buf.data(), tmp_buf.size());
+                    const size_t buf_size = range_size * v_size_row;
+                    write_tensor_data(kv_self.v_l[il], range.first * v_size_row, buf_size);
                 }
             }
         } else {
@@ -19997,9 +19996,8 @@ struct llama_data_write {
                     for (const auto & range : cell_ranges) {
                         const size_t range_size = range.second - range.first;
                         const size_t src_offset = (range.first + j * kv_size) * v_size_el;
-                        tmp_buf.resize(range_size * v_size_el);
-                        ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), src_offset, tmp_buf.size());
-                        write(tmp_buf.data(), tmp_buf.size());
+                        const size_t buf_size = range_size * v_size_el;
+                        write_tensor_data(kv_self.v_l[il], src_offset, buf_size);
                     }
                 }
             }
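The src_offset arithmetic in this last hunk is where the transposed V layout shows: a range of cells is contiguous within one row, but row j of the cache starts kv_size cells further in, so each (row, range) pair becomes its own write_tensor_data call at offset (range.first + j * kv_size) * v_size_el. A small self-contained example with made-up numbers (the sizes and the range below are illustrative only):

// Worked example of the transposed-V offset arithmetic, with toy numbers.
#include <cstddef>
#include <cstdio>

int main() {
    const size_t kv_size   = 8; // toy: cells per row of the transposed V cache
    const size_t v_size_el = 2; // toy: bytes per element
    const size_t first = 2, last = 5; // toy: one cell range [2, 5)

    // The same cell range lives at a different byte offset in every row,
    // so one logical range turns into one read per row.
    for (size_t j = 0; j < 3; ++j) { // toy: 3 rows (n_embd_v_gqa)
        const size_t src_offset = (first + j * kv_size) * v_size_el;
        const size_t buf_size   = (last - first) * v_size_el;
        std::printf("row %zu: read %zu bytes at offset %zu\n", j, buf_size, src_offset);
    }
    return 0;
}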
@@ -20358,12 +20356,14 @@ struct llama_data_write_dummy : llama_data_write {
 
     llama_data_write_dummy() {}
 
-    // TODO: avoid unnecessary calls to ggml_backend_tensor_get in a dummy context
-
     void write(const void * /* src */, size_t size) override {
         size_written += size;
     }
 
+    void write_tensor_data(const struct ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override {
+        size_written += size;
+    }
+
     size_t get_size_written() override {
         return size_written;
     }
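This dummy writer is the point of the change, as the removed TODO suggests: a size query runs the full serialization pass, but because its write_tensor_data only bumps a counter, no KV-cache bytes are ever fetched from the backend. The same idea in the toy terms of the first sketch (builds on the data_write interface defined there):

// Size-only writer, building on the toy data_write interface sketched earlier.
// Counting replaces copying, which is what makes size queries cheap.
struct data_write_dummy : data_write {
    size_t size_written = 0;

    void write(const void * /* src */, size_t size) override {
        size_written += size;
    }

    // No toy_tensor_get call here: the tensor bytes are never materialized.
    void write_tensor_data(const toy_tensor & /* t */, size_t /* offset */, size_t size) override {
        size_written += size;
    }

    size_t get_size_written() override { return size_written; }
};

The branch name ('lcpp_pr_faster_session_size') matches this reading: the session-size query drives the same serialization code with the dummy writer, so after this change it no longer pays for tensor reads.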
@@ -20386,6 +20386,16 @@ struct llama_data_write_buffer : llama_data_write {
         buf_size -= size;
     }
 
+    void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override {
+        if (size > buf_size) {
+            throw std::runtime_error("unexpectedly reached end of buffer");
+        }
+        ggml_backend_tensor_get(tensor, ptr, offset, size);
+        ptr += size;
+        size_written += size;
+        buf_size -= size;
+    }
+
     size_t get_size_written() override {
         return size_written;
     }
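The buffer writer gains a different benefit: instead of staging tensor bytes in a temporary and then copying them into the output buffer, it reads them straight into the destination, turning the old double copy into a single one. A sketch, again building on the toy interface from the first example:

// Buffer writer, building on the earlier toy interface. Tensor bytes are
// fetched directly into the destination (ptr), so the intermediate tmp_buf
// copy of the old code disappears.
#include <stdexcept>

struct data_write_buffer : data_write {
    uint8_t * ptr;              // next free byte of the destination buffer
    size_t    buf_size;         // bytes remaining in the destination
    size_t    size_written = 0;

    data_write_buffer(uint8_t * p, size_t size) : ptr(p), buf_size(size) {}

    void write(const void * src, size_t size) override {
        if (size > buf_size) {
            throw std::runtime_error("unexpectedly reached end of buffer");
        }
        std::memcpy(ptr, src, size);
        ptr += size;
        size_written += size;
        buf_size -= size;
    }

    void write_tensor_data(const toy_tensor & t, size_t offset, size_t size) override {
        if (size > buf_size) {
            throw std::runtime_error("unexpectedly reached end of buffer");
        }
        toy_tensor_get(t, ptr, offset, size); // copy once, straight to the destination
        ptr += size;
        size_written += size;
        buf_size -= size;
    }

    size_t get_size_written() override { return size_written; }
};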
@@ -20421,6 +20431,7 @@ struct llama_data_read_buffer : llama_data_read {
 struct llama_data_write_file : llama_data_write {
     llama_file * file;
     size_t size_written = 0;
+    std::vector<uint8_t> temp_buffer;
 
     llama_data_write_file(llama_file * f) : file(f) {}

@@ -20429,6 +20440,12 @@ struct llama_data_write_file : llama_data_write {
         size_written += size;
     }
 
+    void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override {
+        temp_buffer.resize(size);
+        ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size);
+        write(temp_buffer.data(), temp_buffer.size());
+    }
+
     size_t get_size_written() override {
         return size_written;
    }
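A file writer cannot avoid staging memory entirely, but making temp_buffer a member rather than a per-range local means its capacity is reused across calls: std::vector::resize only reallocates when the requested size exceeds the current capacity. A sketch on top of the earlier toy interface, with std::FILE standing in for llama_file:

// File writer, building on the earlier toy interface. temp_buffer is a member,
// so repeated calls reuse its capacity instead of reallocating per range.
#include <cstdio>

struct data_write_file : data_write {
    std::FILE * file;           // assumed already open for writing
    size_t size_written = 0;
    std::vector<uint8_t> temp_buffer;

    explicit data_write_file(std::FILE * f) : file(f) {}

    void write(const void * src, size_t size) override {
        std::fwrite(src, 1, size, file); // error handling omitted in this sketch
        size_written += size;
    }

    void write_tensor_data(const toy_tensor & t, size_t offset, size_t size) override {
        temp_buffer.resize(size); // reallocates only when size exceeds capacity
        toy_tensor_get(t, temp_buffer.data(), offset, size);
        write(temp_buffer.data(), temp_buffer.size());
    }

    size_t get_size_written() override { return size_written; }
};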
