ggml: add new member in GGML's internal data structure #6815
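In brief: this change adds a use_hwaccel flag to ggml_init_params and to the internal ggml_context, and a new int32_t rank member (with widened padding) to ggml_tensor. When a context is initialized with use_hwaccel set, tensors created from it are tagged GGML_BACKEND_TYPE_GPU at creation time; the bulk of the diff updates every ggml_init_params initializer in the tree for the new field.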

Closed
2 changes: 2 additions & 0 deletions common/common.cpp
@@ -2810,6 +2810,7 @@ static llama_control_vector_data llama_control_vector_load_one(const llama_contr
/* .mem_size = */ ggml_tensor_overhead() * 128 + ggml_graph_overhead(),
/* .mem_buffer = */ nullptr,
/* .no_alloc = */ true,
/* .use_hwaccel = */ false
};
ggml_context * meta_ctx = ggml_init(meta_params);
struct gguf_init_params meta_gguf_params = {
@@ -2880,6 +2881,7 @@ static llama_control_vector_data llama_control_vector_load_one(const llama_contr
/* .mem_size = */ ggml_tensor_overhead() * n_tensors + n_bytes,
/* .mem_buffer = */ nullptr,
/* .no_alloc = */ false,
/* .use_hwaccel = */ false
};
struct ggml_context * ctx = ggml_init(ggml_params);

2 changes: 2 additions & 0 deletions examples/baby-llama/baby-llama.cpp
@@ -1522,6 +1522,7 @@ int main(int argc, char ** argv) {
/*.mem_size =*/ compute_size,
/*.mem_buffer =*/ compute_addr,
/*.no_alloc =*/ false,
/*.use_hwaccel=*/ false
};

struct ggml_context * ctx0 = ggml_init(params);
@@ -1598,6 +1599,7 @@ int main(int argc, char ** argv) {
/*.mem_size =*/ compute_size,
/*.mem_buffer =*/ compute_addr,
/*.no_alloc =*/ false,
/*.use_hwaccel=*/ false
};
struct ggml_context * ctx0 = ggml_init(params);

3 changes: 2 additions & 1 deletion examples/benchmark/benchmark-matmult.cpp
@@ -143,7 +143,8 @@ int main(int argc, char ** argv) {
struct ggml_init_params params = {
/*.mem_size =*/ ctx_size,
/*.mem_buffer =*/ NULL,
/* no_alloc =*/ 0
/* no_alloc =*/ 0,
/* use_hwaccel=*/ 0
};

ctx = ggml_init(params);
3 changes: 3 additions & 0 deletions examples/finetune/finetune.cpp
@@ -1634,6 +1634,7 @@ int main(int argc, char ** argv) {
ggml_tensor_overhead() * 2, // mem_size
NULL, // mem_buffer
true, // no_alloc
false // use_hwaccel
};
struct ggml_context * ctx_input = ggml_init(ctx_input_params);

@@ -1656,6 +1657,7 @@ int main(int argc, char ** argv) {
estimated_compute_size_wo_data, // mem_size
NULL, // mem_buffer
true, // no_alloc
false // use_hwaccel
};
struct ggml_context * ctx_compute = NULL;

@@ -1825,6 +1827,7 @@ int main(int argc, char ** argv) {
max_work_size, // mem_size
NULL, // mem_buffer
false, // no_alloc
false // use_hwaccel
};
struct ggml_context * ctx_work = ggml_init(ctx_work_params);

1 change: 1 addition & 0 deletions examples/gguf/gguf.cpp
@@ -43,6 +43,7 @@ static bool gguf_ex_write(const std::string & fname) {
/*.mem_size =*/ 128ull*1024ull*1024ull,
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ false,
/*.use_hwaccel=*/ false
};

struct ggml_context * ctx_data = ggml_init(params);
6 changes: 4 additions & 2 deletions examples/llava/clip.cpp
@@ -543,6 +543,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
/*.mem_size =*/ ctx->buf_compute_meta.size(),
/*.mem_buffer =*/ ctx->buf_compute_meta.data(),
/*.no_alloc =*/ true,
/*.use_hwaccel=*/ false
};

struct ggml_context * ctx0 = ggml_init(params);
@@ -1020,9 +1021,10 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
{
std::vector<uint8_t> read_buf;
struct ggml_init_params params = {
/*.mem_size =*/ (n_tensors + 1) * ggml_tensor_overhead(),
/*.mem_size =*/ (n_tensors + 1) * ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
/*.no_alloc =*/ true,
/*.use_hwaccel=*/ false
};

new_clip->ctx_data = ggml_init(params);
1 change: 1 addition & 0 deletions examples/llava/llava.cpp
@@ -114,6 +114,7 @@ static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *>
/*.mem_size =*/ ctx_size,
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ false, // NOTE: this should be false when using the legacy API
/*.use_hwaccel=*/ false
};

// Python reference code for full unpad:
3 changes: 3 additions & 0 deletions examples/train-text-from-scratch/train-text-from-scratch.cpp
@@ -1044,6 +1044,7 @@ int main(int argc, char ** argv) {
ggml_tensor_overhead() * 2, // mem_size
NULL, // mem_buffer
true, // no_alloc
false // use_hwaccel
};
struct ggml_context * ctx_input = ggml_init(ctx_input_params);

@@ -1066,6 +1067,7 @@ int main(int argc, char ** argv) {
estimated_compute_size_wo_data, // mem_size
NULL, // mem_buffer
true, // no_alloc
false // use_hwaccel
};
struct ggml_context * ctx_compute = NULL;

@@ -1218,6 +1220,7 @@ int main(int argc, char ** argv) {
max_work_size, // mem_size
NULL, // mem_buffer
false, // no_alloc
false // use_hwaccel
};
struct ggml_context * ctx_work = ggml_init(ctx_work_params);

6 changes: 4 additions & 2 deletions ggml-backend.c
@@ -1238,7 +1238,8 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
struct ggml_init_params params = {
/* .mem_size = */ sizeof(sched->context_buffer),
/* .mem_buffer = */ sched->context_buffer,
/* .no_alloc = */ true
/* .no_alloc = */ true,
/* .use_hwaccel = */ false
};

ggml_free(sched->ctx);
@@ -1980,7 +1981,8 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s
struct ggml_init_params params = {
/* .mem_size = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false),
/* .mem_buffer = */ NULL,
/* .no_alloc = */ true
/* .no_alloc = */ true,
/* .use_hwaccel = */ false
};

struct ggml_context * ctx_allocated = ggml_init(params);
6 changes: 6 additions & 0 deletions ggml.c
@@ -2200,6 +2200,7 @@ struct ggml_context {
bool mem_buffer_owned;
bool no_alloc;
bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers
bool use_hwaccel;

int n_objects;

@@ -2759,6 +2760,7 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
/*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
/*.no_alloc =*/ params.no_alloc,
/*.no_alloc_save =*/ params.no_alloc,
/*.use_hwaccel =*/ params.use_hwaccel,
/*.n_objects =*/ 0,
/*.objects_begin =*/ NULL,
/*.objects_end =*/ NULL,
@@ -2990,9 +2992,13 @@ static struct ggml_tensor * ggml_new_tensor_impl(
/*.data =*/ obj_alloc_size > 0 ? (void *)(result + 1) : data,
/*.name =*/ { 0 },
/*.extra =*/ NULL,
/*.rank =*/ n_dims,
/*.padding =*/ { 0 },
};

if (ctx->use_hwaccel) {
    result->backend = GGML_BACKEND_TYPE_GPU;
}

// TODO: this should not be needed as long as we don't rely on aligned SIMD loads
//ggml_assert_aligned(result->data);

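The two ggml.c hunks above are where the new flag actually takes effect: the context remembers use_hwaccel, every tensor created from it is tagged for the GPU backend, and the new rank member records n_dims at creation. Below is a minimal sketch of the resulting behavior (not part of this PR; the memory size and tensor shape are arbitrary), assuming the patch is applied:

    #include "ggml.h"
    #include <assert.h>

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
            /*.use_hwaccel=*/ true,
        };
        struct ggml_context * ctx = ggml_init(params);
        assert(ctx != NULL);

        // ggml_new_tensor_impl() sees ctx->use_hwaccel and tags the tensor
        struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
        assert(t->backend == GGML_BACKEND_TYPE_GPU);
        assert(t->rank == 1); // new member, set from n_dims at creation

        ggml_free(ctx);
        return 0;
    }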
5 changes: 4 additions & 1 deletion ggml.h
@@ -591,7 +591,9 @@ extern "C" {

void * extra; // extra things e.g. for ggml-cuda.cu

char padding[8];
int32_t rank;

char padding[20];
};

static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
@@ -657,6 +659,7 @@ extern "C" {
size_t mem_size; // bytes
void * mem_buffer; // if NULL, memory will be allocated internally
bool no_alloc; // don't allocate memory for the tensor data
bool use_hwaccel;
};


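Two notes on this header change: sizeof(struct ggml_tensor) grows by 16 bytes on typical 64-bit ABIs (int32_t rank plus char padding[20] replaces char padding[8]), so GGML_TENSOR_SIZE and anything serialized against it changes as well. And because use_hwaccel is appended to ggml_init_params, initializers that omit it still zero-initialize the field to false; the many call-site updates in this diff are presumably there to keep -Wextra (-Wmissing-field-initializers) builds warning-free and to make the default explicit.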
7 changes: 6 additions & 1 deletion llama.cpp
@@ -2378,6 +2378,7 @@ static bool llama_kv_cache_init(
/*.mem_size =*/ 2u*n_layers*ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
/*.use_hwaccel=*/ false
};
ggml_context * ctx = ggml_init(params);
if (!ctx) {
@@ -4664,6 +4665,7 @@ static bool llm_load_tensors(
/*.mem_size =*/ ctx_size,
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
/*.use_hwaccel=*/ false
};
ggml_context * ctx = ggml_init(params);
if (!ctx) {
@@ -6535,6 +6537,7 @@ struct llm_build_context {
/*.mem_size =*/ buf_compute_meta.size(),
/*.mem_buffer =*/ buf_compute_meta.data(),
/*.no_alloc =*/ true,
/*.use_hwaccel=*/ false
};

ctx0 = ggml_init(params);
@@ -14679,6 +14682,7 @@ static int llama_apply_lora_from_file_internal(
/* .mem_size */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
/* .mem_buffer */ nullptr,
/* .no_alloc */ true,
/* .use_hwaccel */ false
};
ggml_context * lora_ctx = ggml_init(lora_init_params);
if (lora_ctx == nullptr) {
@@ -14929,7 +14933,7 @@ void llama_backend_init(void) {

// needed to initialize f16 tables
{
struct ggml_init_params params = { 0, NULL, false };
struct ggml_init_params params = { 0, NULL, false, false };
struct ggml_context * ctx = ggml_init(params);
ggml_free(ctx);
}
@@ -15540,6 +15544,7 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const
/*.mem_size =*/ n_layers * ggml_tensor_overhead(),
/*.mem_buffer =*/ NULL,
/*.no_alloc =*/ true,
/*.use_hwaccel=*/ false
};
ggml_context * ctx = ggml_init(params);
if (!ctx) {
2 changes: 2 additions & 0 deletions tests/test-backend-ops.cpp
@@ -359,6 +359,7 @@ struct test_case {
/* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
/* .mem_base = */ NULL,
/* .no_alloc = */ true,
/* .use_hwaccel = */ false
};
ggml_context * ctx = ggml_init(params);

@@ -520,6 +521,7 @@ struct test_case {
/* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead_custom(graph_nodes, false),
/* .mem_base = */ NULL,
/* .no_alloc = */ true,
/* .use_hwaccel = */ false
};
ggml_context * ctx = ggml_init(params);

1 change: 1 addition & 0 deletions tests/test-grad0.cpp
@@ -359,6 +359,7 @@ int main(int argc, const char ** argv) {
/* .mem_size = */ 256*1024*1024,
/* .mem_buffer = */ NULL,
/* .no_alloc = */ false,
/* .use_hwaccel = */ false
};

int64_t ne[4];
1 change: 1 addition & 0 deletions tests/test-quantize-fns.cpp
@@ -121,6 +121,7 @@ int main(int argc, char * argv[]) {
/* .mem_size = */ 1*1024,
/* .mem_buffer = */ NULL,
/* .no_alloc = */ true,
/* .use_hwaccel = */ false
};
struct ggml_context * ctx = ggml_init(ggml_params);

1 change: 1 addition & 0 deletions tests/test-quantize-perf.cpp
@@ -265,6 +265,7 @@ int main(int argc, char * argv[]) {
/* .mem_size = */ 1*1024,
/* .mem_buffer = */ NULL,
/* .no_alloc = */ true,
/* .use_hwaccel = */ false
};
struct ggml_context * ctx = ggml_init(ggml_params);

1 change: 1 addition & 0 deletions tests/test-rope.cpp
@@ -128,6 +128,7 @@ int main(int /*argc*/, const char ** /*argv*/) {
/* .mem_size = */ 128*1024*1024,
/* .mem_buffer = */ NULL,
/* .no_alloc = */ false,
/* .use_hwaccel= */ false
};

std::vector<uint8_t> work_buffer;