From b13fa0b9c6a783ffac8662f4b85689ead3e8b6d2 Mon Sep 17 00:00:00 2001
From: edp1096
Date: Wed, 7 Jun 2023 22:50:43 +0900
Subject: [PATCH 1/2] Add opencl release memory

---
 ggml-opencl.cpp | 9 +++++++++
 ggml-opencl.h   | 2 ++
 llama.cpp       | 5 +++++
 3 files changed, 16 insertions(+)

diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
index 81a975cf8b4ea..0a34f52ff2cde 100644
--- a/ggml-opencl.cpp
+++ b/ggml-opencl.cpp
@@ -662,6 +662,15 @@ static void ggml_cl_pool_free(cl_mem mem, size_t size) {
     clReleaseMemObject(mem);
 }
 
+void ggml_cl_data_free(const struct ggml_tensor* tensor) {
+    if (tensor->backend != GGML_BACKEND_GPU) {
+        return;
+    }
+
+    cl_mem mem = (cl_mem)tensor->data;
+    clReleaseMemObject(mem);
+}
+
 static cl_int ggml_cl_h2d_tensor_2d(cl_command_queue queue, cl_mem dst, size_t offset, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cl_event* ev) {
     cl_int err;
     const uint64_t ne0 = src->ne[0];
diff --git a/ggml-opencl.h b/ggml-opencl.h
index c850bb8ad1d06..293faa6275bae 100644
--- a/ggml-opencl.h
+++ b/ggml-opencl.h
@@ -16,6 +16,8 @@ void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor
 void * ggml_cl_host_malloc(size_t size);
 void ggml_cl_host_free(void * ptr);
 
+void ggml_cl_data_free(const struct ggml_tensor* tensor);
+
 void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
 void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset);
 
diff --git a/llama.cpp b/llama.cpp
index 16d6f6ef1c68c..25282add1ef99 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -211,6 +211,11 @@ struct llama_model {
             ggml_cuda_free_data(tensors_by_name[i].second);
         }
 #endif // GGML_USE_CUBLAS
+#if defined(GGML_USE_CLBLAST)
+        for (size_t i = 0; i < tensors_by_name.size(); ++i) {
+            ggml_cl_data_free(tensors_by_name[i].second);
+        }
+#endif // GGML_USE_CLBLAST
     }
 };
 

From db8781dfd0db973f65bb24276a886faefe0665ec Mon Sep 17 00:00:00 2001
From: edp1096
Date: Fri, 9 Jun 2023 09:23:52 +0900
Subject: [PATCH 2/2] Rename function name

---
 ggml-opencl.cpp | 2 +-
 ggml-opencl.h   | 2 +-
 llama.cpp       | 7 +++----
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
index 0a34f52ff2cde..7b6daf4a87e85 100644
--- a/ggml-opencl.cpp
+++ b/ggml-opencl.cpp
@@ -662,7 +662,7 @@ static void ggml_cl_pool_free(cl_mem mem, size_t size) {
     clReleaseMemObject(mem);
 }
 
-void ggml_cl_data_free(const struct ggml_tensor* tensor) {
+void ggml_cl_free_data(const struct ggml_tensor* tensor) {
     if (tensor->backend != GGML_BACKEND_GPU) {
         return;
     }
diff --git a/ggml-opencl.h b/ggml-opencl.h
index 293faa6275bae..bf95e5cd0b9de 100644
--- a/ggml-opencl.h
+++ b/ggml-opencl.h
@@ -16,7 +16,7 @@ void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor
 void * ggml_cl_host_malloc(size_t size);
 void ggml_cl_host_free(void * ptr);
 
-void ggml_cl_data_free(const struct ggml_tensor* tensor);
+void ggml_cl_free_data(const struct ggml_tensor* tensor);
 
 void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
 void ggml_cl_load_data(const char * fname, struct ggml_tensor * tensor, size_t offset);
diff --git a/llama.cpp b/llama.cpp
index 25282add1ef99..f40c5afa2fc4f 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -210,12 +210,11 @@ struct llama_model {
         for (size_t i = 0; i < tensors_by_name.size(); ++i) {
             ggml_cuda_free_data(tensors_by_name[i].second);
         }
-#endif // GGML_USE_CUBLAS
-#if defined(GGML_USE_CLBLAST)
+#elif defined(GGML_USE_CLBLAST)
         for (size_t i = 0; i < tensors_by_name.size(); ++i) {
-            ggml_cl_data_free(tensors_by_name[i].second);
+            ggml_cl_free_data(tensors_by_name[i].second);
         }
-#endif // GGML_USE_CLBLAST
+#endif
     }
 };
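
For readers following the series: after both commits apply, the model destructor frees OpenCL weight buffers alongside the existing CUDA path. Below is a minimal, self-contained sketch of the resulting shape; llama_model_sketch is an illustrative stand-in for llama_model (not code from this patch), and only ggml_cl_free_data and tensors_by_name come from the diff above.

    #include <string>
    #include <utility>
    #include <vector>

    struct ggml_tensor;  // opaque here; defined in ggml.h

    // Declared in ggml-opencl.h after this series; for GPU-backed tensors it
    // calls clReleaseMemObject() on the cl_mem handle stored in tensor->data.
    void ggml_cl_free_data(const struct ggml_tensor * tensor);

    // Illustrative stand-in for llama_model.
    struct llama_model_sketch {
        // Mirrors llama_model::tensors_by_name: name -> weight tensor.
        std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;

        ~llama_model_sketch() {
    #if defined(GGML_USE_CLBLAST)
            // Release each OpenCL buffer once when the model is destroyed,
            // mirroring the existing cleanup under GGML_USE_CUBLAS.
            for (size_t i = 0; i < tensors_by_name.size(); ++i) {
                ggml_cl_free_data(tensors_by_name[i].second);
            }
    #endif
        }
    };

Note the second commit also folds the two preprocessor blocks into one #if / #elif chain, which reflects that the CUDA and OpenCL backends are mutually exclusive at build time.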