
Commit aeafca4

vad : fix memory leak by storing ggml_context in vad context struct
This commit addresses a memory leak in the voice activity detection (VAD) code, where the ggml_context is not stored within the VAD context structure. The motivation for this change is that the context memory stays allocated, with the state tensors still pointing into it, but is never freed. Storing the context in whisper_vad_context allows whisper_vad_free to release it. Resolves: #3452
1 parent 7849aff commit aeafca4
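
A minimal sketch of the ownership pattern this commit introduces, assuming only the ggml calls that appear in the diff below (ggml_init, ggml_new_tensor_1d, ggml_free, ggml_tensor_overhead); the struct and method names are hypothetical stand-ins for whisper_vad_context, not the actual whisper.cpp code. The point is that the ggml_context handle is kept on the owning struct so the teardown path can free it, instead of living in a local variable that goes out of scope:

// Sketch only: assumes ggml.h is available; vad_state_sketch, init() and
// release() are hypothetical names, not whisper.cpp symbols.
#include "ggml.h"

struct vad_state_sketch {
    ggml_context * ctx     = nullptr; // owned; released in release()
    ggml_tensor  * h_state = nullptr; // tensor metadata lives in ctx's memory pool
    ggml_tensor  * c_state = nullptr;

    bool init(int64_t lstm_hidden_size) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 2*ggml_tensor_overhead(), // room for two tensor headers
            /*.mem_buffer =*/ nullptr,
            /*.no_alloc   =*/ true,                     // tensor data is allocated elsewhere
        };

        // Store the context on the struct instead of in a local variable,
        // so release() can still reach it after init() returns.
        ctx = ggml_init(params);
        if (!ctx) {
            return false;
        }

        h_state = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, lstm_hidden_size);
        c_state = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, lstm_hidden_size);
        return true;
    }

    void release() {
        if (ctx) {
            ggml_free(ctx); // without the stored pointer this allocation would leak
            ctx = nullptr;
        }
    }
};

The diff applies exactly this change: the local ggml_context * ctx in whisper_vad_init_context becomes the stored vctx->ctx, and whisper_vad_free gains the matching ggml_free call.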

1 file changed: +7 −6 lines

src/whisper.cpp

Lines changed: 7 additions & 6 deletions
@@ -4402,6 +4402,7 @@ struct whisper_vad_context {
     std::vector<ggml_backend_t> backends;
     ggml_backend_buffer_t buffer = nullptr;
     whisper_context_params params;
+    ggml_context * ctx = nullptr;
     std::vector<uint8_t> ctx_buf;
     whisper_sched sched;

@@ -4661,21 +4662,21 @@ static bool whisper_vad_init_context(whisper_vad_context * vctx) {
         /*.no_alloc =*/ true,
     };

-    ggml_context * ctx = ggml_init(params);
-    if (!ctx) {
+    vctx->ctx = ggml_init(params);
+    if (!vctx->ctx) {
         WHISPER_LOG_ERROR("%s: failed to init LSTM state ggml context\n", __func__);
         return false;
     }

     // LSTM Hidden state
-    vctx->h_state = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, lstm_hidden_size);
+    vctx->h_state = ggml_new_tensor_1d(vctx->ctx, GGML_TYPE_F32, lstm_hidden_size);
     ggml_set_name(vctx->h_state, "h_state");

     // LSTM Cell state
-    vctx->c_state = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, lstm_hidden_size);
+    vctx->c_state = ggml_new_tensor_1d(vctx->ctx, GGML_TYPE_F32, lstm_hidden_size);
     ggml_set_name(vctx->c_state, "c_state");

-    vctx->buffer = ggml_backend_alloc_ctx_tensors(ctx, vctx->backends[0]);
+    vctx->buffer = ggml_backend_alloc_ctx_tensors(vctx->ctx, vctx->backends[0]);
     if (!vctx->buffer) {
         WHISPER_LOG_ERROR("%s: failed to allocate memory for the VAD state\n", __func__);
         return false;
@@ -5433,7 +5434,7 @@ void whisper_vad_free(whisper_vad_context * ctx) {
         for (auto & backend : ctx->backends) {
             ggml_backend_free(backend);
         }
-
+        ggml_free(ctx->ctx);

         delete ctx;
     }
