author    Kent Overstreet <kent.overstreet@linux.dev>  2025-02-05 19:13:39 -0500
committer Kent Overstreet <kent.overstreet@linux.dev>  2025-03-14 21:02:13 -0400
commit    35282ce9e82f6e4c044e6a74b6fef45dd4996718 (patch)
tree      36a85d725079d1df61f74f8880ae1c2806baafd3 /fs/bcachefs/journal_io.c
parent    bcachefs: Don't touch journal_buf->data->seq in journal_res_get (diff)
bcachefs: Free journal bufs when not in use
Since we're increasing the number of 'struct journal_bufs', we don't want them all permanently holding onto buffers for the journal data - that'd be 16 * 2MB = 32MB, or potentially more.

Add a single-element mempool (open coded, since buffer size varies); this also means we won't be hitting the memory allocator every time we open and close a journal entry/buffer.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
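As a rough illustration of the "open coded single-element mempool" idea (not the kernel code itself): the cache holds at most one buffer; a writer returning its buffer swaps it into the cache if it is larger than what is already cached, and whatever loses the swap is freed outside the lock. The sketch below is userspace C under those assumptions, with a pthread mutex standing in for j->lock and invented names (buf_cache, cache_get, cache_put) that do not appear in the kernel sources.

/*
 * Illustrative single-element buffer cache, loosely modelled on the
 * pattern described in the commit message above.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf_cache {
	pthread_mutex_t	lock;
	void		*free_buf;	/* at most one cached buffer */
	size_t		free_buf_size;
};

/* Take a buffer of at least *size bytes, reusing the cached one if it fits. */
static void *cache_get(struct buf_cache *c, size_t *size)
{
	void *buf = NULL;

	pthread_mutex_lock(&c->lock);
	if (c->free_buf && c->free_buf_size >= *size) {
		buf = c->free_buf;
		*size = c->free_buf_size;	/* caller tracks the real size */
		c->free_buf = NULL;
		c->free_buf_size = 0;
	}
	pthread_mutex_unlock(&c->lock);

	return buf ? buf : malloc(*size);
}

/*
 * Return a buffer: keep the larger of the returned and cached buffers,
 * and free the loser outside the lock (mirroring how the patch drops
 * j->lock around kvfree()).
 */
static void cache_put(struct buf_cache *c, void *buf, size_t size)
{
	pthread_mutex_lock(&c->lock);
	if (!c->free_buf || c->free_buf_size < size) {
		void *tmp = c->free_buf;
		size_t tmp_size = c->free_buf_size;

		c->free_buf = buf;
		c->free_buf_size = size;
		buf = tmp;
		size = tmp_size;
	}
	pthread_mutex_unlock(&c->lock);

	free(buf);		/* free(NULL) is a no-op */
	(void) size;
}

int main(void)
{
	struct buf_cache c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	size_t sz_a = 1 << 20;
	void *a = cache_get(&c, &sz_a);	/* nothing cached yet: falls back to malloc() */
	if (!a)
		return 1;
	memset(a, 0, sz_a);
	cache_put(&c, a, sz_a);		/* cached rather than freed */

	size_t sz_b = 4096;
	void *b = cache_get(&c, &sz_b);	/* reuses the cached 1 MiB buffer */
	printf("reused cached buffer: %s, size %zu\n", a == b ? "yes" : "no", sz_b);
	cache_put(&c, b, sz_b);

	free(c.free_buf);
	return 0;
}

The second cache_get() call reuses the 1 MiB buffer instead of allocating again, which is the behaviour the commit message is after: the allocator is no longer hit every time a journal entry/buffer is opened and closed.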
Diffstat (limited to 'fs/bcachefs/journal_io.c')
-rw-r--r--  fs/bcachefs/journal_io.c  17
1 file changed, 16 insertions, 1 deletion
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index f2ff28e6697c..61f71e7baff2 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1640,6 +1640,21 @@ static CLOSURE_CALLBACK(journal_write_done)
 		j->err_seq	= seq;
 	w->write_done = true;
 
+	if (!j->free_buf || j->free_buf_size < w->buf_size) {
+		swap(j->free_buf, w->data);
+		swap(j->free_buf_size, w->buf_size);
+	}
+
+	if (w->data) {
+		void *buf = w->data;
+		w->data = NULL;
+		w->buf_size = 0;
+
+		spin_unlock(&j->lock);
+		kvfree(buf);
+		spin_lock(&j->lock);
+	}
+
 	bool completed = false;
 
 	for (seq = journal_last_unwritten_seq(j);
@@ -1649,7 +1664,7 @@ static CLOSURE_CALLBACK(journal_write_done)
 		if (!w->write_done)
 			break;
 
-		if (!j->err_seq && !JSET_NO_FLUSH(w->data)) {
+		if (!j->err_seq && !w->noflush) {
 			j->flushed_seq_ondisk = seq;
 			j->last_seq_ondisk = w->last_seq;
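A note on the hunks above: the swap() keeps the larger of the two buffers in j->free_buf, and the loser is kvfree()d with j->lock dropped, presumably so a potentially large buffer is never freed under the spinlock. The change from JSET_NO_FLUSH(w->data) to w->noflush in the second hunk looks like a direct consequence of the first: once w->data may already have been handed to j->free_buf or freed, the completion loop can no longer read the flush flag out of the jset and must use the copy cached in the journal_buf.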