path: root/fs/netfs/write_collect.c
author	David Howells <dhowells@redhat.com>	2024-12-16 20:40:55 +0000
committer	Christian Brauner <brauner@kernel.org>	2024-12-20 22:34:02 +0100
commit	06fa229ceb36898e68022b5654c017d2c6582d7d (patch)
tree	db540d16164012b660f6096819f5288356749f39	/fs/netfs/write_collect.c
parent	netfs: Add a tracepoint to log the lifespan of folio_queue structs (diff)
download	linux-06fa229ceb36898e68022b5654c017d2c6582d7d.tar.gz
	linux-06fa229ceb36898e68022b5654c017d2c6582d7d.zip
netfs: Abstract out a rolling folio buffer implementation
A rolling buffer is a series of folios held in a list of folio_queues.  New
folios and folio_queue structs may be inserted at the head simultaneously
with spent ones being removed from the tail, without the need for locking.

The rolling buffer includes an iov_iter, and care has to be taken when
managing this as the list of folio_queues is extended, so that an oops isn't
incurred because the iterator was pointing to the end of a folio_queue
segment that got appended to and then removed.

We need to use the mechanism twice, once for read and once for write, and,
in future patches, we will use a second rolling buffer to handle bounce
buffering for content encryption.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-6-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
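As a rough sketch of the abstraction being introduced (simplified for
illustration: the ->tail and ->first_tail_slot fields appear in the hunks
below, while ->head, ->next_head_slot and the embedded iov_iter are inferred
from the description above rather than copied from the real rolling_buffer
definition), the structure pairs a folio_queue list with an iterator:

	/* Simplified illustration, not the verbatim kernel definition. */
	struct rolling_buffer {
		struct folio_queue	*head;		/* Insertion point for new folios/segments */
		struct folio_queue	*tail;		/* Removal point for spent segments */
		struct iov_iter		iter;		/* Iterator over the unconsumed part of the buffer */
		u8			next_head_slot;	/* Next free slot in *head (assumed name) */
		u8			first_tail_slot;/* First unconsumed slot in *tail */
	};

Because the producer only touches the head end and the collector only the
tail end, the two can run concurrently without a lock; that is why the
collector below can drop whole spent segments with
rolling_buffer_delete_spent() while writeback is still appending at the head.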
Diffstat (limited to 'fs/netfs/write_collect.c')
-rw-r--r--	fs/netfs/write_collect.c	19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index ca3a11ed9b54..364c1f9d5815 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -83,9 +83,9 @@ end_wb:
static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
unsigned int *notes)
{
- struct folio_queue *folioq = wreq->buffer;
+ struct folio_queue *folioq = wreq->buffer.tail;
unsigned long long collected_to = wreq->collected_to;
- unsigned int slot = wreq->buffer_head_slot;
+ unsigned int slot = wreq->buffer.first_tail_slot;
if (wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) {
if (netfs_pgpriv2_unlock_copied_folios(wreq))
@@ -94,7 +94,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
}
if (slot >= folioq_nr_slots(folioq)) {
- folioq = netfs_delete_buffer_head(wreq);
+ folioq = rolling_buffer_delete_spent(&wreq->buffer);
+ if (!folioq)
+ return;
slot = 0;
}
@@ -134,9 +136,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
folioq_clear(folioq, slot);
slot++;
if (slot >= folioq_nr_slots(folioq)) {
- if (READ_ONCE(wreq->buffer_tail) == folioq)
- break;
- folioq = netfs_delete_buffer_head(wreq);
+ folioq = rolling_buffer_delete_spent(&wreq->buffer);
+ if (!folioq)
+ goto done;
slot = 0;
}
@@ -144,8 +146,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
break;
}
- wreq->buffer = folioq;
- wreq->buffer_head_slot = slot;
+ wreq->buffer.tail = folioq;
+done:
+ wreq->buffer.first_tail_slot = slot;
}
/*