author    Joanne Koong <joannelkoong@gmail.com>  2025-07-10 15:33:33 +0200
committer Christian Brauner <brauner@kernel.org>  2025-07-14 10:51:32 +0200
commit    f8b6a94a4ccafa95433798ad486c548c22624028 (patch)
tree      6eb843f494ca1199ce096a7d552411717d47324e
parent    iomap: rename iomap_writepage_map to iomap_writeback_folio (diff)
iomap: move folio_unlock out of iomap_writeback_folio
Move unlocking the folio out of iomap_writeback_folio and into the caller.
This means the end-of-writeback machinery now runs with the folio locked
when no writeback happened, or when writeback completed extremely fast.

Note that holding the folio lock over the call to folio_end_writeback in
iomap_writeback_folio means that the dropbehind handling there will never
run, because its trylock fails. The only way this can happen is if the
writeback either never wrote back any dirty data at all, in which case the
dropbehind handling isn't needed, or if all writeback finished instantly,
which is rather unlikely. Even in the latter case the dropbehind handling
is an optional optimization, so skipping it will not cause correctness
issues.

This prepares for exporting iomap_writeback_folio for use in folio
laundering.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
[hch: split from a larger patch]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/20250710133343.399917-10-hch@lst.de
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
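As a rough illustration of why the skipped dropbehind handling is harmless,
here is a minimal userspace model (not kernel code; every name in it is a
stand-in, with pthread_mutex_trylock playing the role of folio_trylock).
It shows that when the caller still holds the lock across the end of
writeback, the trylock-guarded dropbehind path simply does nothing, while
the normal case, where writeback completes after the unlock, still runs it:

	/* Userspace sketch only; models the folio lock with a pthread mutex. */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t folio_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Stand-in for the dropbehind invalidation at end of writeback. */
	static void end_writeback_dropbehind(void)
	{
		/* Like folio_trylock(): only proceed if nobody holds the lock. */
		if (pthread_mutex_trylock(&folio_lock) == 0) {
			printf("dropbehind: invalidating folio\n");
			pthread_mutex_unlock(&folio_lock);
		} else {
			/* Optional optimization skipped; no correctness issue. */
			printf("dropbehind: folio locked, skipping\n");
		}
	}

	/* Stand-in for iomap_writeback_folio(): entered with the folio
	 * locked and, after this patch, returns with it still locked. */
	static void writeback_folio(bool finished_instantly)
	{
		if (finished_instantly)
			end_writeback_dropbehind(); /* trylock fails: we hold the lock */
	}

	int main(void)
	{
		/* The caller (the iomap_writepages loop) now owns the unlock. */
		pthread_mutex_lock(&folio_lock);   /* writeback_iter() locked it */
		writeback_folio(true);
		pthread_mutex_unlock(&folio_lock); /* new: caller unlocks */

		/* Common case: writeback completes later, after the unlock,
		 * so the dropbehind trylock succeeds as before. */
		end_writeback_dropbehind();
		return 0;
	}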
-rw-r--r--  fs/iomap/buffered-io.c  |  9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e6e4c2d1b399..ca45a6d1cb68 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1652,10 +1652,8 @@ static int iomap_writeback_folio(struct iomap_writepage_ctx *wpc,
 
 	trace_iomap_writeback_folio(inode, pos, folio_size(folio));
 
-	if (!iomap_writeback_handle_eof(folio, inode, &end_pos)) {
-		folio_unlock(folio);
+	if (!iomap_writeback_handle_eof(folio, inode, &end_pos))
 		return 0;
-	}
 	WARN_ON_ONCE(end_pos <= pos);
 
 	if (i_blocks_per_folio(inode, folio) > 1) {
@@ -1709,7 +1707,6 @@ static int iomap_writeback_folio(struct iomap_writepage_ctx *wpc,
 	 * already at this point. In that case we need to clear the writeback
 	 * bit ourselves right after unlocking the page.
 	 */
-	folio_unlock(folio);
 	if (ifs) {
 		if (atomic_dec_and_test(&ifs->write_bytes_pending))
 			folio_end_writeback(folio);
@@ -1736,8 +1733,10 @@ iomap_writepages(struct iomap_writepage_ctx *wpc)
 			PF_MEMALLOC))
 		return -EIO;
 
-	while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error)))
+	while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
 		error = iomap_writeback_folio(wpc, folio);
+		folio_unlock(folio);
+	}
 
 	/*
	 * If @error is non-zero, it means that we have a situation where some