| author | Kent Overstreet <kent.overstreet@linux.dev> | 2024-12-04 18:14:14 -0500 |
|---|---|---|
| committer | Kent Overstreet <kent.overstreet@linux.dev> | 2024-12-21 01:36:22 -0500 |
| commit | ff7e7c5367250454ed10a6113695d2e01ccc0cfc (patch) | |
| tree | e510a8dc3dd022edcf3836bde41689215708aae4 /fs/bcachefs/journal_io.c | |
| parent | bcachefs: dev_alloc_list.devs -> dev_alloc_list.data (diff) | |
bcachefs: Journal write path refactoring, debug improvements
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/journal_io.c')
| -rw-r--r-- | fs/bcachefs/journal_io.c | 70 |
1 file changed, 39 insertions, 31 deletions
```diff
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 9a1647297d11..2f4daa8bd498 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1420,6 +1420,35 @@ fsck_err:
 
 /* journal write: */
 
+static void journal_advance_devs_to_next_bucket(struct journal *j,
+                                                struct dev_alloc_list *devs,
+                                                unsigned sectors, u64 seq)
+{
+        struct bch_fs *c = container_of(j, struct bch_fs, journal);
+
+        darray_for_each(*devs, i) {
+                struct bch_dev *ca = rcu_dereference(c->devs[*i]);
+                if (!ca)
+                        continue;
+
+                struct journal_device *ja = &ca->journal;
+
+                if (sectors > ja->sectors_free &&
+                    sectors <= ca->mi.bucket_size &&
+                    bch2_journal_dev_buckets_available(j, ja,
+                                        journal_space_discarded)) {
+                        ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
+                        ja->sectors_free = ca->mi.bucket_size;
+
+                        /*
+                         * ja->bucket_seq[ja->cur_idx] must always have
+                         * something sensible:
+                         */
+                        ja->bucket_seq[ja->cur_idx] = le64_to_cpu(seq);
+                }
+        }
+}
+
 static void __journal_write_alloc(struct journal *j,
                                   struct journal_buf *w,
                                   struct dev_alloc_list *devs,
@@ -1429,9 +1458,6 @@ static void __journal_write_alloc(struct journal *j,
 {
         struct bch_fs *c = container_of(j, struct bch_fs, journal);
 
-        if (*replicas >= replicas_want)
-                return;
-
         darray_for_each(*devs, i) {
                 struct bch_dev *ca = rcu_dereference(c->devs[*i]);
                 if (!ca)
@@ -1491,6 +1517,7 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w)
                 READ_ONCE(c->opts.metadata_replicas);
         unsigned replicas_need = min_t(unsigned, replicas_want,
                                        READ_ONCE(c->opts.metadata_replicas_required));
+        bool advance_done = false;
 
         rcu_read_lock();
 
@@ -1502,45 +1529,26 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w)
                         replicas += ca->mi.durability;
         }
 
-retry:
+retry_target:
         devs = target_rw_devs(c, BCH_DATA_journal, target);
-
         devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);
-
+retry_alloc:
         __journal_write_alloc(j, w, &devs_sorted,
                               sectors, &replicas, replicas_want);
-        if (replicas >= replicas_want)
+        if (likely(replicas >= replicas_want))
                 goto done;
 
-        darray_for_each(devs_sorted, i) {
-                struct bch_dev *ca = rcu_dereference(c->devs[*i]);
-                if (!ca)
-                        continue;
-
-                struct journal_device *ja = &ca->journal;
-
-                if (sectors > ja->sectors_free &&
-                    sectors <= ca->mi.bucket_size &&
-                    bch2_journal_dev_buckets_available(j, ja,
-                                        journal_space_discarded)) {
-                        ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
-                        ja->sectors_free = ca->mi.bucket_size;
-
-                        /*
-                         * ja->bucket_seq[ja->cur_idx] must always have
-                         * something sensible:
-                         */
-                        ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
-                }
+        if (!advance_done) {
+                journal_advance_devs_to_next_bucket(j, &devs_sorted, sectors, w->data->seq);
+                advance_done = true;
+                goto retry_alloc;
         }
 
-        __journal_write_alloc(j, w, &devs_sorted,
-                              sectors, &replicas, replicas_want);
-
         if (replicas < replicas_want && target) {
                 /* Retry from all devices: */
                 target = 0;
-                goto retry;
+                advance_done = false;
+                goto retry_target;
         }
 done:
         rcu_read_unlock();
```
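For readers skimming the patch, the control-flow change in journal_write_alloc() amounts to a small retry state machine: retry_alloc re-attempts allocation after the devices have been advanced to their next journal bucket, retry_target re-derives the candidate device list once the target restriction is dropped, and advance_done ensures the bucket-advance fallback runs at most once per device list. The sketch below is a minimal, self-contained illustration of that shape only, assuming a toy device model; the toy_* types and helpers are invented for the example and are not bcachefs code.

```c
#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
        unsigned sectors_free;  /* space left in the current bucket */
        unsigned bucket_size;
        bool in_target;         /* belongs to the preferred target group */
        bool used;              /* already holds a replica of this write */
};

/* Allocate one replica on each candidate device with room in its bucket. */
static unsigned toy_alloc(struct toy_dev *devs, unsigned nr,
                          unsigned sectors, unsigned candidates)
{
        unsigned got = 0;

        for (unsigned i = 0; i < nr; i++) {
                struct toy_dev *d = &devs[i];

                if (d->used || !(candidates & (1U << i)))
                        continue;
                if (sectors <= d->sectors_free) {
                        d->sectors_free -= sectors;
                        d->used = true;
                        got++;
                }
        }
        return got;
}

/* Move devices whose current bucket is too full onto a fresh bucket;
 * shaped after journal_advance_devs_to_next_bucket() in the patch. */
static void toy_advance_to_next_bucket(struct toy_dev *devs, unsigned nr,
                                       unsigned sectors)
{
        for (unsigned i = 0; i < nr; i++)
                if (sectors > devs[i].sectors_free &&
                    sectors <= devs[i].bucket_size)
                        devs[i].sectors_free = devs[i].bucket_size;
}

static int toy_write_alloc(struct toy_dev *devs, unsigned nr,
                           unsigned sectors, unsigned replicas_want)
{
        unsigned replicas = 0;
        unsigned candidates;
        bool use_target = true;
        bool advance_done = false;

retry_target:
        /* Re-derive the candidate set from the current target restriction. */
        candidates = 0;
        for (unsigned i = 0; i < nr; i++)
                if (!use_target || devs[i].in_target)
                        candidates |= 1U << i;
retry_alloc:
        replicas += toy_alloc(devs, nr, sectors, candidates);
        if (replicas >= replicas_want)
                return 0;

        /* First fallback: advance devices to their next bucket, once. */
        if (!advance_done) {
                toy_advance_to_next_bucket(devs, nr, sectors);
                advance_done = true;
                goto retry_alloc;
        }

        /* Second fallback: drop the target and retry from all devices. */
        if (use_target) {
                use_target = false;
                advance_done = false;
                goto retry_target;
        }
        return -1;
}

int main(void)
{
        struct toy_dev devs[] = {
                { .sectors_free = 8,  .bucket_size = 64, .in_target = true  },
                { .sectors_free = 8,  .bucket_size = 64, .in_target = true  },
                { .sectors_free = 64, .bucket_size = 64, .in_target = false },
        };

        int ret = toy_write_alloc(devs, 3, 32, 2);
        printf("journal write allocation %s\n", ret ? "failed" : "succeeded");
        return 0;
}
```

Compared with the old version, which open-coded the bucket-advance loop and called __journal_write_alloc() a second time afterwards, routing both fallbacks through labels means the allocation call appears once and each fallback is attempted in a defined order.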
