bcachefs: BCH_READ_data_update -> bch_read_bio.data_update

Read flags are codepath-dependent and change as they're passed around,
while the fields in rbio._state are mostly fixed properties of that
particular object.

Losing track of BCH_READ_data_update would be bad, and previously it was
not obvious whether it was always correctly set in the rbio, so this is
a safety cleanup.
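Condensed from the diff below: instead of testing a transient flag that
every caller must remember to pass along, callers now ask the rbio itself
via a small accessor, which also recovers the enclosing data_update. A
minimal sketch (names are from this patch; the EBUG_ON guards against
asking a split child rbio, whose container is not a data_update):

	static inline struct data_update *rbio_data_update(struct bch_read_bio *rbio)
	{
		EBUG_ON(rbio->split);

		return rbio->data_update
			? container_of(rbio, struct data_update, rbio)
			: NULL;
	}

	/*
	 * before: if (flags & BCH_READ_data_update) ...
	 * after:  struct data_update *u = rbio_data_update(orig);
	 *         if (u) ...
	 */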

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Author: Kent Overstreet
Date:   2025-03-12 16:56:09 -04:00
Parent: be31e412ac
Commit: 3fb8bacb14

4 changed files with 44 additions and 35 deletions
fs/bcachefs/data_update.c (+1)
@@ -700,6 +700,7 @@ int bch2_data_update_bios_init(struct data_update *m, struct bch_fs *c,
 	}
 
 	rbio_init(&m->rbio.bio, c, *io_opts, NULL);
+	m->rbio.data_update = true;
 	m->rbio.bio.bi_iter.bi_size	= buf_bytes;
 	m->rbio.bio.bi_iter.bi_sector	= bkey_start_offset(&m->k.k->k);
 	m->op.wbio.bio.bi_ioprio	= IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
fs/bcachefs/io_read.c (+32 -25)
@@ -103,14 +103,21 @@ static inline bool have_io_error(struct bch_io_failures *failed)
 	return failed && failed->nr;
 }
 
-static bool ptr_being_rewritten(struct bch_read_bio *orig,
-				unsigned dev,
-				unsigned flags)
+static inline struct data_update *rbio_data_update(struct bch_read_bio *rbio)
 {
-	if (!(flags & BCH_READ_data_update))
+	EBUG_ON(rbio->split);
+
+	return rbio->data_update
+		? container_of(rbio, struct data_update, rbio)
+		: NULL;
+}
+
+static bool ptr_being_rewritten(struct bch_read_bio *orig, unsigned dev)
+{
+	struct data_update *u = rbio_data_update(orig);
+	if (!u)
 		return false;
 
-	struct data_update *u = container_of(orig, struct data_update, rbio);
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(u->k.k));
 	unsigned i = 0;
 	bkey_for_each_ptr(ptrs, ptr) {
@@ -199,7 +206,6 @@ static struct bch_read_bio *__promote_alloc(struct btree_trans *trans,
 					  struct bpos pos,
 					  struct extent_ptr_decoded *pick,
 					  unsigned sectors,
-					  unsigned flags,
 					  struct bch_read_bio *orig,
 					  struct bch_io_failures *failed)
 {
@@ -220,7 +226,7 @@ static struct bch_read_bio *__promote_alloc(struct btree_trans *trans,
 		unsigned ptr_bit = 1;
 		bkey_for_each_ptr(ptrs, ptr) {
 			if (bch2_dev_io_failures(failed, ptr->dev) &&
-			    !ptr_being_rewritten(orig, ptr->dev, flags))
+			    !ptr_being_rewritten(orig, ptr->dev))
 				update_opts.rewrite_ptrs |= ptr_bit;
 			ptr_bit <<= 1;
 		}
@@ -314,7 +320,7 @@ static struct bch_read_bio *promote_alloc(struct btree_trans *trans,
 				k.k->type == KEY_TYPE_reflink_v
 				? BTREE_ID_reflink
 				: BTREE_ID_extents,
-				k, pos, pick, sectors, flags, orig, failed);
+				k, pos, pick, sectors, orig, failed);
 	if (!promote)
 		return NULL;
@@ -342,7 +348,7 @@ static int bch2_read_err_msg_trans(struct btree_trans *trans, struct printbuf *out,
 	if (ret)
 		return ret;
 
-	if (rbio->flags & BCH_READ_data_update)
+	if (rbio->data_update)
 		prt_str(out, "(internal move) ");
 
 	return 0;
@@ -505,7 +511,7 @@ static void bch2_rbio_retry(struct work_struct *work)
 	flags &= ~BCH_READ_last_fragment;
 	flags |= BCH_READ_must_clone;
 
-	int ret = flags & BCH_READ_data_update
+	int ret = rbio->data_update
 		? bch2_read_retry_nodecode(c, rbio, iter, &failed, flags)
 		: __bch2_read(c, rbio, iter, inum, &failed, flags);
@@ -519,7 +525,7 @@ static void bch2_rbio_retry(struct work_struct *work)
 			bch2_inum_offset_err_msg_trans(trans, &buf,
 					(subvol_inum) { subvol, read_pos.inode },
 					read_pos.offset << 9));
-		if (rbio->flags & BCH_READ_data_update)
+		if (rbio->data_update)
 			prt_str(&buf, "(internal move) ");
 		prt_str(&buf, "successful retry");
@@ -712,9 +718,10 @@ static void __bch2_read_endio(struct work_struct *work)
 		container_of(work, struct bch_read_bio, work);
 	struct bch_fs *c	= rbio->c;
 	struct bch_dev *ca	= rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
-	struct bio *src		= &rbio->bio;
-	struct bio *dst		= &bch2_rbio_parent(rbio)->bio;
-	struct bvec_iter dst_iter = rbio->bvec_iter;
+	struct bch_read_bio *parent = bch2_rbio_parent(rbio);
+	struct bio *src		= &rbio->bio;
+	struct bio *dst		= &parent->bio;
+	struct bvec_iter dst_iter = rbio->bvec_iter;
 	struct bch_extent_crc_unpacked crc = rbio->pick.crc;
 	struct nonce nonce = extent_nonce(rbio->version, crc);
 	unsigned nofs_flags;
@@ -764,7 +771,7 @@ static void __bch2_read_endio(struct work_struct *work)
 	if (unlikely(rbio->narrow_crcs))
 		bch2_rbio_narrow_crcs(rbio);
 
-	if (likely(!(rbio->flags & BCH_READ_data_update))) {
+	if (likely(!parent->data_update)) {
 		/* Adjust crc to point to subset of data we want: */
 		crc.offset += rbio->offset_into_extent;
 		crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
@@ -934,6 +941,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 	struct bch_read_bio *rbio = NULL;
 	bool bounce = false, read_full = false, narrow_crcs = false;
 	struct bpos data_pos = bkey_start_pos(k.k);
+	struct data_update *u = rbio_data_update(orig);
 	int ret = 0;
 
 	if (bkey_extent_is_inline_data(k.k)) {
@@ -997,7 +1005,7 @@ retry_pick:
 		goto retry_pick;
 	}
 
-	if (!(flags & BCH_READ_data_update)) {
+	if (likely(!u)) {
 		if (!(flags & BCH_READ_last_fragment) ||
 		    bio_flagged(&orig->bio, BIO_CHAIN))
 			flags |= BCH_READ_must_clone;
@@ -1020,12 +1028,10 @@ retry_pick:
 			bounce = true;
 		}
 	} else {
-		read_full = true;
 		/*
 		 * can happen if we retry, and the extent we were going to read
 		 * has been merged in the meantime:
 		 */
-		struct data_update *u = container_of(orig, struct data_update, rbio);
 		if (pick.crc.compressed_size > u->op.wbio.bio.bi_iter.bi_size) {
 			if (ca)
 				percpu_ref_put(&ca->io_ref);
@@ -1034,6 +1040,7 @@ retry_pick:
 		}
 
 		iter.bi_size = pick.crc.compressed_size << 9;
+		read_full = true;
 	}
 
 	if (orig->opts.promote_target || have_io_error(failed))
@@ -1127,7 +1134,7 @@ retry_pick:
 	if (rbio->bounce)
 		trace_and_count(c, io_read_bounce, &rbio->bio);
 
-	if (!(flags & BCH_READ_data_update))
+	if (!u)
 		this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
 	else
 		this_cpu_add(c->counters[BCH_COUNTER_io_move_read], bio_sectors(&rbio->bio));
@@ -1137,7 +1144,7 @@ retry_pick:
 	 * If it's being moved internally, we don't want to flag it as a cache
 	 * hit:
 	 */
-	if (ca && pick.ptr.cached && !(flags & BCH_READ_data_update))
+	if (ca && pick.ptr.cached && !u)
 		bch2_bucket_io_time_reset(trans, pick.ptr.dev,
 				PTR_BUCKET_NR(ca, &pick.ptr), READ);
@@ -1234,11 +1241,11 @@ hole:
 	this_cpu_add(c->counters[BCH_COUNTER_io_read_hole],
 		     bvec_iter_sectors(iter));
 	/*
-	 * won't normally happen in the BCH_READ_data_update
-	 * (bch2_move_extent()) path, but if we retry and the extent we wanted
-	 * to read no longer exists we have to signal that:
+	 * won't normally happen in the data update (bch2_move_extent()) path,
+	 * but if we retry and the extent we wanted to read no longer exists we
+	 * have to signal that:
 	 */
-	if (flags & BCH_READ_data_update)
+	if (u)
 		orig->ret = -BCH_ERR_data_read_key_overwritten;
 
 	zero_fill_bio_iter(&orig->bio, iter);
@@ -1259,7 +1266,7 @@ int __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
 	struct bkey_s_c k;
 	int ret;
 
-	BUG_ON(flags & BCH_READ_data_update);
+	EBUG_ON(rbio->data_update);
 
 	bch2_bkey_buf_init(&sk);
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
fs/bcachefs/io_read.h (+11 -9)
@@ -35,7 +35,8 @@ struct bch_read_bio {
 	u16			flags;
 	union {
 	struct {
-	u16			promote:1,
+	u16			data_update:1,
+				promote:1,
 				bounce:1,
 				split:1,
 				have_ioref:1,
@@ -108,7 +109,6 @@ static inline int bch2_read_indirect_extent(struct btree_trans *trans,
 	x(retry_if_stale)		\
 	x(may_promote)			\
 	x(user_mapped)			\
-	x(data_update)			\
 	x(last_fragment)		\
 	x(must_bounce)			\
 	x(must_clone)			\
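Aside, not part of the diff: the BCH_READ_* read flags are generated from
this x() list with an x-macro, so deleting x(data_update) is what removes
the old flag constant. Roughly, assuming the expansion io_read.h already
uses (list and enum names as in that header):

	enum __bch_read_flags {
	#define x(n)	__BCH_READ_##n,
		BCH_READ_FLAGS()
	#undef x
	};

	enum bch_read_flags {
	#define x(n)	BCH_READ_##n = BIT(__BCH_READ_##n),
		BCH_READ_FLAGS()
	#undef x
	};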
@@ -161,12 +161,13 @@ static inline struct bch_read_bio *rbio_init_fragment(struct bio *bio,
 {
 	struct bch_read_bio *rbio = to_rbio(bio);
 
-	rbio->c		= orig->c;
-	rbio->_state	= 0;
-	rbio->ret	= 0;
-	rbio->split	= true;
-	rbio->parent	= orig;
-	rbio->opts	= orig->opts;
+	rbio->c			= orig->c;
+	rbio->_state		= 0;
+	rbio->flags		= 0;
+	rbio->ret		= 0;
+	rbio->split		= true;
+	rbio->parent		= orig;
+	rbio->opts		= orig->opts;
 	return rbio;
 }
@@ -180,7 +181,8 @@ static inline struct bch_read_bio *rbio_init(struct bio *bio,
 	rbio->start_time	= local_clock();
 	rbio->c			= c;
 	rbio->_state		= 0;
-	rbio->ret		= 0;
+	rbio->flags		= 0;
+	rbio->ret		= 0;
 	rbio->opts		= opts;
 	rbio->bio.bi_end_io	= end_io;
 	return rbio;
fs/bcachefs/move.c (-1)
@@ -359,7 +359,6 @@ int bch2_move_extent(struct moving_context *ctxt,
 			   bkey_start_pos(k.k),
 			   iter->btree_id, k, 0,
 			   NULL,
-			   BCH_READ_data_update|
 			   BCH_READ_last_fragment,
 			   data_opts.scrub ? data_opts.read_dev : -1);
 	return 0;