bcachefs: Track held write locks

The upcoming lock cycle detection code will need to know precisely which
locks every btree_trans is holding, including write locks - this patch
updates btree_node_locked_type to include write locks.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2022-08-22 21:05:31 -04:00
commit 131dcd5af7, parent c240c3a944
3 changed files with 33 additions and 11 deletions
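
For context before the diffs: btree_node_locked_type() packs one two-bit
field per btree level into path->nodes_locked, storing the held lock type
offset by one so that zero means "unlocked". Two bits can hold all four
states, so tracking write locks needs no layout change, only the new
BTREE_NODE_WRITE_LOCKED value. Below is a minimal standalone sketch of
that encoding, assuming nothing beyond what the diffs show; it is not
bcachefs source, and set_locked_type() and main() are hypothetical demo
helpers standing in for mark_btree_node_locked_noreset().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum six_lock_type { SIX_LOCK_read, SIX_LOCK_intent, SIX_LOCK_write };

enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,	/* added by this patch */
};

/* mirrors btree_node_locked_type() from the diff below */
static int locked_type(uint32_t nodes_locked, unsigned level)
{
	return BTREE_NODE_UNLOCKED + ((nodes_locked >> (level << 1)) & 3);
}

/* hypothetical inverse, standing in for mark_btree_node_locked_noreset() */
static void set_locked_type(uint32_t *nodes_locked, unsigned level,
			    enum btree_node_locked_type type)
{
	unsigned shift = level << 1;

	*nodes_locked &= ~(3U << shift);
	*nodes_locked |= (unsigned)(type - BTREE_NODE_UNLOCKED) << shift;
}

int main(void)
{
	uint32_t nodes_locked = 0;

	/*
	 * Write locks sit on top of intent locks: lock_write marks the
	 * level SIX_LOCK_write, unlock_write marks it back down to
	 * SIX_LOCK_intent, as the lock_write/unlock_write hunks below do.
	 */
	set_locked_type(&nodes_locked, 0, BTREE_NODE_INTENT_LOCKED);
	set_locked_type(&nodes_locked, 0, BTREE_NODE_WRITE_LOCKED);
	assert(locked_type(nodes_locked, 0) == BTREE_NODE_WRITE_LOCKED);

	set_locked_type(&nodes_locked, 0, BTREE_NODE_INTENT_LOCKED);
	assert(locked_type(nodes_locked, 0) == BTREE_NODE_INTENT_LOCKED);
	assert(locked_type(nodes_locked, 1) == BTREE_NODE_UNLOCKED);

	printf("level 0: %d, level 1: %d\n",
	       locked_type(nodes_locked, 0), locked_type(nodes_locked, 1));
	return 0;
}

With the state tracked this way, the verify_locks hunk below can assert
that the held type matches the wanted type at every level while tolerating
a held write lock where only an intent lock was wanted.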

fs/bcachefs/btree_locking.c

@@ -246,6 +246,8 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
 		return bch2_btree_node_relock(trans, path, level);
 	case BTREE_NODE_INTENT_LOCKED:
 		break;
+	case BTREE_NODE_WRITE_LOCKED:
+		BUG();
 	}
 
 	if (btree_node_intent_locked(path, level))
@@ -448,9 +450,17 @@ void bch2_btree_path_verify_locks(struct btree_path *path)
 		return;
 	}
 
-	for (l = 0; btree_path_node(path, l); l++)
-		BUG_ON(btree_lock_want(path, l) !=
-		       btree_node_locked_type(path, l));
+	for (l = 0; l < BTREE_MAX_DEPTH; l++) {
+		int want = btree_lock_want(path, l);
+		int have = btree_node_locked_type(path, l);
+
+		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);
+
+		BUG_ON(is_btree_node(path, l) &&
+		       (want == BTREE_NODE_UNLOCKED ||
+			have != BTREE_NODE_WRITE_LOCKED) &&
+		       want != have);
+	}
 }
 
 void bch2_trans_verify_locks(struct btree_trans *trans)

fs/bcachefs/btree_locking.h

@@ -32,6 +32,7 @@ enum btree_node_locked_type {
 	BTREE_NODE_UNLOCKED		= -1,
 	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
 	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
+	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
 };
 
 static inline int btree_node_locked_type(struct btree_path *path,
@@ -40,16 +41,19 @@ static inline int btree_node_locked_type(struct btree_path *path,
 	return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
 }
 
-static inline bool btree_node_intent_locked(struct btree_path *path,
-					    unsigned level)
+static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
 {
-	return btree_node_locked_type(path, level) == BTREE_NODE_INTENT_LOCKED;
+	return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
 }
 
-static inline bool btree_node_read_locked(struct btree_path *path,
-					  unsigned level)
+static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
 {
-	return btree_node_locked_type(path, level) == BTREE_NODE_READ_LOCKED;
+	return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
+}
+
+static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
+{
+	return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
 }
 
 static inline bool btree_node_locked(struct btree_path *path, unsigned level)
@@ -72,6 +76,7 @@ static inline void mark_btree_node_locked_noreset(struct btree_path *path,
 static inline void mark_btree_node_unlocked(struct btree_path *path,
 					    unsigned level)
 {
+	EBUG_ON(btree_node_write_locked(path, level));
 	mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
 }
 
@@ -179,6 +184,9 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_pat
 	EBUG_ON(path->l[b->c.level].b != b);
 	EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
+	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
+
+	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
 
 	trans_for_each_path_with_node(trans, b, linked)
 		linked->l[b->c.level].lock_seq += 2;
 
@@ -288,6 +296,8 @@ static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
 	if (unlikely(!six_trylock_write(&b->c.lock)))
 		__bch2_btree_node_lock_write(trans, b);
+
+	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
 }
 
 /* relock: */
 
@@ -311,8 +321,8 @@ static inline bool bch2_btree_node_relock(struct btree_trans *trans,
 				     struct btree_path *path, unsigned level)
 {
 	EBUG_ON(btree_node_locked(path, level) &&
-		btree_node_locked_type(path, level) !=
-		__btree_lock_want(path, level));
+		!btree_node_write_locked(path, level) &&
+		btree_node_locked_type(path, level) != __btree_lock_want(path, level));
 
 	return likely(btree_node_locked(path, level)) ||
 		__bch2_btree_node_relock(trans, path, level);

fs/bcachefs/btree_update_leaf.c

@@ -828,6 +828,8 @@ static inline int trans_lock_write(struct btree_trans *trans)
 			BUG_ON(ret);
 		}
 
+		mark_btree_node_locked_noreset(i->path, i->level, SIX_LOCK_write);
+
 		bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
 	}
 