1

inode: port __I_SYNC to var event

Port the __I_SYNC mechanism to use the new var event mechanism.

Link: https://lore.kernel.org/r/20240823-work-i_state-v3-3-5cd5fd207a57@kernel.org
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
Christian Brauner 2024-08-23 14:47:37 +02:00
parent 2ed634c96e
commit 532980cb1b

View File

@@ -1386,12 +1386,13 @@ static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
static void inode_sync_complete(struct inode *inode) static void inode_sync_complete(struct inode *inode)
{ {
assert_spin_locked(&inode->i_lock);
inode->i_state &= ~I_SYNC; inode->i_state &= ~I_SYNC;
/* If inode is clean an unused, put it into LRU now... */ /* If inode is clean an unused, put it into LRU now... */
inode_add_lru(inode); inode_add_lru(inode);
/* Waiters must see I_SYNC cleared before being woken up */ /* Called with inode->i_lock which ensures memory ordering. */
smp_mb(); inode_wake_up_bit(inode, __I_SYNC);
wake_up_bit(&inode->i_state, __I_SYNC);
} }
static bool inode_dirtied_after(struct inode *inode, unsigned long t) static bool inode_dirtied_after(struct inode *inode, unsigned long t)
@@ -1512,17 +1513,25 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
*/ */
void inode_wait_for_writeback(struct inode *inode) void inode_wait_for_writeback(struct inode *inode)
{ {
DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); struct wait_bit_queue_entry wqe;
wait_queue_head_t *wqh; struct wait_queue_head *wq_head;
lockdep_assert_held(&inode->i_lock); assert_spin_locked(&inode->i_lock);
wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
while (inode->i_state & I_SYNC) { if (!(inode->i_state & I_SYNC))
return;
wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
for (;;) {
prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
/* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
if (!(inode->i_state & I_SYNC))
break;
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
__wait_on_bit(wqh, &wq, bit_wait, schedule();
TASK_UNINTERRUPTIBLE);
spin_lock(&inode->i_lock); spin_lock(&inode->i_lock);
} }
finish_wait(wq_head, &wqe.wq_entry);
} }
/* /*
@@ -1533,16 +1542,20 @@ void inode_wait_for_writeback(struct inode *inode)
static void inode_sleep_on_writeback(struct inode *inode) static void inode_sleep_on_writeback(struct inode *inode)
__releases(inode->i_lock) __releases(inode->i_lock)
{ {
DEFINE_WAIT(wait); struct wait_bit_queue_entry wqe;
wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC); struct wait_queue_head *wq_head;
int sleep; bool sleep;
prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); assert_spin_locked(&inode->i_lock);
sleep = inode->i_state & I_SYNC;
wq_head = inode_bit_waitqueue(&wqe, inode, __I_SYNC);
prepare_to_wait_event(wq_head, &wqe.wq_entry, TASK_UNINTERRUPTIBLE);
/* Checking I_SYNC with inode->i_lock guarantees memory ordering. */
sleep = !!(inode->i_state & I_SYNC);
spin_unlock(&inode->i_lock); spin_unlock(&inode->i_lock);
if (sleep) if (sleep)
schedule(); schedule();
finish_wait(wqh, &wait); finish_wait(wq_head, &wqe.wq_entry);
} }
/* /*