@@ -29,11 +29,6 @@ struct kmem_cache *xfs_buf_cache;
 /*
  * Locking orders
  *
- * xfs_buf_ioacct_inc:
- * xfs_buf_ioacct_dec:
- *	b_sema (caller holds)
- *	  b_lock
- *
  * xfs_buf_stale:
  *	b_sema (caller holds)
  *	  b_lock
@@ -81,51 +76,6 @@ xfs_buf_vmap_len(
 	return (bp->b_page_count * PAGE_SIZE);
 }
 
-/*
- * Bump the I/O in flight count on the buftarg if we haven't yet done so for
- * this buffer. The count is incremented once per buffer (per hold cycle)
- * because the corresponding decrement is deferred to buffer release. Buffers
- * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
- * tracking adds unnecessary overhead. This is used for sychronization purposes
- * with unmount (see xfs_buftarg_drain()), so all we really need is a count of
- * in-flight buffers.
- *
- * Buffers that are never released (e.g., superblock, iclog buffers) must set
- * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
- * never reaches zero and unmount hangs indefinitely.
- */
-static inline void
-xfs_buf_ioacct_inc(
-	struct xfs_buf	*bp)
-{
-	if (bp->b_flags & XBF_NO_IOACCT)
-		return;
-
-	ASSERT(bp->b_flags & XBF_ASYNC);
-	spin_lock(&bp->b_lock);
-	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
-		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
-		percpu_counter_inc(&bp->b_target->bt_io_count);
-	}
-	spin_unlock(&bp->b_lock);
-}
-
-/*
- * Clear the in-flight state on a buffer about to be released to the LRU or
- * freed and unaccount from the buftarg.
- */
-static inline void
-__xfs_buf_ioacct_dec(
-	struct xfs_buf	*bp)
-{
-	lockdep_assert_held(&bp->b_lock);
-
-	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
-		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
-		percpu_counter_dec(&bp->b_target->bt_io_count);
-	}
-}
-
 /*
  * When we mark a buffer stale, we remove the buffer from the LRU and clear the
  * b_lru_ref count so that the buffer is freed immediately when the buffer
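The helpers deleted above implement once-per-hold-cycle accounting: a per-buffer
state bit (XFS_BSTATE_IN_FLIGHT), flipped under b_lock, makes the buftarg
counter update idempotent, so a buffer doing several I/Os in one hold-release
cycle is counted only once. A minimal userspace sketch of that pattern, with
pthread mutexes and a plain long standing in for b_lock and the percpu counter
(all names here are illustrative, not XFS API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static long io_count;                       /* stands in for bt_io_count */
    static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;

    struct buf {
        pthread_mutex_t lock;                   /* stands in for b_lock */
        bool in_flight;                         /* XFS_BSTATE_IN_FLIGHT */
    };

    /* Idempotent per hold cycle: repeated submissions count only once. */
    static void buf_ioacct_inc(struct buf *bp)
    {
        pthread_mutex_lock(&bp->lock);
        if (!bp->in_flight) {
            bp->in_flight = true;
            pthread_mutex_lock(&count_lock);
            io_count++;
            pthread_mutex_unlock(&count_lock);
        }
        pthread_mutex_unlock(&bp->lock);
    }

    /* Deferred to release: clears the bit and drops the count once. */
    static void buf_ioacct_dec(struct buf *bp)
    {
        pthread_mutex_lock(&bp->lock);
        if (bp->in_flight) {
            bp->in_flight = false;
            pthread_mutex_lock(&count_lock);
            io_count--;
            pthread_mutex_unlock(&count_lock);
        }
        pthread_mutex_unlock(&bp->lock);
    }

    int main(void)
    {
        struct buf b = { .lock = PTHREAD_MUTEX_INITIALIZER };

        buf_ioacct_inc(&b);
        buf_ioacct_inc(&b);     /* second I/O in the same hold: no-op */
        buf_ioacct_dec(&b);
        printf("in flight: %ld\n", io_count);   /* prints 0 */
        return 0;
    }

As the removed comment notes, the unmount drain only ever needed a count of
in-flight buffers, which is why this whole mechanism can be narrowed to
readahead-only tracking later in the patch.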
@@ -149,15 +99,7 @@ xfs_buf_stale(
 	 */
 	bp->b_flags &= ~_XBF_DELWRI_Q;
 
-	/*
-	 * Once the buffer is marked stale and unlocked, a subsequent lookup
-	 * could reset b_flags. There is no guarantee that the buffer is
-	 * unaccounted (released to LRU) before that occurs. Drop in-flight
-	 * status now to preserve accounting consistency.
-	 */
 	spin_lock(&bp->b_lock);
-	__xfs_buf_ioacct_dec(bp);
-
 	atomic_set(&bp->b_lru_ref, 0);
 	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
 	    (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -794,18 +736,13 @@ out_put_perag:
 
 int
 _xfs_buf_read(
-	struct xfs_buf		*bp,
-	xfs_buf_flags_t		flags)
+	struct xfs_buf		*bp)
 {
-	ASSERT(!(flags & XBF_WRITE));
 	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
 
 	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
-	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
-
+	bp->b_flags |= XBF_READ;
 	xfs_buf_submit(bp);
-	if (flags & XBF_ASYNC)
-		return 0;
 	return xfs_buf_iowait(bp);
 }
 
@@ -857,6 +794,8 @@ xfs_buf_read_map(
 	struct xfs_buf		*bp;
 	int			error;
 
+	ASSERT(!(flags & (XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD)));
+
 	flags |= XBF_READ;
 	*bpp = NULL;
 
@@ -870,21 +809,11 @@ xfs_buf_read_map(
 		/* Initiate the buffer read and wait. */
 		XFS_STATS_INC(target->bt_mount, xb_get_read);
 		bp->b_ops = ops;
-		error = _xfs_buf_read(bp, flags);
-
-		/* Readahead iodone already dropped the buffer, so exit. */
-		if (flags & XBF_ASYNC)
-			return 0;
+		error = _xfs_buf_read(bp);
 	} else {
 		/* Buffer already read; all we need to do is check it. */
 		error = xfs_buf_reverify(bp, ops);
 
-		/* Readahead already finished; drop the buffer and exit. */
-		if (flags & XBF_ASYNC) {
-			xfs_buf_relse(bp);
-			return 0;
-		}
-
 		/* We do not want read in the flags */
 		bp->b_flags &= ~XBF_READ;
 		ASSERT(bp->b_ops != NULL || ops == NULL);
@@ -936,6 +865,7 @@ xfs_buf_readahead_map(
 	int			nmaps,
 	const struct xfs_buf_ops *ops)
 {
+	const xfs_buf_flags_t	flags = XBF_READ | XBF_ASYNC | XBF_READ_AHEAD;
 	struct xfs_buf		*bp;
 
 	/*
@@ -945,9 +875,21 @@ xfs_buf_readahead_map(
 	if (xfs_buftarg_is_mem(target))
 		return;
 
-	xfs_buf_read_map(target, map, nmaps,
-		     XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
-		     __this_address);
+	if (xfs_buf_get_map(target, map, nmaps, flags | XBF_TRYLOCK, &bp))
+		return;
+	trace_xfs_buf_readahead(bp, 0, _RET_IP_);
+
+	if (bp->b_flags & XBF_DONE) {
+		xfs_buf_reverify(bp, ops);
+		xfs_buf_relse(bp);
+		return;
+	}
+	XFS_STATS_INC(target->bt_mount, xb_get_read);
+	bp->b_ops = ops;
+	bp->b_flags &= ~(XBF_WRITE | XBF_DONE);
+	bp->b_flags |= flags;
+	percpu_counter_inc(&target->bt_readahead_count);
+	xfs_buf_submit(bp);
 }
 
 /*
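The rewritten xfs_buf_readahead_map() above no longer detours through
xfs_buf_read_map(): it trylocks the buffer, reverifies and releases it if
already uptodate (XBF_DONE), and otherwise submits an async read while bumping
bt_readahead_count. A rough userspace analogue of this best-effort
trylock-or-skip shape (hypothetical names, a pthread mutex standing in for
b_sema, and completion folded inline for brevity):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct buf {
        pthread_mutex_t lock;           /* stands in for b_sema */
        bool done;                      /* stands in for XBF_DONE */
    };

    /* Toy stand-in for async submission; completes inline here. */
    static void submit_async_read(struct buf *bp)
    {
        bp->done = true;
        pthread_mutex_unlock(&bp->lock);
    }

    /* Readahead is optional: contended or cached buffers are skipped. */
    static void readahead(struct buf *bp)
    {
        if (pthread_mutex_trylock(&bp->lock) != 0)
            return;                     /* mirrors XBF_TRYLOCK failure */
        if (bp->done) {
            pthread_mutex_unlock(&bp->lock);
            return;                     /* mirrors the XBF_DONE branch */
        }
        submit_async_read(bp);
    }

    int main(void)
    {
        struct buf b = { .lock = PTHREAD_MUTEX_INITIALIZER };

        readahead(&b);                  /* submits the read */
        readahead(&b);                  /* already done: skipped */
        printf("done=%d\n", b.done);
        return 0;
    }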
@@ -1003,10 +945,12 @@ xfs_buf_get_uncached(
 	struct xfs_buf		*bp;
 	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
 
+	/* there are currently no valid flags for xfs_buf_get_uncached */
+	ASSERT(flags == 0);
+
 	*bpp = NULL;
 
-	/* flags might contain irrelevant bits, pass only what we care about */
-	error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
+	error = _xfs_buf_alloc(target, &map, 1, flags, &bp);
 	if (error)
 		return error;
 
@@ -1060,7 +1004,6 @@ xfs_buf_rele_uncached(
 		spin_unlock(&bp->b_lock);
 		return;
 	}
-	__xfs_buf_ioacct_dec(bp);
 	spin_unlock(&bp->b_lock);
 	xfs_buf_free(bp);
 }
@@ -1079,20 +1022,12 @@ xfs_buf_rele_cached(
 	spin_lock(&bp->b_lock);
 	ASSERT(bp->b_hold >= 1);
 	if (bp->b_hold > 1) {
-		/*
-		 * Drop the in-flight state if the buffer is already on the LRU
-		 * and it holds the only reference. This is racy because we
-		 * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
-		 * ensures the decrement occurs only once per-buf.
-		 */
-		if (--bp->b_hold == 1 && !list_empty(&bp->b_lru))
-			__xfs_buf_ioacct_dec(bp);
+		bp->b_hold--;
 		goto out_unlock;
 	}
 
 	/* we are asked to drop the last reference */
-	__xfs_buf_ioacct_dec(bp);
-	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
+	if (atomic_read(&bp->b_lru_ref)) {
 		/*
 		 * If the buffer is added to the LRU, keep the reference to the
 		 * buffer for the LRU and clear the (now stale) dispose list
@@ -1345,6 +1280,7 @@ xfs_buf_ioend_handle_error(
 resubmit:
 	xfs_buf_ioerror(bp, 0);
 	bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
+	reinit_completion(&bp->b_iowait);
 	xfs_buf_submit(bp);
 	return true;
 out_stale:
@@ -1355,8 +1291,9 @@ out_stale:
 	return false;
 }
 
-static void
-xfs_buf_ioend(
+/* returns false if the caller needs to resubmit the I/O, else true */
+static bool
+__xfs_buf_ioend(
 	struct xfs_buf	*bp)
 {
 	trace_xfs_buf_iodone(bp, _RET_IP_);
@@ -1369,6 +1306,8 @@ xfs_buf_ioend(
 			bp->b_ops->verify_read(bp);
 		if (!bp->b_error)
 			bp->b_flags |= XBF_DONE;
+		if (bp->b_flags & XBF_READ_AHEAD)
+			percpu_counter_dec(&bp->b_target->bt_readahead_count);
 	} else {
 		if (!bp->b_error) {
 			bp->b_flags &= ~XBF_WRITE_FAIL;
@@ -1376,7 +1315,7 @@ xfs_buf_ioend(
 	}
 
 	if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
-		return;
+		return false;
 
 	/* clear the retry state */
 	bp->b_last_error = 0;
@@ -1397,7 +1336,15 @@ xfs_buf_ioend(
 
 	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
 			 _XBF_LOGRECOVERY);
+	return true;
+}
 
+static void
+xfs_buf_ioend(
+	struct xfs_buf	*bp)
+{
+	if (!__xfs_buf_ioend(bp))
+		return;
 	if (bp->b_flags & XBF_ASYNC)
 		xfs_buf_relse(bp);
 	else
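These hunks split completion in two: __xfs_buf_ioend() does the work and
returns false when the error handler resubmitted the I/O, and xfs_buf_ioend()
becomes a thin wrapper that only releases or completes once the core reports
done. A compact sketch of that bool-returning split (toy types, illustrative
only, not the kernel functions):

    #include <stdbool.h>
    #include <stdio.h>

    struct buf { int error; int retries; };

    /* Completion core; false means the I/O was resubmitted internally. */
    static bool buf_ioend(struct buf *bp)
    {
        if (bp->error && bp->retries-- > 0) {
            bp->error = 0;              /* models the resubmit path */
            return false;
        }
        return true;
    }

    /* Async caller: only releases the buffer once the core says done. */
    static void buf_ioend_async(struct buf *bp)
    {
        if (buf_ioend(bp))
            printf("release buffer\n");
    }

    int main(void)
    {
        struct buf b = { .error = -5, .retries = 1 };

        buf_ioend_async(&b);            /* resubmitted: not released */
        buf_ioend_async(&b);            /* complete: released */
        return 0;
    }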
@@ -1411,15 +1358,8 @@ xfs_buf_ioend_work(
 	struct xfs_buf		*bp =
 		container_of(work, struct xfs_buf, b_ioend_work);
 
-	xfs_buf_ioend(bp);
-}
-
-static void
-xfs_buf_ioend_async(
-	struct xfs_buf	*bp)
-{
-	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
-	queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
+	if (__xfs_buf_ioend(bp))
+		xfs_buf_relse(bp);
 }
 
 void
@@ -1491,7 +1431,13 @@ xfs_buf_bio_end_io(
 	    XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
 		xfs_buf_ioerror(bp, -EIO);
 
-	xfs_buf_ioend_async(bp);
+	if (bp->b_flags & XBF_ASYNC) {
+		INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
+		queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
+	} else {
+		complete(&bp->b_iowait);
+	}
+
 	bio_put(bio);
 }
 
@@ -1568,9 +1514,11 @@ xfs_buf_iowait(
 {
 	ASSERT(!(bp->b_flags & XBF_ASYNC));
 
-	trace_xfs_buf_iowait(bp, _RET_IP_);
-	wait_for_completion(&bp->b_iowait);
-	trace_xfs_buf_iowait_done(bp, _RET_IP_);
+	do {
+		trace_xfs_buf_iowait(bp, _RET_IP_);
+		wait_for_completion(&bp->b_iowait);
+		trace_xfs_buf_iowait_done(bp, _RET_IP_);
+	} while (!__xfs_buf_ioend(bp));
 
 	return bp->b_error;
 }
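The loop above pairs with the reinit_completion() added in the resubmit hunk
earlier: every resubmission re-arms b_iowait, and the synchronous waiter keeps
waiting until __xfs_buf_ioend() confirms no further I/O is outstanding. A
self-contained analogue using a POSIX semaphore as the completion (invented
names, compile with -pthread):

    #include <semaphore.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct buf {
        sem_t iowait;                   /* stands in for b_iowait */
        int resubmits;                  /* retries left to model */
        int error;
    };

    /* Each (re)submission eventually posts the completion exactly once. */
    static void submit(struct buf *bp)
    {
        sem_post(&bp->iowait);
    }

    /* false: the I/O was resubmitted, so the waiter must wait again. */
    static bool buf_ioend(struct buf *bp)
    {
        if (bp->resubmits > 0) {
            bp->resubmits--;
            submit(bp);                 /* models resubmit after re-arm */
            return false;
        }
        return true;
    }

    static int buf_iowait(struct buf *bp)
    {
        do {
            sem_wait(&bp->iowait);
        } while (!buf_ioend(bp));
        return bp->error;
    }

    int main(void)
    {
        struct buf b = { .resubmits = 2 };

        sem_init(&b.iowait, 0, 0);
        submit(&b);
        printf("error=%d\n", buf_iowait(&b));   /* rides out 2 retries */
        sem_destroy(&b.iowait);
        return 0;
    }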
@@ -1648,9 +1596,6 @@ xfs_buf_submit(
 	 */
 	bp->b_error = 0;
 
-	if (bp->b_flags & XBF_ASYNC)
-		xfs_buf_ioacct_inc(bp);
-
 	if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) {
 		xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE);
 		xfs_buf_ioend(bp);
@@ -1776,9 +1721,8 @@ xfs_buftarg_wait(
 	struct xfs_buftarg	*btp)
 {
 	/*
-	 * First wait on the buftarg I/O count for all in-flight buffers to be
-	 * released. This is critical as new buffers do not make the LRU until
-	 * they are released.
+	 * First wait for all in-flight readahead buffers to be released. This
+	 * is critical as new buffers do not make the LRU until they are
+	 * released.
 	 *
 	 * Next, flush the buffer workqueue to ensure all completion processing
 	 * has finished. Just waiting on buffer locks is not sufficient for
@@ -1787,7 +1731,7 @@ xfs_buftarg_wait(
 	 * all reference counts have been dropped before we start walking the
 	 * LRU list.
 	 */
-	while (percpu_counter_sum(&btp->bt_io_count))
+	while (percpu_counter_sum(&btp->bt_readahead_count))
 		delay(100);
 	flush_workqueue(btp->bt_mount->m_buf_workqueue);
 }
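With only readahead left to track, the unmount-side drain reduces to polling
the counter to zero and then flushing the completion workqueue; nothing can
increment it again once new I/O is fenced off. A trivial sketch of that
drain-then-flush ordering (C11 atomics in place of the percpu counter, all
names assumed):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_long readahead_count;         /* bt_readahead_count */

    static void flush_ioend_work(void)          /* workqueue flush stub */
    {
    }

    static void drain_readahead(void)
    {
        /* Poll to zero first: buffers only join the LRU on release. */
        while (atomic_load(&readahead_count) != 0)
            usleep(100 * 1000);                 /* mirrors delay(100) */
        /* Only then flush deferred completions and walk the LRU. */
        flush_ioend_work();
    }

    int main(void)
    {
        drain_readahead();
        puts("drained");
        return 0;
    }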
@@ -1904,8 +1848,8 @@ xfs_destroy_buftarg(
 	struct xfs_buftarg	*btp)
 {
 	shrinker_free(btp->bt_shrinker);
-	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
-	percpu_counter_destroy(&btp->bt_io_count);
+	ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
+	percpu_counter_destroy(&btp->bt_readahead_count);
 	list_lru_destroy(&btp->bt_lru);
 }
 
@@ -1959,7 +1903,7 @@ xfs_init_buftarg(
 
 	if (list_lru_init(&btp->bt_lru))
 		return -ENOMEM;
-	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
+	if (percpu_counter_init(&btp->bt_readahead_count, 0, GFP_KERNEL))
 		goto out_destroy_lru;
 
 	btp->bt_shrinker =
@@ -1973,7 +1917,7 @@ xfs_init_buftarg(
 	return 0;
 
 out_destroy_io_count:
-	percpu_counter_destroy(&btp->bt_io_count);
+	percpu_counter_destroy(&btp->bt_readahead_count);
 out_destroy_lru:
 	list_lru_destroy(&btp->bt_lru);
 	return -ENOMEM;