Diffstat (limited to 'fs/xfs/xfs_log.c')
-rw-r--r-- | fs/xfs/xfs_log.c | 377
1 files changed, 161 insertions, 216 deletions
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 3e5ba1ecc080..2fcd9ed5d075 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -560,7 +560,6 @@ xfs_log_done(
  */
 int
 xfs_log_notify(
-	struct xfs_mount	*mp,
 	struct xlog_in_core	*iclog,
 	xfs_log_callback_t	*cb)
 {
@@ -869,7 +868,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 		return 0;
 	}
 
-	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
+	error = xfs_log_force(mp, XFS_LOG_SYNC);
 	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
 
 #ifdef DEBUG
@@ -1149,7 +1148,7 @@ xlog_assign_tail_lsn_locked(
 	struct xfs_log_item	*lip;
 	xfs_lsn_t		tail_lsn;
 
-	assert_spin_locked(&mp->m_ail->xa_lock);
+	assert_spin_locked(&mp->m_ail->ail_lock);
 
 	/*
 	 * To make sure we always have a valid LSN for the log tail we keep
@@ -1172,9 +1171,9 @@ xlog_assign_tail_lsn(
 {
 	xfs_lsn_t		tail_lsn;
 
-	spin_lock(&mp->m_ail->xa_lock);
+	spin_lock(&mp->m_ail->ail_lock);
 	tail_lsn = xlog_assign_tail_lsn_locked(mp);
-	spin_unlock(&mp->m_ail->xa_lock);
+	spin_unlock(&mp->m_ail->ail_lock);
 
 	return tail_lsn;
 }
@@ -3304,269 +3303,215 @@ xlog_state_switch_iclogs(
  * not in the active nor dirty state.
  */
 int
-_xfs_log_force(
+xfs_log_force(
 	struct xfs_mount	*mp,
-	uint			flags,
-	int			*log_flushed)
+	uint			flags)
 {
 	struct xlog		*log = mp->m_log;
 	struct xlog_in_core	*iclog;
 	xfs_lsn_t		lsn;
 
 	XFS_STATS_INC(mp, xs_log_force);
+	trace_xfs_log_force(mp, 0, _RET_IP_);
 
 	xlog_cil_force(log);
 
 	spin_lock(&log->l_icloglock);
-
 	iclog = log->l_iclog;
-	if (iclog->ic_state & XLOG_STATE_IOERROR) {
-		spin_unlock(&log->l_icloglock);
-		return -EIO;
-	}
+	if (iclog->ic_state & XLOG_STATE_IOERROR)
+		goto out_error;
 
-	/* If the head iclog is not active nor dirty, we just attach
-	 * ourselves to the head and go to sleep.
-	 */
-	if (iclog->ic_state == XLOG_STATE_ACTIVE ||
-	    iclog->ic_state == XLOG_STATE_DIRTY) {
+	if (iclog->ic_state == XLOG_STATE_DIRTY ||
+	    (iclog->ic_state == XLOG_STATE_ACTIVE &&
+	     atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
 		/*
-		 * If the head is dirty or (active and empty), then
-		 * we need to look at the previous iclog.  If the previous
-		 * iclog is active or dirty we are done.  There is nothing
-		 * to sync out.  Otherwise, we attach ourselves to the
+		 * If the head is dirty or (active and empty), then we need to
+		 * look at the previous iclog.
+		 *
+		 * If the previous iclog is active or dirty we are done.  There
+		 * is nothing to sync out. Otherwise, we attach ourselves to the
 		 * previous iclog and go to sleep.
 		 */
-		if (iclog->ic_state == XLOG_STATE_DIRTY ||
-		    (atomic_read(&iclog->ic_refcnt) == 0
-		     && iclog->ic_offset == 0)) {
-			iclog = iclog->ic_prev;
-			if (iclog->ic_state == XLOG_STATE_ACTIVE ||
-			    iclog->ic_state == XLOG_STATE_DIRTY)
-				goto no_sleep;
-			else
-				goto maybe_sleep;
-		} else {
-			if (atomic_read(&iclog->ic_refcnt) == 0) {
-				/* We are the only one with access to this
-				 * iclog.  Flush it out now.  There should
-				 * be a roundoff of zero to show that someone
-				 * has already taken care of the roundoff from
-				 * the previous sync.
-				 */
-				atomic_inc(&iclog->ic_refcnt);
-				lsn = be64_to_cpu(iclog->ic_header.h_lsn);
-				xlog_state_switch_iclogs(log, iclog, 0);
-				spin_unlock(&log->l_icloglock);
+		iclog = iclog->ic_prev;
+		if (iclog->ic_state == XLOG_STATE_ACTIVE ||
+		    iclog->ic_state == XLOG_STATE_DIRTY)
+			goto out_unlock;
+	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+		if (atomic_read(&iclog->ic_refcnt) == 0) {
+			/*
+			 * We are the only one with access to this iclog.
+			 *
+			 * Flush it out now.  There should be a roundoff of zero
+			 * to show that someone has already taken care of the
+			 * roundoff from the previous sync.
+			 */
+			atomic_inc(&iclog->ic_refcnt);
+			lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+			xlog_state_switch_iclogs(log, iclog, 0);
+			spin_unlock(&log->l_icloglock);
 
-				if (xlog_state_release_iclog(log, iclog))
-					return -EIO;
-
-				if (log_flushed)
-					*log_flushed = 1;
-				spin_lock(&log->l_icloglock);
-				if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
-				    iclog->ic_state != XLOG_STATE_DIRTY)
-					goto maybe_sleep;
-				else
-					goto no_sleep;
-			} else {
-				/* Someone else is writing to this iclog.
-				 * Use its call to flush out the data.  However,
-				 * the other thread may not force out this LR,
-				 * so we mark it WANT_SYNC.
-				 */
-				xlog_state_switch_iclogs(log, iclog, 0);
-				goto maybe_sleep;
-			}
-		}
-	}
+			if (xlog_state_release_iclog(log, iclog))
+				return -EIO;
 
-	/* By the time we come around again, the iclog could've been filled
-	 * which would give it another lsn.  If we have a new lsn, just
-	 * return because the relevant data has been flushed.
-	 */
-maybe_sleep:
-	if (flags & XFS_LOG_SYNC) {
-		/*
-		 * We must check if we're shutting down here, before
-		 * we wait, while we're holding the l_icloglock.
-		 * Then we check again after waking up, in case our
-		 * sleep was disturbed by a bad news.
-		 */
-		if (iclog->ic_state & XLOG_STATE_IOERROR) {
-			spin_unlock(&log->l_icloglock);
-			return -EIO;
+			spin_lock(&log->l_icloglock);
+			if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn ||
+			    iclog->ic_state == XLOG_STATE_DIRTY)
+				goto out_unlock;
+		} else {
+			/*
+			 * Someone else is writing to this iclog.
+			 *
+			 * Use its call to flush out the data.  However, the
+			 * other thread may not force out this LR, so we mark
+			 * it WANT_SYNC.
+			 */
+			xlog_state_switch_iclogs(log, iclog, 0);
 		}
-		XFS_STATS_INC(mp, xs_log_force_sleep);
-		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+	} else {
 		/*
-		 * No need to grab the log lock here since we're
-		 * only deciding whether or not to return EIO
-		 * and the memory read should be atomic.
+		 * If the head iclog is not active nor dirty, we just attach
+		 * ourselves to the head and go to sleep if necessary.
 		 */
-		if (iclog->ic_state & XLOG_STATE_IOERROR)
-			return -EIO;
-	} else {
-
-no_sleep:
-		spin_unlock(&log->l_icloglock);
+		;
 	}
+
+	if (!(flags & XFS_LOG_SYNC))
+		goto out_unlock;
+
+	if (iclog->ic_state & XLOG_STATE_IOERROR)
+		goto out_error;
+	XFS_STATS_INC(mp, xs_log_force_sleep);
+	xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+	if (iclog->ic_state & XLOG_STATE_IOERROR)
+		return -EIO;
 	return 0;
-}
 
-/*
- * Wrapper for _xfs_log_force(), to be used when caller doesn't care
- * about errors or whether the log was flushed or not.  This is the normal
- * interface to use when trying to unpin items or move the log forward.
- */
-void
-xfs_log_force(
-	xfs_mount_t	*mp,
-	uint		flags)
-{
-	trace_xfs_log_force(mp, 0, _RET_IP_);
-	_xfs_log_force(mp, flags, NULL);
+out_unlock:
+	spin_unlock(&log->l_icloglock);
+	return 0;
+out_error:
+	spin_unlock(&log->l_icloglock);
+	return -EIO;
 }
 
-/*
- * Force the in-core log to disk for a specific LSN.
- *
- * Find in-core log with lsn.
- *	If it is in the DIRTY state, just return.
- *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
- *		state and go to sleep or return.
- *	If it is in any other state, go to sleep or return.
- *
- * Synchronous forces are implemented with a signal variable. All callers
- * to force a given lsn to disk will wait on a the sv attached to the
- * specific in-core log.  When given in-core log finally completes its
- * write to disk, that thread will wake up all threads waiting on the
- * sv.
- */
-int
-_xfs_log_force_lsn(
+static int
+__xfs_log_force_lsn(
 	struct xfs_mount	*mp,
 	xfs_lsn_t		lsn,
 	uint			flags,
-	int			*log_flushed)
+	int			*log_flushed,
+	bool			already_slept)
 {
 	struct xlog		*log = mp->m_log;
 	struct xlog_in_core	*iclog;
-	int			already_slept = 0;
-
-	ASSERT(lsn != 0);
-
-	XFS_STATS_INC(mp, xs_log_force);
-
-	lsn = xlog_cil_force_lsn(log, lsn);
-	if (lsn == NULLCOMMITLSN)
-		return 0;
 
-try_again:
 	spin_lock(&log->l_icloglock);
 	iclog = log->l_iclog;
-	if (iclog->ic_state & XLOG_STATE_IOERROR) {
-		spin_unlock(&log->l_icloglock);
-		return -EIO;
-	}
+	if (iclog->ic_state & XLOG_STATE_IOERROR)
+		goto out_error;
 
-	do {
-		if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
-			iclog = iclog->ic_next;
-			continue;
-		}
+	while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
+		iclog = iclog->ic_next;
+		if (iclog == log->l_iclog)
+			goto out_unlock;
+	}
 
-		if (iclog->ic_state == XLOG_STATE_DIRTY) {
-			spin_unlock(&log->l_icloglock);
-			return 0;
-		}
+	if (iclog->ic_state == XLOG_STATE_DIRTY)
+		goto out_unlock;
 
-		if (iclog->ic_state == XLOG_STATE_ACTIVE) {
-			/*
-			 * We sleep here if we haven't already slept (e.g.
-			 * this is the first time we've looked at the correct
-			 * iclog buf) and the buffer before us is going to
-			 * be sync'ed. The reason for this is that if we
-			 * are doing sync transactions here, by waiting for
-			 * the previous I/O to complete, we can allow a few
-			 * more transactions into this iclog before we close
-			 * it down.
-			 *
-			 * Otherwise, we mark the buffer WANT_SYNC, and bump
-			 * up the refcnt so we can release the log (which
-			 * drops the ref count).  The state switch keeps new
-			 * transaction commits from using this buffer.  When
-			 * the current commits finish writing into the buffer,
-			 * the refcount will drop to zero and the buffer will
-			 * go out then.
-			 */
-			if (!already_slept &&
-			    (iclog->ic_prev->ic_state &
-			     (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
-				ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
+	if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+		/*
+		 * We sleep here if we haven't already slept (e.g. this is the
+		 * first time we've looked at the correct iclog buf) and the
+		 * buffer before us is going to be sync'ed.  The reason for this
+		 * is that if we are doing sync transactions here, by waiting
+		 * for the previous I/O to complete, we can allow a few more
+		 * transactions into this iclog before we close it down.
+		 *
+		 * Otherwise, we mark the buffer WANT_SYNC, and bump up the
+		 * refcnt so we can release the log (which drops the ref count).
+		 * The state switch keeps new transaction commits from using
+		 * this buffer.  When the current commits finish writing into
+		 * the buffer, the refcount will drop to zero and the buffer
+		 * will go out then.
+		 */
+		if (!already_slept &&
+		    (iclog->ic_prev->ic_state &
+		     (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
+			ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
 
-				XFS_STATS_INC(mp, xs_log_force_sleep);
+			XFS_STATS_INC(mp, xs_log_force_sleep);
 
-				xlog_wait(&iclog->ic_prev->ic_write_wait,
-							&log->l_icloglock);
-				already_slept = 1;
-				goto try_again;
-			}
-			atomic_inc(&iclog->ic_refcnt);
-			xlog_state_switch_iclogs(log, iclog, 0);
-			spin_unlock(&log->l_icloglock);
-			if (xlog_state_release_iclog(log, iclog))
-				return -EIO;
-			if (log_flushed)
-				*log_flushed = 1;
-			spin_lock(&log->l_icloglock);
+			xlog_wait(&iclog->ic_prev->ic_write_wait,
+					&log->l_icloglock);
+			return -EAGAIN;
 		}
+		atomic_inc(&iclog->ic_refcnt);
+		xlog_state_switch_iclogs(log, iclog, 0);
+		spin_unlock(&log->l_icloglock);
+		if (xlog_state_release_iclog(log, iclog))
+			return -EIO;
+		if (log_flushed)
+			*log_flushed = 1;
+		spin_lock(&log->l_icloglock);
+	}
 
-		if ((flags & XFS_LOG_SYNC) && /* sleep */
-		    !(iclog->ic_state &
-		      (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
-			/*
-			 * Don't wait on completion if we know that we've
-			 * gotten a log write error.
-			 */
-			if (iclog->ic_state & XLOG_STATE_IOERROR) {
-				spin_unlock(&log->l_icloglock);
-				return -EIO;
-			}
-			XFS_STATS_INC(mp, xs_log_force_sleep);
-			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
-			/*
-			 * No need to grab the log lock here since we're
-			 * only deciding whether or not to return EIO
-			 * and the memory read should be atomic.
-			 */
-			if (iclog->ic_state & XLOG_STATE_IOERROR)
-				return -EIO;
-		} else {		/* just return */
-			spin_unlock(&log->l_icloglock);
-		}
+	if (!(flags & XFS_LOG_SYNC) ||
+	    (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY)))
+		goto out_unlock;
 
-		return 0;
-	} while (iclog != log->l_iclog);
+	if (iclog->ic_state & XLOG_STATE_IOERROR)
+		goto out_error;
+
+	XFS_STATS_INC(mp, xs_log_force_sleep);
+	xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+	if (iclog->ic_state & XLOG_STATE_IOERROR)
+		return -EIO;
+	return 0;
 
+out_unlock:
 	spin_unlock(&log->l_icloglock);
 	return 0;
+out_error:
+	spin_unlock(&log->l_icloglock);
+	return -EIO;
 }
 
 /*
- * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care
- * about errors or whether the log was flushed or not.  This is the normal
- * interface to use when trying to unpin items or move the log forward.
+ * Force the in-core log to disk for a specific LSN.
+ *
+ * Find in-core log with lsn.
+ *	If it is in the DIRTY state, just return.
+ *	If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
+ *		state and go to sleep or return.
+ *	If it is in any other state, go to sleep or return.
+ *
+ * Synchronous forces are implemented with a wait queue. All callers trying
+ * to force a given lsn to disk must wait on the queue attached to the
+ * specific in-core log. When given in-core log finally completes its write
+ * to disk, that thread will wake up all threads waiting on the queue.
  */
-void
+int
 xfs_log_force_lsn(
-	xfs_mount_t	*mp,
-	xfs_lsn_t	lsn,
-	uint		flags)
+	struct xfs_mount	*mp,
+	xfs_lsn_t		lsn,
+	uint			flags,
+	int			*log_flushed)
 {
+	int			ret;
+	ASSERT(lsn != 0);
+
+	XFS_STATS_INC(mp, xs_log_force);
 	trace_xfs_log_force(mp, lsn, _RET_IP_);
-	_xfs_log_force_lsn(mp, lsn, flags, NULL);
+
+	lsn = xlog_cil_force_lsn(mp->m_log, lsn);
+	if (lsn == NULLCOMMITLSN)
+		return 0;
+
+	ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, false);
+	if (ret == -EAGAIN)
+		ret = __xfs_log_force_lsn(mp, lsn, flags, log_flushed, true);
+	return ret;
 }
 
 /*
@@ -4035,7 +3980,7 @@ xfs_log_force_umount(
 	 * to guarantee this.
 	 */
 	if (!logerror)
-		_xfs_log_force(mp, XFS_LOG_SYNC, NULL);
+		xfs_log_force(mp, XFS_LOG_SYNC);
 
 	/*
 	 * mark the filesystem and the as in a shutdown state and wake
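For orientation, here is a minimal caller-side sketch, not part of the patch above: the wrapper function example_force_to_lsn() is hypothetical, invented only to show how a call site uses the merged force interfaces whose prototypes this change introduces (int xfs_log_force(mp, flags) and int xfs_log_force_lsn(mp, lsn, flags, log_flushed)).

/*
 * Illustrative caller sketch only; example_force_to_lsn() is hypothetical.
 * The xfs_log_force()/xfs_log_force_lsn() signatures are the ones added by
 * this diff.
 */
static int
example_force_to_lsn(
	struct xfs_mount	*mp,
	xfs_lsn_t		commit_lsn)
{
	int			log_flushed = 0;
	int			error;

	/* Previously: error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL); */
	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;

	/*
	 * Previously a _xfs_log_force_lsn()/xfs_log_force_lsn() pair; the
	 * single remaining function returns the error directly and, if the
	 * caller passes a pointer, reports whether an iclog write was issued.
	 */
	return xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, &log_flushed);
}

Note the retry handling also moves behind the interface: __xfs_log_force_lsn() returns -EAGAIN after sleeping on the previous iclog's ic_write_wait, and xfs_log_force_lsn() simply calls it a second time with already_slept set, replacing the old try_again label and goto loop.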