- // SPDX-License-Identifier: GPL-2.0
- /*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- */
- #include "xfs.h"
- #include "xfs_fs.h"
- #include "xfs_format.h"
- #include "xfs_log_format.h"
- #include "xfs_trans_resv.h"
- #include "xfs_bit.h"
- #include "xfs_sb.h"
- #include "xfs_mount.h"
- #include "xfs_trans.h"
- #include "xfs_buf_item.h"
- #include "xfs_trans_priv.h"
- #include "xfs_error.h"
- #include "xfs_trace.h"
- #include "xfs_log.h"
- #include "xfs_inode.h"
- kmem_zone_t *xfs_buf_item_zone;
- static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
- {
- return container_of(lip, struct xfs_buf_log_item, bli_item);
- }
- STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
- static inline int
- xfs_buf_log_format_size(
- struct xfs_buf_log_format *blfp)
- {
- return offsetof(struct xfs_buf_log_format, blf_data_map) +
- (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
- }
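- /*
- * Illustrative sizing, assuming 32-bit bitmap words: a format with
- * blf_map_size = 2 occupies offsetof(struct xfs_buf_log_format,
- * blf_data_map) + 2 * sizeof(__u32) bytes, i.e. the header plus only
- * the bitmap words actually in use.
- */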
- /*
- * This returns the number of log iovecs needed to log the
- * given buf log item.
- *
- * It calculates this as 1 iovec for the buf log format structure
- * and 1 for each stretch of non-contiguous chunks to be logged.
- * Contiguous chunks are logged in a single iovec.
- *
- * If the XFS_BLI_STALE flag has been set, then log nothing.
- */
- STATIC void
- xfs_buf_item_size_segment(
- struct xfs_buf_log_item *bip,
- struct xfs_buf_log_format *blfp,
- int *nvecs,
- int *nbytes)
- {
- struct xfs_buf *bp = bip->bli_buf;
- int next_bit;
- int last_bit;
- last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
- if (last_bit == -1)
- return;
- /*
- * initial count for a dirty buffer is 2 vectors - the format structure
- * and the first dirty region.
- */
- *nvecs += 2;
- *nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;
- while (last_bit != -1) {
- /*
- * This takes the bit number to start looking from and
- * returns the next set bit from there. It returns -1
- * if there are no more bits set or the start bit is
- * beyond the end of the bitmap.
- */
- next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
- last_bit + 1);
- /*
- * If we run out of bits, leave the loop,
- * else if we find a new set of bits bump the number of vecs,
- * else keep scanning the current set of bits.
- */
- if (next_bit == -1) {
- break;
- } else if (next_bit != last_bit + 1) {
- last_bit = next_bit;
- (*nvecs)++;
- } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
- (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
- XFS_BLF_CHUNK)) {
- last_bit = next_bit;
- (*nvecs)++;
- } else {
- last_bit++;
- }
- *nbytes += XFS_BLF_CHUNK;
- }
- }
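- /*
- * Worked example, assuming a memory-contiguous buffer: a dirty bitmap
- * with bits {0, 1, 3} set adds 3 iovecs (the format structure, one
- * region covering chunks 0-1, one covering chunk 3) and
- * xfs_buf_log_format_size(blfp) + 3 * XFS_BLF_CHUNK bytes.
- */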
- /*
- * This returns the number of log iovecs needed to log the given buf log item.
- *
- * It calculates this as 1 iovec for the buf log format structure and 1 for each
- * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
- * in a single iovec.
- *
- * Discontiguous buffers need a format structure per region that is being
- * logged. This makes the changes in the buffer appear to log recovery as though
- * they came from separate buffers, just like would occur if multiple buffers
- * were used instead of a single discontiguous buffer. This enables
- * discontiguous buffers to be in-memory constructs, completely transparent to
- * what ends up on disk.
- *
- * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
- * format structures.
- */
- STATIC void
- xfs_buf_item_size(
- struct xfs_log_item *lip,
- int *nvecs,
- int *nbytes)
- {
- struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- int i;
- ASSERT(atomic_read(&bip->bli_refcount) > 0);
- if (bip->bli_flags & XFS_BLI_STALE) {
- /*
- * The buffer is stale, so all we need to log
- * is the buf log format structure with the
- * cancel flag in it.
- */
- trace_xfs_buf_item_size_stale(bip);
- ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
- *nvecs += bip->bli_format_count;
- for (i = 0; i < bip->bli_format_count; i++) {
- *nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
- }
- return;
- }
- ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
- if (bip->bli_flags & XFS_BLI_ORDERED) {
- /*
- * The buffer has been logged just to order it.
- * It is not being included in the transaction
- * commit, so no vectors are used at all.
- */
- trace_xfs_buf_item_size_ordered(bip);
- *nvecs = XFS_LOG_VEC_ORDERED;
- return;
- }
- /*
- * the vector count is based on the number of buffer vectors we have
- * dirty bits in. This will only be greater than one when we have a
- * compound buffer with more than one segment dirty. Hence for compound
- * buffers we need to track which segment the dirty bits correspond to,
- * and when we move from one segment to the next increment the vector
- * count for the extra buf log format structure that will need to be
- * written.
- */
- for (i = 0; i < bip->bli_format_count; i++) {
- xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
- nvecs, nbytes);
- }
- trace_xfs_buf_item_size(bip);
- }
- static inline void
- xfs_buf_item_copy_iovec(
- struct xfs_log_vec *lv,
- struct xfs_log_iovec **vecp,
- struct xfs_buf *bp,
- uint offset,
- int first_bit,
- uint nbits)
- {
- offset += first_bit * XFS_BLF_CHUNK;
- xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
- xfs_buf_offset(bp, offset),
- nbits * XFS_BLF_CHUNK);
- }
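- /*
- * Detect whether the chunk at next_bit is memory-adjacent to the chunk
- * at last_bit. Buffer pages are not guaranteed to be virtually
- * contiguous, so an iovec must not span such a discontinuity; callers
- * start a new iovec whenever this returns true.
- */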
- static inline bool
- xfs_buf_item_straddle(
- struct xfs_buf *bp,
- uint offset,
- int next_bit,
- int last_bit)
- {
- return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
- (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
- XFS_BLF_CHUNK);
- }
- static void
- xfs_buf_item_format_segment(
- struct xfs_buf_log_item *bip,
- struct xfs_log_vec *lv,
- struct xfs_log_iovec **vecp,
- uint offset,
- struct xfs_buf_log_format *blfp)
- {
- struct xfs_buf *bp = bip->bli_buf;
- uint base_size;
- int first_bit;
- int last_bit;
- int next_bit;
- uint nbits;
- /* copy the flags across from the base format item */
- blfp->blf_flags = bip->__bli_format.blf_flags;
- /*
- * Base size is the actual size of the ondisk structure - it reflects
- * the actual size of the dirty bitmap rather than the size of the in
- * memory structure.
- */
- base_size = xfs_buf_log_format_size(blfp);
- first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
- if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
- /*
- * If the map is not dirty in the transaction, there is
- * nothing to log; return without advancing the vector pointer.
- */
- return;
- }
- blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
- blfp->blf_size = 1;
- if (bip->bli_flags & XFS_BLI_STALE) {
- /*
- * The buffer is stale, so all we need to log
- * is the buf log format structure with the
- * cancel flag in it.
- */
- trace_xfs_buf_item_format_stale(bip);
- ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
- return;
- }
- /*
- * Fill in an iovec for each set of contiguous chunks.
- */
- last_bit = first_bit;
- nbits = 1;
- for (;;) {
- /*
- * This takes the bit number to start looking from and
- * returns the next set bit from there. It returns -1
- * if there are no more bits set or the start bit is
- * beyond the end of the bitmap.
- */
- next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
- (uint)last_bit + 1);
- /*
- * If we run out of bits fill in the last iovec and get out of
- * the loop. Else if we start a new set of bits then fill in
- * the iovec for the series we were looking at and start
- * counting the bits in the new one. Else we're still in the
- * same set of bits so just keep counting and scanning.
- */
- if (next_bit == -1) {
- xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
- first_bit, nbits);
- blfp->blf_size++;
- break;
- } else if (next_bit != last_bit + 1 ||
- xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
- xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
- first_bit, nbits);
- blfp->blf_size++;
- first_bit = next_bit;
- last_bit = next_bit;
- nbits = 1;
- } else {
- last_bit++;
- nbits++;
- }
- }
- }
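- /*
- * Example, assuming a memory-contiguous segment: dirty chunks {0, 1, 3}
- * yield three iovecs - the format structure, a copy of chunks 0-1 and a
- * copy of chunk 3 - leaving blfp->blf_size = 3.
- */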
- /*
- * This is called to fill in the vector of log iovecs for the
- * given log buf item. It fills the first entry with a buf log
- * format structure, and the rest point to contiguous chunks
- * within the buffer.
- */
- STATIC void
- xfs_buf_item_format(
- struct xfs_log_item *lip,
- struct xfs_log_vec *lv)
- {
- struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- struct xfs_buf *bp = bip->bli_buf;
- struct xfs_log_iovec *vecp = NULL;
- uint offset = 0;
- int i;
- ASSERT(atomic_read(&bip->bli_refcount) > 0);
- ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
- (bip->bli_flags & XFS_BLI_STALE));
- ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
- (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
- && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
- ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
- (bip->bli_flags & XFS_BLI_STALE));
- /*
- * If it is an inode buffer, transfer the in-memory state to the
- * format flags and clear the in-memory state.
- *
- * For buffer based inode allocation, we do not transfer
- * this state if the inode buffer allocation has not yet been committed
- * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
- * correct replay of the inode allocation.
- *
- * For icreate item based inode allocation, the buffers aren't written
- * to the journal during allocation, and hence we should always tag the
- * buffer as an inode buffer so that the correct unlinked list replay
- * occurs during recovery.
- */
- if (bip->bli_flags & XFS_BLI_INODE_BUF) {
- if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
- !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
- xfs_log_item_in_current_chkpt(lip)))
- bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
- bip->bli_flags &= ~XFS_BLI_INODE_BUF;
- }
- for (i = 0; i < bip->bli_format_count; i++) {
- xfs_buf_item_format_segment(bip, lv, &vecp, offset,
- &bip->bli_formats[i]);
- offset += BBTOB(bp->b_maps[i].bm_len);
- }
- /*
- * Check to make sure everything is consistent.
- */
- trace_xfs_buf_item_format(bip);
- }
- /*
- * This is called to pin the buffer associated with the buf log item in memory
- * so it cannot be written out.
- *
- * We also always take a reference to the buffer log item here so that the bli
- * is held while the item is pinned in memory. This means that we can
- * unconditionally drop the reference count a transaction holds when the
- * transaction is completed.
- */
- STATIC void
- xfs_buf_item_pin(
- struct xfs_log_item *lip)
- {
- struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- ASSERT(atomic_read(&bip->bli_refcount) > 0);
- ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
- (bip->bli_flags & XFS_BLI_ORDERED) ||
- (bip->bli_flags & XFS_BLI_STALE));
- trace_xfs_buf_item_pin(bip);
- atomic_inc(&bip->bli_refcount);
- atomic_inc(&bip->bli_buf->b_pin_count);
- }
- /*
- * This is called to unpin the buffer associated with the buf log
- * item which was previously pinned with a call to xfs_buf_item_pin().
- *
- * Also drop the reference to the buf item for the current transaction.
- * If the XFS_BLI_STALE flag is set and we are the last reference,
- * then free up the buf log item and unlock the buffer.
- *
- * If the remove flag is set we are called from uncommit in the
- * forced-shutdown path. If that is true and the reference count on
- * the log item is going to drop to zero we need to free the item's
- * descriptor in the transaction.
- */
- STATIC void
- xfs_buf_item_unpin(
- struct xfs_log_item *lip,
- int remove)
- {
- struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- xfs_buf_t *bp = bip->bli_buf;
- struct xfs_ail *ailp = lip->li_ailp;
- int stale = bip->bli_flags & XFS_BLI_STALE;
- int freed;
- ASSERT(bp->b_log_item == bip);
- ASSERT(atomic_read(&bip->bli_refcount) > 0);
- trace_xfs_buf_item_unpin(bip);
- freed = atomic_dec_and_test(&bip->bli_refcount);
- if (atomic_dec_and_test(&bp->b_pin_count))
- wake_up_all(&bp->b_waiters);
- if (freed && stale) {
- ASSERT(bip->bli_flags & XFS_BLI_STALE);
- ASSERT(xfs_buf_islocked(bp));
- ASSERT(bp->b_flags & XBF_STALE);
- ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
- trace_xfs_buf_item_unpin_stale(bip);
- if (remove) {
- /*
- * If we are in a transaction context, we have to
- * remove the log item from the transaction as we are
- * about to release our reference to the buffer. If we
- * don't, the unlock that occurs later in
- * xfs_trans_uncommit() will try to reference the
- * buffer which we no longer have a hold on.
- */
- if (!list_empty(&lip->li_trans))
- xfs_trans_del_item(lip);
- /*
- * Since the transaction no longer refers to the buffer,
- * the buffer should no longer refer to the transaction.
- */
- bp->b_transp = NULL;
- }
- /*
- * If we get called here because of an IO error, we may
- * or may not have the item on the AIL. xfs_trans_ail_delete()
- * will take care of that situation.
- * xfs_trans_ail_delete() drops the AIL lock.
- */
- if (bip->bli_flags & XFS_BLI_STALE_INODE) {
- xfs_buf_do_callbacks(bp);
- bp->b_log_item = NULL;
- list_del_init(&bp->b_li_list);
- bp->b_iodone = NULL;
- } else {
- spin_lock(&ailp->ail_lock);
- xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
- xfs_buf_item_relse(bp);
- ASSERT(bp->b_log_item == NULL);
- }
- xfs_buf_relse(bp);
- } else if (freed && remove) {
- /*
- * There are currently two references to the buffer - the active
- * LRU reference and the buf log item. What we are about to do
- * here - simulate a failed IO completion - requires 3
- * references.
- *
- * The LRU reference is removed by the xfs_buf_stale() call. The
- * buf item reference is removed by the xfs_buf_iodone()
- * callback that is run by xfs_buf_do_callbacks() during ioend
- * processing (via the bp->b_iodone callback), and then finally
- * the ioend processing will drop the IO reference if the buffer
- * is marked XBF_ASYNC.
- *
- * Hence we need to take an additional reference here so that IO
- * completion processing doesn't free the buffer prematurely.
- */
- xfs_buf_lock(bp);
- xfs_buf_hold(bp);
- bp->b_flags |= XBF_ASYNC;
- xfs_buf_ioerror(bp, -EIO);
- bp->b_flags &= ~XBF_DONE;
- xfs_buf_stale(bp);
- xfs_buf_ioend(bp);
- }
- }
- /*
- * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
- * seconds so as not to spam the logs on repeated detection of the same
- * bad buffer.
- */
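- /*
- * With DEFINE_RATELIMIT_STATE(name, interval, burst), ___ratelimit()
- * returns nonzero when the event should be allowed through, so the
- * warning in xfs_buf_item_push() below fires at most 10 times in any
- * 30 second (30 * HZ jiffies) window.
- */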
- static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
- STATIC uint
- xfs_buf_item_push(
- struct xfs_log_item *lip,
- struct list_head *buffer_list)
- {
- struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- struct xfs_buf *bp = bip->bli_buf;
- uint rval = XFS_ITEM_SUCCESS;
- if (xfs_buf_ispinned(bp))
- return XFS_ITEM_PINNED;
- if (!xfs_buf_trylock(bp)) {
- /*
- * If we have just raced with a buffer being pinned and it has
- * been marked stale, we could end up stalling until someone else
- * issues a log force to unpin the stale buffer. Check for the
- * race condition here so xfsaild recognizes the buffer is pinned
- * and queues a log force to move it along.
- */
- if (xfs_buf_ispinned(bp))
- return XFS_ITEM_PINNED;
- return XFS_ITEM_LOCKED;
- }
- ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- trace_xfs_buf_item_push(bip);
- /* has a previous flush failed due to IO errors? */
- if ((bp->b_flags & XBF_WRITE_FAIL) &&
- ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
- xfs_warn(bp->b_target->bt_mount,
- "Failing async write on buffer block 0x%llx. Retrying async write.",
- (long long)bp->b_bn);
- }
- if (!xfs_buf_delwri_queue(bp, buffer_list))
- rval = XFS_ITEM_FLUSHING;
- xfs_buf_unlock(bp);
- return rval;
- }
- /*
- * Drop the buffer log item refcount and take appropriate action. This helper
- * determines whether the bli must be freed or not, since a decrement to zero
- * does not necessarily mean the bli is unused.
- *
- * Return true if the bli is freed, false otherwise.
- */
- bool
- xfs_buf_item_put(
- struct xfs_buf_log_item *bip)
- {
- struct xfs_log_item *lip = &bip->bli_item;
- bool aborted;
- bool dirty;
- /* drop the bli ref and return if it wasn't the last one */
- if (!atomic_dec_and_test(&bip->bli_refcount))
- return false;
- /*
- * We dropped the last ref and must free the item if clean or aborted.
- * If the bli is dirty and non-aborted, the buffer was clean in the
- * transaction but still awaiting writeback from previous changes. In
- * that case, the bli is freed on buffer writeback completion.
- */
- aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
- XFS_FORCED_SHUTDOWN(lip->li_mountp);
- dirty = bip->bli_flags & XFS_BLI_DIRTY;
- if (dirty && !aborted)
- return false;
- /*
- * The bli is aborted or clean. An aborted item may be in the AIL
- * regardless of dirty state. For example, consider an aborted
- * transaction that invalidated a dirty bli and cleared the dirty
- * state.
- */
- if (aborted)
- xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
- xfs_buf_item_relse(bip->bli_buf);
- return true;
- }
- /*
- * Release the buffer associated with the buf log item. If there is no dirty
- * logged data associated with the buffer recorded in the buf log item, then
- * free the buf log item and remove the reference to it in the buffer.
- *
- * This call ignores the recursion count. It is only called when the buffer
- * should REALLY be unlocked, regardless of the recursion count.
- *
- * We unconditionally drop the transaction's reference to the log item. If the
- * item was logged, then another reference was taken when it was pinned, so we
- * can safely drop the transaction reference now. This also allows us to avoid
- * potential races with the unpin code freeing the bli by not referencing the
- * bli after we've dropped the reference count.
- *
- * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
- * if necessary but do not unlock the buffer. This is for support of
- * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD flag is cleared if we don't
- * free the item.
- */
- STATIC void
- xfs_buf_item_unlock(
- struct xfs_log_item *lip)
- {
- struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- struct xfs_buf *bp = bip->bli_buf;
- bool released;
- bool hold = bip->bli_flags & XFS_BLI_HOLD;
- bool stale = bip->bli_flags & XFS_BLI_STALE;
- #if defined(DEBUG) || defined(XFS_WARN)
- bool ordered = bip->bli_flags & XFS_BLI_ORDERED;
- bool dirty = bip->bli_flags & XFS_BLI_DIRTY;
- #endif
- trace_xfs_buf_item_unlock(bip);
- /*
- * The bli dirty state should match whether the blf has logged segments
- * except for ordered buffers, where only the bli should be dirty.
- */
- ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
- (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
- ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
- /*
- * Clear the buffer's association with this transaction and
- * per-transaction state from the bli, which has been copied above.
- */
- bp->b_transp = NULL;
- bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
- /*
- * Unref the item and unlock the buffer unless held or stale. Stale
- * buffers remain locked until final unpin unless the bli is freed by
- * the unref call. The latter implies shutdown because buffer
- * invalidation dirties the bli and transaction.
- */
- released = xfs_buf_item_put(bip);
- if (hold || (stale && !released))
- return;
- ASSERT(!stale || test_bit(XFS_LI_ABORTED, &lip->li_flags));
- xfs_buf_relse(bp);
- }
- /*
- * This is called to find out where the oldest active copy of the
- * buf log item in the on disk log resides now that the last log
- * write of it completed at the given lsn.
- * We always re-log all the dirty data in a buffer, so usually the
- * latest copy in the on disk log is the only one that matters. For
- * those cases we simply return the given lsn.
- *
- * The one exception to this is for buffers full of newly allocated
- * inodes. These buffers are only relogged with the XFS_BLI_INODE_BUF
- * flag set, indicating that only the di_next_unlinked fields from the
- * inodes in the buffers will be replayed during recovery. If the
- * original newly allocated inode images have not yet been flushed
- * when the buffer is so relogged, then we need to make sure that we
- * keep the old images in the 'active' portion of the log. We do this
- * by returning the original lsn of that transaction here rather than
- * the current one.
- */
- STATIC xfs_lsn_t
- xfs_buf_item_committed(
- struct xfs_log_item *lip,
- xfs_lsn_t lsn)
- {
- struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- trace_xfs_buf_item_committed(bip);
- if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
- return lip->li_lsn;
- return lsn;
- }
- STATIC void
- xfs_buf_item_committing(
- struct xfs_log_item *lip,
- xfs_lsn_t commit_lsn)
- {
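- /* Intentionally a no-op: buf log items have nothing to do at commit time. */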
- }
- /*
- * This is the ops vector shared by all buf log items.
- */
- static const struct xfs_item_ops xfs_buf_item_ops = {
- .iop_size = xfs_buf_item_size,
- .iop_format = xfs_buf_item_format,
- .iop_pin = xfs_buf_item_pin,
- .iop_unpin = xfs_buf_item_unpin,
- .iop_unlock = xfs_buf_item_unlock,
- .iop_committed = xfs_buf_item_committed,
- .iop_push = xfs_buf_item_push,
- .iop_committing = xfs_buf_item_committing
- };
- STATIC int
- xfs_buf_item_get_format(
- struct xfs_buf_log_item *bip,
- int count)
- {
- ASSERT(bip->bli_formats == NULL);
- bip->bli_format_count = count;
- if (count == 1) {
- bip->bli_formats = &bip->__bli_format;
- return 0;
- }
- bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
- KM_SLEEP);
- if (!bip->bli_formats)
- return -ENOMEM;
- return 0;
- }
- STATIC void
- xfs_buf_item_free_format(
- struct xfs_buf_log_item *bip)
- {
- if (bip->bli_formats != &bip->__bli_format) {
- kmem_free(bip->bli_formats);
- bip->bli_formats = NULL;
- }
- }
- /*
- * Allocate a new buf log item to go with the given buffer.
- * Set the buffer's b_log_item field to point to the new
- * buf log item.
- */
- int
- xfs_buf_item_init(
- struct xfs_buf *bp,
- struct xfs_mount *mp)
- {
- struct xfs_buf_log_item *bip = bp->b_log_item;
- int chunks;
- int map_size;
- int error;
- int i;
- /*
- * Check to see if there is already a buf log item for
- * this buffer. If we do already have one, there is
- * nothing to do here so return.
- */
- ASSERT(bp->b_target->bt_mount == mp);
- if (bip) {
- ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
- ASSERT(!bp->b_transp);
- ASSERT(bip->bli_buf == bp);
- return 0;
- }
- bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
- xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
- bip->bli_buf = bp;
- /*
- * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
- * can be divided into. Make sure not to truncate any pieces.
- * map_size is the size of the bitmap needed to describe the
- * chunks of the buffer.
- *
- * Discontiguous buffer support follows the layout of the underlying
- * buffer. This makes the implementation as simple as possible.
- */
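- /*
- * For example, assuming 512 byte basic blocks, XFS_BLF_CHUNK = 128 and
- * NBWORD = 32: a single-map 4k buffer has BBTOB(bm_len) = 4096, so
- * chunks = 32 and map_size = 1 bitmap word.
- */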
- error = xfs_buf_item_get_format(bip, bp->b_map_count);
- ASSERT(error == 0);
- if (error) { /* to stop gcc throwing set-but-unused warnings */
- kmem_zone_free(xfs_buf_item_zone, bip);
- return error;
- }
- for (i = 0; i < bip->bli_format_count; i++) {
- chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
- XFS_BLF_CHUNK);
- map_size = DIV_ROUND_UP(chunks, NBWORD);
- bip->bli_formats[i].blf_type = XFS_LI_BUF;
- bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
- bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
- bip->bli_formats[i].blf_map_size = map_size;
- }
- bp->b_log_item = bip;
- xfs_buf_hold(bp);
- return 0;
- }
- /*
- * Mark bytes first through last inclusive as dirty in the buf
- * item's bitmap.
- */
- static void
- xfs_buf_item_log_segment(
- uint first,
- uint last,
- uint *map)
- {
- uint first_bit;
- uint last_bit;
- uint bits_to_set;
- uint bits_set;
- uint word_num;
- uint *wordp;
- uint bit;
- uint end_bit;
- uint mask;
- /*
- * Convert byte offsets to bit numbers.
- */
- first_bit = first >> XFS_BLF_SHIFT;
- last_bit = last >> XFS_BLF_SHIFT;
- /*
- * Calculate the total number of bits to be set.
- */
- bits_to_set = last_bit - first_bit + 1;
- /*
- * Get a pointer to the first word in the bitmap
- * to set a bit in.
- */
- word_num = first_bit >> BIT_TO_WORD_SHIFT;
- wordp = &map[word_num];
- /*
- * Calculate the starting bit in the first word.
- */
- bit = first_bit & (uint)(NBWORD - 1);
- /*
- * First set any bits in the first word of our range.
- * If it starts at bit 0 of the word, it will be
- * set below rather than here. That is what the variable
- * bit tells us. The variable bits_set tracks the number
- * of bits that have been set so far. End_bit is the number
- * of the last bit to be set in this word plus one.
- */
- if (bit) {
- end_bit = min(bit + bits_to_set, (uint)NBWORD);
- mask = ((1U << (end_bit - bit)) - 1) << bit;
- *wordp |= mask;
- wordp++;
- bits_set = end_bit - bit;
- } else {
- bits_set = 0;
- }
- /*
- * Now set bits a whole word at a time that are between
- * first_bit and last_bit.
- */
- while ((bits_to_set - bits_set) >= NBWORD) {
- *wordp |= 0xffffffff;
- bits_set += NBWORD;
- wordp++;
- }
- /*
- * Finally, set any bits left to be set in one last partial word.
- */
- end_bit = bits_to_set - bits_set;
- if (end_bit) {
- mask = (1U << end_bit) - 1;
- *wordp |= mask;
- }
- }
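- /*
- * Worked example, assuming XFS_BLF_SHIFT = 7 (128 byte chunks) and
- * NBWORD = 32: logging bytes 256-639 gives first_bit = 2, last_bit = 4,
- * bits_to_set = 3 and bit = 2, so end_bit = 5 and the only mask applied
- * is ((1U << 3) - 1) << 2 = 0x1c, setting bits 2-4 of map[0].
- */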
- /*
- * Mark bytes first through last inclusive as dirty in the buf
- * item's bitmap.
- */
- void
- xfs_buf_item_log(
- struct xfs_buf_log_item *bip,
- uint first,
- uint last)
- {
- int i;
- uint start;
- uint end;
- struct xfs_buf *bp = bip->bli_buf;
- /*
- * walk each buffer segment and mark them dirty appropriately.
- */
- start = 0;
- for (i = 0; i < bip->bli_format_count; i++) {
- if (start > last)
- break;
- end = start + BBTOB(bp->b_maps[i].bm_len) - 1;
- /* skip to the map that includes the first byte to log */
- if (first > end) {
- start += BBTOB(bp->b_maps[i].bm_len);
- continue;
- }
- /*
- * Trim the range to this segment and mark it in the bitmap.
- * Note that we must convert buffer offsets to segment relative
- * offsets (e.g., the first byte of each segment is byte 0 of
- * that segment).
- */
- if (first < start)
- first = start;
- if (end > last)
- end = last;
- xfs_buf_item_log_segment(first - start, end - start,
- &bip->bli_formats[i].blf_data_map[0]);
- start += BBTOB(bp->b_maps[i].bm_len);
- }
- }
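- /*
- * Example, assuming two 4k segments: xfs_buf_item_log(bip, 4000, 4200)
- * dirties bytes 4000-4095 in segment 0's bitmap and, after conversion
- * to segment-relative offsets, bytes 0-104 in segment 1's bitmap.
- */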
- /*
- * Return true if the buffer has any ranges logged/dirtied by a transaction,
- * false otherwise.
- */
- bool
- xfs_buf_item_dirty_format(
- struct xfs_buf_log_item *bip)
- {
- int i;
- for (i = 0; i < bip->bli_format_count; i++) {
- if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
- bip->bli_formats[i].blf_map_size))
- return true;
- }
- return false;
- }
- STATIC void
- xfs_buf_item_free(
- struct xfs_buf_log_item *bip)
- {
- xfs_buf_item_free_format(bip);
- kmem_free(bip->bli_item.li_lv_shadow);
- kmem_zone_free(xfs_buf_item_zone, bip);
- }
- /*
- * This is called when the buf log item is no longer needed. It should
- * free the buf log item associated with the given buffer and clear
- * the buffer's pointer to the buf log item. If there are no more
- * items in the list, clear the b_iodone field of the buffer (see
- * xfs_buf_attach_iodone() below).
- */
- void
- xfs_buf_item_relse(
- xfs_buf_t *bp)
- {
- struct xfs_buf_log_item *bip = bp->b_log_item;
- trace_xfs_buf_item_relse(bp, _RET_IP_);
- ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
- bp->b_log_item = NULL;
- if (list_empty(&bp->b_li_list))
- bp->b_iodone = NULL;
- xfs_buf_rele(bp);
- xfs_buf_item_free(bip);
- }
- /*
- * Add the given log item with its callback to the list of callbacks
- * to be called when the buffer's I/O completes. If it is not set
- * already, set the buffer's b_iodone() routine to be
- * xfs_buf_iodone_callbacks() and link the log item into the list of
- * items rooted at b_li_list.
- */
- void
- xfs_buf_attach_iodone(
- xfs_buf_t *bp,
- void (*cb)(xfs_buf_t *, xfs_log_item_t *),
- xfs_log_item_t *lip)
- {
- ASSERT(xfs_buf_islocked(bp));
- lip->li_cb = cb;
- list_add_tail(&lip->li_bio_list, &bp->b_li_list);
- ASSERT(bp->b_iodone == NULL ||
- bp->b_iodone == xfs_buf_iodone_callbacks);
- bp->b_iodone = xfs_buf_iodone_callbacks;
- }
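- /*
- * A typical caller attaches the log item's flush completion handler,
- * e.g. inode flushing does:
- *
- *     xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
- *
- * so that xfs_buf_do_callbacks() runs xfs_iflush_done() once buffer
- * IO completes.
- */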
- /*
- * We can have many callbacks on a buffer. Running the callbacks individually
- * can cause a lot of contention on the AIL lock, so we allow for a single
- * callback to be able to scan the remaining items in bp->b_li_list for other
- * items of the same type and callback to be processed in the first call.
- *
- * As a result, the loop walking the callback list below will also modify the
- * list: it removes the first item from the list and then runs the callback.
- * The loop then restarts from the new first item in the list. This allows the
- * callback to scan and modify the list attached to the buffer and we don't
- * have to care about maintaining a next item pointer.
- */
- STATIC void
- xfs_buf_do_callbacks(
- struct xfs_buf *bp)
- {
- struct xfs_buf_log_item *blip = bp->b_log_item;
- struct xfs_log_item *lip;
- /* If there is a buf_log_item attached, run its callback */
- if (blip) {
- lip = &blip->bli_item;
- lip->li_cb(bp, lip);
- }
- while (!list_empty(&bp->b_li_list)) {
- lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
- li_bio_list);
- /*
- * Remove the item from the list, so we don't have any
- * confusion if the item is added to another buf.
- * Don't touch the log item after calling its
- * callback, because it could have freed itself.
- */
- list_del_init(&lip->li_bio_list);
- lip->li_cb(bp, lip);
- }
- }
- /*
- * Invoke the error state callback for each log item affected by the failed I/O.
- *
- * If a metadata buffer write fails with a non-permanent error, the buffer is
- * eventually resubmitted and so the completion callbacks are not run. The error
- * state may need to be propagated to the log items attached to the buffer,
- * however, so the next AIL push of the item knows how to handle it correctly.
- */
- STATIC void
- xfs_buf_do_callbacks_fail(
- struct xfs_buf *bp)
- {
- struct xfs_log_item *lip;
- struct xfs_ail *ailp;
- /*
- * Buffer log item errors are handled directly by xfs_buf_item_push()
- * and xfs_buf_iodone_callback_error, and they have no IO error
- * callbacks. Check only for items in b_li_list.
- */
- if (list_empty(&bp->b_li_list))
- return;
- lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
- li_bio_list);
- ailp = lip->li_ailp;
- spin_lock(&ailp->ail_lock);
- list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
- if (lip->li_ops->iop_error)
- lip->li_ops->iop_error(lip, bp);
- }
- spin_unlock(&ailp->ail_lock);
- }
- static bool
- xfs_buf_iodone_callback_error(
- struct xfs_buf *bp)
- {
- struct xfs_buf_log_item *bip = bp->b_log_item;
- struct xfs_log_item *lip;
- struct xfs_mount *mp;
- static ulong lasttime;
- static xfs_buftarg_t *lasttarg;
- struct xfs_error_cfg *cfg;
- /*
- * The failed buffer might not have a buf_log_item attached or the
- * log_item list might be empty. Get the mp from the available
- * xfs_log_item.
- */
- lip = list_first_entry_or_null(&bp->b_li_list, struct xfs_log_item,
- li_bio_list);
- mp = lip ? lip->li_mountp : bip->bli_item.li_mountp;
- /*
- * If we've already decided to shutdown the filesystem because of
- * I/O errors, there's no point in giving this a retry.
- */
- if (XFS_FORCED_SHUTDOWN(mp))
- goto out_stale;
- if (bp->b_target != lasttarg ||
- time_after(jiffies, (lasttime + 5*HZ))) {
- lasttime = jiffies;
- xfs_buf_ioerror_alert(bp, __func__);
- }
- lasttarg = bp->b_target;
- /* synchronous writes will have callers process the error */
- if (!(bp->b_flags & XBF_ASYNC))
- goto out_stale;
- trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
- ASSERT(bp->b_iodone != NULL);
- cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
- /*
- * If the write was asynchronous then no one will be looking for the
- * error. If this is the first failure of this type, clear the error
- * state and write the buffer out again. This means we always retry an
- * async write failure at least once, but we also need to set the buffer
- * up to behave correctly now for repeated failures.
- */
- if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) ||
- bp->b_last_error != bp->b_error) {
- bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
- bp->b_last_error = bp->b_error;
- if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
- !bp->b_first_retry_time)
- bp->b_first_retry_time = jiffies;
- xfs_buf_ioerror(bp, 0);
- xfs_buf_submit(bp);
- return true;
- }
- /*
- * Repeated failure on an async write. Take action according to the
- * error configuration we have been set up to use.
- */
- if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
- ++bp->b_retries > cfg->max_retries)
- goto permanent_error;
- if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
- time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
- goto permanent_error;
- /* At unmount we may treat errors differently */
- if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
- goto permanent_error;
- /*
- * Still a transient error, run IO completion failure callbacks and let
- * the higher layers retry the buffer.
- */
- xfs_buf_do_callbacks_fail(bp);
- xfs_buf_ioerror(bp, 0);
- xfs_buf_relse(bp);
- return true;
- /*
- * Permanent error - we need to trigger a shutdown if we haven't already
- * to indicate that inconsistency will result from this action.
- */
- permanent_error:
- xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
- out_stale:
- xfs_buf_stale(bp);
- bp->b_flags |= XBF_DONE;
- trace_xfs_buf_error_relse(bp, _RET_IP_);
- return false;
- }
- /*
- * This is the iodone() function for buffers which have had callbacks attached
- * to them by xfs_buf_attach_iodone(). We need to iterate the items on the
- * callback list, mark the buffer as having no more callbacks and then push the
- * buffer through IO completion processing.
- */
- void
- xfs_buf_iodone_callbacks(
- struct xfs_buf *bp)
- {
- /*
- * If there is an error, process it. Some errors require us
- * to run callbacks after failure processing is done so we
- * detect that and take appropriate action.
- */
- if (bp->b_error && xfs_buf_iodone_callback_error(bp))
- return;
- /*
- * Successful IO or permanent error. Either way, we can clear the
- * retry state here in preparation for the next error that may occur.
- */
- bp->b_last_error = 0;
- bp->b_retries = 0;
- bp->b_first_retry_time = 0;
- xfs_buf_do_callbacks(bp);
- bp->b_log_item = NULL;
- list_del_init(&bp->b_li_list);
- bp->b_iodone = NULL;
- xfs_buf_ioend(bp);
- }
- /*
- * This is the iodone() function for buffers which have been
- * logged. It is called when they are eventually flushed out.
- * It should remove the buf item from the AIL, and free the buf item.
- * It is called by xfs_buf_iodone_callbacks() above which will take
- * care of cleaning up the buffer itself.
- */
- void
- xfs_buf_iodone(
- struct xfs_buf *bp,
- struct xfs_log_item *lip)
- {
- struct xfs_ail *ailp = lip->li_ailp;
- ASSERT(BUF_ITEM(lip)->bli_buf == bp);
- xfs_buf_rele(bp);
- /*
- * If we are forcibly shutting down, this may well be
- * off the AIL already. That's because we simulate the
- * log-committed callbacks to unpin these buffers. Or we may never
- * have put this item on the AIL because the transaction was
- * forcibly aborted. xfs_trans_ail_delete() takes care of both cases.
- *
- * Either way, AIL is useless if we're forcing a shutdown.
- */
- spin_lock(&ailp->ail_lock);
- xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
- xfs_buf_item_free(BUF_ITEM(lip));
- }
- /*
- * Requeue a failed buffer for writeback.
- *
- * We clear the log item failed state here as well, but we have to be careful
- * about reference counts because the only active reference counts on the buffer
- * may be the failed log items. Hence if we clear the log item failed state
- * before queuing the buffer for IO we can release all active references to
- * the buffer and free it, leading to use-after-free problems in
- * xfs_buf_delwri_queue. It makes no difference to the buffer or log items in
- * which order we process them - the buffer is locked, and we own the buffer list
- * so nothing on them is going to change while we are performing this action.
- *
- * Hence we can safely queue the buffer for IO before we clear the failed log
- * item state, therefore always having an active reference to the buffer and
- * avoiding the transient zero-reference state that leads to use-after-free.
- *
- * Return true if the buffer was added to the buffer list, false if it was
- * already on the buffer list.
- */
- bool
- xfs_buf_resubmit_failed_buffers(
- struct xfs_buf *bp,
- struct list_head *buffer_list)
- {
- struct xfs_log_item *lip;
- bool ret;
- ret = xfs_buf_delwri_queue(bp, buffer_list);
- /*
- * XFS_LI_FAILED set/clear is protected by ail_lock; callers of this
- * function must already hold it.
- */
- list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
- xfs_clear_li_failed(lip);
- return ret;
- }