hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IB7V02
--------------------------------
This reverts commit a90cde3fb51e379ec1c26cd7bc60d95515069fbb because it
introduces a huge amount of changes; later patches apply simpler fixes
instead.
Fixes: a90cde3fb51e ("xfs: split xfs_mod_freecounter")
Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
---
 fs/xfs/libxfs/xfs_ag.c      |  4 ++-
 fs/xfs/libxfs/xfs_ag_resv.c | 24 +++++++++----
 fs/xfs/libxfs/xfs_ag_resv.h |  2 +-
 fs/xfs/libxfs/xfs_alloc.c   |  4 +--
 fs/xfs/libxfs/xfs_bmap.c    | 21 ++++++------
 fs/xfs/scrub/fscounters.c   |  2 +-
 fs/xfs/xfs_fsops.c          | 29 +++++++++++-----
 fs/xfs/xfs_fsops.h          |  2 +-
 fs/xfs/xfs_mount.c          | 67 ++++++++++++++++++-------------------
 fs/xfs/xfs_mount.h          | 27 +++++----------
 fs/xfs/xfs_super.c          |  6 +++-
 fs/xfs/xfs_trace.h          |  1 +
 fs/xfs/xfs_trans.c          | 21 ++++++++----
 13 files changed, 117 insertions(+), 93 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c index ae18c032778e..1531bd0ee359 100644 --- a/fs/xfs/libxfs/xfs_ag.c +++ b/fs/xfs/libxfs/xfs_ag.c @@ -967,7 +967,9 @@ xfs_ag_shrink_space( * Disable perag reservations so it doesn't cause the allocation request * to fail. We'll reestablish reservation before we return. */ - xfs_ag_resv_free(pag); + error = xfs_ag_resv_free(pag); + if (error) + return error;
/* internal log shouldn't also show up in the free space btrees */ error = xfs_alloc_vextent_exact_bno(&args, diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c index 7f3e4562e4ac..7fd1fea95552 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.c +++ b/fs/xfs/libxfs/xfs_ag_resv.c @@ -126,13 +126,14 @@ xfs_ag_resv_needed( }
/* Clean out a reservation */ -static void +static int __xfs_ag_resv_free( struct xfs_perag *pag, enum xfs_ag_resv_type type) { struct xfs_ag_resv *resv; xfs_extlen_t oldresv; + int error;
trace_xfs_ag_resv_free(pag, type, 0);
@@ -148,19 +149,30 @@ __xfs_ag_resv_free( oldresv = resv->ar_orig_reserved; else oldresv = resv->ar_reserved; - xfs_add_fdblocks(pag->pag_mount, oldresv); + error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true); resv->ar_reserved = 0; resv->ar_asked = 0; resv->ar_orig_reserved = 0; + + if (error) + trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno, + error, _RET_IP_); + return error; }
/* Free a per-AG reservation. */ -void +int xfs_ag_resv_free( struct xfs_perag *pag) { - __xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT); - __xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA); + int error; + int err2; + + error = __xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT); + err2 = __xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA); + if (err2 && !error) + error = err2; + return error; }
static int @@ -204,7 +216,7 @@ __xfs_ag_resv_init( if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_AG_RESV_FAIL)) error = -ENOSPC; else - error = xfs_dec_fdblocks(mp, hidden_space, true); + error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true); if (error) { trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno, error, _RET_IP_); diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h index ff20ed93de77..b74b210008ea 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.h +++ b/fs/xfs/libxfs/xfs_ag_resv.h @@ -6,7 +6,7 @@ #ifndef __XFS_AG_RESV_H__ #define __XFS_AG_RESV_H__
-void xfs_ag_resv_free(struct xfs_perag *pag); +int xfs_ag_resv_free(struct xfs_perag *pag); int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp);
bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type); diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 6c31154b26e5..415e96476232 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -79,7 +79,7 @@ xfs_prealloc_blocks( }
/* - * The number of blocks per AG that we withhold from xfs_dec_fdblocks to + * The number of blocks per AG that we withhold from xfs_mod_fdblocks to * guarantee that we can refill the AGFL prior to allocating space in a nearly * full AG. Although the space described by the free space btrees, the * blocks used by the freesp btrees themselves, and the blocks owned by the @@ -89,7 +89,7 @@ xfs_prealloc_blocks( * until the fs goes down, we subtract this many AG blocks from the incore * fdblocks to ensure user allocation does not overcommit the space the * filesystem needs for the AGFLs. The rmap btree uses a per-AG reservation to - * withhold space from xfs_dec_fdblocks, so we do not account for that here. + * withhold space from xfs_mod_fdblocks, so we do not account for that here. */ #define XFS_ALLOCBT_AGFL_RESERVE 4
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index c78a56b1ecca..9ed22d82c000 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -1941,10 +1941,9 @@ xfs_bmap_add_extent_delay_real( }
/* adjust for changes in reserved delayed indirect blocks */ - if (da_new < da_old) - xfs_add_fdblocks(mp, da_old - da_new); - else if (da_new > da_old) - error = xfs_dec_fdblocks(mp, da_new - da_old, true); + if (da_new != da_old) + error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), + true);
xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); done: @@ -2622,8 +2621,8 @@ xfs_bmap_add_extent_hole_delay( } if (oldlen != newlen) { ASSERT(oldlen > newlen); - xfs_add_fdblocks(ip->i_mount, oldlen - newlen); - + xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), + false); /* * Nothing to do for disk quota accounting here. */ @@ -4029,11 +4028,11 @@ xfs_bmapi_reserve_delalloc( indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); ASSERT(indlen > 0);
- error = xfs_dec_fdblocks(mp, alen, false); + error = xfs_mod_fdblocks(mp, -((int64_t)alen), false); if (error) goto out_unreserve_quota;
- error = xfs_dec_fdblocks(mp, indlen, false); + error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false); if (error) goto out_unreserve_blocks;
@@ -4061,7 +4060,7 @@ xfs_bmapi_reserve_delalloc( return 0;
out_unreserve_blocks: - xfs_add_fdblocks(mp, alen); + xfs_mod_fdblocks(mp, alen, false); out_unreserve_quota: if (XFS_IS_QUOTA_ON(mp)) xfs_quota_unreserve_blkres(ip, alen); @@ -4920,7 +4919,7 @@ xfs_bmap_del_extent_delay( uint64_t rtexts = del->br_blockcount;
do_div(rtexts, mp->m_sb.sb_rextsize); - xfs_add_frextents(mp, rtexts); + xfs_mod_frextents(mp, rtexts); }
/* @@ -5008,7 +5007,7 @@ xfs_bmap_del_extent_delay( if (!isrt) da_diff += del->br_blockcount; if (da_diff) { - xfs_add_fdblocks(mp, da_diff); + xfs_mod_fdblocks(mp, da_diff, false); xfs_mod_delalloc(mp, -da_diff); } return error; diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c index 5c6d72440789..5799e9a94f1f 100644 --- a/fs/xfs/scrub/fscounters.c +++ b/fs/xfs/scrub/fscounters.c @@ -516,7 +516,7 @@ xchk_fscounters(
/* * If the filesystem is not frozen, the counter summation calls above - * can race with xfs_dec_freecounter, which subtracts a requested space + * can race with xfs_mod_freecounter, which subtracts a requested space * reservation from the counter and undoes the subtraction if that made * the counter go negative. Therefore, it's possible to see negative * values here, and we should only flag that as a corruption if we diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 6fd6ef8a9c81..c3f0e3cae87e 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -213,8 +213,10 @@ xfs_growfs_data_private( struct xfs_perag *pag;
pag = xfs_perag_get(mp, id.agno); - xfs_ag_resv_free(pag); + error = xfs_ag_resv_free(pag); xfs_perag_put(pag); + if (error) + return error; } /* * Reserve AG metadata blocks. ENOSPC here does not mean there @@ -422,14 +424,14 @@ xfs_reserve_blocks( */ if (mp->m_resblks > request) { lcounter = mp->m_resblks_avail - request; - if (lcounter > 0) { /* release unused blocks */ + if (lcounter > 0) { /* release unused blocks */ fdblks_delta = lcounter; mp->m_resblks_avail -= lcounter; } mp->m_resblks = request; if (fdblks_delta) { spin_unlock(&mp->m_sb_lock); - xfs_add_fdblocks(mp, fdblks_delta); + error = xfs_mod_fdblocks(mp, fdblks_delta, 0); spin_lock(&mp->m_sb_lock); }
@@ -465,9 +467,9 @@ xfs_reserve_blocks( */ fdblks_delta = min(free, delta); spin_unlock(&mp->m_sb_lock); - error = xfs_dec_fdblocks(mp, fdblks_delta, 0); + error = xfs_mod_fdblocks(mp, -fdblks_delta, 0); if (!error) - xfs_add_fdblocks(mp, fdblks_delta); + xfs_mod_fdblocks(mp, fdblks_delta, 0); spin_lock(&mp->m_sb_lock); } out: @@ -598,13 +600,24 @@ xfs_fs_reserve_ag_blocks( /* * Free space reserved for per-AG metadata. */ -void +int xfs_fs_unreserve_ag_blocks( struct xfs_mount *mp) { xfs_agnumber_t agno; struct xfs_perag *pag; + int error = 0; + int err2;
- for_each_perag(mp, agno, pag) - xfs_ag_resv_free(pag); + for_each_perag(mp, agno, pag) { + err2 = xfs_ag_resv_free(pag); + if (err2 && !error) + error = err2; + } + + if (error) + xfs_warn(mp, + "Error %d freeing per-AG metadata reserve pool.", error); + + return error; } diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h index dba17c404e7d..2cffe51a31e8 100644 --- a/fs/xfs/xfs_fsops.h +++ b/fs/xfs/xfs_fsops.h @@ -14,6 +14,6 @@ extern int xfs_reserve_blocks(xfs_mount_t *mp, uint64_t *inval, extern int xfs_fs_goingdown(xfs_mount_t *mp, uint32_t inflags);
extern int xfs_fs_reserve_ag_blocks(struct xfs_mount *mp); -extern void xfs_fs_unreserve_ag_blocks(struct xfs_mount *mp); +extern int xfs_fs_unreserve_ag_blocks(struct xfs_mount *mp);
#endif /* __XFS_FSOPS_H__ */ diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index ece135ddfe98..bc14ca674658 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1142,44 +1142,16 @@ xfs_fs_writable( return true; }
-void -xfs_add_freecounter( - struct xfs_mount *mp, - struct percpu_counter *counter, - uint64_t delta) -{ - bool has_resv_pool = (counter == &mp->m_fdblocks); - uint64_t res_used; - - /* - * If the reserve pool is depleted, put blocks back into it first. - * Most of the time the pool is full. - */ - if (!has_resv_pool || mp->m_resblks == mp->m_resblks_avail) { - percpu_counter_add(counter, delta); - return; - } - - spin_lock(&mp->m_sb_lock); - res_used = mp->m_resblks - mp->m_resblks_avail; - if (res_used > delta) { - mp->m_resblks_avail += delta; - } else { - delta -= res_used; - mp->m_resblks_avail = mp->m_resblks; - percpu_counter_add(counter, delta); - } - spin_unlock(&mp->m_sb_lock); -} - +/* Adjust m_fdblocks or m_frextents. */ int -xfs_dec_freecounter( +xfs_mod_freecounter( struct xfs_mount *mp, struct percpu_counter *counter, - uint64_t delta, + int64_t delta, bool rsvd) { int64_t lcounter; + long long res_used; uint64_t set_aside = 0; s32 batch; bool has_resv_pool; @@ -1189,6 +1161,31 @@ xfs_dec_freecounter( if (rsvd) ASSERT(has_resv_pool);
+ if (delta > 0) { + /* + * If the reserve pool is depleted, put blocks back into it + * first. Most of the time the pool is full. + */ + if (likely(!has_resv_pool || + mp->m_resblks == mp->m_resblks_avail)) { + percpu_counter_add(counter, delta); + return 0; + } + + spin_lock(&mp->m_sb_lock); + res_used = (long long)(mp->m_resblks - mp->m_resblks_avail); + + if (res_used > delta) { + mp->m_resblks_avail += delta; + } else { + delta -= res_used; + mp->m_resblks_avail = mp->m_resblks; + percpu_counter_add(counter, delta); + } + spin_unlock(&mp->m_sb_lock); + return 0; + } + /* * Taking blocks away, need to be more accurate the closer we * are to zero. @@ -1216,7 +1213,7 @@ xfs_dec_freecounter( */ if (has_resv_pool) set_aside = xfs_fdblocks_unavailable(mp); - percpu_counter_add_batch(counter, -((int64_t)delta), batch); + percpu_counter_add_batch(counter, delta, batch); if (__percpu_counter_compare(counter, set_aside, XFS_FDBLOCKS_BATCH) >= 0) { /* we had space! */ @@ -1228,11 +1225,11 @@ xfs_dec_freecounter( * that took us to ENOSPC. */ spin_lock(&mp->m_sb_lock); - percpu_counter_add(counter, delta); + percpu_counter_add(counter, -delta); if (!has_resv_pool || !rsvd) goto fdblocks_enospc;
- lcounter = (long long)mp->m_resblks_avail - delta; + lcounter = (long long)mp->m_resblks_avail + delta; if (lcounter >= 0) { mp->m_resblks_avail = lcounter; spin_unlock(&mp->m_sb_lock); diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 25f3a52964dc..d19cca099bc3 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -532,30 +532,19 @@ xfs_fdblocks_unavailable( return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks); }
-int xfs_dec_freecounter(struct xfs_mount *mp, struct percpu_counter *counter, - uint64_t delta, bool rsvd); -void xfs_add_freecounter(struct xfs_mount *mp, struct percpu_counter *counter, - uint64_t delta); +int xfs_mod_freecounter(struct xfs_mount *mp, struct percpu_counter *counter, + int64_t delta, bool rsvd);
-static inline int xfs_dec_fdblocks(struct xfs_mount *mp, uint64_t delta, - bool reserved) +static inline int +xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, bool reserved) { - return xfs_dec_freecounter(mp, &mp->m_fdblocks, delta, reserved); + return xfs_mod_freecounter(mp, &mp->m_fdblocks, delta, reserved); }
-static inline void xfs_add_fdblocks(struct xfs_mount *mp, uint64_t delta) +static inline int +xfs_mod_frextents(struct xfs_mount *mp, int64_t delta) { - xfs_add_freecounter(mp, &mp->m_fdblocks, delta); -} - -static inline int xfs_dec_frextents(struct xfs_mount *mp, uint64_t delta) -{ - return xfs_dec_freecounter(mp, &mp->m_frextents, delta, false); -} - -static inline void xfs_add_frextents(struct xfs_mount *mp, uint64_t delta) -{ - xfs_add_freecounter(mp, &mp->m_frextents, delta); + return xfs_mod_freecounter(mp, &mp->m_frextents, delta, false); }
extern int xfs_readsb(xfs_mount_t *, int); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 46b2a220cce4..ca82472d2558 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1889,7 +1889,11 @@ xfs_remount_ro( xfs_inodegc_stop(mp);
/* Free the per-AG metadata reservation pool. */ - xfs_fs_unreserve_ag_blocks(mp); + error = xfs_fs_unreserve_ag_blocks(mp); + if (error) { + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); + return error; + }
/* * Before we sync the metadata, we need to free up the reserve block diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 576837e10fe7..d562028281f5 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -2915,6 +2915,7 @@ DEFINE_AG_RESV_EVENT(xfs_ag_resv_free_extent); DEFINE_AG_RESV_EVENT(xfs_ag_resv_critical); DEFINE_AG_RESV_EVENT(xfs_ag_resv_needed);
+DEFINE_AG_ERROR_EVENT(xfs_ag_resv_free_error); DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error);
/* refcount tracepoint classes */ diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index a0e4bae90f77..3385813dc080 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -163,7 +163,7 @@ xfs_trans_reserve( * fail if the count would go below zero. */ if (blocks > 0) { - error = xfs_dec_fdblocks(mp, blocks, rsvd); + error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd); if (error != 0) return -ENOSPC; tp->t_blk_res += blocks; @@ -210,7 +210,7 @@ xfs_trans_reserve( * fail if the count would go below zero. */ if (rtextents > 0) { - error = xfs_dec_frextents(mp, rtextents); + error = xfs_mod_frextents(mp, -((int64_t)rtextents)); if (error) { error = -ENOSPC; goto undo_log; @@ -234,7 +234,7 @@ xfs_trans_reserve(
undo_blocks: if (blocks > 0) { - xfs_add_fdblocks(mp, blocks); + xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd); tp->t_blk_res = 0; } return error; @@ -600,10 +600,12 @@ xfs_trans_unreserve_and_mod_sb( struct xfs_trans *tp) { struct xfs_mount *mp = tp->t_mountp; + bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; int64_t blkdelta = 0; int64_t rtxdelta = 0; int64_t idelta = 0; int64_t ifreedelta = 0; + int error;
/* calculate deltas */ if (tp->t_blk_res > 0) @@ -626,8 +628,10 @@ xfs_trans_unreserve_and_mod_sb( }
/* apply the per-cpu counters */ - if (blkdelta) - xfs_add_fdblocks(mp, blkdelta); + if (blkdelta) { + error = xfs_mod_fdblocks(mp, blkdelta, rsvd); + ASSERT(!error); + }
if (idelta) percpu_counter_add_batch(&mp->m_icount, idelta, @@ -636,8 +640,10 @@ xfs_trans_unreserve_and_mod_sb( if (ifreedelta) percpu_counter_add(&mp->m_ifree, ifreedelta);
- if (rtxdelta) - xfs_add_frextents(mp, rtxdelta); + if (rtxdelta) { + error = xfs_mod_frextents(mp, rtxdelta); + ASSERT(!error); + }
if (!(tp->t_flags & XFS_TRANS_SB_DIRTY)) return; @@ -669,6 +675,7 @@ xfs_trans_unreserve_and_mod_sb( */ ASSERT(mp->m_sb.sb_imax_pct >= 0); ASSERT(mp->m_sb.sb_rextslog >= 0); + return; }
/* Add the given log item to the transaction's list of log items. */