In this patch set, patch 1/2 reverts "fs:/dcache.c: fix negative dentry limit not complete problem" and patch 2/2 rewrites it.
Long Li (2):
  Revert "fs:/dcache.c: fix negative dentry limit not complete problem"
  fs:/dcache.c: fix negative dentry limit not complete problem
 fs/dcache.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8WPQW
CVE: NA
--------------------------------
This reverts commit 9095bd8685498e1d498f1460d2e3512367978d53.
limit_negative_dentry() may write dentry->d_flags, so it must be called with dentry->d_lock held. However, fast_dput() calls limit_negative_dentry() without taking the lock, so revert this patch.
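To illustrate the hazard, here is a minimal userspace sketch (not kernel code, and all names in it are hypothetical): limit_negative_dentry() updates d_flags with a plain load/modify/store, which is only safe if every writer of d_flags holds d_lock. Below, two threads OR different bits into shared words without a lock; interleaved read-modify-writes lose updates, which is what an unlocked d_flags update can do to a flag set elsewhere under d_lock.

/*
 * Standalone illustration only. Thread A ORs FLAG_A and thread B ORs
 * FLAG_B into the same words using an unlocked read-modify-write, the
 * same shape as the unlocked d_flags update. Words missing a bit at the
 * end are lost updates. Build: cc -O2 -pthread rmw-race.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NWORDS	(1 << 20)
#define FLAG_A	0x1u
#define FLAG_B	0x2u

static unsigned int *words;

static void *or_bit(void *arg)
{
	unsigned int bit = (unsigned int)(unsigned long)arg;

	for (long i = 0; i < NWORDS; i++) {
		/* Unlocked load / modify / store, like the unlocked d_flags update. */
		unsigned int tmp = words[i];

		tmp |= bit;
		words[i] = tmp;
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	long lost = 0;

	words = calloc(NWORDS, sizeof(*words));
	if (!words)
		return 1;

	pthread_create(&t1, NULL, or_bit, (void *)(unsigned long)FLAG_A);
	pthread_create(&t2, NULL, or_bit, (void *)(unsigned long)FLAG_B);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* Any word that is missing a bit lost an update to the race. */
	for (long i = 0; i < NWORDS; i++)
		if (words[i] != (FLAG_A | FLAG_B))
			lost++;

	printf("words with a lost flag: %ld of %d\n", lost, NWORDS);
	free(words);
	return 0;
}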
Signed-off-by: Long Li <leo.lilong@huawei.com>
---
 fs/dcache.c | 58 ++++++++++++++++++++---------------------------------
 1 file changed, 22 insertions(+), 36 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index cd00c22f0005..f5b78cc80a00 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -647,36 +647,10 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
 	return __lock_parent(dentry);
 }
 
-/*
- * Return true if dentry is negative and exceed negative dentry limit.
- */
-static inline bool limit_negative_dentry(struct dentry *dentry)
+static inline bool retain_dentry(struct dentry *dentry)
 {
 	struct dentry *parent;
 
-	parent = dentry->d_parent;
-	if (unlikely(!parent))
-		return false;
-
-	WARN_ON((atomic_read(&parent->d_neg_dnum) < 0));
-	if (!dentry->d_inode) {
-		if (!(dentry->d_flags & DCACHE_NEGATIVE_ACCOUNT)) {
-			unsigned int flags = READ_ONCE(dentry->d_flags);
-
-			flags |= DCACHE_NEGATIVE_ACCOUNT;
-			WRITE_ONCE(dentry->d_flags, flags);
-			atomic_inc(&parent->d_neg_dnum);
-		}
-
-		if (atomic_read(&parent->d_neg_dnum) >= NEG_DENTRY_LIMIT)
-			return true;
-	}
-
-	return false;
-}
-
-static inline bool retain_dentry(struct dentry *dentry)
-{
 	WARN_ON(d_in_lookup(dentry));
 
 	/* Unreachable? Get rid of it */
@@ -694,9 +668,27 @@ static inline bool retain_dentry(struct dentry *dentry)
 	if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
 		return false;
 
-	if (unlikely(limit_negative_dentry(dentry)))
+	if (unlikely(!dentry->d_parent))
+		goto noparent;
+
+	parent = dentry->d_parent;
+	/* Return false if it's negative */
+	WARN_ON((atomic_read(&parent->d_neg_dnum) < 0));
+	if (!dentry->d_inode) {
+		if (!(dentry->d_flags & DCACHE_NEGATIVE_ACCOUNT)) {
+			unsigned int flags = READ_ONCE(dentry->d_flags);
+
+			flags |= DCACHE_NEGATIVE_ACCOUNT;
+			WRITE_ONCE(dentry->d_flags, flags);
+			atomic_inc(&parent->d_neg_dnum);
+		}
+	}
+
+	if (!dentry->d_inode &&
+	    atomic_read(&parent->d_neg_dnum) >= NEG_DENTRY_LIMIT)
 		return false;
 
+noparent:
 	/* retain; LRU fodder */
 	dentry->d_lockref.count--;
 	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
@@ -845,14 +837,8 @@ static inline bool fast_dput(struct dentry *dentry)
 	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
 
 	/* Nothing to do? Dropping the reference was all we needed? */
-	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry)) {
-		/*
-		 * If dentry is negative and need to be limitted, it maybe need
-		 * to be killed, so shouldn't fast put and retain in memory.
-		 */
-		if (likely(!limit_negative_dentry(dentry)))
-			return true;
-	}
+	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
+		return true;
 
 	/*
 	 * Not the fast normal case? Get the lock. We've already decremented
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8WPQW
CVE: NA
--------------------------------
Limiting negative dentries avoids soft lockups when too many dentries accumulate in memory, but the existing limit does not always take effect. For example, removing a file that was created earlier may never reach retain_dentry(), because the file's dentry may already be on the superblock's LRU list, which makes fast_dput() return true in the final dput().
So add the limit check to fast_dput() for dentries that are already on the LRU list; this prevents negative dentries from exceeding the limit when deleting existing files. Also factor the negative dentry limit logic out into limit_negative_dentry() to keep the code clean.
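As a rough model of the resulting fast_dput() flow, here is a simplified userspace sketch (not the kernel implementation; struct obj, over_neg_limit() and slow_path() are illustrative stand-ins for the dentry, limit_negative_dentry() and the kill/retain slow path): positive dentries keep the lock-free fast path, negative dentries take the lock before the limit check, and when over the limit the lock stays held while falling through to the slow path.

/*
 * Simplified userspace model of the new fast put path. A mutex stands
 * in for dentry->d_lock; the names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NEG_LIMIT	16

struct obj {
	pthread_mutex_t lock;	/* stands in for dentry->d_lock */
	bool negative;		/* stands in for !dentry->d_inode */
	int neg_count;		/* stands in for parent->d_neg_dnum */
};

/* Must be called with obj->lock held, like limit_negative_dentry(). */
static bool over_neg_limit(struct obj *o)
{
	return o->negative && o->neg_count >= NEG_LIMIT;
}

/* Stub for the "kill or retain" slow path, entered with the lock held. */
static void slow_path(struct obj *o)
{
	printf("slow path: over the negative limit (%d)\n", o->neg_count);
}

static bool fast_put(struct obj *o)
{
	/* Positive objects keep the lock-free fast path. */
	if (!o->negative)
		return true;

	/* Negative: take the lock before the limit check ... */
	pthread_mutex_lock(&o->lock);
	if (!over_neg_limit(o)) {
		pthread_mutex_unlock(&o->lock);
		return true;		/* still fast */
	}

	/* ... and keep it held while falling through to the slow path. */
	slow_path(o);
	pthread_mutex_unlock(&o->lock);
	return false;
}

int main(void)
{
	struct obj o = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.negative = true,
		.neg_count = NEG_LIMIT,
	};

	printf("fast_put: %s\n", fast_put(&o) ? "fast" : "slow");
	return 0;
}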
Fixes: c44e1f780873 ("fs/dcache.c: avoid softlock since too many negative dentry")
Signed-off-by: Long Li <leo.lilong@huawei.com>
---
 fs/dcache.c | 70 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 48 insertions(+), 22 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index f5b78cc80a00..3cf6fadd5550 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -647,10 +647,36 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
 	return __lock_parent(dentry);
 }
 
-static inline bool retain_dentry(struct dentry *dentry)
+/*
+ * Return true if dentry is negative and exceed negative dentry limit.
+ */
+static inline bool limit_negative_dentry(struct dentry *dentry)
 {
 	struct dentry *parent;
 
+	parent = dentry->d_parent;
+	if (unlikely(!parent))
+		return false;
+
+	WARN_ON((atomic_read(&parent->d_neg_dnum) < 0));
+	if (!dentry->d_inode) {
+		if (!(dentry->d_flags & DCACHE_NEGATIVE_ACCOUNT)) {
+			unsigned int flags = READ_ONCE(dentry->d_flags);
+
+			flags |= DCACHE_NEGATIVE_ACCOUNT;
+			WRITE_ONCE(dentry->d_flags, flags);
+			atomic_inc(&parent->d_neg_dnum);
+		}
+
+		if (atomic_read(&parent->d_neg_dnum) >= NEG_DENTRY_LIMIT)
+			return true;
+	}
+
+	return false;
+}
+
+static inline bool retain_dentry(struct dentry *dentry)
+{
 	WARN_ON(d_in_lookup(dentry));
 
 	/* Unreachable? Get rid of it */
@@ -668,27 +694,9 @@ static inline bool retain_dentry(struct dentry *dentry)
 	if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
 		return false;
 
-	if (unlikely(!dentry->d_parent))
-		goto noparent;
-
-	parent = dentry->d_parent;
-	/* Return false if it's negative */
-	WARN_ON((atomic_read(&parent->d_neg_dnum) < 0));
-	if (!dentry->d_inode) {
-		if (!(dentry->d_flags & DCACHE_NEGATIVE_ACCOUNT)) {
-			unsigned int flags = READ_ONCE(dentry->d_flags);
-
-			flags |= DCACHE_NEGATIVE_ACCOUNT;
-			WRITE_ONCE(dentry->d_flags, flags);
-			atomic_inc(&parent->d_neg_dnum);
-		}
-	}
-
-	if (!dentry->d_inode &&
-	    atomic_read(&parent->d_neg_dnum) >= NEG_DENTRY_LIMIT)
+	if (unlikely(limit_negative_dentry(dentry)))
 		return false;
 
-noparent:
 	/* retain; LRU fodder */
 	dentry->d_lockref.count--;
 	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
@@ -837,8 +845,25 @@ static inline bool fast_dput(struct dentry *dentry)
 	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
 
 	/* Nothing to do? Dropping the reference was all we needed? */
-	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
-		return true;
+	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry)) {
+		/*
+		 * If the dentry is not negative dentry, return true directly,
+		 * avoid holding dentry lock in the fast put path.
+		 */
+		if (dentry->d_inode)
+			return true;
+		/*
+		 * If dentry is negative and need to be limitted, it maybe need
+		 * to be killed, so shouldn't fast put and retain in memory.
+		 */
+		spin_lock(&dentry->d_lock);
+		if (likely(!limit_negative_dentry(dentry))) {
+			spin_unlock(&dentry->d_lock);
+			return true;
+		}
+
+		goto dentry_locked;
+	}
 
 	/*
 	 * Not the fast normal case? Get the lock. We've already decremented
@@ -847,6 +872,7 @@ static inline bool fast_dput(struct dentry *dentry)
 	 */
 	spin_lock(&dentry->d_lock);
 
+dentry_locked:
 	/*
 	 * Did somebody else grab a reference to it in the meantime, and
 	 * we're no longer the last user after all? Alternatively, somebody