hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7F5L7
CVE: NA
--------------------------------
Revert this patch due to fio performance degradation.

This reverts commit fa59b0b02511a925d160070c35ba843f0970b511.
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 kernel/locking/rwsem.c | 30 ++++++++++++------------------
 1 file changed, 12 insertions(+), 18 deletions(-)
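Note for reviewers (this note sits below the "---" marker and is not part of the
applied patch): a minimal toy sketch of the calling convention the revert
restores. It is plain user-space C with made-up names (toy_rwsem, toy_read_trylock,
toy_read_slowpath, toy_down_read) and placeholder constants, not the kernel
implementation; it only illustrates that, after the revert, the trylock keeps its
count sample in a local variable and the slowpath declares its own count again
instead of receiving the fast-path value through a long *cntp out-parameter.

/* Toy model only; names and constants below are illustrative, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>

#define TOY_READER_BIAS       1L
#define TOY_READ_FAILED_MASK  (1L << 62)  /* stand-in for RWSEM_READ_FAILED_MASK */

struct toy_rwsem { _Atomic long count; };

/* After the revert: the count sample stays local to the trylock. */
static bool toy_read_trylock(struct toy_rwsem *sem)
{
	long cnt = atomic_fetch_add(&sem->count, TOY_READER_BIAS) + TOY_READER_BIAS;

	return !(cnt & TOY_READ_FAILED_MASK);
}

/* After the revert: no count argument; the slowpath keeps its own local count. */
static void toy_read_slowpath(struct toy_rwsem *sem)
{
	long count = atomic_load(&sem->count);	/* re-derived here, not passed in */
	(void)count;				/* queueing/spinning elided */
}

static void toy_down_read(struct toy_rwsem *sem)
{
	if (!toy_read_trylock(sem))
		toy_read_slowpath(sem);
}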
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 2c4994e58f73..7bf45b0a1b1d 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -270,14 +270,14 @@ static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
 					  owner | RWSEM_NONSPINNABLE));
 }
 
-static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
+static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
 {
-	*cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
+	long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
 
-	if (WARN_ON_ONCE(*cntp < 0))
+	if (WARN_ON_ONCE(cnt < 0))
 		rwsem_set_nonspinnable(sem);
 
-	if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
+	if (!(cnt & RWSEM_READ_FAILED_MASK)) {
 		rwsem_set_reader_owned(sem);
 		return true;
 	}
@@ -996,9 +996,9 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
  * Wait for the read lock to be granted
  */
 static struct rw_semaphore __sched *
-rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, int state)
+rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
 {
-	long adjustment = -RWSEM_READER_BIAS;
+	long count, adjustment = -RWSEM_READER_BIAS;
 	struct rwsem_waiter waiter;
 	DEFINE_WAKE_Q(wake_q);
 	bool wake = false;
@@ -1344,20 +1344,16 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-	long count;
-
-	if (!rwsem_read_trylock(sem, &count)) {
-		rwsem_down_read_slowpath(sem, count, TASK_UNINTERRUPTIBLE);
+	if (!rwsem_read_trylock(sem)) {
+		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 	}
 }
 
 static inline int __down_read_interruptible(struct rw_semaphore *sem)
 {
-	long count;
-
-	if (!rwsem_read_trylock(sem, &count)) {
-		if (IS_ERR(rwsem_down_read_slowpath(sem, count, TASK_INTERRUPTIBLE)))
+	if (!rwsem_read_trylock(sem)) {
+		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
 			return -EINTR;
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 	}
@@ -1366,10 +1362,8 @@ static inline int __down_read_interruptible(struct rw_semaphore *sem)
 
 static inline int __down_read_killable(struct rw_semaphore *sem)
 {
-	long count;
-
-	if (!rwsem_read_trylock(sem, &count)) {
-		if (IS_ERR(rwsem_down_read_slowpath(sem, count, TASK_KILLABLE)))
+	if (!rwsem_read_trylock(sem)) {
+		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
 			return -EINTR;
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 	}