From: Pavel Begunkov <asml.silence@gmail.com>
mainline inclusion
from mainline-5.6-rc1
commit 4e5ef02317b12e2ed3d604281ffb6b75261f7612
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA

---------------------------
Add percpu_ref_tryget_many(), which works the same way as percpu_ref_tryget(), but grabs a specified number of refs.
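For illustration only, a hedged usage sketch (my_batch_get, refs and nr_reqs are
made-up names, not part of this patch): a caller queueing a batch of requests can
take all of its references in one shot and then drop them one at a time, e.g. via
percpu_ref_put(), as each request completes.

	#include <linux/percpu-refcount.h>

	/* Take one reference per request before queueing the batch. */
	static bool my_batch_get(struct percpu_ref *refs, unsigned long nr_reqs)
	{
		/* Fails once the refcount has already dropped to zero. */
		if (!percpu_ref_tryget_many(refs, nr_reqs))
			return false;

		/* ... queue nr_reqs requests; each completion does percpu_ref_put(refs) ... */
		return true;
	}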
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Dennis Zhou <dennis@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
---
 include/linux/percpu-refcount.h | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 0f0240af8520..a01f8b4ebcfe 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -209,15 +209,17 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 }
 
 /**
- * percpu_ref_tryget - try to increment a percpu refcount
+ * percpu_ref_tryget_many - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
+ * @nr: number of references to get
  *
- * Increment a percpu refcount unless its count already reached zero.
+ * Increment a percpu refcount by @nr unless its count already reached zero.
  * Returns %true on success; %false on failure.
  *
  * This function is safe to call as long as @ref is between init and exit.
  */
-static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
+					   unsigned long nr)
 {
 	unsigned long __percpu *percpu_count;
 	bool ret;
@@ -225,10 +227,10 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	rcu_read_lock_sched();
 
 	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
+		this_cpu_add(*percpu_count, nr);
 		ret = true;
 	} else {
-		ret = atomic_long_inc_not_zero(&ref->count);
+		ret = atomic_long_add_unless(&ref->count, nr, 0);
 	}
 
 	rcu_read_unlock_sched();
@@ -236,6 +238,20 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	return ret;
 }
 
+/**
+ * percpu_ref_tryget - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+	return percpu_ref_tryget_many(ref, 1);
+}
+
 /**
  * percpu_ref_tryget_live - try to increment a live percpu refcount
  * @ref: percpu_ref to try-get