hulk inclusion
category: bugfix
bugzilla: NA
CVE: NA
----------------------------------------
This reverts commit 99294378e022ec2785f88247a7373767fd090e3a.
Commit 288e4521f0f6 ("x86/asm: 'Simplify' GEN_*_RMWcc() macros") needs
to be applied before this commit to avoid a compile error.
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
Documentation/core-api/refcount-vs-atomic.rst | 24 +++----------------
arch/x86/include/asm/refcount.h | 18 ++------------
lib/refcount.c | 18 ++++----------
3 files changed, 10 insertions(+), 50 deletions(-)
diff --git a/Documentation/core-api/refcount-vs-atomic.rst b/Documentation/core-api/refcount-vs-atomic.rst
index 976e85adffe8..322851bada16 100644
--- a/Documentation/core-api/refcount-vs-atomic.rst
+++ b/Documentation/core-api/refcount-vs-atomic.rst
@@ -54,13 +54,6 @@ must propagate to all other CPUs before the release operation
(A-cumulative property). This is implemented using
:c:func:`smp_store_release`.
-An ACQUIRE memory ordering guarantees that all post loads and
-stores (all po-later instructions) on the same CPU are
-completed after the acquire operation. It also guarantees that all
-po-later stores on the same CPU must propagate to all other CPUs
-after the acquire operation executes. This is implemented using
-:c:func:`smp_acquire__after_ctrl_dep`.
-
A control dependency (on success) for refcounters guarantees that
if a reference for an object was successfully obtained (reference
counter increment or addition happened, function returned true),
@@ -126,24 +119,13 @@ Memory ordering guarantees changes:
result of obtaining pointer to the object!
-case 5) - generic dec/sub decrement-based RMW ops that return a value
----------------------------------------------------------------------
+case 5) - decrement-based RMW ops that return a value
+-----------------------------------------------------
Function changes:
* :c:func:`atomic_dec_and_test` --> :c:func:`refcount_dec_and_test`
* :c:func:`atomic_sub_and_test` --> :c:func:`refcount_sub_and_test`
-
-Memory ordering guarantees changes:
-
- * fully ordered --> RELEASE ordering + ACQUIRE ordering on success
-
-
-case 6) other decrement-based RMW ops that return a value
----------------------------------------------------------
-
-Function changes:
-
* no atomic counterpart --> :c:func:`refcount_dec_if_one`
* ``atomic_add_unless(&var, -1, 1)`` --> ``refcount_dec_not_one(&var)``
@@ -154,7 +136,7 @@ Memory ordering guarantees changes:
.. note:: :c:func:`atomic_add_unless` only provides full order on success.
-case 7) - lock-based RMW
+case 6) - lock-based RMW
------------------------
Function changes:
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index 88061df7add6..19b90521954c 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -67,28 +67,14 @@ static __always_inline void refcount_dec(refcount_t *r)
static __always_inline __must_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
- bool ret = GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
+ GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
r->refs.counter, "er", i, "%0", e, "cx");
-
- if (ret) {
- smp_acquire__after_ctrl_dep();
- return true;
- }
-
- return false;
}
static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
- bool ret = GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
+ GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
r->refs.counter, "%0", e, "cx");
-
- if (ret) {
- smp_acquire__after_ctrl_dep();
- return true;
- }
-
- return false;
}
static __always_inline __must_check
diff --git a/lib/refcount.c b/lib/refcount.c
index 6e904af0fb3e..ebcf8cd49e05 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -33,9 +33,6 @@
* Note that the allocator is responsible for ordering things between free()
* and alloc().
*
- * The decrements dec_and_test() and sub_and_test() also provide acquire
- * ordering on success.
- *
*/
#include <linux/mutex.h>
@@ -167,8 +164,8 @@ EXPORT_SYMBOL(refcount_inc_checked);
* at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
- * before, and provides an acquire ordering on success such that free()
- * must come after.
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
@@ -193,12 +190,7 @@ bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
- if (!new) {
- smp_acquire__after_ctrl_dep();
- return true;
- }
- return false;
-
+ return !new;
}
EXPORT_SYMBOL(refcount_sub_and_test_checked);
@@ -210,8 +202,8 @@ EXPORT_SYMBOL(refcount_sub_and_test_checked);
* decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
- * before, and provides an acquire ordering on success such that free()
- * must come after.
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
--
2.25.1