Mirror of https://github.com/fail0verflow/switch-linux.git
locking/refcounts: Use atomic_try_cmpxchg()
Generates better code (GCC-6.2.1):

  text    filename
  1576    defconfig-build/lib/refcount.o.pre
  1488    defconfig-build/lib/refcount.o.post

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent a9ebf306f5
commit b78c0d4712

1 changed file with 15 additions and 32 deletions
lib/refcount.c

@@ -57,9 +57,9 @@
  */
 bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (!val)
 			return false;
 
@@ -69,12 +69,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 		new = val + i;
 		if (new < val)
 			new = UINT_MAX;
-		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-		if (old == val)
-			break;
 
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
 	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -118,9 +114,9 @@ EXPORT_SYMBOL_GPL(refcount_add);
  */
 bool refcount_inc_not_zero(refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		new = val + 1;
 
 		if (!val)
@@ -129,12 +125,7 @@ bool refcount_inc_not_zero(refcount_t *r)
 		if (unlikely(!new))
 			return true;
 
-		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
 	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -182,9 +173,9 @@ EXPORT_SYMBOL_GPL(refcount_inc);
  */
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (unlikely(val == UINT_MAX))
 			return false;
 
@@ -194,12 +185,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 			return false;
 		}
 
-		old = atomic_cmpxchg_release(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
 	return !new;
 }
@@ -258,7 +244,9 @@ EXPORT_SYMBOL_GPL(refcount_dec);
  */
 bool refcount_dec_if_one(refcount_t *r)
 {
-	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+	int val = 1;
+
+	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
 }
 EXPORT_SYMBOL_GPL(refcount_dec_if_one);
 
@@ -275,9 +263,9 @@ EXPORT_SYMBOL_GPL(refcount_dec_if_one);
  */
 bool refcount_dec_not_one(refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (unlikely(val == UINT_MAX))
 			return true;
 
@@ -290,12 +278,7 @@ bool refcount_dec_not_one(refcount_t *r)
 			return true;
 		}
 
-		old = atomic_cmpxchg_release(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
 	return true;
 }
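A rough illustration, not part of the commit, of where the better code generation comes from: atomic_try_cmpxchg() returns a bool and refreshes the expected value in place on failure, so the retry loop no longer needs its own compare of the returned old value or a manual reload. Below is a minimal user-space sketch of the two loop shapes, using C11 <stdatomic.h> as a stand-in for the kernel's atomic_cmpxchg()/atomic_try_cmpxchg() helpers; the struct and function names are invented for the example and are not the kernel implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <limits.h>

struct refcount_demo { atomic_uint refs; };

/* Old shape: a cmpxchg-style primitive hands back the value it found, so
 * the caller must compare it against the expected value and reload by hand. */
static bool add_not_zero_cmpxchg(unsigned int i, struct refcount_demo *r)
{
	unsigned int old, new, val = atomic_load(&r->refs);

	for (;;) {
		if (!val)
			return false;

		new = (val + i < val) ? UINT_MAX : val + i;	/* saturate */

		old = val;
		/* emulates cmpxchg(): on failure, 'old' ends up holding the
		 * value currently in memory */
		atomic_compare_exchange_strong(&r->refs, &old, new);
		if (old == val)
			break;

		val = old;
	}
	return true;
}

/* New shape: a try_cmpxchg-style primitive returns bool and updates 'val'
 * itself on failure, so the compare and the reload fold into the loop
 * condition and one local variable disappears. */
static bool add_not_zero_try_cmpxchg(unsigned int i, struct refcount_demo *r)
{
	unsigned int new, val = atomic_load(&r->refs);

	do {
		if (!val)
			return false;

		new = (val + i < val) ? UINT_MAX : val + i;	/* saturate */
	} while (!atomic_compare_exchange_weak(&r->refs, &val, new));

	return true;
}

The same calling convention is why refcount_dec_if_one() grows the local "int val = 1;" in the hunk above: try_cmpxchg() takes a pointer to the expected value, so a bare constant can no longer be passed directly.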