Many subsystems will not use refcount_t unless there is a way to build the
kernel so that there is no regression in speed compared to atomic_t. This
adds CONFIG_REFCOUNT_FULL to enable the full refcount_t implementation
which has the validation but is slightly slower. When not enabled,
refcount_t uses the basic unchecked atomic_t routines, which results in no
code changes compared to just using atomic_t directly.

Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Windsor <dwindsor@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Elena Reshetova <elena.reshetova@intel.com>
Cc: Eric Biggers <ebiggers3@gmail.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Hans Liljestrand <ishkamiel@gmail.com>
Cc: James Bottomley <James.Bottomley@hansenpartnership.com>
Cc: Jann Horn <jannh@google.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Serge E. Hallyn <serge@hallyn.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: arozansk@redhat.com
Cc: axboe@kernel.dk
Cc: linux-arch <linux-arch@vger.kernel.org>
Link: http://lkml.kernel.org/r/20170621200026.GA115679@beast
Signed-off-by: Ingo Molnar <mingo@kernel.org>
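
As a sketch of the intended usage (illustrative only; struct foo, foo_get()
and foo_put() are made-up names, not part of the patch): a subsystem embeds
a refcount_t in its object and frees the object on the final put.

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct foo {
		refcount_t ref;
	};

	static struct foo *foo_get(struct foo *f)
	{
		/* Take an additional reference; with CONFIG_REFCOUNT_FULL
		 * this saturates instead of wrapping on overflow. */
		refcount_inc(&f->ref);
		return f;
	}

	static void foo_put(struct foo *f)
	{
		/* Free only on the 1 -> 0 transition. */
		if (refcount_dec_and_test(&f->ref))
			kfree(f);
	}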
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

/**
 * refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
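
/*
 * Example (a minimal sketch; 'foo_ref' is an illustrative name): a
 * statically initialized count of one reference:
 *
 *	static refcount_t foo_ref = REFCOUNT_INIT(1);
 */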

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, unsigned int n)
{
	atomic_set(&r->refs, n);
}

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
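
/*
 * With CONFIG_REFCOUNT_FULL, the checked, saturating implementations of
 * the operations below are provided out of line. Without it, each
 * operation falls back to the plain atomic_t routine, so there is no
 * overhead compared to using atomic_t directly.
 */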
#ifdef CONFIG_REFCOUNT_FULL
extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
extern void refcount_add(unsigned int i, refcount_t *r);

extern __must_check bool refcount_inc_not_zero(refcount_t *r);
extern void refcount_inc(refcount_t *r);

extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
extern void refcount_sub(unsigned int i, refcount_t *r);

extern __must_check bool refcount_dec_and_test(refcount_t *r);
extern void refcount_dec(refcount_t *r);
#else
static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	return atomic_add_unless(&r->refs, i, 0);
}

static inline void refcount_add(unsigned int i, refcount_t *r)
{
	atomic_add(i, &r->refs);
}

static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return atomic_add_unless(&r->refs, 1, 0);
}

static inline void refcount_inc(refcount_t *r)
{
	atomic_inc(&r->refs);
}

static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	return atomic_sub_and_test(i, &r->refs);
}

static inline void refcount_sub(unsigned int i, refcount_t *r)
{
	atomic_sub(i, &r->refs);
}

static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return atomic_dec_and_test(&r->refs);
}

static inline void refcount_dec(refcount_t *r)
{
	atomic_dec(&r->refs);
}
#endif /* CONFIG_REFCOUNT_FULL */

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
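
/*
 * Example (a minimal sketch; 'struct foo', 'foo_lock' and the list
 * membership are illustrative): refcount_dec_and_lock() returns true
 * with the lock held only when the count hits zero, so the object can
 * be unlinked and freed without racing a concurrent lookup.
 *
 *	void foo_release(struct foo *f)
 *	{
 *		if (refcount_dec_and_lock(&f->ref, &foo_lock)) {
 *			list_del(&f->node);
 *			spin_unlock(&foo_lock);
 *			kfree(f);
 *		}
 *	}
 */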

#endif /* _LINUX_REFCOUNT_H */