R/W lock impl
COPYRIGHT NOTICE: text and code copied or derived from git commit message, mailinglist, or codebase are subject to the linux kernel licensing rules
documentations:
Lock types and their rules
https://docs.kernel.org/locking/locktypes.html
NOTE: this is based on the code reading for spinlocks
rwlock_t
- Non-
PREEMPT_RT
kernels - impl
rwlock_t
as a spinning lock and the suffix rules of spinlock_t
apply accordingly. This implementation is fair, no writer starvation.
PREEMPT_RT
kernels - map
rwlock_t
to rt_mutex
based impl.
- all
spinlock_t
changes also apply to rwlock_t
- writer cannot grant its priority to multiple readers. a preempted low-priority reader will continue holding the lock, thus starving even high-priority writers. In contrast, because readers can grant their priority to a writer, a preempted low-priority writer will have its priority boosted until it releases the lock, thus preventing that writer from starving the readers.
NON PREEMPT_RT (spinlock based)
/* Non-PREEMPT_RT rwlock_t: a thin wrapper around the arch-specific
 * rwlock (on x86 this is the queued rwlock, see struct qrwlock). */
typedef struct {
arch_rwlock_t raw_lock; /* the actual arch lock word(s) */
} rwlock_t;
// x86
// arch/x86/include/asm/spinlock_types.h
// -> include/asm-generic/qrwlock_types.h
- the qrwlock is fair, queued rwlock, introduced @
70af2f8a
- the
rwlock
uses the arch_spinlock_t
as a waitqueue and assuming the arch_spinlock_t
is a fair lock (ticket, mcs etc..), the resulting rwlock
is a fair lock.
/*
 * The queued read/write lock data structure
 */
typedef struct qrwlock {
union {
atomic_t cnts; /* whole lock word: writer byte + reader state, updated atomically */
struct { /* for big endian the fields order is swapped */
u8 wlocked; /* Locked for write? (low byte of cnts on little endian) */
u8 __lstate[3]; /* remaining 3 bytes of cnts; presumably reader bookkeeping — TODO confirm encoding */
};
};
arch_spinlock_t wait_lock; /* spinlock used to queue contending lockers fairly */
} arch_rwlock_t;
/* Static initializer for an unlocked arch_rwlock_t:
 * cnts == 0 (no writer byte set, no reader state) and an
 * unlocked wait_lock. Braces mirror the union/struct nesting above. */
#define __ARCH_RW_LOCK_UNLOCKED { \
{ .cnts = ATOMIC_INIT(0), }, \
.wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
}
PREEMPT_RT (rt_mutex based)
// include/linux/rwbase_rt.h
/* PREEMPT_RT reader/writer base, shared by the RT rwlock/rwsem
 * substitutions (include/linux/rwbase_rt.h). */
struct rwbase_rt {
atomic_t readers; /* reader state; exact encoding (e.g. writer bias) not shown in these notes — TODO confirm */
struct rt_mutex_base rtmutex; /* underlying rt_mutex providing PI boosting for the writer */
};
// include/linux/rwlock_types.h
/* PREEMPT_RT rwlock_t: built on rwbase_rt (rt_mutex based). */
typedef struct {
struct rwbase_rt rwbase;
atomic_t readers; /* NOTE(review): rwbase_rt already contains 'readers' — this second
                   * counter looks like a transcription error; verify against
                   * include/linux/rwlock_types.h in the kernel tree */
} rwlock_t;