spinlock: Implement reader/writer locking, remove deadlock recovery
- Implement spin_read_lock(), spin_read_unlock() etc., so that multiple reader threads may enter a critical section simultaneously (a short usage sketch follows below)
- Optimize the waiters check in spin_unlock()
- Print a debug report and crash on invalid lock/unlock usage
- Upon detecting a possible deadlock, only print a debug report: deadlock recovery may be abused and turned into a race condition
- Extend the deadlock detection timeout to 10 seconds
- Optimize the short spin loop in spin_lock_wait() by issuing a `pause` instruction on x86_64, and `isb sy` on arm64
LekKit committed Nov 18, 2024
1 parent 7875e53 commit 3824e60
Showing 2 changed files with 206 additions and 61 deletions.
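
As a quick orientation, a minimal usage sketch of the reader/writer API this commit introduces; the stats counter and function names are hypothetical, only the lock API comes from the diff below:

#include <stdint.h>
#include "spinlock.h"

static spinlock_t stats_lock = SPINLOCK_INIT;
static uint64_t stats_value = 0;

// Any number of readers may hold the lock at the same time
static uint64_t stats_get(void)
{
    spin_read_lock(&stats_lock);
    uint64_t val = stats_value;
    spin_read_unlock(&stats_lock);
    return val;
}

// A writer is exclusive against readers and other writers
static void stats_set(uint64_t val)
{
    spin_lock(&stats_lock);
    stats_value = val;
    spin_unlock(&stats_lock);
}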
157 changes: 122 additions & 35 deletions src/spinlock.c
@@ -13,21 +13,18 @@ file, You can obtain one at https://mozilla.org/MPL/2.0/.
 #include "rvtimer.h"
 #include "utils.h"
 
-// Maximum allowed lock time, warns and recovers the lock upon expiration
-#define SPINLOCK_MAX_MS 5000
+// Maximum allowed bounded locking time, reports a deadlock upon expiration
+#define SPINLOCK_MAX_MS 10000
 
 // Attempts to claim the lock before blocking in the kernel
-#define SPINLOCK_RETRIES 60
+#define SPINLOCK_RETRIES 40
 
 static cond_var_t* global_cond = NULL;
 
 static void spin_deinit(void)
 {
-    cond_var_t* cond = global_cond;
+    condvar_free(global_cond);
     global_cond = NULL;
-    // Make sure no use-after-free happens on running threads
-    atomic_fence();
-    condvar_free(cond);
 }
 
 static void spin_global_init(void)
@@ -38,7 +35,7 @@ static void spin_global_init(void)
     });
 }
 
-static void spin_lock_debug_report(spinlock_t* lock)
+NOINLINE static void spin_lock_debug_report(spinlock_t* lock, bool crash)
 {
 #ifdef USE_SPINLOCK_DEBUG
     rvvm_warn("The lock was previously held at %s", lock->location);
@@ -49,58 +46,148 @@ static void spin_lock_debug_report(spinlock_t* lock)
     rvvm_warn("Version: RVVM v"RVVM_VERSION);
 #endif
     stacktrace_print();
+    if (crash) {
+        rvvm_fatal("Locking issue detected!");
+    }
 }
 
-slow_path void spin_lock_wait(spinlock_t* lock, const char* location)
+static inline bool spin_flag_has_writer(uint32_t flag)
+{
+    return !!(flag & 0x1U);
+}
+
+static inline bool spin_flag_has_readers(uint32_t flag)
+{
+    return !!(flag & ~0x80000001U);
+}
+
+static inline bool spin_flag_has_waiters(uint32_t flag)
+{
+    return !!(flag & 0x80000000U);
+}
+
+static inline bool spin_flag_writer_welcome(uint32_t flag)
+{
+    return !flag;
+}
+
+static inline bool spin_flag_readers_welcome(uint32_t flag)
+{
+    return !(flag & 0x80000001U);
+}
+
+static inline bool spin_lock_possibly_available(uint32_t flag, bool writer)
+{
+    if (writer) {
+        return spin_flag_writer_welcome(flag);
+    } else {
+        return spin_flag_readers_welcome(flag);
+    }
+}
+
+static inline bool spin_try_claim_internal(spinlock_t* lock, const char* location, bool writer)
+{
+    if (writer) {
+        return spin_try_lock_internal(lock, location);
+    } else {
+        return spin_try_read_lock(lock);
+    }
+}
+
+slow_path static void spin_lock_wait_internal(spinlock_t* lock, const char* location, bool writer)
 {
     for (size_t i = 0; i < SPINLOCK_RETRIES; ++i) {
         // Read lock flag until there's any chance to grab it
         // Improves performance due to cacheline bouncing elimination
-        if (atomic_load_uint32_ex(&lock->flag, ATOMIC_ACQUIRE) == 0) {
-            if (spin_try_lock_real(lock, location)) {
-                // Successfully grabbed the lock
+        uint32_t flag = atomic_load_uint32_ex(&lock->flag, ATOMIC_RELAXED);
+        if (spin_lock_possibly_available(flag, writer)) {
+            if (spin_try_claim_internal(lock, location, writer)) {
+                // Successfully claimed the lock
                 return;
             }
             // Contention is going on, fallback to kernel wait
             break;
         }
+#if defined(GNU_EXTS) && defined(__x86_64__)
+        __asm__ volatile ("pause");
+#elif defined(GNU_EXTS) && defined(__aarch64__)
+        __asm__ volatile ("isb sy");
+#endif
     }
 
     spin_global_init();
 
-    rvtimer_t timer = {0};
-    rvtimer_init(&timer, 1000);
-    do {
-        uint32_t flag = atomic_load_uint32_ex(&lock->flag, ATOMIC_ACQUIRE);
-        if (flag == 0 && spin_try_lock_real(lock, location)) {
-            // Successfully grabbed the lock
-            return;
-        }
-        // Someone else grabbed the lock, indicate that we are still waiting
-        if (flag != 2 && !atomic_cas_uint32(&lock->flag, 1, 2)) {
-            // Failed to indicate lock as waiting, retry grabbing
-            continue;
-        }
-        // Wait upon wakeup from lock owner
-        bool woken = condvar_wait(global_cond, 10);
-        if (woken || flag != 2) {
-            // Reset deadlock timer upon noticing any forward progress
-            rvtimer_init(&timer, 1000);
-        }
-    } while (location == NULL || rvtimer_get(&timer) < SPINLOCK_MAX_MS);
-
-    rvvm_warn("Possible deadlock at %s", location);
-
-    spin_lock_debug_report(lock);
-
-    rvvm_warn("Attempting to recover execution...\n * * * * * * *\n");
+    rvtimer_t deadlock_timer = {0};
+    bool reset_deadlock_timer = true;
+    while (true) {
+        uint32_t flag = atomic_load_uint32_ex(&lock->flag, ATOMIC_RELAXED);
+        if (spin_lock_possibly_available(flag, writer)) {
+            if (spin_try_claim_internal(lock, location, writer)) {
+                // Successfully claimed the lock
+                return;
+            }
+            // Contention is going on, retry
+            reset_deadlock_timer = true;
+        } else {
+            // Indicate that we're waiting on this lock
+            if (spin_flag_has_waiters(flag) || atomic_cas_uint32(&lock->flag, flag, flag | 0x80000000U)) {
+                // Wait till wakeup from lock owner
+                if (condvar_wait(global_cond, 10) || !spin_flag_has_waiters(flag)) {
+                    // Reset deadlock timer upon noticing any forward progress
+                    reset_deadlock_timer = true;
+                }
+            }
+        }
+        if (reset_deadlock_timer) {
+            rvtimer_init(&deadlock_timer, 1000);
+            reset_deadlock_timer = false;
+        }
+        if (location && rvtimer_get(&deadlock_timer) > SPINLOCK_MAX_MS) {
+            rvvm_warn("Possible %sdeadlock at %s", writer ? "" : "reader ", location);
+            spin_lock_debug_report(lock, false);
+            reset_deadlock_timer = true;
+        }
+    }
+}
+
+slow_path void spin_lock_wait(spinlock_t* lock, const char* location)
+{
+    spin_lock_wait_internal(lock, location, true);
+}
+
+slow_path void spin_read_lock_wait(spinlock_t* lock, const char* location)
+{
+    spin_lock_wait_internal(lock, location, false);
 }
 
 slow_path void spin_lock_wake(spinlock_t* lock, uint32_t prev)
 {
-    if (prev > 1) {
+    if (spin_flag_has_readers(prev)) {
+        rvvm_warn("Mismatched unlock of a reader lock!");
+        spin_lock_debug_report(lock, true);
+    } else if (!spin_flag_has_writer(prev)) {
+        rvvm_warn("Unlock of a non-locked writer lock!");
+        spin_lock_debug_report(lock, true);
+    } else if (spin_flag_has_waiters(prev)) {
+        // Wake all readers or a waiter
         spin_global_init();
         condvar_wake_all(global_cond);
-    } else {
-        rvvm_warn("Unlock of a non-locked lock!");
-        spin_lock_debug_report(lock);
     }
 }
+
+slow_path void spin_read_lock_wake(spinlock_t* lock, uint32_t prev)
+{
+    if (spin_flag_has_writer(prev)) {
+        rvvm_warn("Mismatched unlock of a writer lock!");
+        spin_lock_debug_report(lock, true);
+    } else if (!spin_flag_has_readers(prev)) {
+        rvvm_warn("Unlock of a non-locked reader lock!");
+        spin_lock_debug_report(lock, true);
+    } else if (spin_flag_has_waiters(prev)) {
+        // Wake writer
+        atomic_and_uint32(&lock->flag, ~0x80000000U);
+        spin_global_init();
+        condvar_wake_all(global_cond);
+    }
+}
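
A note on the optimized waiters check in spin_unlock() (see src/spinlock.h below): an uncontended writer unlock swaps out prev == 0x1, the waiters bit 0x80000000 makes prev negative when reinterpreted as int32_t, and unlocking a non-locked lock yields 0, so one signed comparison covers both cases that need the slow path. A standalone illustration of that predicate (hypothetical helper, not part of this commit):

#include <stdint.h>
#include <assert.h>

// Mirrors the unlock fast-path test: take the slow path only when prev
// is 0 (invalid unlock) or has bit 31 set (waiters blocked in the kernel)
static int unlock_needs_slow_path(uint32_t prev)
{
    return ((int32_t)prev) <= 0;
}

int main(void)
{
    assert(!unlock_needs_slow_path(0x1U));        // plain writer unlock: fast path
    assert(unlock_needs_slow_path(0x0U));         // unlock of a non-locked lock: report
    assert(unlock_needs_slow_path(0x80000001U));  // unlock with waiters: wake them
    return 0;
}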
110 changes: 84 additions & 26 deletions src/spinlock.h
@@ -1,5 +1,5 @@
 /*
-spinlock.h - Hybrid Spinlock
+spinlock.h - Fast hybrid reader/writer lock
 Copyright (C) 2021 LekKit <github.com/LekKit>
 
 This Source Code Form is subject to the terms of the Mozilla Public
@@ -11,21 +11,38 @@ file, You can obtain one at https://mozilla.org/MPL/2.0/.
 #define RVVM_SPINLOCK_H
 
 #include <stddef.h>
+#include "compiler.h"
 #include "atomics.h"
 
+/*
+ * Lock flags meaning:
+ * 0x00000001: Writer holds the lock
+ * 0x7FFFFFFE: Reader count
+ * 0x80000000: There are waiters
+ */
+
+// Static initialization
+#define SPINLOCK_INIT {0}
+
+#ifdef USE_SPINLOCK_DEBUG
+#define SPINLOCK_DEBUG_LOCATION SOURCE_LINE
+#else
+#define SPINLOCK_DEBUG_LOCATION NULL
+#endif
+
 typedef struct {
     uint32_t flag;
 #ifdef USE_SPINLOCK_DEBUG
     const char* location;
 #endif
 } spinlock_t;
 
-// Internal locking operations
+// Slow path for kernel wait/wake on contended lock
 slow_path void spin_lock_wait(spinlock_t* lock, const char* location);
 slow_path void spin_lock_wake(spinlock_t* lock, uint32_t prev);
-
-// Static initialization
-#define SPINLOCK_INIT {0}
+slow_path void spin_read_lock_wait(spinlock_t* lock, const char* location);
+slow_path void spin_read_lock_wake(spinlock_t* lock, uint32_t prev);
 
 // Initialize a lock
 static inline void spin_init(spinlock_t* lock)
@@ -36,10 +53,13 @@ static inline void spin_init(spinlock_t* lock)
 #endif
 }
 
-// Try to claim the lock, returns true on success
-static forceinline bool spin_try_lock_real(spinlock_t* lock, const char* location)
+/*
+ * Writer locking
+ */
+
+static forceinline bool spin_try_lock_internal(spinlock_t* lock, const char* location)
 {
-    bool ret = atomic_cas_uint32_ex(&lock->flag, 0, 1, false, ATOMIC_ACQUIRE, ATOMIC_ACQUIRE);
+    bool ret = atomic_cas_uint32_ex(&lock->flag, 0x0, 0x1, false, ATOMIC_ACQUIRE, ATOMIC_RELAXED);
 #ifdef USE_SPINLOCK_DEBUG
     if (likely(ret)) lock->location = location;
 #else
@@ -48,40 +68,78 @@ static forceinline bool spin_try_lock_real(spinlock_t* lock, const char* location)
     return ret;
 }
 
-// Perform locking on small critical section
-// Reports a deadlock upon waiting for too long
-static forceinline void spin_lock_real(spinlock_t* lock, const char* location)
+static forceinline void spin_lock_internal(spinlock_t* lock, const char* location)
 {
-    if (unlikely(!spin_try_lock_real(lock, location))) {
+    if (unlikely(!spin_try_lock_internal(lock, location))) {
         spin_lock_wait(lock, location);
     }
 }
 
-// Perform locking around heavy operation, wait indefinitely
-static forceinline void spin_lock_slow_real(spinlock_t* lock, const char* location)
+static forceinline void spin_lock_slow_internal(spinlock_t* lock, const char* location)
 {
-    if (unlikely(!spin_try_lock_real(lock, location))) {
+    if (unlikely(!spin_try_lock_internal(lock, location))) {
         spin_lock_wait(lock, NULL);
     }
 }
 
-#ifdef USE_SPINLOCK_DEBUG
-#define spin_try_lock(lock) spin_try_lock_real(lock, SOURCE_LINE)
-#define spin_lock(lock) spin_lock_real(lock, SOURCE_LINE)
-#define spin_lock_slow(lock) spin_lock_slow_real(lock, SOURCE_LINE)
-#else
-#define spin_try_lock(lock) spin_try_lock_real(lock, NULL)
-#define spin_lock(lock) spin_lock_real(lock, NULL)
-#define spin_lock_slow(lock) spin_lock_slow_real(lock, NULL)
-#endif
+// Try to claim the writer lock
+#define spin_try_lock(lock) spin_try_lock_internal(lock, SPINLOCK_DEBUG_LOCATION)
+
+// Perform writer locking on small, bounded critical section
+// Reports a deadlock upon waiting for too long
+#define spin_lock(lock) spin_lock_internal(lock, SPINLOCK_DEBUG_LOCATION)
+
+// Perform writer locking around heavy operation, wait indefinitely
+#define spin_lock_slow(lock) spin_lock_slow_internal(lock, SPINLOCK_DEBUG_LOCATION)
 
-// Release the lock
+// Release the writer lock
 static forceinline void spin_unlock(spinlock_t* lock)
 {
-    uint32_t prev = atomic_swap_uint32_ex(&lock->flag, 0, ATOMIC_RELEASE);
-    if (unlikely(prev != 1)) {
+    uint32_t prev = atomic_swap_uint32_ex(&lock->flag, 0x0, ATOMIC_RELEASE);
+    if (unlikely(((int32_t)prev) <= 0)) {
         spin_lock_wake(lock, prev);
     }
 }
+
+/*
+ * Reader locking
+ */
+
+// Try to claim the reader lock
+static forceinline bool spin_try_read_lock(spinlock_t* lock)
+{
+    uint32_t prev = 0;
+    do {
+        prev = atomic_load_uint32_ex(&lock->flag, ATOMIC_RELAXED);
+        if (unlikely((prev & 0x1) || ((int32_t)prev) < 0)) {
+            // Writer owns the lock, writer waiters are present or too many readers (sic!)
+            return false;
+        }
+    } while (unlikely(!atomic_cas_uint32_ex(&lock->flag, prev, prev + 2, true, ATOMIC_ACQUIRE, ATOMIC_RELAXED)));
+    return true;
+}
+
+static forceinline void spin_read_lock_internal(spinlock_t* lock, const char* location)
+{
+    if (unlikely(!spin_try_read_lock(lock))) {
+        spin_read_lock_wait(lock, location);
+    }
+}
+
+// Perform reader locking on small, bounded critical section
+// Reports a deadlock upon waiting for too long
+#define spin_read_lock(lock) spin_read_lock_internal(lock, SPINLOCK_DEBUG_LOCATION)
+
+// Perform reader locking around heavy operation, wait indefinitely
+#define spin_read_lock_slow(lock) spin_read_lock_internal(lock, NULL)
+
+// Release the reader lock
+static forceinline void spin_read_unlock(spinlock_t* lock)
+{
+    uint32_t prev = atomic_sub_uint32_ex(&lock->flag, 0x2, ATOMIC_RELEASE);
+    if (unlikely(((int32_t)prev) <= 0)) {
+        spin_read_lock_wake(lock, prev);
+    }
+}
 
 #endif
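
To make the flag layout concrete: the writer sets bit 0, each reader adds 2 (bits 1-30 form the reader count), and bit 31 marks blocked waiters. A small decoding sketch (the helper is hypothetical, for illustration only):

#include <stdint.h>
#include <stdio.h>

// Decode a lock flag word per the layout comment in spinlock.h
static void describe_flag(uint32_t flag)
{
    printf("0x%08X: writer=%u readers=%u waiters=%u\n",
           (unsigned)flag,
           (unsigned)(flag & 0x1U),               // bit 0: writer holds the lock
           (unsigned)((flag & 0x7FFFFFFEU) >> 1), // bits 1-30: reader count
           (unsigned)(flag >> 31));               // bit 31: waiters present
}

int main(void)
{
    describe_flag(0x0U);         // free: writers and readers welcome
    describe_flag(0x1U);         // held by a writer
    describe_flag(0x6U);         // held by three readers
    describe_flag(0x80000001U);  // held by a writer, with blocked waiters
    return 0;
}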
