Improve spinlock implementation for ARMv8.0
The current spinlock implementation, based on C11 atomics, compiles to an
LDAXR/STXR pair which we observe does not always make forward progress
on the Cortex-A72 (the concurrent_save_restore test livelocks): under
contention the exclusive store can keep failing, so no thread ever
manages to take the lock.
This patch inserts a WFE instruction when the LDAXR observes that the
lock is currently taken, pausing the core until an event is signalled
and easing off the contention; the lock holder's store-release on unlock
writes to the address the waiter marked exclusive, which generates such
an event. This is observed to restore forward progress on the A72 and
should also reduce the power our spinlocks consume while waiting.
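
The new acquire sequence lives in the arch-specific header introduced
below rather than in this hunk. As a rough illustration only, assuming
'struct spinlock' wraps a single 32-bit lock word, the intended pattern
looks something like this (register constraints and label numbering are
ours, not quoted from the implementation):

    static inline void sl_lock(struct spinlock *l)
    {
        uint32_t tmp1;
        uint32_t tmp2;

        /*
         * Try to take the lock with a LDAXR/STXR pair; if it is taken,
         * WFE until an event arrives (the holder's store-release on
         * unlock clears our exclusive monitor, generating one). SEVL
         * sets the local event register so the first WFE falls through
         * to the first LDAXR instead of waiting.
         */
        __asm__ volatile(
            "   mov   %w2, #1\n"
            "   sevl\n"
            "1: wfe\n"
            "2: ldaxr %w1, [%3]\n"
            "   cbnz  %w1, 1b\n" /* lock taken: wait for an event */
            "   stxr  %w1, %w2, [%3]\n"
            "   cbnz  %w1, 2b\n" /* exclusive store failed: retry */
            : "+m"(*l), "=&r"(tmp1), "=&r"(tmp2)
            : "r"(l)
            : "cc");
    }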
Note that there is still no architectural guarantee of forward progress,
and threads can be shown to livelock on the A72 if the retry loops are
extremely tight. This should be revisited in the future, possibly by
leveraging ARMv8.1 LSE atomics.
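
For reference, an LSE-based variant could take the following shape (an
illustrative sketch with hypothetical names, not part of this patch):
when built with FEAT_LSE available (e.g. -march=armv8.1-a), the C11
exchange below compiles to a single SWPA instruction, which does not
depend on an exclusive-monitor retry loop.

    #include <stdatomic.h>
    #include <stdint.h>

    struct spinlock_lse { /* hypothetical, for illustration */
        _Atomic uint32_t v;
    };

    static inline void sl_lock_lse(struct spinlock_lse *l)
    {
        /*
         * With LSE this is one SWPA per iteration; the swap always
         * completes, so the loop only spins while the lock is truly
         * held, never because an exclusive store failed.
         */
        while (atomic_exchange_explicit(&l->v, 1, memory_order_acquire)) {
            /* A WFE here would still save power under contention. */
        }
    }

    static inline void sl_unlock_lse(struct spinlock_lse *l)
    {
        atomic_store_explicit(&l->v, 0, memory_order_release);
    }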
Test: vcpu_state.concurrent_save_restore
Bug: 141087046
Change-Id: Ic838ba4d287ef4bfecc7dd84c883770d9c445b1d
diff --git a/inc/hf/spinlock.h b/inc/hf/spinlock.h
index dbd7890..80d1cfc 100644
--- a/inc/hf/spinlock.h
+++ b/inc/hf/spinlock.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2018 The Hafnium Authors.
+ * Copyright 2019 The Hafnium Authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,27 +16,18 @@
 
 #pragma once
 
-#include <stdatomic.h>
-
-struct spinlock {
-	atomic_flag v;
-};
-
-#define SPINLOCK_INIT \
-	{ \
-		.v = ATOMIC_FLAG_INIT \
-	}
+/*
+ * Includes the arch-specific definition of 'struct spinlock' and
+ * implementations of:
+ * - SPINLOCK_INIT
+ * - sl_lock()
+ * - sl_unlock()
+ */
+#include "hf/arch/spinlock.h"
 
 static inline void sl_init(struct spinlock *l)
 {
-	*l = (struct spinlock)SPINLOCK_INIT;
-}
-
-static inline void sl_lock(struct spinlock *l)
-{
-	while (atomic_flag_test_and_set_explicit(&l->v, memory_order_acquire)) {
-		/* do nothing */
-	}
+	*l = SPINLOCK_INIT;
 }
 
 /**
@@ -53,8 +44,3 @@
 		sl_lock(a);
 	}
 }
-
-static inline void sl_unlock(struct spinlock *l)
-{
-	atomic_flag_clear_explicit(&l->v, memory_order_release);
-}
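
The new "hf/arch/spinlock.h" itself is not part of this diff. To
complete the picture around the acquire sketch above, an aarch64 version
of the header would plausibly look like the following (an assumption
about the implementation, not a quote from it):

    #pragma once

    #include <stdint.h>

    struct spinlock {
        volatile uint32_t v;
    };

    #define SPINLOCK_INIT ((struct spinlock){.v = 0})

    /* sl_lock() as sketched in the commit message above. */

    static inline void sl_unlock(struct spinlock *l)
    {
        /*
         * Store 0 with release semantics. Writing the lock word clears
         * any other core's exclusive monitor on it, generating the
         * event that wakes their WFE in sl_lock().
         */
        __asm__ volatile("stlr wzr, [%1]" : "=m"(*l) : "r"(l));
    }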