MINIX Kernel Documentation
k_spinlock.h
13 #ifndef K_SPINLOCK_H
14 #define K_SPINLOCK_H
15
16 #include <minix/sys_config.h> /* For potential CONFIG_SMP or other system configs */
17
18 /* Include arch-specific definitions, e.g., for arch_pause() */
19 #if defined(__i386__) || defined(__x86_64__)
20 #include "arch/i386/include/arch_cpu.h" // Provides arch_pause for x86
21 #else
29 static inline void arch_pause(void) { /* No-op */ }
30 #endif
31
41 #define MAX_SPIN_THRESHOLD 100000
42
43 #ifndef KERNEL_YIELD_DEFINED
44 #define KERNEL_YIELD_DEFINED
57 static inline void kernel_yield(void) {
58   /* Placeholder for a real yield. A true yield would involve scheduler
59    * interaction (giving the CPU to another runnable process). Until that
60    * exists, fall back to arch_pause() so a heavily contended waiter at
61    * least backs off briefly. Note: on x86, "rep nop" is simply an
62    * alternative encoding of the "pause" instruction, not a stronger hint.
63    */
64   // kprintf_stub("kernel_yield() called (stub)\n"); // Uncomment for debugging.
65   arch_pause(); // Minimal back-off until a true scheduler yield is available.
66 }
67 #endif
68
77 typedef struct {
86   volatile int locked;
88   unsigned long acquisitions;
91   unsigned long contentions;
92   /* Future: unsigned long total_spin_cycles; could be added with more
93    * advanced cycle counting. */
95 } simple_spinlock_t;
104 static inline void simple_spin_init(simple_spinlock_t *lock) {
105   // Initialize the lock state to 0 (unlocked).
106   lock->locked = 0;
107   // Initialize statistics.
108   lock->acquisitions = 0;
109   lock->contentions = 0;
110 }
111
123 static inline void simple_spin_lock(simple_spinlock_t *lock) {
124   // Fast path: try to acquire the lock immediately with an atomic
125   // test-and-set. __sync_lock_test_and_set returns the previous value, so 0
126   // means the lock was free and is now ours (it also acts as an acquire barrier).
127   if (__sync_lock_test_and_set(&lock->locked, 1) == 0) {
128     lock->acquisitions++; // Successfully acquired on the first try.
129     return;               // Lock acquired, no contention.
130   }
131
132   // The first attempt failed, so the lock was already held: a contention.
133   // (Updated without holding the lock, so the count is approximate under heavy contention.)
134   lock->contentions++;
135   int spin_count = 0; // Spin counter for this contention episode.
136
137   // Loop indefinitely, spinning and re-attempting to acquire the lock.
138   while (1) {
139     // Inner busy-wait loop: spin while the lock is held by someone else.
140     // This inner read loop (checking lock->locked directly) can be slightly
141     // more efficient on some architectures than repeatedly executing the atomic
142     // __sync_lock_test_and_set, as it might reduce bus contention.
143     while (lock->locked != 0) {
144       /* arch_pause() provides a hint to the CPU that this is a spin-wait loop.
145        * On x86, this is the "pause" instruction, which can improve performance
146        * and reduce power consumption during the spin, especially on
147        * hyper-threaded processors, by yielding execution resources.
148        */
149       arch_pause();
150
151       spin_count++; // Increment spin counter.
152       if (spin_count > MAX_SPIN_THRESHOLD) {
153         /* If we've spun too many times, call kernel_yield().
154          * This is to prevent CPU monopolization on highly contended locks
155          * by allowing other threads/processes to run.
156          * The actual behavior of kernel_yield() depends on its implementation
157          * (e.g., a true scheduler yield or just a more potent pause).
158          */
159         kernel_yield();
160         spin_count = 0; // Reset counter after yielding.
161       }
162     }
163
164     // After observing lock->locked == 0 in the inner loop, attempt to
165     // acquire the lock again using atomic test-and-set.
166     if (__sync_lock_test_and_set(&lock->locked, 1) == 0) {
167       lock->acquisitions++; // Lock acquired after spinning.
168       return;               // Exit the function; the lock is now held.
169     }
170     // If __sync_lock_test_and_set still returned non-zero, it means another
171     // CPU/thread acquired the lock between our read of lock->locked and our TAS
172     // attempt (a race). In this case, the outer while(1) loop continues, and we
173     // re-enter the inner spin.
174   }
175 }
176
184 static inline void simple_spin_unlock(simple_spinlock_t *lock) {
185   /* Atomically set lock->locked to 0 (unlocked).
186    * __sync_lock_release provides a release memory barrier. This ensures that
187    * all memory writes made by this thread within the critical section (before
188    * this unlock) are visible to other CPUs before the lock is actually
189    * released.
190    */
191   __sync_lock_release(&lock->locked);
192 }
193
194 #endif /* K_SPINLOCK_H */
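For orientation, a minimal usage sketch of this API: a caller embeds a simple_spinlock_t next to the data it protects, initializes it once, and brackets the critical section with lock/unlock. The names stats_lock, shared_counter, stats_init and stats_bump below are hypothetical and not part of MINIX.

#include "k_spinlock.h"

/* Hypothetical shared state protected by a spinlock. */
static simple_spinlock_t stats_lock;
static unsigned long shared_counter;

void stats_init(void)
{
    simple_spin_init(&stats_lock);   /* Must run before any lock/unlock. */
    shared_counter = 0;
}

void stats_bump(void)
{
    simple_spin_lock(&stats_lock);   /* Busy-waits if another CPU holds the lock. */
    shared_counter++;                /* Critical section: keep it short. */
    simple_spin_unlock(&stats_lock); /* Release barrier publishes the update. */
}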
static void kernel_yield(void)
Yields the CPU, typically to the scheduler. (Stub Implementation)
Definition k_spinlock.h:57
static void simple_spin_init(simple_spinlock_t *lock)
Initializes a spinlock to the unlocked state and resets statistics.
Definition k_spinlock.h:104
static void arch_pause(void)
Placeholder for arch_pause on non-x86 architectures.
Definition k_spinlock.h:29
static void simple_spin_lock(simple_spinlock_t *lock)
Acquires a spinlock, busy-waiting if necessary.
Definition k_spinlock.h:123
#define MAX_SPIN_THRESHOLD
Maximum number of spin iterations before attempting to yield.
Definition k_spinlock.h:41
static void simple_spin_unlock(simple_spinlock_t *lock)
Releases a previously acquired spinlock.
Definition k_spinlock.h:184
simple_spinlock_t
Structure representing a simple spinlock.
Definition k_spinlock.h:77
unsigned long contentions
Number of times a thread tried to acquire the lock but found it already held, thus entering a spin-wait.
Definition k_spinlock.h:91
unsigned long acquisitions
Number of times the lock was successfully acquired.
Definition k_spinlock.h:88
volatile int locked
The lock state. 0 for unlocked, 1 for locked.
Definition k_spinlock.h:86
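For comparison only, the same test-and-test-and-set pattern can be written with C11 <stdatomic.h>, which makes explicit the ordering the header relies on (GCC documents __sync_lock_test_and_set as an acquire barrier and __sync_lock_release as a release barrier). This is a sketch, not part of this header; the c11_* names are illustrative.

#include <stdatomic.h>

typedef struct { atomic_int locked; } c11_spinlock_t;

static inline void c11_spin_init(c11_spinlock_t *lock)
{
    atomic_init(&lock->locked, 0);
}

static inline void c11_spin_lock(c11_spinlock_t *lock)
{
    for (;;) {
        /* Test-and-set with acquire ordering, like __sync_lock_test_and_set. */
        if (atomic_exchange_explicit(&lock->locked, 1, memory_order_acquire) == 0)
            return;
        /* Inner read-only spin, like the lock->locked != 0 loop above. */
        while (atomic_load_explicit(&lock->locked, memory_order_relaxed) != 0)
            ;  /* arch_pause() would go here. */
    }
}

static inline void c11_spin_unlock(c11_spinlock_t *lock)
{
    /* Release ordering, like __sync_lock_release. */
    atomic_store_explicit(&lock->locked, 0, memory_order_release);
}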