// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2015 Travis Geiselbrecht
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#pragma once

#include <arch/x86.h>
#include <arch/x86/mp.h>
#include <kernel/atomic.h>
#include <stdbool.h>
#include <zircon/compiler.h>
#include <zircon/thread_annotations.h>

__BEGIN_CDECLS

// A value of 0 means the lock is unheld; otherwise it holds the
// owner's CPU number + 1 (see arch_spin_lock_holder_cpu below).
#define SPIN_LOCK_INITIAL_VALUE (spin_lock_t){0}

typedef struct TA_CAP("mutex") spin_lock {
    unsigned long value;
} spin_lock_t;

typedef x86_flags_t spin_lock_saved_state_t;
typedef uint spin_lock_save_flags_t;

void arch_spin_lock(spin_lock_t *lock) TA_ACQ(lock);
int arch_spin_trylock(spin_lock_t *lock) TA_TRY_ACQ(false, lock);
void arch_spin_unlock(spin_lock_t *lock) TA_REL(lock);

static inline void arch_spin_lock_init(spin_lock_t *lock)
{
    *lock = SPIN_LOCK_INITIAL_VALUE;
}

// Returns the CPU number of the current holder, or an out-of-range
// value (0 wraps to UINT_MAX) if the lock is unheld.
static inline uint arch_spin_lock_holder_cpu(spin_lock_t *lock)
{
    return (uint)__atomic_load_n(&lock->value, __ATOMIC_RELAXED) - 1;
}

static inline bool arch_spin_lock_held(spin_lock_t *lock)
{
    return arch_spin_lock_holder_cpu(lock) == arch_curr_cpu_num();
}

/* flags are unused on x86 */
#define ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS 0

// Save the current RFLAGS and disable interrupts. The signal fence
// keeps the compiler from reordering memory accesses across the cli.
static inline void
arch_interrupt_save(spin_lock_saved_state_t *statep, spin_lock_save_flags_t flags)
{
    *statep = x86_save_flags();
    __asm__ volatile("cli");
    atomic_signal_fence();
}

// Restore RFLAGS saved by arch_interrupt_save, re-enabling interrupts
// if they were enabled at save time.
static inline void
arch_interrupt_restore(spin_lock_saved_state_t old_state, spin_lock_save_flags_t flags)
{
    x86_restore_flags(old_state);
}

__END_CDECLS
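
// Illustrative usage sketch (an addition, not part of the upstream header):
// the canonical pairing of interrupt save/restore with lock acquire/release.
// `my_lock` and `critical_work` are hypothetical names for illustration.
//
//     spin_lock_t my_lock = SPIN_LOCK_INITIAL_VALUE;
//
//     void critical_work(void)
//     {
//         spin_lock_saved_state_t state;
//         arch_interrupt_save(&state, ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS);
//         arch_spin_lock(&my_lock);   // spin until this CPU owns the lock
//         // ... critical section runs with interrupts disabled ...
//         arch_spin_unlock(&my_lock);
//         arch_interrupt_restore(state, ARCH_DEFAULT_SPIN_LOCK_FLAG_INTERRUPTS);
//     }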