/*
 * Based on arch/arm64/include/asm/atomic.h
 * which in turn is
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ARCH_ARM_ARM64_ATOMIC
#define __ARCH_ARM_ARM64_ATOMIC

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	smp_mb();

	asm volatile("// atomic_cmpxchg\n"
"1:	ldxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#endif
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 8
 * indent-tabs-mode: t
 * End:
 */
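/*
 * Illustrative usage sketch (not part of the original header).  It assumes
 * the including asm/atomic.h provides atomic_t, atomic_read() and an
 * ATOMIC_INIT() initialiser, as is conventional for this family of headers.
 * The names refcount, get_ref and put_ref below are hypothetical:
 *
 *	static atomic_t refcount = ATOMIC_INIT(1);
 *
 *	static void get_ref(void)
 *	{
 *		atomic_add(1, &refcount);
 *	}
 *
 *	static int put_ref(void)
 *	{
 *		// atomic_sub_return() is fully ordered (release on the store
 *		// exclusive plus the trailing smp_mb()), so the caller may
 *		// safely tear down the object when this returns non-zero.
 *		return atomic_sub_return(1, &refcount) == 0;
 *	}
 */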