/*
 * Copyright (C) 2016-2017 Andes Technology, Inc.
 * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
 */

/*
 * Atomic operations for the Andes NDS32 architecture, built on the
 * llw/scw (load-locked / store-conditional) instruction pair.
 */

#ifndef _NDS32_BITS_ATOMIC_H
#define _NDS32_BITS_ATOMIC_H

#include <stdint.h>

/* Fixed-width and fast integer aliases expected by the generic atomic
 * framework that includes this header. */
typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;


/* Full memory barrier: NDS32 "dsb" instruction plus a compiler barrier
 * (the "memory" clobber prevents the compiler from reordering accesses
 * across it). */
#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm__ ("dsb" ::: "memory")
#endif

/* Read and write barriers fall back to the full barrier; no weaker
 * barrier variant is used here. */
#ifndef atomic_read_barrier
# define atomic_read_barrier() atomic_full_barrier ()
#endif

#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif

/*
 * atomic_exchange_acq(mem, newval)
 *
 * Atomically store NEWVAL into *MEM and return the previous value of
 * *MEM, using an llw/scw retry loop.
 *
 * Register map:  %0 = val (old value, result), %1 = offset (pinned to 0
 * by "move %1, #0x0", so "[%3 + %1 << 0]" is effectively [mem]),
 * %2 = temp (scw status), %3 = mem, %4 = newval.
 *
 * The loop retries while the scw status register is zero
 * ("beqz %2, 1b"); presumably scw writes nonzero on a successful
 * store-conditional -- consistent with the other macros below.
 *
 * NOTE(review): the first "move %2, %4" before label 1: is redundant --
 * the same move is repeated inside the loop before the scw.
 */
#define atomic_exchange_acq(mem, newval) \
({ unsigned long val, offset, temp; \
 \
__asm__ volatile ( \
	"move %2, %4\n\t" \
	"move %1, #0x0\n\t" \
	"1:\n\t" \
	"llw %0, [%3 + %1 << 0]\n\t" \
	"move %2, %4\n\t" \
	"scw %2, [%3 + %1 << 0]\n\t" \
	"beqz %2, 1b\n\t" \
	: "=&r" (val), "=&r" (offset), "=&r" (temp) \
	: "r" (mem), "r" (newval) \
	: "memory" ); \
val; })

/*
 * atomic_compare_and_exchange_val_acq(mem, newval, oldval)
 *
 * Atomic compare-and-swap: if *MEM == OLDVAL, store NEWVAL into *MEM.
 * Returns the value read from *MEM (equal to OLDVAL iff the swap
 * happened) -- the "val" convention of the generic atomic framework.
 *
 * Same register map as above: %0 = val, %1 = offset (always 0),
 * %2 = temp, %3 = mem, %4 = newval, %5 = oldval.
 *
 * On mismatch the code branches to 2:, where it store-conditionals the
 * just-loaded value back to *MEM -- presumably to release the llw
 * reservation; the value of *MEM is unchanged either way, and this
 * scw's status is deliberately ignored.  TODO(review): confirm against
 * the NDS32 ISA manual that this is the intended reservation-clear
 * idiom.
 */
#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
({ unsigned long val, offset, temp; \
 \
__asm__ volatile ( \
	"move %1, #0x0\n\t" \
	"move %2, %4\n\t" \
	"1:\n\t" \
	"llw %0, [%3 + %1 << 0]\n\t" \
	"bne %0, %5, 2f\n\t" \
	"move %2, %4\n\t" \
	"scw %2, [%3 + %1 << 0]\n\t" \
	"beqz %2, 1b\n\t" \
	"j 3f\n\t" \
	"2:\n\t" \
	"move %2, %0\n\t" \
	"scw %2, [%3 + %1 << 0]\n\t" \
	"3:\n\t" \
	: "=&r" (val), "=&r" (offset), "=&r" (temp) \
	: "r" (mem), "r" (newval), "r" (oldval) \
	: "memory" ); \
val; })

/*
 * atomic_compare_and_exchange_bool_acq(mem, newval, oldval)
 *
 * Boolean compare-and-swap: if *MEM == OLDVAL, store NEWVAL into *MEM.
 * Returns 0 on success and 1 on mismatch: since %1 (offset) is set to
 * 0 up front, "addi %0, %1, #0" yields 0 on the success path and
 * "addi %0, %1, #0x1" yields 1 on the failure path.
 *
 * On mismatch, "scw %0, ..." writes the just-loaded value back --
 * presumably the same reservation-clearing idiom as in the val variant
 * above -- and %0 is then overwritten with the result code, so the scw
 * status is discarded.
 */
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
({ unsigned long val, offset, temp; \
 \
__asm__ volatile ( \
	"move %1, #0x0\n\t" \
	"move %2, %4\n\t" \
	"1:\n\t" \
	"llw %0, [%3 + %1 << 0]\n\t" \
	"bne %5, %0, 2f\n\t" \
	"move %2, %4\n\t" \
	"scw %2, [%3 + %1 << 0]\n\t" \
	"beqz %2, 1b\n\t" \
	"addi %0, %1, #0\n\t" \
	"j 3f\n\t" \
	"2:\n\t" \
	"scw %0, [%3 + %1 << 0]\n\t" \
	"addi %0, %1, #0x1\n\t" \
	"3:\n\t" \
	: "=&r" (val), "=&r" (offset), "=&r" (temp) \
	: "r" (mem), "r" (newval), "r" (oldval) \
	: "memory" ); \
val; })

#endif