/tools/memory-model/litmus-tests/
MP+pooncerelease+poacquireonce.litmus
     6: * This litmus test demonstrates that smp_store_release() and
    16: smp_store_release(flag, 1);
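
The pattern this test checks is the classic message-passing (MP) idiom. A minimal C sketch of its shape, with the buf/flag names taken from the quoted snippet (the file's exact contents may differ):

    /* P0: publish the payload, then release the flag. */
    WRITE_ONCE(*buf, 1);
    smp_store_release(flag, 1);

    /* P1: acquire the flag, then read the payload. */
    int r0 = smp_load_acquire(flag);
    int r1 = READ_ONCE(*buf);

    /* The outcome r0 == 1 && r1 == 0 is forbidden: if P1 sees the
     * flag, it must also see the payload. */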
|
WRC+pooncerelease+fencermbonceonce+Once.litmus
     9: * specifically, this litmus test is forbidden because smp_store_release()
    25: smp_store_release(y, 1);
|
ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus
    19: smp_store_release(y, 1);
    27: smp_store_release(z, 1);
|
Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
    22: smp_store_release(y, 1);
    30: smp_store_release(z, 1);
|
LB+poacquireonce+pooncerelease.litmus
    18: smp_store_release(y, 1);
|
S+poonceonces.litmus
     7: * first store against P1()'s final load, if the smp_store_release()
|
MP+fencewmbonceonce+fencermbonceonce.litmus
     8: * is usually better to use smp_store_release() and smp_load_acquire().
|
ISA2+poonceonces.litmus
     8: * smp_store_release() invocations are replaced by WRITE_ONCE() and all
|
dep+plain.litmus
    28: smp_store_release(x, r);
|
MP+polocks.litmus
     7: * stand in for smp_load_acquire() and smp_store_release(), respectively.
|
MP+porevlocks.litmus
     7: * stand in for smp_load_acquire() and smp_store_release(), respectively.
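
MP+polocks and MP+porevlocks check the lock-based variant of the same message-passing pattern: a lock acquisition orders like smp_load_acquire() and a lock release orders like smp_store_release(). A hedged sketch of one such arrangement (not the literal file contents):

    /* P0: the unlock releases the buf write before the flag is set. */
    spin_lock(mylock);
    WRITE_ONCE(*buf, 1);
    spin_unlock(mylock);          /* acts as the release */
    WRITE_ONCE(*flag, 1);

    /* P1: the lock acquires before buf is read. */
    int r0 = READ_ONCE(*flag);
    spin_lock(mylock);            /* acts as the acquire */
    int r1 = READ_ONCE(*buf);
    spin_unlock(mylock);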
|
/tools/include/asm/
barrier.h    (macro definition)
    50: #ifndef smp_store_release
    51: # define smp_store_release(p, v) \
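
The #ifndef guard lets each architecture header supply its own definition, with a generic fallback otherwise. A sketch of the usual fallback shape (the exact body in this header may differ): a compiler barrier plus a *_ONCE store, which is only sufficient where the hardware already orders stores:

    #ifndef smp_store_release
    # define smp_store_release(p, v)               \
    do {                                           \
            barrier();  /* compiler barrier */     \
            WRITE_ONCE(*(p), v);                   \
    } while (0)
    #endif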
|
/tools/arch/s390/include/asm/
barrier.h    (macro definition)
    31: #define smp_store_release(p, v) \
|
/tools/arch/powerpc/include/asm/
barrier.h    (macro definition)
    33: #define smp_store_release(p, v) \
|
/tools/arch/sparc/include/asm/
barrier_64.h    (macro definition)
    43: #define smp_store_release(p, v) \
|
/tools/include/linux/
ring_buffer.h
    71: smp_store_release(&base->data_tail, tail);    (in ring_buffer_write_tail())
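
Here the release store publishes the consumer's progress in the perf mmap ring buffer: user space must finish reading the consumed records before the kernel may observe data_tail advance and overwrite that space. A hedged sketch of the consumer side (process_record() and record_size are illustrative, not the actual API):

    __u64 head = smp_load_acquire(&base->data_head); /* pairs with the kernel's publication of head */
    while (tail != head) {
            process_record(base, tail);   /* hypothetical helper */
            tail += record_size;
    }
    /* All record reads above complete before the tail is published. */
    smp_store_release(&base->data_tail, tail);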
|
/tools/arch/x86/include/asm/
barrier.h    (macro definition)
    33: #define smp_store_release(p, v) \
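
On x86, TSO hardware already keeps a store ordered after all program-order earlier loads and stores, so the arch macro only needs to restrain the compiler. A sketch, assuming the usual TSO-style definition (the actual body may differ):

    #define smp_store_release(p, v)                                        \
    do {                                                                   \
            barrier();  /* compiler-only barrier suffices under TSO */     \
            WRITE_ONCE(*(p), v);                                           \
    } while (0)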
|
/tools/arch/riscv/include/asm/
barrier.h    (macro definition)
    26: #define smp_store_release(p, v) \
|
/tools/arch/arm64/include/asm/
barrier.h    (macro definition)
    27: #define smp_store_release(p, v) \
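
arm64 instead has a dedicated store-release instruction (stlr), so the macro can emit it directly. A heavily simplified sketch of the 64-bit case only (the real macro dispatches on sizeof(*p); treat the constraints here as an assumption):

    #define smp_store_release(p, v)                                \
    do {                                                           \
            /* stlr: store with release semantics */               \
            asm volatile("stlr %1, %0"                             \
                         : "=Q" (*(p)) : "r" (v) : "memory");      \
    } while (0)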
|
/tools/memory-model/Documentation/
ordering.txt
   261: Release operations include smp_store_release(), atomic_set_release(),
   266: For example, use of smp_store_release() saves a line compared to the
   270: smp_store_release(&y, 1);
   272: More important, smp_store_release() makes it easier to connect up the
   274: by the smp_store_release(), in this case "y", will normally be used in
   286: smp_store_release(), which still provides the needed ordering of "x"
   295: smp_store_release(), but also atomic_set_release(), and
   299: smp_store_release() except that: (1) It takes the pointer to
   330: As with smp_store_release(), this also makes it easier to connect
   357: smp_store_release(&y, 1);
   [all …]
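
The "saves a line" remark around lines 266-270 refers to replacing an explicit write barrier with a release store; roughly:

    /* With a standalone barrier: */
    WRITE_ONCE(x, 1);
    smp_wmb();
    WRITE_ONCE(y, 1);

    /* With a release store: one line shorter, and the ordering is
     * visibly attached to the store of y itself. */
    WRITE_ONCE(x, 1);
    smp_store_release(&y, 1);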
|
locking.txt
   120: One way to fix this is to use smp_load_acquire() and smp_store_release()
   132: smp_store_release(&flag, 1);
   142: problem. The smp_store_release() guarantees that its store will be
   144: The smp_store_release() pairs with the smp_load_acquire(), thus ensuring
   150: this case, via the smp_load_acquire() and the smp_store_release().
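
The fix described at line 120 is the flag-handoff repair: with plain stores, the flag could become visible before the data it guards. A sketch of the repaired pattern (the data variable is illustrative; flag follows the quoted snippet):

    /* Writer: the release orders the data store before the flag store. */
    WRITE_ONCE(data, 42);
    smp_store_release(&flag, 1);

    /* Reader: the acquire orders the flag load before the data load. */
    while (!smp_load_acquire(&flag))
            cpu_relax();
    r = READ_ONCE(data);   /* guaranteed to observe 42 */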
|
recipes.txt
   220: Use of smp_store_release() and smp_load_acquire() is one way to force
   227: smp_store_release(&y, 1);
   236: The smp_store_release() macro orders any prior accesses against the
   250: use of smp_store_release() and smp_load_acquire(), except that both
   277: smp_store_release(), but the rcu_dereference() macro orders the load only
   294: It is usually better to use smp_store_release() instead of smp_wmb()
   420: smp_store_release(&y, 1);
   426: smp_store_release(&z, 1);
   449: smp_store_release(&y, 1);
   455: smp_store_release(&z, 1);
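
The matches at lines 420-455 are recipes.txt's chained examples, the same shape the ISA2 litmus test above checks: each CPU acquires the variable released by its predecessor and then releases the next one, extending the ordering across three CPUs. A sketch:

    /* CPU 0 */
    WRITE_ONCE(x, 1);
    smp_store_release(&y, 1);

    /* CPU 1 */
    r1 = smp_load_acquire(&y);
    smp_store_release(&z, 1);

    /* CPU 2 */
    r2 = smp_load_acquire(&z);
    r3 = READ_ONCE(x);

    /* If r1 == 1 and r2 == 1, then r3 must also be 1. */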
|
control-dependencies.txt
    92: you must use explicit memory ordering, for example, smp_store_release():
    96: smp_store_release(&b, 1);
    99: smp_store_release(&b, 1);
   223: smp_load_acquire(), smp_store_release(), or, in the case of prior
   229: smp_store_release(). Please note that it is *not* sufficient to use
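
Lines 92-99 make the point that a control dependency alone cannot be trusted once the compiler can prove both branches store the same value; the cure is an explicit release on both arms, roughly:

    q = READ_ONCE(a);
    if (q)
            smp_store_release(&b, 1);   /* ordered after the load of a */
    else
            smp_store_release(&b, 1);   /* release needed on both arms */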
|
/tools/testing/selftests/bpf/progs/
bpf_arena_spin_lock.h
    11: #define arch_mcs_spin_unlock_contended(l) smp_store_release((l), 1)
   520: smp_store_release(&lock->locked, 0);    (in arena_spin_unlock())
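
Both matches are unlock paths: a single release store is enough to publish everything done inside the critical section before the lock reads as free. A sketch of the word-based unlock (the lock type and field layout here are assumptions):

    static inline void arena_spin_unlock(arena_spinlock_t *lock)
    {
            /* Orders all critical-section accesses before freeing the lock. */
            smp_store_release(&lock->locked, 0);
    }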
|
/tools/testing/selftests/bpf/
bpf_atomic.h    (macro definition)
    96: #define smp_store_release(p, val) \
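
Inside BPF programs the kernel's barrier headers are unavailable, so one plausible definition leans on the compiler's atomic builtin with release ordering (an assumption about the actual body, which may differ):

    #define smp_store_release(p, val)                          \
            __atomic_store_n((p), (val), __ATOMIC_RELEASE)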
|