/tools/testing/selftests/powerpc/ptrace/

ptrace-vsx.h
     70  int compare_vsx_vmx(unsigned long *store, unsigned long *load)  in compare_vsx_vmx() argument
     75  if (store[1 + 2 * i] != load[1 + 2 * i]) {  in compare_vsx_vmx()
     77  1 + 2 * i, store[i],  in compare_vsx_vmx()
     85  if (store[i] != load[i]) {  in compare_vsx_vmx()
     87  i, store[i], i, load[i]);  in compare_vsx_vmx()
     93  if (!(i % 2) && (store[i] != load[i+1])) {  in compare_vsx_vmx()
     95  i, store[i], i+1, load[i+1]);  in compare_vsx_vmx()
     98  if ((i % 2) && (store[i] != load[i-1])) {  in compare_vsx_vmx()
    100  i, store[i], i-1, load[i-1]);  in compare_vsx_vmx()
|
/tools/perf/ui/gtk/

hists.c
    122  gtk_tree_store_set(store, &iter, 0, buf, -1);  in perf_gtk__add_callchain_flat()
    143  gtk_tree_store_set(store, &iter, 0, buf, -1);  in perf_gtk__add_callchain_flat()
    209  gtk_tree_store_append(store, &iter, parent);  in perf_gtk__add_callchain_folded()
    212  gtk_tree_store_set(store, &iter, 0, buf, -1);  in perf_gtk__add_callchain_folded()
    299  GtkTreeStore *store;  in perf_gtk__show_hists() local
    356  g_object_unref(GTK_TREE_MODEL(store));  in perf_gtk__show_hists()
    371  gtk_tree_store_append(store, &iter, NULL);  in perf_gtk__show_hists()
    407  GtkTreeStore *store,  in perf_gtk__add_hierarchy_entries() argument
    470  store, &iter, hpp,  in perf_gtk__add_hierarchy_entries()
    504  GtkTreeStore *store;  in perf_gtk__show_hierarchy() local
    [all …]
|
annotate.c
    112  GtkListStore *store;  in perf_gtk__annotate_symbol() local
    122  store = gtk_list_store_newv(MAX_ANN_COLS, col_types);  in perf_gtk__annotate_symbol()
    133  gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));  in perf_gtk__annotate_symbol()
    134  g_object_unref(GTK_TREE_MODEL(store));  in perf_gtk__annotate_symbol()
    140  gtk_list_store_append(store, &iter);  in perf_gtk__annotate_symbol()
    157  gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1);  in perf_gtk__annotate_symbol()
    159  gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1);  in perf_gtk__annotate_symbol()
    161  gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1);  in perf_gtk__annotate_symbol()
|
/tools/perf/scripts/python/

stat-cpi.py
     23  def store(time, event, cpu, thread, val, ena, run):  function
     36  store(time, "cycles", cpu, thread, val, ena, run);
     39  store(time, "instructions", cpu, thread, val, ena, run);
     42  store(time, "cycles", cpu, thread, val, ena, run);
     45  store(time, "instructions", cpu, thread, val, ena, run);
     48  store(time, "cycles", cpu, thread, val, ena, run);
     51  store(time, "instructions", cpu, thread, val, ena, run);
|
/tools/memory-model/litmus-tests/

S+fencewmbonceonce+poacquireonce.litmus
      7  * store against a subsequent store?
|
README
     45  As below, but with store-release replaced with WRITE_ONCE()
     49  Can a release-acquire chain order a prior store against
     63  As above, but with store-release replaced with WRITE_ONCE()
     68  load and a store?
     99  store and another store?
    111  effects of store propagation delays.
    117  This is the fully ordered (again, via smp_mb() version of store
    138  a prior store against a subsequent store?
|
R+poonceonces.litmus
      8  * store propagation delays.
|
S+poonceonces.litmus
      7  * first store against P1()'s final load, if the smp_store_release()
|
SB+poonceonces.litmus
      7  * to order the store-buffering pattern, where each process writes to the
|
R+fencembonceonces.litmus
      7  * counterintuitive litmus tests that illustrates the effects of store
|
SB+fencembonceonces.litmus
      7  * order the store-buffering pattern, where each process writes to the
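
The store-buffering (SB) shape named by these two tests is worth seeing in full. Below is a minimal stand-alone sketch using C11 atomics as a stand-in for the kernel's WRITE_ONCE() and smp_mb(); it illustrates the pattern and is not the litmus source itself. With both fences present the outcome r0 == 0 && r1 == 0 is forbidden; remove either fence and it becomes reachable on weakly ordered hardware.

    /* Store-buffering (SB) pattern, C11-atomics analogue of the
     * kernel's WRITE_ONCE()/smp_mb().  Build: cc -std=c11 -pthread sb.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int x, y;
    static int r0, r1;

    static void *p0(void *unused)
    {
            atomic_store_explicit(&x, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* kernel: smp_mb() */
            r0 = atomic_load_explicit(&y, memory_order_relaxed);
            return NULL;
    }

    static void *p1(void *unused)
    {
            atomic_store_explicit(&y, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* kernel: smp_mb() */
            r1 = atomic_load_explicit(&x, memory_order_relaxed);
            return NULL;
    }

    int main(void)
    {
            pthread_t t0, t1;
            pthread_create(&t0, NULL, p0, NULL);
            pthread_create(&t1, NULL, p1, NULL);
            pthread_join(t0, NULL);
            pthread_join(t1, NULL);
            printf("r0=%d r1=%d\n", r0, r1);  /* never "r0=0 r1=0" with the fences */
            return 0;
    }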
|
ISA2+poonceonces.litmus
      6  * Given a release-acquire chain ordering the first process's store
|
/tools/memory-model/Documentation/

glossary.txt
     35  When an acquire load returns the value stored by a release store
     37  from" the release store), then all operations preceding that
     38  store "happen before" any operations following that load acquire.
     42  Coherence (co): When one CPU's store to a given variable overwrites
     43  either the value from another CPU's store or some later value,
     56  a "control dependency" extends from that load to that store.
     90  extends from that load to that later store. For example:
    108  link from the load to the store.
    145  CPU's store to the first CPU's load. Reads-from links have the
    167  a special operation that includes a store and which orders that
    [all …]
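
The acquire/release rule quoted from glossary.txt (lines 35-38) is the basis of the "happens before" guarantee. A minimal sketch follows, using C11 atomics as an analogue of the kernel's smp_store_release()/smp_load_acquire(); the function and variable names are illustrative only.

    #include <stdatomic.h>

    static int payload;        /* plain, non-atomic data */
    static atomic_int flag;

    void producer(void)
    {
            payload = 42;                                           /* A */
            atomic_store_explicit(&flag, 1, memory_order_release);  /* release store */
    }

    int consumer(void)
    {
            /* If this acquire load reads 1, it "reads from" the release
             * store above, so A happens before everything after it here. */
            if (atomic_load_explicit(&flag, memory_order_acquire))
                    return payload;   /* guaranteed to observe 42 */
            return -1;                /* flag not yet set */
    }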
|
explanation.txt
    169  store to buf but before the store to flag. In this case, r1 and r2
    645  is a store, then the store which R reads from must come before
    650  store read by R comes before the store read by R' in the
    996  is between the store that CPU 1 reads from and the store that CPU 1
   1106  store and a second, po-later load reads from that store:
   1186  smp_wmb() forces P0's store to x to propagate to P1 before the store
   1301  had executed before its store then the value of the store would have
   1304  event, because P1's store came after P0's store in x's coherence
   1359  overwritten by P0's store to buf, the fence guarantees that the store
   1414  link from P0's store to its load. This is because P0's store gets
   [all …]
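
The buf/flag message-passing shape that explanation.txt keeps returning to (lines 169, 1186, 1359) can be sketched the same way. C11 has no exact equivalent of the kernel's store-only smp_wmb(), so the release/acquire fences below are only the closest portable stand-in; treat this as an approximation of the kernel idiom, not a translation of it.

    #include <stdatomic.h>

    static atomic_int buf, flag;

    void writer(void)
    {
            atomic_store_explicit(&buf, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_release);   /* kernel: smp_wmb() */
            atomic_store_explicit(&flag, 1, memory_order_relaxed);
    }

    int reader(void)
    {
            if (atomic_load_explicit(&flag, memory_order_relaxed)) {
                    atomic_thread_fence(memory_order_acquire);  /* kernel: smp_rmb() */
                    /* flag was seen as 1, so the store to buf has
                     * propagated here as well. */
                    return atomic_load_explicit(&buf, memory_order_relaxed);
            }
            return -1;
    }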
|
control-dependencies.txt
     32  (usually) guaranteed for load-store control dependencies, as in the
     43  the compiler might fuse the store to "b" with other stores. Worse yet,
     44  the compiler might convert the store into a load and a check followed
     45  by a store, and this compiler-generated load would not be ordered by
     87  Now there is no conditional between the load from "a" and the store to
    142  between the load from variable "a" and the store to variable "b". It is
    149  BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
    159  must store different values to "b". As in previous examples, if the two
    160  values were identical, the compiler could pull this store outside of the
    209  instructions and the store depending on them. This means that a weakly
    [all …]
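
The load-to-store control dependency discussed here has the following shape. One caveat, hedged as the document itself hedges: this ordering is a property of the kernel's dialect (READ_ONCE()/WRITE_ONCE() plus known compiler behavior), not something the C11 standard guarantees, so the relaxed-atomics sketch below only mirrors the idiom. Per lines 159-160, the two branches must store different values, or the compiler may hoist the store above the conditional and destroy the ordering.

    #include <stdatomic.h>

    static atomic_int a, b;

    void control_dep(void)
    {
            int q = atomic_load_explicit(&a, memory_order_relaxed);      /* READ_ONCE(a) */

            if (q)
                    atomic_store_explicit(&b, 1, memory_order_relaxed);  /* WRITE_ONCE(b, 1) */
            else
                    atomic_store_explicit(&b, 2, memory_order_relaxed);  /* different value! */
    }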
|
recipes.txt
     42  from or store to only part of the variable.
     45  use READ_ONCE() and WRITE_ONCE() or stronger to prevent load/store
     46  tearing, load/store fusing, and invented loads and stores.
    199  load buffering, release-acquire chains, store buffering.
    211  but the second load does not see the value written by the first store.
    237  store, while the smp_load_acquire macro orders the load against any
    281  (data dependency), or whether or not a later store is executed in the
    360  each load reads the value written by the other CPU's store. In the
    403  and the store to data combined with the user's full memory barrier
    404  between the load from data and the store to ->data_tail prevents
    [all …]
|
ordering.txt
     74  smp_mb(); // Order store to x before load from y.
     77  All CPUs will agree that the store to "x" happened before the load
    146  smp_mb__after_atomic(); // Order store to x before load from y.
    185  store, as described in the "Release Operations" section below.
    263  end in _release. These operations order their own store against all
    288  to a simple load instruction followed by a simple store instruction.
    308  memory-store portion of the RMW operation, and not against the
    346  memory-store portion. Note also that conditional operations
    416  through an "if" condition to a marked store (WRITE_ONCE() or stronger)
    460  primitives require the compiler to emit the corresponding store
    [all …]
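
The "_release" read-modify-write operations mentioned at line 263 order their own store after all earlier accesses. The classic use is a reference-count drop; here is a hedged C11 sketch, with atomic_fetch_sub_explicit standing in for a kernel release-ordered atomic such as atomic_fetch_sub_release(), and illustrative names throughout.

    #include <stdatomic.h>

    static atomic_int refcount = 1;
    static int resource_live = 1;   /* stands in for the protected object */

    void put_ref(void)
    {
            /* Release ordering: every prior access to the resource is
             * ordered before the store that drops the count. */
            if (atomic_fetch_sub_explicit(&refcount, 1,
                                          memory_order_release) == 1) {
                    /* Pair with an acquire fence so teardown cannot be
                     * reordered before another thread's final accesses. */
                    atomic_thread_fence(memory_order_acquire);
                    resource_live = 0;   /* last reference: tear down */
            }
    }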
|
cheatsheet.txt
      6  Relaxed store Y Y
|
/tools/perf/Documentation/

perf-c2c.txt
     22  On Intel, the tool is based on load latency and precise store facility events
     26  sample load and store operations, therefore hardware and kernel support is
     33  - type of the access (load and store details)
    185  - store access details for each cacheline
    221  - sum of all store accesses
    224  L1Hit - store accesses that hit L1
    225  L1Miss - store accesses that missed L1
    226  N/A - store accesses with memory level is not available
    251  - % of store accesses that hit L1, missed L1 and N/A (no available) memory
|
perf-mem.txt
     23  not the pure load (or store latency). Use latency includes any pipeline
     26  On Arm64 this uses SPE to sample load and store operations, therefore hardware
     31  On AMD this use IBS Op PMU to sample load-store operations.
     41  Select the memory operation type: load or store (default: load,store)
    130  - op: operation in the sample instruction (load, store, prefetch, ...)
|
/tools/perf/tests/shell/attr/

README
      7  The general idea is to store 'struct perf_event_attr' details for
     23  store 'fd' and 'group_fd' values to allow checking for groups.
|
/tools/perf/util/

mem-events.h
     56  u32 store; /* count of all stores in trace */  member
|
bpf-filter.l
    130  store { return constant(PERF_MEM_OP_STORE); }
|
mem-events.c
    735  stats->store++;  in c2c_decode_stats()
    771  stats->store += add->store;  in c2c_add_stats()
|
/tools/testing/selftests/bpf/progs/

compute_live_registers.c
     85  __naked void store(void)  in store() function
|