/linux-6.3-rc2/tools/perf/Documentation/ |
A D | perf-c2c.txt |
      20  you to track down the cacheline contentions.
      88  Specify sorting fields for single cacheline display.
     134  Group the detection of shared cacheline events into double cacheline
     136  feature, which causes cacheline sharing to behave like the cacheline
     178  - store access details for each cacheline
     184  2) offsets details for each cacheline
     190  - zero based index to identify the cacheline
     193  - cacheline address (hex number)
     199  - cacheline percentage of all peer accesses
     245  level for given offset within cacheline
          [all …]
|
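The perf-c2c.txt entries above describe how perf c2c attributes contended accesses to individual cachelines and to offsets within them. As a hedged illustration of the kind of workload those reports expose, here is a user-space sketch (file name, field names and iteration count are invented, not taken from the documentation) that puts two independently updated counters on one 64-byte line; recording it with perf c2c record and then running perf c2c report, as the tips.txt entry below suggests, would typically show a single contended cacheline with two hot offsets.

    /* false_share.c - hypothetical demo of the contention perf c2c reports.
     * Build: gcc -O2 -pthread false_share.c -o false_share
     * Then:  perf c2c record ./false_share && perf c2c report
     */
    #include <pthread.h>
    #include <stdio.h>

    struct counters {
        volatile long a;        /* offset 0 of the line             */
        volatile long b;        /* offset 8: same cacheline as 'a'  */
    } __attribute__((aligned(64))) shared;

    static void *bump_a(void *arg)
    {
        for (long i = 0; i < 100000000L; i++)
            shared.a++;         /* stores pull the line to this CPU */
        return NULL;
    }

    static void *bump_b(void *arg)
    {
        for (long i = 0; i < 100000000L; i++)
            shared.b++;         /* stores to the same line from another CPU */
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;

        pthread_create(&t1, NULL, bump_a, NULL);
        pthread_create(&t2, NULL, bump_b, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("%ld %ld\n", shared.a, shared.b);
        return 0;
    }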
A D | tips.txt | 37 To report cacheline events from previous recording: perf c2c report
|
/linux-6.3-rc2/drivers/gpu/drm/i915/gt/ |
A D | intel_ring.h |
     111  #define cacheline(a) round_down(a, CACHELINE_BYTES) in assert_ring_tail_valid() macro
     112  GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head); in assert_ring_tail_valid()
     113  #undef cacheline in assert_ring_tail_valid()
|
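assert_ring_tail_valid() above temporarily #defines cacheline(a) as round_down(a, CACHELINE_BYTES) and rejects a tail that falls in the same cacheline as head while being numerically smaller. A minimal user-space sketch of that check, assuming CACHELINE_BYTES is 64 and substituting a local round_down() for the kernel macro:

    #include <stdio.h>

    #define CACHELINE_BYTES  64                     /* assumed for illustration   */
    #define round_down(x, y) ((x) & ~((y) - 1))     /* y must be a power of two   */
    #define cacheline(a)     round_down(a, CACHELINE_BYTES)

    int main(void)
    {
        unsigned int head = 0x130, tail = 0x104;

        /* Mirrors the GEM_BUG_ON() condition quoted above: a tail that sits
         * in the same 64-byte line as head but behind it is invalid. */
        if (cacheline(tail) == cacheline(head) && tail < head)
            printf("invalid tail 0x%x: shares line 0x%x with head 0x%x\n",
                   tail, cacheline(tail), head);
        return 0;
    }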
A D | selftest_timeline.c |
      97  unsigned long cacheline; in __mock_hwsp_timeline() local
     110  cacheline = hwsp_cacheline(tl); in __mock_hwsp_timeline()
     111  err = radix_tree_insert(&state->cachelines, cacheline, tl); in __mock_hwsp_timeline()
     115  cacheline); in __mock_hwsp_timeline()
|
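__mock_hwsp_timeline() above stores each timeline in a radix tree keyed by its HWSP cacheline, so handing out the same cacheline twice shows up as an error. A loose user-space analogy (the 64-byte line size and the toy fixed-size table are assumptions; the selftest really uses the kernel radix tree) that keys a seen-set by cacheline index to flag reuse:

    #include <stdbool.h>
    #include <stdio.h>

    #define LINE_SHIFT  6       /* 64-byte cachelines, assumed          */
    #define NLINES      1024    /* toy table, not the kernel radix tree */

    static bool seen[NLINES];

    /* Returns false if this address's cacheline was already handed out. */
    static bool claim_cacheline(unsigned long addr)
    {
        unsigned long line = (addr >> LINE_SHIFT) % NLINES;

        if (seen[line])
            return false;
        seen[line] = true;
        return true;
    }

    int main(void)
    {
        printf("%d\n", claim_cacheline(0x1000));    /* 1: first use of the line */
        printf("%d\n", claim_cacheline(0x1038));    /* 0: same 64-byte line     */
        printf("%d\n", claim_cacheline(0x1040));    /* 1: next line             */
        return 0;
    }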
/linux-6.3-rc2/drivers/soc/qcom/ |
A D | smem.c |
     154  __le32 cacheline; member
     208  size_t cacheline; member
     300  size_t cacheline) in phdr_to_first_cached_entry() argument
     305  return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline); in phdr_to_first_cached_entry()
     334  cached_entry_next(struct smem_private_entry *e, size_t cacheline) in cached_entry_next() argument
     338  return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline); in cached_entry_next()
     591  e = phdr_to_first_cached_entry(phdr, part->cacheline); in qcom_smem_get_private()
     619  e = cached_entry_next(e, part->cacheline); in qcom_smem_get_private()
     918  smem->global_partition.cacheline = le32_to_cpu(entry->cacheline); in qcom_smem_set_global_partition()
     971  smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline); in qcom_smem_enumerate_partitions()
|
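The smem.c hits show the cached-entry layout of an SMEM partition: phdr_to_first_cached_entry() starts one cacheline-aligned header below the end of the partition, and cached_entry_next() steps further down by the entry's payload size plus another aligned header, so cached items stack downward as [payload][header] pairs. A self-contained sketch of that backward walk, with a deliberately simplified header struct, canary value and 64-byte alignment chosen only for illustration (the real struct smem_private_entry and partition header differ):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CACHELINE       64                              /* assumed for the demo */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    /* Grossly simplified stand-in for the SMEM private-entry header. */
    struct entry_hdr {
        uint32_t canary;
        uint32_t size;      /* bytes of payload stored *below* this header */
    };

    static unsigned char part[4096];                        /* fake partition */

    /* Mirrors phdr_to_first_cached_entry(): last aligned header in the partition. */
    static struct entry_hdr *first_cached(void)
    {
        return (void *)(part + sizeof(part) -
                        ALIGN_UP(sizeof(struct entry_hdr), CACHELINE));
    }

    /* Mirrors cached_entry_next(): step down over the payload plus the next header. */
    static struct entry_hdr *next_cached(struct entry_hdr *e)
    {
        return (void *)((unsigned char *)e - e->size -
                        ALIGN_UP(sizeof(struct entry_hdr), CACHELINE));
    }

    int main(void)
    {
        /* Hand-build two cached items, newest nearest the end of the partition. */
        struct entry_hdr *e0 = first_cached();
        e0->canary = 0xa5a5a5a5;
        e0->size = 128;
        memcpy((unsigned char *)e0 - e0->size, "item0", 6);

        struct entry_hdr *e1 = next_cached(e0);
        e1->canary = 0xa5a5a5a5;
        e1->size = 64;
        memcpy((unsigned char *)e1 - e1->size, "item1", 6);

        /* Walk the cached area the way qcom_smem_get_private() does. */
        for (struct entry_hdr *e = first_cached(); e->canary == 0xa5a5a5a5;
             e = next_cached(e))
            printf("entry @%td size %u: %s\n",
                   (unsigned char *)e - part, e->size, (char *)e - e->size);
        return 0;
    }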
/linux-6.3-rc2/include/asm-generic/ |
A D | vmlinux.lds.h |
    1030  #define PERCPU_INPUT(cacheline) \ argument
    1035  . = ALIGN(cacheline); \
    1037  . = ALIGN(cacheline); \
    1067  #define PERCPU_VADDR(cacheline, vaddr, phdr) \ argument
    1070  PERCPU_INPUT(cacheline) \
    1086  #define PERCPU_SECTION(cacheline) \ argument
    1090  PERCPU_INPUT(cacheline) \
    1112  #define RW_DATA(cacheline, pagealigned, inittask) \ argument
    1118  CACHELINE_ALIGNED_DATA(cacheline) \
    1119  READ_MOSTLY_DATA(cacheline) \
|
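PERCPU_INPUT(cacheline) above emits ALIGN(cacheline) padding in front of the per-CPU input sections, and CACHELINE_ALIGNED_DATA()/READ_MOSTLY_DATA() give RW_DATA() the same treatment, so data that asks for its own cacheline really starts on one. On the C side, a hedged example of a declaration that lands in one of those input sections (the variable name is invented, and this is kernel code rather than a standalone program):

    #include <linux/percpu.h>

    /* Goes into .data..percpu..shared_aligned: the DEFINE_PER_CPU_SHARED_ALIGNED()
     * wrapper cacheline-aligns the variable itself on SMP builds, and the
     * ALIGN(cacheline) directives quoted above keep the surrounding per-CPU input
     * sections aligned as a group, so each CPU's copy of this hot counter does not
     * false-share a line with unrelated per-CPU data. */
    static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, demo_hot_counter);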
/linux-6.3-rc2/drivers/md/bcache/ |
A D | bset.c |
     526  unsigned int cacheline, in cacheline_to_bkey() argument
     529  return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8; in cacheline_to_bkey()
     538  unsigned int cacheline, in bkey_to_cacheline_offset() argument
     541  return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0); in bkey_to_cacheline_offset()
     558  static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline) in table_to_bkey() argument
     560  return cacheline_to_bkey(t, cacheline, t->prev[cacheline]); in table_to_bkey()
     694  unsigned int j, cacheline = 1; in bch_bset_build_written_tree() local
     715  while (bkey_to_cacheline(t, k) < cacheline) { in bch_bset_build_written_tree()
     721  t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k); in bch_bset_build_written_tree()
|
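cacheline_to_bkey() above maps a (cacheline index, 8-byte offset) pair to a key pointer with plain arithmetic, t->data + cacheline * BSET_CACHELINE + offset * 8, and bkey_to_cacheline_offset() recovers the offset. A standalone sketch of that addressing scheme over a flat u64 buffer, with BSET_CACHELINE set to 64 purely as an assumed illustration value (bcache's real constant and bkey layout differ):

    #include <stdint.h>
    #include <stdio.h>

    #define BSET_CACHELINE  64      /* bytes per bucket; value assumed for the demo */

    static uint64_t data[512];      /* stand-in for bset_tree->data */

    /* Same arithmetic as cacheline_to_bkey(): pick a bucket, then an 8-byte slot. */
    static uint64_t *slot_ptr(unsigned int cacheline, unsigned int offset)
    {
        return (uint64_t *)((char *)data + cacheline * BSET_CACHELINE + offset * 8);
    }

    /* Same arithmetic as bkey_to_cacheline_offset(): 8-byte slots past the bucket start. */
    static unsigned int slot_offset(unsigned int cacheline, uint64_t *k)
    {
        return k - slot_ptr(cacheline, 0);
    }

    int main(void)
    {
        uint64_t *k = slot_ptr(3, 5);           /* bucket 3, sixth u64 within it */

        *k = 0xdeadbeef;
        printf("bucket 3 offset %u holds %#llx\n",
               slot_offset(3, k), (unsigned long long)*k);
        return 0;
    }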
/linux-6.3-rc2/arch/x86/lib/ |
A D | clear_page_64.S |
     128  # call the original thing for less than a cacheline
     177  # call the original thing for less than a cacheline
|
/linux-6.3-rc2/Documentation/translations/zh_CN/locking/ |
A D | mutex-design.rst | 60 the costly overhead of cacheline bouncing. An MCS-like lock is tailored for implementing a sleeping lock
|
/linux-6.3-rc2/kernel/ |
A D | Kconfig.hz | 14 contention and cacheline bounces as a result of timer interrupts.
|
/linux-6.3-rc2/Documentation/sparc/ |
A D | adi.rst |
      35  size is same as cacheline size which is 64 bytes. A task that sets ADI
     103  the corresponding cacheline, a memory corruption trap occurs. By
     123  the corresponding cacheline, a memory corruption trap occurs. If
|
/linux-6.3-rc2/arch/sparc/kernel/ |
A D | prom_irqtrans.c |
     355  static unsigned char cacheline[64] in tomatillo_wsync_handler() local
     366  "i" (FPRS_FEF), "r" (&cacheline[0]), in tomatillo_wsync_handler()
|
A D | cherrs.S |
     203  sub %g1, %g2, %g1 ! Move down 1 cacheline
     215  subcc %g1, %g2, %g1 ! Next cacheline
|
/linux-6.3-rc2/Documentation/translations/zh_CN/core-api/ |
A D | cachetlb.rst | 196 is loaded into different cachelines, aliasing will occur.
|
/linux-6.3-rc2/arch/parisc/kernel/ |
A D | perf_asm.S |
     132  ; Cacheline start (32-byte cacheline)
     145  ; Cacheline start (32-byte cacheline)
|
/linux-6.3-rc2/Documentation/locking/ |
A D | mutex-design.rst | 55 cacheline bouncing that common test-and-set spinlock implementations
|
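This mutex-design.rst line (and its zh_CN counterpart earlier in the listing) credits MCS-style queuing with avoiding the cacheline bouncing of test-and-set spinning: every test-and-set waiter hammers the shared lock word, while an MCS waiter spins on a flag in its own queue node. A hedged user-space illustration of that idea using C11 atomics (a textbook MCS lock, not the kernel's actual MCS/OSQ implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct mcs_node {
        _Atomic(struct mcs_node *) next;
        atomic_bool locked;
    };

    struct mcs_lock {
        _Atomic(struct mcs_node *) tail;
    };

    void mcs_lock_acquire(struct mcs_lock *lock, struct mcs_node *node)
    {
        struct mcs_node *prev;

        atomic_store(&node->next, NULL);
        atomic_store(&node->locked, true);

        /* Join the tail of the queue; the previous tail will hand the lock over. */
        prev = atomic_exchange(&lock->tail, node);
        if (!prev)
            return;                             /* queue was empty: lock acquired */

        atomic_store(&prev->next, node);
        while (atomic_load(&node->locked))
            ;                                   /* spin on our own node's line only */
    }

    void mcs_lock_release(struct mcs_lock *lock, struct mcs_node *node)
    {
        struct mcs_node *next = atomic_load(&node->next);

        if (!next) {
            struct mcs_node *expected = node;

            /* No successor visible: try to swing tail back to "unlocked". */
            if (atomic_compare_exchange_strong(&lock->tail, &expected, NULL))
                return;
            /* A successor is mid-enqueue; wait for it to link itself in. */
            while (!(next = atomic_load(&node->next)))
                ;
        }
        atomic_store(&next->locked, false);     /* wake exactly one waiter */
    }

The hand-over in mcs_lock_release() writes only the successor's node->locked, so under contention each waiter spins on a line it owns, which is exactly the property the document credits for avoiding cacheline bouncing.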
/linux-6.3-rc2/Documentation/driver-api/ |
A D | edac.rst | 46 lockstep is enabled, the cacheline is doubled, but it generally brings
|
/linux-6.3-rc2/Documentation/ |
A D | atomic_t.txt | 358 loop body. As a result there is no guarantee whatsoever the cacheline
|
/linux-6.3-rc2/tools/perf/util/ |
A D | Build | 6 perf-y += cacheline.o
|
/linux-6.3-rc2/Documentation/mm/ |
A D | multigen_lru.rst | 169 promotes hot pages. If the scan was done cacheline efficiently, it
|
/linux-6.3-rc2/Documentation/networking/device_drivers/ethernet/amazon/ |
A D | ena.rst | 28 and CPU cacheline optimized data placement.
|
/linux-6.3-rc2/security/ |
A D | Kconfig.hardening | 339 best effort at restricting randomization to cacheline-sized
|
/linux-6.3-rc2/drivers/char/ |
A D | Kconfig | 117 of threads across a large system which avoids bouncing a cacheline
|
/linux-6.3-rc2/Documentation/core-api/ |
A D | dma-api-howto.rst | 137 buffers were cacheline-aligned. Without that, you'd see cacheline
|
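The dma-api-howto.rst line above concerns DMA-safe buffers on architectures with non-coherent caches: if a streaming-DMA buffer shares a cacheline with data the CPU keeps writing, cache flushes and invalidations corrupt one side or the other. A hedged kernel-flavoured sketch of the usual defensive layout (struct and field names are invented for illustration); allocating the buffer separately with kmalloc() also works, since kmalloc() guarantees at least ARCH_KMALLOC_MINALIGN alignment:

    #include <linux/cache.h>
    #include <linux/types.h>

    /* Hypothetical per-device context: the streaming-DMA buffer gets its own
     * cacheline(s) so CPU writes to the bookkeeping fields never dirty a line
     * the device owns while the mapping is live. */
    struct demo_dev_ctx {
        u32 irq_count;                          /* CPU-side bookkeeping, hot    */
        u32 flags;
        u8 rx_buf[256] ____cacheline_aligned;   /* handed to the device for DMA */
    } ____cacheline_aligned;                    /* rounds the struct size up so a
                                                 * following object cannot share
                                                 * rx_buf's last line either     */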
/linux-6.3-rc2/drivers/edac/ |
A D | Kconfig | 96 - inject_section (0..3, 16-byte section of 64-byte cacheline),
|