/linux-6.3-rc2/Documentation/virt/kvm/x86/
  running-nested-guests.rst
    19: L1 (Guest Hypervisor)
    33: - L1 – level-1 guest; a VM running on L0; also called the "guest
    36: - L2 – level-2 guest; a VM running on L1, this is the "nested guest"
    148: able to start an L1 guest with::
    191: On AMD systems, once an L1 guest has started an L2 guest, the L1 guest
    238: - Kernel, libvirt and QEMU version from L1
    248: - ``cat /sys/cpuinfo`` from L1
    252: - ``lscpu`` from L1
    256: - Full ``dmesg`` output from L1
    266: - Output of: ``x86info -a`` from L1
    [all …]
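
The guide these lines come from walks through running an L2 guest inside an L1 guest on an L0 host. As a minimal, hedged illustration of a host-side check commonly used with this setup, the C sketch below reads the kvm_intel ``nested`` module parameter; the sysfs path and the meaning of its value ("Y" or "1" when nesting is exposed to L1) are assumptions for Intel hosts, and AMD hosts would use kvm_amd instead::

    /* Hedged sketch: report whether nested virtualization appears enabled.
     * Assumes an Intel host exposing /sys/module/kvm_intel/parameters/nested;
     * AMD hosts would use kvm_amd instead. Error handling kept minimal. */
    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/module/kvm_intel/parameters/nested";
        FILE *f = fopen(path, "r");
        char val[8] = "";

        if (!f) {
            perror(path);   /* kvm_intel not loaded, or an AMD host */
            return 1;
        }
        if (fgets(val, sizeof(val), f))
            printf("nested = %s", val);   /* typically "Y" or "1" */
        fclose(f);
        return 0;
    }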
|
/linux-6.3-rc2/arch/arc/kernel/
  entry-compact.S
    152: ; if L2 IRQ interrupted a L1 ISR, disable preemption
    154: ; This is to avoid a potential L1-L2-L1 scenario
    155: ; -L1 IRQ taken
    156: ; -L2 interrupts L1 (before L1 ISR could run)
    160: ; But both L1 and L2 re-enabled, so another L1 can be taken
    161: ; while prev L1 is still unserviced
    165: ; L2 interrupting L1 implies both L2 and L1 active
    167: ; need to check STATUS32_L2 to determine if L1 was active
    335: ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
    358: ; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier
    [all …]
|
/linux-6.3-rc2/arch/arm/mm/
  proc-xsc3.S
    68: 1: mcr p15, 0, \rd, c7, c14, 2 @ clean/invalidate L1 D line
    113: mcr p15, 0, ip, c7, c7, 0 @ invalidate L1 caches and BTB
    196: mcrne p15, 0, r0, c7, c5, 1 @ invalidate L1 I line
    197: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
    224: 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
    229: mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB
    269: mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line
    271: mcrne p15, 0, r1, c7, c10, 1 @ clean L1 D line
    272: 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate L1 D line
    289: 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
    [all …]
|
/linux-6.3-rc2/security/apparmor/include/
  label.h
    163: #define next_comb(I, L1, L2) \  [argument]
    174: #define label_for_each_comb(I, L1, L2, P1, P2) \  [argument]
    177: (I) = next_comb(I, L1, L2))
    179: #define fn_for_each_comb(L1, L2, P1, P2, FN) \  [argument]
    183: label_for_each_comb(i, (L1), (L2), (P1), (P2)) { \
    243: #define fn_for_each2_XXX(L1, L2, P, FN, ...) \  [argument]
    247: label_for_each ## __VA_ARGS__(i, (L1), (L2), (P)) { \
    253: #define fn_for_each_in_merge(L1, L2, P, FN) \  [argument]
    254: fn_for_each2_XXX((L1), (L2), P, FN, _in_merge)
    255: #define fn_for_each_not_in_set(L1, L2, P, FN) \  [argument]
    [all …]
|
  perms.h
    183: #define xcheck_ns_labels(L1, L2, FN, args...) \  [argument]
    186: fn_for_each((L1), __p1, FN(__p1, (L2), args)); \
    190: #define xcheck_labels_profiles(L1, L2, FN, args...) \  [argument]
    191: xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args)
    193: #define xcheck_labels(L1, L2, P, FN1, FN2) \  [argument]
    194: xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2)))
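
These macros iterate over profiles drawn from two labels, L1 and L2, applying FN to each combination (fn_for_each_comb) or to merge and set-difference selections (fn_for_each_in_merge, fn_for_each_not_in_set). As a hedged illustration of the underlying pattern only, the sketch below walks the plain cross product of two small arrays; the kernel macros walk two sorted profile vectors and are considerably more involved, and every name in the sketch is invented::

    /* Hedged sketch of the "visit every (a, b) combination" pattern that
     * label.h builds with fn_for_each_comb(). The struct, macro and array
     * names here are illustrative, not AppArmor's types. */
    #include <stdio.h>

    struct item { const char *name; };

    #define for_each_comb(i, na, j, nb)          \
        for ((i) = 0; (i) < (na); (i)++)         \
            for ((j) = 0; (j) < (nb); (j)++)

    int main(void)
    {
        struct item a[] = { { "profile_a" }, { "profile_b" } };
        struct item b[] = { { "profile_x" }, { "profile_y" } };
        int i, j;

        /* Visits a[0]/b[0], a[0]/b[1], a[1]/b[0], a[1]/b[1]: a simplified
         * stand-in for the combination walk over two labels. */
        for_each_comb(i, 2, j, 2)
            printf("%s x %s\n", a[i].name, b[j].name);
        return 0;
    }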
|
/linux-6.3-rc2/arch/powerpc/perf/
  power8-pmu.c
    133: CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
    134: CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
    136: CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
    137: CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
    138: CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
    139: CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
    140: CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
|
  power9-pmu.c
    177: CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1_FIN);
    178: CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
    179: CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
    180: CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
    181: CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
    182: CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
    183: CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
|
  power10-pmu.c
    133: CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
    134: CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
    135: CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_LD_PREFETCH_CACHE_LINE_MISS);
    136: CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
    137: CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
    138: CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
    139: CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_REQ);
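
Each CACHE_EVENT_ATTR() line above exports one of the generic perf L1 cache events (L1-dcache-loads, L1-icache-load-misses, and so on) bound to the platform-specific PMU event that implements it on that POWER generation. The sketch below shows the general idea of such a name-to-raw-code table with invented values; it is not the kernel's sysfs attribute machinery, and the codes are placeholders rather than real POWER encodings::

    /* Hedged sketch: pair a human-readable cache-event name with a raw PMU
     * event code, similar in spirit to CACHE_EVENT_ATTR(). Struct and macro
     * names are illustrative; the kernel builds sysfs attributes instead. */
    #include <stdio.h>

    struct cache_event {
        const char *name;     /* generic perf event name */
        unsigned int code;    /* PMU-specific raw event code */
    };

    #define CACHE_EVENT(n, c)   { .name = (n), .code = (c) }

    /* Event codes below are placeholders, not real POWER event encodings. */
    static const struct cache_event events[] = {
        CACHE_EVENT("L1-dcache-loads",       0x100fc),
        CACHE_EVENT("L1-dcache-load-misses", 0x3e054),
    };

    int main(void)
    {
        for (unsigned int i = 0; i < sizeof(events) / sizeof(events[0]); i++)
            printf("%-24s -> raw 0x%x\n", events[i].name, events[i].code);
        return 0;
    }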
|
/linux-6.3-rc2/arch/hexagon/lib/
  memset.S
    159: if (r2==#0) jump:nt .L1
    186: if (p1) jump .L1
    197: if (p0.new) jump:nt .L1
    208: if (p0.new) jump:nt .L1
    284: .L1:  [label]
|
/linux-6.3-rc2/Documentation/driver-api/
  edac.rst
    145: - CPU caches (L1 and L2)
    155: For example, a cache could be composed of L1, L2 and L3 levels of cache.
    156: Each CPU core would have its own L1 cache, while sharing L2 and maybe L3
    164: cpu/cpu0/.. <L1 and L2 block directory>
    165: /L1-cache/ce_count
    169: cpu/cpu1/.. <L1 and L2 block directory>
    170: /L1-cache/ce_count
    176: the L1 and L2 directories would be "edac_device_block's"
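
The edac.rst lines describe an edac_device hierarchy in which each CPU's L1 and L2 cache blocks expose error counters such as ce_count. The sketch below reads one such counter; the absolute sysfs path is an assumption modelled on the documented cpu/cpuN/L1-cache/ce_count layout, and whether it exists at all depends on the platform's EDAC driver::

    /* Hedged sketch: read a corrected-error count for an L1 cache block.
     * The path below is an assumption based on the edac.rst layout
     * (cpu/cpuN/L1-cache/ce_count); real systems may name the edac_device
     * differently or not expose cache blocks at all. */
    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/sys/devices/system/edac/cpu/cpu0/L1-cache/ce_count";
        FILE *f = fopen(path, "r");
        unsigned long ce = 0;

        if (!f) {
            perror(path);
            return 1;
        }
        if (fscanf(f, "%lu", &ce) == 1)
            printf("cpu0 L1 corrected errors: %lu\n", ce);
        fclose(f);
        return 0;
    }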
|
/linux-6.3-rc2/Documentation/locking/
  lockdep-design.rst
    145: <L1> -> <L2>
    146: <L2> -> <L1>
    521: L1 -> L2
    608: L1 -> L2 ... -> Ln -> L1
    612: L1 -> L2
    616: Ln -> L1
    620: Firstly let's make one CPU/task get the L1 in L1 -> L2, and then another get
    624: And then because we have L1 -> L2, so the holder of L1 is going to acquire L2
    625: in L1 -> L2, however since L2 is already held by another CPU/task, plus L1 ->
    645: for L1 and holding Ln, so we will have Ln -> L1 in the dependency graph. Similarly,
    [all …]
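
The lockdep-design.rst lines describe the circular dependency L1 -> L2 -> ... -> Ln -> L1 that makes a deadlock possible. Lockdep itself is kernel-internal, but the ordering problem is easy to reproduce in user space; in the hedged sketch below one thread takes L1 then L2 while another takes L2 then L1, which is exactly the two-lock cycle the document reasons about (the pthread program is illustrative only, not something lockdep watches)::

    /* Hedged sketch of the two-lock cycle lockdep-design.rst describes:
     * one thread takes L1 then L2, the other takes L2 then L1. Run often
     * enough, the two threads deadlock; in the kernel, lockdep flags the
     * L1 -> L2 -> L1 cycle without the deadlock having to happen. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t L1 = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t L2 = PTHREAD_MUTEX_INITIALIZER;

    static void *task_a(void *arg)
    {
        pthread_mutex_lock(&L1);    /* establishes the order L1 -> L2 */
        pthread_mutex_lock(&L2);
        pthread_mutex_unlock(&L2);
        pthread_mutex_unlock(&L1);
        return NULL;
    }

    static void *task_b(void *arg)
    {
        pthread_mutex_lock(&L2);    /* establishes L2 -> L1: closes the cycle */
        pthread_mutex_lock(&L1);
        pthread_mutex_unlock(&L1);
        pthread_mutex_unlock(&L2);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, task_a, NULL);
        pthread_create(&b, NULL, task_b, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        puts("finished (this particular run happened not to deadlock)");
        return 0;
    }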
|
  rt-mutex-design.rst
    47: grab lock L1 (owned by C)
    139: Mutexes: L1, L2, L3, L4
    141: A owns: L1
    142: B blocked on L1
    159: F->L5->B->L1->A
    180: G->L2->B->L1->A
    236: mutex_lock(L1);
    240: mutex_unlock(L1);
    245: mutex_lock(L1);
    251: mutex_unlock(L1);
    [all …]
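
The rt-mutex-design.rst matches show priority-inheritance chains such as F->L5->B->L1->A: a low-priority owner of L1 is boosted while higher-priority tasks are blocked behind it. The kernel's rt_mutex API is internal, but POSIX exposes the same behaviour through PTHREAD_PRIO_INHERIT; the sketch below only shows how such a mutex is set up in user space and is not the kernel code path::

    /* Hedged sketch: a userspace mutex with priority inheritance enabled,
     * mirroring the boosting behaviour rt-mutex-design.rst describes for
     * in-kernel rt_mutexes. This uses the POSIX API, not the kernel's. */
    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
        pthread_mutexattr_t attr;
        pthread_mutex_t L1;

        pthread_mutexattr_init(&attr);
        /* A low-priority owner of L1 is boosted to the priority of the
         * highest-priority waiter for as long as it holds the lock. */
        if (pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT) != 0) {
            fputs("PTHREAD_PRIO_INHERIT not supported here\n", stderr);
            return 1;
        }
        pthread_mutex_init(&L1, &attr);

        pthread_mutex_lock(&L1);
        puts("holding L1 with priority inheritance enabled");
        pthread_mutex_unlock(&L1);

        pthread_mutex_destroy(&L1);
        pthread_mutexattr_destroy(&attr);
        return 0;
    }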
|
/linux-6.3-rc2/drivers/pci/pcie/
  Kconfig
    73: state L0/L0s/L1.
    99: Enable PCI Express ASPM L0s and L1 where possible, even if the
    106: Same as PCIEASPM_POWERSAVE, except it also enables L1 substates where
    107: possible. This would result in higher power savings while staying in L1
    114: Disable PCI Express ASPM L0s and L1, even if the BIOS enabled them.
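
These help texts belong to the mutually exclusive ASPM policy choices (default, performance, powersave, powersupersave). A hedged way to see which policy is active is to read the pcie_aspm ``policy`` module parameter; the sysfs path below is the conventional module-parameter location, assumed here rather than taken from the Kconfig text, and the file is absent when ASPM support is not built in::

    /* Hedged sketch: print the active PCIe ASPM policy. The path is the
     * usual module-parameter location (an assumption, not stated in the
     * Kconfig text); the active choice is shown in brackets, e.g.
     * "default performance [powersave] powersupersave". */
    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/module/pcie_aspm/parameters/policy";
        char line[128];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);   /* ASPM support probably not built in */
            return 1;
        }
        if (fgets(line, sizeof(line), f))
            printf("ASPM policy: %s", line);
        fclose(f);
        return 0;
    }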
|
/linux-6.3-rc2/arch/m68k/fpsp040/
  setox.S
    104: | 3.1 R := X + N*L1, where L1 := single-precision(-log2/64).
    106: | Notes: a) The way L1 and L2 are chosen ensures L1+L2 approximate
    108: | b) N*L1 is exact because N is no longer than 22 bits and
    109: | L1 is no longer than 24 bits.
    111: | Thus, R is practically X+N(L1+L2) to full 64 bits.
    505: fmuls #0xBC317218,%fp0 | ...N * L1, L1 = lead(-log2/64)
    506: fmulx L2,%fp2 | ...N * L2, L1+L2 = -log2/64
    507: faddx %fp1,%fp0 | ...X + N*L1
    671: fmuls #0xBC317218,%fp0 | ...N * L1, L1 = lead(-log2/64)
    672: fmulx L2,%fp2 | ...N * L2, L1+L2 = -log2/64
    [all …]
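
The setox.S comments describe a two-constant argument reduction for exp(): N is chosen so that X is close to N*(log2/64), L1 is a short (exactly representable) head of -log2/64 so that N*L1 is exact, and L2 carries the remaining bits, giving R = X + N*L1 + N*L2 with almost no rounding error. The double-precision sketch below reproduces that arithmetic; the constants and names are illustrative and not the FPSP's extended-precision values::

    /* Hedged sketch of the two-constant ("Cody-Waite") reduction that the
     * setox.S comments describe: L1 is a short head of -log2/64 so N*L1 is
     * exact, L2 carries the tail. Double precision, for illustration only. */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        const double log2_64 = 0x1.62e42fefa39efp-7;  /* log(2)/64 */
        const double L1 = -0x1.62e43p-7;   /* short head of -log2/64 */
        const double L2 = -log2_64 - L1;   /* tail, so L1 + L2 == -log2/64 */
        double x = 1.234567;               /* arbitrary sample input */

        int n = (int)nearbyint(x / log2_64);   /* x ~= n*(log2/64) + r */
        double r = (x + n * L1) + n * L2;      /* n*L1 is exact, r stays accurate */

        /* exp(x) = 2^(n/64) * exp(r); reconstruct and compare with exp(). */
        double y = exp2(n / 64.0) * exp(r);
        printf("exp(x) = %.17g, via reduction = %.17g\n", exp(x), y);
        return 0;
    }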
|
/linux-6.3-rc2/Documentation/devicetree/bindings/media/
  st-rc.txt
    10: - rx-mode: can be "infrared" or "uhf". This property specifies the L1
    13: - tx-mode: should be "infrared". This property specifies the L1
|
/linux-6.3-rc2/arch/m68k/lib/
  divsi3.S
    95: jpl L1
    102: L1: movel sp@(8), d0 /* d0 = dividend */  [label]
|
/linux-6.3-rc2/Documentation/translations/zh_CN/arm64/
  memory.txt
    90: | | +---------------------> [38:30] L1 index
    105: | +-------------------------------> [47:42] L1 index
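
Both matched lines come from the translated arm64 memory-layout diagrams: with 4K pages and a 39-bit virtual address the level-1 table index sits in bits [38:30], while the 64K-page layout places it in bits [47:42]. The sketch below simply masks those ranges out of an address; the bit positions are taken from the lines above, while the function names and the sample address are illustrative::

    /* Hedged sketch: extract the L1 (level-1 translation table) index from
     * a virtual address for the two layouts shown in memory.txt:
     * bits [38:30] with 4K pages / 39-bit VA, bits [47:42] with 64K pages. */
    #include <stdio.h>
    #include <stdint.h>

    static unsigned int l1_index_4k(uint64_t va)
    {
        return (va >> 30) & 0x1ff;   /* 9 bits: [38:30] */
    }

    static unsigned int l1_index_64k(uint64_t va)
    {
        return (va >> 42) & 0x3f;    /* 6 bits: [47:42] */
    }

    int main(void)
    {
        uint64_t va = 0x0000003fc0123456ULL;   /* arbitrary example VA */

        printf("4K pages:  L1 index = %u\n", l1_index_4k(va));
        printf("64K pages: L1 index = %u\n", l1_index_64k(va));
        return 0;
    }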
|
/linux-6.3-rc2/arch/alpha/boot/
  bootp.c
    65: #define L1 ((unsigned long *) 0x200802000)  [macro]
    77: pcb_va->ptbr = L1[1] >> 32;  [in pal_init()]
|
  main.c
    59: #define L1 ((unsigned long *) 0x200802000)  [macro]
    71: pcb_va->ptbr = L1[1] >> 32;  [in pal_init()]
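
In both boot stubs, L1 points at the console-built level-1 page table, and pal_init() loads the PCB's page table base register (ptbr) with the page frame number kept in the upper 32 bits of the second L1 entry. The sketch below only demonstrates that shift on an invented PTE value; the mapping address and PTE layout are Alpha-specific and taken as given from the lines above::

    /* Hedged sketch: on Alpha, a PTE keeps the page frame number in its
     * upper 32 bits, so "pte >> 32" yields the PFN that ptbr wants. The
     * sample value below is invented purely to show the shift. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t pte = 0x0000020700001101ULL;   /* made-up L1[1] contents */
        uint64_t pfn = pte >> 32;               /* page frame number */

        printf("pte  = 0x%016llx\n", (unsigned long long)pte);
        printf("ptbr = 0x%llx (PFN)\n", (unsigned long long)pfn);
        return 0;
    }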
|
/linux-6.3-rc2/arch/riscv/lib/
  tishift.S
    10: beqz a2, .L1
    21: .L1:  [label]
|
/linux-6.3-rc2/lib/
  test_dynamic_debug.c
    92: enum cat_level_names { L0 = 22, L1, L2, L3, L4, L5, L6, L7 };  [enumerator]
    133: prdbg(L1);  [in do_levels()]
|
/linux-6.3-rc2/tools/perf/Documentation/
  perf-c2c.txt
    217: L1Hit - store accesses that hit L1
    218: L1Miss - store accesses that missed L1
    221: Core Load Hit - FB, L1, L2
    222: - count of load hits in FB (Fill Buffer), L1 and L2 cache
    243: Store Refs - L1 Hit, L1 Miss, N/A
    244: - % of store accesses that hit L1, missed L1 and N/A (no available) memory
|
/linux-6.3-rc2/arch/arm/mach-omap2/
  sram242x.S
    39: str r3, [r2] @ go to L1-freq operation
    42: mov r9, #0x1 @ set up for L1 voltage call
    101: orr r5, r5, r9 @ bulld value for L0/L1-volt operation.
    105: str r5, [r4] @ Force transition to L1
    196: orr r8, r8, r9 @ bulld value for L0/L1-volt operation.
    200: str r8, [r10] @ Force transition to L1
|
  sram243x.S
    39: str r3, [r2] @ go to L1-freq operation
    42: mov r9, #0x1 @ set up for L1 voltage call
    101: orr r5, r5, r9 @ bulld value for L0/L1-volt operation.
    105: str r5, [r4] @ Force transition to L1
    196: orr r8, r8, r9 @ bulld value for L0/L1-volt operation.
    200: str r8, [r10] @ Force transition to L1
|
/linux-6.3-rc2/Documentation/translations/zh_TW/arm64/
  memory.txt
    94: | | +---------------------> [38:30] L1 index
    109: | +-------------------------------> [47:42] L1 index
|