
Searched refs:val (Results 1 – 22 of 22) sorted by relevance

/arch/arm64/include/arch/
reg.h
21 uint8_t val; \
23 val; \
26 uint16_t val; \
28 val; \
31 uint32_t val; \
33 val; \
36 uint64_t val; \
38 val; \
41 #define _ARCH_MMIO_WRITE8(addr, val) \ argument
43 #define _ARCH_MMIO_WRITE16(addr, val) \ argument
[all …]
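
The truncated read macros above follow the GNU statement-expression pattern the fragments show: declare a typed temporary, perform the access, and yield the temporary as the expression's value. A minimal sketch, assuming a plain volatile load (the real header may use explicit load instructions instead; the _SKETCH name is mine):

#define _ARCH_MMIO_READ8_SKETCH(addr) \
    ({ \
        uint8_t val;                       /* typed temporary */ \
        val = *(volatile uint8_t *)(addr); /* exactly one volatile load */ \
        val;                               /* value yielded by the ({ }) block */ \
    })
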
arm64.h
29 #define ARM64_WRITE_SYSREG(reg, val) \ argument
31 __asm__ volatile("msr " TOSTRING(reg) ", %0" :: "r" (val)); \
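
ARM64_WRITE_SYSREG stringizes the register name straight into an msr instruction, so the register must be a compile-time token, e.g. ARM64_WRITE_SYSREG(sctlr_el1, val). A hedged sketch of the matching read direction via mrs, reusing the header's TOSTRING helper (the _SKETCH name is mine):

#define ARM64_READ_SYSREG_SKETCH(reg) \
    ({ \
        uint64_t val; \
        __asm__ volatile("mrs %0, " TOSTRING(reg) : "=r" (val)); /* read system register */ \
        val; \
    })
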
/arch/x86/include/arch/
reg.h
24 uint8_t val; \
26 val; \
29 uint16_t val; \
31 val; \
34 uint32_t val; \
36 val; \
40 uint64_t val; \
42 val; \
46 #define _ARCH_MMIO_WRITE8(addr, val) \ argument
48 #define _ARCH_MMIO_WRITE16(addr, val) \ argument
[all …]
x86.h
501 static inline void x86_write_gs_offset64(uintptr_t offset, uint64_t val) { in x86_write_gs_offset64() argument
502 __asm__("movq %0, %%gs:%1" : : "ir"(val), "m"(*(uint64_t*)(offset)) : "memory"); in x86_write_gs_offset64()
511 static inline void x86_write_gs_offset32(uintptr_t offset, uint32_t val) { in x86_write_gs_offset32() argument
512 __asm__("movl %0, %%gs:%1" : : "ir"(val), "m"(*(uint32_t*)(offset)) : "memory"); in x86_write_gs_offset32()
521 static inline void x86_write_gs_offset_ptr(uintptr_t offset, void *val) { in x86_write_gs_offset_ptr() argument
522 x86_write_gs_offset64(offset, (uint64_t)(val)); in x86_write_gs_offset_ptr()
528 static inline void x86_write_gs_offset_ptr(uintptr_t offset, void *val) { in x86_write_gs_offset_ptr() argument
529 x86_write_gs_offset32(offset, (uint32_t)(val)); in x86_write_gs_offset_ptr()
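
The x86_write_gs_offset* helpers store through the %gs segment base, the usual x86-64 vehicle for per-CPU data. A sketch of the mirrored 64-bit read, using the same "m" addressing trick (the _sketch name is mine):

static inline uint64_t x86_read_gs_offset64_sketch(uintptr_t offset) {
    uint64_t val;
    /* load 8 bytes from gs_base + offset */
    __asm__("movq %%gs:%1, %0" : "=r"(val) : "m"(*(uint64_t*)(offset)));
    return val;
}
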
/arch/arm/include/arch/
reg.h
21 uint8_t val; \
23 val; \
26 uint16_t val; \
28 val; \
31 uint32_t val; \
33 val; \
36 #define _ARCH_MMIO_WRITE8(addr, val) \ argument
37 __asm__ volatile("strb %1, %0" : "=m"(*(addr)) : "r"(val) : "memory")
38 #define _ARCH_MMIO_WRITE16(addr, val) \ argument
40 #define _ARCH_MMIO_WRITE32(addr, val) \ argument
[all …]
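
Unlike the arm64/x86 headers, the 32-bit ARM variant spells out the store instruction per width (strb/strh/str). A small usage sketch against a made-up device address:

static void uart_putc_sketch(void) {
    volatile uint8_t *uart_thr = (volatile uint8_t *)0x101f1000; /* hypothetical UART data register */
    _ARCH_MMIO_WRITE8(uart_thr, 'A');                            /* compiles to a single strb */
}
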
arch_atomic.h
17 static inline int atomic_add(volatile int *ptr, int val) { in atomic_add() argument
24 *ptr = temp + val; in atomic_add()
30 static inline int atomic_and(volatile int *ptr, int val) { in atomic_and() argument
37 *ptr = temp & val; in atomic_and()
43 static inline int atomic_or(volatile int *ptr, int val) { in atomic_or() argument
50 *ptr = temp | val; in atomic_or()
56 static inline int atomic_swap(volatile int *ptr, int val) { in atomic_swap() argument
63 *ptr = val; in atomic_swap()
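
The plain temp = *ptr; *ptr = temp op val sequences above are only atomic if nothing can interrupt them, so on a uniprocessor build they are normally bracketed by IRQ masking. A minimal sketch of that pattern for classic ARM (the masking asm is my assumption, not the header's elided lines):

static inline int atomic_add_sketch(volatile int *ptr, int val) {
    uint32_t cpsr;
    __asm__ volatile("mrs %0, cpsr\n\tcpsid i" : "=r"(cpsr) :: "memory"); /* save state, mask IRQs */
    int temp = *ptr;                                            /* read old value */
    *ptr = temp + val;                                          /* store updated value */
    __asm__ volatile("msr cpsr_c, %0" :: "r"(cpsr) : "memory"); /* restore IRQ state */
    return temp;                                                /* fetch-add returns the previous value */
}
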
arm.h
106 uint32_t val; \
107 __asm__ volatile("mrc " #cp ", " #op1 ", %0, " #c1 "," #c2 "," #op2 : "=r" (val)); \
108 return val; \
112 uint32_t val; \
113 __asm__("mrc " #cp ", " #op1 ", %0, " #c1 "," #c2 "," #op2 : "=r" (val)); \
114 return val; \
117 static inline __ALWAYS_INLINE void arm_write_##reg(uint32_t val) { \
118 __asm__ volatile("mcr " #cp ", " #op1 ", %0, " #c1 "," #c2 "," #op2 :: "r" (val)); \
122 static inline __ALWAYS_INLINE void arm_write_##reg##_relaxed(uint32_t val) { \
123 __asm__ volatile("mcr " #cp ", " #op1 ", %0, " #c1 "," #c2 "," #op2 :: "r" (val)); \
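
These fragments are the body of a per-register accessor generator: stringized coprocessor/opcode parameters are pasted into mrc/mcr, producing arm_read_<reg>, arm_write_<reg>, and _relaxed twins (for reads, the relaxed asm drops the volatile qualifier so the compiler may fold repeated reads). A hedged expansion for CPACR (encoding p15, 0, c1, c0, 2); the _sketch names are mine, though arm_read_cpacr()/arm_write_cpacr() do appear in arch.c below:

static inline __ALWAYS_INLINE uint32_t arm_read_cpacr_sketch(void) {
    uint32_t val;
    __asm__ volatile("mrc p15, 0, %0, c1, c0, 2" : "=r" (val)); /* read CPACR */
    return val;
}

static inline __ALWAYS_INLINE void arm_write_cpacr_sketch(uint32_t val) {
    __asm__ volatile("mcr p15, 0, %0, c1, c0, 2" :: "r" (val)); /* write CPACR */
}
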
/arch/include/arch/
atomic.h
18 static inline int atomic_add(volatile int *ptr, int val) { in atomic_add() argument
19 return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED); in atomic_add()
22 static inline int atomic_or(volatile int *ptr, int val) { in atomic_or() argument
23 return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED); in atomic_or()
26 static inline int atomic_and(volatile int *ptr, int val) { in atomic_and() argument
27 return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED); in atomic_and()
30 static inline int atomic_swap(volatile int *ptr, int val) { in atomic_swap() argument
39 static int atomic_swap(volatile int *ptr, int val);
40 static int atomic_add(volatile int *ptr, int val);
41 static int atomic_and(volatile int *ptr, int val);
[all …]
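
All of these return the value observed before the modification, matching the __atomic_fetch_* builtins they wrap. A quick usage check:

static void atomic_demo(void) {
    volatile int counter = 5;
    int old = atomic_add(&counter, 3); /* old == 5,    counter == 8 */
    old = atomic_or(&counter, 0x10);   /* old == 8,    counter == 0x18 */
    old = atomic_swap(&counter, 0);    /* old == 0x18, counter == 0 */
    (void)old;
}
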
/arch/or1k/include/arch/
arch_ops.h
36 static inline int atomic_add(volatile int *ptr, int val) {
37 return __atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
40 static inline int atomic_or(volatile int *ptr, int val) {
41 return __atomic_fetch_or(ptr, val, __ATOMIC_RELAXED);
44 static inline int atomic_and(volatile int *ptr, int val) {
45 return __atomic_fetch_and(ptr, val, __ATOMIC_RELAXED);
48 static inline int atomic_swap(volatile int *ptr, int val) {
49 return __atomic_exchange_n(ptr, val, __ATOMIC_RELAXED);
/arch/mips/include/arch/
mips.h
17 uint32_t val; \
18 __asm__ volatile("mfc0 %0, $" #regnum ", " #sel : "=r" (val)); \
19 return val; \
23 uint32_t val; \
24 __asm__("mfc0 %0, $" #regnum ", " #sel : "=r" (val)); \
25 return val; \
28 static inline __ALWAYS_INLINE void mips_write_##regname(uint32_t val) { \
29 __asm__ volatile("mtc0 %0, $" #regnum ", " #sel :: "r" (val)); \
32 static inline __ALWAYS_INLINE void mips_write_##regname##_relaxed(uint32_t val) { \
33 __asm__ volatile("mtc0 %0, $" #regnum ", " #sel :: "r" (val)); \
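
Note the one difference between the generated reader pair: the plain reader marks its mfc0 as __asm__ volatile, forcing a fresh read on every call, while the _relaxed reader omits volatile, so the compiler may reuse or hoist a prior result. An illustration, with hypothetical generated names for the Count register:

static void count_demo_sketch(void) {
    uint32_t t0 = mips_read_c0_count();         /* always executes an mfc0 */
    uint32_t t1 = mips_read_c0_count_relaxed(); /* may be folded with an earlier read */
    (void)t0; (void)t1;
}
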
/arch/arm/arm-m/systick/
systick.c
90 static void systick_get_ticks_val(uint64_t *ticks, uint32_t *val) { in systick_get_ticks_val() argument
94 *val = SysTick->VAL; in systick_get_ticks_val()
97 *val = SysTick->VAL; in systick_get_ticks_val()
109 uint32_t val; in current_time() local
111 systick_get_ticks_val(&ticks, &val); in current_time()
114 uint32_t delta = (reload - val) / (tick_rate_mhz * 1000); in current_time()
124 uint32_t val; in current_time_hires() local
126 systick_get_ticks_val(&ticks, &val); in current_time_hires()
129 uint32_t delta = (reload - val) / tick_rate_mhz; in current_time_hires()
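
systick_get_ticks_val reads VAL on both sides of the tick count (file lines 94 and 97) so a counter wrap between the two reads can be detected and retried; SysTick counts down from LOAD toward zero. Given a coherent (ticks, val) pair, microsecond time is simple arithmetic. A sketch, with tick_interval_us standing in for whatever per-tick period the driver keeps:

static uint64_t current_time_us_sketch(void) {
    uint64_t ticks;
    uint32_t val;
    systick_get_ticks_val(&ticks, &val);
    uint32_t reload = SysTick->LOAD;       /* countdown start value */
    return ticks * tick_interval_us        /* whole ticks elapsed */
         + (reload - val) / tick_rate_mhz; /* microseconds into the current tick */
}
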
/arch/arm/include/arch/arm/
mmu.h
114 #define MMU_MEMORY_SET_L1_INNER(val) (((val) & 0x3) << MMU_MEMORY_L1_CB_SHIFT) argument
115 #define MMU_MEMORY_SET_L1_OUTER(val) (((val) & 0x3) << MMU_MEMORY_L1_TEX_SHIFT) argument
129 #define MMU_MEMORY_SET_L2_INNER(val) (((val) & 0x3) << MMU_MEMORY_L2_CB_SHIFT) argument
130 #define MMU_MEMORY_SET_L2_OUTER(val) (((val) & 0x3) << MMU_MEMORY_L2_TEX_SHIFT) argument
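
Each SET macro masks its argument to two bits and shifts it into the descriptor's cacheability field. A composition sketch (the attribute values 0x1/0x3 are illustrative, not the header's named policies):

uint32_t l1_attrs = MMU_MEMORY_SET_L1_INNER(0x1)  /* 2-bit inner cache policy */
                  | MMU_MEMORY_SET_L1_OUTER(0x3); /* 2-bit outer cache policy */
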
dcc.h
15 typedef void (*dcc_rx_callback_t)(uint32_t val);
/arch/x86/
lapic.c
110 static void lapic_write(enum lapic_regs reg, uint32_t val) { in lapic_write() argument
111 LTRACEF_LEVEL(2, "reg %#x val %#x\n", reg, val); in lapic_write()
114 write_msr(X86_MSR_IA32_X2APIC_BASE + reg / 0x10, val); in lapic_write()
116 mmio_write32(lapic_mmio + reg / 4, val); in lapic_write()
122 uint32_t val; in lapic_wait_for_icr_delivery() local
125 val = read_msr(X86_MSR_IA32_X2APIC_BASE + 0x30); in lapic_wait_for_icr_delivery()
127 val = lapic_read(LAPIC_ICRLO); in lapic_wait_for_icr_delivery()
129 } while (val & (1u << 12)); in lapic_wait_for_icr_delivery()
304 lapic_write(LAPIC_TIMER, val); in lapic_timer_init_percpu()
309 lapic_write(LAPIC_TIMER, val); in lapic_timer_init_percpu()
[all …]
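
lapic_write dispatches on APIC mode: in x2APIC mode registers are MSRs spaced reg/0x10 from the x2APIC MSR base, in xAPIC mode they are 32-bit MMIO words (reg/4 on a uint32_t pointer). A sketch of the read counterpart implied by lapic_wait_for_icr_delivery, with x2apic_mode standing in for whatever mode flag the driver actually keeps:

static uint32_t lapic_read_sketch(enum lapic_regs reg) {
    if (x2apic_mode) /* hypothetical flag */
        return (uint32_t)read_msr(X86_MSR_IA32_X2APIC_BASE + reg / 0x10);
    return mmio_read32(lapic_mmio + reg / 4);
}

The wait loop then polls bit 12 of ICRLO, the Delivery Status bit, until the pending IPI has been dispatched.
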
/arch/microblaze/include/arch/
microblaze.h
18 static inline void mb_write_msr(uint32_t val) { in mb_write_msr() argument
20 "mts rmsr, %0" :: "r" (val)); in mb_write_msr()
/arch/arm/arm/
fpu.c
25 uint32_t val; in read_fpexc() local
27 __asm__("mrc p10, 7, %0, c8, c0, 0" : "=r" (val)); in read_fpexc()
28 return val; in read_fpexc()
31 static inline void write_fpexc(uint32_t val) { in write_fpexc() argument
33 __asm__ volatile("mcr p10, 7, %0, c8, c0, 0" :: "r" (val)); in write_fpexc()
debug.c
42 uint32_t val = arm_read_dbgdtrrxint(); in dcc_worker_entry() local
44 dcc->rx_callback(val); in dcc_worker_entry()
94 uint32_t val = arm_read_dbgdtrrxint(); in arm_dcc_read() local
95 *buf++ = val; in arm_dcc_read()
132 static void dcc_rx_callback(uint32_t val) { in dcc_rx_callback() argument
arch.c
235 uint32_t val = arm_read_cpacr(); in arm_basic_setup() local
236 val |= (3<<22)|(3<<20); in arm_basic_setup()
237 arm_write_cpacr(val); in arm_basic_setup()
240 __asm__ volatile("mrc p10, 7, %0, c8, c0, 0" : "=r" (val)); in arm_basic_setup()
241 val |= (1<<30); in arm_basic_setup()
242 __asm__ volatile("mcr p10, 7, %0, c8, c0, 0" :: "r" (val)); in arm_basic_setup()
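
arm_basic_setup enables the VFP/NEON unit in two steps: grant full access to coprocessors 10 and 11 in CPACR (bits 21:20 and 23:22 set to 0b11), then set FPEXC.EN (bit 30). The whole sequence as one hedged sketch; the isb assumes an ARMv7-class core:

static void arm_fpu_enable_sketch(void) {
    uint32_t val = arm_read_cpacr();
    val |= (3 << 22) | (3 << 20);                               /* cp11 + cp10: full access */
    arm_write_cpacr(val);
    __asm__ volatile("isb");                                    /* make the CPACR write visible */
    __asm__ volatile("mrc p10, 7, %0, c8, c0, 0" : "=r" (val)); /* read FPEXC */
    val |= (1u << 30);                                          /* FPEXC.EN */
    __asm__ volatile("mcr p10, 7, %0, c8, c0, 0" :: "r" (val)); /* write FPEXC */
}
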
/arch/microblaze/
arch.c
21 uint32_t val = mb_read_msr(); in arch_early_init() local
22 val |= (1 << (31 - 26)) | (1 << (31 - 24)); in arch_early_init()
23 mb_write_msr(val); in arch_early_init()
/arch/m68k/
arch.c
53 unsigned int __atomic_fetch_add_4 (volatile void *mem, unsigned int val, int model) { in __atomic_fetch_add_4() argument
57 *(volatile unsigned int *)mem = old + val; in __atomic_fetch_add_4()
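
m68k builds here provide __atomic_fetch_add_4 as an out-of-line software helper, which the compiler calls when it does not inline the atomic. A hedged completion of the visible lines; the real function's elided middle presumably masks interrupts, which this sketch omits:

unsigned int __atomic_fetch_add_4_sketch(volatile void *mem, unsigned int val, int model) {
    (void)model;                                  /* memory order is moot on a UP core */
    /* interrupt masking elided; required for real atomicity */
    unsigned int old = *(volatile unsigned int *)mem;
    *(volatile unsigned int *)mem = old + val;
    return old;                                   /* fetch-add returns the prior value */
}
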
/arch/arm64/include/arch/arm64/
mmu.h
91 #define BM(base, count, val) (((val) & ((1UL << (count)) - 1)) << (base)) argument
93 #define BM(base, count, val) (((val) & ((0x1 << (count)) - 1)) << (base)) argument
263 #define ARM64_TLBI(op, val) \ argument
265 __asm__ volatile("tlbi " #op ", %0" :: "r" (val)); \
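
ARM64_TLBI stringizes the operation name into a tlbi instruction with a register operand. A usage sketch for a single-page invalidate; the >> 12 assumes a 4 KiB translation granule:

static void flush_one_page_sketch(uintptr_t vaddr) {
    ARM64_TLBI(vaae1is, vaddr >> 12); /* invalidate this VA, all ASIDs, EL1, inner-shareable */
}
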
/arch/riscv/include/arch/
riscv.h
182 #define riscv_csr_write(csr, val) \ argument
184 ulong __val = (ulong)(val); \
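
The macro body is cut off after the cast; presumably __val then feeds a csrw. A hedged reconstruction, not the header's actual text:

#define riscv_csr_write_sketch(csr, val) \
    ({ \
        ulong __val = (ulong)(val); \
        __asm__ volatile("csrw " #csr ", %0" :: "r" (__val) : "memory"); \
    })
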
