/*
 * Based on linux arch/arm64/include/asm/io.h which is in turn
 * based on arch/arm/include/asm/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef _ARM_ARM64_IO_H
#define _ARM_ARM64_IO_H

#include <xen/byteorder.h>

#include <asm/system.h>
#include <asm/alternative.h>

/*
 * Generic IO read/write. These perform native-endian accesses.
 */
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
    asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
}

static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
    asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
}

static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
    asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
}

static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
    asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
}

static inline u8 __raw_readb(const volatile void __iomem *addr)
{
    u8 val;

    /*
     * On cores with the DEVICE_LOAD_ACQUIRE erratum, the plain load is
     * patched at runtime into a load-acquire. The same pattern is used
     * by all the read accessors below.
     */
    asm_inline volatile (
        ALTERNATIVE("ldrb %w0, [%1]",
                    "ldarb %w0, [%1]",
                    ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
        : "=r" (val) : "r" (addr) );

    return val;
}

static inline u16 __raw_readw(const volatile void __iomem *addr)
{
    u16 val;

    asm_inline volatile (
        ALTERNATIVE("ldrh %w0, [%1]",
                    "ldarh %w0, [%1]",
                    ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
        : "=r" (val) : "r" (addr) );

    return val;
}

static inline u32 __raw_readl(const volatile void __iomem *addr)
{
    u32 val;

    asm_inline volatile (
        ALTERNATIVE("ldr %w0, [%1]",
                    "ldar %w0, [%1]",
                    ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
        : "=r" (val) : "r" (addr) );

    return val;
}

static inline u64 __raw_readq(const volatile void __iomem *addr)
{
    u64 val;

    asm_inline volatile (
        ALTERNATIVE("ldr %0, [%1]",
                    "ldar %0, [%1]",
                    ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
        : "=r" (val) : "r" (addr) );

    return val;
}
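
/*
 * Illustrative sketch, not part of the original interface: the __raw_*
 * accessors above are native-endian and carry no ordering or byte-swap
 * guarantees; they exist as building blocks for the helpers below. The
 * register and its layout here are hypothetical.
 */
static inline u32 example_raw_toggle(volatile void __iomem *reg)
{
    u32 v = __raw_readl(reg);  /* native-endian load, no barrier */

    __raw_writel(v ^ 1U, reg); /* native-endian store, no barrier */

    return v;
}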

/* IO barriers */
#define __iormb() rmb()
#define __iowmb() wmb()

#define mmiowb() do { } while (0)
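
/*
 * Illustrative sketch, not part of the original interface: __iowmb()
 * orders prior Normal memory writes before a later Device write, which
 * is exactly how the writeX accessors further down are composed. The
 * descriptor slot and doorbell register here are hypothetical.
 */
static inline void example_publish(u32 *slot, u32 v,
                                   volatile void __iomem *doorbell)
{
    *slot = v;                  /* Normal memory: fill the descriptor */
    __iowmb();                  /* order the fill before the MMIO kick */
    __raw_writel(1U, doorbell); /* Device write: ring the doorbell */
}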

/*
 * Relaxed I/O memory access primitives. These follow the Device memory
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses.
 */
#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; })
#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; })
#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; })
#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; })
#define readq_relaxed_non_atomic(c) readq_relaxed(c)

#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c)))
#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
#define writeq_relaxed_non_atomic(v,c) writeq_relaxed(v,c)
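
/*
 * Illustrative sketch, not part of the original interface: a relaxed
 * read is sufficient when only Device ordering matters, e.g. polling a
 * flag with no dependent Normal memory access. The status register and
 * ready bit here are hypothetical.
 */
static inline void example_poll_ready(volatile void __iomem *status)
{
    while ( !(readl_relaxed(status) & 1U) )
        ;                       /* spin until the device sets bit 0 */
}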

/*
 * I/O memory access primitives. Reads are ordered relative to any
 * following Normal memory access. Writes are ordered relative to any prior
 * Normal memory access.
 */
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; })

#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); })
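
/*
 * Illustrative sketch, not part of the original interface: readl()
 * issues __iormb() after the Device read, so the Normal memory read of
 * the buffer below cannot be observed ahead of the completion flag. The
 * completion register and buffer layout here are hypothetical.
 */
static inline u8 example_consume(const volatile void __iomem *done_reg,
                                 const u8 *buf)
{
    if ( readl(done_reg) & 1U ) /* Device read, then read barrier */
        return buf[0];          /* ordered after the flag check */

    return 0;
}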

/*
 * Emulate x86 I/O ports for Arm: there is no port address space, so
 * accesses warn and reads return zero.
 */
static inline int emulate_read(u64 addr)
{
    printk(XENLOG_G_WARNING "Can't access IO %lx\n", addr);
    return 0;
}

static inline void emulate_write(u64 addr)
{
    printk(XENLOG_G_WARNING "Can't access IO %lx\n", addr);
}

#define inb(c) ( emulate_read(c) )
#define inw(c) ( emulate_read(c) )
#define inl(c) ( emulate_read(c) )

#define outb(v, c) ( emulate_write(c) )
#define outw(v, c) ( emulate_write(c) )
#define outl(v, c) ( emulate_write(c) )
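
/*
 * Illustrative sketch, not part of the original interface: a caller
 * ported from x86 only gets a warning from these stubs; the written
 * value is dropped and every read yields zero. The port numbers here
 * are hypothetical.
 */
static inline u8 example_port_probe(void)
{
    outb(0xfe, 0x70);           /* warns "Can't access IO 70"; value dropped */
    return inb(0x71);           /* warns again and evaluates to 0 */
}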

#endif /* _ARM_ARM64_IO_H */