/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2017 Andes Technology Corporation
 * Rick Chen, Andes Technology Corporation <rick@andestech.com>
 */
#ifndef __ASM_RISCV_IO_H
#define __ASM_RISCV_IO_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

/* No-op on RISC-V; provided for source compatibility with other arches */
static inline void sync(void)
{
}

#define __arch_getb(a)			(*(volatile unsigned char *)(a))
#define __arch_getw(a)			(*(volatile unsigned short *)(a))
#define __arch_getl(a)			(*(volatile unsigned int *)(a))
#define __arch_getq(a)			(*(volatile unsigned long long *)(a))

#define __arch_putb(v, a)		(*(volatile unsigned char *)(a) = (v))
#define __arch_putw(v, a)		(*(volatile unsigned short *)(a) = (v))
#define __arch_putl(v, a)		(*(volatile unsigned int *)(a) = (v))
#define __arch_putq(v, a)		(*(volatile unsigned long long *)(a) = (v))

#define __raw_writeb(v, a)		__arch_putb(v, a)
#define __raw_writew(v, a)		__arch_putw(v, a)
#define __raw_writel(v, a)		__arch_putl(v, a)
#define __raw_writeq(v, a)		__arch_putq(v, a)

#define __raw_readb(a)			__arch_getb(a)
#define __raw_readw(a)			__arch_getw(a)
#define __raw_readl(a)			__arch_getl(a)
#define __raw_readq(a)			__arch_getq(a)
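
/*
 * The __raw_* accessors above compile to plain volatile loads/stores with
 * no barriers and no byte swapping. A minimal usage sketch (the register
 * base and offset below are hypothetical, not part of this header):
 *
 *	void __iomem *regs = (void __iomem *)0x10000000;
 *
 *	u32 id = __raw_readl(regs + 0x04);
 *	__raw_writel(id | 1, regs + 0x04);
 *
 * Prefer the barrier-inserting readl()/writel() defined below unless the
 * extra ordering is known to be unnecessary.
 */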

/* Added for cadence_qspi_apb.c */
#define memcpy_fromio(a, c, l)		memcpy((a), (c), (l))
#define memcpy_toio(c, a, l)		memcpy((c), (a), (l))
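
/*
 * Both helpers are plain memcpy() wrappers here, since all I/O is memory
 * mapped on RISC-V. A sketch of copying from a memory-mapped flash window
 * (the window address and buffer size are assumptions for illustration):
 *
 *	u8 buf[64];
 *
 *	memcpy_fromio(buf, (void __iomem *)0x20000000, sizeof(buf));
 */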

#define dmb()		mb()
#define __iormb()	rmb()
#define __iowmb()	wmb()

/*
 * Ordered MMIO accessors: a write barrier before each store and a read
 * barrier after each load keep device accesses ordered with respect to
 * surrounding memory accesses.
 */
static inline void writeb(u8 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putb(val, addr);
}

static inline void writew(u16 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putw(val, addr);
}

static inline void writel(u32 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putl(val, addr);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putq(val, addr);
}

static inline u8 readb(const volatile void __iomem *addr)
{
	u8	val;

	val = __arch_getb(addr);
	__iormb();
	return val;
}

static inline u16 readw(const volatile void __iomem *addr)
{
	u16	val;

	val = __arch_getw(addr);
	__iormb();
	return val;
}

static inline u32 readl(const volatile void __iomem *addr)
{
	u32	val;

	val = __arch_getl(addr);
	__iormb();
	return val;
}

static inline u64 readq(const volatile void __iomem *addr)
{
	u64	val;

	val = __arch_getq(addr);
	__iormb();
	return val;
}
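
/*
 * Typical driver usage (REG_CTRL, REG_STAT and the flag values are
 * hypothetical, not defined by this header): write a command, then poll
 * for completion. writel() orders the store after prior memory writes
 * and readl() orders subsequent code after the load.
 *
 *	writel(CMD_START, regs + REG_CTRL);
 *	while (!(readl(regs + REG_STAT) & STAT_DONE))
 *		;
 */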

/*
 * The compiler does not always optimise repeated base + constant-offset
 * address arithmetic well, so spell the split out explicitly in some
 * cases. These are only valid for small offsets (< 1 << 12).
 *
 * Note: the __arch_base_* helpers are not defined in this file; they are
 * expected to come from a machine-specific io.h when these macros are
 * used.
 */
#define __raw_base_writeb(val, base, off)	__arch_base_putb(val, base, off)
#define __raw_base_writew(val, base, off)	__arch_base_putw(val, base, off)
#define __raw_base_writel(val, base, off)	__arch_base_putl(val, base, off)

#define __raw_base_readb(base, off)	__arch_base_getb(base, off)
#define __raw_base_readw(base, off)	__arch_base_getw(base, off)
#define __raw_base_readl(base, off)	__arch_base_getl(base, off)
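
/*
 * A minimal sketch of what a machine header might provide for the byte
 * variants of the helpers above (an assumption for illustration, not a
 * definition this file supplies):
 *
 *	#define __arch_base_putb(v, b, o) \
 *		(*(volatile unsigned char *)((b) + (o)) = (v))
 *	#define __arch_base_getb(b, o) \
 *		(*(volatile unsigned char *)((b) + (o)))
 */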

#define out_arch(type, endian, a, v)	__raw_write##type(cpu_to_##endian(v), a)
#define in_arch(type, endian, a)	endian##_to_cpu(__raw_read##type(a))

#define out_le32(a, v)			out_arch(l, le32, a, v)
#define out_le16(a, v)			out_arch(w, le16, a, v)

#define in_le32(a)			in_arch(l, le32, a)
#define in_le16(a)			in_arch(w, le16, a)

#define out_be32(a, v)			out_arch(l, be32, a, v)
#define out_be16(a, v)			out_arch(w, be16, a, v)

#define in_be32(a)			in_arch(l, be32, a)
#define in_be16(a)			in_arch(w, be16, a)

#define out_8(a, v)			__raw_writeb(v, a)
#define in_8(a)				__raw_readb(a)

/*
 * Clear and set bits in one shot. These macros can be used to clear and
 * set multiple bits in a register using a single call. They can also be
 * used to set a multi-bit pattern using a mask, by specifying the mask
 * in the 'clear' parameter and the new pattern in the 'set' parameter.
 * See the usage example after the definitions below.
 */

#define clrbits(type, addr, clear) \
	out_##type((addr), in_##type(addr) & ~(clear))

#define setbits(type, addr, set) \
	out_##type((addr), in_##type(addr) | (set))

#define clrsetbits(type, addr, clear, set) \
	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))

#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
#define setbits_be32(addr, set) setbits(be32, addr, set)
#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)

#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
#define setbits_le32(addr, set) setbits(le32, addr, set)
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)

#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
#define setbits_be16(addr, set) setbits(be16, addr, set)
#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)

#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
#define setbits_le16(addr, set) setbits(le16, addr, set)
#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)

#define clrbits_8(addr, clear) clrbits(8, addr, clear)
#define setbits_8(addr, set) setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
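
/*
 * Example: update a 4-bit little-endian clock-divider field (REG_CLK and
 * the field layout are hypothetical). The read-modify-write clears the
 * old field and sets the new value in one call:
 *
 *	#define DIV_MASK	GENMASK(3, 0)
 *
 *	clrsetbits_le32(regs + REG_CLK, DIV_MASK, 5);
 */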

/*
 * Now, pick up the machine-defined IO definitions
 * #include <asm/arch/io.h>
 */

/*
 *  IO port access primitives
 *  -------------------------
 *
 * Like ARM, RISC-V has no special IO access instructions; all IO is
 * memory mapped.
 * Note that these are defined to perform little endian accesses
 * only.  Their primary purpose is to access PCI and ISA peripherals.
 *
 * Note that for a big endian machine, this implies that the following
 * big endian mode connectivity is in place, as described by numerous
 * ARM documents:
 *
 *    PCI:  D0-D7   D8-D15 D16-D23 D24-D31
 *    ARM: D24-D31 D16-D23  D8-D15  D0-D7
 *
 * The machine specific io.h include defines __io to translate an "IO"
 * address to a memory address.
 *
 * Note that we prevent GCC re-ordering or caching values in expressions
 * by introducing sequence points into the in*() definitions.  Note that
 * __raw_* do not guarantee this behaviour.
 *
 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
 */
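
/*
 * A sketch of the __io hook a machine header might supply, mapping port
 * numbers into a memory-mapped PCI/ISA IO window (the window base is an
 * assumption for illustration):
 *
 *	#define __io(p)	((void __iomem *)(0x30000000 + (p)))
 *
 * With __io defined, the inb()/outb() family below becomes available.
 */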
#ifdef __io
#define outb(v, p)			__raw_writeb(v, __io(p))
#define outw(v, p)			__raw_writew(cpu_to_le16(v), __io(p))
#define outl(v, p)			__raw_writel(cpu_to_le32(v), __io(p))

#define inb(p)	({ unsigned int __v = __raw_readb(__io(p)); __v; })
#define inw(p)	({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; })
#define inl(p)	({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; })

#define outsb(p, d, l)			writesb(__io(p), d, l)
#define outsw(p, d, l)			writesw(__io(p), d, l)
#define outsl(p, d, l)			writesl(__io(p), d, l)

#define insb(p, d, l)			readsb(__io(p), d, l)
#define insw(p, d, l)			readsw(__io(p), d, l)
#define insl(p, d, l)			readsl(__io(p), d, l)

/*
 * String accessors: the device address is deliberately fixed (a FIFO
 * data register is read or written repeatedly) while the memory pointer
 * advances. The device-side pointers are volatile-qualified so the
 * compiler cannot hoist or merge the accesses.
 */
static inline void readsb(const volatile void __iomem *addr, void *data,
			  unsigned int bytelen)
{
	const volatile unsigned char *ptr;
	unsigned char *ptr2;

	ptr = (const volatile unsigned char *)addr;
	ptr2 = (unsigned char *)data;

	while (bytelen) {
		*ptr2 = *ptr;
		ptr2++;
		bytelen--;
	}
}

static inline void readsw(const volatile void __iomem *addr, void *data,
			  unsigned int wordlen)
{
	const volatile unsigned short *ptr;
	unsigned short *ptr2;

	ptr = (const volatile unsigned short *)addr;
	ptr2 = (unsigned short *)data;

	while (wordlen) {
		*ptr2 = *ptr;
		ptr2++;
		wordlen--;
	}
}

static inline void readsl(const volatile void __iomem *addr, void *data,
			  unsigned int longlen)
{
	const volatile unsigned int *ptr;
	unsigned int *ptr2;

	ptr = (const volatile unsigned int *)addr;
	ptr2 = (unsigned int *)data;

	while (longlen) {
		*ptr2 = *ptr;
		ptr2++;
		longlen--;
	}
}

static inline void writesb(volatile void __iomem *addr, const void *data,
			   unsigned int bytelen)
{
	volatile unsigned char *ptr;
	const unsigned char *ptr2;

	ptr = (volatile unsigned char *)addr;
	ptr2 = (const unsigned char *)data;

	while (bytelen) {
		*ptr = *ptr2;
		ptr2++;
		bytelen--;
	}
}

static inline void writesw(volatile void __iomem *addr, const void *data,
			   unsigned int wordlen)
{
	volatile unsigned short *ptr;
	const unsigned short *ptr2;

	ptr = (volatile unsigned short *)addr;
	ptr2 = (const unsigned short *)data;

	while (wordlen) {
		*ptr = *ptr2;
		ptr2++;
		wordlen--;
	}
}

static inline void writesl(volatile void __iomem *addr, const void *data,
			   unsigned int longlen)
{
	volatile unsigned int *ptr;
	const unsigned int *ptr2;

	ptr = (volatile unsigned int *)addr;
	ptr2 = (const unsigned int *)data;

	while (longlen) {
		*ptr = *ptr2;
		ptr2++;
		longlen--;
	}
}

/* Tell asm-generic/io.h (included below) that these are already defined */
#define readsb readsb
#define readsw readsw
#define readsl readsl
#define writesb writesb
#define writesw writesw
#define writesl writesl

#endif
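
/*
 * Example: draining a 16-bit FIFO data port with insw(). The port name
 * and sector size are illustrative, in the style of an IDE data port:
 *
 *	u16 sector[256];
 *
 *	insw(ATA_REG_DATA, sector, ARRAY_SIZE(sector));
 */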

#define outb_p(val, port)		outb((val), (port))
#define outw_p(val, port)		outw((val), (port))
#define outl_p(val, port)		outl((val), (port))
#define inb_p(port)			inb((port))
#define inw_p(port)			inw((port))
#define inl_p(port)			inl((port))

#define outsb_p(port, from, len)	outsb(port, from, len)
#define outsw_p(port, from, len)	outsw(port, from, len)
#define outsl_p(port, from, len)	outsl(port, from, len)
#define insb_p(port, to, len)		insb(port, to, len)
#define insw_p(port, to, len)		insw(port, to, len)
#define insl_p(port, to, len)		insl(port, to, len)

/*
 * Unordered I/O memory access primitives.  These are even more relaxed than
 * the relaxed versions, as they don't even order accesses between successive
 * operations to the I/O regions.
 */
#define readb_cpu(c)		({ u8  __r = __raw_readb(c); __r; })
#define readw_cpu(c)		({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
#define readl_cpu(c)		({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })

#define writeb_cpu(v, c)	((void)__raw_writeb((v), (c)))
#define writew_cpu(v, c)	((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
#define writel_cpu(v, c)	((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))

#ifdef CONFIG_64BIT
#define readq_cpu(c)		({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
#define writeq_cpu(v, c)	((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
#endif

/*
 * Relaxed I/O memory access primitives. These follow the Device memory
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses.  These are defined to order the indicated access (either a read or
 * write) with all other I/O memory accesses to the same peripheral. Since the
 * platform specification defines that all I/O regions are strongly ordered on
 * channel 0, no explicit fences are required to enforce this ordering.
 */
/* FIXME: These are now the same as asm-generic */
#define __io_rbr()		do {} while (0)
#define __io_rar()		do {} while (0)
#define __io_rbw()		do {} while (0)
#define __io_raw()		do {} while (0)

#define readb_relaxed(c)	({ u8  __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
#define readw_relaxed(c)	({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
#define readl_relaxed(c)	({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })

#define writeb_relaxed(v, c)	({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
#define writew_relaxed(v, c)	({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
#define writel_relaxed(v, c)	({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })

#ifdef CONFIG_64BIT
#define readq_relaxed(c)	({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
#define writeq_relaxed(v, c)	({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
#endif
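
/*
 * Choosing an accessor, strongest to weakest ordering (a summary of the
 * definitions above; regs and REG_STAT are hypothetical):
 *
 *	u32 v;
 *
 *	v = readl(regs + REG_STAT);          // barrier after load
 *	v = readl_relaxed(regs + REG_STAT);  // device ordering only
 *	v = readl_cpu(regs + REG_STAT);      // no ordering guarantees
 *
 * The relaxed and _cpu variants can be faster in tight polling loops, at
 * the cost of ordering relative to normal memory accesses.
 */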

#include <asm-generic/io.h>

#endif	/* __ASM_RISCV_IO_H */