1 /*
2  * I/O device access primitives. Based on early versions from the Linux kernel.
3  *
4  *  Copyright (C) 1996-2000 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #ifndef __ASM_ARM_IO_H
11 #define __ASM_ARM_IO_H
12 
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <asm/byteorder.h>
16 #include <asm/memory.h>
17 #include <asm/barriers.h>
18 
/* I/O synchronisation hook; intentionally a no-op on ARM. */
static inline void sync(void)
{
}
22 
/*
 * __W: inline-asm operand width modifier. AArch64 needs "w" to select
 * the 32-bit W view of a register; 32-bit ARM uses no modifier.
 */
#ifdef CONFIG_ARM64
#define __W	"w"
#else
#define __W
#endif

/*
 * __R/__RM: input/output register constraints for inline asm. Thumb
 * builds use "l" (low registers r0-r7, required by many 16-bit Thumb
 * encodings); otherwise any core register ("r") is allowed.
 */
#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
#define __R "l"
#define __RM "=l"
#else
#define __R "r"
#define __RM "=r"
#endif
36 
#ifdef CONFIG_KVM_VIRT_INS
/*
 * The __raw_writeX/__raw_readX below should be converted to static inline
 * functions. However doing so produces a lot of compilation warnings when
 * called with a raw address. Convert these once the callers have been fixed.
 */
/*
 * Single-instruction accessors: each access is exactly one known
 * ldrb/strb/ldrh/strh/ldr/str instruction — presumably so a hypervisor
 * can decode and emulate the trapped access (TODO confirm).
 *
 * NOTE(review): no "memory" clobber is declared; ordering relative to
 * normal memory is provided by the barrier-wrapped accessors below.
 */
/* 8-bit store: strb of the low byte of 'val' to 'addr'. */
#define __raw_writeb(val, addr)			\
	do {					\
		asm volatile("strb %" __W "0, [%1]"	\
		:				\
		: __R ((u8)(val)), __R (addr));	\
	} while (0)

/* 8-bit load; ldrb zero-extends into a 32-bit result. */
#define __raw_readb(addr)				\
	({						\
		u32 __val;				\
		asm volatile("ldrb %" __W "0, [%1]"		\
		: __RM (__val)				\
		: __R (addr));				\
		__val;					\
	})

/* 16-bit store of the low half of 'val'. */
#define __raw_writew(val, addr)			\
	do {					\
		asm volatile("strh %" __W "0, [%1]"	\
		:					\
		: __R ((u16)(val)), __R (addr));	\
	} while (0)

/* 16-bit load; ldrh zero-extends into a 32-bit result. */
#define __raw_readw(addr)				\
	({						\
		u32 __val;				\
		asm volatile("ldrh %" __W "0, [%1]"		\
		: __RM (__val)				\
		: __R (addr));				\
	__val;						\
    })

/* 32-bit store. */
#define __raw_writel(val, addr)				\
	do {						\
		asm volatile("str %" __W "0, [%1]"		\
		:					\
		: __R ((u32)(val)), __R (addr));	\
	} while (0)

/* 32-bit load. */
#define __raw_readl(addr)				\
	({						\
		u32 __val;				\
		asm volatile("ldr %" __W "0, [%1]"		\
		: __RM (__val)				\
		: __R (addr));				\
		__val;					\
	})

/* 64-bit store; no __W modifier so the full 64-bit register is used. */
#define __raw_writeq(val, addr)				\
	do {						\
		asm volatile("str %0, [%1]"		\
		:					\
		: __R ((u64)(val)), __R (addr));	\
	} while (0)

/* 64-bit load into a u64. */
#define __raw_readq(addr)				\
	({						\
		u64 __val;				\
		asm volatile("ldr %0, [%1]"		\
		: __RM (__val)				\
		: __R (addr));				\
		__val;					\
	})
#else
/* Generic virtual read/write: plain volatile memory accesses. */
#define __raw_readb(a)			(*(volatile unsigned char *)(a))
#define __raw_readw(a)			(*(volatile unsigned short *)(a))
#define __raw_readl(a)			(*(volatile unsigned int *)(a))
#define __raw_readq(a)			(*(volatile unsigned long long *)(a))

#define __raw_writeb(v, a)		(*(volatile unsigned char *)(a) = (v))
#define __raw_writew(v, a)		(*(volatile unsigned short *)(a) = (v))
#define __raw_writel(v, a)		(*(volatile unsigned int *)(a) = (v))
#define __raw_writeq(v, a)		(*(volatile unsigned long long *)(a) = (v))
#endif
118 
/*
 * __raw_writesb() - write a sequence of bytes to a single I/O address.
 * @addr: I/O address to write to (not incremented)
 * @data: source buffer in normal memory
 * @bytelen: number of bytes to write
 */
static inline void __raw_writesb(unsigned long addr, const void *data,
				 int bytelen)
{
	/* Keep the const qualifier of @data instead of casting it away. */
	const uint8_t *buf = data;

	while (bytelen--)
		__raw_writeb(*buf++, addr);
}
126 
/*
 * __raw_writesw() - write a sequence of 16-bit words to a single I/O address.
 * @addr: I/O address to write to (not incremented)
 * @data: source buffer in normal memory
 * @wordlen: number of 16-bit words to write
 */
static inline void __raw_writesw(unsigned long addr, const void *data,
				 int wordlen)
{
	/* Keep the const qualifier of @data instead of casting it away. */
	const uint16_t *buf = data;

	while (wordlen--)
		__raw_writew(*buf++, addr);
}
134 
/*
 * __raw_writesl() - write a sequence of 32-bit words to a single I/O address.
 * @addr: I/O address to write to (not incremented)
 * @data: source buffer in normal memory
 * @longlen: number of 32-bit words to write
 */
static inline void __raw_writesl(unsigned long addr, const void *data,
				 int longlen)
{
	/* Keep the const qualifier of @data instead of casting it away. */
	const uint32_t *buf = data;

	while (longlen--)
		__raw_writel(*buf++, addr);
}
142 
/*
 * __raw_readsb() - read a sequence of bytes from a single I/O address.
 * @addr: I/O address to read from (not incremented)
 * @data: destination buffer in normal memory
 * @bytelen: number of bytes to read
 */
static inline void __raw_readsb(unsigned long addr, void *data, int bytelen)
{
	uint8_t *out = data;

	while (bytelen--)
		*out++ = __raw_readb(addr);
}
149 
/*
 * __raw_readsw() - read a sequence of 16-bit words from a single I/O address.
 * @addr: I/O address to read from (not incremented)
 * @data: destination buffer in normal memory
 * @wordlen: number of 16-bit words to read
 */
static inline void __raw_readsw(unsigned long addr, void *data, int wordlen)
{
	uint16_t *out = data;

	while (wordlen--)
		*out++ = __raw_readw(addr);
}
156 
/*
 * __raw_readsl() - read a sequence of 32-bit words from a single I/O address.
 * @addr: I/O address to read from (not incremented)
 * @data: destination buffer in normal memory
 * @longlen: number of 32-bit words to read
 */
static inline void __raw_readsl(unsigned long addr, void *data, int longlen)
{
	uint32_t *out = data;

	while (longlen--)
		*out++ = __raw_readl(addr);
}
163 
164 /*
165  * TODO: The kernel offers some more advanced versions of barriers, it might
166  * have some advantages to use them instead of the simple one here.
167  */
168 #define mb()		dsb()
169 #define rmb()		dsb()
170 #define wmb()		dsb()
171 #define __iormb()	dmb()
172 #define __iowmb()	dmb()
173 
174 #define smp_processor_id()	0
175 
176 #define writeb(v, c)	({ u8  __v = v; __iowmb(); writeb_relaxed(__v, c); __v; })
177 #define writew(v, c)	({ u16 __v = v; __iowmb(); writew_relaxed(__v, c); __v; })
178 #define writel(v, c)	({ u32 __v = v; __iowmb(); writel_relaxed(__v, c); __v; })
179 #define writeq(v, c)	({ u64 __v = v; __iowmb(); writeq_relaxed(__v, c); __v; })
180 
181 #define readb(c)	({ u8  __v = readb_relaxed(c); __iormb(); __v; })
182 #define readw(c)	({ u16 __v = readw_relaxed(c); __iormb(); __v; })
183 #define readl(c)	({ u32 __v = readl_relaxed(c); __iormb(); __v; })
184 #define readq(c)	({ u64 __v = readq_relaxed(c); __iormb(); __v; })
185 
186 /*
187  * Relaxed I/O memory access primitives. These follow the Device memory
188  * ordering rules but do not guarantee any ordering relative to Normal memory
189  * accesses.
190  */
191 #define readb_relaxed(c)	({ u8  __r = __raw_readb(c); __r; })
192 #define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16) \
193 						__raw_readw(c)); __r; })
194 #define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32) \
195 						__raw_readl(c)); __r; })
196 #define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64) \
197 						__raw_readq(c)); __r; })
198 
199 #define writeb_relaxed(v, c)	__raw_writeb((v), (c))
200 #define writew_relaxed(v, c)	__raw_writew((__force u16)cpu_to_le16(v), (c))
201 #define writel_relaxed(v, c)	__raw_writel((__force u32)cpu_to_le32(v), (c))
202 #define writeq_relaxed(v, c)	__raw_writeq((__force u64)cpu_to_le64(v), (c))
203 
204 /*
205  * The compiler seems to be incapable of optimising constants
206  * properly.  Spell it out to the compiler in some cases.
207  * These are only valid for small values of "off" (< 1<<12)
208  */
209 #define __raw_base_writeb(val,base,off)	__arch_base_putb(val,base,off)
210 #define __raw_base_writew(val,base,off)	__arch_base_putw(val,base,off)
211 #define __raw_base_writel(val,base,off)	__arch_base_putl(val,base,off)
212 
213 #define __raw_base_readb(base,off)	__arch_base_getb(base,off)
214 #define __raw_base_readw(base,off)	__arch_base_getw(base,off)
215 #define __raw_base_readl(base,off)	__arch_base_getl(base,off)
216 
217 /*
218  * Clear and set bits in one shot. These macros can be used to clear and
219  * set multiple bits in a register using a single call. These macros can
220  * also be used to set a multiple-bit bit pattern using a mask, by
221  * specifying the mask in the 'clear' parameter and the new bit pattern
222  * in the 'set' parameter.
223  */
224 
/*
 * Endian-aware accessors built on the __raw_* primitives. The width
 * letter (b/w/l/q) selects the raw access size and must match the width
 * of the endian conversion routine.
 */
#define out_arch(type,endian,a,v)	__raw_write##type(cpu_to_##endian(v),a)
#define in_arch(type,endian,a)		endian##_to_cpu(__raw_read##type(a))

#define out_le64(a,v)	out_arch(q,le64,a,v)
#define out_le32(a,v)	out_arch(l,le32,a,v)
#define out_le16(a,v)	out_arch(w,le16,a,v)

#define in_le64(a)	in_arch(q,le64,a)
#define in_le32(a)	in_arch(l,le32,a)
#define in_le16(a)	in_arch(w,le16,a)

/*
 * The 64-bit big-endian accessors must use the 64-bit ('q') raw ops;
 * using the 32-bit ('l') ops would truncate the byte-swapped value on
 * store and read back garbage on load.
 */
#define out_be64(a,v)	out_arch(q,be64,a,v)
#define out_be32(a,v)	out_arch(l,be32,a,v)
#define out_be16(a,v)	out_arch(w,be16,a,v)

#define in_be64(a)	in_arch(q,be64,a)
#define in_be32(a)	in_arch(l,be32,a)
#define in_be16(a)	in_arch(w,be16,a)

/* Native-endian accessors: no byte swapping. */
#define out_64(a,v)	__raw_writeq(v,a)
#define out_32(a,v)	__raw_writel(v,a)
#define out_16(a,v)	__raw_writew(v,a)
#define out_8(a,v)	__raw_writeb(v,a)

#define in_64(a)	__raw_readq(a)
#define in_32(a)	__raw_readl(a)
#define in_16(a)	__raw_readw(a)
#define in_8(a)		__raw_readb(a)
253 
/*
 * clrbits/setbits/clrsetbits: read-modify-write helpers. 'type' selects
 * one of the in_/out_ accessor suffixes above (e.g. be32, le32, 32).
 */
#define clrbits(type, addr, clear) \
	out_##type((addr), in_##type(addr) & ~(clear))

#define setbits(type, addr, set) \
	out_##type((addr), in_##type(addr) | (set))

#define clrsetbits(type, addr, clear, set) \
	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))

#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
#define setbits_be32(addr, set) setbits(be32, addr, set)
#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)

#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
#define setbits_le32(addr, set) setbits(le32, addr, set)
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)

#define clrbits_32(addr, clear) clrbits(32, addr, clear)
#define setbits_32(addr, set) setbits(32, addr, set)
#define clrsetbits_32(addr, clear, set) clrsetbits(32, addr, clear, set)

#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
#define setbits_be16(addr, set) setbits(be16, addr, set)
#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)

#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
#define setbits_le16(addr, set) setbits(le16, addr, set)
#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)

#define clrbits_16(addr, clear) clrbits(16, addr, clear)
#define setbits_16(addr, set) setbits(16, addr, set)
#define clrsetbits_16(addr, clear, set) clrsetbits(16, addr, clear, set)

#define clrbits_8(addr, clear) clrbits(8, addr, clear)
#define setbits_8(addr, set) setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)

#define clrbits_be64(addr, clear) clrbits(be64, addr, clear)
#define setbits_be64(addr, set) setbits(be64, addr, set)
#define clrsetbits_be64(addr, clear, set) clrsetbits(be64, addr, clear, set)

#define clrbits_le64(addr, clear) clrbits(le64, addr, clear)
#define setbits_le64(addr, set) setbits(le64, addr, set)
#define clrsetbits_le64(addr, clear, set) clrsetbits(le64, addr, clear, set)

#define clrbits_64(addr, clear) clrbits(64, addr, clear)
#define setbits_64(addr, set) setbits(64, addr, set)
#define clrsetbits_64(addr, clear, set) clrsetbits(64, addr, clear, set)
302 
303 /*
304  *  IO port access primitives
305  *  -------------------------
306  *
307  * The ARM doesn't have special IO access instructions; all IO is memory
308  * mapped.  Note that these are defined to perform little endian accesses
309  * only.  Their primary purpose is to access PCI and ISA peripherals.
310  *
311  * Note that for a big endian machine, this implies that the following
312  * big endian mode connectivity is in place, as described by numerous
313  * ARM documents:
314  *
315  *    PCI:  D0-D7   D8-D15 D16-D23 D24-D31
316  *    ARM: D24-D31 D16-D23  D8-D15  D0-D7
317  *
318  * The machine specific io.h include defines __io to translate an "IO"
319  * address to a memory address.
320  *
321  * Note that we prevent GCC re-ordering or caching values in expressions
322  * by introducing sequence points into the in*() definitions.  Note that
323  * __raw_* do not guarantee this behaviour.
324  *
325  * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
326  */
/* Emulated x86-style port I/O; only available when the platform's
 * machine-specific io.h defines __io() to map a port to an address. */
#ifdef __io
#define outb(v,p)			__raw_writeb(v,__io(p))
#define outw(v,p)			__raw_writew(cpu_to_le16(v),__io(p))
#define outl(v,p)			__raw_writel(cpu_to_le32(v),__io(p))

#define inb(p)	({ unsigned int __v = __raw_readb(__io(p)); __v; })
#define inw(p)	({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; })
#define inl(p)	({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; })

#define outsb(p,d,l)			__raw_writesb(__io(p),d,l)
#define outsw(p,d,l)			__raw_writesw(__io(p),d,l)
#define outsl(p,d,l)			__raw_writesl(__io(p),d,l)

#define insb(p,d,l)			__raw_readsb(__io(p),d,l)
#define insw(p,d,l)			__raw_readsw(__io(p),d,l)
#define insl(p,d,l)			__raw_readsl(__io(p),d,l)
#endif

/* The *_p ("pause") variants add no extra delay here; they simply alias
 * the plain port accessors. */
#define outb_p(val,port)		outb((val),(port))
#define outw_p(val,port)		outw((val),(port))
#define outl_p(val,port)		outl((val),(port))
#define inb_p(port)			inb((port))
#define inw_p(port)			inw((port))
#define inl_p(port)			inl((port))

#define outsb_p(port,from,len)		outsb(port,from,len)
#define outsw_p(port,from,len)		outsw(port,from,len)
#define outsl_p(port,from,len)		outsl(port,from,len)
#define insb_p(port,to,len)		insb(port,to,len)
#define insw_p(port,to,len)		insw(port,to,len)
#define insl_p(port,to,len)		insl(port,to,len)
358 
/*
 * Pointer-argument wrappers around the string accessors above.
 * The address argument is parenthesised before the cast so that an
 * expression such as (buf + 1) is converted as a whole pointer (with
 * scaled arithmetic) rather than having the cast bind only to 'buf'.
 */
#define writesl(a, d, s)	__raw_writesl((unsigned long)(a), d, s)
#define readsl(a, d, s)		__raw_readsl((unsigned long)(a), d, s)
#define writesw(a, d, s)	__raw_writesw((unsigned long)(a), d, s)
#define readsw(a, d, s)		__raw_readsw((unsigned long)(a), d, s)
#define writesb(a, d, s)	__raw_writesb((unsigned long)(a), d, s)
#define readsb(a, d, s)		__raw_readsb((unsigned long)(a), d, s)
365 
366 /*
367  * String version of IO memory access ops:
368  */
369 extern void _memcpy_fromio(void *, unsigned long, size_t);
370 extern void _memcpy_toio(unsigned long, const void *, size_t);
371 extern void _memset_io(unsigned long, int, size_t);
372 
/* Optimized copy functions to read from/write to IO space */
374 #ifdef CONFIG_ARM64
375 #include <cpu_func.h>
376 /*
377  * Copy data from IO memory space to "real" memory space.
378  */
379 static inline
__memcpy_fromio(void * to,const volatile void __iomem * from,size_t count)380 void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
381 {
382 	while (count && !IS_ALIGNED((unsigned long)from, 8)) {
383 		*(u8 *)to = __raw_readb(from);
384 		from++;
385 		to++;
386 		count--;
387 	}
388 
389 	if (mmu_status()) {
390 		while (count >= 8) {
391 			*(u64 *)to = __raw_readq(from);
392 			from += 8;
393 			to += 8;
394 			count -= 8;
395 		}
396 	}
397 
398 	while (count) {
399 		*(u8 *)to = __raw_readb(from);
400 		from++;
401 		to++;
402 		count--;
403 	}
404 }
405 
406 /*
407  * Copy data from "real" memory space to IO memory space.
408  */
409 static inline
__memcpy_toio(volatile void __iomem * to,const void * from,size_t count)410 void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
411 {
412 	while (count && !IS_ALIGNED((unsigned long)to, 8)) {
413 		__raw_writeb(*(u8 *)from, to);
414 		from++;
415 		to++;
416 		count--;
417 	}
418 
419 	if (mmu_status()) {
420 		while (count >= 8) {
421 			__raw_writeq(*(u64 *)from, to);
422 			from += 8;
423 			to += 8;
424 			count -= 8;
425 		}
426 	}
427 
428 	while (count) {
429 		__raw_writeb(*(u8 *)from, to);
430 		from++;
431 		to++;
432 		count--;
433 	}
434 }
435 
436 /*
437  * "memset" on IO memory space.
438  */
439 static inline
__memset_io(volatile void __iomem * dst,int c,size_t count)440 void __memset_io(volatile void __iomem *dst, int c, size_t count)
441 {
442 	u64 qc = (u8)c;
443 
444 	qc |= qc << 8;
445 	qc |= qc << 16;
446 	qc |= qc << 32;
447 
448 	while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
449 		__raw_writeb(c, dst);
450 		dst++;
451 		count--;
452 	}
453 
454 	while (count >= 8) {
455 		__raw_writeq(qc, dst);
456 		dst += 8;
457 		count -= 8;
458 	}
459 
460 	while (count) {
461 		__raw_writeb(c, dst);
462 		dst++;
463 		count--;
464 	}
465 }
466 #endif /* CONFIG_ARM64 */
467 
#ifdef CONFIG_ARM64
/* Use the optimised, alignment-aware IO copy/set routines above. */
#define memset_io(a, b, c)		__memset_io((a), (b), (c))
#define memcpy_fromio(a, b, c)		__memcpy_fromio((a), (b), (c))
#define memcpy_toio(a, b, c)		__memcpy_toio((a), (b), (c))
#else
/* 32-bit ARM: IO is memory mapped, so plain memcpy/memset suffice. */
#define memset_io(a, b, c)		memset((void *)(a), (b), (c))
#define memcpy_fromio(a, b, c)		memcpy((a), (void *)(b), (c))
#define memcpy_toio(a, b, c)		memcpy((void *)(a), (b), (c))
#endif
477 
478 #include <asm-generic/io.h>
479 #include <iotrace.h>
480 
481 #endif	/* __ASM_ARM_IO_H */
482