1 /*
2  * Copyright 2019 The Hafnium Authors.
3  *
4  * Use of this source code is governed by a BSD-style
5  * license that can be found in the LICENSE file or at
6  * https://opensource.org/licenses/BSD-3-Clause.
7  */
8 
9 #pragma once
10 
11 #include <stddef.h>
12 #include <stdint.h>
13 
14 #include "hf/arch/barriers.h"
15 #include "hf/arch/types.h"
16 
17 #include "hf/assert.h"
18 
19 /* Opaque types for different sized fields of memory mapped IO. */
20 
/** Handle to a single 8-bit memory-mapped IO register. */
typedef struct {
	volatile uint8_t *ptr;
} io8_t;

/** Handle to a single 16-bit memory-mapped IO register. */
typedef struct {
	volatile uint16_t *ptr;
} io16_t;

/** Handle to a single 32-bit memory-mapped IO register. */
typedef struct {
	volatile uint32_t *ptr;
} io32_t;

/** Handle to a single 64-bit memory-mapped IO register. */
typedef struct {
	volatile uint64_t *ptr;
} io64_t;

/** Array of `count` 8-bit IO registers; accessors assert indices in bounds. */
typedef struct {
	volatile uint8_t *base;
	size_t count;
} io8_array_t;

/** Array of `count` 16-bit IO registers; accessors assert indices in bounds. */
typedef struct {
	volatile uint16_t *base;
	size_t count;
} io16_array_t;

/** Array of `count` 32-bit IO registers; accessors assert indices in bounds. */
typedef struct {
	volatile uint32_t *base;
	size_t count;
} io32_array_t;

/** Array of `count` 64-bit IO registers; accessors assert indices in bounds. */
typedef struct {
	volatile uint64_t *base;
	size_t count;
} io64_array_t;
56 
/* Constructors for literals. */
58 
io8_c(uintpaddr_t addr,uintpaddr_t offset)59 static inline io8_t io8_c(uintpaddr_t addr, uintpaddr_t offset)
60 {
61 	return (io8_t){.ptr = (volatile uint8_t *)(addr + offset)};
62 }
63 
io8_array_c(uintpaddr_t addr,uintpaddr_t offset,uint32_t count)64 static inline io8_array_t io8_array_c(uintpaddr_t addr, uintpaddr_t offset,
65 				      uint32_t count)
66 {
67 	(void)offset;
68 
69 	return (io8_array_t){.base = (volatile uint8_t *)addr, .count = count};
70 }
71 
io16_c(uintpaddr_t addr,uintpaddr_t offset)72 static inline io16_t io16_c(uintpaddr_t addr, uintpaddr_t offset)
73 {
74 	return (io16_t){.ptr = (volatile uint16_t *)(addr + offset)};
75 }
76 
io16_array_c(uintpaddr_t addr,uintpaddr_t offset,uint32_t count)77 static inline io16_array_t io16_array_c(uintpaddr_t addr, uintpaddr_t offset,
78 					uint32_t count)
79 {
80 	(void)offset;
81 
82 	return (io16_array_t){.base = (volatile uint16_t *)addr,
83 			      .count = count};
84 }
85 
io32_c(uintpaddr_t addr,uintpaddr_t offset)86 static inline io32_t io32_c(uintpaddr_t addr, uintpaddr_t offset)
87 {
88 	return (io32_t){.ptr = (volatile uint32_t *)(addr + offset)};
89 }
90 
io32_array_c(uintpaddr_t addr,uintpaddr_t offset,uint32_t count)91 static inline io32_array_t io32_array_c(uintpaddr_t addr, uintpaddr_t offset,
92 					uint32_t count)
93 {
94 	(void)offset;
95 
96 	return (io32_array_t){.base = (volatile uint32_t *)addr,
97 			      .count = count};
98 }
99 
io64_c(uintpaddr_t addr,uintpaddr_t offset)100 static inline io64_t io64_c(uintpaddr_t addr, uintpaddr_t offset)
101 {
102 	return (io64_t){.ptr = (volatile uint64_t *)(addr + offset)};
103 }
104 
io64_array_c(uintpaddr_t addr,uintpaddr_t offset,uint32_t count)105 static inline io64_array_t io64_array_c(uintpaddr_t addr, uintpaddr_t offset,
106 					uint32_t count)
107 {
108 	(void)offset;
109 
110 	return (io64_array_t){.base = (volatile uint64_t *)addr,
111 			      .count = count};
112 }
113 
/* Convenience constructors: map a raw physical address to an IO handle. */
#define IO8_C(addr) io8_c((addr), 0)
#define IO16_C(addr) io16_c((addr), 0)
#define IO32_C(addr) io32_c((addr), 0)
#define IO64_C(addr) io64_c((addr), 0)

/*
 * As above, for register arrays. `cnt` is parenthesised so that expression
 * arguments (e.g. `a + b`) expand safely.
 */
#define IO8_ARRAY_C(addr, cnt) io8_array_c((addr), 0, (cnt))
#define IO16_ARRAY_C(addr, cnt) io16_array_c((addr), 0, (cnt))
#define IO32_ARRAY_C(addr, cnt) io32_array_c((addr), 0, (cnt))
#define IO64_ARRAY_C(addr, cnt) io64_array_c((addr), 0, (cnt))
123 
124 /** Read from memory-mapped IO. */
125 
/** Reads the 8-bit register `io` refers to. */
static inline uint8_t io_read8(io8_t io)
{
	return io.ptr[0];
}
130 
/** Reads the 16-bit register `io` refers to. */
static inline uint16_t io_read16(io16_t io)
{
	return io.ptr[0];
}
135 
/** Reads the 32-bit register `io` refers to. */
static inline uint32_t io_read32(io32_t io)
{
	return io.ptr[0];
}
140 
/** Reads the 64-bit register `io` refers to. */
static inline uint64_t io_read64(io64_t io)
{
	return io.ptr[0];
}
145 
/** Reads element `n` of the 8-bit register array; asserts `n` in bounds. */
static inline uint8_t io_read8_array(io8_array_t io, size_t n)
{
	assert(n < io.count);
	return *(io.base + n);
}
151 
/** Reads element `n` of the 16-bit register array; asserts `n` in bounds. */
static inline uint16_t io_read16_array(io16_array_t io, size_t n)
{
	assert(n < io.count);
	return *(io.base + n);
}
157 
/** Reads element `n` of the 32-bit register array; asserts `n` in bounds. */
static inline uint32_t io_read32_array(io32_array_t io, size_t n)
{
	assert(n < io.count);
	return *(io.base + n);
}
163 
/** Reads element `n` of the 64-bit register array; asserts `n` in bounds. */
static inline uint64_t io_read64_array(io64_array_t io, size_t n)
{
	assert(n < io.count);
	return *(io.base + n);
}
169 
170 /**
171  * Read from memory-mapped IO with memory barrier.
172  *
173  * The read is ordered before subsequent memory accesses.
174  */
175 
/**
 * Reads the 8-bit register, then issues a data synchronization barrier so
 * the read is ordered before subsequent memory accesses.
 */
static inline uint8_t io_read8_mb(io8_t io)
{
	uint8_t value = *io.ptr;

	data_sync_barrier();
	return value;
}
183 
/**
 * Reads the 16-bit register, then issues a data synchronization barrier so
 * the read is ordered before subsequent memory accesses.
 */
static inline uint16_t io_read16_mb(io16_t io)
{
	uint16_t value = *io.ptr;

	data_sync_barrier();
	return value;
}
191 
/**
 * Reads the 32-bit register, then issues a data synchronization barrier so
 * the read is ordered before subsequent memory accesses.
 */
static inline uint32_t io_read32_mb(io32_t io)
{
	uint32_t value = *io.ptr;

	data_sync_barrier();
	return value;
}
199 
/**
 * Reads the 64-bit register, then issues a data synchronization barrier so
 * the read is ordered before subsequent memory accesses.
 */
static inline uint64_t io_read64_mb(io64_t io)
{
	uint64_t value = *io.ptr;

	data_sync_barrier();
	return value;
}
207 
/**
 * Reads element `n` of the 8-bit register array (asserting `n` in bounds),
 * then issues a data synchronization barrier so the read is ordered before
 * subsequent memory accesses.
 */
static inline uint8_t io_read8_array_mb(io8_array_t io, size_t n)
{
	uint8_t value;

	assert(n < io.count);
	value = io.base[n];
	data_sync_barrier();
	return value;
}
215 
/**
 * Reads element `n` of the 16-bit register array (asserting `n` in bounds),
 * then issues a data synchronization barrier so the read is ordered before
 * subsequent memory accesses.
 */
static inline uint16_t io_read16_array_mb(io16_array_t io, size_t n)
{
	uint16_t value;

	assert(n < io.count);
	value = io.base[n];
	data_sync_barrier();
	return value;
}
223 
/**
 * Reads element `n` of the 32-bit register array (asserting `n` in bounds),
 * then issues a data synchronization barrier so the read is ordered before
 * subsequent memory accesses.
 */
static inline uint32_t io_read32_array_mb(io32_array_t io, size_t n)
{
	uint32_t value;

	assert(n < io.count);
	value = io.base[n];
	data_sync_barrier();
	return value;
}
231 
/**
 * Reads element `n` of the 64-bit register array (asserting `n` in bounds),
 * then issues a data synchronization barrier so the read is ordered before
 * subsequent memory accesses.
 */
static inline uint64_t io_read64_array_mb(io64_array_t io, size_t n)
{
	uint64_t value;

	assert(n < io.count);
	value = io.base[n];
	data_sync_barrier();
	return value;
}
239 
240 /* Write to memory-mapped IO. */
241 
/** Writes `v` to the 8-bit register `io` refers to. */
static inline void io_write8(io8_t io, uint8_t v)
{
	io.ptr[0] = v;
}
246 
/** Writes `v` to the 16-bit register `io` refers to. */
static inline void io_write16(io16_t io, uint16_t v)
{
	io.ptr[0] = v;
}
251 
/** Writes `v` to the 32-bit register `io` refers to. */
static inline void io_write32(io32_t io, uint32_t v)
{
	io.ptr[0] = v;
}
256 
/** Writes `v` to the 64-bit register `io` refers to. */
static inline void io_write64(io64_t io, uint64_t v)
{
	io.ptr[0] = v;
}
261 
/** Read-modify-write: clears the bits of `clear` in the 32-bit register. */
static inline void io_clrbits32(io32_t io, uint32_t clear)
{
	uint32_t val = io_read32(io);

	io_write32(io, val & ~clear);
}
266 
/** Read-modify-write: sets the bits of `set` in the 32-bit register. */
static inline void io_setbits32(io32_t io, uint32_t set)
{
	uint32_t val = io_read32(io);

	io_write32(io, val | set);
}
271 
/**
 * Read-modify-write: clears the bits of `clear`, then sets the bits of
 * `set`, in the 32-bit register. Bits in both masks end up set.
 */
static inline void io_clrsetbits32(io32_t io, uint32_t clear, uint32_t set)
{
	uint32_t val = io_read32(io);

	val &= ~clear;
	val |= set;
	io_write32(io, val);
}
276 
/** Writes `v` to element `n` of the 8-bit array; asserts `n` in bounds. */
static inline void io_write8_array(io8_array_t io, size_t n, uint8_t v)
{
	assert(n < io.count);
	*(io.base + n) = v;
}
282 
/** Writes `v` to element `n` of the 16-bit array; asserts `n` in bounds. */
static inline void io_write16_array(io16_array_t io, size_t n, uint16_t v)
{
	assert(n < io.count);
	*(io.base + n) = v;
}
288 
/** Writes `v` to element `n` of the 32-bit array; asserts `n` in bounds. */
static inline void io_write32_array(io32_array_t io, size_t n, uint32_t v)
{
	assert(n < io.count);
	*(io.base + n) = v;
}
294 
/** Writes `v` to element `n` of the 64-bit array; asserts `n` in bounds. */
static inline void io_write64_array(io64_array_t io, size_t n, uint64_t v)
{
	assert(n < io.count);
	*(io.base + n) = v;
}
300 
301 /*
302  * Write to memory-mapped IO with memory barrier.
303  *
304  * The write is ordered after previous memory accesses.
305  */
306 
/**
 * Issues a data synchronization barrier, then writes `v` to the 8-bit
 * register, so the write is ordered after previous memory accesses.
 */
static inline void io_write8_mb(io8_t io, uint8_t v)
{
	data_sync_barrier();
	*io.ptr = v;
}
312 
/**
 * Issues a data synchronization barrier, then writes `v` to the 16-bit
 * register, so the write is ordered after previous memory accesses.
 */
static inline void io_write16_mb(io16_t io, uint16_t v)
{
	data_sync_barrier();
	*io.ptr = v;
}
318 
/**
 * Issues a data synchronization barrier, then writes `v` to the 32-bit
 * register, so the write is ordered after previous memory accesses.
 */
static inline void io_write32_mb(io32_t io, uint32_t v)
{
	data_sync_barrier();
	*io.ptr = v;
}
324 
/**
 * Issues a data synchronization barrier, then writes `v` to the 64-bit
 * register, so the write is ordered after previous memory accesses.
 */
static inline void io_write64_mb(io64_t io, uint64_t v)
{
	data_sync_barrier();
	*io.ptr = v;
}
330 
/**
 * Issues a data synchronization barrier, then writes `v` to element `n` of
 * the 8-bit array (asserting `n` in bounds), so the write is ordered after
 * previous memory accesses.
 */
static inline void io_write8_array_mb(io8_array_t io, size_t n, uint8_t v)
{
	data_sync_barrier();
	assert(n < io.count);
	io.base[n] = v;
}
336 
/**
 * Issues a data synchronization barrier, then writes `v` to element `n` of
 * the 16-bit array (asserting `n` in bounds), so the write is ordered after
 * previous memory accesses.
 */
static inline void io_write16_array_mb(io16_array_t io, size_t n, uint16_t v)
{
	data_sync_barrier();
	assert(n < io.count);
	io.base[n] = v;
}
342 
/**
 * Issues a data synchronization barrier, then writes `v` to element `n` of
 * the 32-bit array (asserting `n` in bounds), so the write is ordered after
 * previous memory accesses.
 */
static inline void io_write32_array_mb(io32_array_t io, size_t n, uint32_t v)
{
	data_sync_barrier();
	assert(n < io.count);
	io.base[n] = v;
}
348 
/**
 * Issues a data synchronization barrier, then writes `v` to element `n` of
 * the 64-bit array (asserting `n` in bounds), so the write is ordered after
 * previous memory accesses.
 */
static inline void io_write64_array_mb(io64_array_t io, size_t n, uint64_t v)
{
	data_sync_barrier();
	assert(n < io.count);
	io.base[n] = v;
}
354