// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <ddk/io-buffer.h>
#include <ddk/debug.h>
#include <ddk/driver.h>
#include <zircon/assert.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Returns true if a buffer with these parameters was allocated using
// zx_vmo_create_contiguous. This is primarily important so we know whether we
// need to call COMMIT on it to get the pages to exist.
static bool is_allocated_contiguous(size_t size, uint32_t flags) {
    return (flags & IO_BUFFER_CONTIG) && size > PAGE_SIZE;
}

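// Pins the given VMO through the BTI and returns a single physical address
// for it. Multi-page buffers are pinned with ZX_BTI_CONTIGUOUS so that one
// address describes the entire run of pages.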
static zx_status_t pin_contig_buffer(zx_handle_t bti, zx_handle_t vmo, size_t size,
                                     zx_paddr_t* phys, zx_handle_t* pmt) {
    uint32_t options = ZX_BTI_PERM_READ | ZX_BTI_PERM_WRITE;
    if (size > PAGE_SIZE) {
        options |= ZX_BTI_CONTIGUOUS;
    }
    return zx_bti_pin(bti, options, vmo, 0, ROUNDUP(size, PAGE_SIZE), phys, 1, pmt);
}

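// Shared tail of the io_buffer_init* entry points: maps the VMO into the
// root VMAR and, for contiguous buffers, pins it up front so io_buffer_phys()
// is immediately valid. Takes ownership of vmo_handle and closes it on error.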
static zx_status_t io_buffer_init_common(io_buffer_t* buffer, zx_handle_t bti_handle,
                                         zx_handle_t vmo_handle, size_t size,
                                         zx_off_t offset, uint32_t flags) {
    zx_vaddr_t virt;

    zx_vm_option_t map_options = ZX_VM_PERM_READ;
    if (flags & IO_BUFFER_RW) {
        map_options = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
    }

    zx_status_t status = zx_vmar_map(zx_vmar_root_self(), map_options, 0, vmo_handle, 0, size,
                                     &virt);
    if (status != ZX_OK) {
        zxlogf(ERROR, "io_buffer: zx_vmar_map failed %d size: %zu\n", status, size);
        zx_handle_close(vmo_handle);
        return status;
    }

    // For contiguous buffers, pre-lookup the physical mapping so
    // io_buffer_phys() works. For non-contiguous buffers, io_buffer_physmap()
    // will need to be called.
    zx_paddr_t phys = IO_BUFFER_INVALID_PHYS;
    zx_handle_t pmt_handle = ZX_HANDLE_INVALID;
    if (flags & IO_BUFFER_CONTIG) {
        ZX_DEBUG_ASSERT(offset == 0);
        status = pin_contig_buffer(bti_handle, vmo_handle, size, &phys, &pmt_handle);
        if (status != ZX_OK) {
            zxlogf(ERROR, "io_buffer: init pin failed %d size: %zu\n", status, size);
            zx_vmar_unmap(zx_vmar_root_self(), virt, size);
            zx_handle_close(vmo_handle);
            return status;
        }
    }

    buffer->bti_handle = bti_handle;
    buffer->vmo_handle = vmo_handle;
    buffer->pmt_handle = pmt_handle;
    buffer->size = size;
    buffer->offset = offset;
    buffer->virt = (void*)virt;
    buffer->phys = phys;

    return ZX_OK;
}

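// Allocates and maps a new buffer of at least |size| bytes. When
// IO_BUFFER_CONTIG is set and the buffer spans more than one page, the
// backing VMO is created physically contiguous with 2^alignment_log2
// alignment.
//
// Illustrative usage sketch (the BTI handle is assumed to come from the
// device's platform resources; error handling is elided):
//
//   io_buffer_t buf;
//   zx_status_t status = io_buffer_init_aligned(&buf, bti, 2 * PAGE_SIZE, 0,
//                                               IO_BUFFER_RW | IO_BUFFER_CONTIG);
//   if (status == ZX_OK) {
//       memset(io_buffer_virt(&buf), 0, buf.size);
//       io_buffer_release(&buf);
//   }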
zx_status_t io_buffer_init_aligned(io_buffer_t* buffer, zx_handle_t bti, size_t size,
                                   uint32_t alignment_log2, uint32_t flags) {
    memset(buffer, 0, sizeof(*buffer));

    if (size == 0) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (flags & ~IO_BUFFER_FLAGS_MASK) {
        return ZX_ERR_INVALID_ARGS;
    }

    zx_handle_t vmo_handle;
    zx_status_t status;

    if (is_allocated_contiguous(size, flags)) {
        status = zx_vmo_create_contiguous(bti, size, alignment_log2, &vmo_handle);
    } else {
        // zx_vmo_create doesn't support passing an alignment.
        if (alignment_log2 != 0) {
            return ZX_ERR_INVALID_ARGS;
        }
        status = zx_vmo_create(size, 0, &vmo_handle);
    }
    if (status != ZX_OK) {
        zxlogf(ERROR, "io_buffer: zx_vmo_create failed %d\n", status);
        return status;
    }

    if (flags & IO_BUFFER_UNCACHED) {
        status = zx_vmo_set_cache_policy(vmo_handle, ZX_CACHE_POLICY_UNCACHED);
        if (status != ZX_OK) {
            zxlogf(ERROR, "io_buffer: zx_vmo_set_cache_policy failed %d\n", status);
            zx_handle_close(vmo_handle);
            return status;
        }
    }

    return io_buffer_init_common(buffer, bti, vmo_handle, size, 0, flags);
}

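// Convenience wrapper around io_buffer_init_aligned() with no alignment
// constraint.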
zx_status_t io_buffer_init(io_buffer_t* buffer, zx_handle_t bti, size_t size, uint32_t flags) {
    // A zero alignment gets interpreted as PAGE_SIZE_SHIFT.
    return io_buffer_init_aligned(buffer, bti, size, 0, flags);
}

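// Initializes the io_buffer with an existing VMO rather than allocating a new
// one. The handle is duplicated, so the caller retains ownership of
// |vmo_handle|. Only IO_BUFFER_RO or IO_BUFFER_RW may be passed in |flags|.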
zx_status_t io_buffer_init_vmo(io_buffer_t* buffer, zx_handle_t bti, zx_handle_t vmo_handle,
                               zx_off_t offset, uint32_t flags) {
    memset(buffer, 0, sizeof(*buffer));

    if (flags != IO_BUFFER_RO && flags != IO_BUFFER_RW) {
        return ZX_ERR_INVALID_ARGS;
    }

    zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, &vmo_handle);
    if (status != ZX_OK) {
        return status;
    }

    uint64_t size;
    status = zx_vmo_get_size(vmo_handle, &size);
    if (status != ZX_OK) {
        zx_handle_close(vmo_handle);
        return status;
    }

    return io_buffer_init_common(buffer, bti, vmo_handle, size, offset, flags);
}

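// Wraps an already-mapped MMIO region in an io_buffer. No mapping or pinning
// is performed here; the buffer only duplicates |vmo_handle| and records the
// caller-supplied virtual address, offset, and size.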
zx_status_t io_buffer_init_mmio(io_buffer_t* buffer, zx_handle_t vmo_handle, void* virt,
                                zx_off_t offset, size_t size) {
    memset(buffer, 0, sizeof(*buffer));

    zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, &vmo_handle);
    if (status != ZX_OK) {
        return status;
    }

    buffer->vmo_handle = vmo_handle;
    buffer->size = size;
    buffer->offset = offset;
    buffer->virt = virt;

    return ZX_OK;
}

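// Creates an io_buffer for a fixed physical address range, such as device
// registers or a reserved DMA region. Requires a |resource| handle with
// sufficient rights to create a physical VMO. The whole range is mapped,
// given |cache_policy|, and pinned through |bti| before the call returns.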
zx_status_t io_buffer_init_physical(io_buffer_t* buffer, zx_handle_t bti, zx_paddr_t addr,
                                    size_t size, zx_handle_t resource, uint32_t cache_policy) {
    memset(buffer, 0, sizeof(*buffer));

    zx_handle_t vmo_handle;
    zx_status_t status = zx_vmo_create_physical(resource, addr, size, &vmo_handle);
    if (status != ZX_OK) {
        zxlogf(ERROR, "io_buffer: zx_vmo_create_physical failed %d\n", status);
        return status;
    }

    status = zx_vmo_set_cache_policy(vmo_handle, cache_policy);
    if (status != ZX_OK) {
        zxlogf(ERROR, "io_buffer: zx_vmo_set_cache_policy failed %d\n", status);
        zx_handle_close(vmo_handle);
        return status;
    }

    zx_vm_option_t options = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_MAP_RANGE;
    zx_vaddr_t virt;
    status = zx_vmar_map(zx_vmar_root_self(), options, 0, vmo_handle, 0, size, &virt);
    if (status != ZX_OK) {
        zxlogf(ERROR, "io_buffer: zx_vmar_map failed %d size: %zu\n", status, size);
        zx_handle_close(vmo_handle);
        return status;
    }

    zx_paddr_t phys;
    zx_handle_t pmt;
    status = pin_contig_buffer(bti, vmo_handle, size, &phys, &pmt);
    if (status != ZX_OK) {
        zxlogf(ERROR, "io_buffer: init pin failed %d size: %zu\n", status, size);
        zx_vmar_unmap(zx_vmar_root_self(), virt, size);
        zx_handle_close(vmo_handle);
        return status;
    }

    buffer->bti_handle = bti;
    buffer->vmo_handle = vmo_handle;
    buffer->pmt_handle = pmt;
    buffer->size = size;
    buffer->offset = 0;
    buffer->virt = (void*)virt;
    buffer->phys = phys;
    buffer->phys_list = NULL;
    buffer->phys_count = 0;
    return ZX_OK;
}

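// Unpins, unmaps, and closes everything the buffer owns. Safe to call on a
// buffer whose init failed or that was already released: all handles are
// reset to ZX_HANDLE_INVALID and the physical address list is freed.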
void io_buffer_release(io_buffer_t* buffer) {
    if (buffer->vmo_handle != ZX_HANDLE_INVALID) {
        if (buffer->pmt_handle != ZX_HANDLE_INVALID) {
            zx_status_t status = zx_pmt_unpin(buffer->pmt_handle);
            ZX_DEBUG_ASSERT(status == ZX_OK);
            buffer->pmt_handle = ZX_HANDLE_INVALID;
        }

        zx_vmar_unmap(zx_vmar_root_self(), (uintptr_t)buffer->virt, buffer->size);
        zx_handle_close(buffer->vmo_handle);
        buffer->vmo_handle = ZX_HANDLE_INVALID;
    }
    if (buffer->phys_list && buffer->pmt_handle != ZX_HANDLE_INVALID) {
        zx_status_t status = zx_pmt_unpin(buffer->pmt_handle);
        ZX_DEBUG_ASSERT(status == ZX_OK);
        buffer->pmt_handle = ZX_HANDLE_INVALID;
    }
    free(buffer->phys_list);
    buffer->phys_list = NULL;
    buffer->phys = 0;
    buffer->phys_count = 0;
}

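// Performs cache maintenance |op| (e.g. ZX_VMO_OP_CACHE_CLEAN) on |size|
// bytes of the buffer starting at |offset|, relative to the buffer's own
// offset into its VMO. A zero-length range is a no-op.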
zx_status_t io_buffer_cache_op(io_buffer_t* buffer, const uint32_t op,
                               const zx_off_t offset, const size_t size) {
    if (size > 0) {
        return zx_vmo_op_range(buffer->vmo_handle, op, buffer->offset + offset, size, NULL, 0);
    } else {
        return ZX_OK;
    }
}

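// Cleans the data cache for |length| bytes of the mapped buffer starting at
// |offset|, rejecting ranges that overflow or run past the end of the buffer.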
zx_status_t io_buffer_cache_flush(io_buffer_t* buffer, zx_off_t offset, size_t length) {
    if (offset + length < offset || offset + length > buffer->size) {
        return ZX_ERR_OUT_OF_RANGE;
    }
    return zx_cache_flush(io_buffer_virt(buffer) + offset, length, ZX_CACHE_FLUSH_DATA);
}

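// Same as io_buffer_cache_flush(), but also invalidates the range so that
// subsequent reads observe fresh data, e.g. after a device has written into
// the buffer via DMA.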
zx_status_t io_buffer_cache_flush_invalidate(io_buffer_t* buffer, zx_off_t offset, size_t length) {
    if (offset + length < offset || offset + length > buffer->size) {
        return ZX_ERR_OUT_OF_RANGE;
    }
    return zx_cache_flush(io_buffer_virt(buffer) + offset, length,
                          ZX_CACHE_FLUSH_DATA | ZX_CACHE_FLUSH_INVALIDATE);
}

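// Populates buffer->phys_list / buffer->phys_count with the physical pages
// backing the buffer, pinning them if the buffer is not already contiguous.
// Idempotent: returns ZX_OK immediately if the list is already populated.
//
// Illustrative sketch of walking the result to build a scatter-gather list
// (queue_dma_page() is a hypothetical driver helper, not part of this API):
//
//   if (io_buffer_physmap(&buf) == ZX_OK) {
//       for (size_t i = 0; i < buf.phys_count; i++) {
//           queue_dma_page(dev, buf.phys_list[i]);
//       }
//   }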
zx_status_t io_buffer_physmap(io_buffer_t* buffer) {
    if (buffer->phys_count > 0) {
        return ZX_OK;
    }
    if (buffer->size == 0) {
        return ZX_ERR_INVALID_ARGS;
    }
    if (buffer->pmt_handle != ZX_HANDLE_INVALID && buffer->phys == IO_BUFFER_INVALID_PHYS) {
        return ZX_ERR_BAD_STATE;
    }

    // zx_bti_pin returns whole pages, so take the unaligned vmo offset and
    // length into account when calculating the number of pages returned.
    uint64_t page_offset = ROUNDDOWN(buffer->offset, PAGE_SIZE);
    // The buffer size is the vmo size from offset 0.
    uint64_t page_length = buffer->size - page_offset;
    uint64_t pages = ROUNDUP(page_length, PAGE_SIZE) / PAGE_SIZE;

    zx_paddr_t* paddrs = malloc(pages * sizeof(zx_paddr_t));
    if (paddrs == NULL) {
        zxlogf(ERROR, "io_buffer: out of memory\n");
        return ZX_ERR_NO_MEMORY;
    }

    if (buffer->phys == IO_BUFFER_INVALID_PHYS) {
        zx_handle_t pmt;
        zx_status_t status = io_buffer_physmap_range(buffer, page_offset, page_length,
                                                     pages, paddrs, &pmt);
        if (status != ZX_OK) {
            free(paddrs);
            return status;
        }
        buffer->pmt_handle = pmt;
    } else {
        // If this is a contiguous io-buffer, just populate the page array
        // ourselves.
        for (size_t i = 0; i < pages; ++i) {
            paddrs[i] = buffer->phys + page_offset + i * PAGE_SIZE;
        }
        paddrs[0] += buffer->offset & (PAGE_SIZE - 1);
    }
    buffer->phys_list = paddrs;
    buffer->phys_count = pages;
    return ZX_OK;
}

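// Pins the pages covering [offset, offset + length) and writes their physical
// addresses into |physmap|, which must hold exactly |phys_count| entries.
// physmap[0] is adjusted for any sub-page misalignment of |offset|. The
// caller owns the returned |pmt| and must eventually unpin it.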
zx_status_t io_buffer_physmap_range(io_buffer_t* buffer, zx_off_t offset,
                                    size_t length, size_t phys_count,
                                    zx_paddr_t* physmap, zx_handle_t* pmt) {
    // TODO(teisenbe): We need to figure out how to integrate lifetime
    // management of this pin into the io_buffer API...
    const size_t sub_offset = offset & (PAGE_SIZE - 1);
    const size_t pin_offset = offset - sub_offset;
    const size_t pin_length = ROUNDUP(length + sub_offset, PAGE_SIZE);

    if (pin_length / PAGE_SIZE != phys_count) {
        return ZX_ERR_INVALID_ARGS;
    }

    uint32_t options = ZX_BTI_PERM_READ | ZX_BTI_PERM_WRITE;
    zx_status_t status = zx_bti_pin(buffer->bti_handle, options, buffer->vmo_handle,
                                    pin_offset, pin_length, physmap, phys_count, pmt);
    if (status != ZX_OK) {
        return status;
    }
    // Account for the initial misalignment, if any.
    physmap[0] += sub_offset;
    return ZX_OK;
}