// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <ddk/protocol/usb.h>
#include <usb/usb-request.h>
#include <ddk/debug.h>

#include <zircon/process.h>
#include <zircon/syscalls.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

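// Returns the number of bytes available in the request's buffer starting at |offset|,
// or 0 if the offset is out of range.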
static inline size_t req_buffer_size(usb_request_t* req, size_t offset) {
    size_t remaining = req->size - req->offset - offset;
    // The subtraction may have wrapped around if offset is past the end of the buffer.
    if (remaining > req->size) {
        remaining = 0;
    }
    return remaining;
}

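// Returns a pointer to the start of the request's data within the mapped VMO.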
static inline void* req_buffer_virt(usb_request_t* req) {
    return (void*)(((uintptr_t)req->virt) + req->offset);
}

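// usb_request_alloc() creates a new usb request, allocating and mapping a VMO of
// |data_size| bytes for its data buffer. |req_size| is the total allocation size for
// the request and must be at least sizeof(usb_request_t).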
__EXPORT zx_status_t usb_request_alloc(usb_request_t** out, uint64_t data_size,
                                       uint8_t ep_address, size_t req_size) {
    if (req_size < sizeof(usb_request_t)) {
        return ZX_ERR_INVALID_ARGS;
    }
    usb_request_t* req = calloc(1, req_size);
    if (!req) {
        return ZX_ERR_NO_MEMORY;
    }
    zx_status_t status = ZX_OK;
    if (data_size > 0) {
        status = zx_vmo_create(data_size, 0, &req->vmo_handle);
        if (status != ZX_OK) {
            zxlogf(ERROR, "usb_request_alloc: Failed to create vmo: %d\n", status);
            free(req);
            return status;
        }

        zx_vaddr_t mapped_addr;
        status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                             0, req->vmo_handle, 0, data_size, &mapped_addr);

        if (status != ZX_OK) {
            zxlogf(ERROR, "usb_request_alloc: Failed to map the vmo: %d\n", status);
            // Close the vmo we just created so the handle is not leaked.
            zx_handle_close(req->vmo_handle);
            free(req);
            return status;
        }

        req->virt = mapped_addr;
        req->offset = 0;
        req->size = data_size;
    }
    req->alloc_size = req_size;
    req->header.ep_address = ep_address;
    req->header.length = data_size;
    req->release_frees = true;
    *out = req;
    return ZX_OK;
}
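
// Illustrative usage (a minimal sketch, not taken from a real driver): the endpoint
// address, transfer size, and payload below are hypothetical.
//
//   usb_request_t* req;
//   zx_status_t status = usb_request_alloc(&req, 512, /* ep_address */ 0x01,
//                                          sizeof(usb_request_t));
//   if (status == ZX_OK) {
//       uint8_t payload[64] = {0};
//       usb_request_copy_to(req, payload, sizeof(payload), 0);
//       // ... queue the request with the host controller, then release it from the
//       // completion path once it is no longer needed.
//       usb_request_release(req);
//   }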

// usb_request_alloc_vmo() creates a new usb request with the given VMO.
__EXPORT zx_status_t usb_request_alloc_vmo(usb_request_t** out, zx_handle_t vmo_handle,
                                           uint64_t vmo_offset, uint64_t length,
                                           uint8_t ep_address, size_t req_size) {
    usb_request_t* req = calloc(1, req_size);
    if (!req) {
        return ZX_ERR_NO_MEMORY;
    }
    zx_handle_t dup_handle;
    zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, &dup_handle);
    if (status != ZX_OK) {
        zxlogf(ERROR, "usb_request_alloc_vmo: Failed to duplicate handle: %d\n", status);
        free(req);
        return status;
    }

    uint64_t size;
    status = zx_vmo_get_size(dup_handle, &size);
    if (status != ZX_OK) {
        zx_handle_close(dup_handle);
        free(req);
        return status;
    }

    zx_vaddr_t mapped_addr;
    status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                         0, dup_handle, 0, size, &mapped_addr);
    if (status != ZX_OK) {
        zxlogf(ERROR, "usb_request_alloc_vmo: zx_vmar_map failed %d size: %zu\n", status, size);
        zx_handle_close(dup_handle);
        free(req);
        return status;
    }

    req->alloc_size = req_size;
    req->vmo_handle = dup_handle;
    req->virt = mapped_addr;
    req->offset = vmo_offset;
    req->size = size;

    req->pmt = ZX_HANDLE_INVALID;

    req->header.ep_address = ep_address;
    req->header.length = length;
    req->release_frees = true;
    *out = req;
    return ZX_OK;
}

// usb_request_init() initializes a statically allocated usb request with the given VMO.
// When the request is released, any resources it allocated will be freed, but the usb
// request itself will not be freed.
__EXPORT zx_status_t usb_request_init(usb_request_t* req, zx_handle_t vmo_handle,
                                      uint64_t vmo_offset, uint64_t length, uint8_t ep_address) {
    memset(req, 0, req->alloc_size);

    zx_handle_t dup_handle;
    zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, &dup_handle);
    if (status != ZX_OK) {
        zxlogf(ERROR, "usb_request_init: Failed to duplicate handle: %d\n", status);
        return status;
    }

    uint64_t size;
    status = zx_vmo_get_size(dup_handle, &size);
    if (status != ZX_OK) {
        zx_handle_close(dup_handle);
        return status;
    }

    // TODO(ravoorir): Do not map the entire vmo. Map only what is needed.
    zx_vaddr_t mapped_addr;
    status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                         0, dup_handle, 0, size, &mapped_addr);
    if (status != ZX_OK) {
        zxlogf(ERROR, "usb_request_init: zx_vmar_map failed %d size: %zu\n", status, size);
        zx_handle_close(dup_handle);
        return status;
    }

    req->vmo_handle = dup_handle;
    req->virt = mapped_addr;
    req->offset = vmo_offset;
    req->size = size;

    req->pmt = ZX_HANDLE_INVALID;

    req->header.ep_address = ep_address;
    req->header.length = length;
    req->release_frees = false;
    return ZX_OK;
}

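// usb_request_set_sg_list() copies |sg_list| into the request, replacing any existing
// scatter-gather list. Every entry must fit within the request's buffer; on success,
// header.length is updated to the sum of the entry lengths.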
__EXPORT zx_status_t usb_request_set_sg_list(usb_request_t* req,
                                             phys_iter_sg_entry_t* sg_list, size_t sg_count) {
    if (req->sg_list) {
        free(req->sg_list);
        req->sg_list = NULL;
        req->sg_count = 0;
    }
    size_t total_length = 0;
    // TODO(jocelyndang): disallow overlapping entries?
    for (size_t i = 0; i < sg_count; ++i) {
        phys_iter_sg_entry_t* entry = &sg_list[i];
        if (entry->length == 0 || (req_buffer_size(req, entry->offset) < entry->length)) {
            return ZX_ERR_INVALID_ARGS;
        }
        total_length += entry->length;
    }
    size_t num_bytes = sg_count * sizeof(phys_iter_sg_entry_t);
    req->sg_list = malloc(num_bytes);
    if (req->sg_list == NULL) {
        zxlogf(ERROR, "usb_request_set_sg_list: out of memory\n");
        return ZX_ERR_NO_MEMORY;
    }
    memcpy(req->sg_list, sg_list, num_bytes);
    req->sg_count = sg_count;
    req->header.length = total_length;
    return ZX_OK;
}

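// usb_request_copy_from() copies up to |length| bytes from the request's buffer at
// |offset| into |data|, clamped to the bytes available, and returns the number copied.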
__EXPORT ssize_t usb_request_copy_from(usb_request_t* req, void* data, size_t length, size_t offset) {
    length = MIN(req_buffer_size(req, offset), length);
    memcpy(data, req_buffer_virt(req) + offset, length);
    return length;
}

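// usb_request_copy_to() copies up to |length| bytes from |data| into the request's
// buffer at |offset|, clamped to the bytes available, and returns the number copied.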
__EXPORT ssize_t usb_request_copy_to(usb_request_t* req, const void* data, size_t length, size_t offset) {
    length = MIN(req_buffer_size(req, offset), length);
    memcpy(req_buffer_virt(req) + offset, data, length);
    return length;
}

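// usb_request_mmap() returns a pointer to the request's mapped data buffer in |data|.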
__EXPORT zx_status_t usb_request_mmap(usb_request_t* req, void** data) {
    *data = req_buffer_virt(req);
    // TODO(jocelyndang): modify this once we start passing usb requests across process boundaries.
    return ZX_OK;
}

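// usb_request_cacheop() performs the cache maintenance operation |op| on |length| bytes
// of the request's VMO, starting at |offset| within the data buffer.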
__EXPORT zx_status_t usb_request_cacheop(usb_request_t* req, uint32_t op, size_t offset, size_t length) {
    if (length > 0) {
        return zx_vmo_op_range(req->vmo_handle, op, req->offset + offset, length, NULL, 0);
    } else {
        return ZX_OK;
    }
}

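// usb_request_cache_flush() flushes the data cache for |length| bytes of the request's
// buffer starting at |offset|.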
__EXPORT zx_status_t usb_request_cache_flush(usb_request_t* req, zx_off_t offset, size_t length) {
    if (offset + length < offset || offset + length > req->size) {
        return ZX_ERR_OUT_OF_RANGE;
    }
    return zx_cache_flush(req_buffer_virt(req) + offset, length, ZX_CACHE_FLUSH_DATA);
}

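// usb_request_cache_flush_invalidate() flushes and invalidates the data cache for
// |length| bytes of the request's buffer starting at |offset|.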
__EXPORT zx_status_t usb_request_cache_flush_invalidate(usb_request_t* req, zx_off_t offset, size_t length) {
    if (offset + length < offset || offset + length > req->size) {
        return ZX_ERR_OUT_OF_RANGE;
    }
    return zx_cache_flush(req_buffer_virt(req) + offset, length,
                          ZX_CACHE_FLUSH_DATA | ZX_CACHE_FLUSH_INVALIDATE);
}

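// usb_request_physmap() pins the request's VMO pages through |bti_handle| and fills in
// phys_list, phys_count, and pmt. It is a no-op if the pages are already pinned.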
zx_status_t usb_request_physmap(usb_request_t* req, zx_handle_t bti_handle) {
    if (req->phys_count > 0) {
        return ZX_OK;
    }
    // zx_bti_pin returns whole pages, so take into account unaligned vmo
    // offset and length when calculating the amount of pages returned
    uint64_t page_offset = ROUNDDOWN(req->offset, PAGE_SIZE);
    // The buffer size is the vmo size from offset 0.
    uint64_t page_length = req->size - page_offset;
    uint64_t pages = ROUNDUP(page_length, PAGE_SIZE) / PAGE_SIZE;

    zx_paddr_t* paddrs = malloc(pages * sizeof(zx_paddr_t));
    if (paddrs == NULL) {
        zxlogf(ERROR, "usb_request_physmap: out of memory\n");
        return ZX_ERR_NO_MEMORY;
    }
    const size_t sub_offset = page_offset & (PAGE_SIZE - 1);
    const size_t pin_offset = page_offset - sub_offset;
    const size_t pin_length = ROUNDUP(page_length + sub_offset, PAGE_SIZE);

    if (pin_length / PAGE_SIZE != pages) {
        // Avoid leaking the paddrs array on this early return.
        free(paddrs);
        return ZX_ERR_INVALID_ARGS;
    }
    zx_handle_t pmt;
    uint32_t options = ZX_BTI_PERM_READ | ZX_BTI_PERM_WRITE;
    zx_status_t status = zx_bti_pin(bti_handle, options, req->vmo_handle,
                                    pin_offset, pin_length, paddrs, pages, &pmt);
    if (status != ZX_OK) {
        zxlogf(ERROR, "usb_request_physmap: zx_bti_pin failed:%d\n", status);
        free(paddrs);
        return status;
    }
    // Account for the initial misalignment if any
    paddrs[0] += sub_offset;
    req->phys_list = paddrs;
    req->phys_count = pages;
    req->pmt = pmt;

    return ZX_OK;
}

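// usb_request_release() unpins and unmaps the request's VMO, closes its handle, frees the
// physical address and scatter-gather lists, and frees the request itself if it was
// allocated by this library (release_frees is set).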
__EXPORT void usb_request_release(usb_request_t* req) {
    if (req->vmo_handle != ZX_HANDLE_INVALID) {
        if (req->pmt != ZX_HANDLE_INVALID) {
            zx_status_t status = zx_pmt_unpin(req->pmt);
            ZX_DEBUG_ASSERT(status == ZX_OK);
            req->pmt = ZX_HANDLE_INVALID;
        }

        zx_vmar_unmap(zx_vmar_root_self(), (uintptr_t)req->virt, req->size);
        zx_handle_close(req->vmo_handle);
        req->vmo_handle = ZX_HANDLE_INVALID;
    }
    if (req->phys_list && req->pmt != ZX_HANDLE_INVALID) {
        zx_status_t status = zx_pmt_unpin(req->pmt);
        ZX_DEBUG_ASSERT(status == ZX_OK);
        req->pmt = ZX_HANDLE_INVALID;
    }
    free(req->phys_list);
    req->phys_list = NULL;
    req->phys_count = 0;
    free(req->sg_list);
    req->sg_list = NULL;
    req->sg_count = 0;
    if (req->release_frees) {
        free(req);
    }
}

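// usb_request_complete() records the completion |status| and |actual| byte count in the
// request and invokes |complete_cb| if one was provided.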
__EXPORT void usb_request_complete(usb_request_t* req, zx_status_t status, zx_off_t actual,
                                   const usb_request_complete_t* complete_cb) {
    req->response.status = status;
    req->response.actual = actual;

    if (complete_cb) {
        complete_cb->callback(complete_cb->ctx, req);
    }
}

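// usb_request_phys_iter_init() initializes |iter| to walk the request's pinned physical
// pages in contiguous chunks of at most |max_length| bytes.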
__EXPORT void usb_request_phys_iter_init(phys_iter_t* iter, usb_request_t* req, size_t max_length) {
    phys_iter_buffer_t buf = {
        .length = req->header.length,
        .vmo_offset = req->offset,
        .phys = req->phys_list,
        .phys_count = req->phys_count,
        .sg_list = req->sg_list,
        .sg_count = req->sg_count
    };
    phys_iter_init(iter, &buf, max_length);
}

__EXPORT size_t usb_request_phys_iter_next(phys_iter_t* iter, zx_paddr_t* out_paddr) {
    return phys_iter_next(iter, out_paddr);
}

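// usb_request_pool_init() initializes a pool of free usb requests. |node_offset| is the
// offset within each request's allocation where the pool's list node is stored.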
__EXPORT void usb_request_pool_init(usb_request_pool_t* pool, uint64_t node_offset) {
    mtx_init(&pool->lock, mtx_plain);
    list_initialize(&pool->free_reqs);
    pool->node_offset = node_offset;
}

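// usb_request_pool_add() returns |req| to the pool. The request's allocation must be
// large enough to hold a list node at the pool's node offset.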
__EXPORT zx_status_t usb_request_pool_add(usb_request_pool_t* pool, usb_request_t* req) {
    mtx_lock(&pool->lock);
    if (req->alloc_size < (pool->node_offset + sizeof(list_node_t))) {
        mtx_unlock(&pool->lock);
        return ZX_ERR_INVALID_ARGS;
    }
    list_add_tail(&pool->free_reqs, (list_node_t*)((uintptr_t)req + pool->node_offset));
    mtx_unlock(&pool->lock);
    return ZX_OK;
}

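// usb_request_pool_get() removes and returns a request whose buffer size is exactly
// |length|, or NULL if the pool has no such request.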
__EXPORT usb_request_t* usb_request_pool_get(usb_request_pool_t* pool, size_t length) {
    usb_request_t* req = NULL;
    bool found = false;

    mtx_lock(&pool->lock);
    list_node_t* node;
    list_for_every(&pool->free_reqs, node) {
        req = (usb_request_t*)((uintptr_t)node - pool->node_offset);
        if (req->size == length) {
            found = true;
            break;
        }
    }
    if (found) {
        list_delete(node);
    }
    mtx_unlock(&pool->lock);

    return found ? req : NULL;
}

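// usb_request_pool_release() releases every request remaining in the pool.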
__EXPORT void usb_request_pool_release(usb_request_pool_t* pool) {
    mtx_lock(&pool->lock);

    usb_request_t* req;
    list_node_t* node;
    while ((node = list_remove_tail(&pool->free_reqs)) != NULL) {
        req = (usb_request_t*)((uintptr_t)node - pool->node_offset);
        usb_request_release(req);
    }

    mtx_unlock(&pool->lock);
}

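// The usb_req_list_* helpers link requests onto a list using the usb_req_internal_t node
// stored |parent_req_size| bytes into each request's allocation; the request's alloc_size
// must be large enough to hold it.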
__EXPORT zx_status_t usb_req_list_add_head(list_node_t* list, usb_request_t* req,
                                           size_t parent_req_size) {
    if (req->alloc_size < parent_req_size + sizeof(list_node_t)) {
        return ZX_ERR_INVALID_ARGS;
    }
    usb_req_internal_t* req_int = USB_REQ_TO_REQ_INTERNAL(req, parent_req_size);
    list_add_head(list, &req_int->node);
    return ZX_OK;
}

__EXPORT zx_status_t usb_req_list_add_tail(list_node_t* list, usb_request_t* req,
                                           size_t parent_req_size) {
    if (req->alloc_size < parent_req_size + sizeof(list_node_t)) {
        return ZX_ERR_INVALID_ARGS;
    }
    usb_req_internal_t* req_int = USB_REQ_TO_REQ_INTERNAL(req, parent_req_size);
    list_add_tail(list, &req_int->node);
    return ZX_OK;
}

__EXPORT usb_request_t* usb_req_list_remove_head(list_node_t* list, size_t parent_req_size) {
    usb_req_internal_t* req_int = list_remove_head_type(list, usb_req_internal_t, node);
    if (req_int) {
        return REQ_INTERNAL_TO_USB_REQ(req_int, parent_req_size);
    }
    return NULL;
}