1 // Copyright 2018 The Fuchsia Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <ddk/debug.h>
6 #include <fuchsia/usb/debug/c/fidl.h>
7 #include <xdc-server-utils/msg.h>
8 #include <xdc-server-utils/stream.h>
9 #include <zircon/hw/usb.h>
10 #include <assert.h>
11 #include <string.h>
12 #include <threads.h>
13 #include <unistd.h>
14
15 #include "xdc.h"
16 #include "xdc-transfer.h"
17 #include "xhci-hw.h"
18 #include "xhci-util.h"
19
20 #ifndef MIN
21 #define MIN(a, b) ((a) < (b) ? (a) : (b))
22 #endif
23
24 // String descriptors use UNICODE UTF-16LE encodings.
25 #define XDC_MANUFACTURER u"Google Inc."
26 #define XDC_PRODUCT u"Fuchsia XDC Target"
27 #define XDC_SERIAL_NUMBER u""
28 #define XDC_VENDOR_ID 0x18D1
29 #define XDC_PRODUCT_ID 0xA0DC
30 #define XDC_REVISION 0x1000
31
32 // Multi-segment event rings are not currently supported.
33 #define ERST_ARRAY_SIZE 1
34 #define EVENT_RING_SIZE (PAGE_SIZE / sizeof(xhci_trb_t))
35
36 // The maximum duration to transition from connected to configured state.
37 #define TRANSITION_CONFIGURED_THRESHOLD ZX_SEC(5)
38
39 #define OUT_EP_ADDR 0x01
40 #define IN_EP_ADDR 0x81
41
42 #define MAX_REQS 30
43 #define MAX_REQ_SIZE (16 * 1024)
44
// Per-client state for an open xdc device instance.
typedef struct xdc_instance {
    // The instance device created by xdc_open.
    zx_device_t* zxdev;
    // The xdc device this instance was opened from.
    xdc_t* parent;

    // Whether the instance has registered a stream ID.
    bool has_stream_id;
    // ID of stream that this instance is reading and writing from.
    // Only valid if has_stream_id is true.
    uint32_t stream_id;
    // Whether the host has registered a stream of the same id.
    bool connected;
    // Set when the instance is closed or the driver is unbinding; reads and
    // writes on a dead instance fail with ZX_ERR_PEER_CLOSED.
    bool dead;
    // Tracks reassembly of the packet currently being delivered to the client.
    xdc_packet_state_t cur_read_packet;
    // Where we've read up to, in the first request of the completed reads list.
    size_t cur_req_read_offset;
    // Read requests that have completed and are waiting to be consumed.
    list_node_t completed_reads;
    // Needs to be acquired before accessing the stream_id, dead or read members.
    mtx_t lock;

    // For storing this instance in the parent's instance_list.
    list_node_t node;
} xdc_instance_t;
67
// For tracking streams registered on the host side.
typedef struct {
    // ID of the stream the host has registered.
    uint32_t stream_id;
    // For storing this in xdc's host_streams list.
    list_node_t node;
} xdc_host_stream_t;
74
75
xdc_req_list_add_head(list_node_t * list,usb_request_t * req,size_t parent_req_size)76 zx_status_t xdc_req_list_add_head(list_node_t* list, usb_request_t* req,
77 size_t parent_req_size) {
78 if (req->alloc_size < parent_req_size + sizeof(list_node_t)) {
79 return ZX_ERR_INVALID_ARGS;
80 }
81 xdc_req_internal_t* req_int = USB_REQ_TO_XDC_INTERNAL(req, parent_req_size);
82 list_add_head(list, &req_int->node);
83 return ZX_OK;
84 }
85
xdc_req_list_add_tail(list_node_t * list,usb_request_t * req,size_t parent_req_size)86 zx_status_t xdc_req_list_add_tail(list_node_t* list, usb_request_t* req,
87 size_t parent_req_size) {
88 if (req->alloc_size < parent_req_size + sizeof(list_node_t)) {
89 return ZX_ERR_INVALID_ARGS;
90 }
91 xdc_req_internal_t* req_int = USB_REQ_TO_XDC_INTERNAL(req, parent_req_size);
92 list_add_tail(list, &req_int->node);
93 return ZX_OK;
94 }
95
xdc_req_list_remove_head(list_node_t * list,size_t parent_req_size)96 usb_request_t* xdc_req_list_remove_head(list_node_t* list, size_t parent_req_size) {
97 xdc_req_internal_t* req_int = list_remove_head_type(list, xdc_req_internal_t, node);
98 if (req_int) {
99 return XDC_INTERNAL_TO_USB_REQ(req_int, parent_req_size);
100 }
101 return NULL;
102 }
103
xdc_req_list_remove_tail(list_node_t * list,size_t parent_req_size)104 usb_request_t* xdc_req_list_remove_tail(list_node_t* list, size_t parent_req_size) {
105 xdc_req_internal_t* req_int = list_remove_tail_type(list, xdc_req_internal_t, node);
106 if (req_int) {
107 return XDC_INTERNAL_TO_USB_REQ(req_int, parent_req_size);
108 }
109 return NULL;
110 }
111
112 static zx_status_t xdc_write(xdc_t* xdc, uint32_t stream_id, const void* buf, size_t count,
113 size_t* actual, bool is_ctrl_msg);
114
xdc_wait_bits(volatile uint32_t * ptr,uint32_t bits,uint32_t expected)115 static void xdc_wait_bits(volatile uint32_t* ptr, uint32_t bits, uint32_t expected) {
116 uint32_t value = XHCI_READ32(ptr);
117 while ((value & bits) != expected) {
118 usleep(1000);
119 value = XHCI_READ32(ptr);
120 }
121 }
122
123 // Populates the pointer to the debug capability in the xdc struct.
xdc_get_debug_cap(xdc_t * xdc)124 static zx_status_t xdc_get_debug_cap(xdc_t* xdc) {
125 uint32_t cap_id = EXT_CAP_USB_DEBUG_CAPABILITY;
126 xdc->debug_cap_regs = (xdc_debug_cap_regs_t*)xhci_get_next_ext_cap(xdc->mmio, nullptr, &cap_id);
127 return xdc->debug_cap_regs ? ZX_OK : ZX_ERR_NOT_FOUND;
128 }
129
// Populates the string descriptors and info context (DbCIC) string descriptor metadata.
// |strs_base| is the physical address where the string descriptor block lives,
// used to compute the per-descriptor addresses written into the DbCIC.
static void xdc_str_descs_init(xdc_t* xdc, zx_paddr_t strs_base) {
    xdc_str_descs_t* strs = xdc->str_descs;

    // String Descriptor 0 contains the supported languages as a list of numbers (LANGIDs).
    // 0x0409: English (United States)
    strs->str_0_desc.string[0] = 0x09;
    strs->str_0_desc.string[1] = 0x04;
    strs->str_0_desc.len = STR_DESC_METADATA_LEN + 2;
    strs->str_0_desc.type = USB_DT_STRING;

    // sizeof of the u"" literals includes the UTF-16 NUL terminator.
    memcpy(&strs->manufacturer_desc.string, XDC_MANUFACTURER, sizeof(XDC_MANUFACTURER));
    strs->manufacturer_desc.len = STR_DESC_METADATA_LEN + sizeof(XDC_MANUFACTURER);
    strs->manufacturer_desc.type = USB_DT_STRING;

    memcpy(&strs->product_desc.string, XDC_PRODUCT, sizeof(XDC_PRODUCT));
    strs->product_desc.len = STR_DESC_METADATA_LEN + sizeof(XDC_PRODUCT);
    strs->product_desc.type = USB_DT_STRING;

    memcpy(&strs->serial_num_desc.string, XDC_SERIAL_NUMBER, sizeof(XDC_SERIAL_NUMBER));
    strs->serial_num_desc.len = STR_DESC_METADATA_LEN + sizeof(XDC_SERIAL_NUMBER);
    strs->serial_num_desc.type = USB_DT_STRING;

    // Populate the addresses and lengths of the string descriptors in the info context (DbCIC).
    xdc_dbcic_t* dbcic = &xdc->context_data->dbcic;

    dbcic->str_0_desc_addr = strs_base + offsetof(xdc_str_descs_t, str_0_desc);
    dbcic->manufacturer_desc_addr = strs_base + offsetof(xdc_str_descs_t, manufacturer_desc);
    dbcic->product_desc_addr = strs_base + offsetof(xdc_str_descs_t, product_desc);
    dbcic->serial_num_desc_addr = strs_base + offsetof(xdc_str_descs_t, serial_num_desc);

    dbcic->str_0_desc_len = strs->str_0_desc.len;
    dbcic->manufacturer_desc_len = strs->manufacturer_desc.len;
    dbcic->product_desc_len = strs->product_desc.len;
    dbcic->serial_num_desc_len = strs->serial_num_desc.len;
}
166
// Initializes the transfer ring and endpoint context for the endpoint at
// |ep_idx| (IN or OUT). Returns ZX_ERR_INVALID_ARGS for an out-of-range index,
// or the transfer ring initialization error.
static zx_status_t xdc_endpoint_ctx_init(xdc_t* xdc, uint32_t ep_idx) {
    if (ep_idx >= NUM_EPS) {
        return ZX_ERR_INVALID_ARGS;
    }
    // Initialize the endpoint.
    xdc_endpoint_t* ep = &xdc->eps[ep_idx];
    list_initialize(&ep->queued_reqs);
    list_initialize(&ep->pending_reqs);
    ep->direction = ep_idx == IN_EP_IDX ? USB_DIR_IN : USB_DIR_OUT;
    snprintf(ep->name, MAX_EP_DEBUG_NAME_LEN, ep_idx == IN_EP_IDX ? "IN" : "OUT");
    ep->state = XDC_EP_STATE_RUNNING;

    zx_status_t status = xhci_transfer_ring_init(&ep->transfer_ring, xdc->bti_handle,
                                                 TRANSFER_RING_SIZE);
    if (status != ZX_OK) {
        return status;
    }
    zx_paddr_t tr_dequeue = xhci_transfer_ring_start_phys(&ep->transfer_ring);

    // Max burst size supported by the debug capability, read from DCCTRL.
    uint32_t max_burst = XHCI_GET_BITS32(&xdc->debug_cap_regs->dcctrl,
                                         DCCTRL_MAX_BURST_START, DCCTRL_MAX_BURST_BITS);
    int avg_trb_length = EP_CTX_MAX_PACKET_SIZE * (max_burst + 1);

    // Select the IN or OUT endpoint context within the shared context data.
    xhci_endpoint_context_t* epc =
        ep_idx == IN_EP_IDX ? &xdc->context_data->in_epc : &xdc->context_data->out_epc;

    XHCI_WRITE32(&epc->epc0, 0);

    XHCI_SET_BITS32(&epc->epc1, EP_CTX_EP_TYPE_START, EP_CTX_EP_TYPE_BITS,
                    ep_idx == IN_EP_IDX ? EP_CTX_EP_TYPE_BULK_IN : EP_CTX_EP_TYPE_BULK_OUT);
    XHCI_SET_BITS32(&epc->epc1, EP_CTX_MAX_BURST_SIZE_START, EP_CTX_MAX_BURST_SIZE_BITS,
                    max_burst);
    XHCI_SET_BITS32(&epc->epc1, EP_CTX_MAX_PACKET_SIZE_START, EP_CTX_MAX_PACKET_SIZE_BITS,
                    EP_CTX_MAX_PACKET_SIZE);

    // Program the transfer ring dequeue pointer; the low word also carries the
    // dequeue cycle state (DCS) bit.
    XHCI_WRITE32(&epc->epc2, ((uint32_t)tr_dequeue & EP_CTX_TR_DEQUEUE_LO_MASK) | EP_CTX_DCS);
    XHCI_WRITE32(&epc->tr_dequeue_hi, (uint32_t)(tr_dequeue >> 32));

    XHCI_SET_BITS32(&epc->epc4, EP_CTX_AVG_TRB_LENGTH_START, EP_CTX_AVG_TRB_LENGTH_BITS,
                    avg_trb_length);
    // The Endpoint Context Interval, LSA, MaxPStreams, Mult, HID, Cerr, FE and
    // Max Esit Payload fields do not apply to the DbC. See section 7.6.3.2 of XHCI Spec.
    return ZX_OK;
}
212
// Allocates the DbC context data plus string descriptors, initializes both,
// initializes the endpoint contexts, then programs the Debug Capability
// Context Pointer (DCCP) register.
// NOTE(review): on endpoint init failure the context buffer is not released
// here — presumably it is freed later via xdc_free; confirm the caller's
// error path.
static zx_status_t xdc_context_data_init(xdc_t* xdc) {
    // Allocate a buffer to store the context data and string descriptors.
    zx_status_t status = io_buffer_init(&xdc->context_str_descs_buffer,
                                        xdc->bti_handle, PAGE_SIZE,
                                        IO_BUFFER_RW | IO_BUFFER_CONTIG | IO_BUFFER_UNCACHED);
    if (status != ZX_OK) {
        zxlogf(ERROR, "failed to alloc xdc context and strings buffer, err: %d\n", status);
        return status;
    }
    xdc->context_data = static_cast<xdc_context_data_t*>(
        io_buffer_virt(&xdc->context_str_descs_buffer));
    zx_paddr_t context_data_phys = io_buffer_phys(&xdc->context_str_descs_buffer);

    // The context data only takes 192 bytes, so we can store the string descriptors after it.
    xdc->str_descs = reinterpret_cast<xdc_str_descs_t*>(
        reinterpret_cast<uintptr_t>(xdc->context_data) + sizeof(xdc_context_data_t));
    zx_paddr_t str_descs_phys = context_data_phys + sizeof(xdc_context_data_t);

    // Populate the string descriptors, and string descriptor metadata in the context data.
    xdc_str_descs_init(xdc, str_descs_phys);

    // Initialize the endpoint contexts in the context data.
    for (uint32_t i = 0; i < NUM_EPS; i++) {
        status = xdc_endpoint_ctx_init(xdc, i);
        if (status != ZX_OK) {
            return status;
        }
    }
    XHCI_WRITE64(&xdc->debug_cap_regs->dccp, context_data_phys);
    return ZX_OK;
}
244
245 // Updates the event ring dequeue pointer register to the current event ring position.
xdc_update_erdp(xdc_t * xdc)246 static void xdc_update_erdp(xdc_t* xdc) {
247 uint64_t erdp = xhci_event_ring_current_phys(&xdc->event_ring);
248 XHCI_WRITE64(&xdc->debug_cap_regs->dcerdp, erdp);
249 }
250
// Sets up the event ring segment table and buffers, then programs the
// DCERDP, DCERSTSZ and DCERSTBA registers. The erst_buffer allocated here is
// released in xdc_free.
static zx_status_t xdc_event_ring_init(xdc_t* xdc) {
    // Event Ring Segment Table and Event Ring Segments
    zx_status_t status = io_buffer_init(&xdc->erst_buffer, xdc->bti_handle, PAGE_SIZE,
                                        IO_BUFFER_RW | IO_BUFFER_CONTIG | IO_BUFFER_UNCACHED);
    if (status != ZX_OK) {
        zxlogf(ERROR, "failed to alloc xdc erst_buffer, err: %d\n", status);
        return status;
    }

    xdc->erst_array = (erst_entry_t *)io_buffer_virt(&xdc->erst_buffer);
    zx_paddr_t erst_array_phys = io_buffer_phys(&xdc->erst_buffer);

    status = xhci_event_ring_init(&xdc->event_ring, xdc->bti_handle,
                                  xdc->erst_array, EVENT_RING_SIZE);
    if (status != ZX_OK) {
        zxlogf(ERROR, "xhci_event_ring_init failed, err: %d\n", status);
        return status;
    }

    // Update the event ring dequeue pointer.
    xdc_update_erdp(xdc);

    // Single-segment event ring (multi-segment rings are not supported).
    XHCI_SET32(&xdc->debug_cap_regs->dcerstsz, ERSTSZ_MASK, ERST_ARRAY_SIZE);
    XHCI_WRITE64(&xdc->debug_cap_regs->dcerstba, erst_array_phys);

    return ZX_OK;
}
279
280 // Initializes the debug capability registers and required data structures.
281 // This needs to be called everytime the host controller is reset.
xdc_init_debug_cap(xdc_t * xdc)282 static zx_status_t xdc_init_debug_cap(xdc_t* xdc) {
283 // Initialize the Device Descriptor Info Registers.
284 XHCI_WRITE32(&xdc->debug_cap_regs->dcddi1, XDC_VENDOR_ID << DCDDI1_VENDOR_ID_START);
285 XHCI_WRITE32(&xdc->debug_cap_regs->dcddi2,
286 (XDC_REVISION << DCDDI2_DEVICE_REVISION_START) | XDC_PRODUCT_ID);
287
288 zx_status_t status = xdc_event_ring_init(xdc);
289 if (status != ZX_OK) {
290 return status;
291 }
292 status = xdc_context_data_init(xdc);
293 if (status != ZX_OK) {
294 return status;
295 }
296 return ZX_OK;
297 }
298
// Instance write hook: forwards the client's buffer to the instance's stream.
// Returns ZX_ERR_PEER_CLOSED if the instance is dead, ZX_ERR_BAD_STATE if no
// stream id has been registered, and ZX_ERR_SHOULD_WAIT if the host has not
// yet connected the matching stream.
static zx_status_t xdc_write_instance(void* ctx, const void* buf, size_t count,
                                      zx_off_t off, size_t* actual) {
    auto* inst = static_cast<xdc_instance_t*>(ctx);

    mtx_lock(&inst->lock);

    if (inst->dead) {
        mtx_unlock(&inst->lock);
        return ZX_ERR_PEER_CLOSED;
    }
    if (!inst->has_stream_id) {
        zxlogf(ERROR, "write failed, instance %p did not register for a stream id\n", inst);
        mtx_unlock(&inst->lock);
        return ZX_ERR_BAD_STATE;
    }
    if (!inst->connected) {
        mtx_unlock(&inst->lock);
        return ZX_ERR_SHOULD_WAIT;
    }
    // Snapshot the stream id so the instance lock is not held across xdc_write.
    uint32_t stream_id = inst->stream_id;

    mtx_unlock(&inst->lock);

    return xdc_write(inst->parent, stream_id, buf, count, actual, false /* is_ctrl_msg */);
}
324
xdc_update_instance_write_signal(xdc_instance_t * inst,bool writable)325 static void xdc_update_instance_write_signal(xdc_instance_t* inst, bool writable) {
326 mtx_lock(&inst->lock);
327
328 if (inst->dead || !inst->has_stream_id) {
329 mtx_unlock(&inst->lock);
330 return;
331 }
332
333 // For an instance to be writable, we need the xdc device to be ready for writing,
334 // and the corresponding stream to be registered on the host.
335 if (writable && inst->connected) {
336 device_state_set(inst->zxdev, DEV_STATE_WRITABLE);
337 } else {
338 device_state_clr(inst->zxdev, DEV_STATE_WRITABLE);
339 }
340
341 mtx_unlock(&inst->lock);
342 }
343
// Returns the saved host stream with the given id, or nullptr if the host has
// not registered one. The caller must hold the instance list lock (which also
// guards host_streams).
static xdc_host_stream_t* xdc_get_host_stream(xdc_t* xdc, uint32_t stream_id)
    __TA_REQUIRES(xdc->instance_list_lock) {
    xdc_host_stream_t* host_stream;
    list_for_every_entry(&xdc->host_streams, host_stream, xdc_host_stream_t, node) {
        if (host_stream->stream_id == stream_id) {
            return host_stream;
        }
    }
    return nullptr;
}
354
355 // Sends a message to the host to notify when a xdc device stream becomes online or offline.
356 // If the message cannot be currently sent, it will be queued for later.
xdc_notify_stream_state(xdc_t * xdc,uint32_t stream_id,bool online)357 static void xdc_notify_stream_state(xdc_t* xdc, uint32_t stream_id, bool online) {
358 xdc_msg_t msg = {
359 .opcode = XDC_NOTIFY_STREAM_STATE,
360 .notify_stream_state = { .stream_id = stream_id, .online = online }
361 };
362
363 size_t actual;
364 zx_status_t status = xdc_write(xdc, XDC_MSG_STREAM, &msg, sizeof(msg), &actual,
365 true /* is_ctrl_msg */);
366 if (status == ZX_OK) {
367 // The write size is much less than the max packet size, so it should complete entirely.
368 ZX_DEBUG_ASSERT(actual == sizeof(xdc_msg_t));
369 } else {
370 // xdc_write should always queue ctrl msgs, unless some fatal error occurs e.g. OOM.
371 zxlogf(ERROR, "xdc_write_internal returned err: %d, dropping ctrl msg for stream id %u\n",
372 status, stream_id);
373 }
374 }
375
// Sets the stream id for the device instance.
// Returns ZX_OK if successful, or ZX_ERR_INVALID_ARGS if the stream id is unavailable.
static zx_status_t xdc_register_stream(xdc_instance_t* inst, uint32_t stream_id) {
    xdc_t* xdc = inst->parent;

    if (stream_id == DEBUG_STREAM_ID_RESERVED) {
        return ZX_ERR_INVALID_ARGS;
    }

    // Hold the instance list lock across the duplicate check and the update so
    // two instances cannot race to claim the same stream id.
    mtx_lock(&xdc->instance_list_lock);

    xdc_instance_t* test_inst;
    list_for_every_entry(&xdc->instance_list, test_inst, xdc_instance_t, node) {
        mtx_lock(&test_inst->lock);
        // We can only register the stream id if no one else already has.
        // NOTE(review): stream_id is compared without checking has_stream_id;
        // presumably unregistered instances keep the zeroed (reserved) id —
        // confirm DEBUG_STREAM_ID_RESERVED == 0.
        if (test_inst->stream_id == stream_id) {
            zxlogf(ERROR, "stream id %u was already registered\n", stream_id);
            mtx_unlock(&test_inst->lock);
            mtx_unlock(&xdc->instance_list_lock);
            return ZX_ERR_INVALID_ARGS;
        }
        mtx_unlock(&test_inst->lock);
    }

    mtx_lock(&inst->lock);
    inst->stream_id = stream_id;
    inst->has_stream_id = true;
    // Connected immediately if the host already registered the same stream id.
    inst->connected = xdc_get_host_stream(xdc, stream_id) != nullptr;
    mtx_unlock(&inst->lock);

    mtx_unlock(&xdc->instance_list_lock);

    // Notify the host that this stream id is available on the debug device.
    xdc_notify_stream_state(xdc, stream_id, true /* online */);

    mtx_lock(&xdc->write_lock);
    xdc_update_instance_write_signal(inst, xdc->writable);
    mtx_unlock(&xdc->write_lock);

    zxlogf(TRACE, "registered stream id %u\n", stream_id);
    return ZX_OK;
}
418
// Attempts to requeue the request on the IN endpoint.
// If not successful, the request is returned to the free_read_reqs list.
// Must be called with the read lock held.
static void xdc_queue_read_locked(xdc_t* xdc, usb_request_t* req) __TA_REQUIRES(xdc->read_lock) {
    zx_status_t status = xdc_queue_transfer(xdc, req, true /** in **/, false /* is_ctrl_msg */);
    if (status != ZX_OK) {
        zxlogf(ERROR, "xdc_read failed to re-queue request %d\n", status);
        // Park the request for later reuse rather than dropping it.
        status = xdc_req_list_add_tail(&xdc->free_read_reqs, req, sizeof(usb_request_t));
        ZX_DEBUG_ASSERT(status == ZX_OK);
    }
}
429
xdc_update_instance_read_signal_locked(xdc_instance_t * inst)430 static void xdc_update_instance_read_signal_locked(xdc_instance_t* inst)
431 __TA_REQUIRES(inst->lock) {
432 if (list_length(&inst->completed_reads) > 0) {
433 device_state_set(inst->zxdev, DEV_STATE_READABLE);
434 } else {
435 device_state_clr(inst->zxdev, DEV_STATE_READABLE);
436 }
437 }
438
xdc_read_instance(void * ctx,void * buf,size_t count,zx_off_t off,size_t * actual)439 static zx_status_t xdc_read_instance(void* ctx, void* buf, size_t count,
440 zx_off_t off, size_t* actual) {
441 auto* inst = static_cast<xdc_instance_t*>(ctx);
442 uint64_t usb_req_size = sizeof(usb_request_t);
443
444 mtx_lock(&inst->lock);
445
446 if (inst->dead) {
447 mtx_unlock(&inst->lock);
448 return ZX_ERR_PEER_CLOSED;
449 }
450
451 if (!inst->has_stream_id) {
452 zxlogf(ERROR, "read failed, instance %p did not have a valid stream id\n", inst);
453 mtx_unlock(&inst->lock);
454 return ZX_ERR_BAD_STATE;
455 }
456
457 if (list_is_empty(&inst->completed_reads)) {
458 mtx_unlock(&inst->lock);
459 return ZX_ERR_SHOULD_WAIT;
460 }
461
462 list_node_t done_reqs = LIST_INITIAL_VALUE(done_reqs);
463
464 size_t copied = 0;
465 usb_request_t* req;
466 // Copy up to the requested amount, or until we have no completed read buffers left.
467 while (copied < count) {
468 xdc_req_internal_t* req_int = list_peek_head_type(&inst->completed_reads,
469 xdc_req_internal_t, node);
470 if (req_int == nullptr) {
471 continue;
472 }
473 req = XDC_INTERNAL_TO_USB_REQ(req_int, sizeof(usb_request_t));
474 if (inst->cur_req_read_offset == 0) {
475 bool is_new_packet;
476 void* data;
477 zx_status_t status = usb_request_mmap(req, &data);
478 if (status != ZX_OK) {
479 zxlogf(ERROR, "usb_request_mmap failed, err: %d\n", status);
480 mtx_unlock(&inst->lock);
481 return ZX_ERR_BAD_STATE;
482 }
483
484 status = xdc_update_packet_state(&inst->cur_read_packet,
485 data, req->response.actual, &is_new_packet);
486 if (status != ZX_OK) {
487 mtx_unlock(&inst->lock);
488 return ZX_ERR_BAD_STATE;
489 }
490 if (is_new_packet) {
491 // Skip over the header, which contains internal metadata like stream id.
492 inst->cur_req_read_offset += sizeof(xdc_packet_header_t);
493 }
494 }
495 size_t req_bytes_left = req->response.actual - inst->cur_req_read_offset;
496 size_t to_copy = MIN(count - copied, req_bytes_left);
497 size_t bytes_copied = usb_request_copy_from(req, static_cast<uint8_t*>(buf) + copied,
498 to_copy, inst->cur_req_read_offset);
499
500 copied += bytes_copied;
501 inst->cur_req_read_offset += bytes_copied;
502
503 // Finished copying all the available bytes from this usb request buffer.
504 if (inst->cur_req_read_offset >= req->response.actual) {
505 list_remove_head(&inst->completed_reads);
506 zx_status_t status = xdc_req_list_add_tail(&done_reqs, req, usb_req_size);
507 ZX_DEBUG_ASSERT(status == ZX_OK);
508 inst->cur_req_read_offset = 0;
509 }
510 }
511
512 xdc_update_instance_read_signal_locked(inst);
513 mtx_unlock(&inst->lock);
514
515 xdc_t* xdc = inst->parent;
516 mtx_lock(&xdc->read_lock);
517 while ((req = xdc_req_list_remove_tail(&done_reqs, usb_req_size)) != nullptr) {
518 xdc_queue_read_locked(xdc, req);
519 }
520 mtx_unlock(&xdc->read_lock);
521
522 *actual = copied;
523 return ZX_OK;
524 }
525
// FIDL handler for fuchsia.usb.debug.Device/SetStream: registers the requested
// stream id on the instance and replies with the resulting status.
static zx_status_t fidl_SetStream(void* ctx, uint32_t stream_id, fidl_txn_t* txn) {
    auto* inst = static_cast<xdc_instance_t*>(ctx);
    return fuchsia_usb_debug_DeviceSetStream_reply(txn, xdc_register_stream(inst, stream_id));
}
530
// FIDL ops table for fuchsia.usb.debug.Device, dispatched from xdc_message.
static fuchsia_usb_debug_Device_ops_t fidl_ops = {
    .SetStream = fidl_SetStream,
};
534
// Device message hook: dispatches incoming FIDL messages to fidl_ops.
static zx_status_t xdc_message(void* ctx, fidl_msg_t* msg, fidl_txn_t* txn) {
    return fuchsia_usb_debug_Device_dispatch(ctx, txn, msg, &fidl_ops);
}
538
// Instance close hook: marks the instance dead, detaches it from the parent's
// instance list, recycles its unconsumed read requests, and notifies the host
// that the registered stream (if any) went offline.
static zx_status_t xdc_close_instance(void* ctx, uint32_t flags) {
    auto* inst = static_cast<xdc_instance_t*>(ctx);

    list_node_t free_reqs = LIST_INITIAL_VALUE(free_reqs);

    mtx_lock(&inst->lock);
    inst->dead = true;
    // Steal the completed-but-unread requests so they can be requeued below.
    list_move(&inst->completed_reads, &free_reqs);
    mtx_unlock(&inst->lock);

    mtx_lock(&inst->parent->instance_list_lock);
    list_delete(&inst->node);
    mtx_unlock(&inst->parent->instance_list_lock);

    xdc_t* xdc = inst->parent;
    // Return any unprocessed requests back to the read queue to be reused.
    mtx_lock(&xdc->read_lock);
    usb_request_t* req;
    uint64_t usb_req_size = sizeof(usb_request_t);
    while ((req = xdc_req_list_remove_tail(&free_reqs, usb_req_size)) != nullptr) {
        xdc_queue_read_locked(xdc, req);
    }
    mtx_unlock(&xdc->read_lock);

    if (inst->has_stream_id) {
        // Notify the host that this stream id is now unavailable on the debug device.
        xdc_notify_stream_state(xdc, inst->stream_id, false /* online */);
    }

    xdc->num_instances.fetch_add(-1);

    return ZX_OK;
}
572
xdc_release_instance(void * ctx)573 static void xdc_release_instance(void* ctx) {
574 auto* inst = static_cast<xdc_instance_t*>(ctx);
575 free(inst);
576 }
577
__anon953c27110202() 578 static zx_protocol_device_t xdc_instance_ops = []() {
579 zx_protocol_device_t device;
580 device.version = DEVICE_OPS_VERSION;
581 device.write = xdc_write_instance;
582 device.read = xdc_read_instance;
583 device.message = xdc_message;
584 device.close = xdc_close_instance;
585 device.release = xdc_release_instance;
586 return device;
587 }();
588
xdc_open(void * ctx,zx_device_t ** dev_out,uint32_t flags)589 static zx_status_t xdc_open(void* ctx, zx_device_t** dev_out, uint32_t flags) {
590 auto* xdc = static_cast<xdc_t*>(ctx);
591
592 auto* inst = static_cast<xdc_instance_t*>(calloc(1, sizeof(xdc_instance_t)));
593 if (inst == nullptr) {
594 return ZX_ERR_NO_MEMORY;
595 }
596
597 device_add_args_t args = {};
598 args.version = DEVICE_ADD_ARGS_VERSION;
599 args.name = "xdc";
600 args.ctx = inst;
601 args.ops = &xdc_instance_ops;
602 args.proto_id = ZX_PROTOCOL_USB_DBC;
603 args.flags = DEVICE_ADD_INSTANCE;
604
605 zx_status_t status;
606 status = device_add(xdc->zxdev, &args, &inst->zxdev);
607 if (status != ZX_OK) {
608 zxlogf(ERROR, "xdc: error creating instance %d\n", status);
609 free(inst);
610 return status;
611 }
612
613 inst->parent = xdc;
614 list_initialize(&inst->completed_reads);
615
616 mtx_lock(&xdc->instance_list_lock);
617 list_add_tail(&xdc->instance_list, &inst->node);
618 mtx_unlock(&xdc->instance_list_lock);
619
620 *dev_out = inst->zxdev;
621
622 xdc->num_instances.fetch_add(1);
623 sync_completion_signal(&xdc->has_instance_completion);
624 return ZX_OK;
625
626 }
627
// Stops the driver: joins the start thread, disables the debug capability in
// hardware, and fails every queued and pending request on both endpoints with
// ZX_ERR_IO_NOT_PRESENT. Called from suspend and unbind.
static void xdc_shutdown(xdc_t* xdc) {
    zxlogf(TRACE, "xdc_shutdown\n");
    uint64_t usb_req_size = sizeof(usb_request_t);

    xdc->suspended.store(true);
    // The poll thread will be waiting on this completion if no instances are open.
    sync_completion_signal(&xdc->has_instance_completion);

    int res;
    thrd_join(xdc->start_thread, &res);
    if (res != 0) {
        zxlogf(ERROR, "failed to join with xdc start_thread\n");
    }

    // Clear DCCTRL to disable the debug capability, then wait for the DCR
    // (run) bit to clear before touching the endpoint state.
    XHCI_WRITE32(&xdc->debug_cap_regs->dcctrl, 0);
    xdc_wait_bits(&xdc->debug_cap_regs->dcctrl, DCCTRL_DCR, 0);

    mtx_lock(&xdc->lock);
    xdc->configured = false;

    for (uint32_t i = 0; i < NUM_EPS; ++i) {
        xdc_endpoint_t* ep = &xdc->eps[i];
        ep->state = XDC_EP_STATE_DEAD;

        usb_request_t* req;
        xdc_req_internal_t* req_int;
        // Drain and complete all outstanding requests on this endpoint.
        while ((req_int = list_remove_tail_type(&ep->pending_reqs, xdc_req_internal_t, node))
               != nullptr) {
            req = XDC_INTERNAL_TO_USB_REQ(req_int, usb_req_size);
            usb_request_complete(req, ZX_ERR_IO_NOT_PRESENT, 0, &req_int->complete_cb);
        }
        while ((req_int = list_remove_tail_type(&ep->queued_reqs, xdc_req_internal_t, node))
               != nullptr) {
            req = XDC_INTERNAL_TO_USB_REQ(req_int, usb_req_size);
            usb_request_complete(req, ZX_ERR_IO_NOT_PRESENT, 0, &req_int->complete_cb);
        }
    }

    mtx_unlock(&xdc->lock);

    zxlogf(TRACE, "xdc_shutdown succeeded\n");
}
670
// Releases all resources owned by the xdc device: io buffers, event ring,
// transfer rings, and both request pools. Assumes xdc_shutdown already ran so
// no requests are in flight.
static void xdc_free(xdc_t* xdc) {
    zxlogf(INFO, "xdc_free\n");
    uint64_t usb_req_size = sizeof(usb_request_t);

    io_buffer_release(&xdc->erst_buffer);
    io_buffer_release(&xdc->context_str_descs_buffer);

    xhci_event_ring_free(&xdc->event_ring);

    for (uint32_t i = 0; i < NUM_EPS; ++i) {
        xdc_endpoint_t* ep = &xdc->eps[i];
        xhci_transfer_ring_free(&ep->transfer_ring);
    }

    usb_request_pool_release(&xdc->free_write_reqs);

    usb_request_t* req;
    while ((req = xdc_req_list_remove_tail(&xdc->free_read_reqs, usb_req_size)) != nullptr) {
        usb_request_release(req);
    }
    free(xdc);
}
693
xdc_suspend(void * ctx,uint32_t flags)694 static zx_status_t xdc_suspend(void* ctx, uint32_t flags) {
695 zxlogf(TRACE, "xdc_suspend %u\n", flags);
696 auto* xdc = static_cast<xdc_t*>(ctx);
697
698 // TODO(jocelyndang) do different things based on the flags.
699 // For now we shutdown the driver in preparation for mexec.
700 xdc_shutdown(xdc);
701
702 return ZX_OK;
703 }
704
// Unbind hook: shuts the driver down, marks every open instance dead so
// blocked clients wake and close, then removes the device.
static void xdc_unbind(void* ctx) {
    zxlogf(INFO, "xdc_unbind\n");
    auto* xdc = static_cast<xdc_t*>(ctx);
    xdc_shutdown(xdc);

    mtx_lock(&xdc->instance_list_lock);
    xdc_instance_t* inst;
    list_for_every_entry(&xdc->instance_list, inst, xdc_instance_t, node) {
        mtx_lock(&inst->lock);

        inst->dead = true;
        // Signal any waiting instances to wake up, so they will close the instance.
        device_state_set(inst->zxdev, DEV_STATE_WRITABLE | DEV_STATE_READABLE);

        mtx_unlock(&inst->lock);
    }
    mtx_unlock(&xdc->instance_list_lock);

    device_remove(xdc->zxdev);
}
725
xdc_release(void * ctx)726 static void xdc_release(void* ctx) {
727 zxlogf(INFO, "xdc_release\n");
728 auto* xdc = static_cast<xdc_t*>(ctx);
729 xdc_free(xdc);
730 }
731
// Recomputes whether the xdc device is writable (online with free TRBs on the
// OUT endpoint) and, only if that changed, propagates the new state to every
// open instance. Must be called with the write lock held.
static void xdc_update_write_signal_locked(xdc_t* xdc, bool online)
    __TA_REQUIRES(xdc->write_lock) {
    bool was_writable = xdc->writable;
    xdc->writable = online && xdc_has_free_trbs(xdc, false /* in */);
    if (was_writable == xdc->writable) {
        // No change: avoid walking the instance list.
        return;
    }

    mtx_lock(&xdc->instance_list_lock);
    xdc_instance_t* inst;
    list_for_every_entry(&xdc->instance_list, inst, xdc_instance_t, node) {
        xdc_update_instance_write_signal(inst, xdc->writable);
    }
    mtx_unlock(&xdc->instance_list_lock);
}
747
xdc_write_complete(void * ctx,usb_request_t * req)748 void xdc_write_complete(void* ctx, usb_request_t* req) {
749 auto* xdc = static_cast<xdc_t*>(ctx);
750
751 zx_status_t status = req->response.status;
752 if (status != ZX_OK) {
753 zxlogf(ERROR, "xdc_write_complete got unexpected error: %d\n", req->response.status);
754 }
755
756 mtx_lock(&xdc->write_lock);
757 ZX_DEBUG_ASSERT(usb_request_pool_add(&xdc->free_write_reqs, req) == ZX_OK);
758 xdc_update_write_signal_locked(xdc, status != ZX_ERR_IO_NOT_PRESENT /* online */);
759 mtx_unlock(&xdc->write_lock);
760 }
761
xdc_write(xdc_t * xdc,uint32_t stream_id,const void * buf,size_t count,size_t * actual,bool is_ctrl_msg)762 static zx_status_t xdc_write(xdc_t* xdc, uint32_t stream_id, const void* buf, size_t count,
763 size_t* actual, bool is_ctrl_msg) {
764 // TODO(jocelyndang): we should check for requests that are too big to fit on the transfer ring.
765
766 zx_status_t status = ZX_OK;
767
768 mtx_lock(&xdc->write_lock);
769
770 // We should always queue control messages unless there is an unrecoverable error.
771 if (!is_ctrl_msg && !xdc->writable) {
772 // Need to wait for some requests to complete.
773 mtx_unlock(&xdc->write_lock);
774 return ZX_ERR_SHOULD_WAIT;
775 }
776
777 size_t header_len = sizeof(xdc_packet_header_t);
778 xdc_packet_header_t header = {
779 .stream_id = stream_id,
780 .total_length = header_len + count
781 };
782 usb_request_t* req = usb_request_pool_get(&xdc->free_write_reqs, header.total_length);
783 if (!req) {
784 zx_status_t status = usb_request_alloc(&req, header.total_length, OUT_EP_ADDR,
785 sizeof(usb_request_t) + sizeof(xdc_req_internal_t));
786 if (status != ZX_OK) {
787 goto out;
788 }
789 }
790
791 usb_request_copy_to(req, &header, header_len, 0);
792 usb_request_copy_to(req, buf, count, header_len /* offset */);
793 req->header.length = header.total_length;
794
795 status = xdc_queue_transfer(xdc, req, false /* in */, is_ctrl_msg);
796 if (status != ZX_OK) {
797 zxlogf(ERROR, "xdc_write failed %d\n", status);
798 ZX_DEBUG_ASSERT(usb_request_pool_add(&xdc->free_write_reqs, req) == ZX_OK);
799 goto out;
800 }
801
802 *actual = count;
803
804 out:
805 xdc_update_write_signal_locked(xdc, status != ZX_ERR_IO_NOT_PRESENT /* online */);
806 mtx_unlock(&xdc->write_lock);
807 return status;
808 }
809
// Handles a control message received from the host on XDC_MSG_STREAM.
// Currently only XDC_NOTIFY_STREAM_STATE is understood: records the host
// stream coming online/offline and updates the connected flag (and write
// signal) of any instance registered to that stream id.
static void xdc_handle_msg(xdc_t* xdc, xdc_msg_t* msg) {
    switch (msg->opcode) {
    case XDC_NOTIFY_STREAM_STATE: {
        xdc_notify_stream_state_t* state = &msg->notify_stream_state;

        mtx_lock(&xdc->instance_list_lock);

        // Find the saved host stream if it exists.
        xdc_host_stream_t* host_stream = xdc_get_host_stream(xdc, state->stream_id);
        // Reject redundant transitions (already online / already offline).
        if (state->online == (host_stream != nullptr)) {
            zxlogf(ERROR, "cannot set host stream state for id %u as it was already %s\n",
                   state->stream_id, state->online ? "online" : "offline");
            mtx_unlock(&xdc->instance_list_lock);
            return;
        }
        if (state->online) {
            // NOTE(review): this inner host_stream deliberately shadows the
            // outer one, which is nullptr on this path.
            auto* host_stream = static_cast<xdc_host_stream_t*>(malloc(sizeof(xdc_host_stream_t)));
            if (!host_stream) {
                zxlogf(ERROR, "can't create host stream, out of memory!\n");
                mtx_unlock(&xdc->instance_list_lock);
                return;
            }
            zxlogf(TRACE, "setting host stream id %u as online\n", state->stream_id);
            host_stream->stream_id = state->stream_id;
            list_add_tail(&xdc->host_streams, &host_stream->node);
        } else {
            // Going offline: the outer host_stream is non-null here.
            zxlogf(TRACE, "setting host stream id %u as offline\n", state->stream_id);
            list_delete(&host_stream->node);
        }

        // Check if any instance is registered to this stream id and update its connected status.
        xdc_instance_t* test;
        xdc_instance_t* match = nullptr;
        list_for_every_entry(&xdc->instance_list, test, xdc_instance_t, node) {
            mtx_lock(&test->lock);
            if (test->has_stream_id && test->stream_id == state->stream_id) {
                zxlogf(TRACE, "stream id %u is now %s to the host\n",
                       state->stream_id, state->online ? "connected" : "disconnected");
                test->connected = state->online;
                match = test;
                mtx_unlock(&test->lock);
                break;
            }
            mtx_unlock(&test->lock);
        }
        mtx_unlock(&xdc->instance_list_lock);

        if (match) {
            // Notify the instance whether they can now write.
            mtx_lock(&xdc->write_lock);
            xdc_update_instance_write_signal(match, xdc->writable);
            mtx_unlock(&xdc->write_lock);
        }
        return;
    }
    default:
        zxlogf(ERROR, "unrecognized command: %d\n", msg->opcode);
    }
}
869
// Completion callback for requests on the read (bulk IN) endpoint.
// Depending on the completion status and packet contents, the request is
// either parked on the free list, requeued for another read, handed to the
// control-message handler, or appended to the completed_reads list of the
// instance registered for the packet's stream id.
void xdc_read_complete(void* ctx, usb_request_t* req) {
    auto* xdc = static_cast<xdc_t*>(ctx);

    mtx_lock(&xdc->read_lock);

    // The debug capability went away; stash the request on the free list
    // instead of requeueing it.
    if (req->response.status == ZX_ERR_IO_NOT_PRESENT) {
        zx_status_t status = xdc_req_list_add_tail(&xdc->free_read_reqs, req,
                                                   sizeof(usb_request_t));
        ZX_DEBUG_ASSERT(status == ZX_OK);
        goto out;
    }

    if (req->response.status != ZX_OK) {
        zxlogf(ERROR, "xdc_read_complete: req completion status = %d", req->response.status);
        xdc_queue_read_locked(xdc, req);
        goto out;
    }

    // Declared before assignment so the gotos above do not jump over an
    // initialization (ill-formed in C++).
    void* data;
    zx_status_t status;
    status = usb_request_mmap(req, &data);
    if (status != ZX_OK) {
        zxlogf(ERROR, "usb_request_mmap failed, err: %d\n", status);
        xdc_queue_read_locked(xdc, req);
        goto out;
    }
    bool new_header;
    // Advance packet-parsing state; new_header is set when this request begins
    // a new packet, meaning cur_read_packet.header (and its stream id) is valid.
    status = xdc_update_packet_state(&xdc->cur_read_packet, data, req->response.actual,
                                     &new_header);
    if (status != ZX_OK) {
        xdc_queue_read_locked(xdc, req);
        goto out;
    }

    // Control messages are addressed to the driver itself, not to an instance.
    if (new_header && xdc->cur_read_packet.header.stream_id == XDC_MSG_STREAM) {
        size_t offset = sizeof(xdc_packet_header_t);
        if (req->response.actual - offset < sizeof(xdc_msg_t)) {
            zxlogf(ERROR, "malformed xdc ctrl msg, len was %lu want %lu\n",
                   req->response.actual - offset, sizeof(xdc_msg_t));
            xdc_queue_read_locked(xdc, req);
            goto out;
        }
        // Copy the message out so the request can be requeued immediately.
        xdc_msg_t msg;
        usb_request_copy_from(req, &msg, sizeof(xdc_msg_t), offset);

        // We should process the control message outside of the lock, so requeue the request now.
        xdc_queue_read_locked(xdc, req);
        mtx_unlock(&xdc->read_lock);

        xdc_handle_msg(xdc, &msg);
        return;
    }

    // Find the instance that is registered for the stream id of the message.
    mtx_lock(&xdc->instance_list_lock);

    bool found;
    found = false;
    xdc_instance_t* inst;
    list_for_every_entry(&xdc->instance_list, inst, xdc_instance_t, node) {
        mtx_lock(&inst->lock);
        if (inst->has_stream_id && !inst->dead &&
            (inst->stream_id == xdc->cur_read_packet.header.stream_id)) {
            // Hand the request to the instance; it will be returned through the
            // instance's read path rather than requeued here.
            status = xdc_req_list_add_tail(&inst->completed_reads, req, sizeof(usb_request_t));
            ZX_DEBUG_ASSERT(status == ZX_OK);
            xdc_update_instance_read_signal_locked(inst);
            found = true;
            mtx_unlock(&inst->lock);
            break;
        }
        mtx_unlock(&inst->lock);
    }

    mtx_unlock(&xdc->instance_list_lock);

    if (!found) {
        // No registered reader for this stream; drop the data and requeue.
        zxlogf(ERROR, "read packet for stream id %u, but it is not currently registered\n",
               xdc->cur_read_packet.header.stream_id);
        xdc_queue_read_locked(xdc, req);
    }

out:
    mtx_unlock(&xdc->read_lock);
}
954
__anon953c27110302() 955 static zx_protocol_device_t xdc_device_ops = []() {
956 zx_protocol_device_t device;
957 device.version = DEVICE_OPS_VERSION;
958 device.open = xdc_open,
959 device.suspend = xdc_suspend,
960 device.unbind = xdc_unbind,
961 device.release = xdc_release;
962 return device;
963 }();
964
xdc_handle_port_status_change(xdc_t * xdc,xdc_poll_state_t * poll_state)965 static void xdc_handle_port_status_change(xdc_t* xdc, xdc_poll_state_t* poll_state) {
966 uint32_t dcportsc = XHCI_READ32(&xdc->debug_cap_regs->dcportsc);
967
968 if (dcportsc & DCPORTSC_CSC) {
969 poll_state->connected = dcportsc & DCPORTSC_CCS;
970 if (poll_state->connected) {
971 poll_state->last_conn = zx_clock_get_monotonic();
972 }
973 zxlogf(TRACE, "Port: Connect Status Change, connected: %d\n", poll_state->connected != 0);
974 }
975 if (dcportsc & DCPORTSC_PRC) {
976 zxlogf(TRACE, "Port: Port Reset complete\n");
977 }
978 if (dcportsc & DCPORTSC_PLC) {
979 zxlogf(TRACE, "Port: Port Link Status Change\n");
980 }
981 if (dcportsc & DCPORTSC_CEC) {
982 zxlogf(TRACE, "Port: Port Config Error detected\n");
983 }
984
985 // Ack change events.
986 XHCI_WRITE32(&xdc->debug_cap_regs->dcportsc, dcportsc);
987 }
988
// Drains the debug capability's event ring, dispatching every TRB whose cycle
// bit matches the ring's current consumer cycle state (CCS).
static void xdc_handle_events(xdc_t* xdc, xdc_poll_state_t* poll_state) {
    xhci_event_ring_t* er = &xdc->event_ring;

    // process all TRBs with cycle bit matching our CCS
    while ((XHCI_READ32(&er->current->control) & TRB_C) == er->ccs) {
        uint32_t type = trb_get_type(er->current);
        switch (type) {
        case TRB_EVENT_PORT_STATUS_CHANGE:
            xdc_handle_port_status_change(xdc, poll_state);
            break;
        case TRB_EVENT_TRANSFER:
            mtx_lock(&xdc->lock);
            xdc_handle_transfer_event_locked(xdc, poll_state, er->current);
            mtx_unlock(&xdc->lock);
            break;
        default:
            zxlogf(ERROR, "xdc_handle_events: unhandled event type %d\n", type);
            break;
        }

        // Advance the dequeue pointer; at the end of the (single-segment) ring,
        // wrap to the start and toggle the cycle state.
        er->current++;
        if (er->current == er->end) {
            er->current = er->start;
            er->ccs ^= TRB_C;
        }
    }
    // Tell the controller how far we've consumed.
    xdc_update_erdp(xdc);
}
1017
1018 // Returns whether we just entered the Configured state.
// Returns whether we just entered the Configured state.
// Also drains pending events, handles exit from the configured state, and
// retries Debug Device enumeration if configuration takes too long.
bool xdc_update_state(xdc_t* xdc, xdc_poll_state_t* poll_state) {
    // Non-zero means the event ring has TRBs waiting to be processed.
    uint32_t dcst = XHCI_GET_BITS32(&xdc->debug_cap_regs->dcst, DCST_ER_NOT_EMPTY_START,
                                    DCST_ER_NOT_EMPTY_BITS);
    if (dcst) {
        xdc_handle_events(xdc, poll_state);
    }

    uint32_t dcctrl = XHCI_READ32(&xdc->debug_cap_regs->dcctrl);

    // DRC set indicates the DbC has left the configured state.
    if (dcctrl & DCCTRL_DRC) {
        zxlogf(TRACE, "xdc configured exit\n");
        // Need to clear the bit to re-enable the DCDB.
        // TODO(jocelyndang): check if we need to update the transfer ring as per 7.6.4.4.
        XHCI_WRITE32(&xdc->debug_cap_regs->dcctrl, dcctrl);
        poll_state->configured = false;

        mtx_lock(&xdc->lock);
        xdc->configured = false;
        mtx_unlock(&xdc->lock);
    }

    bool entered_configured = false;
    // Just entered the Configured state.
    if (!poll_state->configured && (dcctrl & DCCTRL_DCR)) {
        uint32_t port = XHCI_GET_BITS32(&xdc->debug_cap_regs->dcst, DCST_PORT_NUM_START,
                                        DCST_PORT_NUM_BITS);
        if (port == 0) {
            zxlogf(ERROR, "xdc could not get port number\n");
        } else {
            entered_configured = true;
            poll_state->configured = true;

            mtx_lock(&xdc->lock);

            xdc->configured = true;
            zxlogf(INFO, "xdc configured on port: %u\n", port);

            // We just entered configured mode, so endpoints are ready. Queue any waiting messages.
            for (int i = 0; i < NUM_EPS; i++) {
                xdc_process_transactions_locked(xdc, &xdc->eps[i]);
            }

            mtx_unlock(&xdc->lock);
        }
    }

    // If it takes too long to enter the configured state, we should toggle the
    // DCE bit to retry the Debug Device enumeration process. See last paragraph of
    // 7.6.4.1 of XHCI spec.
    if (poll_state->connected && !poll_state->configured) {
        zx_duration_t waited_ns = zx_clock_get_monotonic() - poll_state->last_conn;

        if (waited_ns > TRANSITION_CONFIGURED_THRESHOLD) {
            zxlogf(ERROR, "xdc failed to enter configured state, toggling DCE\n");
            XHCI_WRITE32(&xdc->debug_cap_regs->dcctrl, 0);
            XHCI_WRITE32(&xdc->debug_cap_regs->dcctrl, DCCTRL_LSE | DCCTRL_DCE);

            // We won't get the disconnect event from disabling DCE, so update it now.
            poll_state->connected = false;
        }
    }
    return entered_configured;
}
1082
xdc_endpoint_set_halt_locked(xdc_t * xdc,xdc_poll_state_t * poll_state,xdc_endpoint_t * ep)1083 void xdc_endpoint_set_halt_locked(xdc_t* xdc, xdc_poll_state_t* poll_state, xdc_endpoint_t* ep)
1084 __TA_REQUIRES(xdc->lock) {
1085 bool* halt_state = ep->direction == USB_DIR_OUT ? &poll_state->halt_out : &poll_state->halt_in;
1086 *halt_state = true;
1087
1088 switch (ep->state) {
1089 case XDC_EP_STATE_DEAD:
1090 return;
1091 case XDC_EP_STATE_RUNNING:
1092 zxlogf(TRACE, "%s ep transitioned from running to halted\n", ep->name);
1093 ep->state = XDC_EP_STATE_HALTED;
1094 return;
1095 case XDC_EP_STATE_STOPPED:
1096 // This shouldn't happen as we don't schedule new TRBs when stopped.
1097 zxlogf(ERROR, "%s ep transitioned from stopped to halted\n", ep->name);
1098 ep->state = XDC_EP_STATE_HALTED;
1099 return;
1100 case XDC_EP_STATE_HALTED:
1101 return; // No change in state.
1102 default:
1103 zxlogf(ERROR, "unknown ep state: %d\n", ep->state);
1104 return;
1105 }
1106 }
1107
xdc_endpoint_clear_halt_locked(xdc_t * xdc,xdc_poll_state_t * poll_state,xdc_endpoint_t * ep)1108 static void xdc_endpoint_clear_halt_locked(xdc_t* xdc, xdc_poll_state_t* poll_state,
1109 xdc_endpoint_t* ep) __TA_REQUIRES(xdc->lock) {
1110 bool* halt_state = ep->direction == USB_DIR_OUT ? &poll_state->halt_out : &poll_state->halt_in;
1111 *halt_state = false;
1112
1113 switch (ep->state) {
1114 case XDC_EP_STATE_DEAD:
1115 case XDC_EP_STATE_RUNNING:
1116 return; // No change in state.
1117 case XDC_EP_STATE_STOPPED:
1118 break; // Already cleared the halt.
1119 case XDC_EP_STATE_HALTED:
1120 // The DbC has received the ClearFeature(ENDPOINT_HALT) request from the host.
1121 zxlogf(TRACE, "%s ep transitioned from halted to stopped\n", ep->name);
1122 ep->state = XDC_EP_STATE_STOPPED;
1123 break;
1124 default:
1125 zxlogf(ERROR, "unknown ep state: %d\n", ep->state);
1126 return;
1127 }
1128
1129 // If we get here, we are now in the STOPPED state and the halt has been cleared.
1130 // We should have processed the error events on the event ring once the halt flag was set,
1131 // but double-check this is the case.
1132 if (ep->got_err_event) {
1133 zx_status_t status = xdc_restart_transfer_ring_locked(xdc, ep);
1134 if (status != ZX_OK) {
1135 // This should never fail. If it does, disable the debug capability.
1136 // TODO(jocelyndang): the polling thread should re-initialize everything
1137 // if DCE is cleared.
1138 zxlogf(ERROR, "xdc_restart_transfer_ring got err %d, clearing DCE\n", status);
1139 XHCI_WRITE32(&xdc->debug_cap_regs->dcctrl, 0);
1140 }
1141 ep->got_err_event = false;
1142 }
1143 }
1144
xdc_update_endpoint_state(xdc_t * xdc,xdc_poll_state_t * poll_state,xdc_endpoint_t * ep)1145 void xdc_update_endpoint_state(xdc_t* xdc, xdc_poll_state_t* poll_state, xdc_endpoint_t* ep) {
1146 uint32_t dcctrl = XHCI_READ32(&xdc->debug_cap_regs->dcctrl);
1147 if (!(dcctrl & DCCTRL_DCR)) {
1148 // Halt bits are irrelevant when the debug capability isn't in Run Mode.
1149 return;
1150 }
1151 bool halt_state = ep->direction == USB_DIR_OUT ? poll_state->halt_out : poll_state->halt_in;
1152
1153 uint32_t bit = ep->direction == USB_DIR_OUT ? DCCTRL_HOT : DCCTRL_HIT;
1154 if (halt_state == !!(dcctrl & bit)) {
1155 // Nothing has changed.
1156 return;
1157 }
1158
1159 mtx_lock(&xdc->lock);
1160 if (dcctrl & bit) {
1161 xdc_endpoint_set_halt_locked(xdc, poll_state, ep);
1162 } else {
1163 xdc_endpoint_clear_halt_locked(xdc, poll_state, ep);
1164 }
1165 mtx_unlock(&xdc->lock);
1166 }
1167
// Main polling loop for the debug capability. Blocks until an instance is
// active, then repeatedly updates hardware/endpoint state and dispatches
// completed requests, until the driver is suspended.
zx_status_t xdc_poll(xdc_t* xdc) {
    xdc_poll_state_t poll_state;
    list_initialize(&poll_state.completed_reqs);
    uint64_t usb_req_size = sizeof(usb_request_t);

    for (;;) {
        zxlogf(TRACE, "xdc_poll: waiting for a new instance\n");
        // Wait for at least one active instance before polling.
        sync_completion_wait(&xdc->has_instance_completion, ZX_TIME_INFINITE);
        zxlogf(TRACE, "xdc_poll: instance completion signaled, about to enter poll loop\n");
        sync_completion_reset(&xdc->has_instance_completion);

        for (;;) {
            if (xdc->suspended.load()) {
                zxlogf(INFO, "xdc_poll: suspending xdc, shutting down poll thread\n");
                return ZX_OK;
            }
            // With no instances left, keep polling only until outstanding OUT
            // requests drain, then go back to waiting for a new instance.
            if (xdc->num_instances.load() == 0) {
                // If all pending writes have completed, exit the poll loop.
                mtx_lock(&xdc->lock);
                if (list_is_empty(&xdc->eps[OUT_EP_IDX].pending_reqs)) {
                    zxlogf(TRACE, "xdc_poll: no active instances, exiting inner poll loop\n");
                    mtx_unlock(&xdc->lock);
                    // Wait for a new instance to be active.
                    break;
                }
                mtx_unlock(&xdc->lock);
            }
            bool entered_configured = xdc_update_state(xdc, &poll_state);

            // Check if any EP has halted or recovered.
            for (int i = 0; i < NUM_EPS; i++) {
                xdc_endpoint_t* ep = &xdc->eps[i];
                xdc_update_endpoint_state(xdc, &poll_state, ep);
            }

            // If we just entered the configured state, we should schedule the read requests.
            if (entered_configured) {
                mtx_lock(&xdc->read_lock);
                usb_request_t* req;
                while ((req = xdc_req_list_remove_tail(&xdc->free_read_reqs,
                                                       usb_req_size)) != nullptr) {
                    xdc_queue_read_locked(xdc, req);
                }
                mtx_unlock(&xdc->read_lock);

                mtx_lock(&xdc->write_lock);
                xdc_update_write_signal_locked(xdc, true /* online */);
                mtx_unlock(&xdc->write_lock);
            }

            // Call complete callbacks out of the lock.
            // TODO(jocelyndang): might want a separate thread for this.
            xdc_req_internal_t* req_int;
            usb_request_t* req;
            while ((req_int = list_remove_head_type(&poll_state.completed_reqs,
                                                    xdc_req_internal_t, node)) != nullptr) {
                req = XDC_INTERNAL_TO_USB_REQ(req_int, usb_req_size);
                usb_request_complete(req, req->response.status, req->response.actual,
                                     &req_int->complete_cb);
            }
        }
    }
    // Unreachable: the loops above exit only via the suspended return.
    return ZX_OK;
}
1233
xdc_start_thread(void * arg)1234 static int xdc_start_thread(void* arg) {
1235 auto* xdc = static_cast<xdc_t*>(arg);
1236
1237 zxlogf(TRACE, "about to enable XHCI DBC\n");
1238 XHCI_WRITE32(&xdc->debug_cap_regs->dcctrl, DCCTRL_LSE | DCCTRL_DCE);
1239
1240 return xdc_poll(xdc);
1241 }
1242
1243 // This should only be called once in xdc_bind.
xdc_init_internal(xdc_t * xdc)1244 static zx_status_t xdc_init_internal(xdc_t* xdc) {
1245 mtx_init(&xdc->lock, mtx_plain);
1246
1247 list_initialize(&xdc->instance_list);
1248 mtx_init(&xdc->instance_list_lock, mtx_plain);
1249
1250 list_initialize(&xdc->host_streams);
1251
1252 sync_completion_reset(&xdc->has_instance_completion);
1253
1254 uint64_t usb_req_size = sizeof(usb_request_t);
1255 uint64_t total_req_size = usb_req_size + sizeof(xdc_req_internal_t);
1256 usb_request_pool_init(&xdc->free_write_reqs, usb_req_size +
1257 offsetof(xdc_req_internal_t, node));
1258 mtx_init(&xdc->write_lock, mtx_plain);
1259
1260 list_initialize(&xdc->free_read_reqs);
1261 mtx_init(&xdc->read_lock, mtx_plain);
1262
1263 // Allocate the usb requests for write / read.
1264 for (int i = 0; i < MAX_REQS; i++) {
1265 usb_request_t* req;
1266 zx_status_t status = usb_request_alloc(&req, MAX_REQ_SIZE, OUT_EP_ADDR, total_req_size);
1267 if (status != ZX_OK) {
1268 zxlogf(ERROR, "xdc failed to alloc write usb requests, err: %d\n", status);
1269 return status;
1270 }
1271 ZX_DEBUG_ASSERT(usb_request_pool_add(&xdc->free_write_reqs, req) == ZX_OK);
1272 }
1273 for (int i = 0; i < MAX_REQS; i++) {
1274 usb_request_t* req;
1275 zx_status_t status = usb_request_alloc(&req, MAX_REQ_SIZE, IN_EP_ADDR, total_req_size);
1276 if (status != ZX_OK) {
1277 zxlogf(ERROR, "xdc failed to alloc read usb requests, err: %d\n", status);
1278 return status;
1279 }
1280 status = xdc_req_list_add_head(&xdc->free_read_reqs, req, usb_req_size);
1281 ZX_DEBUG_ASSERT(status == ZX_OK);
1282 }
1283 return ZX_OK;
1284 }
1285
// Allocates and initializes the xdc driver state, publishes a non-bindable
// device, and starts the thread that enables the debug capability.
// On any failure before device_add, |xdc| is freed here via xdc_free.
zx_status_t xdc_bind(zx_device_t* parent, zx_handle_t bti_handle, void* mmio) {
    auto* xdc = static_cast<xdc_t*>(calloc(1, sizeof(xdc_t)));
    if (!xdc) {
        return ZX_ERR_NO_MEMORY;
    }
    xdc->bti_handle = bti_handle;
    xdc->mmio = mmio;

    zx_status_t status = xdc_init_internal(xdc);
    if (status != ZX_OK) {
        goto error_return;
    }
    status = xdc_get_debug_cap(xdc);
    if (status != ZX_OK) {
        zxlogf(ERROR, "xdc_get_debug_cap, err: %d\n", status);
        goto error_return;
    }
    status = xdc_init_debug_cap(xdc);
    if (status != ZX_OK) {
        zxlogf(ERROR, "xdc_init failed, err: %d\n", status);
        goto error_return;
    }

    // Declared then assigned (rather than initialized) so the gotos above do
    // not jump over an initialization, which is ill-formed in C++.
    device_add_args_t args;
    args = {};
    args.version = DEVICE_ADD_ARGS_VERSION;
    args.name = "xdc";
    args.ctx = xdc;
    args.ops = &xdc_device_ops;
    args.proto_id = ZX_PROTOCOL_USB_DBC;
    args.flags = DEVICE_ADD_NON_BINDABLE;

    status = device_add(parent, &args, &xdc->zxdev);
    if (status != ZX_OK) {
        goto error_return;
    }

    int ret;
    ret = thrd_create_with_name(&xdc->start_thread, xdc_start_thread, xdc, "xdc_start_thread");
    if (ret != thrd_success) {
        // NOTE(review): the device is already published, so we remove it
        // instead of calling xdc_free; presumably the release hook frees xdc.
        device_remove(xdc->zxdev);
        return ZX_ERR_BAD_STATE;
    }
    return ZX_OK;

error_return:
    zxlogf(ERROR, "xdc_bind failed: %d\n", status);
    xdc_free(xdc);
    return status;
}
1336