// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <ddk/debug.h>

#include "xdc.h"
#include "xdc-transfer.h"

// Reads a range of bits from an integer.
#define READ_FIELD(i, start, bits) (((i) >> (start)) & ((1 << (bits)) - 1))

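// Rings the Debug Capability doorbell to notify the controller that new TRBs
// are available on the endpoint's transfer ring.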
static void xdc_ring_doorbell(xdc_t* xdc, xdc_endpoint_t* ep) {
    uint8_t doorbell_val = ep->direction == USB_DIR_IN ? DCDB_DB_EP_IN : DCDB_DB_EP_OUT;
    XHCI_SET_BITS32(&xdc->debug_cap_regs->dcdb, DCDB_DB_START, DCDB_DB_BITS, doorbell_val);
}

// Stores the value of the Dequeue Pointer into out_dequeue.
// Returns ZX_OK if successful, or ZX_ERR_BAD_STATE if the endpoint was not in the Stopped state.
static zx_status_t xdc_get_dequeue_ptr_locked(xdc_t* xdc, xdc_endpoint_t* ep,
                                              uint64_t* out_dequeue) __TA_REQUIRES(xdc->lock) {
    if (ep->state != XDC_EP_STATE_STOPPED) {
        zxlogf(ERROR, "tried to read dequeue pointer of %s EP while not stopped, state is: %d\n",
               ep->name, ep->state);
        return ZX_ERR_BAD_STATE;
    }
    xdc_context_data_t* ctx = xdc->context_data;
    xhci_endpoint_context_t* epc = ep->direction == USB_DIR_OUT ? &ctx->out_epc : &ctx->in_epc;

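    // The 64-bit TR Dequeue Pointer is split across two fields of the endpoint context,
    // so read the high and low halves separately and combine them.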
    uint64_t dequeue_ptr_hi = XHCI_READ32(&epc->tr_dequeue_hi);
    uint32_t dequeue_ptr_lo = XHCI_READ32(&epc->epc2) & EP_CTX_TR_DEQUEUE_LO_MASK;
    *out_dequeue = (dequeue_ptr_hi << 32 | dequeue_ptr_lo);
    return ZX_OK;
}

// Returns ZX_OK if the request was scheduled successfully, or ZX_ERR_SHOULD_WAIT
// if we ran out of TRBs.
static zx_status_t xdc_schedule_transfer_locked(xdc_t* xdc, xdc_endpoint_t* ep,
                                                usb_request_t* req) __TA_REQUIRES(xdc->lock) {
    xhci_transfer_ring_t* ring = &ep->transfer_ring;

    // Need to clean the cache for both IN and OUT transfers, invalidate only for IN.
    if (ep->direction == USB_DIR_IN) {
        usb_request_cache_flush_invalidate(req, 0, req->header.length);
    } else {
        usb_request_cache_flush(req, 0, req->header.length);
    }

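    // Queue the data TRBs for this request on the endpoint's transfer ring.
    // The DbC endpoints are bulk endpoints, so the transfer is not isochronous.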
    zx_status_t status = xhci_queue_data_trbs(ring, &ep->transfer_state, req,
                                              0 /* interrupter */, false /* isochronous */);
    if (status != ZX_OK) {
        return status;
    }

    // If we get here, then we are ready to ring the doorbell.
    // Save the ring position so we can update the ring dequeue ptr once the transfer completes.
    xdc_req_internal_t* req_int = USB_REQ_TO_XDC_INTERNAL(req, sizeof(usb_request_t));
    req_int->context = ring->current;
    xdc_ring_doorbell(xdc, ep);

    return ZX_OK;
}

// Schedules any queued requests on the endpoint's transfer ring, until we fill our
// transfer ring or have no more requests.
void xdc_process_transactions_locked(xdc_t* xdc, xdc_endpoint_t* ep) __TA_REQUIRES(xdc->lock) {
    uint64_t usb_req_size = sizeof(usb_request_t);
    zx_status_t status;
    while (1) {
        if (xhci_transfer_ring_free_trbs(&ep->transfer_ring) == 0) {
            // No available TRBs - need to wait for some to complete.
            return;
        }

        if (!ep->current_req) {
            // Start the next transaction in the queue.
            usb_request_t* req = xdc_req_list_remove_head(&ep->queued_reqs, usb_req_size);
            if (!req) {
                // No requests waiting.
                return;
            }
            xhci_transfer_state_init(&ep->transfer_state, req,
                                     USB_ENDPOINT_BULK, EP_CTX_MAX_PACKET_SIZE);
            status = xdc_req_list_add_tail(&ep->pending_reqs, req, usb_req_size);
            ZX_DEBUG_ASSERT(status == ZX_OK);
            ep->current_req = req;
        }

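        // Schedule TRBs for the current request. This may be a request we just dequeued,
        // or one that was only partially scheduled before we ran out of TRBs.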
        usb_request_t* req = ep->current_req;
        status = xdc_schedule_transfer_locked(xdc, ep, req);
        if (status == ZX_ERR_SHOULD_WAIT) {
            // No available TRBs - need to wait for some to complete.
            return;
        } else {
            ep->current_req = nullptr;
        }
    }
}

zx_status_t xdc_queue_transfer(xdc_t* xdc, usb_request_t* req, bool in, bool is_ctrl_msg) {
    xdc_endpoint_t* ep = in ? &xdc->eps[IN_EP_IDX] : &xdc->eps[OUT_EP_IDX];

    mtx_lock(&xdc->lock);

    // We should always queue control messages unless there is an unrecoverable error.
    if (!is_ctrl_msg && (!xdc->configured || ep->state == XDC_EP_STATE_DEAD)) {
        mtx_unlock(&xdc->lock);
        return ZX_ERR_IO_NOT_PRESENT;
    }

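    // Map the request buffer to physical pages so the TRBs can reference it for DMA.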
    if (req->header.length > 0) {
        zx_status_t status = usb_request_physmap(req, xdc->bti_handle);
        if (status != ZX_OK) {
            zxlogf(ERROR, "%s: usb_request_physmap failed: %d\n", __FUNCTION__, status);
            mtx_unlock(&xdc->lock);
            return status;
        }
    }

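    // Set the completion callback to invoke once the transfer event for this request
    // has been handled.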
    xdc_req_internal_t* req_int = USB_REQ_TO_XDC_INTERNAL(req, sizeof(usb_request_t));
    req_int->complete_cb.callback = in ? xdc_read_complete : xdc_write_complete;
    req_int->complete_cb.ctx = xdc;

    list_add_tail(&ep->queued_reqs, &req_int->node);

    // We can still queue requests for later while waiting for the xdc device to be configured,
    // or while the endpoint is halted. Before scheduling the TRBs however, we should wait
    // for the device to be configured, and/or for the halt to be cleared by the DbC and the
    // transfer ring to be cleaned up.
    if (xdc->configured && ep->state == XDC_EP_STATE_RUNNING) {
        xdc_process_transactions_locked(xdc, ep);
    }

    mtx_unlock(&xdc->lock);

    return ZX_OK;
}

bool xdc_has_free_trbs(xdc_t* xdc, bool in) {
    mtx_lock(&xdc->lock);

    xdc_endpoint_t* ep = in ? &xdc->eps[IN_EP_IDX] : &xdc->eps[OUT_EP_IDX];
    bool has_trbs = xhci_transfer_ring_free_trbs(&ep->transfer_ring) > 0;

    mtx_unlock(&xdc->lock);
    return has_trbs;
}

zx_status_t xdc_restart_transfer_ring_locked(xdc_t* xdc, xdc_endpoint_t* ep) {
    // Once the DbC clears the halt flag for the endpoint, the address stored in the
    // TR Dequeue Pointer field is the next TRB to be executed (see XHCI Spec 7.6.4.3).
    // There seems to be no guarantee which TRB this will point to.
    //
    // The easiest way to deal with this is to convert all scheduled TRBs to NO-OPs,
    // and reschedule pending requests.

    uint64_t dequeue_ptr;
    zx_status_t status = xdc_get_dequeue_ptr_locked(xdc, ep, &dequeue_ptr);
    if (status != ZX_OK) {
        return status;
    }
    xhci_transfer_ring_t* ring = &ep->transfer_ring;
    xhci_trb_t* trb = xhci_transfer_ring_phys_to_trb(ring, dequeue_ptr);
    if (!trb) {
        zxlogf(ERROR, "no valid TRB corresponding to dequeue_ptr: %lu\n", dequeue_ptr);
        return ZX_ERR_BAD_STATE;
    }

    // Reset our copy of the dequeue pointer.
    xhci_set_dequeue_ptr(ring, trb);

    // Convert all pending TRBs on the transfer ring into NO-OP TRBs.
    // ring->current is just after our last queued TRB.
    xhci_trb_t* last_trb = nullptr;
    while (trb != ring->current) {
        xhci_set_transfer_noop_trb(trb);
        last_trb = trb;
        trb = xhci_get_next_trb(ring, trb);
    }
    if (last_trb) {
        // Set IOC (Interrupt on Completion) on the last NO-OP TRB, so we know
        // when we can overwrite them in the transfer ring.
        uint32_t control = XHCI_READ32(&last_trb->control);
        XHCI_WRITE32(&last_trb->control, control | XFER_TRB_IOC);
    }
    // Restart the transfer ring.
    xdc_ring_doorbell(xdc, ep);
    ep->state = XDC_EP_STATE_RUNNING;

    // Requeue and reschedule the requests.
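    // Removing from the tail of pending_reqs and adding to the head of queued_reqs
    // preserves the original ordering of the requests.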
    usb_request_t* req;
    uint64_t usb_req_size = sizeof(usb_request_t);
    while ((req = xdc_req_list_remove_tail(&ep->pending_reqs, usb_req_size)) != nullptr) {
        status = xdc_req_list_add_head(&ep->queued_reqs, req, usb_req_size);
        ZX_DEBUG_ASSERT(status == ZX_OK);
    }
    xdc_process_transactions_locked(xdc, ep);
    return ZX_OK;
}

void xdc_handle_transfer_event_locked(xdc_t* xdc, xdc_poll_state_t* poll_state, xhci_trb_t* trb) {
    uint32_t control = XHCI_READ32(&trb->control);
    uint32_t status = XHCI_READ32(&trb->status);
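    // The Endpoint ID field of the event TRB holds the device context index of the
    // endpoint; map it to our IN / OUT endpoint array index.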
    uint32_t ep_dev_ctx_idx = READ_FIELD(control, TRB_ENDPOINT_ID_START, TRB_ENDPOINT_ID_BITS);
    uint8_t xdc_ep_idx = ep_dev_ctx_idx == EP_IN_DEV_CTX_IDX ? IN_EP_IDX : OUT_EP_IDX;
    xdc_endpoint_t* ep = &xdc->eps[xdc_ep_idx];
    xhci_transfer_ring_t* ring = &ep->transfer_ring;
    uint64_t usb_req_size = sizeof(usb_request_t);

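    // The completion code and transferred length are packed into the status field of
    // the event TRB.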
    uint32_t cc = READ_FIELD(status, EVT_TRB_CC_START, EVT_TRB_CC_BITS);
    uint32_t length = READ_FIELD(status, EVT_TRB_XFER_LENGTH_START, EVT_TRB_XFER_LENGTH_BITS);
    usb_request_t* req = nullptr;
    bool error = false;

    switch (cc) {
        case TRB_CC_SUCCESS:
        case TRB_CC_SHORT_PACKET:
            break;
        case TRB_CC_BABBLE_DETECTED_ERROR:
        case TRB_CC_USB_TRANSACTION_ERROR:
        case TRB_CC_TRB_ERROR:
        case TRB_CC_STALL_ERROR:
            zxlogf(ERROR, "xdc_handle_transfer_event: error condition code: %d\n", cc);
            error = true;
            break;
        default:
            zxlogf(ERROR, "xdc_handle_transfer_event: unexpected condition code %d\n", cc);
            error = true;
            break;
    }

    // Even though the main poll loop checks for changes in the halt registers,
    // it's possible we missed the halt register being set if the halt was cleared fast enough.
    if (error) {
        if (ep->state == XDC_EP_STATE_RUNNING) {
            xdc_endpoint_set_halt_locked(xdc, poll_state, ep);
        }
        ep->got_err_event = true;
        // We're going to requeue the transfer when we restart the transfer ring,
        // so nothing else to do.
        return;
    }

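    // Locate the usb request associated with this transfer event.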
    if (control & EVT_TRB_ED) {
        // An Event Data TRB generated the completion event, so the TRB Pointer field
        // will contain the usb request pointer we previously stored.
        req = reinterpret_cast<usb_request_t*>(trb->ptr);
    } else {
        // Get the pointer to the TRB that generated the event.
        trb = xhci_read_trb_ptr(ring, trb);
        if (trb_get_type(trb) == TRB_TRANSFER_NOOP) {
            // If it's the NO-OP TRB we queued when dealing with the halt condition,
            // there won't be a corresponding usb request.
            zxlogf(TRACE, "xdc_handle_transfer_event: got a NO-OP TRB\n");
            xhci_set_dequeue_ptr(ring, xhci_get_next_trb(ring, trb));
            xdc_process_transactions_locked(xdc, ep);
            return;
        }

        // Look for the Event Data TRB which will have the usb request pointer.
        for (uint i = 0; i < TRANSFER_RING_SIZE && trb; i++) {
            if (trb_get_type(trb) == TRB_TRANSFER_EVENT_DATA) {
                req = reinterpret_cast<usb_request_t*>(trb->ptr);
                break;
            }
            trb = xhci_get_next_trb(ring, trb);
        }
    }

    if (!req) {
        zxlogf(ERROR, "xdc_handle_transfer_event: unable to find request to complete\n");
        return;
    }

    // Find the usb request in the pending list.
    bool found_req = false;
    usb_request_t* test;
    xdc_req_internal_t* test_int;
    list_for_every_entry(&ep->pending_reqs, test_int, xdc_req_internal_t, node) {
        test = XDC_INTERNAL_TO_USB_REQ(test_int, usb_req_size);
        if (test == req) {
            found_req = true;
            break;
        }
    }
    if (!found_req) {
        zxlogf(ERROR, "xdc_handle_transfer_event: ignoring event for completed transfer\n");
        return;
    }
    // Remove the request from pending_reqs.
    list_delete(&test_int->node);

    // Update our copy of the dequeue_ptr to the TRB following this transaction.
    xhci_set_dequeue_ptr(ring, static_cast<xhci_trb_t*>(test_int->context));
    xdc_process_transactions_locked(xdc, ep);

    // Save the request to be completed later, outside of the lock.
    req->response.status = ZX_OK;
    req->response.actual = length;
    status = xdc_req_list_add_tail(&poll_state->completed_reqs, req, usb_req_size);
    ZX_DEBUG_ASSERT(status == ZX_OK);
}