// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @timeout_msec: Default timeout for non-raw control messages
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	int timeout_msec;
	event_cb callback;
	void *callback_data;
};

#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object, so when you are done with it call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}
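
/*
 * A minimal sketch of the request lifecycle (illustrative only): the
 * caller owns the initial reference from tb_cfg_request_alloc() and
 * drops it with tb_cfg_request_put() when done. Helpers such as
 * tb_cfg_request_find() take their own temporary reference.
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *
 *	if (req) {
 *		... fill in request/response fields, submit it ...
 *		tb_cfg_request_put(req);
 *	}
 */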

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req = NULL, *iter;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(iter);
		if (iter->match(iter, pkg)) {
			req = iter;
			break;
		}
		tb_cfg_request_put(iter);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return req;
}

/* utility functions */


static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
		 "wrong framesize (expected %#x, got %#x)\n",
		 len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
		 type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
		 pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
		 "header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
		 "wrong route (expected %llx, got %llx)\n",
		 route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
		 space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
		 offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
		 length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/*
		 * Port is not connected. This can happen during surprise
		 * removal. Do not warn.
		 */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			    res->response_route, res->response_port);
		return;
	}
}

static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}
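
/*
 * Frame layout sketch, derived from tb_ctl_tx() and tb_ctl_rx_callback()
 * below: a control frame carries the payload as big-endian dwords
 * followed by one CRC32c dword computed over the big-endian payload.
 *
 *	+-----------------------+----------------------+
 *	| payload (N dwords BE) | tb_crc(payload, 4*N) |
 *	+-----------------------+----------------------+
 */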

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}


/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *)(pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}

/*
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	/*
	 * We ignore failures during stop. All rx packets are referenced
	 * from ctl->rx_packets, so we do not lose them.
	 */
	tb_ring_rx(pkg->ctl->rx, &pkg->frame);
}

static bool tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
	case TB_CFG_ERROR_DP_BW:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after the timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request without waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is no longer active.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in ms how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers, the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}
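
/*
 * A minimal usage sketch (illustrative only; request and reply are
 * hypothetical packet structures of the caller): fill in a matched
 * request/response pair and submit it synchronously. tb_cfg_reset()
 * below follows exactly this pattern.
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *
 *	req->match = tb_cfg_match;
 *	req->copy = tb_cfg_copy;
 *	req->request = &request;
 *	req->request_size = sizeof(request);
 *	req->request_type = TB_CFG_PKG_RESET;
 *	req->response = &reply;
 *	req->response_size = sizeof(reply);
 *	req->response_type = TB_CFG_PKG_RESET;
 *
 *	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);
 *	tb_cfg_request_put(req);
 */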

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @timeout_msec: Default timeout used with non-raw control messages
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
			    void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;

	ctl->nhi = nhi;
	ctl->timeout_msec = timeout_msec;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}
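
/*
 * Sketch of the expected alloc/start/stop/free lifecycle (illustrative
 * only; the callback name is hypothetical):
 *
 *	struct tb_ctl *ctl = tb_ctl_alloc(nhi, timeout, my_event_cb, tb);
 *
 *	if (ctl) {
 *		tb_ctl_start(ctl);
 *		...
 *		tb_ctl_stop(ctl);
 *		tb_ctl_free(ctl);
 *	}
 */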

/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_notification() - Ack notification
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @error: Pointer to the notification package
 *
 * Call this in response to a non-plug notification to ack it. Returns
 * %0 on success or an error code on failure.
 */
int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
			    const struct cfg_error_pkg *error)
{
	struct cfg_ack_pkg pkg = {
		.header = tb_cfg_make_header(route),
	};
	const char *name;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
		name = "link error";
		break;
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
		name = "HEC error";
		break;
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		name = "flow control error";
		break;
	case TB_CFG_ERROR_DP_BW:
		name = "DP_BW";
		break;
	default:
		name = "unknown";
		break;
	}

	tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
		   error->error, route);

	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK);
}

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this in response to a hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
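/*
 * Replies are matched to the request that produced them. Note the
 * sequence number check below: tb_cfg_read_raw() and tb_cfg_write_raw()
 * bump addr.seq on every retry, so a late reply to an earlier (timed
 * out) attempt no longer matches and is dropped.
 */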
static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Route string of the router to reset
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}

/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;

	return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}
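
/*
 * A minimal usage sketch (illustrative only; the variable names are
 * hypothetical): reading one dword from offset 0 of a router's config
 * space. tb_cfg_get_upstream_port() below does the same through the
 * raw variant.
 *
 *	u32 val;
 *	int ret = tb_cfg_read(ctl, &val, route, 0, TB_CFG_SWITCH, 0, 1);
 *
 *	if (ret)
 *		return ret;
 */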

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   ctl->timeout_msec);

	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}