// SPDX-License-Identifier: GPL-2.0
/*
 * USB4 specific functionality
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>

#include "sb_regs.h"
#include "tb.h"

#define USB4_DATA_RETRIES		3

enum usb4_sb_target {
	USB4_SB_TARGET_ROUTER,
	USB4_SB_TARGET_PARTNER,
	USB4_SB_TARGET_RETIMER,
};

#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT	2
#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT	24

#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT

#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT		2
#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT		15

#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)

#define USB4_BA_LENGTH_MASK		GENMASK(7, 0)
#define USB4_BA_INDEX_MASK		GENMASK(15, 0)

enum usb4_ba_index {
	USB4_BA_MAX_USB3 = 0x1,
	USB4_BA_MIN_DP_AUX = 0x2,
	USB4_BA_MIN_DP_MAIN = 0x3,
	USB4_BA_MAX_PCIE = 0x4,
	USB4_BA_MAX_HI = 0x5,
};

#define USB4_BA_VALUE_MASK		GENMASK(31, 16)
#define USB4_BA_VALUE_SHIFT		16

static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status,
				 const void *tx_data, size_t tx_dwords,
				 void *rx_data, size_t rx_dwords)
{
	u32 val;
	int ret;

	if (metadata) {
		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (tx_dwords) {
		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				  tx_dwords);
		if (ret)
			return ret;
	}

	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	if (status)
		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;

	if (metadata) {
		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (rx_dwords) {
		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				 rx_dwords);
		if (ret)
			return ret;
	}

	return 0;
}

static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
			    u8 *status, const void *tx_data, size_t tx_dwords,
			    void *rx_data, size_t rx_dwords)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	/*
	 * If the connection manager implementation provides USB4 router
	 * operation proxy callback, call it here instead of running the
	 * operation natively.
	 */
	if (cm_ops->usb4_switch_op) {
		int ret;

		ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
					     tx_data, tx_dwords, rx_data,
					     rx_dwords);
		if (ret != -EOPNOTSUPP)
			return ret;

		/*
		 * If the proxy was not supported then run the native
		 * router operation instead.
		 */
	}

	return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
				     tx_dwords, rx_data, rx_dwords);
}

static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status)
{
	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
}

static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
				      u32 *metadata, u8 *status,
				      const void *tx_data, size_t tx_dwords,
				      void *rx_data, size_t rx_dwords)
{
	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
				tx_dwords, rx_data, rx_dwords);
}

static void usb4_switch_check_wakes(struct tb_switch *sw)
{
	bool wakeup_usb4 = false;
	struct usb4_port *usb4;
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	if (!device_may_wakeup(&sw->dev))
		return;

	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/*
	 * Check for any downstream ports for USB4 wake,
	 * connection wake and disconnection wake.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!port->cap_usb4)
			continue;

		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no",
			    (val & PORT_CS_18_WOCS) ? "yes" : "no",
			    (val & PORT_CS_18_WODS) ? "yes" : "no");

		wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
				     PORT_CS_18_WODS);

		usb4 = port->usb4;
		if (device_may_wakeup(&usb4->dev) && wakeup_usb4)
			pm_wakeup_event(&usb4->dev, 0);

		wakeup |= wakeup_usb4;
	}

	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}

static bool link_is_usb4(struct tb_port *port)
{
	u32 val;

	if (!port->cap_usb4)
		return false;

	if (tb_port_read(port, &val, TB_CFG_PORT,
			 port->cap_usb4 + PORT_CS_18, 1))
		return false;

	return !(val & PORT_CS_18_TCM);
}

/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g. the parent switch also supports them). If USB tunneling
 * is not available for some reason (like there is a Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_port *downstream_port;
	struct tb_switch *parent;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	usb4_switch_check_wakes(sw);

	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	parent = tb_switch_parent(sw);
	downstream_port = tb_port_at(tb_route(sw), parent);
	sw->link_usb4 = link_is_usb4(downstream_port);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
	    tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/*
	 * Only enable PCIe tunneling if the parent router supports it
	 * and it is not disabled.
	 */
	if (tb_acpi_may_tunnel_pcie() &&
	    tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 downstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val |= ROUTER_CS_5_C3S;
	/* Tunneling configuration is ready now */
	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
				      ROUTER_CS_6_CR, 50);
}

/**
 * usb4_switch_read_uid() - Read UID from USB4 router
 * @sw: USB4 router
 * @uid: UID is stored here
 *
 * Reads 64-bit UID from USB4 router config space.
 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}

static int usb4_switch_drom_read_block(void *data,
				       unsigned int dwaddress, void *buf,
				       size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
		USB4_DROM_ADDRESS_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
 * @sw: USB4 router
 * @address: Byte address inside DROM to start reading
 * @buf: Buffer where the DROM content is stored
 * @size: Number of bytes to read from DROM
 *
 * Uses USB4 router operations to read router DROM. For devices this
 * should always work but for hosts it may return %-EOPNOTSUPP in which
 * case the host router does not have DROM.
 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_drom_read_block, sw);
}

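/*
 * Illustrative sketch (not part of the driver): reading the start of
 * the DROM while tolerating host routers that do not implement it. The
 * buffer size and the helper name example_read_drom_start() are made
 * up for illustration only.
 *
 *	static int example_read_drom_start(struct tb_switch *sw)
 *	{
 *		u8 buf[64];
 *		int ret;
 *
 *		ret = usb4_switch_drom_read(sw, 0, buf, sizeof(buf));
 *		if (ret == -EOPNOTSUPP)
 *			return 0;	(host router without DROM)
 *		return ret;
 *	}
 */
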
/**
 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
 * @sw: USB4 router
 *
 * Checks whether conditions are met so that lane bonding can be
 * established with the upstream router. Call only for device routers.
 */
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int ret;
	u32 val;

	up = tb_upstream_port(sw);
	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		return false;

	return !!(val & PORT_CS_18_BE);
}

/**
 * usb4_switch_set_wake() - Enable/disable wake
 * @sw: USB4 router
 * @flags: Wakeup flags (%0 to disable)
 *
 * Enables/disables router to wake up from sleep.
 */
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	struct usb4_port *usb4;
	struct tb_port *port;
	u64 route = tb_route(sw);
	u32 val;
	int ret;

	/*
	 * Enable wakes coming from all USB4 downstream ports (from
	 * child routers). For device routers do this also for the
	 * upstream USB4 port.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;
		if (!route && tb_is_upstream_port(port))
			continue;
		if (!port->cap_usb4)
			continue;

		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;

		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);

		if (tb_is_upstream_port(port)) {
			val |= PORT_CS_19_WOU4;
		} else {
			bool configured = val & PORT_CS_19_PC;
			usb4 = port->usb4;

			if (((flags & TB_WAKE_ON_CONNECT) |
			    device_may_wakeup(&usb4->dev)) && !configured)
				val |= PORT_CS_19_WOC;
			if (((flags & TB_WAKE_ON_DISCONNECT) |
			    device_may_wakeup(&usb4->dev)) && configured)
				val |= PORT_CS_19_WOD;
			if ((flags & TB_WAKE_ON_USB4) && configured)
				val |= PORT_CS_19_WOU4;
		}

		ret = tb_port_write(port, &val, TB_CFG_PORT,
				    port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;
	}

	/*
	 * Enable wakes from PCIe, USB 3.x and DP on this router. Only
	 * needed for device routers.
	 */
	if (route) {
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;

		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
		if (flags & TB_WAKE_ON_USB3)
			val |= ROUTER_CS_5_WOU;
		if (flags & TB_WAKE_ON_PCIE)
			val |= ROUTER_CS_5_WOP;
		if (flags & TB_WAKE_ON_DP)
			val |= ROUTER_CS_5_WOD;

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Sets sleep bit for the router. Returns when the router sleep ready
 * bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
				      ROUTER_CS_6_SLPR, 500);
}

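/*
 * Illustrative sketch (assumption, not taken from this file): a typical
 * suspend path arms the wanted wakes first and only then sets the sleep
 * bit. The helper name example_suspend_router() is hypothetical.
 *
 *	static int example_suspend_router(struct tb_switch *sw)
 *	{
 *		int ret;
 *
 *		ret = usb4_switch_set_wake(sw, TB_WAKE_ON_USB4 |
 *					   TB_WAKE_ON_CONNECT |
 *					   TB_WAKE_ON_DISCONNECT);
 *		if (ret)
 *			return ret;
 *
 *		return usb4_switch_set_sleep(sw);
 *	}
 */
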
/**
 * usb4_switch_nvm_sector_size() - Return router NVM sector size
 * @sw: USB4 router
 *
 * If the router supports NVM operations this function returns the NVM
 * sector size in bytes. If NVM operations are not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
			     &status);
	if (ret)
		return ret;

	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

static int usb4_switch_nvm_read_block(void *data,
	unsigned int dwaddress, void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
		   USB4_NVM_READ_LENGTH_MASK;
	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
		    USB4_NVM_READ_OFFSET_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_nvm_read_block, sw);
}

/**
 * usb4_switch_nvm_set_offset() - Set NVM write offset
 * @sw: USB4 router
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this
 * is done automatically by usb4_switch_nvm_write().
 *
 * Returns %0 on success and negative errno if there was a failure.
 */
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
{
	u32 metadata, dwaddress;
	u8 status = 0;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		   USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
			     &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
					    const void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status;
	int ret;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
				  buf, dwords, NULL, 0);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_switch_nvm_write_next_block, sw);
}

/**
 * usb4_switch_nvm_authenticate() - Authenticate new NVM
 * @sw: USB4 router
 *
 * After the new NVM has been written via usb4_switch_nvm_write(), this
 * function triggers NVM authentication process. The router gets power
 * cycled and if the authentication is successful the new NVM starts
 * running. In case of failure returns negative errno.
 *
 * The caller should call usb4_switch_nvm_authenticate_status() to read
 * the status of the authentication after power cycle. It should be the
 * first router operation to avoid the status being lost.
 */
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
	switch (ret) {
	/*
	 * The router is power cycled once NVM_AUTH is started so it is
	 * expected to get any of the following errors back.
	 */
	case -EACCES:
	case -ENOTCONN:
	case -ETIMEDOUT:
		return 0;

	default:
		return ret;
	}
}

/**
 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
 * @sw: USB4 router
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last NVM
 * authenticate router operation. If there is status then %0 is returned
 * and the status code is placed in @status. Returns negative errno in case
 * of failure.
 *
 * Must be called before any other router operation.
 */
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
	u16 opcode;
	u32 val;
	int ret;

	if (cm_ops->usb4_switch_nvm_authenticate_status) {
		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Check that the opcode is correct */
	opcode = val & ROUTER_CS_26_OPCODE_MASK;
	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
		if (val & ROUTER_CS_26_OV)
			return -EBUSY;
		if (val & ROUTER_CS_26_ONS)
			return -EOPNOTSUPP;

		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;
	} else {
		*status = 0;
	}

	return 0;
}

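/*
 * Illustrative sketch of the full NVM upgrade flow described above;
 * example_nvm_upgrade() and its parameters are hypothetical. Note that
 * the status must be read as the first router operation after the
 * router has power cycled.
 *
 *	static int example_nvm_upgrade(struct tb_switch *sw,
 *				       const void *image, size_t size)
 *	{
 *		u32 status;
 *		int ret;
 *
 *		ret = usb4_switch_nvm_write(sw, 0, image, size);
 *		if (ret)
 *			return ret;
 *
 *		ret = usb4_switch_nvm_authenticate(sw);
 *		if (ret)
 *			return ret;
 *
 *		(wait for the router to come back after the power cycle)
 *
 *		ret = usb4_switch_nvm_authenticate_status(sw, &status);
 *		if (ret)
 *			return ret;
 *		return status ? -EIO : 0;
 *	}
 */
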
/**
 * usb4_switch_credits_init() - Read buffer allocation parameters
 * @sw: USB4 router
 *
 * Reads @sw buffer allocation parameters and initializes @sw buffer
 * allocation fields accordingly. Specifically @sw->credit_allocation
 * is set to %true if these parameters can be used in tunneling.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int usb4_switch_credits_init(struct tb_switch *sw)
{
	int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
	int ret, length, i, nports;
	const struct tb_port *port;
	u32 data[NVM_DATA_DWORDS];
	u32 metadata = 0;
	u8 status = 0;

	memset(data, 0, sizeof(data));
	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
				  &status, NULL, 0, data, ARRAY_SIZE(data));
	if (ret)
		return ret;
	if (status)
		return -EIO;

	length = metadata & USB4_BA_LENGTH_MASK;
	if (WARN_ON(length > ARRAY_SIZE(data)))
		return -EMSGSIZE;

	max_usb3 = -1;
	min_dp_aux = -1;
	min_dp_main = -1;
	max_pcie = -1;
	max_dma = -1;

	tb_sw_dbg(sw, "credit allocation parameters:\n");

	for (i = 0; i < length; i++) {
		u16 index, value;

		index = data[i] & USB4_BA_INDEX_MASK;
		value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;

		switch (index) {
		case USB4_BA_MAX_USB3:
			tb_sw_dbg(sw, " USB3: %u\n", value);
			max_usb3 = value;
			break;
		case USB4_BA_MIN_DP_AUX:
			tb_sw_dbg(sw, " DP AUX: %u\n", value);
			min_dp_aux = value;
			break;
		case USB4_BA_MIN_DP_MAIN:
			tb_sw_dbg(sw, " DP main: %u\n", value);
			min_dp_main = value;
			break;
		case USB4_BA_MAX_PCIE:
			tb_sw_dbg(sw, " PCIe: %u\n", value);
			max_pcie = value;
			break;
		case USB4_BA_MAX_HI:
			tb_sw_dbg(sw, " DMA: %u\n", value);
			max_dma = value;
			break;
		default:
			tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
				  index);
			break;
		}
	}

	/*
	 * Validate the buffer allocation preferences. If we find
	 * issues, log a warning and fall back using the hard-coded
	 * values.
	 */

	/* Host router must report baMaxHI */
	if (!tb_route(sw) && max_dma < 0) {
		tb_sw_warn(sw, "host router is missing baMaxHI\n");
		goto err_invalid;
	}

	nports = 0;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_null(port))
			nports++;
	}

	/* Must have DP buffer allocation (multiple USB4 ports) */
	if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
		tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
		goto err_invalid;
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpout(port) && min_dp_main < 0) {
			tb_sw_warn(sw, "missing baMinDPmain");
			goto err_invalid;
		}
		if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
		    min_dp_aux < 0) {
			tb_sw_warn(sw, "missing baMinDPaux");
			goto err_invalid;
		}
		if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
		    max_usb3 < 0) {
			tb_sw_warn(sw, "missing baMaxUSB3");
			goto err_invalid;
		}
		if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
		    max_pcie < 0) {
			tb_sw_warn(sw, "missing baMaxPCIe");
			goto err_invalid;
		}
	}

	/*
	 * Buffer allocation passed the validation so we can use it in
	 * path creation.
	 */
	sw->credit_allocation = true;
	if (max_usb3 > 0)
		sw->max_usb3_credits = max_usb3;
	if (min_dp_aux > 0)
		sw->min_dp_aux_credits = min_dp_aux;
	if (min_dp_main > 0)
		sw->min_dp_main_credits = min_dp_main;
	if (max_pcie > 0)
		sw->max_pcie_credits = max_pcie;
	if (max_dma > 0)
		sw->max_dma_credits = max_dma;

	return 0;

err_invalid:
	return -EINVAL;
}

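/*
 * Illustrative sketch (assumption): once usb4_switch_credits_init() has
 * run, path setup code can prefer the announced parameters over the
 * hard-coded defaults, e.g.:
 *
 *	if (sw->credit_allocation)
 *		credits = sw->max_usb3_credits;
 *	else
 *		credits = some driver default;
 */
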
/**
 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * For DP tunneling this function can be used to query availability of
 * DP IN resource. Returns true if the resource is available for DP
 * tunneling, false otherwise.
 */
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
			     &status);
	/*
	 * If DP resource allocation is not supported assume it is
	 * always available.
	 */
	if (ret == -EOPNOTSUPP)
		return true;
	else if (ret)
		return false;

	return !status;
}

/**
 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Allocates DP IN resource for DP tunneling using USB4 router
 * operations. If the resource was allocated returns %0. Otherwise
 * returns negative errno, in particular %-EBUSY if the resource is
 * already allocated.
 */
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
			     &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EBUSY : 0;
}

/**
 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Releases the previously allocated DP IN resource.
 */
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u32 metadata = in->port;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
			     &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EIO : 0;
}

static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
{
	struct tb_port *p;
	int usb4_idx = 0;

	/* Assume port is primary */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_null(p))
			continue;
		if (tb_is_upstream_port(p))
			continue;
		if (!p->link_nr) {
			if (p == port)
				break;
			usb4_idx++;
		}
	}

	return usb4_idx;
}

/**
 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and PCIe
 * downstream adapters where the PCIe topology is extended. This
 * function returns the corresponding downstream PCIe adapter or %NULL
 * if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_idx(sw, port);
	struct tb_port *p;
	int pcie_idx = 0;

	/* Find PCIe down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_pcie_down(p))
			continue;

		if (pcie_idx == usb4_idx)
			return p;

		pcie_idx++;
	}

	return NULL;
}

/**
 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
 * @sw: USB4 router
 * @port: USB4 port
 *
 * USB4 routers have direct mapping between USB4 ports and USB 3.x
 * downstream adapters where the USB 3.x topology is extended. This
 * function returns the corresponding downstream USB 3.x adapter or
 * %NULL if no such mapping was possible.
 */
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
					  const struct tb_port *port)
{
	int usb4_idx = usb4_port_idx(sw, port);
	struct tb_port *p;
	int usb_idx = 0;

	/* Find USB3 down port matching usb4_port */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_usb3_down(p))
			continue;

		if (usb_idx == usb4_idx)
			return p;

		usb_idx++;
	}

	return NULL;
}

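/*
 * Illustrative sketch (assumption): during tunnel setup the mapping
 * helpers above give the protocol adapter that backs a given USB4
 * port, e.g. for PCIe:
 *
 *	struct tb_port *down;
 *
 *	down = usb4_switch_map_pcie_down(sw, port);
 *	if (!down)
 *		there is no PCIe downstream adapter for this USB4 port;
 *	else
 *		extend the PCIe topology through "down";
 */
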
/**
 * usb4_switch_add_ports() - Add USB4 ports for this router
 * @sw: USB4 router
 *
 * For a USB4 router finds all USB4 ports and registers devices for
 * each. Can be called for any router.
 *
 * Returns %0 in case of success and negative errno in case of failure.
 */
int usb4_switch_add_ports(struct tb_switch *sw)
{
	struct tb_port *port;

	if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
		return 0;

	tb_switch_for_each_port(sw, port) {
		struct usb4_port *usb4;

		if (!tb_port_is_null(port))
			continue;
		if (!port->cap_usb4)
			continue;

		usb4 = usb4_port_device_add(port);
		if (IS_ERR(usb4)) {
			usb4_switch_remove_ports(sw);
			return PTR_ERR(usb4);
		}

		port->usb4 = usb4;
	}

	return 0;
}

/**
 * usb4_switch_remove_ports() - Removes USB4 ports from this router
 * @sw: USB4 router
 *
 * Unregisters previously registered USB4 ports.
 */
void usb4_switch_remove_ports(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->usb4) {
			usb4_port_device_remove(port->usb4);
			port->usb4 = NULL;
		}
	}
}

/**
 * usb4_port_unlock() - Unlock USB4 downstream port
 * @port: USB4 port to unlock
 *
 * Unlocks USB4 downstream port so that the connection manager can
 * access the router below this port.
 */
int usb4_port_unlock(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	val &= ~ADP_CS_4_LCK;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * usb4_port_hotplug_enable() - Enables hotplug for a port
 * @port: USB4 port to operate on
 *
 * Enables hot plug events on a given port. This is only intended
 * to be used on lane, DP-IN, and DP-OUT adapters.
 */
int usb4_port_hotplug_enable(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	val &= ~ADP_CS_5_DHP;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}

static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PC;
	else
		val &= ~PORT_CS_19_PC;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}

/**
 * usb4_port_configure() - Set USB4 port configured
 * @port: USB4 port
 *
 * Sets the USB4 link to be configured for power management purposes.
 */
int usb4_port_configure(struct tb_port *port)
{
	return usb4_port_set_configured(port, true);
}

/**
 * usb4_port_unconfigure() - Set USB4 port unconfigured
 * @port: USB4 port
 *
 * Sets the USB4 link to be unconfigured for power management purposes.
 */
void usb4_port_unconfigure(struct tb_port *port)
{
	usb4_port_set_configured(port, false);
}

static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	if (configured)
		val |= PORT_CS_19_PID;
	else
		val &= ~PORT_CS_19_PID;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}

/**
 * usb4_port_configure_xdomain() - Configure port for XDomain
 * @port: USB4 port connected to another host
 * @xd: XDomain that is connected to the port
 *
 * Marks the USB4 port as being connected to another host and updates
 * the link type. Returns %0 on success and negative errno in case of
 * failure.
 */
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	xd->link_usb4 = link_is_usb4(port);
	return usb4_set_xdomain_configured(port, true);
}

/**
 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
 * @port: USB4 port that was connected to another host
 *
 * Clears USB4 port from being marked as XDomain.
 */
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
	usb4_set_xdomain_configured(port, false);
}

static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
				  u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
	if (dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			    dwords);
}

static int usb4_port_write_data(struct tb_port *port, const void *data,
				size_t dwords)
{
	if (dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			     dwords);
}

static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
			     u8 index, u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}

static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
			      u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}

static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		switch (val) {
		case 0:
			return 0;

		case USB4_SB_OPCODE_ERR:
			return -EAGAIN;

		case USB4_SB_OPCODE_ONS:
			return -EOPNOTSUPP;

		default:
			if (val != opcode)
				return -EIO;
			break;
		}
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
{
	u32 val = !offline;
	int ret;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	val = USB4_SB_OPCODE_ROUTER_OFFLINE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_router_offline() - Put the USB4 port into offline mode
 * @port: USB4 port
 *
 * This function puts the USB4 port into offline mode. In this mode the
 * port does not react to hotplug events anymore. This needs to be
 * called before retimer access is done when the USB4 link is not up.
 *
 * Returns %0 in case of success and negative errno if there was an
 * error.
 */
int usb4_port_router_offline(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, true);
}

/**
 * usb4_port_router_online() - Put the USB4 port back to online
 * @port: USB4 port
 *
 * Makes the USB4 port functional again.
 */
int usb4_port_router_online(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, false);
}

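/*
 * Illustrative sketch (assumption; example_access_retimers_offline() is
 * hypothetical): when no device is connected, retimer access is
 * bracketed by the offline/online helpers above, with retimer
 * enumeration in between (see usb4_port_enumerate_retimers() below).
 *
 *	static int example_access_retimers_offline(struct tb_port *port)
 *	{
 *		int ret;
 *
 *		ret = usb4_port_router_offline(port);
 *		if (ret)
 *			return ret;
 *
 *		ret = usb4_port_enumerate_retimers(port);
 *
 *		(access the retimers at index 1..N here)
 *
 *		usb4_port_router_online(port);
 *		return ret;
 *	}
 */
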
/**
 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
 * @port: USB4 port
 *
 * This forces the USB4 port to send a broadcast RT transaction which
 * makes the retimers on the link assign indices to themselves. Returns
 * %0 in case of success and negative errno if there was an error.
 */
int usb4_port_enumerate_retimers(struct tb_port *port)
{
	u32 val;

	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_clx_supported() - Check if CLx is supported by the link
 * @port: Port to check for CLx support for
 *
 * PORT_CS_18_CPS bit reflects if the link supports CLx including
 * active cables (if connected on the link).
 */
bool usb4_port_clx_supported(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		return false;

	return !!(val & PORT_CS_18_CPS);
}

/**
 * usb4_port_margining_caps() - Read USB4 port margining capabilities
 * @port: USB4 port
 * @caps: Array with at least two elements to hold the results
 *
 * Reads the USB4 port lane margining capabilities into @caps.
 */
int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
{
	int ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_DATA, caps, sizeof(*caps) * 2);
}

/**
 * usb4_port_hw_margin() - Run hardware lane margining on port
 * @port: USB4 port
 * @lanes: Which lanes to run (must match the port capabilities). Can be
 *	   %0, %1 or %7.
 * @ber_level: BER level contour value
 * @timing: Perform timing margining instead of voltage
 * @right_high: Use right/high margin instead of left/low
 * @results: Array with at least two elements to hold the results
 *
 * Runs hardware lane margining on USB4 port and returns the result in
 * @results.
 */
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
			unsigned int ber_level, bool timing, bool right_high,
			u32 *results)
{
	u32 val;
	int ret;

	val = lanes;
	if (timing)
		val |= USB4_MARGIN_HW_TIME;
	if (right_high)
		val |= USB4_MARGIN_HW_RH;
	if (ber_level)
		val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
			USB4_MARGIN_HW_BER_MASK;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_DATA, results, sizeof(*results) * 2);
}

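/*
 * Illustrative sketch (assumption): margining capabilities are read
 * first, then a hardware margining run is started, for example a
 * timing margin on lane 0 on the right/high side with the default BER
 * level:
 *
 *	u32 caps[2], results[2];
 *	int ret;
 *
 *	ret = usb4_port_margining_caps(port, caps);
 *	if (ret)
 *		return ret;
 *
 *	ret = usb4_port_hw_margin(port, 0, 0, true, true, results);
 */
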
/**
 * usb4_port_sw_margin() - Run software lane margining on port
 * @port: USB4 port
 * @lanes: Which lanes to run (must match the port capabilities). Can be
 *	   %0, %1 or %7.
 * @timing: Perform timing margining instead of voltage
 * @right_high: Use right/high margin instead of left/low
 * @counter: What to do with the error counter
 *
 * Runs software lane margining on USB4 port. Read back the error
 * counters by calling usb4_port_sw_margin_errors(). Returns %0 on
 * success and negative errno otherwise.
 */
int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
			bool right_high, u32 counter)
{
	u32 val;
	int ret;

	val = lanes;
	if (timing)
		val |= USB4_MARGIN_SW_TIME;
	if (right_high)
		val |= USB4_MARGIN_SW_RH;
	val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
		USB4_MARGIN_SW_COUNTER_MASK;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			       USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
}

/**
 * usb4_port_sw_margin_errors() - Read the software margining error counters
 * @port: USB4 port
 * @errors: Error metadata is copied here.
 *
 * This reads back the software margining error counters from the port.
 * Returns %0 on success and negative errno otherwise.
 */
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
{
	int ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
			      USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, errors, sizeof(*errors));
}

static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}

/**
 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Enables sideband channel transactions on SBTX. Can be used when the
 * USB4 link does not go up, for example if there is no device
 * connected.
 */
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				   500);

	if (ret != -ENODEV)
		return ret;

	/*
	 * Per the USB4 retimer spec, the retimer is not required to
	 * send an RT (Retimer Transaction) response for the first
	 * SET_INBOUND_SBTX command.
	 */
	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				    500);
}

/**
 * usb4_port_retimer_read() - Read from retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to read
 * @buf: Data from @reg is stored here
 * @size: Number of bytes to read
 *
 * Function reads retimer sideband registers starting from @reg. The
 * retimer is connected to @port at @index. Returns %0 in case of
 * success, and read data is copied to @buf. If there is no retimer
 * present at given @index returns %-ENODEV. In any other failure
 * returns negative errno.
 */
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size)
{
	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				 size);
}

/**
 * usb4_port_retimer_write() - Write to retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to write
 * @buf: Data that is written starting from @reg
 * @size: Number of bytes to write
 *
 * Writes retimer sideband registers starting from @reg. The retimer is
 * connected to @port at @index. Returns %0 in case of success. If there
 * is no retimer present at given @index returns %-ENODEV. In any other
 * failure returns negative errno.
 */
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size)
{
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				  size);
}

/**
 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is last one (connected directly to the
 * Type-C port) this function returns %1. If it is not returns %0. If
 * the retimer is not present returns %-ENODEV. Otherwise returns
 * negative errno.
 */
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & 1;
}

/**
 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
 * @port: USB4 port
 * @index: Retimer index
 *
 * Reads NVM sector size (in bytes) of a retimer at @index. This
 * operation can be used to determine whether the retimer supports NVM
 * upgrade for example. Returns sector size in bytes or negative errno
 * in case of error. Specifically returns %-ENODEV if there is no
 * retimer at @index.
 */
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
	u32 metadata;
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
				   500);
	if (ret)
		return ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
				     sizeof(metadata));
	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

/**
 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
 * @port: USB4 port
 * @index: Retimer index
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this is
 * done automatically by usb4_port_retimer_nvm_write().
 *
 * Returns %0 on success and negative errno if there was a failure.
 */
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
				     unsigned int address)
{
	u32 metadata, dwaddress;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		  USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
				    500);
}

struct retimer_info {
	struct tb_port *port;
	u8 index;
};

static int usb4_port_retimer_nvm_write_next_block(void *data,
	unsigned int dwaddress, const void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
				      buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}

/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Size in bytes how much to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
				const void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };
	int ret;

	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_port_retimer_nvm_write_next_block, &info);
}

/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
	u32 val;

	/*
	 * We need to use the raw operation here because once the
	 * authentication completes the retimer index is not set anymore
	 * so we do not get back the status now.
	 */
	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
				     sizeof(val));
	if (ret)
		return ret;

	switch (val) {
	case 0:
		*status = 0;
		return 0;

	case USB4_SB_OPCODE_ERR:
		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
					     &metadata, sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	case USB4_SB_OPCODE_ONS:
		return -EOPNOTSUPP;

	default:
		return -EIO;
	}
}

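/*
 * Illustrative sketch of the retimer NVM upgrade sequence (assumption;
 * example_retimer_upgrade() is hypothetical). The retimer restarts
 * after the authentication, so indices are re-assigned before the
 * status is read back.
 *
 *	static int example_retimer_upgrade(struct tb_port *port, u8 index,
 *					   const void *image, size_t size)
 *	{
 *		u32 status;
 *		int ret;
 *
 *		ret = usb4_port_retimer_nvm_write(port, index, 0, image, size);
 *		if (ret)
 *			return ret;
 *
 *		ret = usb4_port_retimer_nvm_authenticate(port, index);
 *		if (ret)
 *			return ret;
 *
 *		ret = usb4_port_enumerate_retimers(port);
 *		if (ret)
 *			return ret;
 *
 *		ret = usb4_port_retimer_nvm_authenticate_status(port, index,
 *								&status);
 *		if (ret)
 *			return ret;
 *		return status ? -EIO : 0;
 *	}
 */
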
usb4_port_retimer_nvm_read_block(void * data,unsigned int dwaddress,void * buf,size_t dwords)1824 static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
1825 void *buf, size_t dwords)
1826 {
1827 const struct retimer_info *info = data;
1828 struct tb_port *port = info->port;
1829 u8 index = info->index;
1830 u32 metadata;
1831 int ret;
1832
1833 metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
1834 if (dwords < NVM_DATA_DWORDS)
1835 metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
1836
1837 ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1838 sizeof(metadata));
1839 if (ret)
1840 return ret;
1841
1842 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
1843 if (ret)
1844 return ret;
1845
1846 return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
1847 dwords * 4);
1848 }
1849
1850 /**
1851 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
1852 * @port: USB4 port
1853 * @index: Retimer index
1854 * @address: NVM address (in bytes) to start reading
1855 * @buf: Data read from NVM is stored here
1856 * @size: Number of bytes to read
1857 *
1858 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
1859 * read was successful and negative errno in case of failure.
1860 * Specifically returns %-ENODEV if there is no retimer at @index.
1861 */
1862 int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
1863 unsigned int address, void *buf, size_t size)
1864 {
1865 struct retimer_info info = { .port = port, .index = index };
1866
1867 return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
1868 usb4_port_retimer_nvm_read_block, &info);
1869 }
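
/*
 * Editor's illustrative sketch (an assumption, not in the original
 * file): reading the start of a retimer NVM, for example to inspect
 * the image header. The buffer size and offset are arbitrary
 * illustrative values.
 */
static int __maybe_unused example_retimer_nvm_read_header(struct tb_port *port,
							  u8 index)
{
	u8 header[64];

	return usb4_port_retimer_nvm_read(port, index, 0, header,
					  sizeof(header));
}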
1870
1871 /**
1872 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
1873 * @port: USB3 adapter port
1874 *
1875 * Returns the maximum supported link rate of a USB3 adapter in Mb/s,
1876 * or negative errno in case of error.
1877 */
1878 int usb4_usb3_port_max_link_rate(struct tb_port *port)
1879 {
1880 int ret, lr;
1881 u32 val;
1882
1883 if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1884 return -EINVAL;
1885
1886 ret = tb_port_read(port, &val, TB_CFG_PORT,
1887 port->cap_adap + ADP_USB3_CS_4, 1);
1888 if (ret)
1889 return ret;
1890
1891 lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
1892 return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
1893 }
1894
1895 /**
1896 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
1897 * @port: USB3 adapter port
1898 *
1899 * Returns the actual established link rate of a USB3 adapter in Mb/s.
1900 * If the link is not up returns %0, and negative errno in case of failure.
1901 */
1902 int usb4_usb3_port_actual_link_rate(struct tb_port *port)
1903 {
1904 int ret, lr;
1905 u32 val;
1906
1907 if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1908 return -EINVAL;
1909
1910 ret = tb_port_read(port, &val, TB_CFG_PORT,
1911 port->cap_adap + ADP_USB3_CS_4, 1);
1912 if (ret)
1913 return ret;
1914
1915 if (!(val & ADP_USB3_CS_4_ULV))
1916 return 0;
1917
1918 lr = val & ADP_USB3_CS_4_ALR_MASK;
1919 return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
1920 }
1921
1922 static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
1923 {
1924 int ret;
1925 u32 val;
1926
1927 if (!tb_port_is_usb3_down(port))
1928 return -EINVAL;
1929 if (tb_route(port->sw))
1930 return -EINVAL;
1931
1932 ret = tb_port_read(port, &val, TB_CFG_PORT,
1933 port->cap_adap + ADP_USB3_CS_2, 1);
1934 if (ret)
1935 return ret;
1936
1937 if (request)
1938 val |= ADP_USB3_CS_2_CMR;
1939 else
1940 val &= ~ADP_USB3_CS_2_CMR;
1941
1942 ret = tb_port_write(port, &val, TB_CFG_PORT,
1943 port->cap_adap + ADP_USB3_CS_2, 1);
1944 if (ret)
1945 return ret;
1946
1947 /*
1948 * We can use val here directly as the CMR bit is in the same place
1949 * as HCA. Just mask out others.
1950 */
1951 val &= ADP_USB3_CS_2_CMR;
1952 return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
1953 ADP_USB3_CS_1_HCA, val, 1500);
1954 }
1955
1956 static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
1957 {
1958 return usb4_usb3_port_cm_request(port, true);
1959 }
1960
1961 static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
1962 {
1963 return usb4_usb3_port_cm_request(port, false);
1964 }
1965
1966 static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
1967 {
1968 unsigned long uframes;
1969
1970 uframes = bw * 512UL << scale;
1971 return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
1972 }
1973
1974 static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
1975 {
1976 unsigned long uframes;
1977
1978 /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
1979 uframes = ((unsigned long)mbps * 1000 * 1000) / 8000;
1980 return DIV_ROUND_UP(uframes, 512UL << scale);
1981 }
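
/*
 * Editor's note with a worked example of the two conversions above: in
 * this arithmetic one bandwidth unit is 512 << scale bits per microframe
 * and there are 8000 microframes per second. With scale = 0, bw = 1 is
 * 512 * 8000 = 4096000 b/s, i.e. ~4 Mb/s. Conversely,
 * mbps_to_usb3_bw(900, 0) gives DIV_ROUND_UP(900000000 / 8000, 512) =
 * DIV_ROUND_UP(112500, 512) = 220 units; rounding up on encode means the
 * allocation never falls below the requested rate, and
 * usb3_bw_to_mbps(220, 0) maps back to 901 Mb/s.
 */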
1982
1983 static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
1984 int *upstream_bw,
1985 int *downstream_bw)
1986 {
1987 u32 val, bw, scale;
1988 int ret;
1989
1990 ret = tb_port_read(port, &val, TB_CFG_PORT,
1991 port->cap_adap + ADP_USB3_CS_2, 1);
1992 if (ret)
1993 return ret;
1994
1995 ret = tb_port_read(port, &scale, TB_CFG_PORT,
1996 port->cap_adap + ADP_USB3_CS_3, 1);
1997 if (ret)
1998 return ret;
1999
2000 scale &= ADP_USB3_CS_3_SCALE_MASK;
2001
2002 bw = val & ADP_USB3_CS_2_AUBW_MASK;
2003 *upstream_bw = usb3_bw_to_mbps(bw, scale);
2004
2005 bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
2006 *downstream_bw = usb3_bw_to_mbps(bw, scale);
2007
2008 return 0;
2009 }
2010
2011 /**
2012 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
2013 * @port: USB3 adapter port
2014 * @upstream_bw: Allocated upstream bandwidth is stored here
2015 * @downstream_bw: Allocated downstream bandwidth is stored here
2016 *
2017 * Stores currently allocated USB3 bandwidth into @upstream_bw and
2018 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
2019 * errno in case of failure.
2020 */
2021 int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
2022 int *downstream_bw)
2023 {
2024 int ret;
2025
2026 ret = usb4_usb3_port_set_cm_request(port);
2027 if (ret)
2028 return ret;
2029
2030 ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
2031 downstream_bw);
2032 usb4_usb3_port_clear_cm_request(port);
2033
2034 return ret;
2035 }
2036
2037 static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
2038 int *upstream_bw,
2039 int *downstream_bw)
2040 {
2041 u32 val, bw, scale;
2042 int ret;
2043
2044 ret = tb_port_read(port, &val, TB_CFG_PORT,
2045 port->cap_adap + ADP_USB3_CS_1, 1);
2046 if (ret)
2047 return ret;
2048
2049 ret = tb_port_read(port, &scale, TB_CFG_PORT,
2050 port->cap_adap + ADP_USB3_CS_3, 1);
2051 if (ret)
2052 return ret;
2053
2054 scale &= ADP_USB3_CS_3_SCALE_MASK;
2055
2056 bw = val & ADP_USB3_CS_1_CUBW_MASK;
2057 *upstream_bw = usb3_bw_to_mbps(bw, scale);
2058
2059 bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
2060 *downstream_bw = usb3_bw_to_mbps(bw, scale);
2061
2062 return 0;
2063 }
2064
2065 static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
2066 int upstream_bw,
2067 int downstream_bw)
2068 {
2069 u32 val, ubw, dbw, scale;
2070 int ret;
2071
2072 /* Read the used scale, hardware default is 0 */
2073 ret = tb_port_read(port, &scale, TB_CFG_PORT,
2074 port->cap_adap + ADP_USB3_CS_3, 1);
2075 if (ret)
2076 return ret;
2077
2078 scale &= ADP_USB3_CS_3_SCALE_MASK;
2079 ubw = mbps_to_usb3_bw(upstream_bw, scale);
2080 dbw = mbps_to_usb3_bw(downstream_bw, scale);
2081
2082 ret = tb_port_read(port, &val, TB_CFG_PORT,
2083 port->cap_adap + ADP_USB3_CS_2, 1);
2084 if (ret)
2085 return ret;
2086
2087 val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
2088 val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
2089 val |= ubw;
2090
2091 return tb_port_write(port, &val, TB_CFG_PORT,
2092 port->cap_adap + ADP_USB3_CS_2, 1);
2093 }
2094
2095 /**
2096 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
2097 * @port: USB3 adapter port
2098 * @upstream_bw: New upstream bandwidth
2099 * @downstream_bw: New downstream bandwidth
2100 *
2101 * This can be used to set how much bandwidth is allocated for the USB3
2102 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
2103 * new values programmed to the USB3 adapter allocation registers. If
2104 * the values are lower than what is currently consumed, the allocation
2105 * is set to what is currently consumed instead (consumed bandwidth
2106 * cannot be taken away by CM). The actual new values are returned in
2107 * @upstream_bw and @downstream_bw.
2108 *
2109 * Returns %0 in case of success and negative errno if there was a
2110 * failure.
2111 */
2112 int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
2113 int *downstream_bw)
2114 {
2115 int ret, consumed_up, consumed_down, allocate_up, allocate_down;
2116
2117 ret = usb4_usb3_port_set_cm_request(port);
2118 if (ret)
2119 return ret;
2120
2121 ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
2122 &consumed_down);
2123 if (ret)
2124 goto err_request;
2125
2126 /* Don't allow it to go lower than what is consumed */
2127 allocate_up = max(*upstream_bw, consumed_up);
2128 allocate_down = max(*downstream_bw, consumed_down);
2129
2130 ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
2131 allocate_down);
2132 if (ret)
2133 goto err_request;
2134
2135 *upstream_bw = allocate_up;
2136 *downstream_bw = allocate_down;
2137
2138 err_request:
2139 usb4_usb3_port_clear_cm_request(port);
2140 return ret;
2141 }
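
/*
 * Editor's illustrative sketch (hypothetical caller, arbitrary values):
 * ask for 2000 Mb/s in both directions. On return the parameters hold
 * what was actually programmed, which may be higher than requested if
 * more bandwidth was already consumed.
 */
static int __maybe_unused example_usb3_allocate(struct tb_port *port)
{
	int up = 2000, down = 2000;
	int ret;

	ret = usb4_usb3_port_allocate_bandwidth(port, &up, &down);
	if (ret)
		return ret;

	/* up and down now contain the actual allocated Mb/s */
	return 0;
}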
2142
2143 /**
2144 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
2145 * @port: USB3 adapter port
2146 * @upstream_bw: New allocated upstream bandwidth
2147 * @downstream_bw: New allocated downstream bandwidth
2148 *
2149 * Releases USB3 allocated bandwidth down to what is actually consumed.
2150 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
2151 *
2152 * Returns %0 in case of success and negative errno in case of failure.
2153 */
2154 int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
2155 int *downstream_bw)
2156 {
2157 int ret, consumed_up, consumed_down;
2158
2159 ret = usb4_usb3_port_set_cm_request(port);
2160 if (ret)
2161 return ret;
2162
2163 ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
2164 &consumed_down);
2165 if (ret)
2166 goto err_request;
2167
2168 /*
2169 * Always keep 1000 Mb/s to make sure xHCI has at least some
2170 * bandwidth available for isochronous traffic.
2171 */
2172 if (consumed_up < 1000)
2173 consumed_up = 1000;
2174 if (consumed_down < 1000)
2175 consumed_down = 1000;
2176
2177 ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
2178 consumed_down);
2179 if (ret)
2180 goto err_request;
2181
2182 *upstream_bw = consumed_up;
2183 *downstream_bw = consumed_down;
2184
2185 err_request:
2186 usb4_usb3_port_clear_cm_request(port);
2187 return ret;
2188 }
2189
2190 static bool is_usb4_dpin(const struct tb_port *port)
2191 {
2192 if (!tb_port_is_dpin(port))
2193 return false;
2194 if (!tb_switch_is_usb4(port->sw))
2195 return false;
2196 return true;
2197 }
2198
2199 /**
2200 * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
2201 * @port: DP IN adapter
2202 * @cm_id: CM ID to assign
2203 *
2204 * Sets CM ID for the @port. Returns %0 on success and negative errno
2205 * otherwise. Specifically returns %-EOPNOTSUPP if the @port does not
2206 * support this.
2207 */
2208 int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
2209 {
2210 u32 val;
2211 int ret;
2212
2213 if (!is_usb4_dpin(port))
2214 return -EOPNOTSUPP;
2215
2216 ret = tb_port_read(port, &val, TB_CFG_PORT,
2217 port->cap_adap + ADP_DP_CS_2, 1);
2218 if (ret)
2219 return ret;
2220
2221 val &= ~ADP_DP_CS_2_CM_ID_MASK;
2222 val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;
2223
2224 return tb_port_write(port, &val, TB_CFG_PORT,
2225 port->cap_adap + ADP_DP_CS_2, 1);
2226 }
2227
2228 /**
2229 * usb4_dp_port_bw_mode_supported() - Is the bandwidth allocation mode supported
2230 * @port: DP IN adapter to check
2231 *
2232 * Can be called for any DP IN adapter. Returns true if the adapter
2233 * supports USB4 bandwidth allocation mode, false otherwise.
2234 */
2235 bool usb4_dp_port_bw_mode_supported(struct tb_port *port)
2236 {
2237 int ret;
2238 u32 val;
2239
2240 if (!is_usb4_dpin(port))
2241 return false;
2242
2243 ret = tb_port_read(port, &val, TB_CFG_PORT,
2244 port->cap_adap + DP_LOCAL_CAP, 1);
2245 if (ret)
2246 return false;
2247
2248 return !!(val & DP_COMMON_CAP_BW_MODE);
2249 }
2250
2251 /**
2252 * usb4_dp_port_bw_mode_enabled() - Is the bandwidth allocation mode enabled
2253 * @port: DP IN adapter to check
2254 *
2255 * Can be called for any DP IN adapter. Returns true if the bandwidth
2256 * allocation mode has been enabled, false otherwise.
2257 */
2258 bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
2259 {
2260 int ret;
2261 u32 val;
2262
2263 if (!is_usb4_dpin(port))
2264 return false;
2265
2266 ret = tb_port_read(port, &val, TB_CFG_PORT,
2267 port->cap_adap + ADP_DP_CS_8, 1);
2268 if (ret)
2269 return false;
2270
2271 return !!(val & ADP_DP_CS_8_DPME);
2272 }
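
/*
 * Editor's illustrative sketch (an assumption, not in the original
 * file): how a connection manager might gate the use of bandwidth
 * allocation mode by combining the two checks above.
 */
static bool __maybe_unused example_dp_bw_mode_usable(struct tb_port *port)
{
	return usb4_dp_port_bw_mode_supported(port) &&
	       usb4_dp_port_bw_mode_enabled(port);
}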
2273
2274 /**
2275 * usb4_dp_port_set_cm_bw_mode_supported() - Set/clear CM support for bandwidth allocation mode
2276 * @port: DP IN adapter
2277 * @supported: Does the CM support bandwidth allocation mode
2278 *
2279 * Can be called for any DP IN adapter. Sets or clears the CM support bit
2280 * of the DP IN adapter. Returns %0 in case of success and negative errno
2281 * otherwise. Specifically returns %-EOPNOTSUPP if the passed in adapter
2282 * does not support this.
2283 */
2284 int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported)
2285 {
2286 u32 val;
2287 int ret;
2288
2289 if (!is_usb4_dpin(port))
2290 return -EOPNOTSUPP;
2291
2292 ret = tb_port_read(port, &val, TB_CFG_PORT,
2293 port->cap_adap + ADP_DP_CS_2, 1);
2294 if (ret)
2295 return ret;
2296
2297 if (supported)
2298 val |= ADP_DP_CS_2_CMMS;
2299 else
2300 val &= ~ADP_DP_CS_2_CMMS;
2301
2302 return tb_port_write(port, &val, TB_CFG_PORT,
2303 port->cap_adap + ADP_DP_CS_2, 1);
2304 }
2305
2306 /**
2307 * usb4_dp_port_group_id() - Return Group ID assigned for the adapter
2308 * @port: DP IN adapter
2309 *
2310 * Reads bandwidth allocation Group ID from the DP IN adapter and
2311 * returns it. If the adapter does not support setting Group_ID,
2312 * %-EOPNOTSUPP is returned.
2313 */
2314 int usb4_dp_port_group_id(struct tb_port *port)
2315 {
2316 u32 val;
2317 int ret;
2318
2319 if (!is_usb4_dpin(port))
2320 return -EOPNOTSUPP;
2321
2322 ret = tb_port_read(port, &val, TB_CFG_PORT,
2323 port->cap_adap + ADP_DP_CS_2, 1);
2324 if (ret)
2325 return ret;
2326
2327 return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT;
2328 }
2329
2330 /**
2331 * usb4_dp_port_set_group_id() - Set adapter Group ID
2332 * @port: DP IN adapter
2333 * @group_id: Group ID for the adapter
2334 *
2335 * Sets bandwidth allocation mode Group ID for the DP IN adapter.
2336 * Returns %0 in case of success and negative errno otherwise.
2337 * Specifically returns %-EOPNOTSUPP if the adapter does not support
2338 * this.
2339 */
2340 int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
2341 {
2342 u32 val;
2343 int ret;
2344
2345 if (!is_usb4_dpin(port))
2346 return -EOPNOTSUPP;
2347
2348 ret = tb_port_read(port, &val, TB_CFG_PORT,
2349 port->cap_adap + ADP_DP_CS_2, 1);
2350 if (ret)
2351 return ret;
2352
2353 val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
2354 val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;
2355
2356 return tb_port_write(port, &val, TB_CFG_PORT,
2357 port->cap_adap + ADP_DP_CS_2, 1);
2358 }
2359
2360 /**
2361 * usb4_dp_port_nrd() - Read non-reduced rate and lanes
2362 * @port: DP IN adapter
2363 * @rate: Non-reduced rate in Mb/s is placed here
2364 * @lanes: Non-reduced lanes are placed here
2365 *
2366 * Reads the non-reduced rate and lanes from the DP IN adapter. Returns
2367 * %0 in case of success and negative errno otherwise. Specifically returns
2368 * %-EOPNOTSUPP if the adapter does not support this.
2369 */
2370 int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
2371 {
2372 u32 val, tmp;
2373 int ret;
2374
2375 if (!is_usb4_dpin(port))
2376 return -EOPNOTSUPP;
2377
2378 ret = tb_port_read(port, &val, TB_CFG_PORT,
2379 port->cap_adap + ADP_DP_CS_2, 1);
2380 if (ret)
2381 return ret;
2382
2383 tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
2384 switch (tmp) {
2385 case DP_COMMON_CAP_RATE_RBR:
2386 *rate = 1620;
2387 break;
2388 case DP_COMMON_CAP_RATE_HBR:
2389 *rate = 2700;
2390 break;
2391 case DP_COMMON_CAP_RATE_HBR2:
2392 *rate = 5400;
2393 break;
2394 case DP_COMMON_CAP_RATE_HBR3:
2395 *rate = 8100;
2396 break;
2397 }
2398
2399 tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
2400 switch (tmp) {
2401 case DP_COMMON_CAP_1_LANE:
2402 *lanes = 1;
2403 break;
2404 case DP_COMMON_CAP_2_LANES:
2405 *lanes = 2;
2406 break;
2407 case DP_COMMON_CAP_4_LANES:
2408 *lanes = 4;
2409 break;
2410 }
2411
2412 return 0;
2413 }
2414
2415 /**
2416 * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
2417 * @port: DP IN adapter
2418 * @rate: Non-reduced rate in Mb/s
2419 * @lanes: Non-reduced lanes
2420 *
2421 * Before the capabilities reduction this function can be used to set
2422 * the non-reduced values for the DP IN adapter. Returns %0 in case of
2423 * success and negative errno otherwise. If the adapter does not support
2424 * this %-EOPNOTSUPP is returned.
2425 */
2426 int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
2427 {
2428 u32 val;
2429 int ret;
2430
2431 if (!is_usb4_dpin(port))
2432 return -EOPNOTSUPP;
2433
2434 ret = tb_port_read(port, &val, TB_CFG_PORT,
2435 port->cap_adap + ADP_DP_CS_2, 1);
2436 if (ret)
2437 return ret;
2438
2439 val &= ~ADP_DP_CS_2_NRD_MLR_MASK;
2440
2441 switch (rate) {
2442 case 1620:
2443 break;
2444 case 2700:
2445 val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
2446 & ADP_DP_CS_2_NRD_MLR_MASK;
2447 break;
2448 case 5400:
2449 val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
2450 & ADP_DP_CS_2_NRD_MLR_MASK;
2451 break;
2452 case 8100:
2453 val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
2454 & ADP_DP_CS_2_NRD_MLR_MASK;
2455 break;
2456 default:
2457 return -EINVAL;
2458 }
2459
2460 val &= ~ADP_DP_CS_2_NRD_MLC_MASK;
2461
2462 switch (lanes) {
2463 case 1:
2464 break;
2465 case 2:
2466 val |= DP_COMMON_CAP_2_LANES;
2467 break;
2468 case 4:
2469 val |= DP_COMMON_CAP_4_LANES;
2470 break;
2471 default:
2472 return -EINVAL;
2473 }
2474
2475 return tb_port_write(port, &val, TB_CFG_PORT,
2476 port->cap_adap + ADP_DP_CS_2, 1);
2477 }
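
/*
 * Editor's illustrative sketch (hypothetical values): record HBR2 x4 as
 * the non-reduced capabilities before the capabilities reduction, then
 * read them back with the accessor above.
 */
static int __maybe_unused example_dp_nrd_roundtrip(struct tb_port *port)
{
	int rate, lanes, ret;

	ret = usb4_dp_port_set_nrd(port, 5400, 4);
	if (ret)
		return ret;

	ret = usb4_dp_port_nrd(port, &rate, &lanes);
	if (ret)
		return ret;

	/* rate should now read back as 5400 and lanes as 4 */
	return 0;
}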
2478
2479 /**
2480 * usb4_dp_port_granularity() - Return granularity for the bandwidth values
2481 * @port: DP IN adapter
2482 *
2483 * Reads the programmed granularity from @port. If the DP IN adapter does
2484 * not support bandwidth allocation mode returns %-EOPNOTSUPP, and
2485 * negative errno in other error cases.
2486 */
2487 int usb4_dp_port_granularity(struct tb_port *port)
2488 {
2489 u32 val;
2490 int ret;
2491
2492 if (!is_usb4_dpin(port))
2493 return -EOPNOTSUPP;
2494
2495 ret = tb_port_read(port, &val, TB_CFG_PORT,
2496 port->cap_adap + ADP_DP_CS_2, 1);
2497 if (ret)
2498 return ret;
2499
2500 val &= ADP_DP_CS_2_GR_MASK;
2501 val >>= ADP_DP_CS_2_GR_SHIFT;
2502
2503 switch (val) {
2504 case ADP_DP_CS_2_GR_0_25G:
2505 return 250;
2506 case ADP_DP_CS_2_GR_0_5G:
2507 return 500;
2508 case ADP_DP_CS_2_GR_1G:
2509 return 1000;
2510 }
2511
2512 return -EINVAL;
2513 }
2514
2515 /**
2516 * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values
2517 * @port: DP IN adapter
2518 * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
2519 *
2520 * Sets the granularity used with the estimated, allocated and requested
2521 * bandwidth. Returns %0 in case of success and negative errno otherwise.
2522 * If the adapter does not support this %-EOPNOTSUPP is returned.
2523 */
2524 int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
2525 {
2526 u32 val;
2527 int ret;
2528
2529 if (!is_usb4_dpin(port))
2530 return -EOPNOTSUPP;
2531
2532 ret = tb_port_read(port, &val, TB_CFG_PORT,
2533 port->cap_adap + ADP_DP_CS_2, 1);
2534 if (ret)
2535 return ret;
2536
2537 val &= ~ADP_DP_CS_2_GR_MASK;
2538
2539 switch (granularity) {
2540 case 250:
2541 val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT;
2542 break;
2543 case 500:
2544 val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;
2545 break;
2546 case 1000:
2547 val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT;
2548 break;
2549 default:
2550 return -EINVAL;
2551 }
2552
2553 return tb_port_write(port, &val, TB_CFG_PORT,
2554 port->cap_adap + ADP_DP_CS_2, 1);
2555 }
2556
2557 /**
2558 * usb4_dp_port_set_estimated_bw() - Set estimated bandwidth
2559 * @port: DP IN adapter
2560 * @bw: Estimated bandwidth in Mb/s.
2561 *
2562 * Sets the estimated bandwidth to @bw. Set the granularity by calling
2563 * usb4_dp_port_set_granularity() before calling this. The @bw is rounded
2564 * down to the closest granularity multiple. Returns %0 in case of
2565 * success and negative errno otherwise. Specifically returns
2566 * %-EOPNOTSUPP if the adapter does not support this.
2567 */
2568 int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw)
2569 {
2570 u32 val, granularity;
2571 int ret;
2572
2573 if (!is_usb4_dpin(port))
2574 return -EOPNOTSUPP;
2575
2576 ret = usb4_dp_port_granularity(port);
2577 if (ret < 0)
2578 return ret;
2579 granularity = ret;
2580
2581 ret = tb_port_read(port, &val, TB_CFG_PORT,
2582 port->cap_adap + ADP_DP_CS_2, 1);
2583 if (ret)
2584 return ret;
2585
2586 val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
2587 val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;
2588
2589 return tb_port_write(port, &val, TB_CFG_PORT,
2590 port->cap_adap + ADP_DP_CS_2, 1);
2591 }
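
/*
 * Editor's illustrative sketch (hypothetical values): program a 250 Mb/s
 * granularity and then advertise an estimated 2250 Mb/s, which the
 * function above writes to the register field as 2250 / 250 = 9 units.
 */
static int __maybe_unused example_dp_set_estimated(struct tb_port *port)
{
	int ret;

	ret = usb4_dp_port_set_granularity(port, 250);
	if (ret)
		return ret;

	return usb4_dp_port_set_estimated_bw(port, 2250);
}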
2592
2593 /**
2594 * usb4_dp_port_allocated_bw() - Return allocated bandwidth
2595 * @port: DP IN adapter
2596 *
2597 * Reads and returns allocated bandwidth for @port in Mb/s (taking into
2598 * account the programmed granularity). Returns negative errno in case
2599 * of error.
2600 */
2601 int usb4_dp_port_allocated_bw(struct tb_port *port)
2602 {
2603 u32 val, granularity;
2604 int ret;
2605
2606 if (!is_usb4_dpin(port))
2607 return -EOPNOTSUPP;
2608
2609 ret = usb4_dp_port_granularity(port);
2610 if (ret < 0)
2611 return ret;
2612 granularity = ret;
2613
2614 ret = tb_port_read(port, &val, TB_CFG_PORT,
2615 port->cap_adap + DP_STATUS, 1);
2616 if (ret)
2617 return ret;
2618
2619 val &= DP_STATUS_ALLOCATED_BW_MASK;
2620 val >>= DP_STATUS_ALLOCATED_BW_SHIFT;
2621
2622 return val * granularity;
2623 }
2624
2625 static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
2626 {
2627 u32 val;
2628 int ret;
2629
2630 ret = tb_port_read(port, &val, TB_CFG_PORT,
2631 port->cap_adap + ADP_DP_CS_2, 1);
2632 if (ret)
2633 return ret;
2634
2635 if (ack)
2636 val |= ADP_DP_CS_2_CA;
2637 else
2638 val &= ~ADP_DP_CS_2_CA;
2639
2640 return tb_port_write(port, &val, TB_CFG_PORT,
2641 port->cap_adap + ADP_DP_CS_2, 1);
2642 }
2643
2644 static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
2645 {
2646 return __usb4_dp_port_set_cm_ack(port, true);
2647 }
2648
2649 static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
2650 int timeout_msec)
2651 {
2652 ktime_t end;
2653 u32 val;
2654 int ret;
2655
2656 ret = __usb4_dp_port_set_cm_ack(port, false);
2657 if (ret)
2658 return ret;
2659
2660 end = ktime_add_ms(ktime_get(), timeout_msec);
2661 do {
2662 ret = tb_port_read(port, &val, TB_CFG_PORT,
2663 port->cap_adap + ADP_DP_CS_8, 1);
2664 if (ret)
2665 return ret;
2666
2667 if (!(val & ADP_DP_CS_8_DR))
2668 break;
2669
2670 usleep_range(50, 100);
2671 } while (ktime_before(ktime_get(), end));
2672
2673 if (val & ADP_DP_CS_8_DR)
2674 return -ETIMEDOUT;
2675
2676 ret = tb_port_read(port, &val, TB_CFG_PORT,
2677 port->cap_adap + ADP_DP_CS_2, 1);
2678 if (ret)
2679 return ret;
2680
2681 val &= ~ADP_DP_CS_2_CA;
2682 return tb_port_write(port, &val, TB_CFG_PORT,
2683 port->cap_adap + ADP_DP_CS_2, 1);
2684 }
2685
2686 /**
2687 * usb4_dp_port_allocate_bw() - Set allocated bandwidth
2688 * @port: DP IN adapter
2689 * @bw: New allocated bandwidth in Mb/s
2690 *
2691 * Communicates the new allocated bandwidth with the DPCD (graphics
2692 * driver). Takes into account the programmed granularity. Returns %0 in
2693 * case of success and negative errno in case of error.
2694 */
2695 int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
2696 {
2697 u32 val, granularity;
2698 int ret;
2699
2700 if (!is_usb4_dpin(port))
2701 return -EOPNOTSUPP;
2702
2703 ret = usb4_dp_port_granularity(port);
2704 if (ret < 0)
2705 return ret;
2706 granularity = ret;
2707
2708 ret = tb_port_read(port, &val, TB_CFG_PORT,
2709 port->cap_adap + DP_STATUS, 1);
2710 if (ret)
2711 return ret;
2712
2713 val &= ~DP_STATUS_ALLOCATED_BW_MASK;
2714 val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;
2715
2716 ret = tb_port_write(port, &val, TB_CFG_PORT,
2717 port->cap_adap + DP_STATUS, 1);
2718 if (ret)
2719 return ret;
2720
2721 ret = usb4_dp_port_set_cm_ack(port);
2722 if (ret)
2723 return ret;
2724
2725 return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
2726 }
2727
2728 /**
2729 * usb4_dp_port_requested_bw() - Read requested bandwidth
2730 * @port: DP IN adapter
2731 *
2732 * Reads the DPCD (graphics driver) requested bandwidth and returns it
2733 * in Mb/s. Takes the programmed granularity into account. In case of
2734 * error returns negative errno. Specifically returns %-EOPNOTSUPP if
2735 * the adapter does not support bandwidth allocation mode, and %-ENODATA
2736 * if there is no active bandwidth request from the graphics driver.
2737 */
2738 int usb4_dp_port_requested_bw(struct tb_port *port)
2739 {
2740 u32 val, granularity;
2741 int ret;
2742
2743 if (!is_usb4_dpin(port))
2744 return -EOPNOTSUPP;
2745
2746 ret = usb4_dp_port_granularity(port);
2747 if (ret < 0)
2748 return ret;
2749 granularity = ret;
2750
2751 ret = tb_port_read(port, &val, TB_CFG_PORT,
2752 port->cap_adap + ADP_DP_CS_8, 1);
2753 if (ret)
2754 return ret;
2755
2756 if (!(val & ADP_DP_CS_8_DR))
2757 return -ENODATA;
2758
2759 return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
2760 }
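
/*
 * Editor's illustrative sketch (an assumption, not in the original
 * file): the CM side of the allocation handshake. When the graphics
 * driver raises a request, grant it as-is; usb4_dp_port_allocate_bw()
 * performs the CM ack handshake internally.
 */
static int __maybe_unused example_dp_grant_request(struct tb_port *port)
{
	int req;

	req = usb4_dp_port_requested_bw(port);
	if (req < 0)
		return req;

	return usb4_dp_port_allocate_bw(port, req);
}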
2761