Lines matching refs: sw (Linux kernel, drivers/thunderbolt/switch.c)
41 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw) in __nvm_get_auth_status() argument
46 if (uuid_equal(&st->uuid, sw->uuid)) in __nvm_get_auth_status()
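Context for the two hits above: __nvm_get_auth_status() walks a driver-global list of authentication results keyed by router UUID. A sketch with the unmatched lines filled in; the struct layout and the nvm_auth_status_cache list name are not part of the refs:sw hits and are quoted here from the upstream driver as an assumption:

struct nvm_auth_status {
        struct list_head list;
        uuid_t uuid;
        u32 status;
};

/* Assumption: a driver-global cache, protected elsewhere by a mutex */
static LIST_HEAD(nvm_auth_status_cache);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
        struct nvm_auth_status *st;

        list_for_each_entry(st, &nvm_auth_status_cache, list) {
                if (uuid_equal(&st->uuid, sw->uuid))
                        return st;
        }

        return NULL;
}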
53 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status) in nvm_get_auth_status() argument
58 st = __nvm_get_auth_status(sw); in nvm_get_auth_status()
64 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status) in nvm_set_auth_status() argument
68 if (WARN_ON(!sw->uuid)) in nvm_set_auth_status()
72 st = __nvm_get_auth_status(sw); in nvm_set_auth_status()
79 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid)); in nvm_set_auth_status()
89 static void nvm_clear_auth_status(const struct tb_switch *sw) in nvm_clear_auth_status() argument
94 st = __nvm_get_auth_status(sw); in nvm_clear_auth_status()
102 static int nvm_validate_and_write(struct tb_switch *sw) in nvm_validate_and_write() argument
108 ret = tb_nvm_validate(sw->nvm); in nvm_validate_and_write()
112 ret = tb_nvm_write_headers(sw->nvm); in nvm_validate_and_write()
116 buf = sw->nvm->buf_data_start; in nvm_validate_and_write()
117 image_size = sw->nvm->buf_data_size; in nvm_validate_and_write()
119 if (tb_switch_is_usb4(sw)) in nvm_validate_and_write()
120 ret = usb4_switch_nvm_write(sw, 0, buf, image_size); in nvm_validate_and_write()
122 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size); in nvm_validate_and_write()
126 sw->nvm->flushed = true; in nvm_validate_and_write()
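Taken together, the nvm_validate_and_write() hits trace the whole flash write path: validate the pending image, write the headers, then dispatch on tb_switch_is_usb4() between the USB4 router operation and the legacy DMA port. A sketch with the unmatched return-on-error lines filled in (assumed from the usual kernel pattern):

static int nvm_validate_and_write(struct tb_switch *sw)
{
        unsigned int image_size;
        const u8 *buf;
        int ret;

        ret = tb_nvm_validate(sw->nvm);
        if (ret)
                return ret;

        ret = tb_nvm_write_headers(sw->nvm);
        if (ret)
                return ret;

        buf = sw->nvm->buf_data_start;
        image_size = sw->nvm->buf_data_size;

        /* USB4 routers use the router operation; older ones the DMA port */
        if (tb_switch_is_usb4(sw))
                ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
        else
                ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
        if (ret)
                return ret;

        sw->nvm->flushed = true;
        return 0;
}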
130 static int nvm_authenticate_host_dma_port(struct tb_switch *sw) in nvm_authenticate_host_dma_port() argument
139 if (!sw->safe_mode) { in nvm_authenticate_host_dma_port()
142 ret = tb_domain_disconnect_all_paths(sw->tb); in nvm_authenticate_host_dma_port()
149 ret = dma_port_flash_update_auth(sw->dma_port); in nvm_authenticate_host_dma_port()
157 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n"); in nvm_authenticate_host_dma_port()
158 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0) in nvm_authenticate_host_dma_port()
159 nvm_set_auth_status(sw, status); in nvm_authenticate_host_dma_port()
166 dma_port_power_cycle(sw->dma_port); in nvm_authenticate_host_dma_port()
170 static int nvm_authenticate_device_dma_port(struct tb_switch *sw) in nvm_authenticate_device_dma_port() argument
174 ret = dma_port_flash_update_auth(sw->dma_port); in nvm_authenticate_device_dma_port()
195 ret = dma_port_flash_update_auth_status(sw->dma_port, &status); in nvm_authenticate_device_dma_port()
200 tb_sw_warn(sw, "failed to authenticate NVM\n"); in nvm_authenticate_device_dma_port()
201 nvm_set_auth_status(sw, status); in nvm_authenticate_device_dma_port()
204 tb_sw_info(sw, "power cycling the switch now\n"); in nvm_authenticate_device_dma_port()
205 dma_port_power_cycle(sw->dma_port); in nvm_authenticate_device_dma_port()
215 static void nvm_authenticate_start_dma_port(struct tb_switch *sw) in nvm_authenticate_start_dma_port() argument
225 root_port = pcie_find_root_port(sw->tb->nhi->pdev); in nvm_authenticate_start_dma_port()
230 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw) in nvm_authenticate_complete_dma_port() argument
234 root_port = pcie_find_root_port(sw->tb->nhi->pdev); in nvm_authenticate_complete_dma_port()
239 static inline bool nvm_readable(struct tb_switch *sw) in nvm_readable() argument
241 if (tb_switch_is_usb4(sw)) { in nvm_readable()
248 return usb4_switch_nvm_sector_size(sw) > 0; in nvm_readable()
252 return !!sw->dma_port; in nvm_readable()
255 static inline bool nvm_upgradeable(struct tb_switch *sw) in nvm_upgradeable() argument
257 if (sw->no_nvm_upgrade) in nvm_upgradeable()
259 return nvm_readable(sw); in nvm_upgradeable()
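nvm_upgradeable() is a thin gate on top of nvm_readable(): the no_nvm_upgrade flag wins. Minimal sketch, assuming the unmatched line 258 is a plain return false:

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
        if (sw->no_nvm_upgrade)
                return false;
        return nvm_readable(sw);
}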
262 static int nvm_authenticate(struct tb_switch *sw, bool auth_only) in nvm_authenticate() argument
266 if (tb_switch_is_usb4(sw)) { in nvm_authenticate()
268 ret = usb4_switch_nvm_set_offset(sw, 0); in nvm_authenticate()
272 sw->nvm->authenticating = true; in nvm_authenticate()
273 return usb4_switch_nvm_authenticate(sw); in nvm_authenticate()
278 sw->nvm->authenticating = true; in nvm_authenticate()
279 if (!tb_route(sw)) { in nvm_authenticate()
280 nvm_authenticate_start_dma_port(sw); in nvm_authenticate()
281 ret = nvm_authenticate_host_dma_port(sw); in nvm_authenticate()
283 ret = nvm_authenticate_device_dma_port(sw); in nvm_authenticate()
300 int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, in tb_switch_nvm_read() argument
303 if (tb_switch_is_usb4(sw)) in tb_switch_nvm_read()
304 return usb4_switch_nvm_read(sw, address, buf, size); in tb_switch_nvm_read()
305 return dma_port_flash_read(sw->dma_port, address, buf, size); in tb_switch_nvm_read()
311 struct tb_switch *sw = tb_to_switch(nvm->dev); in nvm_read() local
314 pm_runtime_get_sync(&sw->dev); in nvm_read()
316 if (!mutex_trylock(&sw->tb->lock)) { in nvm_read()
321 ret = tb_switch_nvm_read(sw, offset, val, bytes); in nvm_read()
322 mutex_unlock(&sw->tb->lock); in nvm_read()
325 pm_runtime_mark_last_busy(&sw->dev); in nvm_read()
326 pm_runtime_put_autosuspend(&sw->dev); in nvm_read()
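The nvm_read() hits show the locking discipline used by the nvmem/sysfs entry points in this file: take a runtime PM reference first, then mutex_trylock() the domain lock and bail out if it is contended. A sketch of the whole callback; the restart_syscall() fallback and the out label are filled in from the upstream driver and should be read as an assumption:

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
        struct tb_nvm *nvm = priv;
        struct tb_switch *sw = tb_to_switch(nvm->dev);
        int ret;

        pm_runtime_get_sync(&sw->dev);

        /* Contended domain lock: let the syscall be restarted */
        if (!mutex_trylock(&sw->tb->lock)) {
                ret = restart_syscall();
                goto out;
        }

        ret = tb_switch_nvm_read(sw, offset, val, bytes);
        mutex_unlock(&sw->tb->lock);

out:
        pm_runtime_mark_last_busy(&sw->dev);
        pm_runtime_put_autosuspend(&sw->dev);

        return ret;
}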
334 struct tb_switch *sw = tb_to_switch(nvm->dev); in nvm_write() local
337 if (!mutex_trylock(&sw->tb->lock)) in nvm_write()
347 mutex_unlock(&sw->tb->lock); in nvm_write()
352 static int tb_switch_nvm_add(struct tb_switch *sw) in tb_switch_nvm_add() argument
357 if (!nvm_readable(sw)) in tb_switch_nvm_add()
360 nvm = tb_nvm_alloc(&sw->dev); in tb_switch_nvm_add()
375 if (!sw->safe_mode) { in tb_switch_nvm_add()
381 if (!sw->no_nvm_upgrade) { in tb_switch_nvm_add()
387 sw->nvm = nvm; in tb_switch_nvm_add()
391 tb_sw_dbg(sw, "NVM upgrade disabled\n"); in tb_switch_nvm_add()
392 sw->no_nvm_upgrade = true; in tb_switch_nvm_add()
399 static void tb_switch_nvm_remove(struct tb_switch *sw) in tb_switch_nvm_remove() argument
403 nvm = sw->nvm; in tb_switch_nvm_remove()
404 sw->nvm = NULL; in tb_switch_nvm_remove()
411 nvm_clear_auth_status(sw); in tb_switch_nvm_remove()
574 if (credits == 0 || port->sw->is_unplugged) in tb_port_add_nfc_credits()
581 if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port)) in tb_port_add_nfc_credits()
623 if (tb_switch_is_icm(port->sw)) in tb_port_unlock()
627 if (tb_switch_is_usb4(port->sw)) in tb_port_unlock()
704 tb_dbg(port->sw->tb, " Port %d: not implemented\n", in tb_init_port()
730 if (tb_switch_is_usb4(port->sw)) { in tb_init_port()
749 tb_dump_port(port->sw->tb, port); in tb_init_port()
829 const struct tb_switch *sw) in tb_switch_is_reachable() argument
832 return (tb_route(parent) & mask) == (tb_route(sw) & mask); in tb_switch_is_reachable()
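tb_switch_is_reachable() compares route strings, which encode one port number per byte per hop; masking both routes with (1ULL << depth * 8) - 1 compares just the prefix the two routers share. The mask line itself does not reference sw, so it is reproduced here from the upstream driver:

static bool tb_switch_is_reachable(const struct tb_switch *parent,
                                   const struct tb_switch *sw)
{
        /* One byte of route string per hop up to the parent's depth */
        u64 mask = (1ULL << parent->config.depth * 8) - 1;

        return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}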
858 if (prev->sw == end->sw) { in tb_next_port_on_path()
864 if (tb_switch_is_reachable(prev->sw, end->sw)) { in tb_next_port_on_path()
865 next = tb_port_at(tb_route(end->sw), prev->sw); in tb_next_port_on_path()
874 next = tb_upstream_port(prev->sw); in tb_next_port_on_path()
1229 if (tb_switch_is_usb4(port->sw)) { in tb_port_clx_supported()
1319 if (tb_switch_is_usb4(port->sw)) in tb_port_start_lane_initialization()
1494 if (tb_switch_is_usb4(port->sw)) in tb_dp_port_set_hops()
1560 static const char *tb_switch_generation_name(const struct tb_switch *sw) in tb_switch_generation_name() argument
1562 switch (sw->generation) { in tb_switch_generation_name()
1576 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) in tb_dump_switch() argument
1578 const struct tb_regs_switch_header *regs = &sw->config; in tb_dump_switch()
1581 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id, in tb_dump_switch()
1600 int tb_switch_reset(struct tb_switch *sw) in tb_switch_reset() argument
1604 if (sw->generation > 1) in tb_switch_reset()
1607 tb_sw_dbg(sw, "resetting switch\n"); in tb_switch_reset()
1609 res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2, in tb_switch_reset()
1613 res = tb_cfg_reset(sw->tb->ctl, tb_route(sw)); in tb_switch_reset()
1631 int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, in tb_switch_wait_for_bit() argument
1640 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); in tb_switch_wait_for_bit()
1660 static int tb_plug_events_active(struct tb_switch *sw, bool active) in tb_plug_events_active() argument
1665 if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw)) in tb_plug_events_active()
1668 sw->config.plug_events_delay = 0xff; in tb_plug_events_active()
1669 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1); in tb_plug_events_active()
1673 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); in tb_plug_events_active()
1679 switch (sw->config.device_id) { in tb_plug_events_active()
1690 if (!tb_switch_is_alpine_ridge(sw)) in tb_plug_events_active()
1696 return tb_sw_write(sw, &data, TB_CFG_SWITCH, in tb_plug_events_active()
1697 sw->cap_plug_events + 1, 1); in tb_plug_events_active()
1704 struct tb_switch *sw = tb_to_switch(dev); in authorized_show() local
1706 return sysfs_emit(buf, "%u\n", sw->authorized); in authorized_show()
1712 struct tb_switch *sw; in disapprove_switch() local
1714 sw = tb_to_switch(dev); in disapprove_switch()
1715 if (sw && sw->authorized) { in disapprove_switch()
1719 ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch); in disapprove_switch()
1723 ret = tb_domain_disapprove_switch(sw->tb, sw); in disapprove_switch()
1727 sw->authorized = 0; in disapprove_switch()
1728 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp); in disapprove_switch()
1734 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) in tb_switch_set_authorized() argument
1740 if (!mutex_trylock(&sw->tb->lock)) in tb_switch_set_authorized()
1743 if (!!sw->authorized == !!val) in tb_switch_set_authorized()
1749 if (tb_route(sw)) { in tb_switch_set_authorized()
1750 ret = disapprove_switch(&sw->dev, NULL); in tb_switch_set_authorized()
1757 if (sw->key) in tb_switch_set_authorized()
1758 ret = tb_domain_approve_switch_key(sw->tb, sw); in tb_switch_set_authorized()
1760 ret = tb_domain_approve_switch(sw->tb, sw); in tb_switch_set_authorized()
1765 if (sw->key) in tb_switch_set_authorized()
1766 ret = tb_domain_challenge_switch_key(sw->tb, sw); in tb_switch_set_authorized()
1774 sw->authorized = val; in tb_switch_set_authorized()
1779 sprintf(envp_string, "AUTHORIZED=%u", sw->authorized); in tb_switch_set_authorized()
1780 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp); in tb_switch_set_authorized()
1784 mutex_unlock(&sw->tb->lock); in tb_switch_set_authorized()
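Lines 1779-1780 are the tail of tb_switch_set_authorized(): on success the new state is recorded and user space is notified via a KOBJ_CHANGE uevent carrying AUTHORIZED=<val>. A sketch of that tail; the envp declarations are an assumption (13 bytes fits "AUTHORIZED=" plus one digit and the NUL):

        char envp_string[13];
        char *envp[] = { envp_string, NULL };

        /* ... approve/challenge steps from the hits above set ret ... */

        if (!ret) {
                sw->authorized = val;
                /* Tell user space the new value of .../authorized */
                sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
                kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
        }

        mutex_unlock(&sw->tb->lock);
        return ret;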
1792 struct tb_switch *sw = tb_to_switch(dev); in authorized_store() local
1802 pm_runtime_get_sync(&sw->dev); in authorized_store()
1803 ret = tb_switch_set_authorized(sw, val); in authorized_store()
1804 pm_runtime_mark_last_busy(&sw->dev); in authorized_store()
1805 pm_runtime_put_autosuspend(&sw->dev); in authorized_store()
1814 struct tb_switch *sw = tb_to_switch(dev); in boot_show() local
1816 return sysfs_emit(buf, "%u\n", sw->boot); in boot_show()
1823 struct tb_switch *sw = tb_to_switch(dev); in device_show() local
1825 return sysfs_emit(buf, "%#x\n", sw->device); in device_show()
1832 struct tb_switch *sw = tb_to_switch(dev); in device_name_show() local
1834 return sysfs_emit(buf, "%s\n", sw->device_name ?: ""); in device_name_show()
1841 struct tb_switch *sw = tb_to_switch(dev); in generation_show() local
1843 return sysfs_emit(buf, "%u\n", sw->generation); in generation_show()
1850 struct tb_switch *sw = tb_to_switch(dev); in key_show() local
1853 if (!mutex_trylock(&sw->tb->lock)) in key_show()
1856 if (sw->key) in key_show()
1857 ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); in key_show()
1861 mutex_unlock(&sw->tb->lock); in key_show()
1868 struct tb_switch *sw = tb_to_switch(dev); in key_store() local
1878 if (!mutex_trylock(&sw->tb->lock)) in key_store()
1881 if (sw->authorized) { in key_store()
1884 kfree(sw->key); in key_store()
1886 sw->key = NULL; in key_store()
1888 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL); in key_store()
1889 if (!sw->key) in key_store()
1894 mutex_unlock(&sw->tb->lock); in key_store()
1902 struct tb_switch *sw = tb_to_switch(dev); in speed_show() local
1904 return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed); in speed_show()
1917 struct tb_switch *sw = tb_to_switch(dev); in lanes_show() local
1919 return sysfs_emit(buf, "%u\n", sw->link_width); in lanes_show()
1932 struct tb_switch *sw = tb_to_switch(dev); in nvm_authenticate_show() local
1935 nvm_get_auth_status(sw, &status); in nvm_authenticate_show()
1942 struct tb_switch *sw = tb_to_switch(dev); in nvm_authenticate_sysfs() local
1945 pm_runtime_get_sync(&sw->dev); in nvm_authenticate_sysfs()
1947 if (!mutex_trylock(&sw->tb->lock)) { in nvm_authenticate_sysfs()
1952 if (sw->no_nvm_upgrade) { in nvm_authenticate_sysfs()
1958 if (!sw->nvm) { in nvm_authenticate_sysfs()
1968 nvm_clear_auth_status(sw); in nvm_authenticate_sysfs()
1975 ret = nvm_authenticate(sw, true); in nvm_authenticate_sysfs()
1977 if (!sw->nvm->flushed) { in nvm_authenticate_sysfs()
1978 if (!sw->nvm->buf) { in nvm_authenticate_sysfs()
1983 ret = nvm_validate_and_write(sw); in nvm_authenticate_sysfs()
1989 ret = tb_lc_force_power(sw); in nvm_authenticate_sysfs()
1991 ret = nvm_authenticate(sw, false); in nvm_authenticate_sysfs()
1997 mutex_unlock(&sw->tb->lock); in nvm_authenticate_sysfs()
1999 pm_runtime_mark_last_busy(&sw->dev); in nvm_authenticate_sysfs()
2000 pm_runtime_put_autosuspend(&sw->dev); in nvm_authenticate_sysfs()
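The nvm_authenticate_sysfs() hits outline a small state machine driven by the value written to the attribute: clear any stale status, then either authenticate only, or flush the pending image with nvm_validate_and_write() and optionally authenticate (via tb_lc_force_power() when disconnect-style authentication was requested). A sketch of the core branch; the WRITE_ONLY / WRITE_AND_AUTHENTICATE / AUTHENTICATE_ONLY values and the unmatched branches follow the upstream driver and are an assumption here:

        /* Always clear any stale authentication status first */
        nvm_clear_auth_status(sw);

        if (val > 0) {
                if (val == AUTHENTICATE_ONLY) {
                        /* Authenticate whatever is already in the NVM */
                        ret = disconnect ? -EINVAL : nvm_authenticate(sw, true);
                } else {
                        if (!sw->nvm->flushed) {
                                if (!sw->nvm->buf) {
                                        ret = -EINVAL;
                                        goto exit_unlock;
                                }

                                ret = nvm_validate_and_write(sw);
                                if (ret || val == WRITE_ONLY)
                                        goto exit_unlock;
                        }
                        if (val == WRITE_AND_AUTHENTICATE) {
                                if (disconnect)
                                        ret = tb_lc_force_power(sw);
                                else
                                        ret = nvm_authenticate(sw, false);
                        }
                }
        }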
2034 struct tb_switch *sw = tb_to_switch(dev); in nvm_version_show() local
2037 if (!mutex_trylock(&sw->tb->lock)) in nvm_version_show()
2040 if (sw->safe_mode) in nvm_version_show()
2042 else if (!sw->nvm) in nvm_version_show()
2045 ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); in nvm_version_show()
2047 mutex_unlock(&sw->tb->lock); in nvm_version_show()
2056 struct tb_switch *sw = tb_to_switch(dev); in vendor_show() local
2058 return sysfs_emit(buf, "%#x\n", sw->vendor); in vendor_show()
2065 struct tb_switch *sw = tb_to_switch(dev); in vendor_name_show() local
2067 return sysfs_emit(buf, "%s\n", sw->vendor_name ?: ""); in vendor_name_show()
2074 struct tb_switch *sw = tb_to_switch(dev); in unique_id_show() local
2076 return sysfs_emit(buf, "%pUb\n", sw->uuid); in unique_id_show()
2104 struct tb_switch *sw = tb_to_switch(dev); in switch_attr_is_visible() local
2107 if (sw->tb->security_level == TB_SECURITY_NOPCIE || in switch_attr_is_visible()
2108 sw->tb->security_level == TB_SECURITY_DPONLY) in switch_attr_is_visible()
2111 if (!sw->device) in switch_attr_is_visible()
2114 if (!sw->device_name) in switch_attr_is_visible()
2117 if (!sw->vendor) in switch_attr_is_visible()
2120 if (!sw->vendor_name) in switch_attr_is_visible()
2123 if (tb_route(sw) && in switch_attr_is_visible()
2124 sw->tb->security_level == TB_SECURITY_SECURE && in switch_attr_is_visible()
2125 sw->security_level == TB_SECURITY_SECURE) in switch_attr_is_visible()
2132 if (tb_route(sw)) in switch_attr_is_visible()
2136 if (nvm_upgradeable(sw)) in switch_attr_is_visible()
2140 if (nvm_readable(sw)) in switch_attr_is_visible()
2144 if (tb_route(sw)) in switch_attr_is_visible()
2148 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) in switch_attr_is_visible()
2153 return sw->safe_mode ? 0 : attr->mode; in switch_attr_is_visible()
2168 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_release() local
2171 dma_port_free(sw->dma_port); in tb_switch_release()
2173 tb_switch_for_each_port(sw, port) { in tb_switch_release()
2178 kfree(sw->uuid); in tb_switch_release()
2179 kfree(sw->device_name); in tb_switch_release()
2180 kfree(sw->vendor_name); in tb_switch_release()
2181 kfree(sw->ports); in tb_switch_release()
2182 kfree(sw->drom); in tb_switch_release()
2183 kfree(sw->key); in tb_switch_release()
2184 kfree(sw); in tb_switch_release()
2189 const struct tb_switch *sw = tb_to_switch(dev); in tb_switch_uevent() local
2192 if (sw->config.thunderbolt_version == USB4_VERSION_1_0) { in tb_switch_uevent()
2197 if (!tb_route(sw)) { in tb_switch_uevent()
2204 tb_switch_for_each_port(sw, port) { in tb_switch_uevent()
2226 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_runtime_suspend() local
2227 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; in tb_switch_runtime_suspend()
2230 return cm_ops->runtime_suspend_switch(sw); in tb_switch_runtime_suspend()
2237 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_runtime_resume() local
2238 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; in tb_switch_runtime_resume()
2241 return cm_ops->runtime_resume_switch(sw); in tb_switch_runtime_resume()
2257 static int tb_switch_get_generation(struct tb_switch *sw) in tb_switch_get_generation() argument
2259 switch (sw->config.device_id) { in tb_switch_get_generation()
2288 if (tb_switch_is_usb4(sw)) in tb_switch_get_generation()
2295 tb_sw_warn(sw, "unsupported switch device id %#x\n", in tb_switch_get_generation()
2296 sw->config.device_id); in tb_switch_get_generation()
2301 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) in tb_switch_exceeds_max_depth() argument
2305 if (tb_switch_is_usb4(sw) || in tb_switch_exceeds_max_depth()
2306 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) in tb_switch_exceeds_max_depth()
2331 struct tb_switch *sw; in tb_switch_alloc() local
2350 sw = kzalloc(sizeof(*sw), GFP_KERNEL); in tb_switch_alloc()
2351 if (!sw) in tb_switch_alloc()
2354 sw->tb = tb; in tb_switch_alloc()
2355 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5); in tb_switch_alloc()
2359 sw->generation = tb_switch_get_generation(sw); in tb_switch_alloc()
2362 tb_dump_switch(tb, sw); in tb_switch_alloc()
2365 sw->config.upstream_port_number = upstream_port; in tb_switch_alloc()
2366 sw->config.depth = depth; in tb_switch_alloc()
2367 sw->config.route_hi = upper_32_bits(route); in tb_switch_alloc()
2368 sw->config.route_lo = lower_32_bits(route); in tb_switch_alloc()
2369 sw->config.enabled = 0; in tb_switch_alloc()
2372 if (tb_switch_exceeds_max_depth(sw, depth)) { in tb_switch_alloc()
2378 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), in tb_switch_alloc()
2380 if (!sw->ports) { in tb_switch_alloc()
2385 for (i = 0; i <= sw->config.max_port_number; i++) { in tb_switch_alloc()
2387 sw->ports[i].sw = sw; in tb_switch_alloc()
2388 sw->ports[i].port = i; in tb_switch_alloc()
2392 ida_init(&sw->ports[i].in_hopids); in tb_switch_alloc()
2393 ida_init(&sw->ports[i].out_hopids); in tb_switch_alloc()
2397 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); in tb_switch_alloc()
2399 sw->cap_plug_events = ret; in tb_switch_alloc()
2401 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2); in tb_switch_alloc()
2403 sw->cap_vsec_tmu = ret; in tb_switch_alloc()
2405 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); in tb_switch_alloc()
2407 sw->cap_lc = ret; in tb_switch_alloc()
2409 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP); in tb_switch_alloc()
2411 sw->cap_lp = ret; in tb_switch_alloc()
2415 sw->authorized = true; in tb_switch_alloc()
2417 device_initialize(&sw->dev); in tb_switch_alloc()
2418 sw->dev.parent = parent; in tb_switch_alloc()
2419 sw->dev.bus = &tb_bus_type; in tb_switch_alloc()
2420 sw->dev.type = &tb_switch_type; in tb_switch_alloc()
2421 sw->dev.groups = switch_groups; in tb_switch_alloc()
2422 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); in tb_switch_alloc()
2424 return sw; in tb_switch_alloc()
2427 kfree(sw->ports); in tb_switch_alloc()
2428 kfree(sw); in tb_switch_alloc()
2450 struct tb_switch *sw; in tb_switch_alloc_safe_mode() local
2452 sw = kzalloc(sizeof(*sw), GFP_KERNEL); in tb_switch_alloc_safe_mode()
2453 if (!sw) in tb_switch_alloc_safe_mode()
2456 sw->tb = tb; in tb_switch_alloc_safe_mode()
2457 sw->config.depth = tb_route_length(route); in tb_switch_alloc_safe_mode()
2458 sw->config.route_hi = upper_32_bits(route); in tb_switch_alloc_safe_mode()
2459 sw->config.route_lo = lower_32_bits(route); in tb_switch_alloc_safe_mode()
2460 sw->safe_mode = true; in tb_switch_alloc_safe_mode()
2462 device_initialize(&sw->dev); in tb_switch_alloc_safe_mode()
2463 sw->dev.parent = parent; in tb_switch_alloc_safe_mode()
2464 sw->dev.bus = &tb_bus_type; in tb_switch_alloc_safe_mode()
2465 sw->dev.type = &tb_switch_type; in tb_switch_alloc_safe_mode()
2466 sw->dev.groups = switch_groups; in tb_switch_alloc_safe_mode()
2467 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); in tb_switch_alloc_safe_mode()
2469 return sw; in tb_switch_alloc_safe_mode()
2483 int tb_switch_configure(struct tb_switch *sw) in tb_switch_configure() argument
2485 struct tb *tb = sw->tb; in tb_switch_configure()
2489 route = tb_route(sw); in tb_switch_configure()
2492 sw->config.enabled ? "restoring" : "initializing", route, in tb_switch_configure()
2493 tb_route_length(route), sw->config.upstream_port_number); in tb_switch_configure()
2495 sw->config.enabled = 1; in tb_switch_configure()
2497 if (tb_switch_is_usb4(sw)) { in tb_switch_configure()
2503 sw->config.cmuv = USB4_VERSION_1_0; in tb_switch_configure()
2504 sw->config.plug_events_delay = 0xa; in tb_switch_configure()
2507 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, in tb_switch_configure()
2512 ret = usb4_switch_setup(sw); in tb_switch_configure()
2514 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) in tb_switch_configure()
2515 tb_sw_warn(sw, "unknown switch vendor id %#x\n", in tb_switch_configure()
2516 sw->config.vendor_id); in tb_switch_configure()
2518 if (!sw->cap_plug_events) { in tb_switch_configure()
2519 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); in tb_switch_configure()
2524 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, in tb_switch_configure()
2530 return tb_plug_events_active(sw, true); in tb_switch_configure()
2533 static int tb_switch_set_uuid(struct tb_switch *sw) in tb_switch_set_uuid() argument
2539 if (sw->uuid) in tb_switch_set_uuid()
2542 if (tb_switch_is_usb4(sw)) { in tb_switch_set_uuid()
2543 ret = usb4_switch_read_uid(sw, &sw->uid); in tb_switch_set_uuid()
2552 ret = tb_lc_read_uuid(sw, uuid); in tb_switch_set_uuid()
2567 uuid[0] = sw->uid & 0xffffffff; in tb_switch_set_uuid()
2568 uuid[1] = (sw->uid >> 32) & 0xffffffff; in tb_switch_set_uuid()
2573 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); in tb_switch_set_uuid()
2574 if (!sw->uuid) in tb_switch_set_uuid()
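Lines 2567-2568 show the UUID fallback in tb_switch_set_uuid(): when no fused UUID can be read from the link controller, a synthetic one is derived from the 64-bit UID. Sketch of that tail; the 0xffffffff filler words in uuid[2]/uuid[3] are quoted from the upstream driver as an assumption:

        /* No fused UUID: synthesize one from the 64-bit UID */
        uuid[0] = sw->uid & 0xffffffff;
        uuid[1] = (sw->uid >> 32) & 0xffffffff;
        uuid[2] = 0xffffffff;
        uuid[3] = 0xffffffff;

        sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
        if (!sw->uuid)
                return -ENOMEM;
        return 0;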
2579 static int tb_switch_add_dma_port(struct tb_switch *sw) in tb_switch_add_dma_port() argument
2584 switch (sw->generation) { in tb_switch_add_dma_port()
2587 if (tb_route(sw)) in tb_switch_add_dma_port()
2593 ret = tb_switch_set_uuid(sw); in tb_switch_add_dma_port()
2603 if (!sw->safe_mode) in tb_switch_add_dma_port()
2608 if (sw->no_nvm_upgrade) in tb_switch_add_dma_port()
2611 if (tb_switch_is_usb4(sw)) { in tb_switch_add_dma_port()
2612 ret = usb4_switch_nvm_authenticate_status(sw, &status); in tb_switch_add_dma_port()
2617 tb_sw_info(sw, "switch flash authentication failed\n"); in tb_switch_add_dma_port()
2618 nvm_set_auth_status(sw, status); in tb_switch_add_dma_port()
2625 if (!tb_route(sw) && !tb_switch_is_icm(sw)) in tb_switch_add_dma_port()
2628 sw->dma_port = dma_port_alloc(sw); in tb_switch_add_dma_port()
2629 if (!sw->dma_port) in tb_switch_add_dma_port()
2638 nvm_get_auth_status(sw, &status); in tb_switch_add_dma_port()
2640 if (!tb_route(sw)) in tb_switch_add_dma_port()
2641 nvm_authenticate_complete_dma_port(sw); in tb_switch_add_dma_port()
2650 ret = dma_port_flash_update_auth_status(sw->dma_port, &status); in tb_switch_add_dma_port()
2655 if (!tb_route(sw)) in tb_switch_add_dma_port()
2656 nvm_authenticate_complete_dma_port(sw); in tb_switch_add_dma_port()
2659 tb_sw_info(sw, "switch flash authentication failed\n"); in tb_switch_add_dma_port()
2660 nvm_set_auth_status(sw, status); in tb_switch_add_dma_port()
2663 tb_sw_info(sw, "power cycling the switch now\n"); in tb_switch_add_dma_port()
2664 dma_port_power_cycle(sw->dma_port); in tb_switch_add_dma_port()
2673 static void tb_switch_default_link_ports(struct tb_switch *sw) in tb_switch_default_link_ports() argument
2677 for (i = 1; i <= sw->config.max_port_number; i++) { in tb_switch_default_link_ports()
2678 struct tb_port *port = &sw->ports[i]; in tb_switch_default_link_ports()
2685 if (i == sw->config.max_port_number || in tb_switch_default_link_ports()
2686 !tb_port_is_null(&sw->ports[i + 1])) in tb_switch_default_link_ports()
2690 subordinate = &sw->ports[i + 1]; in tb_switch_default_link_ports()
2697 tb_sw_dbg(sw, "linked ports %d <-> %d\n", in tb_switch_default_link_ports()
2703 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) in tb_switch_lane_bonding_possible() argument
2705 const struct tb_port *up = tb_upstream_port(sw); in tb_switch_lane_bonding_possible()
2710 if (tb_switch_is_usb4(sw)) in tb_switch_lane_bonding_possible()
2711 return usb4_switch_lane_bonding_possible(sw); in tb_switch_lane_bonding_possible()
2712 return tb_lc_lane_bonding_possible(sw); in tb_switch_lane_bonding_possible()
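tb_switch_lane_bonding_possible() is another USB4-vs-link-controller dispatch, guarded by a check that the second lane adapter exists and has a remote. Sketch; the guard is not among the refs:sw hits and is filled in as an assumption:

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
        const struct tb_port *up = tb_upstream_port(sw);

        /* Assumption: both lanes and their remotes must be present */
        if (!up->dual_link_port || !up->dual_link_port->remote)
                return false;

        if (tb_switch_is_usb4(sw))
                return usb4_switch_lane_bonding_possible(sw);
        return tb_lc_lane_bonding_possible(sw);
}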
2715 static int tb_switch_update_link_attributes(struct tb_switch *sw) in tb_switch_update_link_attributes() argument
2721 if (!tb_route(sw) || tb_switch_is_icm(sw)) in tb_switch_update_link_attributes()
2724 up = tb_upstream_port(sw); in tb_switch_update_link_attributes()
2729 if (sw->link_speed != ret) in tb_switch_update_link_attributes()
2731 sw->link_speed = ret; in tb_switch_update_link_attributes()
2736 if (sw->link_width != ret) in tb_switch_update_link_attributes()
2738 sw->link_width = ret; in tb_switch_update_link_attributes()
2741 if (device_is_registered(&sw->dev) && change) in tb_switch_update_link_attributes()
2742 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); in tb_switch_update_link_attributes()
2755 int tb_switch_lane_bonding_enable(struct tb_switch *sw) in tb_switch_lane_bonding_enable() argument
2757 struct tb_switch *parent = tb_to_switch(sw->dev.parent); in tb_switch_lane_bonding_enable()
2759 u64 route = tb_route(sw); in tb_switch_lane_bonding_enable()
2765 if (!tb_switch_lane_bonding_possible(sw)) in tb_switch_lane_bonding_enable()
2768 up = tb_upstream_port(sw); in tb_switch_lane_bonding_enable()
2796 tb_switch_update_link_attributes(sw); in tb_switch_lane_bonding_enable()
2798 tb_sw_dbg(sw, "lane bonding enabled\n"); in tb_switch_lane_bonding_enable()
2809 void tb_switch_lane_bonding_disable(struct tb_switch *sw) in tb_switch_lane_bonding_disable() argument
2811 struct tb_switch *parent = tb_to_switch(sw->dev.parent); in tb_switch_lane_bonding_disable()
2814 if (!tb_route(sw)) in tb_switch_lane_bonding_disable()
2817 up = tb_upstream_port(sw); in tb_switch_lane_bonding_disable()
2821 down = tb_port_at(tb_route(sw), parent); in tb_switch_lane_bonding_disable()
2831 tb_sw_warn(sw, "timeout disabling lane bonding\n"); in tb_switch_lane_bonding_disable()
2835 tb_switch_update_link_attributes(sw); in tb_switch_lane_bonding_disable()
2837 tb_sw_dbg(sw, "lane bonding disabled\n"); in tb_switch_lane_bonding_disable()
2852 int tb_switch_configure_link(struct tb_switch *sw) in tb_switch_configure_link() argument
2857 if (!tb_route(sw) || tb_switch_is_icm(sw)) in tb_switch_configure_link()
2860 up = tb_upstream_port(sw); in tb_switch_configure_link()
2861 if (tb_switch_is_usb4(up->sw)) in tb_switch_configure_link()
2869 if (tb_switch_is_usb4(down->sw)) in tb_switch_configure_link()
2881 void tb_switch_unconfigure_link(struct tb_switch *sw) in tb_switch_unconfigure_link() argument
2885 if (sw->is_unplugged) in tb_switch_unconfigure_link()
2887 if (!tb_route(sw) || tb_switch_is_icm(sw)) in tb_switch_unconfigure_link()
2890 up = tb_upstream_port(sw); in tb_switch_unconfigure_link()
2891 if (tb_switch_is_usb4(up->sw)) in tb_switch_unconfigure_link()
2897 if (tb_switch_is_usb4(down->sw)) in tb_switch_unconfigure_link()
2903 static void tb_switch_credits_init(struct tb_switch *sw) in tb_switch_credits_init() argument
2905 if (tb_switch_is_icm(sw)) in tb_switch_credits_init()
2907 if (!tb_switch_is_usb4(sw)) in tb_switch_credits_init()
2909 if (usb4_switch_credits_init(sw)) in tb_switch_credits_init()
2910 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n"); in tb_switch_credits_init()
2913 static int tb_switch_port_hotplug_enable(struct tb_switch *sw) in tb_switch_port_hotplug_enable() argument
2917 if (tb_switch_is_icm(sw)) in tb_switch_port_hotplug_enable()
2920 tb_switch_for_each_port(sw, port) { in tb_switch_port_hotplug_enable()
2945 int tb_switch_add(struct tb_switch *sw) in tb_switch_add() argument
2956 ret = tb_switch_add_dma_port(sw); in tb_switch_add()
2958 dev_err(&sw->dev, "failed to add DMA port\n"); in tb_switch_add()
2962 if (!sw->safe_mode) { in tb_switch_add()
2963 tb_switch_credits_init(sw); in tb_switch_add()
2966 ret = tb_drom_read(sw); in tb_switch_add()
2968 dev_warn(&sw->dev, "reading DROM failed: %d\n", ret); in tb_switch_add()
2969 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); in tb_switch_add()
2971 tb_check_quirks(sw); in tb_switch_add()
2973 ret = tb_switch_set_uuid(sw); in tb_switch_add()
2975 dev_err(&sw->dev, "failed to set UUID\n"); in tb_switch_add()
2979 for (i = 0; i <= sw->config.max_port_number; i++) { in tb_switch_add()
2980 if (sw->ports[i].disabled) { in tb_switch_add()
2981 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n"); in tb_switch_add()
2984 ret = tb_init_port(&sw->ports[i]); in tb_switch_add()
2986 dev_err(&sw->dev, "failed to initialize port %d\n", i); in tb_switch_add()
2991 tb_switch_default_link_ports(sw); in tb_switch_add()
2993 ret = tb_switch_update_link_attributes(sw); in tb_switch_add()
2997 ret = tb_switch_tmu_init(sw); in tb_switch_add()
3002 ret = tb_switch_port_hotplug_enable(sw); in tb_switch_add()
3006 ret = device_add(&sw->dev); in tb_switch_add()
3008 dev_err(&sw->dev, "failed to add device: %d\n", ret); in tb_switch_add()
3012 if (tb_route(sw)) { in tb_switch_add()
3013 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n", in tb_switch_add()
3014 sw->vendor, sw->device); in tb_switch_add()
3015 if (sw->vendor_name && sw->device_name) in tb_switch_add()
3016 dev_info(&sw->dev, "%s %s\n", sw->vendor_name, in tb_switch_add()
3017 sw->device_name); in tb_switch_add()
3020 ret = usb4_switch_add_ports(sw); in tb_switch_add()
3022 dev_err(&sw->dev, "failed to add USB4 ports\n"); in tb_switch_add()
3026 ret = tb_switch_nvm_add(sw); in tb_switch_add()
3028 dev_err(&sw->dev, "failed to add NVM devices\n"); in tb_switch_add()
3037 device_init_wakeup(&sw->dev, true); in tb_switch_add()
3039 pm_runtime_set_active(&sw->dev); in tb_switch_add()
3040 if (sw->rpm) { in tb_switch_add()
3041 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); in tb_switch_add()
3042 pm_runtime_use_autosuspend(&sw->dev); in tb_switch_add()
3043 pm_runtime_mark_last_busy(&sw->dev); in tb_switch_add()
3044 pm_runtime_enable(&sw->dev); in tb_switch_add()
3045 pm_request_autosuspend(&sw->dev); in tb_switch_add()
3048 tb_switch_debugfs_init(sw); in tb_switch_add()
3052 usb4_switch_remove_ports(sw); in tb_switch_add()
3054 device_del(&sw->dev); in tb_switch_add()
3067 void tb_switch_remove(struct tb_switch *sw) in tb_switch_remove() argument
3071 tb_switch_debugfs_remove(sw); in tb_switch_remove()
3073 if (sw->rpm) { in tb_switch_remove()
3074 pm_runtime_get_sync(&sw->dev); in tb_switch_remove()
3075 pm_runtime_disable(&sw->dev); in tb_switch_remove()
3079 tb_switch_for_each_port(sw, port) { in tb_switch_remove()
3081 tb_switch_remove(port->remote->sw); in tb_switch_remove()
3092 if (!sw->is_unplugged) in tb_switch_remove()
3093 tb_plug_events_active(sw, false); in tb_switch_remove()
3095 tb_switch_nvm_remove(sw); in tb_switch_remove()
3096 usb4_switch_remove_ports(sw); in tb_switch_remove()
3098 if (tb_route(sw)) in tb_switch_remove()
3099 dev_info(&sw->dev, "device disconnected\n"); in tb_switch_remove()
3100 device_unregister(&sw->dev); in tb_switch_remove()
3107 void tb_sw_set_unplugged(struct tb_switch *sw) in tb_sw_set_unplugged() argument
3111 if (sw == sw->tb->root_switch) { in tb_sw_set_unplugged()
3112 tb_sw_WARN(sw, "cannot unplug root switch\n"); in tb_sw_set_unplugged()
3115 if (sw->is_unplugged) { in tb_sw_set_unplugged()
3116 tb_sw_WARN(sw, "is_unplugged already set\n"); in tb_sw_set_unplugged()
3119 sw->is_unplugged = true; in tb_sw_set_unplugged()
3120 tb_switch_for_each_port(sw, port) { in tb_sw_set_unplugged()
3122 tb_sw_set_unplugged(port->remote->sw); in tb_sw_set_unplugged()
3128 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) in tb_switch_set_wake() argument
3131 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); in tb_switch_set_wake()
3133 tb_sw_dbg(sw, "disabling wakeup\n"); in tb_switch_set_wake()
3135 if (tb_switch_is_usb4(sw)) in tb_switch_set_wake()
3136 return usb4_switch_set_wake(sw, flags); in tb_switch_set_wake()
3137 return tb_lc_set_wake(sw, flags); in tb_switch_set_wake()
3140 int tb_switch_resume(struct tb_switch *sw) in tb_switch_resume() argument
3145 tb_sw_dbg(sw, "resuming switch\n"); in tb_switch_resume()
3151 if (tb_route(sw)) { in tb_switch_resume()
3159 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw)); in tb_switch_resume()
3161 tb_sw_info(sw, "switch not present anymore\n"); in tb_switch_resume()
3166 if (!sw->uid) in tb_switch_resume()
3169 if (tb_switch_is_usb4(sw)) in tb_switch_resume()
3170 err = usb4_switch_read_uid(sw, &uid); in tb_switch_resume()
3172 err = tb_drom_read_uid_only(sw, &uid); in tb_switch_resume()
3174 tb_sw_warn(sw, "uid read failed\n"); in tb_switch_resume()
3177 if (sw->uid != uid) { in tb_switch_resume()
3178 tb_sw_info(sw, in tb_switch_resume()
3180 sw->uid, uid); in tb_switch_resume()
3185 err = tb_switch_configure(sw); in tb_switch_resume()
3190 tb_switch_set_wake(sw, 0); in tb_switch_resume()
3192 err = tb_switch_tmu_init(sw); in tb_switch_resume()
3197 tb_switch_for_each_port(sw, port) { in tb_switch_resume()
3208 tb_sw_set_unplugged(port->remote->sw); in tb_switch_resume()
3218 if (port->remote && tb_switch_resume(port->remote->sw)) { in tb_switch_resume()
3221 tb_sw_set_unplugged(port->remote->sw); in tb_switch_resume()
3238 void tb_switch_suspend(struct tb_switch *sw, bool runtime) in tb_switch_suspend() argument
3244 tb_sw_dbg(sw, "suspending switch\n"); in tb_switch_suspend()
3251 if (tb_switch_is_clx_enabled(sw, TB_CL1)) { in tb_switch_suspend()
3252 if (tb_switch_disable_clx(sw, TB_CL1)) in tb_switch_suspend()
3253 tb_sw_warn(sw, "failed to disable %s on upstream port\n", in tb_switch_suspend()
3257 err = tb_plug_events_active(sw, false); in tb_switch_suspend()
3261 tb_switch_for_each_port(sw, port) { in tb_switch_suspend()
3263 tb_switch_suspend(port->remote->sw, runtime); in tb_switch_suspend()
3271 } else if (device_may_wakeup(&sw->dev)) { in tb_switch_suspend()
3275 tb_switch_set_wake(sw, flags); in tb_switch_suspend()
3277 if (tb_switch_is_usb4(sw)) in tb_switch_suspend()
3278 usb4_switch_set_sleep(sw); in tb_switch_suspend()
3280 tb_lc_set_sleep(sw); in tb_switch_suspend()
3291 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) in tb_switch_query_dp_resource() argument
3293 if (tb_switch_is_usb4(sw)) in tb_switch_query_dp_resource()
3294 return usb4_switch_query_dp_resource(sw, in); in tb_switch_query_dp_resource()
3295 return tb_lc_dp_sink_query(sw, in); in tb_switch_query_dp_resource()
3307 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) in tb_switch_alloc_dp_resource() argument
3311 if (tb_switch_is_usb4(sw)) in tb_switch_alloc_dp_resource()
3312 ret = usb4_switch_alloc_dp_resource(sw, in); in tb_switch_alloc_dp_resource()
3314 ret = tb_lc_dp_sink_alloc(sw, in); in tb_switch_alloc_dp_resource()
3317 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n", in tb_switch_alloc_dp_resource()
3320 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port); in tb_switch_alloc_dp_resource()
3333 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) in tb_switch_dealloc_dp_resource() argument
3337 if (tb_switch_is_usb4(sw)) in tb_switch_dealloc_dp_resource()
3338 ret = usb4_switch_dealloc_dp_resource(sw, in); in tb_switch_dealloc_dp_resource()
3340 ret = tb_lc_dp_sink_dealloc(sw, in); in tb_switch_dealloc_dp_resource()
3343 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", in tb_switch_dealloc_dp_resource()
3346 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port); in tb_switch_dealloc_dp_resource()
3359 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_match() local
3362 if (!sw) in tb_switch_match()
3364 if (sw->tb != lookup->tb) in tb_switch_match()
3368 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); in tb_switch_match()
3371 return sw->config.route_lo == lower_32_bits(lookup->route) && in tb_switch_match()
3372 sw->config.route_hi == upper_32_bits(lookup->route); in tb_switch_match()
3377 return !sw->depth; in tb_switch_match()
3379 return sw->link == lookup->link && sw->depth == lookup->depth; in tb_switch_match()
3464 struct tb_port *tb_switch_find_port(struct tb_switch *sw, in tb_switch_find_port() argument
3469 tb_switch_for_each_port(sw, port) { in tb_switch_find_port()
3477 static int tb_switch_pm_secondary_resolve(struct tb_switch *sw) in tb_switch_pm_secondary_resolve() argument
3479 struct tb_switch *parent = tb_switch_parent(sw); in tb_switch_pm_secondary_resolve()
3483 if (!tb_route(sw)) in tb_switch_pm_secondary_resolve()
3486 up = tb_upstream_port(sw); in tb_switch_pm_secondary_resolve()
3487 down = tb_port_at(tb_route(sw), parent); in tb_switch_pm_secondary_resolve()
3495 static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx) in __tb_switch_enable_clx() argument
3497 struct tb_switch *parent = tb_switch_parent(sw); in __tb_switch_enable_clx()
3502 if (!tb_switch_is_clx_supported(sw)) in __tb_switch_enable_clx()
3509 if (!tb_route(sw)) in __tb_switch_enable_clx()
3516 ret = tb_switch_pm_secondary_resolve(sw); in __tb_switch_enable_clx()
3520 up = tb_upstream_port(sw); in __tb_switch_enable_clx()
3521 down = tb_port_at(tb_route(sw), parent); in __tb_switch_enable_clx()
3544 ret = tb_switch_mask_clx_objections(sw); in __tb_switch_enable_clx()
3551 sw->clx = clx; in __tb_switch_enable_clx()
3571 int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx) in tb_switch_enable_clx() argument
3573 struct tb_switch *root_sw = sw->tb->root_switch; in tb_switch_enable_clx()
3588 return __tb_switch_enable_clx(sw, clx); in tb_switch_enable_clx()
3595 static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx) in __tb_switch_disable_clx() argument
3597 struct tb_switch *parent = tb_switch_parent(sw); in __tb_switch_disable_clx()
3601 if (!tb_switch_is_clx_supported(sw)) in __tb_switch_disable_clx()
3608 if (!tb_route(sw)) in __tb_switch_disable_clx()
3615 up = tb_upstream_port(sw); in __tb_switch_disable_clx()
3616 down = tb_port_at(tb_route(sw), parent); in __tb_switch_disable_clx()
3625 sw->clx = TB_CLX_DISABLE; in __tb_switch_disable_clx()
3638 int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx) in tb_switch_disable_clx() argument
3646 return __tb_switch_disable_clx(sw, clx); in tb_switch_disable_clx()
3661 int tb_switch_mask_clx_objections(struct tb_switch *sw) in tb_switch_mask_clx_objections() argument
3663 int up_port = sw->config.upstream_port_number; in tb_switch_mask_clx_objections()
3668 if (!tb_switch_is_titan_ridge(sw)) in tb_switch_mask_clx_objections()
3671 if (!tb_route(sw)) in tb_switch_mask_clx_objections()
3691 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, in tb_switch_mask_clx_objections()
3692 sw->cap_lp + offset, ARRAY_SIZE(val)); in tb_switch_mask_clx_objections()
3701 return tb_sw_write(sw, &val, TB_CFG_SWITCH, in tb_switch_mask_clx_objections()
3702 sw->cap_lp + offset, ARRAY_SIZE(val)); in tb_switch_mask_clx_objections()
3709 static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge, in tb_switch_pcie_bridge_write() argument
3715 if (sw->generation != 3) in tb_switch_pcie_bridge_write()
3718 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA; in tb_switch_pcie_bridge_write()
3719 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1); in tb_switch_pcie_bridge_write()
3730 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD; in tb_switch_pcie_bridge_write()
3732 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1); in tb_switch_pcie_bridge_write()
3736 ret = tb_switch_wait_for_bit(sw, offset, in tb_switch_pcie_bridge_write()
3741 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); in tb_switch_pcie_bridge_write()
3760 int tb_switch_pcie_l1_enable(struct tb_switch *sw) in tb_switch_pcie_l1_enable() argument
3762 struct tb_switch *parent = tb_switch_parent(sw); in tb_switch_pcie_l1_enable()
3765 if (!tb_route(sw)) in tb_switch_pcie_l1_enable()
3768 if (!tb_switch_is_titan_ridge(sw)) in tb_switch_pcie_l1_enable()
3776 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1); in tb_switch_pcie_l1_enable()
3781 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1); in tb_switch_pcie_l1_enable()
3794 int tb_switch_xhci_connect(struct tb_switch *sw) in tb_switch_xhci_connect() argument
3799 if (sw->generation != 3) in tb_switch_xhci_connect()
3802 port1 = &sw->ports[1]; in tb_switch_xhci_connect()
3803 port3 = &sw->ports[3]; in tb_switch_xhci_connect()
3805 if (tb_switch_is_alpine_ridge(sw)) { in tb_switch_xhci_connect()
3821 } else if (tb_switch_is_titan_ridge(sw)) { in tb_switch_xhci_connect()
3838 void tb_switch_xhci_disconnect(struct tb_switch *sw) in tb_switch_xhci_disconnect() argument
3840 if (sw->generation == 3) { in tb_switch_xhci_disconnect()
3841 struct tb_port *port1 = &sw->ports[1]; in tb_switch_xhci_disconnect()
3842 struct tb_port *port3 = &sw->ports[3]; in tb_switch_xhci_disconnect()
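The listing ends inside tb_switch_xhci_disconnect() because the remaining lines no longer reference sw. For completeness, a sketch of how the function plausibly continues, assuming the tb_lc_xhci_disconnect() helper from the link controller code:

void tb_switch_xhci_disconnect(struct tb_switch *sw)
{
        if (sw->generation == 3) {
                struct tb_port *port1 = &sw->ports[1];
                struct tb_port *port3 = &sw->ports[3];

                /* Forcefully disconnect both lane adapters from the xHCI */
                tb_lc_xhci_disconnect(port1);
                tb_port_dbg(port1, "disconnected xHCI\n");
                tb_lc_xhci_disconnect(port3);
                tb_port_dbg(port3, "disconnected xHCI\n");
        }
}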