Merge tag 'thunderbolt-for-v6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v6.5 merge window

This includes the following Thunderbolt/USB4 changes for the v6.5 merge
window:

  - Improve debug logging
  - Rework of TMU and CL state handling
  - Retimer access improvements
  - Initial support for USB4 v2 features:

    * 80G symmetric link support
    * New notifications
    * PCIe extended encapsulation
    * Enhanced uni-directional TMU mode
    * CL2 link low power state
    * DisplayPort 2.x tunneling

  - Support for the Intel Barlow Ridge Thunderbolt/USB4 controller
  - Minor fixes and improvements

All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (55 commits)
  thunderbolt: Add test case for 3 DisplayPort tunnels
  thunderbolt: Add DisplayPort 2.x tunneling support
  thunderbolt: Make bandwidth allocation mode function names consistent
  thunderbolt: Enable CL2 low power state
  thunderbolt: Add support for enhanced uni-directional TMU mode
  thunderbolt: Increase NVM_MAX_SIZE to support Intel Barlow Ridge controller
  thunderbolt: Move constants related to NVM into nvm.c
  thunderbolt: Limit Intel Barlow Ridge USB3 bandwidth
  thunderbolt: Add Intel Barlow Ridge PCI ID
  thunderbolt: Fix PCIe adapter capability length for USB4 v2 routers
  thunderbolt: Fix DisplayPort IN adapter capability length for USB4 v2 routers
  thunderbolt: Add two additional double words for adapters TMU for USB4 v2 routers
  thunderbolt: Enable USB4 v2 PCIe TLP/DLLP extended encapsulation
  thunderbolt: Announce USB4 v2 connection manager support
  thunderbolt: Reset USB4 v2 host router
  thunderbolt: Add the new USB4 v2 notification types
  thunderbolt: Add support for USB4 v2 80 Gb/s link
  thunderbolt: Identify USB4 v2 routers
  thunderbolt: Do not touch lane 1 adapter path config space
  thunderbolt: Ignore data CRC mismatch for USB4 routers
  ...
Merge commit aed1a2a5a6 by Greg Kroah-Hartman, 2023-06-21 18:02:00 +02:00; 25 changed files with 2182 additions and 1017 deletions.

@ -2,7 +2,7 @@
obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o
thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o clx.o
thunderbolt-${CONFIG_ACPI} += acpi.o
thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o

@ -296,16 +296,15 @@ static bool tb_acpi_bus_match(struct device *dev)
static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw)
{
struct tb_switch *parent_sw = tb_switch_parent(sw);
struct acpi_device *adev = NULL;
struct tb_switch *parent_sw;
/*
* Device routers exist under the downstream facing USB4 port
* of the parent router. Their _ADR is always 0.
*/
parent_sw = tb_switch_parent(sw);
if (parent_sw) {
struct tb_port *port = tb_port_at(tb_route(sw), parent_sw);
struct tb_port *port = tb_switch_downstream_port(sw);
struct acpi_device *port_adev;
port_adev = acpi_find_child_by_adr(ACPI_COMPANION(&parent_sw->dev),

drivers/thunderbolt/clx.c (new file, 423 lines)

@ -0,0 +1,423 @@
// SPDX-License-Identifier: GPL-2.0
/*
* CLx support
*
* Copyright (C) 2020 - 2023, Intel Corporation
* Authors: Gil Fine <gil.fine@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/module.h>
#include "tb.h"
static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
static const char *clx_name(unsigned int clx)
{
switch (clx) {
case TB_CL0S | TB_CL1 | TB_CL2:
return "CL0s/CL1/CL2";
case TB_CL1 | TB_CL2:
return "CL1/CL2";
case TB_CL0S | TB_CL2:
return "CL0s/CL2";
case TB_CL0S | TB_CL1:
return "CL0s/CL1";
case TB_CL0S:
return "CL0s";
case 0:
return "disabled";
default:
return "unknown";
}
}
static int tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
u32 phy;
int ret;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (secondary)
phy |= LANE_ADP_CS_1_PMS;
else
phy &= ~LANE_ADP_CS_1_PMS;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_pm_secondary_enable(struct tb_port *port)
{
return tb_port_pm_secondary_set(port, true);
}
static int tb_port_pm_secondary_disable(struct tb_port *port)
{
return tb_port_pm_secondary_set(port, false);
}
/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx)
{
u32 val, mask = 0;
bool ret;
/* Don't enable CLx in case of two single-lane links */
if (!port->bonded && port->dual_link_port)
return false;
/* Don't enable CLx in case of inter-domain link */
if (port->xdomain)
return false;
if (tb_switch_is_usb4(port->sw)) {
if (!usb4_port_clx_supported(port))
return false;
} else if (!tb_lc_is_clx_supported(port)) {
return false;
}
if (clx & TB_CL0S)
mask |= LANE_ADP_CS_0_CL0S_SUPPORT;
if (clx & TB_CL1)
mask |= LANE_ADP_CS_0_CL1_SUPPORT;
if (clx & TB_CL2)
mask |= LANE_ADP_CS_0_CL2_SUPPORT;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_0, 1);
if (ret)
return false;
return !!(val & mask);
}
static int tb_port_clx_set(struct tb_port *port, unsigned int clx, bool enable)
{
u32 phy, mask = 0;
int ret;
if (clx & TB_CL0S)
mask |= LANE_ADP_CS_1_CL0S_ENABLE;
if (clx & TB_CL1)
mask |= LANE_ADP_CS_1_CL1_ENABLE;
if (clx & TB_CL2)
mask |= LANE_ADP_CS_1_CL2_ENABLE;
if (!mask)
return -EOPNOTSUPP;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (enable)
phy |= mask;
else
phy &= ~mask;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_clx_disable(struct tb_port *port, unsigned int clx)
{
return tb_port_clx_set(port, clx, false);
}
static int tb_port_clx_enable(struct tb_port *port, unsigned int clx)
{
return tb_port_clx_set(port, clx, true);
}
static int tb_port_clx(struct tb_port *port)
{
u32 val;
int ret;
if (!tb_port_clx_supported(port, TB_CL0S | TB_CL1 | TB_CL2))
return 0;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (val & LANE_ADP_CS_1_CL0S_ENABLE)
ret |= TB_CL0S;
if (val & LANE_ADP_CS_1_CL1_ENABLE)
ret |= TB_CL1;
if (val & LANE_ADP_CS_1_CL2_ENABLE)
ret |= TB_CL2;
return ret;
}
/**
* tb_port_clx_is_enabled() - Is given CL state enabled
* @port: USB4 port to check
* @clx: Mask of CL states to check
*
* Returns true if any of the given CL states is enabled for @port.
*/
bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
{
return !!(tb_port_clx(port) & clx);
}
/**
* tb_switch_clx_init() - Initialize router CL states
* @sw: Router
*
* Can be called for any router. Initializes the current CL state by
* reading it from the hardware.
*
* Returns %0 in case of success and negative errno in case of failure.
*/
int tb_switch_clx_init(struct tb_switch *sw)
{
struct tb_port *up, *down;
unsigned int clx, tmp;
if (tb_switch_is_icm(sw))
return 0;
if (!tb_route(sw))
return 0;
if (!tb_switch_clx_is_supported(sw))
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
clx = tb_port_clx(up);
tmp = tb_port_clx(down);
if (clx != tmp)
tb_sw_warn(sw, "CLx: inconsistent configuration %#x != %#x\n",
clx, tmp);
tb_sw_dbg(sw, "CLx: current mode: %s\n", clx_name(clx));
sw->clx = clx;
return 0;
}
static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
struct tb_port *up, *down;
int ret;
if (!tb_route(sw))
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
ret = tb_port_pm_secondary_enable(up);
if (ret)
return ret;
return tb_port_pm_secondary_disable(down);
}
static int tb_switch_mask_clx_objections(struct tb_switch *sw)
{
int up_port = sw->config.upstream_port_number;
u32 offset, val[2], mask_obj, unmask_obj;
int ret, i;
/* Only Titan Ridge of pre-USB4 devices support CLx states */
if (!tb_switch_is_titan_ridge(sw))
return 0;
if (!tb_route(sw))
return 0;
/*
* In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
* Port A consists of lane adapters 1,2 and
* Port B consists of lane adapters 3,4
* If upstream port is A, (lanes are 1,2), we mask objections from
* port B (lanes 3,4) and unmask objections from Port A and vice-versa.
*/
if (up_port == 1) {
mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
offset = TB_LOW_PWR_C1_CL1;
} else {
mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
offset = TB_LOW_PWR_C3_CL1;
}
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->cap_lp + offset, ARRAY_SIZE(val));
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(val); i++) {
val[i] |= mask_obj;
val[i] &= ~unmask_obj;
}
return tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->cap_lp + offset, ARRAY_SIZE(val));
}
/**
* tb_switch_clx_is_supported() - Is CLx supported on this type of router
* @sw: The router to check CLx support for
*/
bool tb_switch_clx_is_supported(const struct tb_switch *sw)
{
if (!clx_enabled)
return false;
if (sw->quirks & QUIRK_NO_CLX)
return false;
/*
* CLx is not enabled and validated on Intel USB4 platforms
* before Alder Lake.
*/
if (tb_switch_is_tiger_lake(sw))
return false;
return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
static bool validate_mask(unsigned int clx)
{
/* Previous states need to be enabled */
if (clx & TB_CL1)
return (clx & TB_CL0S) == TB_CL0S;
return true;
}
/**
* tb_switch_clx_enable() - Enable CLx on upstream port of specified router
* @sw: Router to enable CLx for
* @clx: The CLx state to enable
*
* CLx is enabled only if both sides of the link support CLx, and if both sides
* of the link are not configured as two single lane links and only if the link
* is not inter-domain link. The complete set of conditions is described in CM
* Guide 1.0 section 8.1.
*
* Returns %0 on success or an error code on failure.
*/
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
{
bool up_clx_support, down_clx_support;
struct tb_switch *parent_sw;
struct tb_port *up, *down;
int ret;
if (!clx || sw->clx == clx)
return 0;
if (!validate_mask(clx))
return -EINVAL;
parent_sw = tb_switch_parent(sw);
if (!parent_sw)
return 0;
if (!tb_switch_clx_is_supported(parent_sw) ||
!tb_switch_clx_is_supported(sw))
return 0;
/* Only support CL2 for v2 routers */
if ((clx & TB_CL2) &&
(usb4_switch_version(parent_sw) < 2 ||
usb4_switch_version(sw) < 2))
return -EOPNOTSUPP;
ret = tb_switch_pm_secondary_resolve(sw);
if (ret)
return ret;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
up_clx_support = tb_port_clx_supported(up, clx);
down_clx_support = tb_port_clx_supported(down, clx);
tb_port_dbg(up, "CLx: %s %ssupported\n", clx_name(clx),
up_clx_support ? "" : "not ");
tb_port_dbg(down, "CLx: %s %ssupported\n", clx_name(clx),
down_clx_support ? "" : "not ");
if (!up_clx_support || !down_clx_support)
return -EOPNOTSUPP;
ret = tb_port_clx_enable(up, clx);
if (ret)
return ret;
ret = tb_port_clx_enable(down, clx);
if (ret) {
tb_port_clx_disable(up, clx);
return ret;
}
ret = tb_switch_mask_clx_objections(sw);
if (ret) {
tb_port_clx_disable(up, clx);
tb_port_clx_disable(down, clx);
return ret;
}
sw->clx |= clx;
tb_sw_dbg(sw, "CLx: %s enabled\n", clx_name(clx));
return 0;
}
/**
* tb_switch_clx_disable() - Disable CLx on upstream port of specified router
* @sw: Router to disable CLx for
*
* Disables all CL states of the given router. Can be called on any
* router and if the states were not enabled already does nothing.
*
* Returns the CL states that were disabled or negative errno in case of
* failure.
*/
int tb_switch_clx_disable(struct tb_switch *sw)
{
unsigned int clx = sw->clx;
struct tb_port *up, *down;
int ret;
if (!tb_switch_clx_is_supported(sw))
return 0;
if (!clx)
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
ret = tb_port_clx_disable(up, clx);
if (ret)
return ret;
ret = tb_port_clx_disable(down, clx);
if (ret)
return ret;
sw->clx = 0;
tb_sw_dbg(sw, "CLx: %s disabled\n", clx_name(clx));
return clx;
}
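
The CL states are handled as a simple bitmask (TB_CL0S, TB_CL1, TB_CL2), and tb_switch_clx_enable() returns -EOPNOTSUPP when the requested set is not supported, so callers can retry with a smaller mask (tb.c does exactly that later in this series). A minimal stand-alone sketch of that calling convention, with illustrative bit values and a stub standing in for the kernel helper:

#include <errno.h>
#include <stdio.h>

/* Illustrative bit values; the kernel defines TB_CL0S/TB_CL1/TB_CL2 in tb.h */
#define TB_CL0S (1u << 0)
#define TB_CL1  (1u << 1)
#define TB_CL2  (1u << 2)

/* Stub standing in for tb_switch_clx_enable(); pretend CL2 is unsupported */
static int clx_enable_stub(unsigned int clx)
{
	if (clx & TB_CL2)
		return -EOPNOTSUPP;
	printf("enabled CL mask %#x\n", clx);
	return 0;
}

int main(void)
{
	unsigned int clx = TB_CL0S | TB_CL1;
	int ret;

	/* Try the deepest set first, then fall back, mirroring tb_enable_clx() */
	ret = clx_enable_stub(clx | TB_CL2);
	if (ret == -EOPNOTSUPP)
		ret = clx_enable_stub(clx);

	return ret ? 1 : 0;
}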

@ -409,6 +409,13 @@ static int tb_async_error(const struct ctl_pkg *pkg)
case TB_CFG_ERROR_HEC_ERROR_DETECTED:
case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
case TB_CFG_ERROR_DP_BW:
case TB_CFG_ERROR_ROP_CMPLT:
case TB_CFG_ERROR_POP_CMPLT:
case TB_CFG_ERROR_PCIE_WAKE:
case TB_CFG_ERROR_DP_CON_CHANGE:
case TB_CFG_ERROR_DPTX_DISCOVERY:
case TB_CFG_ERROR_LINK_RECOVERY:
case TB_CFG_ERROR_ASYM_LINK:
return true;
default:
@ -758,6 +765,27 @@ int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
case TB_CFG_ERROR_DP_BW:
name = "DP_BW";
break;
case TB_CFG_ERROR_ROP_CMPLT:
name = "router operation completion";
break;
case TB_CFG_ERROR_POP_CMPLT:
name = "port operation completion";
break;
case TB_CFG_ERROR_PCIE_WAKE:
name = "PCIe wake";
break;
case TB_CFG_ERROR_DP_CON_CHANGE:
name = "DP connector change";
break;
case TB_CFG_ERROR_DPTX_DISCOVERY:
name = "DPTX discovery";
break;
case TB_CFG_ERROR_LINK_RECOVERY:
name = "link recovery";
break;
case TB_CFG_ERROR_ASYM_LINK:
name = "asymmetric link";
break;
default:
name = "unknown";
break;

@ -14,12 +14,15 @@
#include "tb.h"
#include "sb_regs.h"
#define PORT_CAP_PCIE_LEN 1
#define PORT_CAP_V1_PCIE_LEN 1
#define PORT_CAP_V2_PCIE_LEN 2
#define PORT_CAP_POWER_LEN 2
#define PORT_CAP_LANE_LEN 3
#define PORT_CAP_USB3_LEN 5
#define PORT_CAP_DP_LEN 8
#define PORT_CAP_TMU_LEN 8
#define PORT_CAP_DP_V1_LEN 9
#define PORT_CAP_DP_V2_LEN 14
#define PORT_CAP_TMU_V1_LEN 8
#define PORT_CAP_TMU_V2_LEN 10
#define PORT_CAP_BASIC_LEN 9
#define PORT_CAP_USB4_LEN 20
@ -553,8 +556,9 @@ static int margining_run_write(void *data, u64 val)
struct usb4_port *usb4 = port->usb4;
struct tb_switch *sw = port->sw;
struct tb_margining *margining;
struct tb_switch *down_sw;
struct tb *tb = sw->tb;
int ret;
int ret, clx;
if (val != 1)
return -EINVAL;
@ -566,15 +570,24 @@ static int margining_run_write(void *data, u64 val)
goto out_rpm_put;
}
/*
* CL states may interfere with lane margining so let the user know
* and bail out.
*/
if (tb_port_is_clx_enabled(port, TB_CL1 | TB_CL2)) {
tb_port_warn(port,
"CL states are enabled, Disable them with clx=0 and re-connect\n");
ret = -EINVAL;
goto out_unlock;
if (tb_is_upstream_port(port))
down_sw = sw;
else if (port->remote)
down_sw = port->remote->sw;
else
down_sw = NULL;
if (down_sw) {
/*
* CL states may interfere with lane margining so
* disable them temporarily now.
*/
ret = tb_switch_clx_disable(down_sw);
if (ret < 0) {
tb_sw_warn(down_sw, "failed to disable CL states\n");
goto out_unlock;
}
clx = ret;
}
margining = usb4->margining;
@ -586,7 +599,7 @@ static int margining_run_write(void *data, u64 val)
margining->right_high,
USB4_MARGIN_SW_COUNTER_CLEAR);
if (ret)
goto out_unlock;
goto out_clx;
ret = usb4_port_sw_margin_errors(port, &margining->results[0]);
} else {
@ -600,6 +613,9 @@ static int margining_run_write(void *data, u64 val)
margining->right_high, margining->results);
}
out_clx:
if (down_sw)
tb_switch_clx_enable(down_sw, clx);
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
@ -1148,7 +1164,10 @@ static void port_cap_show(struct tb_port *port, struct seq_file *s,
break;
case TB_PORT_CAP_TIME1:
length = PORT_CAP_TMU_LEN;
if (usb4_switch_version(port->sw) < 2)
length = PORT_CAP_TMU_V1_LEN;
else
length = PORT_CAP_TMU_V2_LEN;
break;
case TB_PORT_CAP_POWER:
@ -1157,12 +1176,17 @@ static void port_cap_show(struct tb_port *port, struct seq_file *s,
case TB_PORT_CAP_ADAP:
if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
length = PORT_CAP_PCIE_LEN;
} else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
if (usb4_dp_port_bw_mode_supported(port))
length = PORT_CAP_DP_LEN + 1;
if (usb4_switch_version(port->sw) < 2)
length = PORT_CAP_V1_PCIE_LEN;
else
length = PORT_CAP_DP_LEN;
length = PORT_CAP_V2_PCIE_LEN;
} else if (tb_port_is_dpin(port)) {
if (usb4_switch_version(port->sw) < 2)
length = PORT_CAP_DP_V1_LEN;
else
length = PORT_CAP_DP_V2_LEN;
} else if (tb_port_is_dpout(port)) {
length = PORT_CAP_DP_V1_LEN;
} else if (tb_port_is_usb3_down(port) ||
tb_port_is_usb3_up(port)) {
length = PORT_CAP_USB3_LEN;

@ -192,9 +192,9 @@ static int dma_test_start_rings(struct dma_test *dt)
}
ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
dt->tx_ring ? dt->tx_ring->hop : 0,
dt->tx_ring ? dt->tx_ring->hop : -1,
dt->rx_hopid,
dt->rx_ring ? dt->rx_ring->hop : 0);
dt->rx_ring ? dt->rx_ring->hop : -1);
if (ret) {
dma_test_free_rings(dt);
return ret;
@ -218,9 +218,9 @@ static void dma_test_stop_rings(struct dma_test *dt)
tb_ring_stop(dt->tx_ring);
ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
dt->tx_ring ? dt->tx_ring->hop : 0,
dt->tx_ring ? dt->tx_ring->hop : -1,
dt->rx_hopid,
dt->rx_ring ? dt->rx_ring->hop : 0);
dt->rx_ring ? dt->rx_ring->hop : -1);
if (ret)
dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");
@ -412,6 +412,7 @@ static void speed_get(const struct dma_test *dt, u64 *val)
static int speed_validate(u64 val)
{
switch (val) {
case 40:
case 20:
case 10:
case 0:
@ -489,9 +490,12 @@ static void dma_test_check_errors(struct dma_test *dt, int ret)
if (!dt->error_code) {
if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
dt->error_code = DMA_TEST_SPEED_ERROR;
} else if (dt->link_width &&
dt->xd->link_width != dt->link_width) {
dt->error_code = DMA_TEST_WIDTH_ERROR;
} else if (dt->link_width) {
const struct tb_xdomain *xd = dt->xd;
if ((dt->link_width == 1 && xd->link_width != TB_LINK_WIDTH_SINGLE) ||
(dt->link_width == 2 && xd->link_width < TB_LINK_WIDTH_DUAL))
dt->error_code = DMA_TEST_WIDTH_ERROR;
} else if (dt->packets_to_send != dt->packets_sent ||
dt->packets_to_receive != dt->packets_received ||
dt->crc_errors || dt->buffer_overflow_errors) {
@ -756,5 +760,5 @@ module_exit(dma_test_exit);
MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("DMA traffic test driver");
MODULE_DESCRIPTION("Thunderbolt/USB4 DMA traffic test driver");
MODULE_LICENSE("GPL v2");

@ -605,9 +605,8 @@ static int usb4_drom_parse(struct tb_switch *sw)
crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
if (crc != header->data_crc32) {
tb_sw_warn(sw,
"DROM data CRC32 mismatch (expected: %#x, got: %#x), aborting\n",
"DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n",
header->data_crc32, crc);
return -EINVAL;
}
return tb_drom_parse_entries(sw, USB4_DROM_HEADER_SIZE);

@ -644,13 +644,14 @@ static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
return ret;
}
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
u64 route, u8 connection_id, u8 connection_key,
u8 link, u8 depth, bool boot)
static void update_switch(struct tb_switch *sw, u64 route, u8 connection_id,
u8 connection_key, u8 link, u8 depth, bool boot)
{
struct tb_switch *parent_sw = tb_switch_parent(sw);
/* Disconnect from parent */
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
/* Re-connect via updated port*/
tb_switch_downstream_port(sw)->remote = NULL;
/* Re-connect via updated port */
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
/* Update with the new addressing information */
@ -671,10 +672,7 @@ static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
static void remove_switch(struct tb_switch *sw)
{
struct tb_switch *parent_sw;
parent_sw = tb_to_switch(sw->dev.parent);
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
tb_switch_downstream_port(sw)->remote = NULL;
tb_switch_remove(sw);
}
@ -755,7 +753,6 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
if (sw) {
u8 phy_port, sw_phy_port;
parent_sw = tb_to_switch(sw->dev.parent);
sw_phy_port = tb_phy_port_from_link(sw->link);
phy_port = tb_phy_port_from_link(link);
@ -785,7 +782,7 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
route = tb_route(sw);
}
update_switch(parent_sw, sw, route, pkg->connection_id,
update_switch(sw, route, pkg->connection_id,
pkg->connection_key, link, depth, boot);
tb_switch_put(sw);
return;
@ -853,7 +850,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
sw->security_level = security_level;
sw->boot = boot;
sw->link_speed = speed_gen3 ? 20 : 10;
sw->link_width = dual_lane ? 2 : 1;
sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL :
TB_LINK_WIDTH_SINGLE;
sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));
if (add_switch(parent_sw, sw))
@ -1236,9 +1234,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
if (sw) {
/* Update the switch if it is still in the same place */
if (tb_route(sw) == route && !!sw->authorized == authorized) {
parent_sw = tb_to_switch(sw->dev.parent);
update_switch(parent_sw, sw, route, pkg->connection_id,
0, 0, 0, boot);
update_switch(sw, route, pkg->connection_id, 0, 0, 0,
boot);
tb_switch_put(sw);
return;
}
@ -1276,7 +1273,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
sw->security_level = security_level;
sw->boot = boot;
sw->link_speed = speed_gen3 ? 20 : 10;
sw->link_width = dual_lane ? 2 : 1;
sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL :
TB_LINK_WIDTH_SINGLE;
sw->rpm = force_rtd3;
if (!sw->rpm)
sw->rpm = intel_vss_is_rtd3(pkg->ep_name,

@ -46,6 +46,10 @@
#define QUIRK_AUTO_CLEAR_INT BIT(0)
#define QUIRK_E2E BIT(1)
static bool host_reset = true;
module_param(host_reset, bool, 0444);
MODULE_PARM_DESC(host_reset, "reset USB4 v2 host router (default: true)");
static int ring_interrupt_index(const struct tb_ring *ring)
{
int bit = ring->hop;
@ -56,9 +60,14 @@ static int ring_interrupt_index(const struct tb_ring *ring)
static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
{
if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
return;
iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
u32 val;
val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
} else {
iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
}
}
static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
@ -1212,6 +1221,37 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
str_enabled_disabled(port_ok));
}
static void nhi_reset(struct tb_nhi *nhi)
{
ktime_t timeout;
u32 val;
val = ioread32(nhi->iobase + REG_CAPS);
/* Reset only v2 and later routers */
if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2)
return;
if (!host_reset) {
dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
return;
}
iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
msleep(100);
timeout = ktime_add_ms(ktime_get(), 500);
do {
val = ioread32(nhi->iobase + REG_RESET);
if (!(val & REG_RESET_HRR)) {
dev_warn(&nhi->pdev->dev, "host router reset successful\n");
return;
}
usleep_range(10, 20);
} while (ktime_before(ktime_get(), timeout));
dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
}
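
nhi_reset() above follows a common pattern: write a self-clearing bit, then poll the register until the hardware clears it, bounded by a deadline. A stand-alone user-space sketch of the same loop structure; the helper names and the fake register are illustrative, not kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define RESET_HRR (1u << 0)	/* illustrative stand-in for REG_RESET_HRR */

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

/* Set a self-clearing reset bit, then poll until it clears or we time out */
static bool reset_and_wait(volatile uint32_t *reg, unsigned int timeout_ms)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 20000 };
	uint64_t deadline = now_ms() + timeout_ms;

	*reg |= RESET_HRR;

	do {
		if (!(*reg & RESET_HRR))
			return true;	/* bit cleared: reset completed */
		nanosleep(&delay, NULL);
	} while (now_ms() < deadline);

	return false;			/* bit still set after the deadline */
}

int main(void)
{
	/* No hardware behind this, so the example exercises the timeout path */
	uint32_t fake_reg = 0;

	if (!reset_and_wait(&fake_reg, 500))
		fprintf(stderr, "timeout waiting for reset to complete\n");
	return 0;
}
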
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
@ -1312,7 +1352,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
/* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff;
dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
@ -1325,6 +1365,8 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
nhi_reset(nhi);
res = nhi_init_msi(nhi);
if (res)
return dev_err_probe(dev, res, "cannot enable MSI, aborting\n");
@ -1475,6 +1517,8 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
@ -1483,6 +1527,7 @@ static struct pci_device_id nhi_ids[] = {
};
MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_DESCRIPTION("Thunderbolt/USB4 core driver");
MODULE_LICENSE("GPL");
static struct pci_driver nhi_driver = {

@ -75,6 +75,10 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
#define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e
#define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d
#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI 0x5781
#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI 0x5784
#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE 0x5786
#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE 0x57a4
#define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2
#define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2
#define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3

@ -37,7 +37,7 @@ struct ring_desc {
/* NHI registers in bar 0 */
/*
* 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 16 bytes per entry, one entry for every hop (REG_CAPS)
* 00: physical pointer to an array of struct ring_desc
* 08: ring tail (set by NHI)
* 10: ring head (index of first non posted descriptor)
@ -46,7 +46,7 @@ struct ring_desc {
#define REG_TX_RING_BASE 0x00000
/*
* 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 16 bytes per entry, one entry for every hop (REG_CAPS)
* 00: physical pointer to an array of struct ring_desc
* 08: ring head (index of first not posted descriptor)
* 10: ring tail (set by NHI)
@ -56,7 +56,7 @@ struct ring_desc {
#define REG_RX_RING_BASE 0x08000
/*
* 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 32 bytes per entry, one entry for every hop (REG_CAPS)
* 00: enum_ring_flags
* 04: isoch time stamp ?? (write 0)
* ..: unknown
@ -64,7 +64,7 @@ struct ring_desc {
#define REG_TX_OPTIONS_BASE 0x19800
/*
* 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 32 bytes per entry, one entry for every hop (REG_CAPS)
* 00: enum ring_flags
* If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to
* the corresponding TX hop id.
@ -77,7 +77,7 @@ struct ring_desc {
/*
* three bitfields: tx, rx, rx overflow
* Every bitfield contains one bit for every hop (REG_HOP_COUNT).
* Every bitfield contains one bit for every hop (REG_CAPS).
* New interrupts are fired only after ALL registers have been
* read (even those containing only disabled rings).
*/
@ -87,7 +87,7 @@ struct ring_desc {
/*
* two bitfields: rx, tx
* Both bitfields contain one bit for every hop (REG_HOP_COUNT). To
* Both bitfields contain one bit for every hop (REG_CAPS). To
* enable/disable interrupts set/clear the corresponding bits.
*/
#define REG_RING_INTERRUPT_BASE 0x38200
@ -104,12 +104,17 @@ struct ring_desc {
#define REG_INT_VEC_ALLOC_REGS (32 / REG_INT_VEC_ALLOC_BITS)
/* The last 11 bits contain the number of hops supported by the NHI port. */
#define REG_HOP_COUNT 0x39640
#define REG_CAPS 0x39640
#define REG_CAPS_VERSION_MASK GENMASK(23, 16)
#define REG_CAPS_VERSION_2 0x40
#define REG_DMA_MISC 0x39864
#define REG_DMA_MISC_INT_AUTO_CLEAR BIT(2)
#define REG_DMA_MISC_DISABLE_AUTO_CLEAR BIT(17)
#define REG_RESET 0x39898
#define REG_RESET_HRR BIT(0)
#define REG_INMAIL_DATA 0x39900
#define REG_INMAIL_CMD 0x39904
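
The renamed REG_CAPS register still carries the hop count in its low bits (nhi.c masks it with 0x3ff) and now also a version field in bits 23:16, where 0x40 or higher marks a USB4 v2 capable NHI that gets the new host router reset. A small stand-alone sketch of extracting both fields from the 32-bit value; the register value below is made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define CAPS_HOP_COUNT_MASK 0x3ffu		/* low bits: number of hops */
#define CAPS_VERSION_MASK   0x00ff0000u		/* bits 23:16: NHI version */
#define CAPS_VERSION_SHIFT  16
#define CAPS_VERSION_2      0x40		/* USB4 v2 capable NHI */

int main(void)
{
	uint32_t caps = 0x0040000cu;	/* made-up example: version 0x40, 12 hops */
	unsigned int hops = caps & CAPS_HOP_COUNT_MASK;
	unsigned int version = (caps & CAPS_VERSION_MASK) >> CAPS_VERSION_SHIFT;

	printf("hops: %u, version: %#x\n", hops, version);
	if (version >= CAPS_VERSION_2)
		printf("v2 or later NHI: host router reset applies\n");
	return 0;
}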

@ -12,6 +12,10 @@
#include "tb.h"
#define NVM_MIN_SIZE SZ_32K
#define NVM_MAX_SIZE SZ_1M
#define NVM_DATA_DWORDS 16
/* Intel specific NVM offsets */
#define INTEL_NVM_DEVID 0x05
#define INTEL_NVM_VERSION 0x08

@ -10,6 +10,7 @@
static void quirk_force_power_link(struct tb_switch *sw)
{
sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
tb_sw_dbg(sw, "forcing power to link controller\n");
}
static void quirk_dp_credit_allocation(struct tb_switch *sw)
@ -74,6 +75,14 @@ static const struct tb_quirk tb_quirks[] = {
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
/*
* CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
*/
@ -105,6 +114,7 @@ void tb_check_quirks(struct tb_switch *sw)
if (q->device && q->device != sw->device)
continue;
tb_sw_dbg(sw, "running %ps\n", q->hook);
q->hook(sw);
}
}

@ -187,10 +187,34 @@ static ssize_t nvm_authenticate_show(struct device *dev,
return ret;
}
static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
int i;
tb_port_dbg(port, "reading NVM authentication status of retimers\n");
/*
* Before doing anything else, read the authentication status.
* If the retimer has it set, store it for the new retimer
* device instance.
*/
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
}
static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
int i;
/*
* When USB4 port is online sideband communications are
* already up.
*/
if (!usb4_port_device_is_offline(port->usb4))
return;
tb_port_dbg(port, "enabling sideband transactions\n");
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
usb4_port_retimer_set_inbound_sbtx(port, i);
}
@ -199,6 +223,16 @@ static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
int i;
/*
* When USB4 port is offline we need to keep the sideband
* communications up to make it possible to communicate with
* the connected retimers.
*/
if (usb4_port_device_is_offline(port->usb4))
return;
tb_port_dbg(port, "disabling sideband transactions\n");
for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
usb4_port_retimer_unset_inbound_sbtx(port, i);
}
@ -229,6 +263,13 @@ static ssize_t nvm_authenticate_store(struct device *dev,
rt->auth_status = 0;
if (val) {
/*
* When NVM authentication starts the retimer is not
* accessible so calling tb_retimer_unset_inbound_sbtx()
* will fail and therefore we do not call it. Exception
* is when the validation fails or we only write the new
* NVM image without authentication.
*/
tb_retimer_set_inbound_sbtx(rt->port);
if (val == AUTHENTICATE_ONLY) {
ret = tb_retimer_nvm_authenticate(rt, true);
@ -249,7 +290,8 @@ static ssize_t nvm_authenticate_store(struct device *dev,
}
exit_unlock:
tb_retimer_unset_inbound_sbtx(rt->port);
if (ret || val == WRITE_ONLY)
tb_retimer_unset_inbound_sbtx(rt->port);
mutex_unlock(&rt->tb->lock);
exit_rpm:
pm_runtime_mark_last_busy(&rt->dev);
@ -341,12 +383,6 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
return ret;
}
if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
vendor);
return -EOPNOTSUPP;
}
/*
* Check that it supports NVM operations. If not then don't add
* the device at all.
@ -454,20 +490,18 @@ int tb_retimer_scan(struct tb_port *port, bool add)
if (ret)
return ret;
/*
* Immediately after sending enumerate retimers read the
* authentication status of each retimer.
*/
tb_retimer_nvm_authenticate_status(port, status);
/*
* Enable sideband channel for each retimer. We can do this
* regardless whether there is device connected or not.
*/
tb_retimer_set_inbound_sbtx(port);
/*
* Before doing anything else, read the authentication status.
* If the retimer has it set, store it for the new retimer
* device instance.
*/
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
/*
* Last retimer is true only for the last on-board

@ -26,10 +26,6 @@ struct nvm_auth_status {
u32 status;
};
static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
/*
* Hold NVM authentication failure status per switch. This information
* needs to stay around even when the switch gets power cycled so we
@ -727,7 +723,7 @@ static int tb_init_port(struct tb_port *port)
* can be read from the path config space. Legacy
* devices we use hard-coded value.
*/
if (tb_switch_is_usb4(port->sw)) {
if (port->cap_usb4) {
struct tb_regs_hop hop;
if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
@ -907,15 +903,23 @@ int tb_port_get_link_speed(struct tb_port *port)
speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
switch (speed) {
case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
return 40;
case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
return 20;
default:
return 10;
}
}
/**
* tb_port_get_link_width() - Get current link width
* @port: Port to check (USB4 or CIO)
*
* Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
* or negative errno in case of failure.
* Returns link width. Return the link width as encoded in &enum
* tb_link_width or negative errno in case of failure.
*/
int tb_port_get_link_width(struct tb_port *port)
{
@ -930,11 +934,13 @@ int tb_port_get_link_width(struct tb_port *port)
if (ret)
return ret;
/* Matches the values in enum tb_link_width */
return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}
static bool tb_port_is_width_supported(struct tb_port *port, int width)
static bool tb_port_is_width_supported(struct tb_port *port,
unsigned int width_mask)
{
u32 phy, widths;
int ret;
@ -950,20 +956,25 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width)
widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
return !!(widths & width);
return widths & width_mask;
}
static bool is_gen4_link(struct tb_port *port)
{
return tb_port_get_link_speed(port) > 20;
}
/**
* tb_port_set_link_width() - Set target link width of the lane adapter
* @port: Lane adapter
* @width: Target link width (%1 or %2)
* @width: Target link width
*
* Sets the target link width of the lane adapter to @width. Does not
* enable/disable lane bonding. For that call tb_port_set_lane_bonding().
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_port_set_link_width(struct tb_port *port, unsigned int width)
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
u32 val;
int ret;
@ -978,11 +989,14 @@ int tb_port_set_link_width(struct tb_port *port, unsigned int width)
val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
switch (width) {
case 1:
case TB_LINK_WIDTH_SINGLE:
/* Gen 4 link cannot be single */
if (is_gen4_link(port))
return -EOPNOTSUPP;
val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
break;
case 2:
case TB_LINK_WIDTH_DUAL:
val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
break;
@ -1004,12 +1018,9 @@ int tb_port_set_link_width(struct tb_port *port, unsigned int width)
* cases one should use tb_port_lane_bonding_enable() instead to enable
* lane bonding.
*
* As a side effect sets @port->bonding accordingly (and does the same
* for lane 1 too).
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
u32 val;
int ret;
@ -1027,19 +1038,8 @@ int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
else
val &= ~LANE_ADP_CS_1_LB;
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
/*
* When lane 0 bonding is set it will affect lane 1 too so
* update both.
*/
port->bonded = bonding;
port->dual_link_port->bonded = bonding;
return 0;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
/**
@ -1056,36 +1056,52 @@ int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
*/
int tb_port_lane_bonding_enable(struct tb_port *port)
{
enum tb_link_width width;
int ret;
/*
* Enable lane bonding for both links if not already enabled by
* for example the boot firmware.
*/
ret = tb_port_get_link_width(port);
if (ret == 1) {
ret = tb_port_set_link_width(port, 2);
width = tb_port_get_link_width(port);
if (width == TB_LINK_WIDTH_SINGLE) {
ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
if (ret)
goto err_lane0;
}
ret = tb_port_get_link_width(port->dual_link_port);
if (ret == 1) {
ret = tb_port_set_link_width(port->dual_link_port, 2);
width = tb_port_get_link_width(port->dual_link_port);
if (width == TB_LINK_WIDTH_SINGLE) {
ret = tb_port_set_link_width(port->dual_link_port,
TB_LINK_WIDTH_DUAL);
if (ret)
goto err_lane0;
}
ret = tb_port_set_lane_bonding(port, true);
if (ret)
goto err_lane1;
/*
* Only set bonding if the link was not already bonded. This
* avoids the lane adapter to re-enter bonding state.
*/
if (width == TB_LINK_WIDTH_SINGLE) {
ret = tb_port_set_lane_bonding(port, true);
if (ret)
goto err_lane1;
}
/*
* When lane 0 bonding is set it will affect lane 1 too so
* update both.
*/
port->bonded = true;
port->dual_link_port->bonded = true;
return 0;
err_lane1:
tb_port_set_link_width(port->dual_link_port, 1);
tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
err_lane0:
tb_port_set_link_width(port, 1);
tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
return ret;
}
@ -1099,27 +1115,34 @@ err_lane0:
void tb_port_lane_bonding_disable(struct tb_port *port)
{
tb_port_set_lane_bonding(port, false);
tb_port_set_link_width(port->dual_link_port, 1);
tb_port_set_link_width(port, 1);
tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
port->dual_link_port->bonded = false;
port->bonded = false;
}
/**
* tb_port_wait_for_link_width() - Wait until link reaches specific width
* @port: Port to wait for
* @width: Expected link width (%1 or %2)
* @width_mask: Expected link width mask
* @timeout_msec: Timeout in ms how long to wait
*
* Should be used after both ends of the link have been bonded (or
* bonding has been disabled) to wait until the link actually reaches
* the expected state. Returns %-ETIMEDOUT if the @width was not reached
* within the given timeout, %0 if it did.
* the expected state. Returns %-ETIMEDOUT if the width was not reached
* within the given timeout, %0 if it did. Can be passed a mask of
* expected widths and succeeds if any of the widths is reached.
*/
int tb_port_wait_for_link_width(struct tb_port *port, int width,
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
int timeout_msec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
int ret;
/* Gen 4 link does not support single lane */
if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port))
return -EOPNOTSUPP;
do {
ret = tb_port_get_link_width(port);
if (ret < 0) {
@ -1130,7 +1153,7 @@ int tb_port_wait_for_link_width(struct tb_port *port, int width,
*/
if (ret != -EACCES)
return ret;
} else if (ret == width) {
} else if (ret & width_mask) {
return 0;
}
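
tb_port_wait_for_link_width() now takes a mask of acceptable widths and succeeds as soon as the port reports any of them; tb_switch_lane_bonding_enable() below builds such a mask to treat dual and both asymmetric widths as bonded. A stand-alone sketch of that mask test, with illustrative bit values standing in for enum tb_link_width:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values; the kernel encodes enum tb_link_width as single bits */
#define LINK_WIDTH_SINGLE  (1u << 0)
#define LINK_WIDTH_DUAL    (1u << 1)
#define LINK_WIDTH_ASYM_TX (1u << 2)
#define LINK_WIDTH_ASYM_RX (1u << 3)

/* Succeeds when the current width matches any width in the mask */
static bool width_reached(unsigned int current_width, unsigned int width_mask)
{
	return (current_width & width_mask) != 0;
}

int main(void)
{
	/* Any of these counts as bonded, as in tb_switch_lane_bonding_enable() */
	unsigned int bonded_mask = LINK_WIDTH_DUAL | LINK_WIDTH_ASYM_TX |
				   LINK_WIDTH_ASYM_RX;

	printf("dual link bonded: %d\n",
	       width_reached(LINK_WIDTH_DUAL, bonded_mask));
	printf("single link bonded: %d\n",
	       width_reached(LINK_WIDTH_SINGLE, bonded_mask));
	return 0;
}
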
@ -1183,135 +1206,6 @@ int tb_port_update_credits(struct tb_port *port)
return tb_port_do_update_credits(port->dual_link_port);
}
static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
u32 phy;
int ret;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (secondary)
phy |= LANE_ADP_CS_1_PMS;
else
phy &= ~LANE_ADP_CS_1_PMS;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_pm_secondary_enable(struct tb_port *port)
{
return __tb_port_pm_secondary_set(port, true);
}
static int tb_port_pm_secondary_disable(struct tb_port *port)
{
return __tb_port_pm_secondary_set(port, false);
}
/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask)
{
u32 val, mask = 0;
bool ret;
/* Don't enable CLx in case of two single-lane links */
if (!port->bonded && port->dual_link_port)
return false;
/* Don't enable CLx in case of inter-domain link */
if (port->xdomain)
return false;
if (tb_switch_is_usb4(port->sw)) {
if (!usb4_port_clx_supported(port))
return false;
} else if (!tb_lc_is_clx_supported(port)) {
return false;
}
if (clx_mask & TB_CL1) {
/* CL0s and CL1 are enabled and supported together */
mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
}
if (clx_mask & TB_CL2)
mask |= LANE_ADP_CS_0_CL2_SUPPORT;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_0, 1);
if (ret)
return false;
return !!(val & mask);
}
static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
{
u32 phy, mask;
int ret;
/* CL0s and CL1 are enabled and supported together */
if (clx == TB_CL1)
mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
else
/* For now we support only CL0s and CL1. Not CL2 */
return -EOPNOTSUPP;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (enable)
phy |= mask;
else
phy &= ~mask;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
{
return __tb_port_clx_set(port, clx, false);
}
static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
{
return __tb_port_clx_set(port, clx, true);
}
/**
* tb_port_is_clx_enabled() - Is given CL state enabled
* @port: USB4 port to check
* @clx_mask: Mask of CL states to check
*
* Returns true if any of the given CL states is enabled for @port.
*/
bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask)
{
u32 val, mask = 0;
int ret;
if (!tb_port_clx_supported(port, clx_mask))
return false;
if (clx_mask & TB_CL1)
mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
if (clx_mask & TB_CL2)
mask |= LANE_ADP_CS_1_CL2_ENABLE;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return false;
return !!(val & mask);
}
static int tb_port_start_lane_initialization(struct tb_port *port)
{
int ret;
@ -1911,20 +1805,57 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
unsigned int width;
return sysfs_emit(buf, "%u\n", sw->link_width);
switch (sw->link_width) {
case TB_LINK_WIDTH_SINGLE:
case TB_LINK_WIDTH_ASYM_TX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
case TB_LINK_WIDTH_ASYM_RX:
width = 3;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
/*
* Currently link has same amount of lanes both directions (1 or 2) but
* expose them separately to allow possible asymmetric links in the future.
*/
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
unsigned int width;
switch (sw->link_width) {
case TB_LINK_WIDTH_SINGLE:
case TB_LINK_WIDTH_ASYM_RX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
case TB_LINK_WIDTH_ASYM_TX:
width = 3;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
static ssize_t nvm_authenticate_show(struct device *dev,
struct device_attribute *attr, char *buf)
@ -2189,8 +2120,9 @@ static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *en
const struct tb_switch *sw = tb_to_switch(dev);
const char *type;
if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
if (add_uevent_var(env, "USB4_VERSION=1.0"))
if (tb_switch_is_usb4(sw)) {
if (add_uevent_var(env, "USB4_VERSION=%u.0",
usb4_switch_version(sw)))
return -ENOMEM;
}
@ -2498,9 +2430,13 @@ int tb_switch_configure(struct tb_switch *sw)
/*
* For USB4 devices, we need to program the CM version
* accordingly so that it knows to expose all the
* additional capabilities.
* additional capabilities. Program it according to USB4
* version to avoid changing existing (v1) routers behaviour.
*/
sw->config.cmuv = USB4_VERSION_1_0;
if (usb4_switch_version(sw) < 2)
sw->config.cmuv = ROUTER_CS_4_CMUV_V1;
else
sw->config.cmuv = ROUTER_CS_4_CMUV_V2;
sw->config.plug_events_delay = 0xa;
/* Enumerate the switch */
@ -2530,6 +2466,22 @@ int tb_switch_configure(struct tb_switch *sw)
return tb_plug_events_active(sw, true);
}
/**
* tb_switch_configuration_valid() - Set the tunneling configuration to be valid
* @sw: Router to configure
*
* Needs to be called before any tunnels can be setup through the
* router. Can be called to any router.
*
* Returns %0 in success and negative errno otherwise.
*/
int tb_switch_configuration_valid(struct tb_switch *sw)
{
if (tb_switch_is_usb4(sw))
return usb4_switch_configuration_valid(sw);
return 0;
}
static int tb_switch_set_uuid(struct tb_switch *sw)
{
bool uid = false;
@ -2754,9 +2706,9 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw)
*/
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
struct tb_switch *parent = tb_to_switch(sw->dev.parent);
struct tb_port *up, *down;
u64 route = tb_route(sw);
unsigned int width_mask;
int ret;
if (!route)
@ -2766,10 +2718,10 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
return 0;
up = tb_upstream_port(sw);
down = tb_port_at(route, parent);
down = tb_switch_downstream_port(sw);
if (!tb_port_is_width_supported(up, 2) ||
!tb_port_is_width_supported(down, 2))
if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) ||
!tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL))
return 0;
ret = tb_port_lane_bonding_enable(up);
@ -2785,7 +2737,11 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
return ret;
}
ret = tb_port_wait_for_link_width(down, 2, 100);
/* Any of these widths means the link is bonded */
width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
TB_LINK_WIDTH_ASYM_RX;
ret = tb_port_wait_for_link_width(down, width_mask, 100);
if (ret) {
tb_port_warn(down, "timeout enabling lane bonding\n");
return ret;
@ -2808,8 +2764,8 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
*/
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
struct tb_switch *parent = tb_to_switch(sw->dev.parent);
struct tb_port *up, *down;
int ret;
if (!tb_route(sw))
return;
@ -2818,7 +2774,7 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw)
if (!up->bonded)
return;
down = tb_port_at(tb_route(sw), parent);
down = tb_switch_downstream_port(sw);
tb_port_lane_bonding_disable(up);
tb_port_lane_bonding_disable(down);
@ -2827,7 +2783,8 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw)
* It is fine if we get other errors as the router might have
* been unplugged.
*/
if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
if (ret == -ETIMEDOUT)
tb_sw_warn(sw, "timeout disabling lane bonding\n");
tb_port_update_credits(down);
@ -2994,6 +2951,10 @@ int tb_switch_add(struct tb_switch *sw)
if (ret)
return ret;
ret = tb_switch_clx_init(sw);
if (ret)
return ret;
ret = tb_switch_tmu_init(sw);
if (ret)
return ret;
@ -3246,13 +3207,8 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
/*
* Actually only needed for Titan Ridge but for simplicity can be
* done for USB4 device too as CLx is re-enabled at resume.
* CL0s and CL1 are enabled and supported together.
*/
if (tb_switch_is_clx_enabled(sw, TB_CL1)) {
if (tb_switch_disable_clx(sw, TB_CL1))
tb_sw_warn(sw, "failed to disable %s on upstream port\n",
tb_switch_clx_name(TB_CL1));
}
tb_switch_clx_disable(sw);
err = tb_plug_events_active(sw, false);
if (err)
@ -3474,234 +3430,6 @@ struct tb_port *tb_switch_find_port(struct tb_switch *sw,
return NULL;
}
static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *up, *down;
int ret;
if (!tb_route(sw))
return 0;
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
ret = tb_port_pm_secondary_enable(up);
if (ret)
return ret;
return tb_port_pm_secondary_disable(down);
}
static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
struct tb_switch *parent = tb_switch_parent(sw);
bool up_clx_support, down_clx_support;
struct tb_port *up, *down;
int ret;
if (!tb_switch_is_clx_supported(sw))
return 0;
/*
* Enable CLx for host router's downstream port as part of the
* downstream router enabling procedure.
*/
if (!tb_route(sw))
return 0;
/* Enable CLx only for first hop router (depth = 1) */
if (tb_route(parent))
return 0;
ret = tb_switch_pm_secondary_resolve(sw);
if (ret)
return ret;
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
up_clx_support = tb_port_clx_supported(up, clx);
down_clx_support = tb_port_clx_supported(down, clx);
tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx),
up_clx_support ? "" : "not ");
tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx),
down_clx_support ? "" : "not ");
if (!up_clx_support || !down_clx_support)
return -EOPNOTSUPP;
ret = tb_port_clx_enable(up, clx);
if (ret)
return ret;
ret = tb_port_clx_enable(down, clx);
if (ret) {
tb_port_clx_disable(up, clx);
return ret;
}
ret = tb_switch_mask_clx_objections(sw);
if (ret) {
tb_port_clx_disable(up, clx);
tb_port_clx_disable(down, clx);
return ret;
}
sw->clx = clx;
tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx));
return 0;
}
/**
* tb_switch_enable_clx() - Enable CLx on upstream port of specified router
* @sw: Router to enable CLx for
* @clx: The CLx state to enable
*
* Enable CLx state only for first hop router. That is the most common
* use-case, that is intended for better thermal management, and so helps
* to improve performance. CLx is enabled only if both sides of the link
* support CLx, and if both sides of the link are not configured as two
* single lane links and only if the link is not inter-domain link. The
* complete set of conditions is described in CM Guide 1.0 section 8.1.
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
struct tb_switch *root_sw = sw->tb->root_switch;
if (!clx_enabled)
return 0;
/*
* CLx is not enabled and validated on Intel USB4 platforms before
* Alder Lake.
*/
if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
return 0;
switch (clx) {
case TB_CL1:
/* CL0s and CL1 are enabled and supported together */
return __tb_switch_enable_clx(sw, clx);
default:
return -EOPNOTSUPP;
}
}
static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *up, *down;
int ret;
if (!tb_switch_is_clx_supported(sw))
return 0;
/*
* Disable CLx for host router's downstream port as part of the
* downstream router enabling procedure.
*/
if (!tb_route(sw))
return 0;
/* Disable CLx only for first hop router (depth = 1) */
if (tb_route(parent))
return 0;
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
ret = tb_port_clx_disable(up, clx);
if (ret)
return ret;
ret = tb_port_clx_disable(down, clx);
if (ret)
return ret;
sw->clx = TB_CLX_DISABLE;
tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx));
return 0;
}
/**
* tb_switch_disable_clx() - Disable CLx on upstream port of specified router
* @sw: Router to disable CLx for
* @clx: The CLx state to disable
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
if (!clx_enabled)
return 0;
switch (clx) {
case TB_CL1:
/* CL0s and CL1 are enabled and supported together */
return __tb_switch_disable_clx(sw, clx);
default:
return -EOPNOTSUPP;
}
}
/**
* tb_switch_mask_clx_objections() - Mask CLx objections for a router
* @sw: Router to mask objections for
*
* Mask the objections coming from the second depth routers in order to
* stop these objections from interfering with the CLx states of the first
* depth link.
*/
int tb_switch_mask_clx_objections(struct tb_switch *sw)
{
int up_port = sw->config.upstream_port_number;
u32 offset, val[2], mask_obj, unmask_obj;
int ret, i;
/* Only Titan Ridge of pre-USB4 devices support CLx states */
if (!tb_switch_is_titan_ridge(sw))
return 0;
if (!tb_route(sw))
return 0;
/*
* In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
* Port A consists of lane adapters 1,2 and
* Port B consists of lane adapters 3,4
* If upstream port is A, (lanes are 1,2), we mask objections from
* port B (lanes 3,4) and unmask objections from Port A and vice-versa.
*/
if (up_port == 1) {
mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
offset = TB_LOW_PWR_C1_CL1;
} else {
mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
offset = TB_LOW_PWR_C3_CL1;
}
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->cap_lp + offset, ARRAY_SIZE(val));
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(val); i++) {
val[i] |= mask_obj;
val[i] &= ~unmask_obj;
}
return tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->cap_lp + offset, ARRAY_SIZE(val));
}
/*
* Can be used for read/write a specified PCIe bridge for any Thunderbolt 3
* device. For now used only for Titan Ridge.

@ -131,7 +131,7 @@ tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
struct tb_port *out)
{
if (usb4_dp_port_bw_mode_enabled(in)) {
if (usb4_dp_port_bandwidth_mode_enabled(in)) {
int index, i;
index = usb4_dp_port_group_id(in);
@ -240,6 +240,147 @@ static void tb_discover_dp_resources(struct tb *tb)
}
}
/* Enables CL states up to host router */
static int tb_enable_clx(struct tb_switch *sw)
{
struct tb_cm *tcm = tb_priv(sw->tb);
unsigned int clx = TB_CL0S | TB_CL1;
const struct tb_tunnel *tunnel;
int ret;
/*
* Currently only enable CLx for the first link. This is enough
* to allow the CPU to save energy at least on Intel hardware
* and makes it slightly simpler to implement. We may change
* this in the future to cover the whole topology if it turns
* out to be beneficial.
*/
while (sw && sw->config.depth > 1)
sw = tb_switch_parent(sw);
if (!sw)
return 0;
if (sw->config.depth != 1)
return 0;
/*
* If we are re-enabling then check if there is an active DMA
* tunnel and in that case bail out.
*/
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (tb_tunnel_is_dma(tunnel)) {
if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
return 0;
}
}
/*
* Initially try with CL2. If that's not supported by the
* topology try with CL0s and CL1 and then give up.
*/
ret = tb_switch_clx_enable(sw, clx | TB_CL2);
if (ret == -EOPNOTSUPP)
ret = tb_switch_clx_enable(sw, clx);
return ret == -EOPNOTSUPP ? 0 : ret;
}
/* Disables CL states up to the host router */
static void tb_disable_clx(struct tb_switch *sw)
{
do {
if (tb_switch_clx_disable(sw) < 0)
tb_sw_warn(sw, "failed to disable CL states\n");
sw = tb_switch_parent(sw);
} while (sw);
}
static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
struct tb_switch *sw;
sw = tb_to_switch(dev);
if (!sw)
return 0;
if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
enum tb_switch_tmu_mode mode;
int ret;
if (tb_switch_clx_is_enabled(sw, TB_CL1))
mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
else
mode = TB_SWITCH_TMU_MODE_HIFI_BI;
ret = tb_switch_tmu_configure(sw, mode);
if (ret)
return ret;
return tb_switch_tmu_enable(sw);
}
return 0;
}
static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
struct tb_switch *sw;
if (!tunnel)
return;
/*
* Once first DP tunnel is established we change the TMU
* accuracy of first depth child routers (and the host router)
* to the highest. This is needed for the DP tunneling to work
* but also allows CL0s.
*
* If both routers are v2 then we don't need to do anything as
* they are using enhanced TMU mode that allows all CLx.
*/
sw = tunnel->tb->root_switch;
device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}
static int tb_enable_tmu(struct tb_switch *sw)
{
int ret;
/*
* If both routers at the end of the link are v2 we simply
* enable the enhanced uni-directional mode. That covers all
* the CL states. For v1 and before we need to use the normal
* rate to allow CL1 (when supported). Otherwise we keep the TMU
* running at the highest accuracy.
*/
ret = tb_switch_tmu_configure(sw,
TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
if (ret == -EOPNOTSUPP) {
if (tb_switch_clx_is_enabled(sw, TB_CL1))
ret = tb_switch_tmu_configure(sw,
TB_SWITCH_TMU_MODE_LOWRES);
else
ret = tb_switch_tmu_configure(sw,
TB_SWITCH_TMU_MODE_HIFI_BI);
}
if (ret)
return ret;
/* If it is already enabled in correct mode, don't touch it */
if (tb_switch_tmu_is_enabled(sw))
return 0;
ret = tb_switch_tmu_disable(sw);
if (ret)
return ret;
ret = tb_switch_tmu_post_time(sw);
if (ret)
return ret;
return tb_switch_tmu_enable(sw);
}
static void tb_switch_discover_tunnels(struct tb_switch *sw,
struct list_head *list,
bool alloc_hopids)
@ -253,13 +394,7 @@ static void tb_switch_discover_tunnels(struct tb_switch *sw,
switch (port->config.type) {
case TB_TYPE_DP_HDMI_IN:
tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
/*
* In case of DP tunnel exists, change host router's
* 1st children TMU mode to HiFi for CL0s to work.
*/
if (tunnel)
tb_switch_enable_tmu_1st_child(tb->root_switch,
TB_SWITCH_TMU_RATE_HIFI);
tb_increase_tmu_accuracy(tunnel);
break;
case TB_TYPE_PCIE_DOWN:
@ -357,25 +492,6 @@ static void tb_scan_xdomain(struct tb_port *port)
}
}
static int tb_enable_tmu(struct tb_switch *sw)
{
int ret;
/* If it is already enabled in correct mode, don't touch it */
if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
return 0;
ret = tb_switch_tmu_disable(sw);
if (ret)
return ret;
ret = tb_switch_tmu_post_time(sw);
if (ret)
return ret;
return tb_switch_tmu_enable(sw);
}
/**
* tb_find_unused_port() - return the first inactive port on @sw
* @sw: Switch to find the port on
@ -480,7 +596,8 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
usb3_consumed_down = 0;
}
*available_up = *available_down = 40000;
/* Maximum possible bandwidth of an asymmetric Gen 4 link is 120 Gb/s */
*available_up = *available_down = 120000;
/* Find the minimum available bandwidth over all links */
tb_for_each_port_on_path(src_port, dst_port, port) {
@ -491,18 +608,45 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
if (tb_is_upstream_port(port)) {
link_speed = port->sw->link_speed;
/*
* sw->link_width is from upstream perspective
* so we use the opposite for downstream of the
* host router.
*/
if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
up_bw = link_speed * 3 * 1000;
down_bw = link_speed * 1 * 1000;
} else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
up_bw = link_speed * 1 * 1000;
down_bw = link_speed * 3 * 1000;
} else {
up_bw = link_speed * port->sw->link_width * 1000;
down_bw = up_bw;
}
} else {
link_speed = tb_port_get_link_speed(port);
if (link_speed < 0)
return link_speed;
link_width = tb_port_get_link_width(port);
if (link_width < 0)
return link_width;
if (link_width == TB_LINK_WIDTH_ASYM_TX) {
up_bw = link_speed * 1 * 1000;
down_bw = link_speed * 3 * 1000;
} else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
up_bw = link_speed * 3 * 1000;
down_bw = link_speed * 1 * 1000;
} else {
up_bw = link_speed * link_width * 1000;
down_bw = up_bw;
}
}
link_width = port->bonded ? 2 : 1;
up_bw = link_speed * link_width * 1000; /* Mb/s */
/* Leave 10% guard band */
up_bw -= up_bw / 10;
down_bw = up_bw;
down_bw -= down_bw / 10;
tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
down_bw);
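To make the asymmetric arithmetic above concrete, here is a small illustrative sketch (not part of the driver; the function name is invented for the example). It mirrors the upstream-port branch for TB_LINK_WIDTH_ASYM_RX on a Gen 4 link running at 40 Gb/s per lane, which is also where the 120 Gb/s figure in the comment comes from:

/*
 * Illustrative sketch only: same math as the upstream-port branch above
 * for a Gen 4 asymmetric (1 TX / 3 RX) link.
 */
static void example_asym_gen4_bandwidth(void)
{
	unsigned int link_speed = 40;		/* Gb/s, Gen 4 */
	unsigned int up_bw, down_bw;

	/* TB_LINK_WIDTH_ASYM_RX: one lane transmitting, three receiving */
	up_bw = link_speed * 1 * 1000;		/* 40000 Mb/s */
	down_bw = link_speed * 3 * 1000;	/* 120000 Mb/s */

	/* Same 10% guard band the driver applies */
	up_bw -= up_bw / 10;			/* 36000 Mb/s */
	down_bw -= down_bw / 10;		/* 108000 Mb/s */

	pr_debug("asym Gen 4 example: %u/%u Mb/s\n", up_bw, down_bw);
}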
@ -628,7 +772,7 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
* Look up available down port. Since we are chaining it should
* be found right above this switch.
*/
port = tb_port_at(tb_route(sw), parent);
port = tb_switch_downstream_port(sw);
down = tb_find_usb3_down(parent, port);
if (!down)
return 0;
@ -737,8 +881,8 @@ static void tb_scan_port(struct tb_port *port)
{
struct tb_cm *tcm = tb_priv(port->sw->tb);
struct tb_port *upstream_port;
bool discovery = false;
struct tb_switch *sw;
int ret;
if (tb_is_upstream_port(port))
return;
@ -804,8 +948,10 @@ static void tb_scan_port(struct tb_port *port)
* tunnels and know which switches were authorized already by
* the boot firmware.
*/
if (!tcm->hotplug_active)
if (!tcm->hotplug_active) {
dev_set_uevent_suppress(&sw->dev, true);
discovery = true;
}
/*
* At the moment Thunderbolt 2 and beyond (devices with LC) we
@ -835,24 +981,20 @@ static void tb_scan_port(struct tb_port *port)
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
*/
ret = tb_switch_enable_clx(sw, TB_CL1);
if (ret && ret != -EOPNOTSUPP)
tb_sw_warn(sw, "failed to enable %s on upstream port\n",
tb_switch_clx_name(TB_CL1));
if (tb_switch_is_clx_enabled(sw, TB_CL1))
/*
* To support highest CLx state, we set router's TMU to
* Normal-Uni mode.
*/
tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
else
/* If CLx disabled, configure router's TMU to HiFi-Bidir mode*/
tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
if (discovery)
tb_sw_dbg(sw, "discovery, not touching CL states\n");
else if (tb_enable_clx(sw))
tb_sw_warn(sw, "failed to enable CL states\n");
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to enable TMU\n");
/*
* Configuration valid needs to be set after the TMU has been
* enabled for the upstream port of the router so we do it here.
*/
tb_switch_configuration_valid(sw);
/* Scan upstream retimers */
tb_retimer_scan(upstream_port, true);
@ -1027,7 +1169,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
struct tb_tunnel *tunnel;
struct tb_port *out;
if (!usb4_dp_port_bw_mode_enabled(in))
if (!usb4_dp_port_bandwidth_mode_enabled(in))
continue;
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
@ -1075,7 +1217,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
else
estimated_bw = estimated_up;
if (usb4_dp_port_set_estimated_bw(in, estimated_bw))
if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
tb_port_warn(in, "failed to update estimated bandwidth\n");
}
@ -1256,8 +1398,7 @@ static void tb_tunnel_dp(struct tb *tb)
* In case of DP tunnel exists, change host router's 1st children
* TMU mode to HiFi for CL0s to work.
*/
tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI);
tb_increase_tmu_accuracy(tunnel);
return;
err_free:
@ -1371,7 +1512,6 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
struct tb_port *up, *down, *port;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *parent_sw;
struct tb_tunnel *tunnel;
up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
@ -1382,9 +1522,8 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
* Look up available down port. Since we are chaining it should
* be found right above this switch.
*/
parent_sw = tb_to_switch(sw->dev.parent);
port = tb_port_at(tb_route(sw), parent_sw);
down = tb_find_pcie_down(parent_sw, port);
port = tb_switch_downstream_port(sw);
down = tb_find_pcie_down(tb_switch_parent(sw), port);
if (!down)
return 0;
@ -1421,30 +1560,45 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
struct tb_port *nhi_port, *dst_port;
struct tb_tunnel *tunnel;
struct tb_switch *sw;
int ret;
sw = tb_to_switch(xd->dev.parent);
dst_port = tb_port_at(xd->route, sw);
nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
mutex_lock(&tb->lock);
/*
* When tunneling DMA paths the link should not enter CL states
* so disable them now.
*/
tb_disable_clx(sw);
tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
transmit_ring, receive_path, receive_ring);
if (!tunnel) {
mutex_unlock(&tb->lock);
return -ENOMEM;
ret = -ENOMEM;
goto err_clx;
}
if (tb_tunnel_activate(tunnel)) {
tb_port_info(nhi_port,
"DMA tunnel activation failed, aborting\n");
tb_tunnel_free(tunnel);
mutex_unlock(&tb->lock);
return -EIO;
ret = -EIO;
goto err_free;
}
list_add_tail(&tunnel->list, &tcm->tunnel_list);
mutex_unlock(&tb->lock);
return 0;
err_free:
tb_tunnel_free(tunnel);
err_clx:
tb_enable_clx(sw);
mutex_unlock(&tb->lock);
return ret;
}
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
@ -1470,6 +1624,13 @@ static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
receive_path, receive_ring))
tb_deactivate_and_free_tunnel(tunnel);
}
/*
* Try to re-enable CL states now, it is OK if this fails
* because we may still have another DMA tunnel active through
* the same host router USB4 downstream port.
*/
tb_enable_clx(sw);
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
@ -1751,12 +1912,12 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_port_dbg(in, "handling bandwidth allocation request\n");
if (!usb4_dp_port_bw_mode_enabled(in)) {
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
tb_port_warn(in, "bandwidth allocation mode not enabled\n");
goto unlock;
}
ret = usb4_dp_port_requested_bw(in);
ret = usb4_dp_port_requested_bandwidth(in);
if (ret < 0) {
if (ret == -ENODATA)
tb_port_dbg(in, "no bandwidth request active\n");
@ -1823,17 +1984,26 @@ static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
static void tb_handle_notification(struct tb *tb, u64 route,
const struct cfg_error_pkg *error)
{
if (tb_cfg_ack_notification(tb->ctl, route, error))
tb_warn(tb, "could not ack notification on %llx\n", route);
switch (error->error) {
case TB_CFG_ERROR_PCIE_WAKE:
case TB_CFG_ERROR_DP_CON_CHANGE:
case TB_CFG_ERROR_DPTX_DISCOVERY:
if (tb_cfg_ack_notification(tb->ctl, route, error))
tb_warn(tb, "could not ack notification on %llx\n",
route);
break;
case TB_CFG_ERROR_DP_BW:
if (tb_cfg_ack_notification(tb->ctl, route, error))
tb_warn(tb, "could not ack notification on %llx\n",
route);
tb_queue_dp_bandwidth_request(tb, route, error->port);
break;
default:
/* Ack is enough */
return;
/* Ignore for now */
break;
}
}
@ -1948,8 +2118,7 @@ static int tb_start(struct tb *tb)
* To support highest CLx state, we set host router's TMU to
* Normal mode.
*/
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL,
false);
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
@ -1990,34 +2159,19 @@ static int tb_suspend_noirq(struct tb *tb)
static void tb_restore_children(struct tb_switch *sw)
{
struct tb_port *port;
int ret;
/* No need to restore if the router is already unplugged */
if (sw->is_unplugged)
return;
/*
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx re-enabling in case CLx is not supported.
*/
ret = tb_switch_enable_clx(sw, TB_CL1);
if (ret && ret != -EOPNOTSUPP)
tb_sw_warn(sw, "failed to re-enable %s on upstream port\n",
tb_switch_clx_name(TB_CL1));
if (tb_switch_is_clx_enabled(sw, TB_CL1))
/*
* To support highest CLx state, we set router's TMU to
* Normal-Uni mode.
*/
tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true);
else
/* If CLx disabled, configure router's TMU to HiFi-Bidir mode*/
tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
if (tb_enable_clx(sw))
tb_sw_warn(sw, "failed to re-enable CL states\n");
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to restore TMU configuration\n");
tb_switch_configuration_valid(sw);
tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port) && !port->xdomain)
continue;


@ -19,10 +19,6 @@
#include "ctl.h"
#include "dma_port.h"
#define NVM_MIN_SIZE SZ_32K
#define NVM_MAX_SIZE SZ_512K
#define NVM_DATA_DWORDS 16
/* Keep link controller awake during update */
#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
/* Disable CLx if not supported */
@ -77,51 +73,37 @@ enum tb_nvm_write_ops {
#define USB4_SWITCH_MAX_DEPTH 5
/**
* enum tb_switch_tmu_rate - TMU refresh rate
* @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
* @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
* transmission of the Delay Request TSNOS
* (Time Sync Notification Ordered Set) on a Link
* @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
* transmission of the Delay Request TSNOS on
* a Link
* enum tb_switch_tmu_mode - TMU mode
* @TB_SWITCH_TMU_MODE_OFF: TMU is off
* @TB_SWITCH_TMU_MODE_LOWRES: Uni-directional, normal mode
* @TB_SWITCH_TMU_MODE_HIFI_UNI: Uni-directional, HiFi mode
* @TB_SWITCH_TMU_MODE_HIFI_BI: Bi-directional, HiFi mode
* @TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: Enhanced Uni-directional, MedRes mode
*
* Ordering is based on TMU accuracy level (highest last).
*/
enum tb_switch_tmu_rate {
TB_SWITCH_TMU_RATE_OFF = 0,
TB_SWITCH_TMU_RATE_HIFI = 16,
TB_SWITCH_TMU_RATE_NORMAL = 1000,
enum tb_switch_tmu_mode {
TB_SWITCH_TMU_MODE_OFF,
TB_SWITCH_TMU_MODE_LOWRES,
TB_SWITCH_TMU_MODE_HIFI_UNI,
TB_SWITCH_TMU_MODE_HIFI_BI,
TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI,
};
/**
* struct tb_switch_tmu - Structure holding switch TMU configuration
* struct tb_switch_tmu - Structure holding router TMU configuration
* @cap: Offset to the TMU capability (%0 if not found)
* @has_ucap: Does the switch support uni-directional mode
* @rate: TMU refresh rate related to upstream switch. In case of root
* switch this holds the domain rate. Reflects the HW setting.
* @unidirectional: Is the TMU in uni-directional or bi-directional mode
* related to upstream switch. Don't care for root switch.
* Reflects the HW setting.
* @unidirectional_request: Is the new TMU mode: uni-directional or bi-directional
* that is requested to be set. Related to upstream switch.
* Don't care for root switch.
* @rate_request: TMU new refresh rate related to upstream switch that is
* requested to be set. In case of root switch, this holds
* the new domain rate that is requested to be set.
* @mode: TMU mode related to the upstream router. Reflects the HW
* setting. Don't care for host router.
* @mode_request: TMU mode requested to set. Related to upstream router.
* Don't care for host router.
*/
struct tb_switch_tmu {
int cap;
bool has_ucap;
enum tb_switch_tmu_rate rate;
bool unidirectional;
bool unidirectional_request;
enum tb_switch_tmu_rate rate_request;
};
enum tb_clx {
TB_CLX_DISABLE,
/* CL0s and CL1 are enabled and supported together */
TB_CL1 = BIT(0),
TB_CL2 = BIT(1),
enum tb_switch_tmu_mode mode;
enum tb_switch_tmu_mode mode_request;
};
/**
@ -142,7 +124,7 @@ enum tb_clx {
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
* @link_speed: Speed of the link in Gb/s
* @link_width: Width of the link (1 or 2)
* @link_width: Width of the upstream facing link
* @link_usb4: Upstream link is USB4
* @generation: Switch Thunderbolt generation
* @cap_plug_events: Offset to the plug events capability (%0 if not found)
@ -174,12 +156,17 @@ enum tb_clx {
* @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN
* @max_pcie_credits: Router preferred number of buffers for PCIe
* @max_dma_credits: Router preferred number of buffers for DMA/P2P
* @clx: CLx state on the upstream link of the router
* @clx: CLx states on the upstream link of the router
*
* When the switch is being added or removed to the domain (other
* switches) you need to have domain lock held.
*
* In USB4 terminology this structure represents a router.
*
* Note @link_width is not the same as whether the link is bonded or not.
* For Gen 4 links the link is also bonded when it is asymmetric. The
* correct way to find out whether the link is bonded is to look at the
* @bonded field of the upstream port.
*/
struct tb_switch {
struct device dev;
@ -195,7 +182,7 @@ struct tb_switch {
const char *vendor_name;
const char *device_name;
unsigned int link_speed;
unsigned int link_width;
enum tb_link_width link_width;
bool link_usb4;
unsigned int generation;
int cap_plug_events;
@ -225,7 +212,7 @@ struct tb_switch {
unsigned int min_dp_main_credits;
unsigned int max_pcie_credits;
unsigned int max_dma_credits;
enum tb_clx clx;
unsigned int clx;
};
/**
@ -455,6 +442,11 @@ struct tb_path {
#define TB_WAKE_ON_PCIE BIT(4)
#define TB_WAKE_ON_DP BIT(5)
/* CL states */
#define TB_CL0S BIT(0)
#define TB_CL1 BIT(1)
#define TB_CL2 BIT(2)
/**
* struct tb_cm_ops - Connection manager specific operations vector
* @driver_ready: Called right after control channel is started. Used by
@ -802,6 +794,7 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
struct device *parent, u64 route);
int tb_switch_configure(struct tb_switch *sw);
int tb_switch_configuration_valid(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
@ -857,6 +850,20 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
return tb_to_switch(sw->dev.parent);
}
/**
* tb_switch_downstream_port() - Return downstream facing port of parent router
* @sw: Device router pointer
*
* Only call for device routers. Returns the downstream facing port of
* the parent router.
*/
static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw)
{
if (WARN_ON(!tb_route(sw)))
return NULL;
return tb_port_at(tb_route(sw), tb_switch_parent(sw));
}
static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
@ -935,17 +942,6 @@ static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
return false;
}
/**
* tb_switch_is_usb4() - Is the switch USB4 compliant
* @sw: Switch to check
*
* Returns true if the @sw is USB4 compliant router, false otherwise.
*/
static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
{
return sw->config.thunderbolt_version == USB4_VERSION_1_0;
}
/**
* tb_switch_is_icm() - Is the switch handled by ICM firmware
* @sw: Switch to check
@ -973,68 +969,58 @@ int tb_switch_tmu_init(struct tb_switch *sw);
int tb_switch_tmu_post_time(struct tb_switch *sw);
int tb_switch_tmu_disable(struct tb_switch *sw);
int tb_switch_tmu_enable(struct tb_switch *sw);
void tb_switch_tmu_configure(struct tb_switch *sw,
enum tb_switch_tmu_rate rate,
bool unidirectional);
void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
enum tb_switch_tmu_rate rate);
int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode);
/**
* tb_switch_tmu_is_configured() - Is given TMU mode configured
* @sw: Router whose mode to check
* @mode: Mode to check
*
* Checks if given router TMU mode is configured to @mode. Note the
* router TMU might not be enabled to this mode.
*/
static inline bool tb_switch_tmu_is_configured(const struct tb_switch *sw,
enum tb_switch_tmu_mode mode)
{
return sw->tmu.mode_request == mode;
}
/**
* tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled
* @sw: Router whose TMU mode to check
* @unidirectional: If uni-directional (bi-directional otherwise)
*
* Return true if hardware TMU configuration matches the one passed in
* as parameter. That is HiFi/Normal and either uni-directional or bi-directional.
* Return true if hardware TMU configuration matches the requested
* configuration (and is not %TB_SWITCH_TMU_MODE_OFF).
*/
static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw,
bool unidirectional)
static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
{
return sw->tmu.rate == sw->tmu.rate_request &&
sw->tmu.unidirectional == unidirectional;
return sw->tmu.mode != TB_SWITCH_TMU_MODE_OFF &&
sw->tmu.mode == sw->tmu.mode_request;
}
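As a hypothetical caller sketch (the helper name is invented for illustration), the configure/enable split above is used like this: tb_switch_tmu_configure() only records @mode_request, and tb_switch_tmu_enable() commits it unless the router is already running the requested mode. The driver's own tb_enable_tmu() additionally disables the TMU and posts time before re-enabling; this sketch keeps only the minimal flow:

/* Sketch only: request LowRes and commit it if not already running */
static int example_tmu_to_lowres(struct tb_switch *sw)
{
	int ret;

	ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_LOWRES);
	if (ret)
		return ret;

	/* Already enabled in the requested mode, nothing to do */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	return tb_switch_tmu_enable(sw);
}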
static inline const char *tb_switch_clx_name(enum tb_clx clx)
{
switch (clx) {
/* CL0s and CL1 are enabled and supported together */
case TB_CL1:
return "CL0s/CL1";
default:
return "unknown";
}
}
bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx);
int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx);
int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx);
int tb_switch_clx_init(struct tb_switch *sw);
bool tb_switch_clx_is_supported(const struct tb_switch *sw);
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx);
int tb_switch_clx_disable(struct tb_switch *sw);
/**
* tb_switch_is_clx_enabled() - Checks if the CLx is enabled
* tb_switch_clx_is_enabled() - Checks if the CLx is enabled
* @sw: Router to check for the CLx
* @clx: The CLx state to check for
* @clx: The CLx states to check for
*
* Checks if the specified CLx is enabled on the router upstream link.
* Returns true if any of the given states is enabled.
*
* Not applicable for a host router.
*/
static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
enum tb_clx clx)
static inline bool tb_switch_clx_is_enabled(const struct tb_switch *sw,
unsigned int clx)
{
return sw->clx == clx;
return sw->clx & clx;
}
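Since @clx is now a bitmask rather than a single enum value, a caller can test several states in one call. A minimal hypothetical example (function name invented) using only the flags defined above:

/* Sketch only: true if any CL state is active on the upstream link */
static bool example_any_clx_active(const struct tb_switch *sw)
{
	return tb_switch_clx_is_enabled(sw, TB_CL0S | TB_CL1 | TB_CL2);
}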
/**
* tb_switch_is_clx_supported() - Is CLx supported on this type of router
* @sw: The router to check CLx support for
*/
static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
{
if (sw->quirks & QUIRK_NO_CLX)
return false;
return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
int tb_switch_mask_clx_objections(struct tb_switch *sw);
int tb_switch_pcie_l1_enable(struct tb_switch *sw);
int tb_switch_xhci_connect(struct tb_switch *sw);
@ -1073,14 +1059,12 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
int tb_port_get_link_speed(struct tb_port *port);
int tb_port_get_link_width(struct tb_port *port);
int tb_port_set_link_width(struct tb_port *port, unsigned int width);
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding);
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width);
int tb_port_lane_bonding_enable(struct tb_port *port);
void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
@ -1183,6 +1167,17 @@ static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
return tb_to_switch(xd->dev.parent);
}
/**
* tb_xdomain_downstream_port() - Return downstream facing port of parent router
* @xd: Xdomain pointer
*
* Returns the downstream port the XDomain is connected to.
*/
static inline struct tb_port *tb_xdomain_downstream_port(struct tb_xdomain *xd)
{
return tb_port_at(xd->route, tb_xdomain_parent(xd));
}
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
size_t size);
int tb_retimer_scan(struct tb_port *port, bool add);
@ -1200,7 +1195,31 @@ static inline struct tb_retimer *tb_to_retimer(struct device *dev)
return NULL;
}
/**
* usb4_switch_version() - Returns USB4 version of the router
* @sw: Router to check
*
* Returns major version of USB4 router (%1 for v1, %2 for v2 and so
* on). Can be called for pre-USB4 routers too, in which case it returns %0.
*/
static inline unsigned int usb4_switch_version(const struct tb_switch *sw)
{
return FIELD_GET(USB4_VERSION_MAJOR_MASK, sw->config.thunderbolt_version);
}
/**
* tb_switch_is_usb4() - Is the switch USB4 compliant
* @sw: Switch to check
*
* Returns true if the @sw is USB4 compliant router, false otherwise.
*/
static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
{
return usb4_switch_version(sw) > 0;
}
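The major version lives in bits 7:5 of the thunderbolt_version field (USB4_VERSION_MAJOR_MASK, defined in the register header change further below, is GENMASK(7, 5)), so the old USB4_VERSION_1_0 value 0x20 decodes to 1, and any value whose bits 7:5 equal 2 identifies a v2 router. A throwaway sketch, with an invented helper name and an assumed example value, just to show the decoding:

/* Sketch only: decode the USB4 major version from a raw register value */
static unsigned int example_usb4_major(u32 thunderbolt_version)
{
	/* 0x20 -> 1 (USB4 v1); e.g. 0x40 -> 2 (USB4 v2) */
	return FIELD_GET(GENMASK(7, 5), thunderbolt_version);
}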
int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_configuration_valid(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size);
@ -1273,19 +1292,22 @@ int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw);
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id);
bool usb4_dp_port_bw_mode_supported(struct tb_port *port);
bool usb4_dp_port_bw_mode_enabled(struct tb_port *port);
int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported);
bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port);
bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port);
int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
bool supported);
int usb4_dp_port_group_id(struct tb_port *port);
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id);
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes);
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes);
int usb4_dp_port_granularity(struct tb_port *port);
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity);
int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw);
int usb4_dp_port_allocated_bw(struct tb_port *port);
int usb4_dp_port_allocate_bw(struct tb_port *port, int bw);
int usb4_dp_port_requested_bw(struct tb_port *port);
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw);
int usb4_dp_port_allocated_bandwidth(struct tb_port *port);
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw);
int usb4_dp_port_requested_bandwidth(struct tb_port *port);
int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable);
static inline bool tb_is_usb4_port_device(const struct device *dev)
{
@ -1303,6 +1325,11 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port);
void usb4_port_device_remove(struct usb4_port *usb4);
int usb4_port_device_resume(struct usb4_port *usb4);
static inline bool usb4_port_device_is_offline(const struct usb4_port *usb4)
{
return usb4->offline;
}
void tb_check_quirks(struct tb_switch *sw);
#ifdef CONFIG_ACPI


@ -30,6 +30,13 @@ enum tb_cfg_error {
TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
TB_CFG_ERROR_LOCK = 15,
TB_CFG_ERROR_DP_BW = 32,
TB_CFG_ERROR_ROP_CMPLT = 33,
TB_CFG_ERROR_POP_CMPLT = 34,
TB_CFG_ERROR_PCIE_WAKE = 35,
TB_CFG_ERROR_DP_CON_CHANGE = 36,
TB_CFG_ERROR_DPTX_DISCOVERY = 37,
TB_CFG_ERROR_LINK_RECOVERY = 38,
TB_CFG_ERROR_ASYM_LINK = 39,
};
/* common header */


@ -190,11 +190,14 @@ struct tb_regs_switch_header {
u32 thunderbolt_version:8;
} __packed;
/* USB4 version 1.0 */
#define USB4_VERSION_1_0 0x20
/* Used with the router thunderbolt_version */
#define USB4_VERSION_MAJOR_MASK GENMASK(7, 5)
#define ROUTER_CS_1 0x01
#define ROUTER_CS_4 0x04
/* Used with the router cmuv field */
#define ROUTER_CS_4_CMUV_V1 0x10
#define ROUTER_CS_4_CMUV_V2 0x20
#define ROUTER_CS_5 0x05
#define ROUTER_CS_5_SLP BIT(0)
#define ROUTER_CS_5_WOP BIT(1)
@ -249,11 +252,13 @@ enum usb4_switch_op {
#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0)
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16)
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16
#define TMU_RTR_CS_15 0xf
#define TMU_RTR_CS_15 0x0f
#define TMU_RTR_CS_15_FREQ_AVG_MASK GENMASK(5, 0)
#define TMU_RTR_CS_15_DELAY_AVG_MASK GENMASK(11, 6)
#define TMU_RTR_CS_15_OFFSET_AVG_MASK GENMASK(17, 12)
#define TMU_RTR_CS_15_ERROR_AVG_MASK GENMASK(23, 18)
#define TMU_RTR_CS_18 0x12
#define TMU_RTR_CS_18_DELTA_AVG_CONST_MASK GENMASK(23, 16)
#define TMU_RTR_CS_22 0x16
#define TMU_RTR_CS_24 0x18
#define TMU_RTR_CS_25 0x19
@ -319,6 +324,14 @@ struct tb_regs_port_header {
#define TMU_ADP_CS_3_UDM BIT(29)
#define TMU_ADP_CS_6 0x06
#define TMU_ADP_CS_6_DTS BIT(1)
#define TMU_ADP_CS_8 0x08
#define TMU_ADP_CS_8_REPL_TIMEOUT_MASK GENMASK(14, 0)
#define TMU_ADP_CS_8_EUDM BIT(15)
#define TMU_ADP_CS_8_REPL_THRESHOLD_MASK GENMASK(25, 16)
#define TMU_ADP_CS_9 0x09
#define TMU_ADP_CS_9_REPL_N_MASK GENMASK(7, 0)
#define TMU_ADP_CS_9_DIRSWITCH_N_MASK GENMASK(15, 8)
#define TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK GENMASK(31, 16)
/* Lane adapter registers */
#define LANE_ADP_CS_0 0x00
@ -346,6 +359,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16
#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8
#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4
#define LANE_ADP_CS_1_CURRENT_SPEED_GEN4 0x2
#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
#define LANE_ADP_CS_1_PMS BIT(30)
@ -436,6 +450,9 @@ struct tb_regs_port_header {
#define DP_COMMON_CAP_1_LANE 0x0
#define DP_COMMON_CAP_2_LANES 0x1
#define DP_COMMON_CAP_4_LANES 0x2
#define DP_COMMON_CAP_UHBR10 BIT(17)
#define DP_COMMON_CAP_UHBR20 BIT(18)
#define DP_COMMON_CAP_UHBR13_5 BIT(19)
#define DP_COMMON_CAP_LTTPR_NS BIT(27)
#define DP_COMMON_CAP_BW_MODE BIT(28)
#define DP_COMMON_CAP_DPRX_DONE BIT(31)
@ -447,6 +464,8 @@ struct tb_regs_port_header {
/* PCIe adapter registers */
#define ADP_PCIE_CS_0 0x00
#define ADP_PCIE_CS_0_PE BIT(31)
#define ADP_PCIE_CS_1 0x01
#define ADP_PCIE_CS_1_EE BIT(0)
/* USB adapter registers */
#define ADP_USB3_CS_0 0x00


@ -170,6 +170,23 @@ static struct tb_switch *alloc_host_usb4(struct kunit *test)
return sw;
}
static struct tb_switch *alloc_host_br(struct kunit *test)
{
struct tb_switch *sw;
sw = alloc_host_usb4(test);
if (!sw)
return NULL;
sw->ports[10].config.type = TB_TYPE_DP_HDMI_IN;
sw->ports[10].config.max_in_hop_id = 9;
sw->ports[10].config.max_out_hop_id = 9;
sw->ports[10].cap_adap = -1;
sw->ports[10].disabled = false;
return sw;
}
static struct tb_switch *alloc_dev_default(struct kunit *test,
struct tb_switch *parent,
u64 route, bool bonded)
@ -1583,6 +1600,71 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_3dp(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
struct tb_port *in1, *in2, *in3, *out1, *out2, *out3;
struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
/*
* Create 3 DP tunnels from Host to Devices #2, #5 and #4.
*
* [Host]
* 3 |
* 1 |
* [Device #1]
* 3 / | 5 \ 7
* 1 / | \ 1
* [Device #2] | [Device #4]
* | 1
* [Device #3]
* | 5
* | 1
* [Device #5]
*/
host = alloc_host_br(test);
dev1 = alloc_dev_default(test, host, 0x3, true);
dev2 = alloc_dev_default(test, dev1, 0x303, true);
dev3 = alloc_dev_default(test, dev1, 0x503, true);
dev4 = alloc_dev_default(test, dev1, 0x703, true);
dev5 = alloc_dev_default(test, dev3, 0x50503, true);
in1 = &host->ports[5];
in2 = &host->ports[6];
in3 = &host->ports[10];
out1 = &dev2->ports[13];
out2 = &dev5->ports[13];
out3 = &dev4->ports[14];
tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, out1);
KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);
tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, out2);
KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);
tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);
KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
KUNIT_EXPECT_PTR_EQ(test, tunnel3->dst_port, out3);
KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);
tb_tunnel_free(tunnel3);
tb_tunnel_free(tunnel2);
tb_tunnel_free(tunnel1);
}
static void tb_test_tunnel_usb3(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2;
@ -2790,6 +2872,7 @@ static struct kunit_case tb_test_cases[] = {
KUNIT_CASE(tb_test_tunnel_dp_chain),
KUNIT_CASE(tb_test_tunnel_dp_tree),
KUNIT_CASE(tb_test_tunnel_dp_max_length),
KUNIT_CASE(tb_test_tunnel_3dp),
KUNIT_CASE(tb_test_tunnel_port_on_path),
KUNIT_CASE(tb_test_tunnel_usb3),
KUNIT_CASE(tb_test_tunnel_dma),


@ -11,23 +11,63 @@
#include "tb.h"
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
enum tb_switch_tmu_rate rate)
static const unsigned int tmu_rates[] = {
[TB_SWITCH_TMU_MODE_OFF] = 0,
[TB_SWITCH_TMU_MODE_LOWRES] = 1000,
[TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
[TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};
static const struct {
unsigned int freq_meas_window;
unsigned int avg_const;
unsigned int delta_avg_const;
unsigned int repl_timeout;
unsigned int repl_threshold;
unsigned int repl_n;
unsigned int dirswitch_n;
} tmu_params[] = {
[TB_SWITCH_TMU_MODE_OFF] = { },
[TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
[TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
[TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
800, 4, 0, 3125, 25, 128, 255,
},
};
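Both tables are indexed by the same enum, so selecting a mode picks the TSNOS rate and the measurement parameters together. A small illustrative helper (name invented for the example) showing the lookup:

/* Sketch only: dump the per-mode settings picked from the tables above */
static void example_dump_tmu_mode(enum tb_switch_tmu_mode mode)
{
	pr_debug("rate %u us, freq window %u, avg const %u\n",
		 tmu_rates[mode], tmu_params[mode].freq_meas_window,
		 tmu_params[mode].avg_const);
}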
static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
{
switch (mode) {
case TB_SWITCH_TMU_MODE_OFF:
return "off";
case TB_SWITCH_TMU_MODE_LOWRES:
return "uni-directional, LowRes";
case TB_SWITCH_TMU_MODE_HIFI_UNI:
return "uni-directional, HiFi";
case TB_SWITCH_TMU_MODE_HIFI_BI:
return "bi-directional, HiFi";
case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
return "enhanced uni-directional, MedRes";
default:
return "unknown";
}
}
static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
{
return usb4_switch_version(sw) > 1;
}
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
enum tb_switch_tmu_mode mode)
{
u32 freq_meas_wind[2] = { 30, 800 };
u32 avg_const[2] = { 4, 8 };
u32 freq, avg, val;
int ret;
if (rate == TB_SWITCH_TMU_RATE_NORMAL) {
freq = freq_meas_wind[0];
avg = avg_const[0];
} else if (rate == TB_SWITCH_TMU_RATE_HIFI) {
freq = freq_meas_wind[1];
avg = avg_const[1];
} else {
return 0;
}
freq = tmu_params[mode].freq_meas_window;
avg = tmu_params[mode].avg_const;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_0, 1);
@ -56,37 +96,30 @@ static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);
return tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_15, 1);
}
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_15, 1);
if (ret)
return ret;
static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
{
bool root_switch = !tb_route(sw);
if (tb_switch_tmu_enhanced_is_supported(sw)) {
u32 delta_avg = tmu_params[mode].delta_avg_const;
switch (sw->tmu.rate) {
case TB_SWITCH_TMU_RATE_OFF:
return "off";
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_18, 1);
if (ret)
return ret;
case TB_SWITCH_TMU_RATE_HIFI:
/* Root switch does not have upstream directionality */
if (root_switch)
return "HiFi";
if (sw->tmu.unidirectional)
return "uni-directional, HiFi";
return "bi-directional, HiFi";
val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);
case TB_SWITCH_TMU_RATE_NORMAL:
if (root_switch)
return "normal";
return "uni-directional, normal";
default:
return "unknown";
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_18, 1);
}
return ret;
}
static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
{
int ret;
u32 val;
@ -182,6 +215,103 @@ static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
return val & TMU_ADP_CS_3_UDM;
}
static bool tb_port_tmu_is_enhanced(struct tb_port *port)
{
int ret;
u32 val;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_8, 1);
if (ret)
return false;
return val & TMU_ADP_CS_8_EUDM;
}
/* Can be called for non-v2 lane adapters too */
static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable)
{
int ret;
u32 val;
if (!tb_switch_tmu_enhanced_is_supported(port->sw))
return 0;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_8, 1);
if (ret)
return ret;
if (enable)
val |= TMU_ADP_CS_8_EUDM;
else
val &= ~TMU_ADP_CS_8_EUDM;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_8, 1);
}
static int tb_port_set_tmu_mode_params(struct tb_port *port,
enum tb_switch_tmu_mode mode)
{
u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val;
int ret;
repl_timeout = tmu_params[mode].repl_timeout;
repl_threshold = tmu_params[mode].repl_threshold;
repl_n = tmu_params[mode].repl_n;
dirswitch_n = tmu_params[mode].dirswitch_n;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_8, 1);
if (ret)
return ret;
val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK;
val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK;
val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout);
val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold);
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_8, 1);
if (ret)
return ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_9, 1);
if (ret)
return ret;
val &= ~TMU_ADP_CS_9_REPL_N_MASK;
val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK;
val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n);
val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n);
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_9, 1);
}
/* Can be called for non-v2 lane adapters too */
static int tb_port_tmu_rate_write(struct tb_port *port, int rate)
{
int ret;
u32 val;
if (!tb_switch_tmu_enhanced_is_supported(port->sw))
return 0;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_9, 1);
if (ret)
return ret;
val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK;
val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate);
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_9, 1);
}
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;
@ -224,6 +354,50 @@ static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}
static int tmu_mode_init(struct tb_switch *sw)
{
bool enhanced, ucap;
int ret, rate;
ucap = tb_switch_tmu_ucap_is_supported(sw);
if (ucap)
tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
enhanced = tb_switch_tmu_enhanced_is_supported(sw);
if (enhanced)
tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n");
ret = tb_switch_tmu_rate_read(sw);
if (ret < 0)
return ret;
rate = ret;
/* Off by default */
sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;
if (tb_route(sw)) {
struct tb_port *up = tb_upstream_port(sw);
if (enhanced && tb_port_tmu_is_enhanced(up)) {
sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI;
} else if (ucap && tb_port_tmu_is_unidirectional(up)) {
if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
} else if (rate) {
sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
}
} else if (rate) {
sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
}
/* Update the initial request to match the current mode */
sw->tmu.mode_request = sw->tmu.mode;
sw->tmu.has_ucap = ucap;
return 0;
}
/**
* tb_switch_tmu_init() - Initialize switch TMU structures
* @sw: Switch to initialize
@ -252,27 +426,11 @@ int tb_switch_tmu_init(struct tb_switch *sw)
port->cap_tmu = cap;
}
ret = tb_switch_tmu_rate_read(sw);
if (ret < 0)
ret = tmu_mode_init(sw);
if (ret)
return ret;
sw->tmu.rate = ret;
sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
if (sw->tmu.has_ucap) {
tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
if (tb_route(sw)) {
struct tb_port *up = tb_upstream_port(sw);
sw->tmu.unidirectional =
tb_port_tmu_is_unidirectional(up);
}
} else {
sw->tmu.unidirectional = false;
}
tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode));
return 0;
}
@ -308,7 +466,7 @@ int tb_switch_tmu_post_time(struct tb_switch *sw)
return ret;
for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
gm_local_time[i]);
/* Convert to nanoseconds (drop fractional part) */
@ -375,6 +533,23 @@ out:
return ret;
}
static int disable_enhanced(struct tb_port *up, struct tb_port *down)
{
int ret;
/*
* Router may already have been disconnected so ignore errors on the
* upstream port.
*/
tb_port_tmu_rate_write(up, 0);
tb_port_tmu_enhanced_enable(up, false);
ret = tb_port_tmu_rate_write(down, 0);
if (ret)
return ret;
return tb_port_tmu_enhanced_enable(down, false);
}
/**
* tb_switch_tmu_disable() - Disable TMU of a switch
* @sw: Switch whose TMU to disable
@ -383,26 +558,15 @@ out:
*/
int tb_switch_tmu_disable(struct tb_switch *sw)
{
/*
* No need to disable TMU on devices that don't support CLx since
* on these devices e.g. Alpine Ridge and earlier, the TMU mode
* HiFi bi-directional is enabled by default and we don't change it.
*/
if (!tb_switch_is_clx_supported(sw))
return 0;
/* Already disabled? */
if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF)
return 0;
if (tb_route(sw)) {
bool unidirectional = sw->tmu.unidirectional;
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *down, *up;
int ret;
down = tb_port_at(tb_route(sw), parent);
down = tb_switch_downstream_port(sw);
up = tb_upstream_port(sw);
/*
* In case of uni-directional time sync, TMU handshake is
@ -415,37 +579,49 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
* uni-directional mode and we don't want to change its TMU
* mode.
*/
tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
tb_port_tmu_time_sync_disable(up);
ret = tb_port_tmu_time_sync_disable(down);
if (ret)
return ret;
if (unidirectional) {
switch (sw->tmu.mode) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
/* The switch may be unplugged so ignore any errors */
tb_port_tmu_unidirectional_disable(up);
ret = tb_port_tmu_unidirectional_disable(down);
if (ret)
return ret;
break;
case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
ret = disable_enhanced(up, down);
if (ret)
return ret;
break;
default:
break;
}
} else {
tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
}
sw->tmu.unidirectional = false;
sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;
tb_sw_dbg(sw, "TMU: disabled\n");
return 0;
}
static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
/* Called only when there is failure enabling requested mode */
static void tb_switch_tmu_off(struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF];
struct tb_port *down, *up;
down = tb_port_at(tb_route(sw), parent);
down = tb_switch_downstream_port(sw);
up = tb_upstream_port(sw);
/*
* In case of any failure in one of the steps when setting
@ -456,28 +632,38 @@ static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
*/
tb_port_tmu_time_sync_disable(down);
tb_port_tmu_time_sync_disable(up);
if (unidirectional)
tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
else
tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
switch (sw->tmu.mode_request) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
break;
case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
disable_enhanced(up, down);
break;
default:
break;
}
/* Always set the rate to 0 */
tb_switch_tmu_rate_write(sw, rate);
tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
tb_port_tmu_unidirectional_disable(down);
tb_port_tmu_unidirectional_disable(up);
}
/*
* This function is called when the previous TMU mode was
* TB_SWITCH_TMU_RATE_OFF.
* TB_SWITCH_TMU_MODE_OFF.
*/
static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *up, *down;
int ret;
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
down = tb_switch_downstream_port(sw);
ret = tb_port_tmu_unidirectional_disable(up);
if (ret)
@ -487,7 +673,7 @@ static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
if (ret)
goto out;
ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]);
if (ret)
goto out;
@ -502,12 +688,14 @@ static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
return 0;
out:
__tb_switch_tmu_off(sw, false);
tb_switch_tmu_off(sw);
return ret;
}
static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
/* Only needed for Titan Ridge */
static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
{
struct tb_port *up = tb_upstream_port(sw);
u32 val;
int ret;
@ -518,36 +706,34 @@ static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
return tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
}
static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
{
struct tb_port *up = tb_upstream_port(sw);
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
if (ret)
return ret;
return tb_port_tmu_write(up, TMU_ADP_CS_6,
TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
}
/*
* This function is called when the previous TMU mode was
* TB_SWITCH_TMU_RATE_OFF.
* TB_SWITCH_TMU_MODE_OFF.
*/
static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *up, *down;
int ret;
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
down = tb_switch_downstream_port(sw);
ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
tmu_rates[sw->tmu.mode_request]);
if (ret)
return ret;
ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
if (ret)
return ret;
@ -570,16 +756,65 @@ static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
return 0;
out:
__tb_switch_tmu_off(sw, true);
tb_switch_tmu_off(sw);
return ret;
}
static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
/*
* This function is called when the previous TMU mode was
* TB_SWITCH_TMU_MODE_OFF.
*/
static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
unsigned int rate = tmu_rates[sw->tmu.mode_request];
struct tb_port *up, *down;
int ret;
/* Router specific parameters first */
ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
if (ret)
return ret;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request);
if (ret)
goto out;
ret = tb_port_tmu_rate_write(up, rate);
if (ret)
goto out;
ret = tb_port_tmu_enhanced_enable(up, true);
if (ret)
goto out;
ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request);
if (ret)
goto out;
ret = tb_port_tmu_rate_write(down, rate);
if (ret)
goto out;
ret = tb_port_tmu_enhanced_enable(down, true);
if (ret)
goto out;
return 0;
out:
tb_switch_tmu_off(sw);
return ret;
}
static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
unsigned int rate = tmu_rates[sw->tmu.mode];
struct tb_port *down, *up;
down = tb_port_at(tb_route(sw), parent);
down = tb_switch_downstream_port(sw);
up = tb_upstream_port(sw);
/*
* In case of any failure in one of the steps when change mode,
@ -587,42 +822,97 @@ static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
* In case of additional failures in the functions below,
* ignore them since the caller shall already report a failure.
*/
tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional);
if (sw->tmu.unidirectional_request)
tb_switch_tmu_rate_write(parent, sw->tmu.rate);
else
tb_switch_tmu_rate_write(sw, sw->tmu.rate);
switch (sw->tmu.mode) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
tb_port_tmu_set_unidirectional(down, true);
tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
break;
tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional);
case TB_SWITCH_TMU_MODE_HIFI_BI:
tb_port_tmu_set_unidirectional(down, false);
tb_switch_tmu_rate_write(sw, rate);
break;
default:
break;
}
tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
switch (sw->tmu.mode) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
tb_port_tmu_set_unidirectional(up, true);
break;
case TB_SWITCH_TMU_MODE_HIFI_BI:
tb_port_tmu_set_unidirectional(up, false);
break;
default:
break;
}
}
static int __tb_switch_tmu_change_mode(struct tb_switch *sw)
static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
unsigned int rate = tmu_rates[sw->tmu.mode_request];
struct tb_port *up, *down;
int ret;
up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request);
if (ret)
goto out;
down = tb_switch_downstream_port(sw);
if (sw->tmu.unidirectional_request)
ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
else
ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
/* Program the upstream router downstream facing lane adapter */
switch (sw->tmu.mode_request) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
ret = tb_port_tmu_set_unidirectional(down, true);
if (ret)
goto out;
ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
if (ret)
goto out;
break;
case TB_SWITCH_TMU_MODE_HIFI_BI:
ret = tb_port_tmu_set_unidirectional(down, false);
if (ret)
goto out;
ret = tb_switch_tmu_rate_write(sw, rate);
if (ret)
goto out;
break;
default:
/* Not allowed to change modes from other than above */
return -EINVAL;
}
ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
if (ret)
return ret;
ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
if (ret)
return ret;
/* Program the new mode and the downstream router lane adapter */
switch (sw->tmu.mode_request) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
ret = tb_port_tmu_set_unidirectional(up, true);
if (ret)
goto out;
break;
ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request);
if (ret)
goto out;
case TB_SWITCH_TMU_MODE_HIFI_BI:
ret = tb_port_tmu_set_unidirectional(up, false);
if (ret)
goto out;
break;
default:
/* Not allowed to change modes from other than above */
return -EINVAL;
}
ret = tb_port_tmu_time_sync_enable(down);
if (ret)
@ -635,7 +925,7 @@ static int __tb_switch_tmu_change_mode(struct tb_switch *sw)
return 0;
out:
__tb_switch_tmu_change_mode_prev(sw);
tb_switch_tmu_change_mode_prev(sw);
return ret;
}
@ -643,45 +933,21 @@ out:
* tb_switch_tmu_enable() - Enable TMU on a router
* @sw: Router whose TMU to enable
*
* Enables TMU of a router to be in uni-directional Normal/HiFi
* or bi-directional HiFi mode. Calling tb_switch_tmu_configure() is required
* before calling this function, to select the mode Normal/HiFi and
* directionality (uni-directional/bi-directional).
* In HiFi mode all tunneling should work. In Normal mode, DP tunneling can't
* work. Uni-directional mode is required for CLx (Link Low-Power) to work.
* Enables TMU of a router to be in uni-directional Normal/HiFi or
* bi-directional HiFi mode. Calling tb_switch_tmu_configure() is
* required before calling this function.
*/
int tb_switch_tmu_enable(struct tb_switch *sw)
{
bool unidirectional = sw->tmu.unidirectional_request;
int ret;
if (unidirectional && !sw->tmu.has_ucap)
return -EOPNOTSUPP;
/*
* No need to enable TMU on devices that don't support CLx since on
* these devices e.g. Alpine Ridge and earlier, the TMU mode HiFi
* bi-directional is enabled by default.
*/
if (!tb_switch_is_clx_supported(sw))
if (tb_switch_tmu_is_enabled(sw))
return 0;
if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
return 0;
if (tb_switch_is_titan_ridge(sw) && unidirectional) {
/*
* Titan Ridge supports CL0s and CL1 only. CL0s and CL1 are
* enabled and supported together.
*/
if (!tb_switch_is_clx_enabled(sw, TB_CL1))
return -EOPNOTSUPP;
ret = tb_switch_tmu_objection_mask(sw);
if (ret)
return ret;
ret = tb_switch_tmu_unidirectional_enable(sw);
if (tb_switch_is_titan_ridge(sw) &&
(sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES ||
sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) {
ret = tb_switch_tmu_disable_objections(sw);
if (ret)
return ret;
}
@ -696,19 +962,30 @@ int tb_switch_tmu_enable(struct tb_switch *sw)
* HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
* HiFi-Uni.
*/
if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
if (unidirectional)
ret = __tb_switch_tmu_enable_unidirectional(sw);
else
ret = __tb_switch_tmu_enable_bidirectional(sw);
if (ret)
return ret;
} else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) {
ret = __tb_switch_tmu_change_mode(sw);
if (ret)
return ret;
if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) {
switch (sw->tmu.mode_request) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
ret = tb_switch_tmu_enable_unidirectional(sw);
break;
case TB_SWITCH_TMU_MODE_HIFI_BI:
ret = tb_switch_tmu_enable_bidirectional(sw);
break;
case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
ret = tb_switch_tmu_enable_enhanced(sw);
break;
default:
ret = -EINVAL;
break;
}
} else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES ||
sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI ||
sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) {
ret = tb_switch_tmu_change_mode(sw);
} else {
ret = -EINVAL;
}
sw->tmu.unidirectional = unidirectional;
} else {
/*
* Host router port configurations are written as
@ -716,58 +993,68 @@ int tb_switch_tmu_enable(struct tb_switch *sw)
* of the child node - see above.
* Here only the host router's rate configuration is written.
*/
ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
if (ret)
return ret;
ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]);
}
sw->tmu.rate = sw->tmu.rate_request;
if (ret) {
tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n",
tmu_mode_name(sw->tmu.mode_request), ret);
} else {
sw->tmu.mode = sw->tmu.mode_request;
tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode));
}
tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
return tb_switch_tmu_set_time_disruption(sw, false);
}
/**
* tb_switch_tmu_configure() - Configure the TMU rate and directionality
* tb_switch_tmu_configure() - Configure the TMU mode
* @sw: Router whose mode to change
* @rate: Rate to configure Off/Normal/HiFi
* @unidirectional: If uni-directional (bi-directional otherwise)
* @mode: Mode to configure
*
* Selects the rate of the TMU and directionality (uni-directional or
* bi-directional). Must be called before tb_switch_tmu_enable().
* Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
* next called.
*
* Returns %0 in success and negative errno otherwise. Specifically
* returns %-EOPNOTSUPP if the requested mode is not possible (not
* supported by the router and/or topology).
*/
void tb_switch_tmu_configure(struct tb_switch *sw,
enum tb_switch_tmu_rate rate, bool unidirectional)
int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
{
sw->tmu.unidirectional_request = unidirectional;
sw->tmu.rate_request = rate;
}
switch (mode) {
case TB_SWITCH_TMU_MODE_OFF:
break;
static int tb_switch_tmu_config_enable(struct device *dev, void *rate)
{
if (tb_is_switch(dev)) {
struct tb_switch *sw = tb_to_switch(dev);
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
if (!sw->tmu.has_ucap)
return -EOPNOTSUPP;
break;
tb_switch_tmu_configure(sw, *(enum tb_switch_tmu_rate *)rate,
tb_switch_is_clx_enabled(sw, TB_CL1));
if (tb_switch_tmu_enable(sw))
tb_sw_dbg(sw, "fail switching TMU mode for 1st depth router\n");
case TB_SWITCH_TMU_MODE_HIFI_BI:
break;
case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: {
const struct tb_switch *parent_sw = tb_switch_parent(sw);
if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw))
return -EOPNOTSUPP;
if (!tb_switch_tmu_enhanced_is_supported(sw))
return -EOPNOTSUPP;
break;
}
default:
tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode);
return -EINVAL;
}
if (sw->tmu.mode_request != mode) {
tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n",
tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode));
sw->tmu.mode_request = mode;
}
return 0;
}
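To illustrate the two-step flow above (configure the requested mode, then enable it), here is a minimal caller sketch; the function name is hypothetical and the fallback choice is only an example, not part of this series:

static void example_tmu_setup(struct tb_switch *sw)
{
	int ret;

	/* Ask for the USB4 v2 enhanced uni-directional mode first */
	ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
	if (ret == -EOPNOTSUPP)
		/* Fall back to HiFi bi-directional if the topology cannot do it */
		ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
	if (!ret && tb_switch_tmu_enable(sw))
		tb_sw_warn(sw, "TMU: failed to enable requested mode\n");
}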
/**
* tb_switch_enable_tmu_1st_child - Configure and enable TMU for 1st depth children
* @sw: The router whose children's TMU to configure and enable
* @rate: Rate of the TMU to configure the router's children to
*
* Configures and enables the TMU mode of 1st depth children of the specified
* router to the specified rate.
*/
void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
enum tb_switch_tmu_rate rate)
{
device_for_each_child(&sw->dev, &rate,
tb_switch_tmu_config_enable);
}


@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ktime.h>
#include <linux/string_helpers.h>
#include "tunnel.h"
#include "tb.h"
@ -41,9 +42,14 @@
* Number of credits we try to allocate for each DMA path if not limited
* by the host router baMaxHI.
*/
#define TB_DMA_CREDITS 14U
#define TB_DMA_CREDITS 14
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS 1U
#define TB_MIN_DMA_CREDITS 1
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
__MODULE_STRING(TB_DMA_CREDITS) ")");
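Because dma_credits is declared with mode 0444 it is read-only at runtime; as a usage note, the default can instead be overridden at module load time, for example with thunderbolt.dma_credits=32 on the kernel command line (the value 32 is only an illustration).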
static bool bw_alloc_mode = true;
module_param(bw_alloc_mode, bool, 0444);
@ -95,7 +101,7 @@ static unsigned int tb_available_credits(const struct tb_port *port,
pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
if (tb_acpi_is_xdomain_allowed()) {
spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
spare = min_not_zero(sw->max_dma_credits, dma_credits);
/* Add some credits for potential second DMA tunnel */
spare += TB_MIN_DMA_CREDITS;
} else {
@ -148,18 +154,49 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
return tunnel;
}
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
int ret;
/* Only supported if both routers are at least USB4 v2 */
if (usb4_switch_version(tunnel->src_port->sw) < 2 ||
usb4_switch_version(tunnel->dst_port->sw) < 2)
return 0;
ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
if (ret)
return ret;
ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
if (ret)
return ret;
tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
str_enabled_disabled(enable));
return 0;
}
static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
int res;
if (activate) {
res = tb_pci_set_ext_encapsulation(tunnel, activate);
if (res)
return res;
}
res = tb_pci_port_enable(tunnel->src_port, activate);
if (res)
return res;
if (tb_port_is_pcie_up(tunnel->dst_port))
return tb_pci_port_enable(tunnel->dst_port, activate);
if (tb_port_is_pcie_up(tunnel->dst_port)) {
res = tb_pci_port_enable(tunnel->dst_port, activate);
if (res)
return res;
}
return 0;
return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
}
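Note the ordering introduced here: extended encapsulation is switched on before the PCIe adapters are enabled, while on the deactivation path it is switched off only after both adapters have been disabled.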
static int tb_pci_init_credits(struct tb_path_hop *hop)
@ -381,6 +418,10 @@ static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
return -ETIMEDOUT;
}
/*
* Returns maximum possible rate from capability supporting only DP 2.0
* and below. Used when DP BW allocation mode is not enabled.
*/
static inline u32 tb_dp_cap_get_rate(u32 val)
{
u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
@ -399,6 +440,28 @@ static inline u32 tb_dp_cap_get_rate(u32 val)
}
}
/*
* Returns maximum possible rate from capability supporting DP 2.1
* UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
* mode is enabled.
*/
static inline u32 tb_dp_cap_get_rate_ext(u32 val)
{
if (val & DP_COMMON_CAP_UHBR20)
return 20000;
else if (val & DP_COMMON_CAP_UHBR13_5)
return 13500;
else if (val & DP_COMMON_CAP_UHBR10)
return 10000;
return tb_dp_cap_get_rate(val);
}
static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
{
return rate >= 10000;
}
static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
val &= ~DP_COMMON_CAP_RATE_MASK;
@ -461,7 +524,9 @@ static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
/* Tunneling removes the DP 8b/10b encoding */
/* Tunneling removes the DP 8b/10b 128/132b encoding */
if (tb_dp_is_uhbr_rate(rate))
return rate * lanes * 128 / 132;
return rate * lanes * 8 / 10;
}
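As a worked example of the calculation above: a UHBR10 link (10000 Mb/s per lane) with 4 lanes gives 10000 * 4 * 128 / 132 ≈ 38787 Mb/s of tunneled bandwidth, while an HBR3 link (8100 Mb/s per lane) with 4 lanes gives 8100 * 4 * 8 / 10 = 25920 Mb/s.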
@ -526,7 +591,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
* Perform connection manager handshake between IN and OUT ports
* before capabilities exchange can take place.
*/
ret = tb_dp_cm_handshake(in, out, 1500);
ret = tb_dp_cm_handshake(in, out, 3000);
if (ret)
return ret;
@ -604,7 +669,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
in->cap_adap + DP_REMOTE_CAP, 1);
}
static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
{
int ret, estimated_bw, granularity, tmp;
struct tb_port *out = tunnel->dst_port;
@ -616,7 +681,7 @@ static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
if (!bw_alloc_mode)
return 0;
ret = usb4_dp_port_set_cm_bw_mode_supported(in, true);
ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
if (ret)
return ret;
@ -654,6 +719,19 @@ static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
if (ret)
return ret;
/*
* Pick up granularity that supports maximum possible bandwidth.
* For that we use the UHBR rates too.
*/
in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
rate = min(in_rate, out_rate);
tmp = tb_dp_bandwidth(rate, lanes);
tb_port_dbg(in,
"maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
rate, lanes, tmp);
for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
granularity *= 2)
;
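The loop above doubles the granularity (250, 500 or 1000 Mb/s) until the maximum bandwidth, expressed in granularity units, fits into 255. For example, a UHBR20 x4 link gives 20000 * 4 * 128 / 132 ≈ 77575 Mb/s; at 250 Mb/s that would be 310 units, so the loop settles on 500 Mb/s (155 units).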
@ -680,12 +758,12 @@ static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);
ret = usb4_dp_port_set_estimated_bw(in, estimated_bw);
ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
if (ret)
return ret;
/* Initial allocation should be 0 according the spec */
ret = usb4_dp_port_allocate_bw(in, 0);
ret = usb4_dp_port_allocate_bandwidth(in, 0);
if (ret)
return ret;
@ -707,7 +785,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
if (!tb_switch_is_usb4(sw))
return 0;
if (!usb4_dp_port_bw_mode_supported(in))
if (!usb4_dp_port_bandwidth_mode_supported(in))
return 0;
tb_port_dbg(in, "bandwidth allocation mode supported\n");
@ -716,17 +794,17 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
if (ret)
return ret;
return tb_dp_bw_alloc_mode_enable(tunnel);
return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}
static void tb_dp_deinit(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
if (!usb4_dp_port_bw_mode_supported(in))
if (!usb4_dp_port_bandwidth_mode_supported(in))
return;
if (usb4_dp_port_bw_mode_enabled(in)) {
usb4_dp_port_set_cm_bw_mode_supported(in, false);
if (usb4_dp_port_bandwidth_mode_enabled(in)) {
usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
tb_port_dbg(in, "bandwidth allocation mode disabled\n");
}
}
@ -769,15 +847,42 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
}
/* max_bw is rounded up to next granularity */
static int tb_dp_nrd_bandwidth(struct tb_tunnel *tunnel, int *max_bw)
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
int *max_bw)
{
struct tb_port *in = tunnel->src_port;
int ret, rate, lanes, nrd_bw;
u32 cap;
ret = usb4_dp_port_nrd(in, &rate, &lanes);
/*
* DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
* read parameter values, so we can use it to determine
* the maximum possible bandwidth over this link.
*
* See USB4 v2 spec 1.0 10.4.4.5.
*/
ret = tb_port_read(in, &cap, TB_CFG_PORT,
in->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
rate = tb_dp_cap_get_rate_ext(cap);
if (tb_dp_is_uhbr_rate(rate)) {
/*
* When UHBR is used there is no reduction in lanes so
* we can use this directly.
*/
lanes = tb_dp_cap_get_lanes(cap);
} else {
/*
* If UHBR is not supported then check the
* non-reduced rate and lanes.
*/
ret = usb4_dp_port_nrd(in, &rate, &lanes);
if (ret)
return ret;
}
nrd_bw = tb_dp_bandwidth(rate, lanes);
if (max_bw) {
@ -790,26 +895,27 @@ static int tb_dp_nrd_bandwidth(struct tb_tunnel *tunnel, int *max_bw)
return nrd_bw;
}
static int tb_dp_bw_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up, int *consumed_down)
static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up,
int *consumed_down)
{
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
int ret, allocated_bw, max_bw;
if (!usb4_dp_port_bw_mode_enabled(in))
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
if (!tunnel->bw_mode)
return -EOPNOTSUPP;
/* Read what was allocated previously if any */
ret = usb4_dp_port_allocated_bw(in);
ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (allocated_bw == max_bw)
@ -839,15 +945,15 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
* If we have already set the allocated bandwidth then use that.
* Otherwise we read it from the DPRX.
*/
if (usb4_dp_port_bw_mode_enabled(in) && tunnel->bw_mode) {
if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
int ret, allocated_bw, max_bw;
ret = usb4_dp_port_allocated_bw(in);
ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (allocated_bw == max_bw)
@ -874,23 +980,23 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
struct tb_port *in = tunnel->src_port;
int max_bw, ret, tmp;
if (!usb4_dp_port_bw_mode_enabled(in))
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (in->sw->config.depth < out->sw->config.depth) {
tmp = min(*alloc_down, max_bw);
ret = usb4_dp_port_allocate_bw(in, tmp);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = tmp;
*alloc_up = 0;
} else {
tmp = min(*alloc_up, max_bw);
ret = usb4_dp_port_allocate_bw(in, tmp);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = 0;
@ -900,6 +1006,9 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
/* Now we can use BW mode registers to figure out the bandwidth */
/* TODO: need to handle discovery too */
tunnel->bw_mode = true;
tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n",
tmp);
return 0;
}
@ -974,23 +1083,20 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
struct tb_port *in = tunnel->src_port;
u32 rate, lanes;
int ret;
/*
* DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX read
* parameter values, so we can use it to determine the
* maximum possible bandwidth over this link.
*/
ret = tb_dp_read_cap(tunnel, DP_LOCAL_CAP, &rate, &lanes);
if (ret)
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
if (ret < 0)
return ret;
if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
*max_up = 0;
*max_down = tb_dp_bandwidth(rate, lanes);
*max_down = ret;
} else {
*max_up = tb_dp_bandwidth(rate, lanes);
*max_up = ret;
*max_down = 0;
}
@ -1011,8 +1117,8 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
* mode is enabled first and then read the bandwidth
* through those registers.
*/
ret = tb_dp_bw_mode_consumed_bandwidth(tunnel, consumed_up,
consumed_down);
ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
consumed_down);
if (ret < 0) {
if (ret != -EOPNOTSUPP)
return ret;
@ -1132,6 +1238,47 @@ static int tb_dp_init_video_path(struct tb_path *path)
return 0;
}
static void tb_dp_dump(struct tb_tunnel *tunnel)
{
struct tb_port *in, *out;
u32 dp_cap, rate, lanes;
in = tunnel->src_port;
out = tunnel->dst_port;
if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
in->cap_adap + DP_LOCAL_CAP, 1))
return;
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
rate, lanes, tb_dp_bandwidth(rate, lanes));
out = tunnel->dst_port;
if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
out->cap_adap + DP_LOCAL_CAP, 1))
return;
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
rate, lanes, tb_dp_bandwidth(rate, lanes));
if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
in->cap_adap + DP_REMOTE_CAP, 1))
return;
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
rate, lanes, tb_dp_bandwidth(rate, lanes));
}
/**
* tb_tunnel_discover_dp() - Discover existing Display Port tunnels
* @tb: Pointer to the domain structure
@ -1209,6 +1356,8 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
goto err_deactivate;
}
tb_dp_dump(tunnel);
tb_tunnel_dbg(tunnel, "discovered\n");
return tunnel;
@ -1452,6 +1601,10 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_path *path;
int credits;
/* Ring 0 is reserved for control channel */
if (WARN_ON(!receive_ring || !transmit_ring))
return NULL;
if (receive_ring > 0)
npaths++;
if (transmit_ring > 0)
@ -1468,7 +1621,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
tunnel->dst_port = dst;
tunnel->deinit = tb_dma_deinit;
credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);
credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
if (receive_ring > 0) {
path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,


@ -15,6 +15,7 @@
#include "tb.h"
#define USB4_DATA_RETRIES 3
#define USB4_DATA_DWORDS 16
enum usb4_sb_target {
USB4_SB_TARGET_ROUTER,
@ -112,7 +113,7 @@ static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
{
const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
return -EINVAL;
/*
@ -231,11 +232,14 @@ static bool link_is_usb4(struct tb_port *port)
* is not available for some reason (like that there is Thunderbolt 3
* switch upstream) then the internal xHCI controller is enabled
* instead.
*
* This does not set the configuration valid bit of the router. To do
* that call usb4_switch_configuration_valid().
*/
int usb4_switch_setup(struct tb_switch *sw)
{
struct tb_port *downstream_port;
struct tb_switch *parent;
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *down;
bool tbt3, xhci;
u32 val = 0;
int ret;
@ -249,9 +253,8 @@ int usb4_switch_setup(struct tb_switch *sw)
if (ret)
return ret;
parent = tb_switch_parent(sw);
downstream_port = tb_port_at(tb_route(sw), parent);
sw->link_usb4 = link_is_usb4(downstream_port);
down = tb_switch_downstream_port(sw);
sw->link_usb4 = link_is_usb4(down);
tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");
xhci = val & ROUTER_CS_6_HCI;
@ -288,7 +291,33 @@ int usb4_switch_setup(struct tb_switch *sw)
/* TBT3 supported by the CM */
val |= ROUTER_CS_5_C3S;
/* Tunneling configuration is ready now */
return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}
/**
* usb4_switch_configuration_valid() - Set tunneling configuration to be valid
* @sw: USB4 router
*
* Sets the configuration valid bit for the router. Must be called before
* any tunnels can be set through the router and after
* usb4_switch_setup() has been called. Can be called for host and device
* routers (does nothing for the latter).
*
* Returns %0 on success and negative errno otherwise.
*/
int usb4_switch_configuration_valid(struct tb_switch *sw)
{
u32 val;
int ret;
if (!tb_route(sw))
return 0;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
return ret;
val |= ROUTER_CS_5_CV;
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
@ -703,7 +732,7 @@ int usb4_switch_credits_init(struct tb_switch *sw)
int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
int ret, length, i, nports;
const struct tb_port *port;
u32 data[NVM_DATA_DWORDS];
u32 data[USB4_DATA_DWORDS];
u32 metadata = 0;
u8 status = 0;
@ -1199,7 +1228,7 @@ static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
if (dwords > NVM_DATA_DWORDS)
if (dwords > USB4_DATA_DWORDS)
return -EINVAL;
return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
@ -1209,7 +1238,7 @@ static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
static int usb4_port_write_data(struct tb_port *port, const void *data,
size_t dwords)
{
if (dwords > NVM_DATA_DWORDS)
if (dwords > USB4_DATA_DWORDS)
return -EINVAL;
return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
@ -1845,7 +1874,7 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
int ret;
metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
if (dwords < NVM_DATA_DWORDS)
if (dwords < USB4_DATA_DWORDS)
metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
@ -2265,13 +2294,14 @@ int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
}
/**
* usb4_dp_port_bw_mode_supported() - Is the bandwidth allocation mode supported
* usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode
* supported
* @port: DP IN adapter to check
*
* Can be called for any DP IN adapter. Returns true if the adapter
* supports USB4 bandwidth allocation mode, false otherwise.
*/
bool usb4_dp_port_bw_mode_supported(struct tb_port *port)
bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
{
int ret;
u32 val;
@ -2288,13 +2318,14 @@ bool usb4_dp_port_bw_mode_supported(struct tb_port *port)
}
/**
* usb4_dp_port_bw_mode_enabled() - Is the bandwidth allocation mode enabled
* usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode
* enabled
* @port: DP IN adapter to check
*
* Can be called for any DP IN adapter. Returns true if the bandwidth
* allocation mode has been enabled, false otherwise.
*/
bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
{
int ret;
u32 val;
@ -2311,7 +2342,8 @@ bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
}
/**
* usb4_dp_port_set_cm_bw_mode_supported() - Set/clear CM support for bandwidth allocation mode
* usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for
* bandwidth allocation mode
* @port: DP IN adapter
* @supported: Does the CM support bandwidth allocation mode
*
@ -2320,7 +2352,8 @@ bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
* otherwise. Specifically returns %-EOPNOTSUPP if the passed in adapter
* does not support this.
*/
int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported)
int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
bool supported)
{
u32 val;
int ret;
@ -2594,7 +2627,7 @@ int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
}
/**
* usb4_dp_port_set_estimated_bw() - Set estimated bandwidth
* usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth
* @port: DP IN adapter
* @bw: Estimated bandwidth in Mb/s.
*
@ -2604,7 +2637,7 @@ int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
* and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
* the adapter does not support this.
*/
int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw)
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
{
u32 val, granularity;
int ret;
@ -2630,14 +2663,14 @@ int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw)
}
/**
* usb4_dp_port_allocated_bw() - Return allocated bandwidth
* usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
* @port: DP IN adapter
*
* Reads and returns allocated bandwidth for @port in Mb/s (taking into
* account the programmed granularity). Returns negative errno in case
* of error.
*/
int usb4_dp_port_allocated_bw(struct tb_port *port)
int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
{
u32 val, granularity;
int ret;
@ -2723,7 +2756,7 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
}
/**
* usb4_dp_port_allocate_bw() - Set allocated bandwidth
* usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth
* @port: DP IN adapter
* @bw: New allocated bandwidth in Mb/s
*
@ -2731,7 +2764,7 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
* driver). Takes into account the programmed granularity. Returns %0 on
* success and negative errno in case of error.
*/
int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
{
u32 val, granularity;
int ret;
@ -2765,7 +2798,7 @@ int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
}
/**
* usb4_dp_port_requested_bw() - Read requested bandwidth
* usb4_dp_port_requested_bandwidth() - Read requested bandwidth
* @port: DP IN adapter
*
* Reads the DPCD (graphics driver) requested bandwidth and returns it
@ -2774,7 +2807,7 @@ int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
* the adapter does not support bandwidth allocation mode, and %-ENODATA
* if there is no active bandwidth request from the graphics driver.
*/
int usb4_dp_port_requested_bw(struct tb_port *port)
int usb4_dp_port_requested_bandwidth(struct tb_port *port)
{
u32 val, granularity;
int ret;
@ -2797,3 +2830,34 @@ int usb4_dp_port_requested_bw(struct tb_port *port)
return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
}
/**
* usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation
* @port: PCIe adapter
* @enable: Enable/disable extended encapsulation
*
* Enables or disables extended encapsulation used in PCIe tunneling. Caller
* needs to make sure both adapters support this before enabling. Returns %0 on
* success and negative errno otherwise.
*/
int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable)
{
u32 val;
int ret;
if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port))
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_PCIE_CS_1, 1);
if (ret)
return ret;
if (enable)
val |= ADP_PCIE_CS_1_EE;
else
val &= ~ADP_PCIE_CS_1_EE;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_PCIE_CS_1, 1);
}


@ -537,9 +537,8 @@ static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
struct tb_xdomain *xd, u8 sequence)
{
struct tb_switch *sw = tb_to_switch(xd->dev.parent);
struct tb_xdp_link_state_status_response res;
struct tb_port *port = tb_port_at(xd->route, sw);
struct tb_port *port = tb_xdomain_downstream_port(xd);
u32 val[2];
int ret;
@ -1137,7 +1136,7 @@ static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
struct tb_port *port;
int ret;
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
port = tb_xdomain_downstream_port(xd);
ret = tb_port_get_link_speed(port);
if (ret < 0)
@ -1251,8 +1250,7 @@ static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
unsigned int width)
{
struct tb_switch *sw = tb_to_switch(xd->dev.parent);
struct tb_port *port = tb_port_at(xd->route, sw);
struct tb_port *port = tb_xdomain_downstream_port(xd);
struct tb *tb = xd->tb;
u8 tlw, tls;
u32 val;
@ -1292,13 +1290,16 @@ static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
{
unsigned int width, width_mask;
struct tb_port *port;
int ret, width;
int ret;
if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
width = 1;
width = TB_LINK_WIDTH_SINGLE;
width_mask = width;
} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
width = 2;
width = TB_LINK_WIDTH_DUAL;
width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX;
} else {
if (xd->state_retries-- > 0) {
dev_dbg(&xd->dev,
@ -1309,7 +1310,7 @@ static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
return -ETIMEDOUT;
}
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
port = tb_xdomain_downstream_port(xd);
/*
* We can't use tb_xdomain_lane_bonding_enable() here because it
@ -1330,15 +1331,16 @@ static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
return ret;
}
ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT);
ret = tb_port_wait_for_link_width(port, width_mask,
XDOMAIN_BONDING_TIMEOUT);
if (ret) {
dev_warn(&xd->dev, "error waiting for link width to become %d\n",
width);
width_mask);
return ret;
}
port->bonded = width == 2;
port->dual_link_port->bonded = width == 2;
port->bonded = width > TB_LINK_WIDTH_SINGLE;
port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE;
tb_port_update_credits(port);
tb_xdomain_update_link_attributes(xd);
@ -1425,7 +1427,7 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
if (xd->bonding_possible) {
struct tb_port *port;
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
port = tb_xdomain_downstream_port(xd);
if (!port->bonded)
tb_port_disable(port->dual_link_port);
}
@ -1737,16 +1739,57 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
unsigned int width;
return sysfs_emit(buf, "%u\n", xd->link_width);
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
case TB_LINK_WIDTH_ASYM_RX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
case TB_LINK_WIDTH_ASYM_TX:
width = 3;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
unsigned int width;
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
case TB_LINK_WIDTH_ASYM_TX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
case TB_LINK_WIDTH_ASYM_RX:
width = 3;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
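As a usage note, these per-direction attributes are exposed on the XDomain device, so user space can read the 1/2/3 lane count from rx_lanes and tx_lanes (for example under /sys/bus/thunderbolt/devices/<xdomain>/, the exact path depending on the topology).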
static struct attribute *xdomain_attrs[] = {
&dev_attr_device.attr,
@ -1976,10 +2019,11 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
*/
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
unsigned int width_mask;
struct tb_port *port;
int ret;
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
port = tb_xdomain_downstream_port(xd);
if (!port->dual_link_port)
return -ENODEV;
@ -1999,7 +2043,12 @@ int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
return ret;
}
ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT);
/* Any of these widths means the link is bonded */
width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
TB_LINK_WIDTH_ASYM_RX;
ret = tb_port_wait_for_link_width(port, width_mask,
XDOMAIN_BONDING_TIMEOUT);
if (ret) {
tb_port_warn(port, "failed to enable lane bonding\n");
return ret;
@ -2024,10 +2073,13 @@ void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
struct tb_port *port;
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
port = tb_xdomain_downstream_port(xd);
if (port->dual_link_port) {
int ret;
tb_port_lane_bonding_disable(port);
if (tb_port_wait_for_link_width(port, 1, 100) == -ETIMEDOUT)
ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE, 100);
if (ret == -ETIMEDOUT)
tb_port_warn(port, "timeout disabling lane bonding\n");
tb_port_disable(port->dual_link_port);
tb_port_update_credits(port);


@ -171,6 +171,20 @@ struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
/**
* enum tb_link_width - Thunderbolt/USB4 link width
* @TB_LINK_WIDTH_SINGLE: Single lane link
* @TB_LINK_WIDTH_DUAL: Dual lane symmetric link
* @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters
* @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers
*/
enum tb_link_width {
TB_LINK_WIDTH_SINGLE = BIT(0),
TB_LINK_WIDTH_DUAL = BIT(1),
TB_LINK_WIDTH_ASYM_TX = BIT(2),
TB_LINK_WIDTH_ASYM_RX = BIT(3),
};
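Because the widths are bit flags they can be combined into a mask, as the bonding wait in xdomain.c above does. A minimal sketch with a hypothetical helper name:

static bool example_link_is_bonded(enum tb_link_width width)
{
	/* Dual symmetric or either asymmetric width uses both lanes */
	return width & (TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
			TB_LINK_WIDTH_ASYM_RX);
}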
/**
* struct tb_xdomain - Cross-domain (XDomain) connection
* @dev: XDomain device
@ -186,7 +200,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
* @link_speed: Speed of the link in Gb/s
* @link_width: Width of the link (1 or 2)
* @link_width: Width of the downstream facing link
* @link_usb4: Downstream link is USB4
* @is_unplugged: The XDomain is unplugged
* @needs_uuid: If the XDomain does not have @remote_uuid it will be
@ -234,7 +248,7 @@ struct tb_xdomain {
const char *vendor_name;
const char *device_name;
unsigned int link_speed;
unsigned int link_width;
enum tb_link_width link_width;
bool link_usb4;
bool is_unplugged;
bool needs_uuid;