  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * CLx support
  4. *
  5. * Copyright (C) 2020 - 2023, Intel Corporation
  6. * Authors: Gil Fine <gil.fine@intel.com>
  7. * Mika Westerberg <mika.westerberg@linux.intel.com>
  8. */
  9. #include <linux/module.h>
  10. #include "tb.h"
/*
 * Module parameter: pass "clx=false" (thunderbolt.clx=0) on the kernel
 * command line to keep the CL low power states disabled on the
 * high-speed lanes. Read-only at runtime (mode 0444).
 */
static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
  14. static const char *clx_name(unsigned int clx)
  15. {
  16. switch (clx) {
  17. case TB_CL0S | TB_CL1 | TB_CL2:
  18. return "CL0s/CL1/CL2";
  19. case TB_CL1 | TB_CL2:
  20. return "CL1/CL2";
  21. case TB_CL0S | TB_CL2:
  22. return "CL0s/CL2";
  23. case TB_CL0S | TB_CL1:
  24. return "CL0s/CL1";
  25. case TB_CL0S:
  26. return "CL0s";
  27. case 0:
  28. return "disabled";
  29. default:
  30. return "unknown";
  31. }
  32. }
  33. static int tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
  34. {
  35. u32 phy;
  36. int ret;
  37. ret = tb_port_read(port, &phy, TB_CFG_PORT,
  38. port->cap_phy + LANE_ADP_CS_1, 1);
  39. if (ret)
  40. return ret;
  41. if (secondary)
  42. phy |= LANE_ADP_CS_1_PMS;
  43. else
  44. phy &= ~LANE_ADP_CS_1_PMS;
  45. return tb_port_write(port, &phy, TB_CFG_PORT,
  46. port->cap_phy + LANE_ADP_CS_1, 1);
  47. }
  48. static int tb_port_pm_secondary_enable(struct tb_port *port)
  49. {
  50. return tb_port_pm_secondary_set(port, true);
  51. }
  52. static int tb_port_pm_secondary_disable(struct tb_port *port)
  53. {
  54. return tb_port_pm_secondary_set(port, false);
  55. }
  56. /* Called for USB4 or Titan Ridge routers only */
  57. static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx)
  58. {
  59. u32 val, mask = 0;
  60. bool ret;
  61. /* Don't enable CLx in case of two single-lane links */
  62. if (!port->bonded && port->dual_link_port)
  63. return false;
  64. /* Don't enable CLx in case of inter-domain link */
  65. if (port->xdomain)
  66. return false;
  67. if (tb_switch_is_usb4(port->sw)) {
  68. if (!usb4_port_clx_supported(port))
  69. return false;
  70. } else if (!tb_lc_is_clx_supported(port)) {
  71. return false;
  72. }
  73. if (clx & TB_CL0S)
  74. mask |= LANE_ADP_CS_0_CL0S_SUPPORT;
  75. if (clx & TB_CL1)
  76. mask |= LANE_ADP_CS_0_CL1_SUPPORT;
  77. if (clx & TB_CL2)
  78. mask |= LANE_ADP_CS_0_CL2_SUPPORT;
  79. ret = tb_port_read(port, &val, TB_CFG_PORT,
  80. port->cap_phy + LANE_ADP_CS_0, 1);
  81. if (ret)
  82. return false;
  83. return !!(val & mask);
  84. }
  85. static int tb_port_clx_set(struct tb_port *port, unsigned int clx, bool enable)
  86. {
  87. u32 phy, mask = 0;
  88. int ret;
  89. if (clx & TB_CL0S)
  90. mask |= LANE_ADP_CS_1_CL0S_ENABLE;
  91. if (clx & TB_CL1)
  92. mask |= LANE_ADP_CS_1_CL1_ENABLE;
  93. if (clx & TB_CL2)
  94. mask |= LANE_ADP_CS_1_CL2_ENABLE;
  95. if (!mask)
  96. return -EOPNOTSUPP;
  97. ret = tb_port_read(port, &phy, TB_CFG_PORT,
  98. port->cap_phy + LANE_ADP_CS_1, 1);
  99. if (ret)
  100. return ret;
  101. if (enable)
  102. phy |= mask;
  103. else
  104. phy &= ~mask;
  105. return tb_port_write(port, &phy, TB_CFG_PORT,
  106. port->cap_phy + LANE_ADP_CS_1, 1);
  107. }
  108. static int tb_port_clx_disable(struct tb_port *port, unsigned int clx)
  109. {
  110. return tb_port_clx_set(port, clx, false);
  111. }
  112. static int tb_port_clx_enable(struct tb_port *port, unsigned int clx)
  113. {
  114. return tb_port_clx_set(port, clx, true);
  115. }
  116. static int tb_port_clx(struct tb_port *port)
  117. {
  118. u32 val;
  119. int ret;
  120. if (!tb_port_clx_supported(port, TB_CL0S | TB_CL1 | TB_CL2))
  121. return 0;
  122. ret = tb_port_read(port, &val, TB_CFG_PORT,
  123. port->cap_phy + LANE_ADP_CS_1, 1);
  124. if (ret)
  125. return ret;
  126. if (val & LANE_ADP_CS_1_CL0S_ENABLE)
  127. ret |= TB_CL0S;
  128. if (val & LANE_ADP_CS_1_CL1_ENABLE)
  129. ret |= TB_CL1;
  130. if (val & LANE_ADP_CS_1_CL2_ENABLE)
  131. ret |= TB_CL2;
  132. return ret;
  133. }
  134. /**
  135. * tb_port_clx_is_enabled() - Is given CL state enabled
  136. * @port: USB4 port to check
  137. * @clx: Mask of CL states to check
  138. *
  139. * Returns true if any of the given CL states is enabled for @port.
  140. */
  141. bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
  142. {
  143. return !!(tb_port_clx(port) & clx);
  144. }
  145. /**
  146. * tb_switch_clx_is_supported() - Is CLx supported on this type of router
  147. * @sw: The router to check CLx support for
  148. */
  149. static bool tb_switch_clx_is_supported(const struct tb_switch *sw)
  150. {
  151. if (!clx_enabled)
  152. return false;
  153. if (sw->quirks & QUIRK_NO_CLX)
  154. return false;
  155. /*
  156. * CLx is not enabled and validated on Intel USB4 platforms
  157. * before Alder Lake.
  158. */
  159. if (tb_switch_is_tiger_lake(sw))
  160. return false;
  161. return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
  162. }
  163. /**
  164. * tb_switch_clx_init() - Initialize router CL states
  165. * @sw: Router
  166. *
  167. * Can be called for any router. Initializes the current CL state by
  168. * reading it from the hardware.
  169. *
  170. * Returns %0 in case of success and negative errno in case of failure.
  171. */
  172. int tb_switch_clx_init(struct tb_switch *sw)
  173. {
  174. struct tb_port *up, *down;
  175. unsigned int clx, tmp;
  176. if (tb_switch_is_icm(sw))
  177. return 0;
  178. if (!tb_route(sw))
  179. return 0;
  180. if (!tb_switch_clx_is_supported(sw))
  181. return 0;
  182. up = tb_upstream_port(sw);
  183. down = tb_switch_downstream_port(sw);
  184. clx = tb_port_clx(up);
  185. tmp = tb_port_clx(down);
  186. if (clx != tmp)
  187. tb_sw_warn(sw, "CLx: inconsistent configuration %#x != %#x\n",
  188. clx, tmp);
  189. tb_sw_dbg(sw, "CLx: current mode: %s\n", clx_name(clx));
  190. sw->clx = clx;
  191. return 0;
  192. }
  193. static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
  194. {
  195. struct tb_port *up, *down;
  196. int ret;
  197. if (!tb_route(sw))
  198. return 0;
  199. up = tb_upstream_port(sw);
  200. down = tb_switch_downstream_port(sw);
  201. ret = tb_port_pm_secondary_enable(up);
  202. if (ret)
  203. return ret;
  204. return tb_port_pm_secondary_disable(down);
  205. }
  206. static int tb_switch_mask_clx_objections(struct tb_switch *sw)
  207. {
  208. int up_port = sw->config.upstream_port_number;
  209. u32 offset, val[2], mask_obj, unmask_obj;
  210. int ret, i;
  211. /* Only Titan Ridge of pre-USB4 devices support CLx states */
  212. if (!tb_switch_is_titan_ridge(sw))
  213. return 0;
  214. if (!tb_route(sw))
  215. return 0;
  216. /*
  217. * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
  218. * Port A consists of lane adapters 1,2 and
  219. * Port B consists of lane adapters 3,4
  220. * If upstream port is A, (lanes are 1,2), we mask objections from
  221. * port B (lanes 3,4) and unmask objections from Port A and vice-versa.
  222. */
  223. if (up_port == 1) {
  224. mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
  225. unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
  226. offset = TB_LOW_PWR_C1_CL1;
  227. } else {
  228. mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
  229. unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
  230. offset = TB_LOW_PWR_C3_CL1;
  231. }
  232. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
  233. sw->cap_lp + offset, ARRAY_SIZE(val));
  234. if (ret)
  235. return ret;
  236. for (i = 0; i < ARRAY_SIZE(val); i++) {
  237. val[i] |= mask_obj;
  238. val[i] &= ~unmask_obj;
  239. }
  240. return tb_sw_write(sw, &val, TB_CFG_SWITCH,
  241. sw->cap_lp + offset, ARRAY_SIZE(val));
  242. }
  243. static bool validate_mask(unsigned int clx)
  244. {
  245. /* Previous states need to be enabled */
  246. if (clx & TB_CL1)
  247. return (clx & TB_CL0S) == TB_CL0S;
  248. return true;
  249. }
/**
 * tb_switch_clx_enable() - Enable CLx on upstream port of specified router
 * @sw: Router to enable CLx for
 * @clx: The CLx state to enable
 *
 * CLx is enabled only if both sides of the link support CLx, and if both sides
 * of the link are not configured as two single lane links and only if the link
 * is not inter-domain link. The complete set of conditions is described in CM
 * Guide 1.0 section 8.1.
 *
 * Returns %0 on success or an error code on failure.
 */
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
{
	bool up_clx_support, down_clx_support;
	struct tb_switch *parent_sw;
	struct tb_port *up, *down;
	int ret;

	/* Nothing requested, or already in the requested state */
	if (!clx || sw->clx == clx)
		return 0;

	/* Reject combinations such as CL1 without CL0s */
	if (!validate_mask(clx))
		return -EINVAL;

	/* Host router has no upstream link to enable CLx on */
	parent_sw = tb_switch_parent(sw);
	if (!parent_sw)
		return 0;

	/* Both ends of the link must be CLx-capable router types */
	if (!tb_switch_clx_is_supported(parent_sw) ||
	    !tb_switch_clx_is_supported(sw))
		return 0;

	/* Only support CL2 for v2 routers */
	if ((clx & TB_CL2) &&
	    (usb4_switch_version(parent_sw) < 2 ||
	     usb4_switch_version(sw) < 2))
		return -EOPNOTSUPP;

	/* Settle PM primary/secondary lane roles before enabling CLx */
	ret = tb_switch_pm_secondary_resolve(sw);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	up_clx_support = tb_port_clx_supported(up, clx);
	down_clx_support = tb_port_clx_supported(down, clx);

	tb_port_dbg(up, "CLx: %s %ssupported\n", clx_name(clx),
		    up_clx_support ? "" : "not ");
	tb_port_dbg(down, "CLx: %s %ssupported\n", clx_name(clx),
		    down_clx_support ? "" : "not ");

	if (!up_clx_support || !down_clx_support)
		return -EOPNOTSUPP;

	/* Enable on both ends; roll back the upstream side on failure */
	ret = tb_port_clx_enable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_enable(down, clx);
	if (ret) {
		tb_port_clx_disable(up, clx);
		return ret;
	}

	/* Titan Ridge needs objection masking too; roll back both ports */
	ret = tb_switch_mask_clx_objections(sw);
	if (ret) {
		tb_port_clx_disable(up, clx);
		tb_port_clx_disable(down, clx);
		return ret;
	}

	/* Accumulate into the cached state (OR, not assignment) */
	sw->clx |= clx;

	tb_sw_dbg(sw, "CLx: %s enabled\n", clx_name(clx));
	return 0;
}
  314. /**
  315. * tb_switch_clx_disable() - Disable CLx on upstream port of specified router
  316. * @sw: Router to disable CLx for
  317. *
  318. * Disables all CL states of the given router. Can be called on any
  319. * router and if the states were not enabled already does nothing.
  320. *
  321. * Returns the CL states that were disabled or negative errno in case of
  322. * failure.
  323. */
  324. int tb_switch_clx_disable(struct tb_switch *sw)
  325. {
  326. unsigned int clx = sw->clx;
  327. struct tb_port *up, *down;
  328. int ret;
  329. if (!tb_switch_clx_is_supported(sw))
  330. return 0;
  331. if (!clx)
  332. return 0;
  333. if (sw->is_unplugged)
  334. return clx;
  335. up = tb_upstream_port(sw);
  336. down = tb_switch_downstream_port(sw);
  337. ret = tb_port_clx_disable(up, clx);
  338. if (ret)
  339. return ret;
  340. ret = tb_port_clx_disable(down, clx);
  341. if (ret)
  342. return ret;
  343. sw->clx = 0;
  344. tb_sw_dbg(sw, "CLx: %s disabled\n", clx_name(clx));
  345. return clx;
  346. }