tmu.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Thunderbolt Time Management Unit (TMU) support
  4. *
  5. * Copyright (C) 2019, Intel Corporation
  6. * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
  7. * Rajmohan Mani <rajmohan.mani@intel.com>
  8. */
  9. #include <linux/delay.h>
  10. #include "tb.h"
/*
 * Time sync packet interval programmed for each TMU mode (written to
 * the TS_PACKET_INTERVAL field of TMU_RTR_CS_3). Zero means the TMU
 * is off. Both HiFi modes and the enhanced mode use the same interval.
 */
static const unsigned int tmu_rates[] = {
	[TB_SWITCH_TMU_MODE_OFF] = 0,
	[TB_SWITCH_TMU_MODE_LOWRES] = 1000,
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
	[TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};
/*
 * Per-mode TMU tuning parameters. The first two are programmed into
 * the router registers (TMU_RTR_CS_0 frequency measurement window and
 * the TMU_RTR_CS_15 averaging constants) for every mode; the rest are
 * used only by the enhanced uni-directional mode (TMU_RTR_CS_18 delta
 * average constant and the TMU_ADP_CS_8/TMU_ADP_CS_9 adapter fields).
 */
static const struct {
	unsigned int freq_meas_window;	/* TMU_RTR_CS_0_FREQ_WIND */
	unsigned int avg_const;		/* TMU_RTR_CS_15 *_AVG fields */
	unsigned int delta_avg_const;	/* TMU_RTR_CS_18, v2 routers only */
	unsigned int repl_timeout;	/* TMU_ADP_CS_8_REPL_TIMEOUT */
	unsigned int repl_threshold;	/* TMU_ADP_CS_8_REPL_THRESHOLD */
	unsigned int repl_n;		/* TMU_ADP_CS_9_REPL_N */
	unsigned int dirswitch_n;	/* TMU_ADP_CS_9_DIRSWITCH_N */
} tmu_params[] = {
	[TB_SWITCH_TMU_MODE_OFF] = { },
	[TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
		800, 4, 0, 3125, 25, 128, 255,
	},
};
  35. static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
  36. {
  37. switch (mode) {
  38. case TB_SWITCH_TMU_MODE_OFF:
  39. return "off";
  40. case TB_SWITCH_TMU_MODE_LOWRES:
  41. return "uni-directional, LowRes";
  42. case TB_SWITCH_TMU_MODE_HIFI_UNI:
  43. return "uni-directional, HiFi";
  44. case TB_SWITCH_TMU_MODE_HIFI_BI:
  45. return "bi-directional, HiFi";
  46. case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
  47. return "enhanced uni-directional, MedRes";
  48. default:
  49. return "unknown";
  50. }
  51. }
  52. static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
  53. {
  54. return usb4_switch_version(sw) > 1;
  55. }
/*
 * Program the router level TMU parameters for @mode: the frequency
 * measurement window (TMU_RTR_CS_0), the four averaging constants
 * (TMU_RTR_CS_15) and, on USB4 v2 capable routers, the delta average
 * constant (TMU_RTR_CS_18) used by the enhanced mode.
 *
 * Returns %0 on success, negative errno on register access failure.
 */
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_mode mode)
{
	u32 freq, avg, val;
	int ret;

	freq = tmu_params[mode].freq_meas_window;
	avg = tmu_params[mode].avg_const;

	/* Read-modify-write the frequency measurement window */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	/* The same averaging constant goes into all four CS_15 fields */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
		~TMU_RTR_CS_15_DELAY_AVG_MASK &
		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
		~TMU_RTR_CS_15_ERROR_AVG_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	/* The delta average constant register exists only on v2 routers */
	if (tb_switch_tmu_enhanced_is_supported(sw)) {
		u32 delta_avg = tmu_params[mode].delta_avg_const;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
				 sw->tmu.cap + TMU_RTR_CS_18, 1);
		if (ret)
			return ret;

		val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
		val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
				  sw->tmu.cap + TMU_RTR_CS_18, 1);
	}

	return ret;
}
  102. static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
  103. {
  104. int ret;
  105. u32 val;
  106. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
  107. sw->tmu.cap + TMU_RTR_CS_0, 1);
  108. if (ret)
  109. return false;
  110. return !!(val & TMU_RTR_CS_0_UCAP);
  111. }
  112. static int tb_switch_tmu_rate_read(struct tb_switch *sw)
  113. {
  114. int ret;
  115. u32 val;
  116. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
  117. sw->tmu.cap + TMU_RTR_CS_3, 1);
  118. if (ret)
  119. return ret;
  120. val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
  121. return val;
  122. }
  123. static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
  124. {
  125. int ret;
  126. u32 val;
  127. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
  128. sw->tmu.cap + TMU_RTR_CS_3, 1);
  129. if (ret)
  130. return ret;
  131. val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
  132. val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
  133. return tb_sw_write(sw, &val, TB_CFG_SWITCH,
  134. sw->tmu.cap + TMU_RTR_CS_3, 1);
  135. }
  136. static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
  137. u32 value)
  138. {
  139. u32 data;
  140. int ret;
  141. ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
  142. if (ret)
  143. return ret;
  144. data &= ~mask;
  145. data |= value;
  146. return tb_port_write(port, &data, TB_CFG_PORT,
  147. port->cap_tmu + offset, 1);
  148. }
  149. static int tb_port_tmu_set_unidirectional(struct tb_port *port,
  150. bool unidirectional)
  151. {
  152. u32 val;
  153. if (!port->sw->tmu.has_ucap)
  154. return 0;
  155. val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
  156. return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
  157. }
/* Clears the UDM bit of @port (no-op if the router has no ucap) */
static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}
/* Sets the UDM bit of @port (no-op if the router has no ucap) */
static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}
  166. static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
  167. {
  168. int ret;
  169. u32 val;
  170. ret = tb_port_read(port, &val, TB_CFG_PORT,
  171. port->cap_tmu + TMU_ADP_CS_3, 1);
  172. if (ret)
  173. return false;
  174. return val & TMU_ADP_CS_3_UDM;
  175. }
  176. static bool tb_port_tmu_is_enhanced(struct tb_port *port)
  177. {
  178. int ret;
  179. u32 val;
  180. ret = tb_port_read(port, &val, TB_CFG_PORT,
  181. port->cap_tmu + TMU_ADP_CS_8, 1);
  182. if (ret)
  183. return false;
  184. return val & TMU_ADP_CS_8_EUDM;
  185. }
  186. /* Can be called to non-v2 lane adapters too */
  187. static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable)
  188. {
  189. int ret;
  190. u32 val;
  191. if (!tb_switch_tmu_enhanced_is_supported(port->sw))
  192. return 0;
  193. ret = tb_port_read(port, &val, TB_CFG_PORT,
  194. port->cap_tmu + TMU_ADP_CS_8, 1);
  195. if (ret)
  196. return ret;
  197. if (enable)
  198. val |= TMU_ADP_CS_8_EUDM;
  199. else
  200. val &= ~TMU_ADP_CS_8_EUDM;
  201. return tb_port_write(port, &val, TB_CFG_PORT,
  202. port->cap_tmu + TMU_ADP_CS_8, 1);
  203. }
/*
 * Program the per-adapter parameters for @mode: the REPL timeout and
 * threshold (TMU_ADP_CS_8) and the REPL_N/DIRSWITCH_N counters
 * (TMU_ADP_CS_9). In tmu_params[] these are non-zero only for the
 * enhanced uni-directional mode.
 */
static int tb_port_set_tmu_mode_params(struct tb_port *port,
				       enum tb_switch_tmu_mode mode)
{
	u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val;
	int ret;

	repl_timeout = tmu_params[mode].repl_timeout;
	repl_threshold = tmu_params[mode].repl_threshold;
	repl_n = tmu_params[mode].repl_n;
	dirswitch_n = tmu_params[mode].dirswitch_n;

	/* First dword: replication timeout and threshold */
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK;
	val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout);
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold);

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	/* Second dword: replication and direction switch counters */
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_9_REPL_N_MASK;
	val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n);
	val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n);

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_9, 1);
}
  236. /* Can be called to non-v2 lane adapters too */
  237. static int tb_port_tmu_rate_write(struct tb_port *port, int rate)
  238. {
  239. int ret;
  240. u32 val;
  241. if (!tb_switch_tmu_enhanced_is_supported(port->sw))
  242. return 0;
  243. ret = tb_port_read(port, &val, TB_CFG_PORT,
  244. port->cap_tmu + TMU_ADP_CS_9, 1);
  245. if (ret)
  246. return ret;
  247. val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK;
  248. val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate);
  249. return tb_port_write(port, &val, TB_CFG_PORT,
  250. port->cap_tmu + TMU_ADP_CS_9, 1);
  251. }
/*
 * Note the inverted polarity: TMU_ADP_CS_6_DTS is a "disable" bit, so
 * @time_sync == true sets it and thereby stops time sync on @port (see
 * the _disable/_enable wrappers below).
 */
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}
/* Sets the DTS bit of @port, i.e. disables time sync on the adapter */
static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}
/* Clears the DTS bit of @port, i.e. enables time sync on the adapter */
static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}
/*
 * Set or clear the time disruption (TD) bit of the router. USB4
 * routers have the bit in the TMU router capability, older (vendor
 * specific, e.g. Titan Ridge) routers in the TMU vendor capability.
 */
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
	u32 val, offset, bit;
	int ret;

	if (tb_switch_is_usb4(sw)) {
		offset = sw->tmu.cap + TMU_RTR_CS_0;
		bit = TMU_RTR_CS_0_TD;
	} else {
		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
		bit = TB_TIME_VSEC_3_CS_26_TD;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (set)
		val |= bit;
	else
		val &= ~bit;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}
/*
 * Detect the TMU mode the router is currently running (whatever the
 * boot firmware or a previous enable left programmed) and record it
 * in @sw->tmu so the software state matches the hardware.
 */
static int tmu_mode_init(struct tb_switch *sw)
{
	bool enhanced, ucap;
	int ret, rate;

	ucap = tb_switch_tmu_ucap_is_supported(sw);
	if (ucap)
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
	enhanced = tb_switch_tmu_enhanced_is_supported(sw);
	if (enhanced)
		tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n");

	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;
	rate = ret;

	/* Off by default */
	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	if (tb_route(sw)) {
		struct tb_port *up = tb_upstream_port(sw);

		/*
		 * Infer the mode from the upstream adapter: EUDM set
		 * means enhanced, UDM set plus the programmed rate
		 * selects one of the uni-directional modes, otherwise a
		 * non-zero rate means bi-directional HiFi.
		 */
		if (enhanced && tb_port_tmu_is_enhanced(up)) {
			sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI;
		} else if (ucap && tb_port_tmu_is_unidirectional(up)) {
			if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
			else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		} else if (rate) {
			sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
		}
	} else if (rate) {
		/* Host router with a non-zero rate runs bi-directional */
		sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
	}

	/* Update the initial request to match the current mode */
	sw->tmu.mode_request = sw->tmu.mode;
	sw->tmu.has_ucap = ucap;

	return 0;
}
/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* Nothing to do for ICM managed routers */
	if (tb_switch_is_icm(sw))
		return 0;

	/* Cache the router TMU capability offset */
	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	/* Cache the TMU adapter capability offset of each port */
	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	ret = tmu_mode_init(sw);
	if (ret)
		return ret;

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode));
	return 0;
}
/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates switch local time using time posting procedure: reads the
 * grandmaster (root switch) time and writes it through the Post Local
 * Time registers of @sw, then waits for the hardware to converge.
 * Only done for non-root USB4 routers.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 * Post Time High register.
	 * 2) write 0 to Post Time High register and then wait for
	 * the completion of the post_time register becomes 0.
	 * This means the time has been converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	/* Poll until the hardware clears the post time register */
	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	/* Clear the disruption bit even on failure */
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}
/* Turns the enhanced uni-directional mode off on both lane adapters */
static int disable_enhanced(struct tb_port *up, struct tb_port *down)
{
	int ret;

	/*
	 * The router may already have been disconnected so ignore
	 * errors on the upstream port.
	 */
	tb_port_tmu_rate_write(up, 0);
	tb_port_tmu_enhanced_enable(up, false);

	ret = tb_port_tmu_rate_write(down, 0);
	if (ret)
		return ret;

	return tb_port_tmu_enhanced_enable(down, false);
}
/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/* Already disabled? */
	if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF)
		return 0;

	if (tb_route(sw)) {
		struct tb_port *down, *up;
		int ret;

		down = tb_switch_downstream_port(sw);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, TMU handshake is
		 * initiated by upstream router. In case of bi-directional
		 * time sync, TMU handshake is initiated by downstream router.
		 * We change downstream router's rate to off for both uni/bidir
		 * cases although it is needed only for the bi-directional mode.
		 * We avoid changing upstream router's mode since it might
		 * have another downstream router plugged, that is set to
		 * uni-directional mode and we don't want to change its TMU
		 * mode.
		 */
		ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
		if (ret)
			return ret;

		/* Upstream may already be gone, ignore its error */
		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		switch (sw->tmu.mode) {
		case TB_SWITCH_TMU_MODE_LOWRES:
		case TB_SWITCH_TMU_MODE_HIFI_UNI:
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
			break;

		case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
			ret = disable_enhanced(up, down);
			if (ret)
				return ret;
			break;

		default:
			break;
		}
	} else {
		/* Host router: only the rate needs to be turned off */
		tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
	}

	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}
/* Called only when there is failure enabling requested mode */
static void tb_switch_tmu_off(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configurations in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);

	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional rate was programmed to the parent */
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;

	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
		disable_enhanced(up, down);
		break;

	default:
		break;
	}

	/* Always set the rate to 0 */
	tb_switch_tmu_rate_write(sw, rate);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Both lane adapters must have the uni-directional bit cleared */
	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Roll everything back to off on failure */
	tb_switch_tmu_off(sw);
	return ret;
}
/* Only needed for Titan Ridge */
static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
{
	struct tb_port *up = tb_upstream_port(sw);
	u32 val;
	int ret;

	/* Clear the TMU objection bits in the vendor specific capability */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	/* Disable the CL1/CL2 objections on the upstream adapter too */
	return tb_port_tmu_write(up, TMU_ADP_CS_6,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
}
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);
	/*
	 * In uni-directional mode the handshake is driven by the parent
	 * router so program the requested rate there.
	 */
	ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
				       tmu_rates[sw->tmu.mode_request]);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Roll everything back to off on failure */
	tb_switch_tmu_off(sw);
	return ret;
}
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	/* Router specific parameters first */
	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Then the upstream lane adapter: params, rate and the EUDM bit */
	ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(up, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(up, true);
	if (ret)
		goto out;

	/* Same sequence for the downstream facing lane adapter */
	ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(down, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(down, true);
	if (ret)
		goto out;

	return 0;

out:
	/* Roll everything back to off on failure */
	tb_switch_tmu_off(sw);
	return ret;
}
/*
 * Best effort restoration of the previous mode (still recorded in
 * sw->tmu.mode) after tb_switch_tmu_change_mode() fails part way.
 */
static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when changing mode,
	 * get back to the TMU configurations in previous mode.
	 * In case of additional failures in the functions below,
	 * ignore them since the caller shall already report a failure.
	 */
	/* First the downstream facing lane adapter and the rate */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_port_tmu_set_unidirectional(down, true);
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(down, false);
		tb_switch_tmu_rate_write(sw, rate);
		break;

	default:
		break;
	}

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);

	/* Then the upstream facing lane adapter of this router */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_port_tmu_set_unidirectional(up, true);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(up, false);
		break;

	default:
		break;
	}
}
/*
 * Change the TMU mode of @sw without going through off. Only
 * transitions between the uni-directional (LowRes/HiFi) and
 * bi-directional HiFi modes are allowed.
 */
static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Program the upstream router downstream facing lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(down, true);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(down, false);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(sw, rate);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		goto out;

	/* Program the new mode and the downstream router lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(up, true);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(up, false);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	/* Restore the previous mode configuration on failure */
	tb_switch_tmu_change_mode_prev(sw);
	return ret;
}
/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables TMU of a router to be in uni-directional Normal/HiFi or
 * bi-directional HiFi mode. Calling tb_switch_tmu_configure() is
 * required before calling this function.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	int ret;

	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	/* Titan Ridge needs the TMU objections cleared for uni modes */
	if (tb_switch_is_titan_ridge(sw) &&
	    (sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES ||
	     sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) {
		ret = tb_switch_tmu_disable_objections(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		/*
		 * The used mode changes are from OFF to
		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
		 * HiFi-Uni.
		 */
		if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) {
			switch (sw->tmu.mode_request) {
			case TB_SWITCH_TMU_MODE_LOWRES:
			case TB_SWITCH_TMU_MODE_HIFI_UNI:
				ret = tb_switch_tmu_enable_unidirectional(sw);
				break;
			case TB_SWITCH_TMU_MODE_HIFI_BI:
				ret = tb_switch_tmu_enable_bidirectional(sw);
				break;
			case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
				ret = tb_switch_tmu_enable_enhanced(sw);
				break;
			default:
				ret = -EINVAL;
				break;
			}
		} else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) {
			ret = tb_switch_tmu_change_mode(sw);
		} else {
			ret = -EINVAL;
		}
	} else {
		/*
		 * Host router port configurations are written as
		 * part of configurations for downstream port of the parent
		 * of the child node - see above.
		 * Here only the host router's rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]);
	}

	if (ret) {
		tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n",
			   tmu_mode_name(sw->tmu.mode_request), ret);
	} else {
		sw->tmu.mode = sw->tmu.mode_request;
		tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode));
	}

	return tb_switch_tmu_set_time_disruption(sw, false);
}
/**
 * tb_switch_tmu_configure() - Configure the TMU mode
 * @sw: Router whose mode to change
 * @mode: Mode to configure
 *
 * Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
 * next called.
 *
 * Returns %0 in success and negative errno otherwise. Specifically
 * returns %-EOPNOTSUPP if the requested mode is not possible (not
 * supported by the router and/or topology).
 */
int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
{
	switch (mode) {
	case TB_SWITCH_TMU_MODE_OFF:
		break;

	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional modes need the uni-directional capability */
		if (!sw->tmu.has_ucap)
			return -EOPNOTSUPP;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		break;

	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: {
		const struct tb_switch *parent_sw = tb_switch_parent(sw);

		/* Enhanced mode must be supported on both ends of the link */
		if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw))
			return -EOPNOTSUPP;
		if (!tb_switch_tmu_enhanced_is_supported(sw))
			return -EOPNOTSUPP;

		break;
	}

	default:
		tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode);
		return -EINVAL;
	}

	if (sw->tmu.mode_request != mode) {
		tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n",
			  tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode));
		sw->tmu.mode_request = mode;
	}

	return 0;
}