/* ntb_hw_switchtec.c */
  1. /*
  2. * Microsemi Switchtec(tm) PCIe Management Driver
  3. * Copyright (c) 2017, Microsemi Corporation
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. */
  15. #include <linux/switchtec.h>
  16. #include <linux/module.h>
  17. #include <linux/delay.h>
  18. #include <linux/kthread.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/ntb.h>
  21. #include <linux/pci.h>
/* Module identity. */
MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

/* Cap on the direct memory-window size reported to NTB clients. */
static ulong max_mw_size = SZ_2M;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size,
	"Max memory window size reported to the upper layer");

/* When set, LUT-based windows are exposed in addition to direct BARs. */
static bool use_lut_mws;
module_param(use_lut_mws, bool, 0644);
MODULE_PARM_DESC(use_lut_mws,
	"Enable the use of the LUT based memory windows");
/*
 * Fallback for architectures without a native ioread64: compose the
 * 64-bit value from two 32-bit MMIO reads (low dword first, then high).
 */
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif
/*
 * Fallback for architectures without a native iowrite64: split the
 * 64-bit value into two 32-bit MMIO writes (low dword first, then high).
 */
#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif
/* Value written into shared_mw.magic; peers validate it before trusting
 * the rest of the page (see switchtec_ntb_check_link). */
#define SWITCHTEC_NTB_MAGIC 0x45CC0001
#define MAX_MWS 128

/*
 * Page of memory shared between the two partitions.  The peer reads
 * magic and link_sta together as one 64-bit value, so these two fields
 * must stay adjacent and in this order.
 */
struct shared_mw {
	u32 magic;		/* SWITCHTEC_NTB_MAGIC when initialized */
	u32 link_sta;		/* non-zero while this side wants the link up */
	u32 partition_id;
	u64 mw_sizes[MAX_MWS];	/* advertised sizes; 0 means window unused */
	u32 spad[128];		/* scratchpads emulated in shared memory */
};

/* Number of direct windows = number of BAR entries in the ctrl regs. */
#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
#define LUT_SIZE SZ_64K
/* Per-device driver state tying the generic ntb_dev to the Switchtec HW. */
struct switchtec_ntb {
	struct ntb_dev ntb;
	struct switchtec_dev *stdev;

	int self_partition;	/* this side's partition number */
	int peer_partition;	/* the NT peer's partition number */

	int doorbell_irq;
	int message_irq;

	/* Raw register blocks mapped from the device. */
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;

	/* Per-partition views into the blocks above. */
	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
	struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;

	void __iomem *mmio_xlink_win;

	struct shared_mw *self_shared;		/* our page, CPU-addressable */
	struct shared_mw __iomem *peer_shared;	/* peer's page, via MMIO */
	dma_addr_t self_shared_dma;

	u64 db_mask;		/* shadow of the doorbell mask register */
	u64 db_valid_mask;
	int db_shift;		/* bit offset of our doorbells in idb */
	int db_peer_shift;	/* bit offset of peer doorbells in odb */

	/* synchronize rmw access of db_mask and hw reg */
	spinlock_t db_mask_lock;

	int nr_direct_mw;
	int nr_lut_mw;
	int nr_rsvd_luts;	/* LUT entries reserved, not shown to clients */
	int direct_mw_to_bar[MAX_DIRECT_MW];

	int peer_nr_direct_mw;
	int peer_nr_lut_mw;
	int peer_direct_mw_to_bar[MAX_DIRECT_MW];

	bool link_is_up;
	enum ntb_speed link_speed;
	enum ntb_width link_width;
	struct work_struct link_reinit_work;
};
  107. static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
  108. {
  109. return container_of(ntb, struct switchtec_ntb, ntb);
  110. }
/*
 * Issue a partition operation (lock/configure/reset) and poll until the
 * hardware leaves the matching "in progress" status.
 *
 * @sndev:       driver context (used only for error logging)
 * @ctl:         control register block to operate on
 * @op:          one of NTB_CTRL_PART_OP_{LOCK,CFG,RESET}
 * @wait_status: partition status value that signals success
 *
 * Returns 0 when status reaches @wait_status, -EINVAL for an unknown
 * @op, -EINTR if the polling sleep is interrupted (a RESET op is issued
 * before returning), -ETIMEDOUT if the op is still in progress after
 * 1000 * 50ms, or -EIO if the hardware settled in any other status.
 */
static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
				 struct ntb_ctrl_regs __iomem *ctl,
				 u32 op, int wait_status)
{
	static const char * const op_text[] = {
		[NTB_CTRL_PART_OP_LOCK] = "lock",
		[NTB_CTRL_PART_OP_CFG] = "configure",
		[NTB_CTRL_PART_OP_RESET] = "reset",
	};

	int i;
	u32 ps;
	int status;

	/* Transient status the hardware shows while @op is running. */
	switch (op) {
	case NTB_CTRL_PART_OP_LOCK:
		status = NTB_CTRL_PART_STATUS_LOCKING;
		break;
	case NTB_CTRL_PART_OP_CFG:
		status = NTB_CTRL_PART_STATUS_CONFIGURING;
		break;
	case NTB_CTRL_PART_OP_RESET:
		status = NTB_CTRL_PART_STATUS_RESETTING;
		break;
	default:
		return -EINVAL;
	}

	iowrite32(op, &ctl->partition_op);

	/* Poll every 50ms, up to 1000 iterations (~50s total). */
	for (i = 0; i < 1000; i++) {
		if (msleep_interruptible(50) != 0) {
			/* Interrupted: ask hardware to abort via reset. */
			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
			return -EINTR;
		}

		/* Only the low 16 bits carry the status code. */
		ps = ioread32(&ctl->partition_status) & 0xFFFF;

		if (ps != status)
			break;
	}

	if (ps == wait_status)
		return 0;

	if (ps == status) {
		dev_err(&sndev->stdev->dev,
			"Timed out while performing %s (%d). (%08x)\n",
			op_text[op], op,
			ioread32(&ctl->partition_status));

		return -ETIMEDOUT;
	}

	return -EIO;
}
  157. static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
  158. u32 val)
  159. {
  160. if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
  161. return -EINVAL;
  162. iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);
  163. return 0;
  164. }
  165. static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
  166. {
  167. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  168. int nr_direct_mw = sndev->peer_nr_direct_mw;
  169. int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
  170. if (pidx != NTB_DEF_PEER_IDX)
  171. return -EINVAL;
  172. if (!use_lut_mws)
  173. nr_lut_mw = 0;
  174. return nr_direct_mw + nr_lut_mw;
  175. }
  176. static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
  177. {
  178. return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
  179. }
  180. static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
  181. {
  182. return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
  183. }
  184. static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
  185. int widx, resource_size_t *addr_align,
  186. resource_size_t *size_align,
  187. resource_size_t *size_max)
  188. {
  189. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  190. int lut;
  191. resource_size_t size;
  192. if (pidx != NTB_DEF_PEER_IDX)
  193. return -EINVAL;
  194. lut = widx >= sndev->peer_nr_direct_mw;
  195. size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
  196. if (size == 0)
  197. return -EINVAL;
  198. if (addr_align)
  199. *addr_align = lut ? size : SZ_4K;
  200. if (size_align)
  201. *size_align = lut ? size : SZ_4K;
  202. if (size_max)
  203. *size_max = size;
  204. return 0;
  205. }
  206. static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
  207. {
  208. struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
  209. int bar = sndev->peer_direct_mw_to_bar[idx];
  210. u32 ctl_val;
  211. ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
  212. ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
  213. iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
  214. iowrite32(0, &ctl->bar_entry[bar].win_size);
  215. iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
  216. }
  217. static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
  218. {
  219. struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
  220. iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
  221. }
/*
 * Program direct window @idx in the peer's control registers to
 * translate to local DMA address @addr of @size bytes.
 *
 * win_size packs ilog2(size) in the low bits together with the size
 * itself; the caller (mw_set_trans) guarantees size is at least 4K
 * aligned, so the two fields do not collide.  NOTE(review): this
 * assumes size fits the 32-bit win_size register — confirm for
 * windows >= 4GB.
 */
static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
					dma_addr_t addr, resource_size_t size)
{
	int xlate_pos = ilog2(size);
	int bar = sndev->peer_direct_mw_to_bar[idx];
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	u32 ctl_val;

	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
	iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
	/* xlate_addr carries the destination partition in its low bits. */
	iowrite64(sndev->self_partition | addr,
		  &ctl->bar_entry[bar].xlate_addr);
}
  236. static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
  237. dma_addr_t addr, resource_size_t size)
  238. {
  239. struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
  240. iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
  241. &ctl->lut_entry[peer_lut_index(sndev, idx)]);
  242. }
/*
 * Set (or clear, when @addr/@size is zero) the translation for inbound
 * window @widx.
 *
 * The sequence is: validate the request, LOCK the peer partition's
 * configuration, program (or clear) the direct BAR or LUT entry, then
 * issue CFG to apply.  If the hardware reports an error on apply, the
 * window is cleared and CFG is re-issued to restore a sane state.
 *
 * Returns 0 on success, -EINVAL for bad parameters or misaligned
 * buffers, or the error from switchtec_ntb_part_op().
 */
static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
				      dma_addr_t addr, resource_size_t size)
{
	struct switchtec_ntb *sndev = ntb_sndev(ntb);
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
	int xlate_pos = ilog2(size);
	int nr_direct_mw = sndev->peer_nr_direct_mw;
	int rc;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
		widx, pidx, &addr, &size);

	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
		return -EINVAL;

	/* Hardware minimum translation granularity is 4K (2^12). */
	if (xlate_pos < 12)
		return -EINVAL;

	if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
		/*
		 * In certain circumstances we can get a buffer that is
		 * not aligned to its size. (Most of the time
		 * dma_alloc_coherent ensures this). This can happen when
		 * using large buffers allocated by the CMA
		 * (see CMA_CONFIG_ALIGNMENT)
		 */
		dev_err(&sndev->stdev->dev,
			"ERROR: Memory window address is not aligned to it's size!\n");
		return -EINVAL;
	}

	/* Lock the peer partition's configuration before touching it. */
	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* addr/size of zero means "tear this window down". */
	if (addr == 0 || size == 0) {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);
	} else {
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
		else
			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
	}

	/* Apply the new configuration. */
	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);

	if (rc == -EIO) {
		dev_err(&sndev->stdev->dev,
			"Hardware reported an error configuring mw %d: %08x\n",
			widx, ioread32(&ctl->bar_error));

		/* Roll back: clear the window and re-apply. */
		if (widx < nr_direct_mw)
			switchtec_ntb_mw_clr_direct(sndev, widx);
		else
			switchtec_ntb_mw_clr_lut(sndev, widx);

		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				      NTB_CTRL_PART_STATUS_NORMAL);
	}

	return rc;
}
  301. static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
  302. {
  303. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  304. int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
  305. return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
  306. }
/*
 * Compute the physical base and usable size of direct window @idx.
 *
 * Window 0 lives in the BAR that also hosts the LUT entries, so its
 * usable region starts after the LUT area and is clamped so it never
 * extends into it from the other side.  All windows are further capped
 * by the max_mw_size module parameter.
 */
static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
					 int idx, phys_addr_t *base,
					 resource_size_t *size)
{
	int bar = sndev->direct_mw_to_bar[idx];
	size_t offset = 0;

	if (bar < 0)
		return -EINVAL;

	if (idx == 0) {
		/*
		 * This is the direct BAR shared with the LUTs
		 * which means the actual window will be offset
		 * by the size of all the LUT entries.
		 */
		offset = LUT_SIZE * sndev->nr_lut_mw;
	}

	if (base)
		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;

	if (size) {
		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		/* Clamp window 0 to the space remaining beside the LUTs. */
		if (offset && *size > offset)
			*size = offset;

		if (*size > max_mw_size)
			*size = max_mw_size;
	}

	return 0;
}
  334. static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
  335. int idx, phys_addr_t *base,
  336. resource_size_t *size)
  337. {
  338. int bar = sndev->direct_mw_to_bar[0];
  339. int offset;
  340. offset = LUT_SIZE * lut_index(sndev, idx);
  341. if (base)
  342. *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
  343. if (size)
  344. *size = LUT_SIZE;
  345. return 0;
  346. }
  347. static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
  348. phys_addr_t *base,
  349. resource_size_t *size)
  350. {
  351. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  352. if (idx < sndev->nr_direct_mw)
  353. return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
  354. else if (idx < switchtec_ntb_peer_mw_count(ntb))
  355. return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
  356. else
  357. return -EINVAL;
  358. }
/*
 * Read the negotiated PCIe speed/width for @partition's endpoint.
 *
 * vep_pff_inst_id selects the PFF instance; dword 13 of its PCI
 * capability region is read and decoded with Link Status-style masks
 * (speed in bits 19:16, width in bits 25:20).  NOTE(review): register
 * layout inferred from the bit masks below — confirm against the
 * Switchtec register map.
 */
static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
					  int partition,
					  enum ntb_speed *speed,
					  enum ntb_width *width)
{
	struct switchtec_dev *stdev = sndev->stdev;

	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);

	if (speed)
		*speed = (linksta >> 16) & 0xF;

	if (width)
		*width = (linksta >> 20) & 0x3F;
}
  372. static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
  373. {
  374. enum ntb_speed self_speed, peer_speed;
  375. enum ntb_width self_width, peer_width;
  376. if (!sndev->link_is_up) {
  377. sndev->link_speed = NTB_SPEED_NONE;
  378. sndev->link_width = NTB_WIDTH_NONE;
  379. return;
  380. }
  381. switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
  382. &self_speed, &self_width);
  383. switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
  384. &peer_speed, &peer_width);
  385. sndev->link_speed = min(self_speed, peer_speed);
  386. sndev->link_width = min(self_width, peer_width);
  387. }
  388. static int crosslink_is_enabled(struct switchtec_ntb *sndev)
  389. {
  390. struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;
  391. return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
  392. }
  393. static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
  394. {
  395. int i;
  396. u32 msg_map = 0;
  397. if (!crosslink_is_enabled(sndev))
  398. return;
  399. for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
  400. int m = i | sndev->self_partition << 2;
  401. msg_map |= m << i * 8;
  402. }
  403. iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
  404. iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
  405. &sndev->mmio_peer_dbmsg->odb_mask);
  406. }
/* Inter-partition link-management messages sent via slot LINK_MESSAGE. */
enum switchtec_msg {
	LINK_MESSAGE = 0,	/* the message slot index used for link traffic */
	MSG_LINK_UP = 1,
	MSG_LINK_DOWN = 2,
	MSG_CHECK_LINK = 3,
	MSG_LINK_FORCE_DOWN = 4,
};
  414. static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);
  415. static void link_reinit_work(struct work_struct *work)
  416. {
  417. struct switchtec_ntb *sndev;
  418. sndev = container_of(work, struct switchtec_ntb, link_reinit_work);
  419. switchtec_ntb_reinit_peer(sndev);
  420. }
/*
 * Re-evaluate the NTB link state and emit ntb_link_event() on change.
 *
 * MSG_LINK_FORCE_DOWN takes the link down immediately and schedules the
 * reinit worker.  For all other messages the link is considered up only
 * when our own link_sta is set AND the peer's shared page carries the
 * magic value with a non-zero link_sta in the upper half.
 */
static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
				     enum switchtec_msg msg)
{
	int link_sta;
	int old = sndev->link_is_up;

	if (msg == MSG_LINK_FORCE_DOWN) {
		schedule_work(&sndev->link_reinit_work);

		if (sndev->link_is_up) {
			sndev->link_is_up = 0;
			ntb_link_event(&sndev->ntb);
			dev_info(&sndev->stdev->dev, "ntb link forced down\n");
		}

		return;
	}

	link_sta = sndev->self_shared->link_sta;
	if (link_sta) {
		/*
		 * Read magic and link_sta as one 64-bit value: the low
		 * half must match our magic, the high half is the
		 * peer's link_sta.
		 */
		u64 peer = ioread64(&sndev->peer_shared->magic);

		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
			link_sta = peer >> 32;
		else
			link_sta = 0;
	}

	sndev->link_is_up = link_sta;
	switchtec_ntb_set_link_speed(sndev);

	if (link_sta != old) {
		/* Nudge the peer so it re-checks its own view too. */
		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
		ntb_link_event(&sndev->ntb);
		dev_info(&sndev->stdev->dev, "ntb link %s\n",
			 link_sta ? "up" : "down");

		if (link_sta)
			crosslink_init_dbmsgs(sndev);
	}
}
  454. static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
  455. {
  456. struct switchtec_ntb *sndev = stdev->sndev;
  457. switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
  458. }
  459. static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
  460. enum ntb_speed *speed,
  461. enum ntb_width *width)
  462. {
  463. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  464. if (speed)
  465. *speed = sndev->link_speed;
  466. if (width)
  467. *width = sndev->link_width;
  468. return sndev->link_is_up;
  469. }
  470. static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
  471. enum ntb_speed max_speed,
  472. enum ntb_width max_width)
  473. {
  474. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  475. dev_dbg(&sndev->stdev->dev, "enabling link\n");
  476. sndev->self_shared->link_sta = 1;
  477. switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
  478. switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
  479. return 0;
  480. }
  481. static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
  482. {
  483. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  484. dev_dbg(&sndev->stdev->dev, "disabling link\n");
  485. sndev->self_shared->link_sta = 0;
  486. switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
  487. switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
  488. return 0;
  489. }
  490. static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
  491. {
  492. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  493. return sndev->db_valid_mask;
  494. }
  495. static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
  496. {
  497. return 1;
  498. }
  499. static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
  500. {
  501. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  502. if (db_vector < 0 || db_vector > 1)
  503. return 0;
  504. return sndev->db_valid_mask;
  505. }
  506. static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
  507. {
  508. u64 ret;
  509. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  510. ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
  511. return ret & sndev->db_valid_mask;
  512. }
  513. static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
  514. {
  515. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  516. iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
  517. return 0;
  518. }
  519. static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
  520. {
  521. unsigned long irqflags;
  522. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  523. if (db_bits & ~sndev->db_valid_mask)
  524. return -EINVAL;
  525. spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
  526. sndev->db_mask |= db_bits << sndev->db_shift;
  527. iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
  528. spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
  529. return 0;
  530. }
  531. static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
  532. {
  533. unsigned long irqflags;
  534. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  535. if (db_bits & ~sndev->db_valid_mask)
  536. return -EINVAL;
  537. spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
  538. sndev->db_mask &= ~(db_bits << sndev->db_shift);
  539. iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
  540. spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
  541. return 0;
  542. }
  543. static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
  544. {
  545. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  546. return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
  547. }
  548. static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
  549. phys_addr_t *db_addr,
  550. resource_size_t *db_size)
  551. {
  552. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  553. unsigned long offset;
  554. offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
  555. (unsigned long)sndev->stdev->mmio;
  556. offset += sndev->db_shift / 8;
  557. if (db_addr)
  558. *db_addr = pci_resource_start(ntb->pdev, 0) + offset;
  559. if (db_size)
  560. *db_size = sizeof(u32);
  561. return 0;
  562. }
  563. static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
  564. {
  565. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  566. iowrite64(db_bits << sndev->db_peer_shift,
  567. &sndev->mmio_peer_dbmsg->odb);
  568. return 0;
  569. }
  570. static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
  571. {
  572. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  573. return ARRAY_SIZE(sndev->self_shared->spad);
  574. }
  575. static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
  576. {
  577. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  578. if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
  579. return 0;
  580. if (!sndev->self_shared)
  581. return 0;
  582. return sndev->self_shared->spad[idx];
  583. }
  584. static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
  585. {
  586. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  587. if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
  588. return -EINVAL;
  589. if (!sndev->self_shared)
  590. return -EIO;
  591. sndev->self_shared->spad[idx] = val;
  592. return 0;
  593. }
  594. static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
  595. int sidx)
  596. {
  597. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  598. if (pidx != NTB_DEF_PEER_IDX)
  599. return -EINVAL;
  600. if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
  601. return 0;
  602. if (!sndev->peer_shared)
  603. return 0;
  604. return ioread32(&sndev->peer_shared->spad[sidx]);
  605. }
  606. static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
  607. int sidx, u32 val)
  608. {
  609. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  610. if (pidx != NTB_DEF_PEER_IDX)
  611. return -EINVAL;
  612. if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
  613. return -EINVAL;
  614. if (!sndev->peer_shared)
  615. return -EIO;
  616. iowrite32(val, &sndev->peer_shared->spad[sidx]);
  617. return 0;
  618. }
  619. static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
  620. int sidx, phys_addr_t *spad_addr)
  621. {
  622. struct switchtec_ntb *sndev = ntb_sndev(ntb);
  623. unsigned long offset;
  624. if (pidx != NTB_DEF_PEER_IDX)
  625. return -EINVAL;
  626. offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
  627. (unsigned long)sndev->stdev->mmio;
  628. if (spad_addr)
  629. *spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
  630. return 0;
  631. }
/* Dispatch table wiring the generic NTB API to this driver. */
static const struct ntb_dev_ops switchtec_ntb_ops = {
	.mw_count		= switchtec_ntb_mw_count,
	.mw_get_align		= switchtec_ntb_mw_get_align,
	.mw_set_trans		= switchtec_ntb_mw_set_trans,
	.peer_mw_count		= switchtec_ntb_peer_mw_count,
	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
	.link_is_up		= switchtec_ntb_link_is_up,
	.link_enable		= switchtec_ntb_link_enable,
	.link_disable		= switchtec_ntb_link_disable,
	.db_valid_mask		= switchtec_ntb_db_valid_mask,
	.db_vector_count	= switchtec_ntb_db_vector_count,
	.db_vector_mask		= switchtec_ntb_db_vector_mask,
	.db_read		= switchtec_ntb_db_read,
	.db_clear		= switchtec_ntb_db_clear,
	.db_set_mask		= switchtec_ntb_db_set_mask,
	.db_clear_mask		= switchtec_ntb_db_clear_mask,
	.db_read_mask		= switchtec_ntb_db_read_mask,
	.peer_db_addr		= switchtec_ntb_peer_db_addr,
	.peer_db_set		= switchtec_ntb_peer_db_set,
	.spad_count		= switchtec_ntb_spad_count,
	.spad_read		= switchtec_ntb_spad_read,
	.spad_write		= switchtec_ntb_spad_write,
	.peer_spad_read		= switchtec_ntb_peer_spad_read,
	.peer_spad_write	= switchtec_ntb_peer_spad_write,
	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
};
  658. static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
  659. {
  660. u64 tpart_vec;
  661. int self;
  662. u64 part_map;
  663. int bit;
  664. sndev->ntb.pdev = sndev->stdev->pdev;
  665. sndev->ntb.topo = NTB_TOPO_SWITCH;
  666. sndev->ntb.ops = &switchtec_ntb_ops;
  667. INIT_WORK(&sndev->link_reinit_work, link_reinit_work);
  668. sndev->self_partition = sndev->stdev->partition;
  669. sndev->mmio_ntb = sndev->stdev->mmio_ntb;
  670. self = sndev->self_partition;
  671. tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
  672. tpart_vec <<= 32;
  673. tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
  674. part_map = ioread64(&sndev->mmio_ntb->ep_map);
  675. part_map &= ~(1 << sndev->self_partition);
  676. if (!ffs(tpart_vec)) {
  677. if (sndev->stdev->partition_count != 2) {
  678. dev_err(&sndev->stdev->dev,
  679. "ntb target partition not defined\n");
  680. return -ENODEV;
  681. }
  682. bit = ffs(part_map);
  683. if (!bit) {
  684. dev_err(&sndev->stdev->dev,
  685. "peer partition is not NT partition\n");
  686. return -ENODEV;
  687. }
  688. sndev->peer_partition = bit - 1;
  689. } else {
  690. if (ffs(tpart_vec) != fls(tpart_vec)) {
  691. dev_err(&sndev->stdev->dev,
  692. "ntb driver only supports 1 pair of 1-1 ntb mapping\n");
  693. return -ENODEV;
  694. }
  695. sndev->peer_partition = ffs(tpart_vec) - 1;
  696. if (!(part_map & (1ULL << sndev->peer_partition))) {
  697. dev_err(&sndev->stdev->dev,
  698. "ntb target partition is not NT partition\n");
  699. return -ENODEV;
  700. }
  701. }
  702. dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
  703. sndev->self_partition, sndev->stdev->partition_count);
  704. sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
  705. SWITCHTEC_NTB_REG_CTRL_OFFSET;
  706. sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
  707. SWITCHTEC_NTB_REG_DBMSG_OFFSET;
  708. sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
  709. sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
  710. sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
  711. sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;
  712. return 0;
  713. }
/*
 * Program reserved LUT entry @lut_idx in @ctl to translate to
 * @addr in @partition, enabling LUT windows on the shared BAR.
 *
 * The BAR control register is updated to enable LUT mode with
 * ilog2(LUT_SIZE) as the entry size (bits 15:8) and the number of
 * usable LUT entries minus one (bits 21:14).  The whole update is
 * performed under a partition LOCK and applied with CFG.
 *
 * Returns 0 on success or the error from switchtec_ntb_part_op().
 */
static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
			       struct ntb_ctrl_regs __iomem *ctl,
			       int lut_idx, int partition, u64 addr)
{
	int peer_bar = sndev->peer_direct_mw_to_bar[0];
	u32 ctl_val;
	int rc;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
	ctl_val &= 0xFF;	/* keep only the low control byte */
	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
	ctl_val |= ilog2(LUT_SIZE) << 8;
	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);

	/* LUT entry: enable bit, destination partition, target address. */
	iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
		  &ctl->lut_entry[lut_idx]);

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up reserved lut window: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}
  746. static int config_req_id_table(struct switchtec_ntb *sndev,
  747. struct ntb_ctrl_regs __iomem *mmio_ctrl,
  748. int *req_ids, int count)
  749. {
  750. int i, rc = 0;
  751. u32 error;
  752. u32 proxy_id;
  753. if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
  754. dev_err(&sndev->stdev->dev,
  755. "Not enough requester IDs available.\n");
  756. return -EFAULT;
  757. }
  758. rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
  759. NTB_CTRL_PART_OP_LOCK,
  760. NTB_CTRL_PART_STATUS_LOCKED);
  761. if (rc)
  762. return rc;
  763. iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
  764. &mmio_ctrl->partition_ctrl);
  765. for (i = 0; i < count; i++) {
  766. iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
  767. &mmio_ctrl->req_id_table[i]);
  768. proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
  769. dev_dbg(&sndev->stdev->dev,
  770. "Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
  771. req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
  772. req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
  773. (proxy_id >> 1) & 0x7);
  774. }
  775. rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
  776. NTB_CTRL_PART_OP_CFG,
  777. NTB_CTRL_PART_STATUS_NORMAL);
  778. if (rc == -EIO) {
  779. error = ioread32(&mmio_ctrl->req_id_error);
  780. dev_err(&sndev->stdev->dev,
  781. "Error setting up the requester ID table: %08x\n",
  782. error);
  783. }
  784. return 0;
  785. }
/*
 * Configure our own partition's LUT and direct memory windows to target
 * the crosslink (virtual) partition's BAR addresses.
 *
 * @ntb_lut_idx: LUT entry reserved for NTB internal use — skipped here
 * @mw_addrs:    BAR addresses enumerated in the crosslink partition;
 *               mw_addrs[0] backs the LUT windows, the rest back the
 *               direct windows
 * @mw_count:    number of entries in @mw_addrs
 *
 * All register updates happen between a PART_OP_LOCK and a PART_OP_CFG
 * commit; hardware error registers are logged if the commit fails.
 */
static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
			       u64 *mw_addrs, int mw_count)
{
	int rc, i;
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
	u64 addr;
	size_t size, offset;
	int bar;
	int xlate_pos;
	u32 ctl_val;

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/* Point every LUT window (except the reserved one) into mw_addrs[0] */
	for (i = 0; i < sndev->nr_lut_mw; i++) {
		if (i == ntb_lut_idx)
			continue;

		addr = mw_addrs[0] + LUT_SIZE * i;

		iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
			  addr),
			  &ctl->lut_entry[i]);
	}

	/* Can't expose more direct windows than we have addresses for */
	sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		bar = sndev->direct_mw_to_bar[i];
		/*
		 * The first direct window shares its BAR with the LUT
		 * windows, so it starts after the LUT region.
		 */
		offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
		addr = mw_addrs[i] + offset;
		size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		xlate_pos = ilog2(size);

		if (offset && size > offset)
			size = offset;

		ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
		iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
		iowrite64(sndev->peer_partition | addr,
			  &ctl->bar_entry[bar].xlate_addr);
	}

	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up cross link windows: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}
  837. static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
  838. struct ntb_ctrl_regs __iomem *mmio_ctrl)
  839. {
  840. int req_ids[16];
  841. int i;
  842. u32 proxy_id;
  843. for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
  844. proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);
  845. if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
  846. break;
  847. req_ids[i] = ((proxy_id >> 1) & 0xFF);
  848. }
  849. return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
  850. }
  851. /*
  852. * In crosslink configuration there is a virtual partition in the
  853. * middle of the two switches. The BARs in this partition have to be
  854. * enumerated and assigned addresses.
  855. */
  856. static int crosslink_enum_partition(struct switchtec_ntb *sndev,
  857. u64 *bar_addrs)
  858. {
  859. struct part_cfg_regs __iomem *part_cfg =
  860. &sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
  861. u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
  862. struct pff_csr_regs __iomem *mmio_pff =
  863. &sndev->stdev->mmio_pff_csr[pff];
  864. const u64 bar_space = 0x1000000000LL;
  865. u64 bar_addr;
  866. int bar_cnt = 0;
  867. int i;
  868. iowrite16(0x6, &mmio_pff->pcicmd);
  869. for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
  870. iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
  871. bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
  872. bar_addr &= ~0xf;
  873. dev_dbg(&sndev->stdev->dev,
  874. "Crosslink BAR%d addr: %llx\n",
  875. i*2, bar_addr);
  876. if (bar_addr != bar_space * i)
  877. continue;
  878. bar_addrs[bar_cnt++] = bar_addr;
  879. }
  880. return bar_cnt;
  881. }
/*
 * Full crosslink bring-up: enumerate the virtual partition's BARs, map
 * a reserved LUT window onto the far side's dbmsg registers, configure
 * the memory windows and requester IDs, and map the window locally so
 * mmio_peer_dbmsg points across the link.
 *
 * No-op (returns 0) when crosslink is not enabled. Returns a negative
 * errno on any setup failure.
 */
static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
{
	int rc;
	int bar = sndev->direct_mw_to_bar[0];
	const int ntb_lut_idx = 1;	/* LUT entry reserved for the dbmsg window */
	u64 bar_addrs[6];
	u64 addr;
	int offset;
	int bar_cnt;

	if (!crosslink_is_enabled(sndev))
		return 0;

	dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
	sndev->ntb.topo = NTB_TOPO_CROSSLINK;

	bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
	/* Need one BAR for the LUT windows plus one per direct window */
	if (bar_cnt < sndev->nr_direct_mw + 1) {
		dev_err(&sndev->stdev->dev,
			"Error enumerating crosslink partition\n");
		return -EINVAL;
	}

	/* Address of the peer partition's dbmsg registers in its GAS */
	addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET +
		sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);

	/* The LUT window must be LUT_SIZE aligned; keep the remainder */
	offset = addr & (LUT_SIZE - 1);
	addr -= offset;

	rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
				 sndev->peer_partition, addr);
	if (rc)
		return rc;

	rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
				 bar_cnt - 1);
	if (rc)
		return rc;

	rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
	if (rc)
		return rc;

	/* Map the reserved LUT window (second LUT_SIZE slot of the BAR) */
	sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
						LUT_SIZE, LUT_SIZE);
	if (!sndev->mmio_xlink_win) {
		rc = -ENOMEM;
		return rc;
	}

	sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
	sndev->nr_rsvd_luts++;

	crosslink_init_dbmsgs(sndev);

	return 0;
}
  928. static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
  929. {
  930. if (sndev->mmio_xlink_win)
  931. pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
  932. }
  933. static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
  934. {
  935. int i;
  936. int cnt = 0;
  937. for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
  938. u32 r = ioread32(&ctrl->bar_entry[i].ctl);
  939. if (r & NTB_CTRL_BAR_VALID)
  940. map[cnt++] = i;
  941. }
  942. return cnt;
  943. }
  944. static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
  945. {
  946. sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
  947. sndev->mmio_self_ctrl);
  948. sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
  949. sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
  950. dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
  951. sndev->nr_direct_mw, sndev->nr_lut_mw);
  952. sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
  953. sndev->mmio_peer_ctrl);
  954. sndev->peer_nr_lut_mw =
  955. ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
  956. sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
  957. dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
  958. sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
  959. }
  960. /*
  961. * There are 64 doorbells in the switch hardware but this is
  962. * shared among all partitions. So we must split them in half
  963. * (32 for each partition). However, the message interrupts are
  964. * also shared with the top 4 doorbells so we just limit this to
  965. * 28 doorbells per partition.
  966. *
  967. * In crosslink mode, each side has it's own dbmsg register so
  968. * they can each use all 60 of the available doorbells.
  969. */
  970. static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
  971. {
  972. sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
  973. if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
  974. sndev->db_shift = 0;
  975. sndev->db_peer_shift = 0;
  976. sndev->db_valid_mask = sndev->db_mask;
  977. } else if (sndev->self_partition < sndev->peer_partition) {
  978. sndev->db_shift = 0;
  979. sndev->db_peer_shift = 32;
  980. sndev->db_valid_mask = 0x0FFFFFFF;
  981. } else {
  982. sndev->db_shift = 32;
  983. sndev->db_peer_shift = 0;
  984. sndev->db_valid_mask = 0x0FFFFFFF;
  985. }
  986. iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
  987. iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
  988. &sndev->mmio_peer_dbmsg->odb_mask);
  989. dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
  990. sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
  991. }
  992. static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
  993. {
  994. int i;
  995. u32 msg_map = 0;
  996. for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
  997. int m = i | sndev->peer_partition << 2;
  998. msg_map |= m << i * 8;
  999. }
  1000. iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
  1001. for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
  1002. iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
  1003. &sndev->mmio_self_dbmsg->imsg[i]);
  1004. }
  1005. static int
  1006. switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
  1007. {
  1008. int req_ids[2];
  1009. /*
  1010. * Root Complex Requester ID (which is 0:00.0)
  1011. */
  1012. req_ids[0] = 0;
  1013. /*
  1014. * Host Bridge Requester ID (as read from the mmap address)
  1015. */
  1016. req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);
  1017. return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
  1018. ARRAY_SIZE(req_ids));
  1019. }
  1020. static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
  1021. {
  1022. int i;
  1023. memset(sndev->self_shared, 0, LUT_SIZE);
  1024. sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
  1025. sndev->self_shared->partition_id = sndev->stdev->partition;
  1026. for (i = 0; i < sndev->nr_direct_mw; i++) {
  1027. int bar = sndev->direct_mw_to_bar[i];
  1028. resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
  1029. if (i == 0)
  1030. sz = min_t(resource_size_t, sz,
  1031. LUT_SIZE * sndev->nr_lut_mw);
  1032. sndev->self_shared->mw_sizes[i] = sz;
  1033. }
  1034. for (i = 0; i < sndev->nr_lut_mw; i++) {
  1035. int idx = sndev->nr_direct_mw + i;
  1036. sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
  1037. }
  1038. }
/*
 * Allocate the local shared-memory page, expose it to the peer via the
 * peer's reserved LUT entry 0, and map the peer's shared page through
 * our own first direct BAR.
 *
 * Reserves one LUT entry (nr_rsvd_luts). On failure after allocation,
 * the DMA buffer is freed via the unalloc_and_exit label.
 * Returns 0 on success or a negative errno.
 */
static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
	int self_bar = sndev->direct_mw_to_bar[0];
	int rc;

	/* LUT entry 0 is reserved for the shared window */
	sndev->nr_rsvd_luts++;
	sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
						 LUT_SIZE,
						 &sndev->self_shared_dma,
						 GFP_KERNEL);
	if (!sndev->self_shared) {
		dev_err(&sndev->stdev->dev,
			"unable to allocate memory for shared mw\n");
		return -ENOMEM;
	}

	switchtec_ntb_init_shared(sndev);

	/* Let the peer reach our shared page through its LUT entry 0 */
	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
				 sndev->self_partition,
				 sndev->self_shared_dma);
	if (rc)
		goto unalloc_and_exit;

	/* Map the peer's shared page (first LUT window of our BAR) */
	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
	if (!sndev->peer_shared) {
		rc = -ENOMEM;
		goto unalloc_and_exit;
	}

	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
	return 0;

unalloc_and_exit:
	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
			  sndev->self_shared, sndev->self_shared_dma);

	return rc;
}
  1071. static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
  1072. {
  1073. if (sndev->peer_shared)
  1074. pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
  1075. if (sndev->self_shared)
  1076. dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
  1077. sndev->self_shared,
  1078. sndev->self_shared_dma);
  1079. sndev->nr_rsvd_luts--;
  1080. }
  1081. static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
  1082. {
  1083. struct switchtec_ntb *sndev = dev;
  1084. dev_dbg(&sndev->stdev->dev, "doorbell\n");
  1085. ntb_db_event(&sndev->ntb, 0);
  1086. return IRQ_HANDLED;
  1087. }
  1088. static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
  1089. {
  1090. int i;
  1091. struct switchtec_ntb *sndev = dev;
  1092. for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
  1093. u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
  1094. if (msg & NTB_DBMSG_IMSG_STATUS) {
  1095. dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
  1096. i, (u32)msg);
  1097. iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
  1098. if (i == LINK_MESSAGE)
  1099. switchtec_ntb_check_link(sndev, msg);
  1100. }
  1101. }
  1102. return IRQ_HANDLED;
  1103. }
/*
 * Pick distinct MSI vectors for doorbell and message interrupts (both
 * different from the event vector the firmware already uses), program
 * the in-doorbell vector map, and request the two IRQs.
 *
 * Returns 0 on success or a negative errno; the doorbell IRQ is freed
 * if requesting the message IRQ fails.
 */
static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
{
	int i;
	int rc;
	int doorbell_irq = 0;
	int message_irq = 0;
	int event_irq;
	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);

	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);

	/* Choose the lowest vector numbers not already taken */
	while (doorbell_irq == event_irq)
		doorbell_irq++;
	while (message_irq == doorbell_irq ||
	       message_irq == event_irq)
		message_irq++;

	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
		event_irq, doorbell_irq, message_irq);

	/* All but the last 4 idb vectors route to the doorbell vector ... */
	for (i = 0; i < idb_vecs - 4; i++)
		iowrite8(doorbell_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
	/* ... and the top 4 (shared with messages) to the message vector */
	for (; i < idb_vecs; i++)
		iowrite8(message_irq,
			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);

	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);

	rc = request_irq(sndev->doorbell_irq,
			 switchtec_ntb_doorbell_isr, 0,
			 "switchtec_ntb_doorbell", sndev);
	if (rc)
		return rc;

	rc = request_irq(sndev->message_irq,
			 switchtec_ntb_message_isr, 0,
			 "switchtec_ntb_message", sndev);
	if (rc) {
		free_irq(sndev->doorbell_irq, sndev);
		return rc;
	}

	return 0;
}
  1142. static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
  1143. {
  1144. free_irq(sndev->doorbell_irq, sndev);
  1145. free_irq(sndev->message_irq, sndev);
  1146. }
  1147. static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
  1148. {
  1149. dev_info(&sndev->stdev->dev, "peer reinitialized\n");
  1150. switchtec_ntb_deinit_shared_mw(sndev);
  1151. switchtec_ntb_init_mw(sndev);
  1152. return switchtec_ntb_init_shared_mw(sndev);
  1153. }
/*
 * Class-interface add hook: create and register an NTB device for a
 * newly added switchtec device.
 *
 * Initialization order matters — sndev, memory windows, requester IDs,
 * crosslink, doorbells/messages, shared window, then IRQs — and the
 * goto ladder below unwinds exactly the steps completed so far.
 * Returns 0 on success or a negative errno.
 */
static int switchtec_ntb_add(struct device *dev,
			     struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev;
	int rc;

	stdev->sndev = NULL;

	/* Only devices advertising the NTB (bridge/other) class are usable */
	if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8))
		return -ENODEV;

	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
	if (!sndev)
		return -ENOMEM;

	sndev->stdev = stdev;
	rc = switchtec_ntb_init_sndev(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_mw(sndev);

	rc = switchtec_ntb_init_req_id_table(sndev);
	if (rc)
		goto free_and_exit;

	rc = switchtec_ntb_init_crosslink(sndev);
	if (rc)
		goto free_and_exit;

	switchtec_ntb_init_db(sndev);
	switchtec_ntb_init_msgs(sndev);

	rc = switchtec_ntb_init_shared_mw(sndev);
	if (rc)
		goto deinit_crosslink;

	rc = switchtec_ntb_init_db_msg_irq(sndev);
	if (rc)
		goto deinit_shared_and_exit;

	/*
	 * If this host crashed, the other host may think the link is
	 * still up. Tell them to force it down (it will go back up
	 * once we register the ntb device).
	 */
	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);

	rc = ntb_register_device(&sndev->ntb);
	if (rc)
		goto deinit_and_exit;

	stdev->sndev = sndev;
	stdev->link_notifier = switchtec_ntb_link_notification;
	dev_info(dev, "NTB device registered\n");

	return 0;

deinit_and_exit:
	switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
	switchtec_ntb_deinit_shared_mw(sndev);
deinit_crosslink:
	switchtec_ntb_deinit_crosslink(sndev);
free_and_exit:
	kfree(sndev);
	dev_err(dev, "failed to register ntb device: %d\n", rc);
	return rc;
}
/*
 * Class-interface remove hook: unregister and tear down the NTB device
 * in the reverse order of switchtec_ntb_add(). The link notifier and
 * sndev pointer are cleared first so no new callbacks race the teardown.
 */
static void switchtec_ntb_remove(struct device *dev,
				 struct class_interface *class_intf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct switchtec_ntb *sndev = stdev->sndev;

	/* Nothing to do if add() declined or failed for this device */
	if (!sndev)
		return;

	stdev->link_notifier = NULL;
	stdev->sndev = NULL;
	ntb_unregister_device(&sndev->ntb);
	switchtec_ntb_deinit_db_msg_irq(sndev);
	switchtec_ntb_deinit_shared_mw(sndev);
	switchtec_ntb_deinit_crosslink(sndev);
	kfree(sndev);
	dev_info(dev, "ntb device unregistered\n");
}
/*
 * Hook into the switchtec class so an NTB device is created/destroyed
 * whenever a switchtec device appears or disappears.
 */
static struct class_interface switchtec_interface = {
	.add_dev = switchtec_ntb_add,
	.remove_dev = switchtec_ntb_remove,
};
static int __init switchtec_ntb_init(void)
{
	/* Attach to all current and future devices of the switchtec class */
	switchtec_interface.class = switchtec_class;
	return class_interface_register(&switchtec_interface);
}
module_init(switchtec_ntb_init);
static void __exit switchtec_ntb_exit(void)
{
	/* Triggers remove_dev for every bound device before returning */
	class_interface_unregister(&switchtec_interface);
}
module_exit(switchtec_ntb_exit);