/* drivers/ntb/hw/intel/ntb_hw_gen4.c */
  1. // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
  2. /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
  3. #include <linux/debugfs.h>
  4. #include <linux/delay.h>
  5. #include <linux/init.h>
  6. #include <linux/interrupt.h>
  7. #include <linux/module.h>
  8. #include <linux/pci.h>
  9. #include <linux/random.h>
  10. #include <linux/slab.h>
  11. #include <linux/ntb.h>
  12. #include <linux/log2.h>
  13. #include "ntb_hw_intel.h"
  14. #include "ntb_hw_gen1.h"
  15. #include "ntb_hw_gen3.h"
  16. #include "ntb_hw_gen4.h"
  17. static int gen4_poll_link(struct intel_ntb_dev *ndev);
  18. static int gen4_link_is_up(struct intel_ntb_dev *ndev);
/*
 * Register access descriptor for gen4 hardware: link polling hooks are
 * gen4-specific, while doorbell I/O reuses the gen3 32-bit helpers.
 * Memory windows live behind BARs 2 and 4.
 */
static const struct intel_ntb_reg gen4_reg = {
	.poll_link		= gen4_poll_link,
	.link_is_up		= gen4_link_is_up,
	.db_ioread		= gen3_db_ioread,
	.db_iowrite		= gen3_db_iowrite,
	.db_size		= sizeof(u32),
	.ntb_ctl		= GEN4_NTBCNTL_OFFSET,
	.mw_bar			= {2, 4},
};
/* Primary (self) side register offsets: interrupt status/disable and spads. */
static const struct intel_ntb_alt_reg gen4_pri_reg = {
	.db_clear		= GEN4_IM_INT_STATUS_OFFSET,
	.db_mask		= GEN4_IM_INT_DISABLE_OFFSET,
	.spad			= GEN4_IM_SPAD_OFFSET,
};
/*
 * Incoming translation register offsets for the first window (BAR2/3);
 * later windows are addressed by adding a fixed per-window stride.
 */
static const struct intel_ntb_xlat_reg gen4_sec_xlat = {
	.bar2_limit		= GEN4_IM23XLMT_OFFSET,
	.bar2_xlat		= GEN4_IM23XBASE_OFFSET,
	.bar2_idx		= GEN4_IM23XBASEIDX_OFFSET,
};
/* Peer-facing register offsets used in back-to-back topology. */
static const struct intel_ntb_alt_reg gen4_b2b_reg = {
	.db_bell		= GEN4_IM_DOORBELL_OFFSET,
	.spad			= GEN4_EM_SPAD_OFFSET,
};
  42. static int gen4_poll_link(struct intel_ntb_dev *ndev)
  43. {
  44. u16 reg_val;
  45. /*
  46. * We need to write to DLLSCS bit in the SLOTSTS before we
  47. * can clear the hardware link interrupt on ICX NTB.
  48. */
  49. iowrite16(GEN4_SLOTSTS_DLLSCS, ndev->self_mmio + GEN4_SLOTSTS);
  50. ndev->reg->db_iowrite(ndev->db_link_mask,
  51. ndev->self_mmio +
  52. ndev->self_reg->db_clear);
  53. reg_val = ioread16(ndev->self_mmio + GEN4_LINK_STATUS_OFFSET);
  54. if (reg_val == ndev->lnk_sta)
  55. return 0;
  56. ndev->lnk_sta = reg_val;
  57. return 1;
  58. }
  59. static int gen4_link_is_up(struct intel_ntb_dev *ndev)
  60. {
  61. return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
  62. }
  63. static int gen4_init_isr(struct intel_ntb_dev *ndev)
  64. {
  65. int i;
  66. /*
  67. * The MSIX vectors and the interrupt status bits are not lined up
  68. * on Gen3 (Skylake) and Gen4. By default the link status bit is bit
  69. * 32, however it is by default MSIX vector0. We need to fixup to
  70. * line them up. The vectors at reset is 1-32,0. We need to reprogram
  71. * to 0-32.
  72. */
  73. for (i = 0; i < GEN4_DB_MSIX_VECTOR_COUNT; i++)
  74. iowrite8(i, ndev->self_mmio + GEN4_INTVEC_OFFSET + i);
  75. return ndev_init_isr(ndev, GEN4_DB_MSIX_VECTOR_COUNT,
  76. GEN4_DB_MSIX_VECTOR_COUNT,
  77. GEN4_DB_MSIX_VECTOR_SHIFT,
  78. GEN4_DB_TOTAL_SHIFT);
  79. }
/*
 * gen4_setup_b2b_mw() - configure back-to-back memory windows
 * @ndev: NTB device
 * @addr: local-side B2B address map to program
 * @peer_addr: peer-side address map; NOTE(review): not referenced by this
 *             body — presumably kept for signature parity with other gens,
 *             confirm against callers
 *
 * Programs the incoming BAR limits equal to the base addresses (zero
 * length windows) and clears the incoming translation addresses.
 *
 * Return: always 0.
 */
static int gen4_setup_b2b_mw(struct intel_ntb_dev *ndev,
			     const struct intel_b2b_addr *addr,
			     const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	phys_addr_t bar_addr;

	pdev = ndev->ntb.pdev;
	mmio = ndev->self_mmio;

	/* setup incoming bar limits == base addrs (zero length windows) */
	bar_addr = addr->bar2_addr64;
	iowrite64(bar_addr, mmio + GEN4_IM23XLMT_OFFSET);
	/* read back to confirm the write reached the hardware */
	bar_addr = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IM23XLMT %#018llx\n", bar_addr);

	bar_addr = addr->bar4_addr64;
	iowrite64(bar_addr, mmio + GEN4_IM45XLMT_OFFSET);
	bar_addr = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
	dev_dbg(&pdev->dev, "IM45XLMT %#018llx\n", bar_addr);

	/* zero incoming translation addrs */
	iowrite64(0, mmio + GEN4_IM23XBASE_OFFSET);
	iowrite64(0, mmio + GEN4_IM45XBASE_OFFSET);

	ndev->peer_mmio = ndev->self_mmio;

	return 0;
}
  104. static int gen4_init_ntb(struct intel_ntb_dev *ndev)
  105. {
  106. int rc;
  107. ndev->mw_count = XEON_MW_COUNT;
  108. ndev->spad_count = GEN4_SPAD_COUNT;
  109. ndev->db_count = GEN4_DB_COUNT;
  110. ndev->db_link_mask = GEN4_DB_LINK_BIT;
  111. ndev->self_reg = &gen4_pri_reg;
  112. ndev->xlat_reg = &gen4_sec_xlat;
  113. ndev->peer_reg = &gen4_b2b_reg;
  114. if (ndev->ntb.topo == NTB_TOPO_B2B_USD)
  115. rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_dsd_addr,
  116. &xeon_b2b_usd_addr);
  117. else
  118. rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_usd_addr,
  119. &xeon_b2b_dsd_addr);
  120. if (rc)
  121. return rc;
  122. ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
  123. ndev->reg->db_iowrite(ndev->db_valid_mask,
  124. ndev->self_mmio +
  125. ndev->self_reg->db_mask);
  126. return 0;
  127. }
  128. static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
  129. {
  130. switch (ppd & GEN4_PPD_TOPO_MASK) {
  131. case GEN4_PPD_TOPO_B2B_USD:
  132. return NTB_TOPO_B2B_USD;
  133. case GEN4_PPD_TOPO_B2B_DSD:
  134. return NTB_TOPO_B2B_DSD;
  135. }
  136. return NTB_TOPO_NONE;
  137. }
  138. static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
  139. {
  140. switch (ppd & SPR_PPD_TOPO_MASK) {
  141. case SPR_PPD_TOPO_B2B_USD:
  142. return NTB_TOPO_B2B_USD;
  143. case SPR_PPD_TOPO_B2B_DSD:
  144. return NTB_TOPO_B2B_DSD;
  145. }
  146. return NTB_TOPO_NONE;
  147. }
  148. int gen4_init_dev(struct intel_ntb_dev *ndev)
  149. {
  150. struct pci_dev *pdev = ndev->ntb.pdev;
  151. u32 ppd1/*, ppd0*/;
  152. u16 lnkctl;
  153. int rc;
  154. ndev->reg = &gen4_reg;
  155. if (pdev_is_ICX(pdev)) {
  156. ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN;
  157. ndev->hwerr_flags |= NTB_HWERR_LTR_BAD;
  158. }
  159. ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
  160. if (pdev_is_ICX(pdev))
  161. ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
  162. else if (pdev_is_SPR(pdev) || pdev_is_gen5(pdev))
  163. ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
  164. dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
  165. ntb_topo_string(ndev->ntb.topo));
  166. if (ndev->ntb.topo == NTB_TOPO_NONE)
  167. return -EINVAL;
  168. rc = gen4_init_ntb(ndev);
  169. if (rc)
  170. return rc;
  171. /* init link setup */
  172. lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
  173. lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
  174. iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
  175. return gen4_init_isr(ndev);
  176. }
/*
 * ndev_ntb4_debugfs_read() - dump gen4 NTB state for debugfs
 * @filp: debugfs file; private_data holds the intel_ntb_dev
 * @ubuf: userspace buffer to copy the report into
 * @count: size of @ubuf
 * @offp: file offset, updated by simple_read_from_buffer()
 *
 * Formats a human-readable snapshot of topology, link state, doorbell
 * and scratchpad configuration, incoming translation registers, and
 * device/link error status into a temporary kernel buffer, then copies
 * it out to userspace.
 *
 * Return: number of bytes copied, or a negative errno.
 */
ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf,
			       size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	/* Cap the scratch buffer at 0x800 bytes regardless of @count. */
	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA (cached) -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev))
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	/* Read the live doorbell mask from hardware for comparison. */
	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + GEN4_IM23XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM23XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN4_IM45XBASE_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM45XBASE -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM23XLMT -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "IM45XLMT -\t\t\t%#018llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Statistics:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Hardware Errors:\n");

	/* Config-space reads return 0 on success; only print on success. */
	if (!pci_read_config_word(ndev->ntb.pdev,
				  GEN4_DEVSTS_OFFSET, &u.v16))
		off += scnprintf(buf + off, buf_size - off,
				 "DEVSTS -\t\t%#06x\n", u.v16);

	u.v16 = ioread16(mmio + GEN4_LINK_STATUS_OFFSET);
	off += scnprintf(buf + off, buf_size - off,
			 "LNKSTS -\t\t%#06x\n", u.v16);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN4_UNCERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "UNCERRSTS -\t\t%#06x\n", u.v32);

	if (!pci_read_config_dword(ndev->ntb.pdev,
				   GEN4_CORERRSTS_OFFSET, &u.v32))
		off += scnprintf(buf + off, buf_size - off,
				 "CORERRSTS -\t\t%#06x\n", u.v32);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}
/*
 * intel_ntb4_mw_set_trans() - program an inbound memory window translation
 * @ntb: NTB device
 * @pidx: peer index; must be NTB_DEF_PEER_IDX
 * @idx: memory window index
 * @addr: DMA address the window should translate to
 * @size: size of the mapping; must fit within the usable window
 *
 * Writes the translation base, the limit, and (on BAR-align hardware)
 * the base index register, reading each back to verify the write took
 * effect.  On any read-back mismatch, registers already written are
 * rolled back and -EIO is returned.
 *
 * Return: 0 on success, -EINVAL on bad arguments/alignment, -EIO on
 * read-back mismatch, or a negative errno from ndev_mw_to_bar().
 */
static int intel_ntb4_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				   dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg, idx_reg;
	unsigned short base_idx, reg_val16;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	/* Skip over the window consumed by B2B when it has no offset. */
	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	/* The B2B window gives up b2b_off bytes of its BAR. */
	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
		/* hardware requires that addr is aligned to bar size */
		if (addr & (bar_size - 1))
			return -EINVAL;
	} else {
		if (addr & (PAGE_SIZE - 1))
			return -EINVAL;
	}

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	/* Per-window register stride is 0x10 bytes. */
	xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
	limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
	base = pci_resource_start(ndev->ntb.pdev, bar);

	/* Set the limit if supported, if size is not mw_size */
	if (limit_reg && size != mw_size) {
		limit = base + size;
		base_idx = __ilog2_u64(size);
	} else {
		limit = base + mw_size;
		base_idx = __ilog2_u64(mw_size);
	}

	/* set and verify setting the translation address */
	iowrite64(addr, mmio + xlat_reg);
	reg_val = ioread64(mmio + xlat_reg);
	if (reg_val != addr) {
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMXBASE: %#Lx\n", bar, reg_val);

	/* set and verify setting the limit */
	iowrite64(limit, mmio + limit_reg);
	reg_val = ioread64(mmio + limit_reg);
	if (reg_val != limit) {
		/* roll back: restore the limit to base, clear the xlat */
		iowrite64(base, mmio + limit_reg);
		iowrite64(0, mmio + xlat_reg);
		return -EIO;
	}

	dev_dbg(&ntb->pdev->dev, "BAR %d IMXLMT: %#Lx\n", bar, reg_val);

	if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
		/* BAR-align hardware also needs the log2 window size index. */
		idx_reg = ndev->xlat_reg->bar2_idx + (idx * 0x2);
		iowrite16(base_idx, mmio + idx_reg);
		reg_val16 = ioread16(mmio + idx_reg);
		if (reg_val16 != base_idx) {
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			iowrite16(0, mmio + idx_reg);
			return -EIO;
		}
		dev_dbg(&ntb->pdev->dev, "BAR %d IMBASEIDX: %#x\n", bar, reg_val16);
	}

	return 0;
}
/*
 * intel_ntb4_link_enable() - bring the NTB link up
 * @ntb: NTB device
 * @max_speed: requested max speed; only NTB_SPEED_AUTO is honored
 * @max_width: requested max width; only NTB_WIDTH_AUTO is honored
 *
 * Programs active/idle LTR values (unless the hardware has the LTR
 * erratum), enables BAR snooping, clears the link-disable bit, and
 * kicks off link training via PPD0.
 *
 * Return: 0 on success, -ENXIO if link training did not start.
 */
static int intel_ntb4_link_enable(struct ntb_dev *ntb,
		enum ntb_speed max_speed, enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl, ppd0;
	u16 lnkctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	dev_dbg(&ntb->pdev->dev,
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);

	/* Speed/width hints are not programmable on this hardware. */
	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(&ntb->pdev->dev,
			"ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(&ntb->pdev->dev,
			"ignoring max_width %d\n", max_width);

	if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD)) {
		u32 ltr;

		/* Setup active snoop LTR values */
		ltr = NTB_LTR_ACTIVE_REQMNT | NTB_LTR_ACTIVE_VAL | NTB_LTR_ACTIVE_LATSCALE;
		/* Setup active non-snoop values */
		ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
		iowrite32(ltr, ndev->self_mmio + GEN4_LTR_ACTIVE_OFFSET);

		/* Setup idle snoop LTR values */
		ltr = NTB_LTR_IDLE_VAL | NTB_LTR_IDLE_LATSCALE | NTB_LTR_IDLE_REQMNT;
		/* Setup idle non-snoop values */
		ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
		iowrite32(ltr, ndev->self_mmio + GEN4_LTR_IDLE_OFFSET);

		/* setup PCIe LTR to active */
		iowrite8(NTB_LTR_SWSEL_ACTIVE, ndev->self_mmio + GEN4_LTR_SWSEL_OFFSET);
	}

	/* Enable snooping on both BAR23 and BAR45 in both directions. */
	ntb_ctl = NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP;
	ntb_ctl |= NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
	lnkctl &= ~GEN4_LINK_CTRL_LINK_DISABLE;
	iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);

	/* start link training in PPD0 */
	ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
	ppd0 |= GEN4_PPD_LINKTRN;
	iowrite32(ppd0, ndev->self_mmio + GEN4_PPD0_OFFSET);

	/* make sure link training has started */
	ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
	if (!(ppd0 & GEN4_PPD_LINKTRN)) {
		dev_warn(&ntb->pdev->dev, "Link is not training\n");
		return -ENXIO;
	}

	ndev->dev_up = 1;

	return 0;
}
/*
 * intel_ntb4_link_disable() - take the NTB link down
 * @ntb: NTB device
 *
 * Clears the BAR snoop bits, sets the link-disable control bit, and
 * (unless the hardware has the LTR erratum) switches LTR to idle.
 *
 * Return: always 0.
 */
static int intel_ntb4_link_disable(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_cntl;
	u16 lnkctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	dev_dbg(&ntb->pdev->dev, "Disabling link\n");

	/* clear the snoop bits */
	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_cntl &= ~(NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP);
	ntb_cntl &= ~(NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP);
	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

	lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
	lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
	iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);

	/* set LTR to idle */
	if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD))
		iowrite8(NTB_LTR_SWSEL_IDLE, ndev->self_mmio + GEN4_LTR_SWSEL_OFFSET);

	ndev->dev_up = 0;

	return 0;
}
  418. static int intel_ntb4_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
  419. resource_size_t *addr_align,
  420. resource_size_t *size_align,
  421. resource_size_t *size_max)
  422. {
  423. struct intel_ntb_dev *ndev = ntb_ndev(ntb);
  424. resource_size_t bar_size, mw_size;
  425. int bar;
  426. if (pidx != NTB_DEF_PEER_IDX)
  427. return -EINVAL;
  428. if (idx >= ndev->b2b_idx && !ndev->b2b_off)
  429. idx += 1;
  430. bar = ndev_mw_to_bar(ndev, idx);
  431. if (bar < 0)
  432. return bar;
  433. bar_size = pci_resource_len(ndev->ntb.pdev, bar);
  434. if (idx == ndev->b2b_idx)
  435. mw_size = bar_size - ndev->b2b_off;
  436. else
  437. mw_size = bar_size;
  438. if (addr_align) {
  439. if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN)
  440. *addr_align = pci_resource_len(ndev->ntb.pdev, bar);
  441. else
  442. *addr_align = PAGE_SIZE;
  443. }
  444. if (size_align)
  445. *size_align = 1;
  446. if (size_max)
  447. *size_max = mw_size;
  448. return 0;
  449. }
/*
 * NTB operations for gen4 hardware.  Memory-window and link management
 * use the gen4-specific handlers above; doorbell and scratchpad access
 * reuse the generic and gen3 helpers.
 */
const struct ntb_dev_ops intel_ntb4_ops = {
	.mw_count		= intel_ntb_mw_count,
	.mw_get_align		= intel_ntb4_mw_get_align,
	.mw_set_trans		= intel_ntb4_mw_set_trans,
	.peer_mw_count		= intel_ntb_peer_mw_count,
	.peer_mw_get_addr	= intel_ntb_peer_mw_get_addr,
	.link_is_up		= intel_ntb_link_is_up,
	.link_enable		= intel_ntb4_link_enable,
	.link_disable		= intel_ntb4_link_disable,
	.db_valid_mask		= intel_ntb_db_valid_mask,
	.db_vector_count	= intel_ntb_db_vector_count,
	.db_vector_mask		= intel_ntb_db_vector_mask,
	.db_read		= intel_ntb3_db_read,
	.db_clear		= intel_ntb3_db_clear,
	.db_set_mask		= intel_ntb_db_set_mask,
	.db_clear_mask		= intel_ntb_db_clear_mask,
	.peer_db_addr		= intel_ntb3_peer_db_addr,
	.peer_db_set		= intel_ntb3_peer_db_set,
	.spad_is_unsafe		= intel_ntb_spad_is_unsafe,
	.spad_count		= intel_ntb_spad_count,
	.spad_read		= intel_ntb_spad_read,
	.spad_write		= intel_ntb_spad_write,
	.peer_spad_addr		= intel_ntb_peer_spad_addr,
	.peer_spad_read		= intel_ntb_peer_spad_read,
	.peer_spad_write	= intel_ntb_peer_spad_write,
};