pcie-cadence.h 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#ifndef _PCIE_CADENCE_H
#define _PCIE_CADENCE_H

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-epf.h>
#include <linux/phy/phy.h>

/*
 * Parameters for the wait-for-link-up routine: maximum number of polls
 * and the sleep range applied between consecutive polls.
 */
#define LINK_WAIT_MAX_RETRIES	10
#define LINK_WAIT_USLEEP_MIN	90000
#define LINK_WAIT_USLEEP_MAX	100000
/*
 * Local Management (LM) Registers
 *
 * Cadence-specific configuration registers, offset from CDNS_PCIE_LM_BASE
 * inside the controller's register space.
 */
#define CDNS_PCIE_LM_BASE	0x00100000

/* Vendor ID Register: vendor ID in the low half, subsystem ID in the high half */
#define CDNS_PCIE_LM_ID		(CDNS_PCIE_LM_BASE + 0x0044)
#define  CDNS_PCIE_LM_ID_VENDOR_MASK	GENMASK(15, 0)
#define  CDNS_PCIE_LM_ID_VENDOR_SHIFT	0
#define  CDNS_PCIE_LM_ID_VENDOR(vid) \
	(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
#define  CDNS_PCIE_LM_ID_SUBSYS_MASK	GENMASK(31, 16)
#define  CDNS_PCIE_LM_ID_SUBSYS_SHIFT	16
#define  CDNS_PCIE_LM_ID_SUBSYS(sub) \
	(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)

/* Root Port Requester ID Register */
#define CDNS_PCIE_LM_RP_RID	(CDNS_PCIE_LM_BASE + 0x0228)
#define  CDNS_PCIE_LM_RP_RID_MASK	GENMASK(15, 0)
#define  CDNS_PCIE_LM_RP_RID_SHIFT	0
/* NOTE: trailing underscore is the macro's historical name; kept for callers */
#define  CDNS_PCIE_LM_RP_RID_(rid) \
	(((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)

/* Endpoint Bus and Device Number Register */
#define CDNS_PCIE_LM_EP_ID	(CDNS_PCIE_LM_BASE + 0x022c)
#define  CDNS_PCIE_LM_EP_ID_DEV_MASK	GENMASK(4, 0)
#define  CDNS_PCIE_LM_EP_ID_DEV_SHIFT	0
#define  CDNS_PCIE_LM_EP_ID_BUS_MASK	GENMASK(15, 8)
#define  CDNS_PCIE_LM_EP_ID_BUS_SHIFT	8

/*
 * Endpoint Function f BAR b Configuration Registers
 *
 * Each function has two 32-bit config registers: CFG0 covers BAR0-BAR3,
 * CFG1 covers BAR4-BAR5 (hence the "< BAR_4" selector below).
 */
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
	(((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
	(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
	(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
/* Same layout for virtual functions, at a different base offset */
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
	(((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
	(CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
	(CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
/* Per-BAR 8-bit field: bits [4:0] = aperture, bits [7:5] = control */
#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
	(GENMASK(4, 0) << ((b) * 8))
#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
	(((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
	(GENMASK(7, 5) << ((b) * 8))
#define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
	(((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))

/* Endpoint Function Configuration Register */
#define CDNS_PCIE_LM_EP_FUNC_CFG	(CDNS_PCIE_LM_BASE + 0x02c0)

/* Root Complex BAR Configuration Register */
#define CDNS_PCIE_LM_RC_BAR_CFG	(CDNS_PCIE_LM_BASE + 0x0300)
#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK	GENMASK(5, 0)
#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
	(((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK		GENMASK(8, 6)
#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
	(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK	GENMASK(13, 9)
#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
	(((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK		GENMASK(16, 14)
#define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
	(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
#define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE	BIT(17)
#define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS	0
#define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS	BIT(18)
#define  CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE		BIT(19)
#define  CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS		0
#define  CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS		BIT(20)
#define  CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE		BIT(31)

/* BAR control values applicable to both Endpoint Function and Root Complex */
#define  CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED		0x0
#define  CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS		0x1
#define  CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS		0x4
#define  CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS	0x5
#define  CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS		0x6
#define  CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7

/* Helpers placing a control value in the RC BAR CFG field for BAR @bar */
#define LM_RC_BAR_CFG_CTRL_DISABLED(bar)		\
	(CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar)		\
	(CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar)		\
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar)	\
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar)		\
	(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar)	\
	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
/* Aperture encoding: register expects log2(size) - 2 in the per-BAR field */
#define LM_RC_BAR_CFG_APERTURE(bar, aperture)		\
	(((aperture) - 2) << ((bar) * 8))

/* PTM Control Register */
#define CDNS_PCIE_LM_PTM_CTRL	(CDNS_PCIE_LM_BASE + 0x0da8)
/* NOTE(review): "TPM" looks like a typo for "PTM"; name kept for existing callers */
#define  CDNS_PCIE_LM_TPM_CTRL_PTMRSEN	BIT(17)
/*
 * Endpoint Function Registers (PCI configuration space for endpoint functions)
 *
 * Each function's config space is a 4 KiB window selected by the function
 * number in bits [19:12].
 */
#define CDNS_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12))

/* Fixed capability offsets inside a function's config space */
#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET	0x90
#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET	0xb0
#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET	0xc0
#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET	0x200

/*
 * Endpoint PF Registers
 */
#define CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(fn)	(0x144 + (fn) * 0x1000)
#define CDNS_PCIE_ARI_CAP_NFN_MASK			GENMASK(15, 8)

/*
 * Root Port Registers (PCI configuration space for the root port function)
 */
#define CDNS_PCIE_RP_BASE	0x00200000
#define CDNS_PCIE_RP_CAP_OFFSET	0xc0
/*
 * Address Translation (AT) Registers
 *
 * Outbound regions translate AXI (CPU) accesses into PCIe transactions;
 * inbound BAR registers translate PCIe accesses into AXI addresses.
 * Each of the 32 outbound regions occupies a 0x20-byte register stride,
 * hence the "((r) & 0x1f) * 0x0020" addressing below.
 */
#define CDNS_PCIE_AT_BASE	0x00400000

/* Region r Outbound AXI to PCIe Address Translation Register 0 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
/* Number of translated address bits, encoded as (nbits - 1) */
#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12)
#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
	(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20)
#define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
	(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)

/* Region r Outbound AXI to PCIe Address Translation Register 1 */
#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)

/* Region r Outbound PCIe Descriptor Register 0 */
#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
	(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
/* TLP type generated for accesses hitting this region */
#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK		GENMASK(3, 0)
#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM		0x2
#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO		0x6
#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0	0xa
#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1	0xb
#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG	0xc
#define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG	0xd
/* Bit 23 MUST be set in RC mode. */
#define  CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23)
#define  CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK	GENMASK(31, 24)
#define  CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
	(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)

/* Region r Outbound PCIe Descriptor Register 1 */
#define CDNS_PCIE_AT_OB_REGION_DESC1(r)	\
	(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
#define  CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK	GENMASK(7, 0)
#define  CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
	((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)

/* Region r AXI Region Base Address Register 0 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
	(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
/* Region size in address bits, encoded as (nbits - 1) */
#define  CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define  CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)

/* Region r AXI Region Base Address Register 1 */
#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
	(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)

/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
	(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
#define  CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK	GENMASK(5, 0)
#define  CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
	(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
	(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)

/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN	(CDNS_PCIE_AT_BASE + 0x0824)

/* LTSSM Capabilities register */
#define CDNS_PCIE_LTSSM_CONTROL_CAP		(CDNS_PCIE_LM_BASE + 0x0054)
#define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK	GENMASK(2, 1)
#define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT	1
#define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
	(((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
	 CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
/* Root Port inbound BAR identifiers (RP_NO_BAR = "no BAR" translation slot) */
enum cdns_pcie_rp_bar {
	RP_BAR_UNDEFINED = -1,
	RP_BAR0,
	RP_BAR1,
	RP_NO_BAR
};
/* Number of Root Port inbound translation entries (RP_BAR0/RP_BAR1/RP_NO_BAR) */
#define CDNS_PCIE_RP_MAX_IB	0x3
/* Number of outbound address-translation regions supported */
#define CDNS_PCIE_MAX_OB	32

/* Size and availability of one Root Port inbound BAR */
struct cdns_pcie_rp_ib_bar {
	u64 size;	/* aperture size in bytes */
	bool free;	/* true while the BAR is unassigned */
};
/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
	(CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
	(CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)

/* Normal/Vendor specific message access: offset inside some outbound region */
#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK	GENMASK(7, 5)
#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
	(((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK		GENMASK(15, 8)
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
	(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
#define CDNS_PCIE_MSG_NO_DATA			BIT(16)
struct cdns_pcie;

/* PCIe message codes used to raise/clear legacy INTx interrupts */
enum cdns_pcie_msg_code {
	MSG_CODE_ASSERT_INTA	= 0x20,
	MSG_CODE_ASSERT_INTB	= 0x21,
	MSG_CODE_ASSERT_INTC	= 0x22,
	MSG_CODE_ASSERT_INTD	= 0x23,
	MSG_CODE_DEASSERT_INTA	= 0x24,
	MSG_CODE_DEASSERT_INTB	= 0x25,
	MSG_CODE_DEASSERT_INTC	= 0x26,
	MSG_CODE_DEASSERT_INTD	= 0x27,
};
/* PCIe message routing values (placed in the NORMAL_MSG_ROUTING field) */
enum cdns_pcie_msg_routing {
	/* Route to Root Complex */
	MSG_ROUTING_TO_RC,

	/* Use Address Routing */
	MSG_ROUTING_BY_ADDR,

	/* Use ID Routing */
	MSG_ROUTING_BY_ID,

	/* Route as Broadcast Message from Root Complex */
	MSG_ROUTING_BCAST,

	/* Local message; terminate at receiver (INTx messages) */
	MSG_ROUTING_LOCAL,

	/* Gather & route to Root Complex (PME_TO_Ack message) */
	MSG_ROUTING_GATHER,
};
/**
 * struct cdns_pcie_ops - platform-specific callbacks for the Cadence core
 * @start_link: begin link training; returns 0 on success or a negative errno
 * @stop_link: stop link training / bring the link down
 * @link_up: report whether the PCIe link is currently up
 * @cpu_addr_fixup: translate a CPU address into the address the controller
 *                  should program into its outbound regions
 *
 * All callbacks are optional; the cdns_pcie_* wrappers below check for NULL.
 */
struct cdns_pcie_ops {
	int	(*start_link)(struct cdns_pcie *pcie);
	void	(*stop_link)(struct cdns_pcie *pcie);
	bool	(*link_up)(struct cdns_pcie *pcie);
	u64     (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
};
/**
 * struct cdns_pcie - private data for Cadence PCIe controller drivers
 * @reg_base: IO mapped register base
 * @mem_res: start/end offsets in the physical system memory to map PCI accesses
 * @dev: PCIe controller device
 * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
 * @phy_count: number of supported PHY devices
 * @phy: list of pointers to specific PHY control blocks
 * @link: list of pointers to corresponding device link representations
 * @ops: Platform-specific ops to control various inputs from Cadence PCIe
 *       wrapper
 */
struct cdns_pcie {
	void __iomem		*reg_base;
	struct resource		*mem_res;
	struct device		*dev;
	bool			is_rc;
	int			phy_count;
	struct phy		**phy;
	struct device_link	**link;
	const struct cdns_pcie_ops *ops;
};
/**
 * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
 * @pcie: Cadence PCIe controller
 * @cfg_res: start/end offsets in the physical system memory to map PCI
 *           configuration space accesses
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *            single function at a time
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 * @avail_ib_bar: tracks whether each of RP_BAR0, RP_BAR1 and RP_NO_BAR is
 *                still free (available) for inbound translation
 * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 */
struct cdns_pcie_rc {
	struct cdns_pcie	pcie;
	struct resource		*cfg_res;
	void __iomem		*cfg_base;
	u32			vendor_id;
	u32			device_id;
	bool			avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
	unsigned int		quirk_retrain_flag:1;
	unsigned int		quirk_detect_quiet_flag:1;
};
/**
 * struct cdns_pcie_epf - Structure to hold info about endpoint function
 * @epf: Info about virtual functions attached to the physical function
 * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
 */
struct cdns_pcie_epf {
	struct cdns_pcie_epf *epf;
	struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
/**
 * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
 * @pcie: Cadence PCIe controller
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
 *                 dedicated outbound regions is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *                the sending of a memory write (MSI) / normal message (INTX
 *                IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
 *                dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *              the MSI/INTX IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted INTX IRQs.
 * @lock: spin lock to disable interrupts while modifying PCIe controller
 *        registers fields (RMW) accessible by both remote RC and EP to
 *        minimize time between read and write
 * @epf: Structure to hold info about endpoint function
 * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
 * @quirk_disable_flr: Disable FLR (Function Level Reset) quirk flag
 */
struct cdns_pcie_ep {
	struct cdns_pcie	pcie;
	u32			max_regions;
	unsigned long		ob_region_map;
	phys_addr_t		*ob_addr;
	phys_addr_t		irq_phys_addr;
	void __iomem		*irq_cpu_addr;
	u64			irq_pci_addr;
	u8			irq_pci_fn;
	u8			irq_pending;
	/* protect writing to PCI_STATUS while raising INTX interrupts */
	spinlock_t		lock;
	struct cdns_pcie_epf	*epf;
	unsigned int		quirk_detect_quiet_flag:1;
	unsigned int		quirk_disable_flr:1;
};
  343. /* Register access */
  344. static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
  345. {
  346. writel(value, pcie->reg_base + reg);
  347. }
  348. static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
  349. {
  350. return readl(pcie->reg_base + reg);
  351. }
  352. static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
  353. {
  354. void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
  355. unsigned int offset = (unsigned long)addr & 0x3;
  356. u32 val = readl(aligned_addr);
  357. if (!IS_ALIGNED((uintptr_t)addr, size)) {
  358. pr_warn("Address %p and size %d are not aligned\n", addr, size);
  359. return 0;
  360. }
  361. if (size > 2)
  362. return val;
  363. return (val >> (8 * offset)) & ((1 << (size * 8)) - 1);
  364. }
  365. static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
  366. {
  367. void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
  368. unsigned int offset = (unsigned long)addr & 0x3;
  369. u32 mask;
  370. u32 val;
  371. if (!IS_ALIGNED((uintptr_t)addr, size)) {
  372. pr_warn("Address %p and size %d are not aligned\n", addr, size);
  373. return;
  374. }
  375. if (size > 2) {
  376. writel(value, addr);
  377. return;
  378. }
  379. mask = ~(((1 << (size * 8)) - 1) << (offset * 8));
  380. val = readl(aligned_addr) & mask;
  381. val |= value << (offset * 8);
  382. writel(val, aligned_addr);
  383. }
  384. /* Root Port register access */
  385. static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
  386. u32 reg, u8 value)
  387. {
  388. void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
  389. cdns_pcie_write_sz(addr, 0x1, value);
  390. }
  391. static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
  392. u32 reg, u16 value)
  393. {
  394. void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
  395. cdns_pcie_write_sz(addr, 0x2, value);
  396. }
  397. static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
  398. {
  399. void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
  400. return cdns_pcie_read_sz(addr, 0x2);
  401. }
  402. /* Endpoint Function register access */
  403. static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
  404. u32 reg, u8 value)
  405. {
  406. void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
  407. cdns_pcie_write_sz(addr, 0x1, value);
  408. }
  409. static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
  410. u32 reg, u16 value)
  411. {
  412. void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
  413. cdns_pcie_write_sz(addr, 0x2, value);
  414. }
  415. static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
  416. u32 reg, u32 value)
  417. {
  418. writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
  419. }
  420. static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
  421. {
  422. void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
  423. return cdns_pcie_read_sz(addr, 0x2);
  424. }
  425. static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
  426. {
  427. return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
  428. }
  429. static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
  430. {
  431. if (pcie->ops->start_link)
  432. return pcie->ops->start_link(pcie);
  433. return 0;
  434. }
  435. static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
  436. {
  437. if (pcie->ops->stop_link)
  438. pcie->ops->stop_link(pcie);
  439. }
  440. static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
  441. {
  442. if (pcie->ops->link_up)
  443. return pcie->ops->link_up(pcie);
  444. return true;
  445. }
#ifdef CONFIG_PCIE_CADENCE_HOST
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where);
#else
/* Host (Root Complex) support compiled out: stubs succeed without doing anything. */
static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}

static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
					     int where)
{
	return NULL;
}
#endif

#ifdef CONFIG_PCIE_CADENCE_EP
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
#else
/* Endpoint support compiled out: stub succeeds without doing anything. */
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	return 0;
}
#endif
/* Apply the LTSSM Detect Quiet minimum-delay quirk. */
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);

/* Program outbound region @r to translate CPU accesses into PCIe MEM/IO TLPs. */
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
				   u32 r, bool is_io,
				   u64 cpu_addr, u64 pci_addr, size_t size);

/* Program outbound region @r to emit normal messages (e.g. INTx). */
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
						  u8 busnr, u8 fn,
						  u32 r, u64 cpu_addr);

/* Tear down outbound region @r. */
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);

/* PHY lifecycle helpers shared by host and endpoint drivers. */
void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);

extern const struct dev_pm_ops cdns_pcie_pm_ops;

#endif /* _PCIE_CADENCE_H */