pcie_ecam_synquacer.c

// SPDX-License-Identifier: GPL-2.0
/*
 * SynQuacer PCIE host driver
 *
 * Based on drivers/pci/pcie_ecam_generic.c
 *
 * Copyright (C) 2016 Imagination Technologies
 * Copyright (C) 2021 Linaro Ltd.
 */

#include <common.h>
#include <dm.h>
#include <pci.h>
#include <log.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>

/* iATU registers */
#define IATU_VIEWPORT_OFF 0x900
#define IATU_VIEWPORT_INBOUND BIT(31)
#define IATU_VIEWPORT_OUTBOUND 0
#define IATU_VIEWPORT_REGION_INDEX(idx) ((idx) & 7)

#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0 0x904
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM 0x0
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_IO 0x2
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG0 0x4
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG1 0x5
#define IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH BIT(12)

#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0 0x908
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_REGION_EN BIT(31)
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE BIT(28)
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT 0xF
#define IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_64BIT 0xFF

#define IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 0x90C
#define IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 0x910
#define IATU_LIMIT_ADDR_OFF_OUTBOUND_0 0x914
#define IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 0x918
#define IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 0x91C

/* Clock and resets */
#define CORE_CONTROL 0x000
#define APP_LTSSM_ENABLE BIT(4)
#define DEVICE_TYPE (BIT(3) | BIT(2) | BIT(1) | BIT(0))

#define AXI_CLK_STOP 0x004
#define DBI_ACLK_STOP BIT(8)
#define SLV_ACLK_STOP BIT(4)
#define MSTR_ACLK_STOP BIT(0)
#define DBI_CSYSREQ_REG BIT(9)
#define SLV_CSYSREQ_REG BIT(5)
#define MSTR_CSYSREQ_REG BIT(1)

#define RESET_CONTROL_1 0x00C
#define PERST_N_O_REG BIT(5)
#define PERST_N_I_REG BIT(4)
#define BUTTON_RST_N_REG BIT(1)
#define PWUP_RST_N_REG BIT(0)

#define RESET_CONTROL_2 0x010

#define RESET_SELECT_1 0x014
#define SQU_RST_SEL BIT(29)
#define PHY_RST_SEL BIT(28)
#define PWR_RST_SEL BIT(24)
#define STI_RST_SEL BIT(20)
#define N_STI_RST_SEL BIT(16)
#define CORE_RST_SEL BIT(12)
#define PERST_SEL BIT(4)
#define BUTTON_RST_SEL BIT(1)
#define PWUP_RST_SEL BIT(0)

#define RESET_SELECT_2 0x018
#define DBI_ARST_SEL BIT(8)
#define SLV_ARST_SEL BIT(4)
#define MSTR_ARST_SEL BIT(0)

#define EM_CONTROL 0x030
#define PRE_DET_STT_REG BIT(4)

#define EM_SELECT 0x034
#define PRE_DET_STT_SEL BIT(4)

#define PM_CONTROL_2 0x050
#define SYS_AUX_PWR_DET BIT(8)

#define PHY_CONFIG_COM_6 0x114
#define PIPE_PORT_SEL GENMASK(1, 0)

#define LINK_MONITOR 0x210
#define SMLH_LINK_UP BIT(0)

#define LINK_CAPABILITIES_REG 0x07C
#define PCIE_CAP_MAX_LINK_WIDTH GENMASK(7, 4)
#define PCIE_CAP_MAX_LINK_SPEED GENMASK(3, 0)

#define LINK_CONTROL_LINK_STATUS_REG 0x080
#define PCIE_CAP_NEGO_LINK_WIDTH GENMASK(23, 20)
#define PCIE_CAP_LINK_SPEED GENMASK(19, 16)

#define TYPE1_CLASS_CODE_REV_ID_REG 0x008
#define BASE_CLASS_CODE 0xFF000000
#define BASE_CLASS_CODE_VALUE 0x06
#define SUBCLASS_CODE 0x00FF0000
#define SUBCLASS_CODE_VALUE 0x04
#define PROGRAM_INTERFACE 0x0000FF00
#define PROGRAM_INTERFACE_VALUE 0x00

#define GEN2_CONTROL_OFF 0x80c
#define DIRECT_SPEED_CHANGE BIT(17)

#define MISC_CONTROL_1_OFF 0x8BC
#define DBI_RO_WR_EN BIT(0)

static void or_writel(void *base, u32 offs, u32 val)
{
	writel(readl(base + offs) | val, base + offs);
}

static void masked_writel(void *base, u32 offs, u32 mask, u32 val)
{
	u32 data;
	int shift = ffs(mask); /* Note that ffs() returns 1 for 0x1 */

	if (val && shift > 1)
		val <<= shift - 1;

	if (mask != ~0)
		data = (readl(base + offs) & ~mask) | val;
	else
		data = val;

	writel(data, base + offs);
}

static u32 masked_readl(void *base, u32 offs, u32 mask)
{
	u32 data;
	int shift = ffs(mask); /* Note that ffs() returns 1 for 0x1 */

	data = readl(base + offs);

	if (mask != ~0)
		data &= mask;
	if (shift > 1)
		data >>= shift - 1;

	return data;
}
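
/*
 * Usage note: the field helpers above take an unshifted field value and
 * place it at the mask's lowest set bit. For example,
 *
 *	masked_writel(base, PM_CONTROL_2, SYS_AUX_PWR_DET, 1)
 *
 * sets BIT(8) of PM_CONTROL_2 without disturbing the other bits, and
 *
 *	masked_readl(base, CORE_CONTROL, APP_LTSSM_ENABLE)
 *
 * returns 0 or 1 depending on BIT(4). masked_readl() undoes the shift on
 * the way back, so callers always deal in field values rather than raw
 * register values.
 */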

/*
 * Since SynQuacer's PCIe RC is expected to be initialized in the
 * firmware (including U-Boot), the devicetree doesn't describe the
 * control register blocks.
 *
 * Thus, this driver initializes the PCIe RC with fixed addresses.
 */
#define SYNQUACER_PCI_SEG0_CONFIG_BASE 0x60000000
#define SYNQUACER_PCI_SEG0_CONFIG_SIZE 0x07f00000
#define SYNQUACER_PCI_SEG0_DBI_BASE 0x583d0000
#define SYNQUACER_PCI_SEG0_EXS_BASE 0x58390000

#define SYNQUACER_PCI_SEG1_CONFIG_BASE 0x70000000
#define SYNQUACER_PCI_SEG1_CONFIG_SIZE 0x07f00000
#define SYNQUACER_PCI_SEG1_DBI_BASE 0x583c0000
#define SYNQUACER_PCI_SEG1_EXS_BASE 0x58380000

#define SIZE_16KB 0x00004000
#define SIZE_64KB 0x00010000
#define SIZE_1MB 0x00100000

#define SYNQUACER_PCI_DBI_SIZE SIZE_16KB
#define SYNQUACER_PCI_EXS_SIZE SIZE_64KB

#define NUM_SQ_PCI_RC 2

static const struct synquacer_pcie_base {
	phys_addr_t cfg_base;
	phys_addr_t dbi_base;
	phys_addr_t exs_base;
} synquacer_pci_bases[NUM_SQ_PCI_RC] = {
	{
		.cfg_base = SYNQUACER_PCI_SEG0_CONFIG_BASE,
		.dbi_base = SYNQUACER_PCI_SEG0_DBI_BASE,
		.exs_base = SYNQUACER_PCI_SEG0_EXS_BASE,
	}, {
		.cfg_base = SYNQUACER_PCI_SEG1_CONFIG_BASE,
		.dbi_base = SYNQUACER_PCI_SEG1_DBI_BASE,
		.exs_base = SYNQUACER_PCI_SEG1_EXS_BASE,
	},
};

/**
 * struct synquacer_ecam_pcie - synquacer_ecam PCIe controller state
 * @cfg_base: The base address of memory mapped configuration space
 * @size: The size of the mapped configuration space
 * @dbi_base: The base address of the DesignWare DBI register block
 * @exs_base: The base address of the SynQuacer EXS (clock/reset/link) block
 * @first_busno: The first bus number handled by this controller
 * @mem: The 32-bit memory window
 * @io: The I/O window
 * @mem64: The 64-bit memory window
 */
struct synquacer_ecam_pcie {
	void *cfg_base;
	pci_size_t size;
	void *dbi_base;
	void *exs_base;
	int first_busno;

	struct pci_region mem;
	struct pci_region io;
	struct pci_region mem64;
};

DECLARE_GLOBAL_DATA_PTR;

/**
 * pci_synquacer_ecam_conf_address() - Calculate the address of a config access
 * @bus: Pointer to the PCI bus
 * @bdf: Identifies the PCIe device to access
 * @offset: The offset into the device's configuration space
 * @paddress: Pointer to the pointer to write the calculated address to
 *
 * Calculates the address that should be accessed to perform a PCIe
 * configuration space access for a given device identified by the PCIe
 * controller device @bus and the bus, device & function numbers in @bdf. If
 * access to the device is not valid then the function will return an error
 * code. Otherwise the address to access will be written to the pointer pointed
 * to by @paddress.
 */
static int pci_synquacer_ecam_conf_address(const struct udevice *bus,
					   pci_dev_t bdf, uint offset,
					   void **paddress)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(bus);
	void *addr;

	addr = pcie->cfg_base;
	addr += PCIE_ECAM_OFFSET(PCI_BUS(bdf) - pcie->first_busno,
				 PCI_DEV(bdf), PCI_FUNC(bdf), offset);
	*paddress = addr;

	return 0;
}
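
/*
 * ECAM gives each function a 4 KiB configuration window at
 * bus << 20 | device << 15 | function << 12 | offset, which is the layout
 * PCIE_ECAM_OFFSET() encodes. As a worked example (assuming the SEG0
 * window), offset 0x10 of bus first_busno + 1, device 0, function 0 lands
 * at 0x60000000 + (1 << 20) + 0x10 = 0x60100010.
 */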

static bool pci_synquacer_ecam_addr_valid(const struct udevice *bus,
					  pci_dev_t bdf)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(bus);
	int num_buses = DIV_ROUND_UP(pcie->size, 1 << 16);

	/*
	 * The Synopsys DesignWare PCIe controller in ECAM mode will not filter
	 * type 0 config TLPs sent to devices 1 and up on its downstream port,
	 * resulting in devices appearing multiple times on bus 0 unless we
	 * filter out those accesses here.
	 */
	if (PCI_BUS(bdf) == pcie->first_busno && PCI_DEV(bdf) > 0)
		return false;

	return (PCI_BUS(bdf) >= pcie->first_busno &&
		PCI_BUS(bdf) < pcie->first_busno + num_buses);
}

/**
 * pci_synquacer_ecam_read_config() - Read from configuration space
 * @bus: Pointer to the PCI bus
 * @bdf: Identifies the PCIe device to access
 * @offset: The offset into the device's configuration space
 * @valuep: A pointer at which to store the read value
 * @size: Indicates the size of access to perform
 *
 * Read a value of size @size from offset @offset within the configuration
 * space of the device identified by the bus, device & function numbers in @bdf
 * on the PCI bus @bus.
 */
static int pci_synquacer_ecam_read_config(const struct udevice *bus,
					  pci_dev_t bdf, uint offset,
					  ulong *valuep, enum pci_size_t size)
{
	if (!pci_synquacer_ecam_addr_valid(bus, bdf)) {
		*valuep = pci_get_ff(size);
		return 0;
	}

	return pci_generic_mmap_read_config(bus, pci_synquacer_ecam_conf_address,
					    bdf, offset, valuep, size);
}

/**
 * pci_synquacer_ecam_write_config() - Write to configuration space
 * @bus: Pointer to the PCI bus
 * @bdf: Identifies the PCIe device to access
 * @offset: The offset into the device's configuration space
 * @value: The value to write
 * @size: Indicates the size of access to perform
 *
 * Write the value @value of size @size to offset @offset within the
 * configuration space of the device identified by the bus, device & function
 * numbers in @bdf on the PCI bus @bus.
 */
static int pci_synquacer_ecam_write_config(struct udevice *bus, pci_dev_t bdf,
					   uint offset, ulong value,
					   enum pci_size_t size)
{
	if (!pci_synquacer_ecam_addr_valid(bus, bdf))
		return 0;

	return pci_generic_mmap_write_config(bus, pci_synquacer_ecam_conf_address,
					     bdf, offset, value, size);
}

/**
 * pci_synquacer_ecam_of_to_plat() - Translate from DT to device state
 * @dev: A pointer to the device being operated on
 *
 * Translate relevant data from the device tree pertaining to device @dev into
 * state that the driver will later make use of. This state is stored in the
 * device's private data structure.
 *
 * Return: 0 on success, else a negative error code
 */
static int pci_synquacer_ecam_of_to_plat(struct udevice *dev)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(dev);
	struct fdt_resource reg_res;
	int i, err;

	debug("%s: called for %s\n", __func__, dev->name);

	err = fdt_get_resource(gd->fdt_blob, dev_of_offset(dev), "reg",
			       0, &reg_res);
	if (err < 0) {
		pr_err("\"reg\" resource not found\n");
		return err;
	}

	/* Find the matching DBI/EXS base address pair */
	for (i = 0; i < NUM_SQ_PCI_RC; i++) {
		if (synquacer_pci_bases[i].cfg_base == reg_res.start)
			break;
	}
	if (i == NUM_SQ_PCI_RC) {
		pr_err("Unknown ECAM base address %lx.\n",
		       (unsigned long)reg_res.start);
		return -ENOENT;
	}

	pcie->dbi_base = map_physmem(synquacer_pci_bases[i].dbi_base,
				     SYNQUACER_PCI_DBI_SIZE, MAP_NOCACHE);
	if (!pcie->dbi_base) {
		pr_err("Failed to map DBI for %s\n", dev->name);
		return -ENOMEM;
	}

	pcie->exs_base = map_physmem(synquacer_pci_bases[i].exs_base,
				     SYNQUACER_PCI_EXS_SIZE, MAP_NOCACHE);
	if (!pcie->exs_base) {
		pr_err("Failed to map EXS for %s\n", dev->name);
		return -ENOMEM;
	}

	pcie->size = fdt_resource_size(&reg_res);
	pcie->cfg_base = map_physmem(reg_res.start, pcie->size, MAP_NOCACHE);
	if (!pcie->cfg_base) {
		pr_err("Failed to map config space for %s\n", dev->name);
		return -ENOMEM;
	}

	debug("mappings DBI: %p EXS: %p CFG: %p\n", pcie->dbi_base,
	      pcie->exs_base, pcie->cfg_base);

	return 0;
}
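
/*
 * For reference, a node this driver binds to looks roughly like the sketch
 * below. Only the compatible string and the SEG0 ECAM base/size come from
 * this file; the remaining properties are illustrative assumptions. The
 * probe routine expects hose->regions[0], [1] and [2] to be the I/O, 32-bit
 * memory and 64-bit memory windows respectively, so the ranges entries
 * should be listed in that order.
 *
 *	pcie@60000000 {
 *		compatible = "socionext,synquacer-pcie-ecam";
 *		device_type = "pci";
 *		#address-cells = <3>;
 *		#size-cells = <2>;
 *		reg = <0x0 0x60000000 0x0 0x07f00000>;
 *		ranges = < I/O window >,
 *			 < 32-bit memory window >,
 *			 < 64-bit memory window >;
 *	};
 */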

static void pci_synquacer_pre_init(struct synquacer_ecam_pcie *pcie)
{
	void *base = pcie->exs_base;

	masked_writel(base, EM_SELECT, PRE_DET_STT_SEL, 0);
	masked_writel(base, EM_CONTROL, PRE_DET_STT_REG, 0);
	masked_writel(base, EM_CONTROL, PRE_DET_STT_REG, 1);

	/* 1: Assert all PHY / LINK resets */
	masked_writel(base, RESET_SELECT_1, PERST_SEL, 0);
	masked_writel(base, RESET_CONTROL_1, PERST_N_I_REG, 0);
	masked_writel(base, RESET_CONTROL_1, PERST_N_O_REG, 0);

	/* Device Reset (PERST#) takes effect after device_type (RC) is set */
	masked_writel(base, RESET_SELECT_1, PWUP_RST_SEL, 0);
	masked_writel(base, RESET_CONTROL_1, PWUP_RST_N_REG, 0);
	masked_writel(base, RESET_SELECT_1, BUTTON_RST_SEL, 0);
	masked_writel(base, RESET_CONTROL_1, BUTTON_RST_N_REG, 0);
	masked_writel(base, RESET_SELECT_1, PWR_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_2, MSTR_ARST_SEL, 1);
	masked_writel(base, RESET_SELECT_2, SLV_ARST_SEL, 1);
	masked_writel(base, RESET_SELECT_2, DBI_ARST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, CORE_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, STI_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, N_STI_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, SQU_RST_SEL, 1);
	masked_writel(base, RESET_SELECT_1, PHY_RST_SEL, 1);

	/* 2: Set P<n>_app_ltssm_enable='0' for reprogramming before linkup. */
	masked_writel(base, CORE_CONTROL, APP_LTSSM_ENABLE, 0);

	/* 3: Set device_type (RC) */
	masked_writel(base, CORE_CONTROL, DEVICE_TYPE, 4);
}
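
/*
 * The numbered comments in pci_synquacer_pre_init() (steps 1-3) and
 * pci_synquacer_post_init() (steps 4-19) form a single bring-up sequence;
 * the probe routine runs the two halves back to back with a settling delay
 * in between.
 */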

static void pci_synquacer_dbi_init(void *dbi_base)
{
	masked_writel(dbi_base, MISC_CONTROL_1_OFF, DBI_RO_WR_EN, 1);

	/* 4 Lanes */
	masked_writel(dbi_base, LINK_CAPABILITIES_REG,
		      PCIE_CAP_MAX_LINK_WIDTH, 4);
	/* Gen 2 */
	masked_writel(dbi_base, LINK_CAPABILITIES_REG,
		      PCIE_CAP_MAX_LINK_SPEED, 2);

	masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG,
		      BASE_CLASS_CODE, BASE_CLASS_CODE_VALUE);
	masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG,
		      SUBCLASS_CODE, SUBCLASS_CODE_VALUE);
	masked_writel(dbi_base, TYPE1_CLASS_CODE_REV_ID_REG,
		      PROGRAM_INTERFACE, PROGRAM_INTERFACE_VALUE);

	masked_writel(dbi_base, MISC_CONTROL_1_OFF, DBI_RO_WR_EN, 0);
}
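
/*
 * Note that the fields rewritten by pci_synquacer_dbi_init() (link
 * capabilities, class code) are normally read-only through the DBI; setting
 * MISC_CONTROL_1_OFF.DBI_RO_WR_EN around the writes is what makes them
 * writable, and the bit is cleared again once they are programmed.
 */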

static void pcie_sq_prog_outbound_atu(void *dbi_base, int index,
				      u64 cpu_base, u64 pci_base, u64 size,
				      u32 type, u32 flags)
{
	debug("%s: %p, %d, %llx, %llx, %llx, %x, %x\n", __func__,
	      dbi_base, index, cpu_base, pci_base, size, type, flags);

	writel(IATU_VIEWPORT_OUTBOUND | IATU_VIEWPORT_REGION_INDEX(index),
	       dbi_base + IATU_VIEWPORT_OFF);
	writel((u32)(cpu_base & 0xffffffff),
	       dbi_base + IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0);
	writel((u32)(cpu_base >> 32),
	       dbi_base + IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0);
	writel((u32)(cpu_base + size - 1),
	       dbi_base + IATU_LIMIT_ADDR_OFF_OUTBOUND_0);
	writel((u32)(pci_base & 0xffffffff),
	       dbi_base + IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0);
	writel((u32)(pci_base >> 32),
	       dbi_base + IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0);
	writel(type, dbi_base + IATU_REGION_CTRL_1_OFF_OUTBOUND_0);
	writel(IATU_REGION_CTRL_2_OFF_OUTBOUND_0_REGION_EN | flags,
	       dbi_base + IATU_REGION_CTRL_2_OFF_OUTBOUND_0);
}

static void pci_synquacer_post_init(struct synquacer_ecam_pcie *pcie)
{
	void *base = pcie->exs_base;

	/*
	 * 4: Set bifurcation (1 = disable, 4 = enable)
	 * 5: Supply the reference clock (already done)
	 * 6: Wait 10 usec for the reference clock to stabilize
	 * 7: De-assert PERST#
	 */
	masked_writel(base, RESET_CONTROL_1, PERST_N_I_REG, 1);
	masked_writel(base, RESET_CONTROL_1, PERST_N_O_REG, 1);

	/* 8: Assert SYS_AUX_PWR_DET */
	masked_writel(base, PM_CONTROL_2, SYS_AUX_PWR_DET, 1);

	/* 9: Supply the following clocks */
	masked_writel(base, AXI_CLK_STOP, MSTR_CSYSREQ_REG, 1);
	masked_writel(base, AXI_CLK_STOP, MSTR_ACLK_STOP, 0);
	masked_writel(base, AXI_CLK_STOP, SLV_CSYSREQ_REG, 1);
	masked_writel(base, AXI_CLK_STOP, SLV_ACLK_STOP, 0);
	masked_writel(base, AXI_CLK_STOP, DBI_CSYSREQ_REG, 1);
	masked_writel(base, AXI_CLK_STOP, DBI_ACLK_STOP, 0);

	/*
	 * 10: De-assert PHY reset
	 * 11: De-assert LINK's PMC reset
	 */
	masked_writel(base, RESET_CONTROL_1, PWUP_RST_N_REG, 1);
	masked_writel(base, RESET_CONTROL_1, BUTTON_RST_N_REG, 1);

	/*
	 * 12: PHY auto
	 * 13: Wrapper auto
	 * 14-17: PHY auto
	 * 18: Wrapper auto
	 * 19: Update registers through the DBI AXI slave interface
	 */
	pci_synquacer_dbi_init(pcie->dbi_base);

	or_writel(pcie->dbi_base, PCI_COMMAND,
		  PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Force link speed change to Gen2 at link up */
	or_writel(pcie->dbi_base, GEN2_CONTROL_OFF, DIRECT_SPEED_CHANGE);

	/* Region 0: MMIO32 range */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 0,
				  pcie->mem.phys_start,
				  pcie->mem.bus_start,
				  pcie->mem.size,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM |
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT);

	/* Region 1: Type 0 config space */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 1,
				  (u64)pcie->cfg_base,
				  0,
				  SIZE_64KB,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG0,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE);

	/* Region 2: Type 1 config space */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 2,
				  (u64)pcie->cfg_base + SIZE_64KB,
				  0,
				  (u64)pcie->io.phys_start - (u64)pcie->cfg_base - SIZE_64KB,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_CFG1,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_CFG_SHIFT_MODE);

	/* Region 3: port I/O range */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 3,
				  pcie->io.phys_start,
				  pcie->io.bus_start,
				  pcie->io.size,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_IO,
				  0);

	/* Region 4: MMIO64 range */
	pcie_sq_prog_outbound_atu(pcie->dbi_base, 4,
				  pcie->mem64.phys_start,
				  pcie->mem64.bus_start,
				  pcie->mem64.size,
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TYPE_MEM |
				  IATU_REGION_CTRL_1_OFF_OUTBOUND_0_TH,
				  IATU_REGION_CTRL_2_OFF_OUTBOUND_0_MSG_CODE_32BIT);

	/* Enable the link */
	if (masked_readl(base, CORE_CONTROL, APP_LTSSM_ENABLE) == 0)
		masked_writel(base, CORE_CONTROL, APP_LTSSM_ENABLE, 1);
}
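
/**
 * pci_synquacer_ecam_probe() - Set up the root complex and bring up the link
 * @dev: A pointer to the device being operated on
 *
 * Captures the I/O, 32-bit memory and 64-bit memory windows from the
 * controller regions, then runs the reset/clock bring-up sequence
 * (pre-init, settling delay, post-init) so the bus can be enumerated.
 */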
static int pci_synquacer_ecam_probe(struct udevice *dev)
{
	struct synquacer_ecam_pcie *pcie = dev_get_priv(dev);
	struct udevice *ctlr = pci_get_controller(dev);
	struct pci_controller *hose = dev_get_uclass_priv(ctlr);

	debug("Probe synquacer pcie for bus %d\n", dev_seq(dev));
	pcie->first_busno = dev_seq(dev);

	/* Store the IO and MEM window settings for configuring the ATU */
	pcie->io.phys_start = hose->regions[0].phys_start;   /* IO base */
	pcie->io.bus_start = hose->regions[0].bus_start;     /* IO_bus_addr */
	pcie->io.size = hose->regions[0].size;               /* IO size */

	pcie->mem.phys_start = hose->regions[1].phys_start;  /* MEM base */
	pcie->mem.bus_start = hose->regions[1].bus_start;    /* MEM_bus_addr */
	pcie->mem.size = hose->regions[1].size;              /* MEM size */

	pcie->mem64.phys_start = hose->regions[2].phys_start; /* MEM64 base */
	pcie->mem64.bus_start = hose->regions[2].bus_start;   /* MEM64_bus_addr */
	pcie->mem64.size = hose->regions[2].size;             /* MEM64 size */

	pci_synquacer_pre_init(pcie);
	mdelay(150);
	pci_synquacer_post_init(pcie);

	/* It takes a while for the PCIe bus to stabilize before scanning */
	mdelay(100);

	return 0;
}

static const struct dm_pci_ops pci_synquacer_ecam_ops = {
	.read_config = pci_synquacer_ecam_read_config,
	.write_config = pci_synquacer_ecam_write_config,
};

static const struct udevice_id pci_synquacer_ecam_ids[] = {
	{ .compatible = "socionext,synquacer-pcie-ecam" },
	{ }
};

U_BOOT_DRIVER(pci_synquacer_ecam) = {
	.name = "pci_synquacer_ecam",
	.id = UCLASS_PCI,
	.of_match = pci_synquacer_ecam_ids,
	.ops = &pci_synquacer_ecam_ops,
	.probe = pci_synquacer_ecam_probe,
	.of_to_plat = pci_synquacer_ecam_of_to_plat,
	.priv_auto = sizeof(struct synquacer_ecam_pcie),
};