// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2014 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 */

#define LOG_CATEGORY UCLASS_PCI

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <init.h>
#include <log.h>
#include <malloc.h>
#include <pci.h>
#include <spl.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/uclass-internal.h>
#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
#include <asm/fsp/fsp_support.h>
#endif
#include <dt-bindings/pci/pci.h>
#include <linux/delay.h>
#include "pci_internal.h"

DECLARE_GLOBAL_DATA_PTR;
int pci_get_bus(int busnum, struct udevice **busp)
{
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);

	/* Since buses may not be numbered yet try a little harder with bus 0 */
	if (ret == -ENODEV) {
		ret = uclass_first_device_err(UCLASS_PCI, busp);
		if (ret)
			return ret;
		ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
	}

	return ret;
}
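/*
 * Usage sketch (illustrative, not part of this driver): look up bus 0 and
 * read the vendor ID of a device on it. The BDF 0:0.0 is an assumption for
 * the example.
 *
 *	struct udevice *bus;
 *	ulong vendor;
 *
 *	if (!pci_get_bus(0, &bus))
 *		pci_bus_read_config(bus, PCI_BDF(0, 0, 0), PCI_VENDOR_ID,
 *				    &vendor, PCI_SIZE_16);
 */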
struct udevice *pci_get_controller(struct udevice *dev)
{
	while (device_is_on_pci_bus(dev))
		dev = dev->parent;

	return dev;
}
pci_dev_t dm_pci_get_bdf(const struct udevice *dev)
{
	struct pci_child_plat *pplat = dev_get_parent_plat(dev);
	struct udevice *bus = dev->parent;

	/*
	 * This error indicates that @dev is a device on an unprobed PCI bus.
	 * The bus likely has seq == -1, so the PCI_ADD_BUS() macro below
	 * will produce a bad BDF.
	 *
	 * A common cause of this problem is that this function is called in
	 * the of_to_plat() method of @dev. Accessing the PCI bus in that
	 * method is not allowed, since it has not yet been probed. To fix
	 * this, move that access to the probe() method of @dev instead.
	 */
	if (!device_active(bus))
		log_err("PCI: Device '%s' on unprobed bus '%s'\n", dev->name,
			bus->name);
	return PCI_ADD_BUS(dev_seq(bus), pplat->devfn);
}
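/*
 * Usage sketch (illustrative): decompose the BDF of a probed PCI device.
 *
 *	pci_dev_t bdf = dm_pci_get_bdf(dev);
 *
 *	printf("%02x:%02x.%x\n", PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));
 */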
/**
 * pci_get_bus_max() - returns the bus number of the last active bus
 *
 * Return: last bus number, or -1 if no active buses
 */
static int pci_get_bus_max(void)
{
	struct udevice *bus;
	struct uclass *uc;
	int ret = -1;

	ret = uclass_get(UCLASS_PCI, &uc);
	uclass_foreach_dev(bus, uc) {
		if (dev_seq(bus) > ret)
			ret = dev_seq(bus);
	}

	debug("%s: ret=%d\n", __func__, ret);

	return ret;
}

int pci_last_busno(void)
{
	return pci_get_bus_max();
}

int pci_get_ff(enum pci_size_t size)
{
	switch (size) {
	case PCI_SIZE_8:
		return 0xff;
	case PCI_SIZE_16:
		return 0xffff;
	default:
		return 0xffffffff;
	}
}
static void pci_dev_find_ofnode(struct udevice *bus, phys_addr_t bdf,
				ofnode *rnode)
{
	struct fdt_pci_addr addr;
	ofnode node;
	int ret;

	dev_for_each_subnode(node, bus) {
		ret = ofnode_read_pci_addr(node, FDT_PCI_SPACE_CONFIG, "reg",
					   &addr);
		if (ret)
			continue;

		if (PCI_MASK_BUS(addr.phys_hi) != PCI_MASK_BUS(bdf))
			continue;

		*rnode = node;
		break;
	}
}
int pci_bus_find_devfn(const struct udevice *bus, pci_dev_t find_devfn,
		       struct udevice **devp)
{
	struct udevice *dev;

	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		struct pci_child_plat *pplat;

		pplat = dev_get_parent_plat(dev);
		if (pplat && pplat->devfn == find_devfn) {
			*devp = dev;

			return 0;
		}
	}

	return -ENODEV;
}

int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp);
}

static int pci_device_matches_ids(struct udevice *dev,
				  const struct pci_device_id *ids)
{
	struct pci_child_plat *pplat;
	int i;

	pplat = dev_get_parent_plat(dev);
	if (!pplat)
		return -EINVAL;

	for (i = 0; ids[i].vendor != 0; i++) {
		if (pplat->vendor == ids[i].vendor &&
		    pplat->device == ids[i].device)
			return i;
	}

	return -EINVAL;
}

int pci_bus_find_devices(struct udevice *bus, const struct pci_device_id *ids,
			 int *indexp, struct udevice **devp)
{
	struct udevice *dev;

	/* Scan all devices on this bus */
	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		if (pci_device_matches_ids(dev, ids) >= 0) {
			if ((*indexp)-- <= 0) {
				*devp = dev;
				return 0;
			}
		}
	}

	return -ENODEV;
}
int pci_find_device_id(const struct pci_device_id *ids, int index,
		       struct udevice **devp)
{
	struct udevice *bus;

	/* Scan all known buses */
	for (uclass_first_device(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device(&bus)) {
		if (!pci_bus_find_devices(bus, ids, &index, devp))
			return 0;
	}
	*devp = NULL;

	return -ENODEV;
}
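/*
 * Usage sketch (illustrative): find the first device matching an ID table.
 * The vendor/device values are placeholders.
 *
 *	static const struct pci_device_id ids[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ },
 *	};
 *	struct udevice *dev;
 *
 *	if (!pci_find_device_id(ids, 0, &dev))
 *		printf("found %s\n", dev->name);
 */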
static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor,
				  unsigned int device, int *indexp,
				  struct udevice **devp)
{
	struct pci_child_plat *pplat;
	struct udevice *dev;

	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		pplat = dev_get_parent_plat(dev);
		if (pplat->vendor == vendor && pplat->device == device) {
			if (!(*indexp)--) {
				*devp = dev;
				return 0;
			}
		}
	}

	return -ENODEV;
}

int dm_pci_find_device(unsigned int vendor, unsigned int device, int index,
		       struct udevice **devp)
{
	struct udevice *bus;

	/* Scan all known buses */
	for (uclass_first_device(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device(&bus)) {
		if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp))
			return device_probe(*devp);
	}
	*devp = NULL;

	return -ENODEV;
}

int dm_pci_find_class(uint find_class, int index, struct udevice **devp)
{
	struct udevice *dev;

	/* Scan all known buses */
	for (pci_find_first_device(&dev);
	     dev;
	     pci_find_next_device(&dev)) {
		struct pci_child_plat *pplat = dev_get_parent_plat(dev);

		if (pplat->class == find_class && !index--) {
			*devp = dev;
			return device_probe(*devp);
		}
	}
	*devp = NULL;

	return -ENODEV;
}
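/*
 * Usage sketch (illustrative): find and probe the first VGA display
 * controller. dm_pci_find_class() takes the full 24-bit class, so the
 * 16-bit base/sub-class value is shifted left by 8 (assuming a zero
 * programming interface).
 *
 *	struct udevice *dev;
 *
 *	if (!dm_pci_find_class(PCI_CLASS_DISPLAY_VGA << 8, 0, &dev))
 *		printf("VGA device: %s\n", dev->name);
 */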
int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset,
			 unsigned long value, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->write_config)
		return -ENOSYS;
	if (offset < 0 || offset >= 4096)
		return -EINVAL;

	return ops->write_config(bus, bdf, offset, value, size);
}

int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset,
			    u32 clr, u32 set)
{
	ulong val;
	int ret;

	ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32);
}

static int pci_write_config(pci_dev_t bdf, int offset, unsigned long value,
			    enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_write_config(bus, bdf, offset, value, size);
}

int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value,
			enum pci_size_t size)
{
	struct udevice *bus;

	for (bus = dev; device_is_on_pci_bus(bus);)
		bus = bus->parent;

	return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value,
				    size);
}

int pci_write_config32(pci_dev_t bdf, int offset, u32 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_32);
}

int pci_write_config16(pci_dev_t bdf, int offset, u16 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_16);
}

int pci_write_config8(pci_dev_t bdf, int offset, u8 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config8(struct udevice *dev, int offset, u8 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config16(struct udevice *dev, int offset, u16 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_16);
}

int dm_pci_write_config32(struct udevice *dev, int offset, u32 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_32);
}
int pci_bus_read_config(const struct udevice *bus, pci_dev_t bdf, int offset,
			unsigned long *valuep, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->read_config) {
		*valuep = pci_conv_32_to_size(~0, offset, size);
		return -ENOSYS;
	}
	if (offset < 0 || offset >= 4096) {
		*valuep = pci_conv_32_to_size(0, offset, size);
		return -EINVAL;
	}

	return ops->read_config(bus, bdf, offset, valuep, size);
}

static int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep,
			   enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_read_config(bus, bdf, offset, valuep, size);
}

int dm_pci_read_config(const struct udevice *dev, int offset,
		       unsigned long *valuep, enum pci_size_t size)
{
	const struct udevice *bus;

	for (bus = dev; device_is_on_pci_bus(bus);)
		bus = bus->parent;

	return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep,
				   size);
}

int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config8(const struct udevice *dev, int offset, u8 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config16(const struct udevice *dev, int offset, u16 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config32(const struct udevice *dev, int offset, u32 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}
int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u8 val;
	int ret;

	ret = dm_pci_read_config8(dev, offset, &val);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return dm_pci_write_config8(dev, offset, val);
}

int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u16 val;
	int ret;

	ret = dm_pci_read_config16(dev, offset, &val);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return dm_pci_write_config16(dev, offset, val);
}

int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u32 val;
	int ret;

	ret = dm_pci_read_config32(dev, offset, &val);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return dm_pci_write_config32(dev, offset, val);
}
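/*
 * Usage sketch (illustrative): enable memory decode and bus mastering with
 * a single read-modify-write of the command register.
 *
 *	dm_pci_clrset_config16(dev, PCI_COMMAND, 0,
 *			       PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
 */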
static void set_vga_bridge_bits(struct udevice *dev)
{
	struct udevice *parent = dev->parent;
	u16 bc;

	while (dev_seq(parent) != 0) {
		dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc);
		bc |= PCI_BRIDGE_CTL_VGA;
		dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc);
		parent = parent->parent;
	}
}

int pci_auto_config_devices(struct udevice *bus)
{
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	struct pci_child_plat *pplat;
	unsigned int sub_bus;
	struct udevice *dev;
	int ret;

	sub_bus = dev_seq(bus);
	debug("%s: start\n", __func__);
	pciauto_config_init(hose);
	for (ret = device_find_first_child(bus, &dev);
	     !ret && dev;
	     ret = device_find_next_child(&dev)) {
		unsigned int max_bus;
		int ret;

		debug("%s: device %s\n", __func__, dev->name);
		if (dev_has_ofnode(dev) &&
		    dev_read_bool(dev, "pci,no-autoconfig"))
			continue;
		ret = dm_pciauto_config_device(dev);
		if (ret < 0)
			return log_msg_ret("auto", ret);
		max_bus = ret;
		sub_bus = max(sub_bus, max_bus);

		if (dev_get_parent(dev) == bus)
			continue;

		pplat = dev_get_parent_plat(dev);
		if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8))
			set_vga_bridge_bits(dev);
	}
	if (hose->last_busno < sub_bus)
		hose->last_busno = sub_bus;
	debug("%s: done\n", __func__);

	return log_msg_ret("sub", sub_bus);
}
int pci_generic_mmap_write_config(
	const struct udevice *bus,
	int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset,
		      void **addrp),
	pci_dev_t bdf,
	uint offset,
	ulong value,
	enum pci_size_t size)
{
	void *address;

	if (addr_f(bus, bdf, offset, &address) < 0)
		return 0;

	switch (size) {
	case PCI_SIZE_8:
		writeb(value, address);
		return 0;
	case PCI_SIZE_16:
		writew(value, address);
		return 0;
	case PCI_SIZE_32:
		writel(value, address);
		return 0;
	default:
		return -EINVAL;
	}
}

int pci_generic_mmap_read_config(
	const struct udevice *bus,
	int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset,
		      void **addrp),
	pci_dev_t bdf,
	uint offset,
	ulong *valuep,
	enum pci_size_t size)
{
	void *address;

	if (addr_f(bus, bdf, offset, &address) < 0) {
		*valuep = pci_get_ff(size);
		return 0;
	}

	switch (size) {
	case PCI_SIZE_8:
		*valuep = readb(address);
		return 0;
	case PCI_SIZE_16:
		*valuep = readw(address);
		return 0;
	case PCI_SIZE_32:
		*valuep = readl(address);
		return 0;
	default:
		return -EINVAL;
	}
}
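/*
 * Usage sketch (illustrative): an addr_f callback for the generic mmap
 * helpers above, assuming a hypothetical controller driver whose priv
 * struct holds an ECAM base pointer. ECAM places each function's 4-KB
 * config space at (bus << 20 | dev << 15 | func << 12).
 *
 *	static int my_ecam_addr(const struct udevice *bus, pci_dev_t bdf,
 *				uint offset, void **addrp)
 *	{
 *		struct my_pcie_priv *priv = dev_get_priv(bus);
 *
 *		*addrp = priv->ecam_base + (PCI_BUS(bdf) << 20) +
 *			(PCI_DEV(bdf) << 15) + (PCI_FUNC(bdf) << 12) + offset;
 *
 *		return 0;
 *	}
 *
 * A driver's read_config op can then simply forward to
 * pci_generic_mmap_read_config() with this callback.
 */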
int dm_pci_hose_probe_bus(struct udevice *bus)
{
	u8 header_type;
	int sub_bus;
	int ret;
	int ea_pos;
	u8 reg;

	debug("%s\n", __func__);

	dm_pci_read_config8(bus, PCI_HEADER_TYPE, &header_type);
	header_type &= 0x7f;
	if (header_type != PCI_HEADER_TYPE_BRIDGE) {
		debug("%s: Skipping PCI device %d with Non-Bridge Header Type 0x%x\n",
		      __func__, PCI_DEV(dm_pci_get_bdf(bus)), header_type);
		return log_msg_ret("probe", -EINVAL);
	}

	if (IS_ENABLED(CONFIG_PCI_ENHANCED_ALLOCATION))
		ea_pos = dm_pci_find_capability(bus, PCI_CAP_ID_EA);
	else
		ea_pos = 0;

	if (ea_pos) {
		dm_pci_read_config8(bus, ea_pos + sizeof(u32) + sizeof(u8),
				    &reg);
		sub_bus = reg;
	} else {
		sub_bus = pci_get_bus_max() + 1;
	}
	debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
	dm_pciauto_prescan_setup_bridge(bus, sub_bus);

	ret = device_probe(bus);
	if (ret) {
		debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name,
		      ret);
		return log_msg_ret("probe", ret);
	}

	if (!ea_pos)
		sub_bus = pci_get_bus_max();

	dm_pciauto_postscan_setup_bridge(bus, sub_bus);

	return sub_bus;
}
/**
 * pci_match_one_id() - Check if a PCI device matches a PCI device ID entry
 * @id: single PCI device ID entry to match against
 * @find: the PCI device ID of the device being checked
 *
 * Return: true if @find matches @id, false if there is no match
 */
static bool pci_match_one_id(const struct pci_device_id *id,
			     const struct pci_device_id *find)
{
	if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) &&
	    (id->device == PCI_ANY_ID || id->device == find->device) &&
	    (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) &&
	    (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) &&
	    !((id->class ^ find->class) & id->class_mask))
		return true;

	return false;
}
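/*
 * Illustrative match entry: PCI_ANY_ID wildcards any field, so this entry
 * (the vendor ID is a placeholder) matches every device of one vendor. With
 * a class_mask of 0, the class check always passes.
 *
 *	static const struct pci_device_id ids[] = {
 *		{ .vendor = 0x1234, .device = PCI_ANY_ID,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ },
 *	};
 */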
/**
 * pci_need_device_pre_reloc() - Check if a device should be bound
 *
 * This checks a list of vendor/device-ID values indicating devices that should
 * be bound before relocation.
 *
 * @bus: Bus to check
 * @vendor: Vendor ID to check
 * @device: Device ID to check
 * Return: true if the vendor/device is in the list, false if not
 */
static bool pci_need_device_pre_reloc(struct udevice *bus, uint vendor,
				      uint device)
{
	u32 vendev;
	int index;

	if (spl_phase() == PHASE_SPL && CONFIG_IS_ENABLED(PCI_PNP))
		return true;

	for (index = 0;
	     !dev_read_u32_index(bus, "u-boot,pci-pre-reloc", index,
				 &vendev);
	     index++) {
		if (vendev == PCI_VENDEV(vendor, device))
			return true;
	}

	return false;
}
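/*
 * Illustrative device tree snippet (vendor/device values are placeholders):
 * the "u-boot,pci-pre-reloc" property lists PCI_VENDEV() pairs that must be
 * bound before relocation.
 *
 *	pcie@0 {
 *		u-boot,pci-pre-reloc = <PCI_VENDEV(0x1234, 0x5678)>;
 *	};
 */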
/**
 * pci_find_and_bind_driver() - Find and bind the right PCI driver
 *
 * This only looks at certain fields in the descriptor.
 *
 * @parent: Parent bus
 * @find_id: Specification of the driver to find
 * @bdf: Bus/device/function address - see PCI_BDF()
 * @devp: Returns a pointer to the device created
 * Return: 0 if OK, -EPERM if the device is not needed before relocation and
 *	   therefore was not created, other -ve value on error
 */
static int pci_find_and_bind_driver(struct udevice *parent,
				    struct pci_device_id *find_id,
				    pci_dev_t bdf, struct udevice **devp)
{
	struct pci_driver_entry *start, *entry;
	ofnode node = ofnode_null();
	const char *drv;
	int n_ents;
	int ret;
	char name[30], *str;
	bool bridge;

	*devp = NULL;

	debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__,
	      find_id->vendor, find_id->device);

	/* Determine optional OF node */
	if (ofnode_valid(dev_ofnode(parent)))
		pci_dev_find_ofnode(parent, bdf, &node);

	if (ofnode_valid(node) && !ofnode_is_enabled(node)) {
		debug("%s: Ignoring disabled device\n", __func__);
		return log_msg_ret("dis", -EPERM);
	}

	start = ll_entry_start(struct pci_driver_entry, pci_driver_entry);
	n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry);
	for (entry = start; entry != start + n_ents; entry++) {
		const struct pci_device_id *id;
		struct udevice *dev;
		const struct driver *drv;

		for (id = entry->match;
		     id->vendor || id->subvendor || id->class_mask;
		     id++) {
			if (!pci_match_one_id(id, find_id))
				continue;

			drv = entry->driver;

			/*
			 * In the pre-relocation phase, we only bind devices
			 * whose driver has the DM_FLAG_PRE_RELOC set, to save
			 * precious memory space, as on some platforms that
			 * space is quite limited (e.g. when using Cache As
			 * RAM).
			 */
			if (!(gd->flags & GD_FLG_RELOC) &&
			    !(drv->flags & DM_FLAG_PRE_RELOC) &&
			    (!CONFIG_IS_ENABLED(PCI_PNP) ||
			     spl_phase() != PHASE_SPL))
				return log_msg_ret("pre", -EPERM);

			/*
			 * We could pass the descriptor to the driver as
			 * plat (instead of NULL) and allow its bind()
			 * method to return -ENOENT if it doesn't support this
			 * device. That way we could continue the search to
			 * find another driver. For now this doesn't seem
			 * necessary, so just bind the first match.
			 */
			ret = device_bind(parent, drv, drv->name, NULL, node,
					  &dev);
			if (ret)
				goto error;
			debug("%s: Match found: %s\n", __func__, drv->name);
			dev->driver_data = id->driver_data;
			*devp = dev;
			return 0;
		}
	}

	bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI;
	/*
	 * In the pre-relocation phase, we only bind bridge devices to save
	 * precious memory space, as on some platforms that space is quite
	 * limited (e.g. when using Cache As RAM).
	 */
	if (!(gd->flags & GD_FLG_RELOC) && !bridge &&
	    !pci_need_device_pre_reloc(parent, find_id->vendor,
				       find_id->device))
		return log_msg_ret("notbr", -EPERM);

	/* Bind a generic driver so that the device can be used */
	sprintf(name, "pci_%x:%x.%x", dev_seq(parent), PCI_DEV(bdf),
		PCI_FUNC(bdf));
	str = strdup(name);
	if (!str)
		return -ENOMEM;
	drv = bridge ? "pci_bridge_drv" : "pci_generic_drv";

	ret = device_bind_driver_to_node(parent, drv, str, node, devp);
	if (ret) {
		debug("%s: Failed to bind generic driver: %d\n", __func__, ret);
		free(str);
		return ret;
	}
	debug("%s: No match found: bound generic driver instead\n", __func__);

	return 0;

error:
	debug("%s: No match found: error %d\n", __func__, ret);
	return ret;
}
__weak extern void board_pci_fixup_dev(struct udevice *bus, struct udevice *dev)
{
}

int pci_bind_bus_devices(struct udevice *bus)
{
	ulong vendor, device;
	ulong header_type;
	pci_dev_t bdf, end;
	bool found_multi;
	int ari_off;
	int ret;

	found_multi = false;
	end = PCI_BDF(dev_seq(bus), PCI_MAX_PCI_DEVICES - 1,
		      PCI_MAX_PCI_FUNCTIONS - 1);
	for (bdf = PCI_BDF(dev_seq(bus), 0, 0); bdf <= end;
	     bdf += PCI_BDF(0, 0, 1)) {
		struct pci_child_plat *pplat;
		struct udevice *dev;
		ulong class;

		if (!PCI_FUNC(bdf))
			found_multi = false;
		if (PCI_FUNC(bdf) && !found_multi)
			continue;

		/* Check only the first access, we don't expect problems */
		ret = pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor,
					  PCI_SIZE_16);
		if (ret || vendor == 0xffff || vendor == 0x0000)
			continue;

		pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE,
				    &header_type, PCI_SIZE_8);

		if (!PCI_FUNC(bdf))
			found_multi = header_type & 0x80;

		debug("%s: bus %d/%s: found device %x, function %d", __func__,
		      dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
		pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device,
				    PCI_SIZE_16);
		pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class,
				    PCI_SIZE_32);
		class >>= 8;

		/* Find this device in the device tree */
		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);
		debug(": find ret=%d\n", ret);

		/* If nothing in the device tree, bind a device */
		if (ret == -ENODEV) {
			struct pci_device_id find_id;
			ulong val;

			memset(&find_id, '\0', sizeof(find_id));
			find_id.vendor = vendor;
			find_id.device = device;
			find_id.class = class;
			if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) {
				pci_bus_read_config(bus, bdf,
						    PCI_SUBSYSTEM_VENDOR_ID,
						    &val, PCI_SIZE_32);
				find_id.subvendor = val & 0xffff;
				find_id.subdevice = val >> 16;
			}
			ret = pci_find_and_bind_driver(bus, &find_id, bdf,
						       &dev);
		} else {
			debug("device: %s\n", dev->name);
		}
		if (ret == -EPERM)
			continue;
		else if (ret)
			return ret;

		/* Update the platform data */
		pplat = dev_get_parent_plat(dev);
		pplat->devfn = PCI_MASK_BUS(bdf);
		pplat->vendor = vendor;
		pplat->device = device;
		pplat->class = class;

		if (IS_ENABLED(CONFIG_PCI_ARID)) {
			ari_off = dm_pci_find_ext_capability(dev,
							     PCI_EXT_CAP_ID_ARI);
			if (ari_off) {
				u16 ari_cap;

				/*
				 * Read Next Function number in ARI Cap
				 * Register
				 */
				dm_pci_read_config16(dev, ari_off + 4,
						     &ari_cap);
				/*
				 * Update next scan on this function number,
				 * subtract 1 in BDF to satisfy loop increment.
				 */
				if (ari_cap & 0xff00) {
					bdf = PCI_BDF(PCI_BUS(bdf),
						      PCI_DEV(ari_cap),
						      PCI_FUNC(ari_cap));
					bdf = bdf - 0x100;
				}
			}
		}

		board_pci_fixup_dev(bus, dev);
	}

	return 0;
}
static int decode_regions(struct pci_controller *hose, ofnode parent_node,
			  ofnode node)
{
	int pci_addr_cells, addr_cells, size_cells;
	int cells_per_record;
	struct bd_info *bd;
	const u32 *prop;
	int max_regions;
	int len;
	int i;

	/* handle booting from coreboot, etc. */
	if (!ll_boot_init())
		return 0;

	prop = ofnode_get_property(node, "ranges", &len);
	if (!prop) {
		debug("%s: Cannot decode regions\n", __func__);
		return -EINVAL;
	}

	pci_addr_cells = ofnode_read_simple_addr_cells(node);
	addr_cells = ofnode_read_simple_addr_cells(parent_node);
	size_cells = ofnode_read_simple_size_cells(node);

	/* PCI addresses are always 3-cells */
	len /= sizeof(u32);
	cells_per_record = pci_addr_cells + addr_cells + size_cells;
	hose->region_count = 0;
	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
	      cells_per_record);

	/* Dynamically allocate the regions array */
	max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS;
	hose->regions = (struct pci_region *)
		calloc(1, max_regions * sizeof(struct pci_region));
	if (!hose->regions)
		return -ENOMEM;

	for (i = 0; i < max_regions; i++, len -= cells_per_record) {
		u64 pci_addr, addr, size;
		int space_code;
		u32 flags;
		int type;
		int pos;

		if (len < cells_per_record)
			break;
		flags = fdt32_to_cpu(prop[0]);
		space_code = (flags >> 24) & 3;
		pci_addr = fdtdec_get_number(prop + 1, 2);
		prop += pci_addr_cells;
		addr = fdtdec_get_number(prop, addr_cells);
		prop += addr_cells;
		size = fdtdec_get_number(prop, size_cells);
		prop += size_cells;
		debug("%s: region %d, pci_addr=%llx, addr=%llx, size=%llx, space_code=%d\n",
		      __func__, hose->region_count, pci_addr, addr, size,
		      space_code);
		if (space_code & 2) {
			type = flags & (1U << 30) ? PCI_REGION_PREFETCH :
					PCI_REGION_MEM;
		} else if (space_code & 1) {
			type = PCI_REGION_IO;
		} else {
			continue;
		}

		if (!IS_ENABLED(CONFIG_SYS_PCI_64BIT) &&
		    type == PCI_REGION_MEM && upper_32_bits(pci_addr)) {
			debug(" - pci_addr beyond the 32-bit boundary, ignoring\n");
			continue;
		}
		if (!IS_ENABLED(CONFIG_PHYS_64BIT) && upper_32_bits(addr)) {
			debug(" - addr beyond the 32-bit boundary, ignoring\n");
			continue;
		}
		if (~((pci_addr_t)0) - pci_addr < size) {
			debug(" - PCI range exceeds max address, ignoring\n");
			continue;
		}
		if (~((phys_addr_t)0) - addr < size) {
			debug(" - phys range exceeds max address, ignoring\n");
			continue;
		}

		pos = -1;
		if (!IS_ENABLED(CONFIG_PCI_REGION_MULTI_ENTRY)) {
			/* Use a separate index so the outer loop is unaffected */
			int j;

			for (j = 0; j < hose->region_count; j++) {
				if (hose->regions[j].flags == type)
					pos = j;
			}
		}
		if (pos == -1)
			pos = hose->region_count++;
		debug(" - type=%d, pos=%d\n", type, pos);
		pci_set_region(hose->regions + pos, pci_addr, addr, size, type);
	}

	/* Add a region for our local memory */
	bd = gd->bd;
	if (!bd)
		return 0;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) {
		if (bd->bi_dram[i].size) {
			phys_addr_t start = bd->bi_dram[i].start;

			if (IS_ENABLED(CONFIG_PCI_MAP_SYSTEM_MEMORY))
				start = virt_to_phys((void *)(uintptr_t)bd->bi_dram[i].start);

			pci_set_region(hose->regions + hose->region_count++,
				       start, start, bd->bi_dram[i].size,
				       PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
		}
	}

	return 0;
}
static int pci_uclass_pre_probe(struct udevice *bus)
{
	struct pci_controller *hose;
	struct uclass *uc;
	int ret;

	debug("%s, bus=%d/%s, parent=%s\n", __func__, dev_seq(bus), bus->name,
	      bus->parent->name);
	hose = dev_get_uclass_priv(bus);

	/*
	 * Set the sequence number, if device_bind() doesn't. We want control
	 * of this so that numbers are allocated as devices are probed. That
	 * ensures that sub-bus numbering is correct (sub-buses must get
	 * numbers higher than their parents)
	 */
	if (dev_seq(bus) == -1) {
		ret = uclass_get(UCLASS_PCI, &uc);
		if (ret)
			return ret;
		bus->seq_ = uclass_find_next_free_seq(uc);
	}

	/* For bridges, use the top-level PCI controller */
	if (!device_is_on_pci_bus(bus)) {
		hose->ctlr = bus;
		ret = decode_regions(hose, dev_ofnode(bus->parent),
				     dev_ofnode(bus));
		if (ret)
			return ret;
	} else {
		struct pci_controller *parent_hose;

		parent_hose = dev_get_uclass_priv(bus->parent);
		hose->ctlr = parent_hose->bus;
	}

	hose->bus = bus;
	hose->first_busno = dev_seq(bus);
	hose->last_busno = dev_seq(bus);
	if (dev_has_ofnode(bus)) {
		hose->skip_auto_config_until_reloc =
			dev_read_bool(bus,
				      "u-boot,skip-auto-config-until-reloc");
	}

	return 0;
}
static int pci_uclass_post_probe(struct udevice *bus)
{
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	int ret;

	debug("%s: probing bus %d\n", __func__, dev_seq(bus));
	ret = pci_bind_bus_devices(bus);
	if (ret)
		return log_msg_ret("bind", ret);

	if (CONFIG_IS_ENABLED(PCI_PNP) && ll_boot_init() &&
	    (!hose->skip_auto_config_until_reloc ||
	     (gd->flags & GD_FLG_RELOC))) {
		ret = pci_auto_config_devices(bus);
		if (ret < 0)
			return log_msg_ret("cfg", ret);
	}

#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
	/*
	 * Per Intel FSP specification, we should call FSP notify API to
	 * inform FSP that PCI enumeration has been done so that FSP will
	 * do any necessary initialization as required by the chipset's
	 * BIOS Writer's Guide (BWG).
	 *
	 * Unfortunately we have to put this call here as with driver model,
	 * the enumeration is all done on a lazy basis as needed, so until
	 * something is touched on PCI it won't happen.
	 *
	 * Note we only call this 1) after U-Boot is relocated, and 2)
	 * root bus has finished probing.
	 */
	if ((gd->flags & GD_FLG_RELOC) && dev_seq(bus) == 0 && ll_boot_init()) {
		ret = fsp_init_phase_pci();
		if (ret)
			return log_msg_ret("fsp", ret);
	}
#endif

	return 0;
}
static int pci_uclass_child_post_bind(struct udevice *dev)
{
	struct pci_child_plat *pplat;

	if (!dev_has_ofnode(dev))
		return 0;

	pplat = dev_get_parent_plat(dev);

	/* Extract vendor id and device id if available */
	ofnode_read_pci_vendev(dev_ofnode(dev), &pplat->vendor, &pplat->device);

	/* Extract the devfn from fdt_pci_addr */
	pplat->devfn = pci_get_devfn(dev);

	return 0;
}

static int pci_bridge_read_config(const struct udevice *bus, pci_dev_t bdf,
				  uint offset, ulong *valuep,
				  enum pci_size_t size)
{
	struct pci_controller *hose = dev_get_uclass_priv(bus);

	return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size);
}

static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf,
				   uint offset, ulong value,
				   enum pci_size_t size)
{
	struct pci_controller *hose = dev_get_uclass_priv(bus);

	return pci_bus_write_config(hose->ctlr, bdf, offset, value, size);
}
static int skip_to_next_device(struct udevice *bus, struct udevice **devp)
{
	struct udevice *dev;

	/*
	 * Scan through all the PCI controllers. On x86 there will only be one
	 * but that is not necessarily true on other hardware.
	 */
	while (bus) {
		device_find_first_child(bus, &dev);
		if (dev) {
			*devp = dev;
			return 0;
		}
		uclass_next_device(&bus);
	}

	return 0;
}

int pci_find_next_device(struct udevice **devp)
{
	struct udevice *child = *devp;
	struct udevice *bus = child->parent;

	/* First try all the siblings */
	*devp = NULL;
	while (child) {
		device_find_next_child(&child);
		if (child) {
			*devp = child;
			return 0;
		}
	}

	/* We ran out of siblings. Try the next bus */
	uclass_next_device(&bus);

	return bus ? skip_to_next_device(bus, devp) : 0;
}

int pci_find_first_device(struct udevice **devp)
{
	struct udevice *bus;

	*devp = NULL;
	uclass_first_device(UCLASS_PCI, &bus);

	return skip_to_next_device(bus, devp);
}
ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size)
{
	switch (size) {
	case PCI_SIZE_8:
		return (value >> ((offset & 3) * 8)) & 0xff;
	case PCI_SIZE_16:
		return (value >> ((offset & 2) * 8)) & 0xffff;
	default:
		return value;
	}
}

ulong pci_conv_size_to_32(ulong old, ulong value, uint offset,
			  enum pci_size_t size)
{
	uint off_mask;
	uint val_mask, shift;
	ulong ldata, mask;

	switch (size) {
	case PCI_SIZE_8:
		off_mask = 3;
		val_mask = 0xff;
		break;
	case PCI_SIZE_16:
		off_mask = 2;
		val_mask = 0xffff;
		break;
	default:
		return value;
	}
	shift = (offset & off_mask) * 8;
	ldata = (value & val_mask) << shift;
	mask = val_mask << shift;
	value = (old & ~mask) | ldata;

	return value;
}
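/*
 * Worked example (illustrative): a 32-bit config read at an aligned offset
 * returns 0x12345678. Extracting the byte at offset 2 shifts right by
 * (2 & 3) * 8 = 16 bits and masks:
 *
 *	pci_conv_32_to_size(0x12345678, 2, PCI_SIZE_8) == 0x34
 *
 * Merging a byte back into the old dword is the inverse:
 *
 *	pci_conv_size_to_32(0x12345678, 0xab, 2, PCI_SIZE_8) == 0x12ab5678
 */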
int pci_get_dma_regions(struct udevice *dev, struct pci_region *memp, int index)
{
	int pci_addr_cells, addr_cells, size_cells;
	int cells_per_record;
	const u32 *prop;
	int len;
	int i = 0;

	prop = ofnode_get_property(dev_ofnode(dev), "dma-ranges", &len);
	if (!prop) {
		log_err("PCI: Device '%s': Cannot decode dma-ranges\n",
			dev->name);
		return -EINVAL;
	}

	pci_addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev));
	addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev->parent));
	size_cells = ofnode_read_simple_size_cells(dev_ofnode(dev));

	/* PCI addresses are always 3-cells */
	len /= sizeof(u32);
	cells_per_record = pci_addr_cells + addr_cells + size_cells;
	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
	      cells_per_record);

	while (len) {
		memp->bus_start = fdtdec_get_number(prop + 1, 2);
		prop += pci_addr_cells;
		memp->phys_start = fdtdec_get_number(prop, addr_cells);
		prop += addr_cells;
		memp->size = fdtdec_get_number(prop, size_cells);
		prop += size_cells;

		if (i == index)
			return 0;
		i++;
		len -= cells_per_record;
	}

	return -EINVAL;
}
int pci_get_regions(struct udevice *dev, struct pci_region **iop,
		    struct pci_region **memp, struct pci_region **prefp)
{
	struct udevice *bus = pci_get_controller(dev);
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	int i;

	*iop = NULL;
	*memp = NULL;
	*prefp = NULL;
	for (i = 0; i < hose->region_count; i++) {
		switch (hose->regions[i].flags) {
		case PCI_REGION_IO:
			if (!*iop || (*iop)->size < hose->regions[i].size)
				*iop = hose->regions + i;
			break;
		case PCI_REGION_MEM:
			if (!*memp || (*memp)->size < hose->regions[i].size)
				*memp = hose->regions + i;
			break;
		case (PCI_REGION_MEM | PCI_REGION_PREFETCH):
			if (!*prefp || (*prefp)->size < hose->regions[i].size)
				*prefp = hose->regions + i;
			break;
		}
	}

	return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL);
}
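/*
 * Usage sketch (illustrative): pick the largest prefetchable region,
 * falling back to plain memory when the controller has none.
 *
 *	struct pci_region *io, *mem, *pref;
 *
 *	pci_get_regions(dev, &io, &mem, &pref);
 *	if (!pref)
 *		pref = mem;
 */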
u32 dm_pci_read_bar32(const struct udevice *dev, int barnum)
{
	u32 addr;
	int bar;

	bar = PCI_BASE_ADDRESS_0 + barnum * 4;
	dm_pci_read_config32(dev, bar, &addr);

	/*
	 * If we get an invalid address, return this so that comparisons with
	 * FDT_ADDR_T_NONE work correctly
	 */
	if (addr == 0xffffffff)
		return addr;
	else if (addr & PCI_BASE_ADDRESS_SPACE_IO)
		return addr & PCI_BASE_ADDRESS_IO_MASK;
	else
		return addr & PCI_BASE_ADDRESS_MEM_MASK;
}

void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr)
{
	int bar;

	bar = PCI_BASE_ADDRESS_0 + barnum * 4;
	dm_pci_write_config32(dev, bar, addr);
}

phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr,
			       size_t len, unsigned long mask,
			       unsigned long flags)
{
	struct udevice *ctlr;
	struct pci_controller *hose;
	struct pci_region *res;
	pci_addr_t offset;
	int i;

	/* The root controller has the region information */
	ctlr = pci_get_controller(dev);
	hose = dev_get_uclass_priv(ctlr);

	if (hose->region_count == 0)
		return bus_addr;

	for (i = 0; i < hose->region_count; i++) {
		res = &hose->regions[i];

		if ((res->flags & mask) != flags)
			continue;

		if (bus_addr < res->bus_start)
			continue;

		offset = bus_addr - res->bus_start;
		if (offset >= res->size)
			continue;

		if (len > res->size - offset)
			continue;

		return res->phys_start + offset;
	}

	puts("pci_hose_bus_to_phys: invalid physical address\n");
	return 0;
}

pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
			      size_t len, unsigned long mask,
			      unsigned long flags)
{
	struct udevice *ctlr;
	struct pci_controller *hose;
	struct pci_region *res;
	phys_addr_t offset;
	int i;

	/* The root controller has the region information */
	ctlr = pci_get_controller(dev);
	hose = dev_get_uclass_priv(ctlr);

	if (hose->region_count == 0)
		return phys_addr;

	for (i = 0; i < hose->region_count; i++) {
		res = &hose->regions[i];

		if ((res->flags & mask) != flags)
			continue;

		if (phys_addr < res->phys_start)
			continue;

		offset = phys_addr - res->phys_start;
		if (offset >= res->size)
			continue;

		if (len > res->size - offset)
			continue;

		return res->bus_start + offset;
	}

	puts("pci_hose_phys_to_bus: invalid physical address\n");
	return 0;
}
static phys_addr_t dm_pci_map_ea_virt(struct udevice *dev, int ea_off,
				      struct pci_child_plat *pdata)
{
	phys_addr_t addr = 0;

	/*
	 * In the case of a Virtual Function device using BAR
	 * base and size, add offset for VFn BAR(1, 2, 3...n)
	 */
	if (pdata->is_virtfn) {
		size_t sz;
		u32 ea_entry;

		/* MaxOffset, 1st DW */
		dm_pci_read_config32(dev, ea_off + 8, &ea_entry);
		sz = ea_entry & PCI_EA_FIELD_MASK;
		/* Fill up lower 2 bits */
		sz |= (~PCI_EA_FIELD_MASK);

		if (ea_entry & PCI_EA_IS_64) {
			/* MaxOffset 2nd DW */
			dm_pci_read_config32(dev, ea_off + 16, &ea_entry);
			sz |= ((u64)ea_entry) << 32;
		}

		addr = (pdata->virtid - 1) * (sz + 1);
	}

	return addr;
}

static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, size_t offset,
			       size_t len, int ea_off,
			       struct pci_child_plat *pdata)
{
	int ea_cnt, i, entry_size;
	int bar_id = (bar - PCI_BASE_ADDRESS_0) >> 2;
	u32 ea_entry;
	phys_addr_t addr;

	if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
		/*
		 * In the case of a Virtual Function device, @dev is the
		 * Physical Function, so @pdata points to the VF-specific
		 * data.
		 */
		if (pdata->is_virtfn)
			bar_id += PCI_EA_BEI_VF_BAR0;
	}

	/* EA capability structure header */
	dm_pci_read_config32(dev, ea_off, &ea_entry);
	ea_cnt = (ea_entry >> 16) & PCI_EA_NUM_ENT_MASK;
	ea_off += PCI_EA_FIRST_ENT;

	for (i = 0; i < ea_cnt; i++, ea_off += entry_size) {
		/* Entry header */
		dm_pci_read_config32(dev, ea_off, &ea_entry);
		entry_size = ((ea_entry & PCI_EA_ES) + 1) << 2;

		if (((ea_entry & PCI_EA_BEI) >> 4) != bar_id)
			continue;

		/* Base address, 1st DW */
		dm_pci_read_config32(dev, ea_off + 4, &ea_entry);
		addr = ea_entry & PCI_EA_FIELD_MASK;
		if (ea_entry & PCI_EA_IS_64) {
			/* Base address, 2nd DW, skip over 4B MaxOffset */
			dm_pci_read_config32(dev, ea_off + 12, &ea_entry);
			addr |= ((u64)ea_entry) << 32;
		}

		if (IS_ENABLED(CONFIG_PCI_SRIOV))
			addr += dm_pci_map_ea_virt(dev, ea_off, pdata);

		if (~((phys_addr_t)0) - addr < offset)
			return NULL;

		/* size ignored for now */
		return map_physmem(addr + offset, len, MAP_NOCACHE);
	}

	return NULL;
}
void *dm_pci_map_bar(struct udevice *dev, int bar, size_t offset, size_t len,
		     unsigned long mask, unsigned long flags)
{
	struct pci_child_plat *pdata = dev_get_parent_plat(dev);
	struct udevice *udev = dev;
	pci_addr_t pci_bus_addr;
	u32 bar_response;
	int ea_off;

	if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
		/*
		 * In the case of Virtual Function devices, use the PF udevice,
		 * as the EA capability is defined in the Physical Function
		 */
		if (pdata->is_virtfn)
			udev = pdata->pfdev;
	}

	/*
	 * If the function supports Enhanced Allocation, use that instead of
	 * BARs.
	 * In the case of virtual functions, pdata will help read the VF BEI
	 * and EA entry size.
	 */
	if (IS_ENABLED(CONFIG_PCI_ENHANCED_ALLOCATION))
		ea_off = dm_pci_find_capability(udev, PCI_CAP_ID_EA);
	else
		ea_off = 0;

	if (ea_off)
		return dm_pci_map_ea_bar(udev, bar, offset, len, ea_off, pdata);

	/* read BAR address */
	dm_pci_read_config32(udev, bar, &bar_response);
	pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);

	if (~((pci_addr_t)0) - pci_bus_addr < offset)
		return NULL;

	/*
	 * Forward the length argument to dm_pci_bus_to_virt. The length will
	 * be used to check that the entire address range has been declared as
	 * a PCI range, but a better check would be to probe for the size of
	 * the bar and prevent overflow more locally.
	 */
	return dm_pci_bus_to_virt(udev, pci_bus_addr + offset, len, mask, flags,
				  MAP_NOCACHE);
}
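/*
 * Usage sketch (illustrative): map the start of a device's first memory
 * BAR. A len of 0 makes the range check trivially pass; a real caller
 * would pass the size of the register block it intends to access.
 *
 *	void *regs;
 *
 *	regs = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
 *			      PCI_REGION_TYPE, PCI_REGION_MEM);
 */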
static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;
	u8 id;
	u16 ent;

	dm_pci_read_config8(dev, pos, &pos);

	while (ttl--) {
		if (pos < PCI_STD_HEADER_SIZEOF)
			break;
		pos &= ~3;
		dm_pci_read_config16(dev, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}

	return 0;
}

int dm_pci_find_next_capability(struct udevice *dev, u8 start, int cap)
{
	return _dm_pci_find_next_capability(dev, start + PCI_CAP_LIST_NEXT,
					    cap);
}

int dm_pci_find_capability(struct udevice *dev, int cap)
{
	u16 status;
	u8 header_type;
	u8 pos;

	dm_pci_read_config16(dev, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	dm_pci_read_config8(dev, PCI_HEADER_TYPE, &header_type);
	if ((header_type & 0x7f) == PCI_HEADER_TYPE_CARDBUS)
		pos = PCI_CB_CAPABILITY_LIST;
	else
		pos = PCI_CAPABILITY_LIST;

	return _dm_pci_find_next_capability(dev, pos, cap);
}
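/*
 * Usage sketch (illustrative): locate the PCI Express capability and read
 * the PCIe capabilities register that follows the capability header.
 *
 *	int pos = dm_pci_find_capability(dev, PCI_CAP_ID_EXP);
 *	u16 flags;
 *
 *	if (pos)
 *		dm_pci_read_config16(dev, pos + PCI_EXP_FLAGS, &flags);
 */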
int dm_pci_find_next_ext_capability(struct udevice *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (start)
		pos = start;

	dm_pci_read_config32(dev, pos, &header);
	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl--) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		dm_pci_read_config32(dev, pos, &header);
	}

	return 0;
}

int dm_pci_find_ext_capability(struct udevice *dev, int cap)
{
	return dm_pci_find_next_ext_capability(dev, 0, cap);
}

int dm_pci_flr(struct udevice *dev)
{
	int pcie_off;
	u32 cap;

	/* look for PCI Express Capability */
	pcie_off = dm_pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pcie_off)
		return -ENOENT;

	/* check FLR capability */
	dm_pci_read_config32(dev, pcie_off + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOENT;

	dm_pci_clrset_config16(dev, pcie_off + PCI_EXP_DEVCTL, 0,
			       PCI_EXP_DEVCTL_BCR_FLR);

	/* wait 100ms, per PCI spec */
	mdelay(100);

	return 0;
}
#if defined(CONFIG_PCI_SRIOV)
int pci_sriov_init(struct udevice *pdev, int vf_en)
{
	u16 vendor, device;
	struct udevice *bus;
	struct udevice *dev;
	pci_dev_t bdf;
	u16 ctrl;
	u16 num_vfs;
	u16 total_vf;
	u16 vf_offset;
	u16 vf_stride;
	int vf, ret;
	int pos;

	pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		debug("Error: SRIOV capability not found\n");
		return -ENOENT;
	}

	dm_pci_read_config16(pdev, pos + PCI_SRIOV_CTRL, &ctrl);

	dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);
	if (vf_en > total_vf)
		vf_en = total_vf;
	dm_pci_write_config16(pdev, pos + PCI_SRIOV_NUM_VF, vf_en);

	ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	dm_pci_write_config16(pdev, pos + PCI_SRIOV_CTRL, ctrl);

	dm_pci_read_config16(pdev, pos + PCI_SRIOV_NUM_VF, &num_vfs);
	if (num_vfs > vf_en)
		num_vfs = vf_en;

	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_OFFSET, &vf_offset);
	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_STRIDE, &vf_stride);

	dm_pci_read_config16(pdev, PCI_VENDOR_ID, &vendor);
	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_DID, &device);

	bdf = dm_pci_get_bdf(pdev);

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	bdf += PCI_BDF(0, 0, vf_offset);

	for (vf = 0; vf < num_vfs; vf++) {
		struct pci_child_plat *pplat;
		ulong class;

		pci_bus_read_config(bus, bdf, PCI_CLASS_DEVICE,
				    &class, PCI_SIZE_16);

		debug("%s: bus %d/%s: found VF %x:%x\n", __func__,
		      dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));

		/* Find this device in the device tree */
		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);

		if (ret == -ENODEV) {
			struct pci_device_id find_id;

			memset(&find_id, '\0', sizeof(find_id));
			find_id.vendor = vendor;
			find_id.device = device;
			find_id.class = class;

			ret = pci_find_and_bind_driver(bus, &find_id,
						       bdf, &dev);
			if (ret)
				return ret;
		}

		/* Update the platform data */
		pplat = dev_get_parent_plat(dev);
		pplat->devfn = PCI_MASK_BUS(bdf);
		pplat->vendor = vendor;
		pplat->device = device;
		pplat->class = class;
		pplat->is_virtfn = true;
		pplat->pfdev = pdev;
		pplat->virtid = vf * vf_stride + vf_offset;

		debug("%s: bus %d/%s: found VF %x:%x %x:%x class %lx id %x\n",
		      __func__, dev_seq(dev), dev->name, PCI_DEV(bdf),
		      PCI_FUNC(bdf), vendor, device, class, pplat->virtid);
		bdf += PCI_BDF(0, 0, vf_stride);
	}

	return 0;
}
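/*
 * Usage sketch (illustrative): enable up to four Virtual Functions on a
 * Physical Function, bounded by what the hardware reports.
 *
 *	int total = pci_sriov_get_totalvfs(pdev);
 *
 *	if (total > 0)
 *		pci_sriov_init(pdev, min(total, 4));
 */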
int pci_sriov_get_totalvfs(struct udevice *pdev)
{
	u16 total_vf;
	int pos;

	pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		debug("Error: SRIOV capability not found\n");
		return -ENOENT;
	}

	dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);

	return total_vf;
}
#endif /* SRIOV */
UCLASS_DRIVER(pci) = {
	.id		= UCLASS_PCI,
	.name		= "pci",
	.flags		= DM_UC_FLAG_SEQ_ALIAS | DM_UC_FLAG_NO_AUTO_SEQ,
	.post_bind	= dm_scan_fdt_dev,
	.pre_probe	= pci_uclass_pre_probe,
	.post_probe	= pci_uclass_post_probe,
	.child_post_bind = pci_uclass_child_post_bind,
	.per_device_auto	= sizeof(struct pci_controller),
	.per_child_plat_auto	= sizeof(struct pci_child_plat),
};

static const struct dm_pci_ops pci_bridge_ops = {
	.read_config	= pci_bridge_read_config,
	.write_config	= pci_bridge_write_config,
};

static const struct udevice_id pci_bridge_ids[] = {
	{ .compatible = "pci-bridge" },
	{ }
};

U_BOOT_DRIVER(pci_bridge_drv) = {
	.name		= "pci_bridge_drv",
	.id		= UCLASS_PCI,
	.of_match	= pci_bridge_ids,
	.ops		= &pci_bridge_ops,
};

UCLASS_DRIVER(pci_generic) = {
	.id		= UCLASS_PCI_GENERIC,
	.name		= "pci_generic",
};

static const struct udevice_id pci_generic_ids[] = {
	{ .compatible = "pci-generic" },
	{ }
};

U_BOOT_DRIVER(pci_generic_drv) = {
	.name		= "pci_generic_drv",
	.id		= UCLASS_PCI_GENERIC,
	.of_match	= pci_generic_ids,
};

int pci_init(void)
{
	struct udevice *bus;

	/*
	 * Enumerate all known controller devices. Enumeration has the side-
	 * effect of probing them, so PCIe devices will be enumerated too.
	 */
	for (uclass_first_device_check(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device_check(&bus)) {
		;
	}

	return 0;
}