/*
 * Contains common pci routines for ALL ppc platform
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 * Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
  18. #include <linux/kernel.h>
  19. #include <linux/pci.h>
  20. #include <linux/string.h>
  21. #include <linux/init.h>
  22. #include <linux/bootmem.h>
  23. #include <linux/mm.h>
  24. #include <linux/shmem_fs.h>
  25. #include <linux/list.h>
  26. #include <linux/syscalls.h>
  27. #include <linux/irq.h>
  28. #include <linux/vmalloc.h>
  29. #include <linux/slab.h>
  30. #include <linux/of.h>
  31. #include <linux/of_address.h>
  32. #include <linux/of_irq.h>
  33. #include <linux/of_pci.h>
  34. #include <linux/export.h>
  35. #include <asm/processor.h>
  36. #include <linux/io.h>
  37. #include <asm/pci-bridge.h>
  38. #include <asm/byteorder.h>
/* Protects hose_list and global_phb_number below. */
static DEFINE_SPINLOCK(hose_spinlock);
/* List of every registered PCI host controller (hose/PHB). */
LIST_HEAD(hose_list);

/* XXX kill that some day ... */
static int global_phb_number;	/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;

/* Virtual base of the primary hose's IO window (set while parsing ranges). */
unsigned long isa_io_base;
EXPORT_SYMBOL(isa_io_base);

/* NOTE(review): not referenced anywhere in this chunk — confirm a later
 * part of the file uses it before removing. */
static int pci_bus_count;
  48. struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
  49. {
  50. struct pci_controller *phb;
  51. phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
  52. if (!phb)
  53. return NULL;
  54. spin_lock(&hose_spinlock);
  55. phb->global_number = global_phb_number++;
  56. list_add_tail(&phb->list_node, &hose_list);
  57. spin_unlock(&hose_spinlock);
  58. phb->dn = dev;
  59. phb->is_dynamic = mem_init_done;
  60. return phb;
  61. }
  62. void pcibios_free_controller(struct pci_controller *phb)
  63. {
  64. spin_lock(&hose_spinlock);
  65. list_del(&phb->list_node);
  66. spin_unlock(&hose_spinlock);
  67. if (phb->is_dynamic)
  68. kfree(phb);
  69. }
  70. static resource_size_t pcibios_io_size(const struct pci_controller *hose)
  71. {
  72. return resource_size(&hose->io_resource);
  73. }
  74. int pcibios_vaddr_is_ioport(void __iomem *address)
  75. {
  76. int ret = 0;
  77. struct pci_controller *hose;
  78. resource_size_t size;
  79. spin_lock(&hose_spinlock);
  80. list_for_each_entry(hose, &hose_list, list_node) {
  81. size = pcibios_io_size(hose);
  82. if (address >= hose->io_base_virt &&
  83. address < (hose->io_base_virt + size)) {
  84. ret = 1;
  85. break;
  86. }
  87. }
  88. spin_unlock(&hose_spinlock);
  89. return ret;
  90. }
  91. unsigned long pci_address_to_pio(phys_addr_t address)
  92. {
  93. struct pci_controller *hose;
  94. resource_size_t size;
  95. unsigned long ret = ~0;
  96. spin_lock(&hose_spinlock);
  97. list_for_each_entry(hose, &hose_list, list_node) {
  98. size = pcibios_io_size(hose);
  99. if (address >= hose->io_base_phys &&
  100. address < (hose->io_base_phys + size)) {
  101. unsigned long base =
  102. (unsigned long)hose->io_base_virt - _IO_BASE;
  103. ret = base + (address - hose->io_base_phys);
  104. break;
  105. }
  106. }
  107. spin_unlock(&hose_spinlock);
  108. return ret;
  109. }
  110. EXPORT_SYMBOL_GPL(pci_address_to_pio);
/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 *
 * Walks up the device-tree parent chain from @node looking for a node
 * that matches a registered host controller; returns that controller
 * or NULL if none is found.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		/* NOTE(review): hose_list is walked without taking
		 * hose_spinlock here — presumably safe because callers run
		 * at boot before controllers can disappear; confirm. */
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}
/* Arch hook invoked when a device is made bus master; this platform
 * needs no additional setup beyond what the PCI core already does. */
void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
  133. /*
  134. * Platform support for /proc/bus/pci/X/Y mmap()s.
  135. */
  136. int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
  137. {
  138. struct pci_controller *hose = pci_bus_to_host(pdev->bus);
  139. resource_size_t ioaddr = pci_resource_start(pdev, bar);
  140. if (!hose)
  141. return -EINVAL; /* should never happen */
  142. /* Convert to an offset within this PCI controller */
  143. ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;
  144. vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
  145. return 0;
  146. }
/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine
 *
 * Returns the page protection to use when mapping the physical page
 * @pfn: RAM keeps @prot unchanged, PCI prefetchable memory gets
 * write-combining, anything else gets plain non-cached.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	/* Regular RAM is mapped with the caller-supplied protection. */
	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	/* Search every PCI device for a MEM resource containing offset. */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;
			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* Breaking out of for_each_pci_dev() leaves us holding a
		 * reference on pdev; drop it here. */
		pci_dev_put(pdev);
	}
	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));
	return prot;
}
  190. /* This provides legacy IO read access on a bus */
  191. int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
  192. {
  193. unsigned long offset;
  194. struct pci_controller *hose = pci_bus_to_host(bus);
  195. struct resource *rp = &hose->io_resource;
  196. void __iomem *addr;
  197. /* Check if port can be supported by that bus. We only check
  198. * the ranges of the PHB though, not the bus itself as the rules
  199. * for forwarding legacy cycles down bridges are not our problem
  200. * here. So if the host bridge supports it, we do it.
  201. */
  202. offset = (unsigned long)hose->io_base_virt - _IO_BASE;
  203. offset += port;
  204. if (!(rp->flags & IORESOURCE_IO))
  205. return -ENXIO;
  206. if (offset < rp->start || (offset + size) > rp->end)
  207. return -ENXIO;
  208. addr = hose->io_base_virt + port;
  209. switch (size) {
  210. case 1:
  211. *((u8 *)val) = in_8(addr);
  212. return 1;
  213. case 2:
  214. if (port & 1)
  215. return -EINVAL;
  216. *((u16 *)val) = in_le16(addr);
  217. return 2;
  218. case 4:
  219. if (port & 3)
  220. return -EINVAL;
  221. *((u32 *)val) = in_le32(addr);
  222. return 4;
  223. }
  224. return -EINVAL;
  225. }
  226. /* This provides legacy IO write access on a bus */
  227. int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
  228. {
  229. unsigned long offset;
  230. struct pci_controller *hose = pci_bus_to_host(bus);
  231. struct resource *rp = &hose->io_resource;
  232. void __iomem *addr;
  233. /* Check if port can be supported by that bus. We only check
  234. * the ranges of the PHB though, not the bus itself as the rules
  235. * for forwarding legacy cycles down bridges are not our problem
  236. * here. So if the host bridge supports it, we do it.
  237. */
  238. offset = (unsigned long)hose->io_base_virt - _IO_BASE;
  239. offset += port;
  240. if (!(rp->flags & IORESOURCE_IO))
  241. return -ENXIO;
  242. if (offset < rp->start || (offset + size) > rp->end)
  243. return -ENXIO;
  244. addr = hose->io_base_virt + port;
  245. /* WARNING: The generic code is idiotic. It gets passed a pointer
  246. * to what can be a 1, 2 or 4 byte quantity and always reads that
  247. * as a u32, which means that we have to correct the location of
  248. * the data read within those 32 bits for size 1 and 2
  249. */
  250. switch (size) {
  251. case 1:
  252. out_8(addr, val >> 24);
  253. return 1;
  254. case 2:
  255. if (port & 1)
  256. return -EINVAL;
  257. out_le16(addr, val >> 16);
  258. return 2;
  259. case 4:
  260. if (port & 3)
  261. return -EINVAL;
  262. out_le32(addr, val);
  263. return 4;
  264. }
  265. return -EINVAL;
  266. }
/* This provides legacy IO or memory mmap access on a bus
 *
 * Validates the request against the hose's legacy windows, rewrites
 * vma->vm_pgoff to the corresponding CPU physical page and remaps it
 * non-cached.  Out-of-range legacy_mem requests are faked with
 * anonymous zero pages rather than failing (see hack note below).
 */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an error
		 * trying to mmap legacy_mem (instead of just moving on without
		 * legacy memory access) we fake it here by giving it anonymous
		 * memory, effectively behaving just like /dev/zero
		 */
		if ((offset + size) > hose->isa_mem_size) {
#ifdef CONFIG_MMU
			pr_debug("Process %s (pid:%d) mapped non-existing PCI",
				 current->comm, current->pid);
			pr_debug("legacy memory for 0%04x:%02x\n",
				 pci_domain_nr(bus), bus->number);
#endif
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		/* IO: translate the port offset into the hose's IO window
		 * and verify it lies inside the IO resource. */
		unsigned long io_offset = (unsigned long)hose->io_base_virt -
			_IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
  320. void pci_resource_to_user(const struct pci_dev *dev, int bar,
  321. const struct resource *rsrc,
  322. resource_size_t *start, resource_size_t *end)
  323. {
  324. struct pci_bus_region region;
  325. if (rsrc->flags & IORESOURCE_IO) {
  326. pcibios_resource_to_bus(dev->bus, &region,
  327. (struct resource *) rsrc);
  328. *start = region.start;
  329. *end = region.end;
  330. return;
  331. }
  332. /* We pass a CPU physical address to userland for MMIO instead of a
  333. * BAR value because X is lame and expects to be able to use that
  334. * to pass to /dev/mem!
  335. *
  336. * That means we may have 64-bit values where some apps only expect
  337. * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
  338. */
  339. *start = rsrc->start;
  340. *end = rsrc->end;
  341. }
/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 * - We can only cope with one IO space range and up to 3 Memory space
 * ranges. However, some machines (thanks Apple !) tend to split their
 * space into lots of small contiguous ranges. So we have to coalesce.
 *
 * - We can only cope with all memory ranges having the same offset
 * between CPU addresses and PCI addresses. Unfortunately, some bridges
 * are setup for a large 1:1 mapping along with a small "window" which
 * maps PCI address 0 to some arbitrary high address of the CPU space in
 * order to give access to the ISA memory hole.
 * The way out of here that I've chosen for now is to always set the
 * offset based on the first resource found, then override it if we
 * have a different offset and the previous was set by an ISA hole.
 *
 * - Some busses have IO space not starting at 0, which causes trouble with
 * the way we do our IO resource renumbering. The code somewhat deals with
 * it for 64 bits but I would expect problems on 32 bits.
 *
 * - Some 32 bits platforms such as 4xx can have physical space larger than
 * 32 bits so we need to use 64 bits values for the parsing
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0, isa_hole = -1;
	unsigned long long isa_mb = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	pr_info("PCI host bridge %pOF %s ranges:\n",
		dev, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	pr_debug("Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
			 range.pci_space, range.pci_addr);
		pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
			 range.cpu_addr, range.size);

		/* If we failed translation or got a zero-sized region
		 * (some FW try to feed us with non sensical zero sized regions
		 * such as power3 which look like some kind of attempt
		 * at exposing the VGA memory hole)
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						     range.size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			/* The IO resource is expressed in PCI (port)
			 * space, hence the rewrite of cpu_addr below. */
			range.cpu_addr = range.pci_addr;
			break;
		case IORESOURCE_MEM:
			pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr,
				(range.pci_space & 0x40000000) ?
				"Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (range.pci_addr == 0) {
				isa_mb = range.cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* We get the PCI/Mem offset from the first range or
			 * the, current one if the offset came from an ISA
			 * hole. If they don't match, bugger.
			 */
			if (memno == 0 ||
			    (isa_hole >= 0 && range.pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = range.cpu_addr -
							range.pci_addr;
			else if (range.pci_addr != 0 &&
				 hose->pci_mem_offset != range.cpu_addr -
							range.pci_addr) {
				pr_info(" \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			/* Build resource */
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}

	/* If there's an ISA hole and the pci_mem_offset is -not- matching
	 * the ISA hole offset, then we need to remove the ISA hole from
	 * the resource list for that bridge
	 */
	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
		unsigned int next = isa_hole + 1;
		pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb);
		/* Shift the remaining entries down over the hole. */
		if (next < memno)
			memmove(&hose->mem_resources[isa_hole],
				&hose->mem_resources[next],
				sizeof(struct resource) * (memno - next));
		hose->mem_resources[--memno].flags = 0;
	}
}
  494. /* Display the domain number in /proc */
  495. int pci_proc_domain(struct pci_bus *bus)
  496. {
  497. return pci_domain_nr(bus);
  498. }
/* This header fixup will do the resource fixup for all devices as they are
 * probed, but not for bridge ranges
 *
 * Resources still at address 0 are treated as unassigned: they are
 * converted to a zero-based [0, size] range and flagged
 * IORESOURCE_UNSET so the allocation pass assigns them later.
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		pr_err("No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		if (res->start == 0) {
			pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]",
				 pci_name(dev), i,
				 (unsigned long long)res->start,
				 (unsigned long long)res->end,
				 (unsigned int)res->flags);
			pr_debug("is unassigned\n");
			/* Keep the size, rebase to 0, mark for later
			 * reassignment. */
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}
		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
  535. int pcibios_add_device(struct pci_dev *dev)
  536. {
  537. dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
  538. return 0;
  539. }
  540. EXPORT_SYMBOL(pcibios_add_device);
/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 *
 * Returns 0 on success, -1 when a conflicting child is only partially
 * contained in @res (can't reparent) or when no child conflicts at all.
 */
static int __init reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Scan the (sorted) sibling list: remember where the first
	 * conflicting child lives (firstpp) and stop at the first child
	 * past the end of res (pp). */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */

	/* Splice res into parent's child list in place of the run
	 * [*firstpp .. *pp), and hang that run off res->child. */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}
/*
 * Handle resources of PCI devices. If the world were perfect, we could
 * just allocate all the resource regions and do nothing more. It isn't.
 * On the other hand, we cannot just re-allocate all devices, as it would
 * require us to know lots of host bridge internals. So we attempt to
 * keep as much of the original configuration as possible, but tweak it
 * when it's found to be wrong.
 *
 * Known BIOS problems we have to work around:
 * - I/O or memory regions not configured
 * - regions configured, but not enabled in the command register
 * - bogus I/O addresses above 64K used
 * - expansion ROMs left enabled (this may sound harmless, but given
 * the fact the PCI specs explicitly allow address decoders to be
 * shared between expansion ROMs and other resource regions, it's
 * at least dangerous)
 *
 * Our solution:
 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
 * This gives us fixed barriers on where we can allocate.
 * (2) Allocate resources for all enabled devices. If there is
 * a collision, just mark the resource as unallocated. Also
 * disable expansion ROMs during this step.
 * (3) Try to allocate resources for disabled devices. If the
 * resources were assigned correctly, everything goes well,
 * if they weren't, they won't disturb allocation of other
 * resources.
 * (4) Assign new addresses to resources which were either
 * not configured at all or misconfigured. If explicitly
 * requested by the user, configure expansion ROM address
 * as well.
 */
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		/* Skip absent, unset, inverted or already-claimed windows. */
		if (!res || !res->flags
		    || res->start > res->end || res->parent)
			continue;
		/* Root bus windows are claimed directly from the global
		 * ioport/iomem trees; bridge windows from their parent. */
		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			/* Don't bother with non-root busses when
			 * re-assigning all resources. We clear the
			 * resource flags as if they were colliding
			 * and as such ensure proper re-allocation
			 * later.
			 */
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* this happens when the generic PCI
				 * code (wrongly) decides that this
				 * bridge is transparent -- paulus
				 */
				continue;
			}
		}
		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end);
		pr_debug("[0x%x], parent %p (%s)\n",
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");
		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;
			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;
			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
						i + PCI_BRIDGE_RESOURCES) == 0)
				continue;
		}
		/* Give up: clear the window so it gets re-assigned later. */
		pr_warn("PCI: Cannot allocate resource region ");
		pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
		res->start = res->end = 0;
		res->flags = 0;
	}
	/* Recurse into subordinate buses. */
	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}
/* Try to claim one device BAR under its parent resource; on failure,
 * mark it IORESOURCE_UNSET and rebase to [0, size] so the later
 * assignment pass gives it a fresh address. */
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
		 pci_name(dev), idx,
		 (unsigned long long)r->start,
		 (unsigned long long)r->end,
		 (unsigned int)r->flags);
	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		pr_warn("PCI: Cannot allocate resource region %d ", idx);
		pr_cont("of device %s, will remap\n", pci_name(dev));
		if (pr)
			pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
				 pr,
				 (unsigned long long)pr->start,
				 (unsigned long long)pr->end,
				 (unsigned int)pr->flags);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
  695. static void __init pcibios_allocate_resources(int pass)
  696. {
  697. struct pci_dev *dev = NULL;
  698. int idx, disabled;
  699. u16 command;
  700. struct resource *r;
  701. for_each_pci_dev(dev) {
  702. pci_read_config_word(dev, PCI_COMMAND, &command);
  703. for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
  704. r = &dev->resource[idx];
  705. if (r->parent) /* Already allocated */
  706. continue;
  707. if (!r->flags || (r->flags & IORESOURCE_UNSET))
  708. continue; /* Not assigned at all */
  709. /* We only allocate ROMs on pass 1 just in case they
  710. * have been screwed up by firmware
  711. */
  712. if (idx == PCI_ROM_RESOURCE)
  713. disabled = 1;
  714. if (r->flags & IORESOURCE_IO)
  715. disabled = !(command & PCI_COMMAND_IO);
  716. else
  717. disabled = !(command & PCI_COMMAND_MEMORY);
  718. if (pass == disabled)
  719. alloc_resource(dev, idx);
  720. }
  721. if (pass)
  722. continue;
  723. r = &dev->resource[PCI_ROM_RESOURCE];
  724. if (r->flags) {
  725. /* Turn the ROM off, leave the resource region,
  726. * but keep it unregistered.
  727. */
  728. u32 reg;
  729. pci_read_config_dword(dev, dev->rom_base_reg, &reg);
  730. if (reg & PCI_ROM_ADDRESS_ENABLE) {
  731. pr_debug("PCI: Switching off ROM of %s\n",
  732. pci_name(dev));
  733. r->flags &= ~IORESOURCE_ROM_ENABLE;
  734. pci_write_config_dword(dev, dev->rom_base_reg,
  735. reg & ~PCI_ROM_ADDRESS_ENABLE);
  736. }
  737. }
  738. }
  739. }
/* Reserve the legacy ISA IO window (first 4K of IO space) and the
 * legacy VGA memory range (0xa0000-0xbffff) on a root bus, so the
 * later assignment pass cannot place device BARs on top of them. */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n",
		 pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	/* First 4K page of IO space. */
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	offset = hose->pci_mem_offset;
	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
	/* Find a memory window covering the whole VGA range. */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
  792. void __init pcibios_resource_survey(void)
  793. {
  794. struct pci_bus *b;
  795. /* Allocate and assign resources. If we re-assign everything, then
  796. * we skip the allocate phase
  797. */
  798. list_for_each_entry(b, &pci_root_buses, node)
  799. pcibios_allocate_bus_resources(b);
  800. pcibios_allocate_resources(0);
  801. pcibios_allocate_resources(1);
  802. /* Before we start assigning unassigned resource, we try to reserve
  803. * the low IO area and the VGA memory area if they intersect the
  804. * bus available resources to avoid allocating things on top of them
  805. */
  806. list_for_each_entry(b, &pci_root_buses, node)
  807. pcibios_reserve_legacy_regions(b);
  808. /* Now proceed to assigning things that were left unassigned */
  809. pr_debug("PCI: Assigning unassigned resources...\n");
  810. pci_assign_unassigned_resources();
  811. }
/*
 * Translate a host controller's IO and memory windows into the
 * resource list handed to the root-bus scan. A missing window is
 * patched with a 32-bit fallback range instead of failing the scan.
 * Note: mutates hose->io_resource / hose->mem_resources[] in place.
 */
static void pcibios_setup_phb_resources(struct pci_controller *hose,
	struct list_head *resources)
{
	unsigned long io_offset;
	struct resource *res;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	/* Fixup IO space offset: rebase into the CPU IO address space,
	 * truncated to 32 bits.
	 */
	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	res->start = (res->start + io_offset) & 0xffffffffu;
	res->end = (res->end + io_offset) & 0xffffffffu;

	if (!res->flags) {
		pr_warn("PCI: I/O resource not set for host ");
		pr_cont("bridge %pOF (domain %d)\n",
			hose->dn, hose->global_number);
		/* Workaround for lack of IO resource only on 32-bit */
		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
		res->end = res->start + IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
	}
	/* bus address = resource address - (io_base_virt - _IO_BASE) */
	pci_add_resource_offset(resources, res,
		(__force resource_size_t)(hose->io_base_virt - _IO_BASE));

	pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
		 (unsigned long long)res->start,
		 (unsigned long long)res->end,
		 (unsigned long)res->flags);

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i > 0)
				continue;	/* only window 0 is mandatory */
			pr_err("PCI: Memory resource 0 not set for ");
			pr_cont("host bridge %pOF (domain %d)\n",
				hose->dn, hose->global_number);

			/* Workaround for lack of MEM resource only on 32-bit */
			res->start = hose->pci_mem_offset;
			res->end = (resource_size_t)-1LL;
			res->flags = IORESOURCE_MEM;
		}
		pci_add_resource_offset(resources, res, hose->pci_mem_offset);

		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
			 i, (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags);
	}

	pr_debug("PCI: PHB MEM offset = %016llx\n",
		 (unsigned long long)hose->pci_mem_offset);
	pr_debug("PCI: PHB IO offset = %08lx\n",
		 (unsigned long)hose->io_base_virt - _IO_BASE);
}
  864. static void pcibios_scan_phb(struct pci_controller *hose)
  865. {
  866. LIST_HEAD(resources);
  867. struct pci_bus *bus;
  868. struct device_node *node = hose->dn;
  869. pr_debug("PCI: Scanning PHB %pOF\n", node);
  870. pcibios_setup_phb_resources(hose, &resources);
  871. bus = pci_scan_root_bus(hose->parent, hose->first_busno,
  872. hose->ops, hose, &resources);
  873. if (bus == NULL) {
  874. pr_err("Failed to create bus for PCI domain %04x\n",
  875. hose->global_number);
  876. pci_free_resource_list(&resources);
  877. return;
  878. }
  879. bus->busn_res.start = hose->first_busno;
  880. hose->bus = bus;
  881. hose->last_busno = bus->busn_res.end;
  882. }
  883. static int __init pcibios_init(void)
  884. {
  885. struct pci_controller *hose, *tmp;
  886. int next_busno = 0;
  887. pr_info("PCI: Probing PCI hardware\n");
  888. /* Scan all of the recorded PCI controllers. */
  889. list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
  890. hose->last_busno = 0xff;
  891. pcibios_scan_phb(hose);
  892. if (next_busno <= hose->last_busno)
  893. next_busno = hose->last_busno + 1;
  894. }
  895. pci_bus_count = next_busno;
  896. /* Call common code to handle resource allocation */
  897. pcibios_resource_survey();
  898. list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
  899. if (hose->bus)
  900. pci_bus_add_devices(hose->bus);
  901. }
  902. return 0;
  903. }
  904. subsys_initcall(pcibios_init);
  905. static struct pci_controller *pci_bus_to_hose(int bus)
  906. {
  907. struct pci_controller *hose, *tmp;
  908. list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
  909. if (bus >= hose->first_busno && bus <= hose->last_busno)
  910. return hose;
  911. return NULL;
  912. }
  913. /* Provide information on locations of various I/O regions in physical
  914. * memory. Do this on a per-card basis so that we choose the right
  915. * root bridge.
  916. * Note that the returned IO or memory base is a physical address
  917. */
  918. long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
  919. {
  920. struct pci_controller *hose;
  921. long result = -EOPNOTSUPP;
  922. hose = pci_bus_to_hose(bus);
  923. if (!hose)
  924. return -ENODEV;
  925. switch (which) {
  926. case IOBASE_BRIDGE_NUMBER:
  927. return (long)hose->first_busno;
  928. case IOBASE_MEMORY:
  929. return (long)hose->pci_mem_offset;
  930. case IOBASE_IO:
  931. return (long)hose->io_base_phys;
  932. case IOBASE_ISA_IO:
  933. return (long)isa_io_base;
  934. case IOBASE_ISA_MEM:
  935. return (long)isa_mem_base;
  936. }
  937. return result;
  938. }
  939. /*
  940. * Null PCI config access functions, for the case when we can't
  941. * find a hose.
  942. */
  943. #define NULL_PCI_OP(rw, size, type) \
  944. static int \
  945. null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
  946. { \
  947. return PCIBIOS_DEVICE_NOT_FOUND; \
  948. }
  949. static int
  950. null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
  951. int len, u32 *val)
  952. {
  953. return PCIBIOS_DEVICE_NOT_FOUND;
  954. }
  955. static int
  956. null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
  957. int len, u32 val)
  958. {
  959. return PCIBIOS_DEVICE_NOT_FOUND;
  960. }
/* Fallback config accessors for buses with no host controller attached. */
static struct pci_ops null_pci_ops = {
	.read = null_read_config,
	.write = null_write_config,
};
  965. /*
  966. * These functions are used early on before PCI scanning is done
  967. * and all of the pci_dev and pci_bus structures have been created.
  968. */
  969. static struct pci_bus *
  970. fake_pci_bus(struct pci_controller *hose, int busnr)
  971. {
  972. static struct pci_bus bus;
  973. if (!hose)
  974. pr_err("Can't find hose for PCI bus %d!\n", busnr);
  975. bus.number = busnr;
  976. bus.sysdata = hose;
  977. bus.ops = hose ? hose->ops : &null_pci_ops;
  978. return &bus;
  979. }
/* Early config accessors: route each access through a fake bus so the
 * generic pci_bus_{read,write}_config_* helpers work before scanning.
 */
#define EARLY_PCI_OP(rw, size, type) \
int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
			       int devfn, int offset, type value) \
{ \
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
					    devfn, offset, value); \
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
/* Capability lookup usable before the real pci_bus structures exist. */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}